Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- evalkit_internvl/lib/python3.10/site-packages/sympy/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/sympy/__pycache__/abc.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/sympy/__pycache__/conftest.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/sympy/__pycache__/galgebra.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/sympy/__pycache__/release.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/sympy/__pycache__/this.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/__pycache__/diffgeom.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/__pycache__/rn.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_class_structure.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_diffgeom.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_function_diffgeom_book.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_hyperbolic_space.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/test_diffgeom.py +342 -0
- evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/test_function_diffgeom_book.py +145 -0
- evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/test_hyperbolic_space.py +91 -0
- evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/__init__.py +23 -0
- evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/functions.py +154 -0
- evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/index_methods.py +469 -0
- evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/indexed.py +793 -0
- evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/tensor.py +0 -0
- evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/tests/test_printing.py +13 -0
- evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/toperators.py +256 -0
- evalkit_tf437/lib/python3.10/site-packages/accelerate/__init__.py +33 -0
- evalkit_tf437/lib/python3.10/site-packages/accelerate/accelerator.py +0 -0
- evalkit_tf437/lib/python3.10/site-packages/accelerate/hooks.py +597 -0
- evalkit_tf437/lib/python3.10/site-packages/accelerate/logging.py +112 -0
- evalkit_tf437/lib/python3.10/site-packages/accelerate/memory_utils.py +22 -0
- evalkit_tf437/lib/python3.10/site-packages/accelerate/optimizer.py +187 -0
- evalkit_tf437/lib/python3.10/site-packages/accelerate/state.py +1046 -0
- evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/__init__.py +24 -0
- evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/__init__.py +0 -0
- evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_notebook.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_script.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_sync.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__init__.py +0 -0
- evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_peak_memory_usage.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_checkpointing.py +269 -0
- evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_metrics.py +266 -0
- evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py +277 -0
- evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_performance.py +231 -0
- evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_cli.py +13 -0
- evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_distributed_data_loop.py +238 -0
- evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_notebook.py +17 -0
- evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_ops.py +159 -0
- evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_script.py +616 -0
- evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_sync.py +367 -0
- evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/testing.py +452 -0
- evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/training.py +101 -0
evalkit_internvl/lib/python3.10/site-packages/sympy/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (28.3 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/sympy/__pycache__/abc.cpython-310.pyc
ADDED
|
Binary file (3.52 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/sympy/__pycache__/conftest.cpython-310.pyc
ADDED
|
Binary file (2.94 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/sympy/__pycache__/galgebra.cpython-310.pyc
ADDED
|
Binary file (279 Bytes). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/sympy/__pycache__/release.cpython-310.pyc
ADDED
|
Binary file (188 Bytes). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/sympy/__pycache__/this.cpython-310.pyc
ADDED
|
Binary file (727 Bytes). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (856 Bytes). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/__pycache__/diffgeom.cpython-310.pyc
ADDED
|
Binary file (76.3 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/__pycache__/rn.cpython-310.pyc
ADDED
|
Binary file (3.64 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (182 Bytes). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_class_structure.cpython-310.pyc
ADDED
|
Binary file (1.47 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_diffgeom.cpython-310.pyc
ADDED
|
Binary file (13.5 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_function_diffgeom_book.cpython-310.pyc
ADDED
|
Binary file (4.97 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_hyperbolic_space.cpython-310.pyc
ADDED
|
Binary file (2.74 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/test_diffgeom.py
ADDED
|
@@ -0,0 +1,342 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from sympy.core import Lambda, Symbol, symbols
|
| 2 |
+
from sympy.diffgeom.rn import R2, R2_p, R2_r, R3_r, R3_c, R3_s, R2_origin
|
| 3 |
+
from sympy.diffgeom import (Manifold, Patch, CoordSystem, Commutator, Differential, TensorProduct,
|
| 4 |
+
WedgeProduct, BaseCovarDerivativeOp, CovarDerivativeOp, LieDerivative,
|
| 5 |
+
covariant_order, contravariant_order, twoform_to_matrix, metric_to_Christoffel_1st,
|
| 6 |
+
metric_to_Christoffel_2nd, metric_to_Riemann_components,
|
| 7 |
+
metric_to_Ricci_components, intcurve_diffequ, intcurve_series)
|
| 8 |
+
from sympy.simplify import trigsimp, simplify
|
| 9 |
+
from sympy.functions import sqrt, atan2, sin
|
| 10 |
+
from sympy.matrices import Matrix
|
| 11 |
+
from sympy.testing.pytest import raises, nocache_fail
|
| 12 |
+
from sympy.testing.pytest import warns_deprecated_sympy
|
| 13 |
+
|
| 14 |
+
TP = TensorProduct
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def test_coordsys_transform():
|
| 18 |
+
# test inverse transforms
|
| 19 |
+
p, q, r, s = symbols('p q r s')
|
| 20 |
+
rel = {('first', 'second'): [(p, q), (q, -p)]}
|
| 21 |
+
R2_pq = CoordSystem('first', R2_origin, [p, q], rel)
|
| 22 |
+
R2_rs = CoordSystem('second', R2_origin, [r, s], rel)
|
| 23 |
+
r, s = R2_rs.symbols
|
| 24 |
+
assert R2_rs.transform(R2_pq) == Matrix([[-s], [r]])
|
| 25 |
+
|
| 26 |
+
# inverse transform impossible case
|
| 27 |
+
a, b = symbols('a b', positive=True)
|
| 28 |
+
rel = {('first', 'second'): [(a,), (-a,)]}
|
| 29 |
+
R2_a = CoordSystem('first', R2_origin, [a], rel)
|
| 30 |
+
R2_b = CoordSystem('second', R2_origin, [b], rel)
|
| 31 |
+
# This transformation is uninvertible because there is no positive a, b satisfying a = -b
|
| 32 |
+
with raises(NotImplementedError):
|
| 33 |
+
R2_b.transform(R2_a)
|
| 34 |
+
|
| 35 |
+
# inverse transform ambiguous case
|
| 36 |
+
c, d = symbols('c d')
|
| 37 |
+
rel = {('first', 'second'): [(c,), (c**2,)]}
|
| 38 |
+
R2_c = CoordSystem('first', R2_origin, [c], rel)
|
| 39 |
+
R2_d = CoordSystem('second', R2_origin, [d], rel)
|
| 40 |
+
# The transform method should throw if it finds multiple inverses for a coordinate transformation.
|
| 41 |
+
with raises(ValueError):
|
| 42 |
+
R2_d.transform(R2_c)
|
| 43 |
+
|
| 44 |
+
# test indirect transformation
|
| 45 |
+
a, b, c, d, e, f = symbols('a, b, c, d, e, f')
|
| 46 |
+
rel = {('C1', 'C2'): [(a, b), (2*a, 3*b)],
|
| 47 |
+
('C2', 'C3'): [(c, d), (3*c, 2*d)]}
|
| 48 |
+
C1 = CoordSystem('C1', R2_origin, (a, b), rel)
|
| 49 |
+
C2 = CoordSystem('C2', R2_origin, (c, d), rel)
|
| 50 |
+
C3 = CoordSystem('C3', R2_origin, (e, f), rel)
|
| 51 |
+
a, b = C1.symbols
|
| 52 |
+
c, d = C2.symbols
|
| 53 |
+
e, f = C3.symbols
|
| 54 |
+
assert C2.transform(C1) == Matrix([c/2, d/3])
|
| 55 |
+
assert C1.transform(C3) == Matrix([6*a, 6*b])
|
| 56 |
+
assert C3.transform(C1) == Matrix([e/6, f/6])
|
| 57 |
+
assert C3.transform(C2) == Matrix([e/3, f/2])
|
| 58 |
+
|
| 59 |
+
a, b, c, d, e, f = symbols('a, b, c, d, e, f')
|
| 60 |
+
rel = {('C1', 'C2'): [(a, b), (2*a, 3*b + 1)],
|
| 61 |
+
('C3', 'C2'): [(e, f), (-e - 2, 2*f)]}
|
| 62 |
+
C1 = CoordSystem('C1', R2_origin, (a, b), rel)
|
| 63 |
+
C2 = CoordSystem('C2', R2_origin, (c, d), rel)
|
| 64 |
+
C3 = CoordSystem('C3', R2_origin, (e, f), rel)
|
| 65 |
+
a, b = C1.symbols
|
| 66 |
+
c, d = C2.symbols
|
| 67 |
+
e, f = C3.symbols
|
| 68 |
+
assert C2.transform(C1) == Matrix([c/2, (d - 1)/3])
|
| 69 |
+
assert C1.transform(C3) == Matrix([-2*a - 2, (3*b + 1)/2])
|
| 70 |
+
assert C3.transform(C1) == Matrix([-e/2 - 1, (2*f - 1)/3])
|
| 71 |
+
assert C3.transform(C2) == Matrix([-e - 2, 2*f])
|
| 72 |
+
|
| 73 |
+
# old signature uses Lambda
|
| 74 |
+
a, b, c, d, e, f = symbols('a, b, c, d, e, f')
|
| 75 |
+
rel = {('C1', 'C2'): Lambda((a, b), (2*a, 3*b + 1)),
|
| 76 |
+
('C3', 'C2'): Lambda((e, f), (-e - 2, 2*f))}
|
| 77 |
+
C1 = CoordSystem('C1', R2_origin, (a, b), rel)
|
| 78 |
+
C2 = CoordSystem('C2', R2_origin, (c, d), rel)
|
| 79 |
+
C3 = CoordSystem('C3', R2_origin, (e, f), rel)
|
| 80 |
+
a, b = C1.symbols
|
| 81 |
+
c, d = C2.symbols
|
| 82 |
+
e, f = C3.symbols
|
| 83 |
+
assert C2.transform(C1) == Matrix([c/2, (d - 1)/3])
|
| 84 |
+
assert C1.transform(C3) == Matrix([-2*a - 2, (3*b + 1)/2])
|
| 85 |
+
assert C3.transform(C1) == Matrix([-e/2 - 1, (2*f - 1)/3])
|
| 86 |
+
assert C3.transform(C2) == Matrix([-e - 2, 2*f])
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def test_R2():
|
| 90 |
+
x0, y0, r0, theta0 = symbols('x0, y0, r0, theta0', real=True)
|
| 91 |
+
point_r = R2_r.point([x0, y0])
|
| 92 |
+
point_p = R2_p.point([r0, theta0])
|
| 93 |
+
|
| 94 |
+
# r**2 = x**2 + y**2
|
| 95 |
+
assert (R2.r**2 - R2.x**2 - R2.y**2).rcall(point_r) == 0
|
| 96 |
+
assert trigsimp( (R2.r**2 - R2.x**2 - R2.y**2).rcall(point_p) ) == 0
|
| 97 |
+
assert trigsimp(R2.e_r(R2.x**2 + R2.y**2).rcall(point_p).doit()) == 2*r0
|
| 98 |
+
|
| 99 |
+
# polar->rect->polar == Id
|
| 100 |
+
a, b = symbols('a b', positive=True)
|
| 101 |
+
m = Matrix([[a], [b]])
|
| 102 |
+
|
| 103 |
+
#TODO assert m == R2_r.transform(R2_p, R2_p.transform(R2_r, [a, b])).applyfunc(simplify)
|
| 104 |
+
assert m == R2_p.transform(R2_r, R2_r.transform(R2_p, m)).applyfunc(simplify)
|
| 105 |
+
|
| 106 |
+
# deprecated method
|
| 107 |
+
with warns_deprecated_sympy():
|
| 108 |
+
assert m == R2_p.coord_tuple_transform_to(
|
| 109 |
+
R2_r, R2_r.coord_tuple_transform_to(R2_p, m)).applyfunc(simplify)
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
def test_R3():
|
| 113 |
+
a, b, c = symbols('a b c', positive=True)
|
| 114 |
+
m = Matrix([[a], [b], [c]])
|
| 115 |
+
|
| 116 |
+
assert m == R3_c.transform(R3_r, R3_r.transform(R3_c, m)).applyfunc(simplify)
|
| 117 |
+
#TODO assert m == R3_r.transform(R3_c, R3_c.transform(R3_r, m)).applyfunc(simplify)
|
| 118 |
+
assert m == R3_s.transform(
|
| 119 |
+
R3_r, R3_r.transform(R3_s, m)).applyfunc(simplify)
|
| 120 |
+
#TODO assert m == R3_r.transform(R3_s, R3_s.transform(R3_r, m)).applyfunc(simplify)
|
| 121 |
+
assert m == R3_s.transform(
|
| 122 |
+
R3_c, R3_c.transform(R3_s, m)).applyfunc(simplify)
|
| 123 |
+
#TODO assert m == R3_c.transform(R3_s, R3_s.transform(R3_c, m)).applyfunc(simplify)
|
| 124 |
+
|
| 125 |
+
with warns_deprecated_sympy():
|
| 126 |
+
assert m == R3_c.coord_tuple_transform_to(
|
| 127 |
+
R3_r, R3_r.coord_tuple_transform_to(R3_c, m)).applyfunc(simplify)
|
| 128 |
+
#TODO assert m == R3_r.coord_tuple_transform_to(R3_c, R3_c.coord_tuple_transform_to(R3_r, m)).applyfunc(simplify)
|
| 129 |
+
assert m == R3_s.coord_tuple_transform_to(
|
| 130 |
+
R3_r, R3_r.coord_tuple_transform_to(R3_s, m)).applyfunc(simplify)
|
| 131 |
+
#TODO assert m == R3_r.coord_tuple_transform_to(R3_s, R3_s.coord_tuple_transform_to(R3_r, m)).applyfunc(simplify)
|
| 132 |
+
assert m == R3_s.coord_tuple_transform_to(
|
| 133 |
+
R3_c, R3_c.coord_tuple_transform_to(R3_s, m)).applyfunc(simplify)
|
| 134 |
+
#TODO assert m == R3_c.coord_tuple_transform_to(R3_s, R3_s.coord_tuple_transform_to(R3_c, m)).applyfunc(simplify)
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
def test_CoordinateSymbol():
|
| 138 |
+
x, y = R2_r.symbols
|
| 139 |
+
r, theta = R2_p.symbols
|
| 140 |
+
assert y.rewrite(R2_p) == r*sin(theta)
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
def test_point():
|
| 144 |
+
x, y = symbols('x, y')
|
| 145 |
+
p = R2_r.point([x, y])
|
| 146 |
+
assert p.free_symbols == {x, y}
|
| 147 |
+
assert p.coords(R2_r) == p.coords() == Matrix([x, y])
|
| 148 |
+
assert p.coords(R2_p) == Matrix([sqrt(x**2 + y**2), atan2(y, x)])
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
def test_commutator():
|
| 152 |
+
assert Commutator(R2.e_x, R2.e_y) == 0
|
| 153 |
+
assert Commutator(R2.x*R2.e_x, R2.x*R2.e_x) == 0
|
| 154 |
+
assert Commutator(R2.x*R2.e_x, R2.x*R2.e_y) == R2.x*R2.e_y
|
| 155 |
+
c = Commutator(R2.e_x, R2.e_r)
|
| 156 |
+
assert c(R2.x) == R2.y*(R2.x**2 + R2.y**2)**(-1)*sin(R2.theta)
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
def test_differential():
|
| 160 |
+
xdy = R2.x*R2.dy
|
| 161 |
+
dxdy = Differential(xdy)
|
| 162 |
+
assert xdy.rcall(None) == xdy
|
| 163 |
+
assert dxdy(R2.e_x, R2.e_y) == 1
|
| 164 |
+
assert dxdy(R2.e_x, R2.x*R2.e_y) == R2.x
|
| 165 |
+
assert Differential(dxdy) == 0
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
def test_products():
|
| 169 |
+
assert TensorProduct(
|
| 170 |
+
R2.dx, R2.dy)(R2.e_x, R2.e_y) == R2.dx(R2.e_x)*R2.dy(R2.e_y) == 1
|
| 171 |
+
assert TensorProduct(R2.dx, R2.dy)(None, R2.e_y) == R2.dx
|
| 172 |
+
assert TensorProduct(R2.dx, R2.dy)(R2.e_x, None) == R2.dy
|
| 173 |
+
assert TensorProduct(R2.dx, R2.dy)(R2.e_x) == R2.dy
|
| 174 |
+
assert TensorProduct(R2.x, R2.dx) == R2.x*R2.dx
|
| 175 |
+
assert TensorProduct(
|
| 176 |
+
R2.e_x, R2.e_y)(R2.x, R2.y) == R2.e_x(R2.x) * R2.e_y(R2.y) == 1
|
| 177 |
+
assert TensorProduct(R2.e_x, R2.e_y)(None, R2.y) == R2.e_x
|
| 178 |
+
assert TensorProduct(R2.e_x, R2.e_y)(R2.x, None) == R2.e_y
|
| 179 |
+
assert TensorProduct(R2.e_x, R2.e_y)(R2.x) == R2.e_y
|
| 180 |
+
assert TensorProduct(R2.x, R2.e_x) == R2.x * R2.e_x
|
| 181 |
+
assert TensorProduct(
|
| 182 |
+
R2.dx, R2.e_y)(R2.e_x, R2.y) == R2.dx(R2.e_x) * R2.e_y(R2.y) == 1
|
| 183 |
+
assert TensorProduct(R2.dx, R2.e_y)(None, R2.y) == R2.dx
|
| 184 |
+
assert TensorProduct(R2.dx, R2.e_y)(R2.e_x, None) == R2.e_y
|
| 185 |
+
assert TensorProduct(R2.dx, R2.e_y)(R2.e_x) == R2.e_y
|
| 186 |
+
assert TensorProduct(R2.x, R2.e_x) == R2.x * R2.e_x
|
| 187 |
+
assert TensorProduct(
|
| 188 |
+
R2.e_x, R2.dy)(R2.x, R2.e_y) == R2.e_x(R2.x) * R2.dy(R2.e_y) == 1
|
| 189 |
+
assert TensorProduct(R2.e_x, R2.dy)(None, R2.e_y) == R2.e_x
|
| 190 |
+
assert TensorProduct(R2.e_x, R2.dy)(R2.x, None) == R2.dy
|
| 191 |
+
assert TensorProduct(R2.e_x, R2.dy)(R2.x) == R2.dy
|
| 192 |
+
assert TensorProduct(R2.e_y,R2.e_x)(R2.x**2 + R2.y**2,R2.x**2 + R2.y**2) == 4*R2.x*R2.y
|
| 193 |
+
|
| 194 |
+
assert WedgeProduct(R2.dx, R2.dy)(R2.e_x, R2.e_y) == 1
|
| 195 |
+
assert WedgeProduct(R2.e_x, R2.e_y)(R2.x, R2.y) == 1
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
def test_lie_derivative():
|
| 199 |
+
assert LieDerivative(R2.e_x, R2.y) == R2.e_x(R2.y) == 0
|
| 200 |
+
assert LieDerivative(R2.e_x, R2.x) == R2.e_x(R2.x) == 1
|
| 201 |
+
assert LieDerivative(R2.e_x, R2.e_x) == Commutator(R2.e_x, R2.e_x) == 0
|
| 202 |
+
assert LieDerivative(R2.e_x, R2.e_r) == Commutator(R2.e_x, R2.e_r)
|
| 203 |
+
assert LieDerivative(R2.e_x + R2.e_y, R2.x) == 1
|
| 204 |
+
assert LieDerivative(
|
| 205 |
+
R2.e_x, TensorProduct(R2.dx, R2.dy))(R2.e_x, R2.e_y) == 0
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
@nocache_fail
|
| 209 |
+
def test_covar_deriv():
|
| 210 |
+
ch = metric_to_Christoffel_2nd(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy))
|
| 211 |
+
cvd = BaseCovarDerivativeOp(R2_r, 0, ch)
|
| 212 |
+
assert cvd(R2.x) == 1
|
| 213 |
+
# This line fails if the cache is disabled:
|
| 214 |
+
assert cvd(R2.x*R2.e_x) == R2.e_x
|
| 215 |
+
cvd = CovarDerivativeOp(R2.x*R2.e_x, ch)
|
| 216 |
+
assert cvd(R2.x) == R2.x
|
| 217 |
+
assert cvd(R2.x*R2.e_x) == R2.x*R2.e_x
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
def test_intcurve_diffequ():
|
| 221 |
+
t = symbols('t')
|
| 222 |
+
start_point = R2_r.point([1, 0])
|
| 223 |
+
vector_field = -R2.y*R2.e_x + R2.x*R2.e_y
|
| 224 |
+
equations, init_cond = intcurve_diffequ(vector_field, t, start_point)
|
| 225 |
+
assert str(equations) == '[f_1(t) + Derivative(f_0(t), t), -f_0(t) + Derivative(f_1(t), t)]'
|
| 226 |
+
assert str(init_cond) == '[f_0(0) - 1, f_1(0)]'
|
| 227 |
+
equations, init_cond = intcurve_diffequ(vector_field, t, start_point, R2_p)
|
| 228 |
+
assert str(
|
| 229 |
+
equations) == '[Derivative(f_0(t), t), Derivative(f_1(t), t) - 1]'
|
| 230 |
+
assert str(init_cond) == '[f_0(0) - 1, f_1(0)]'
|
| 231 |
+
|
| 232 |
+
|
| 233 |
+
def test_helpers_and_coordinate_dependent():
|
| 234 |
+
one_form = R2.dr + R2.dx
|
| 235 |
+
two_form = Differential(R2.x*R2.dr + R2.r*R2.dx)
|
| 236 |
+
three_form = Differential(
|
| 237 |
+
R2.y*two_form) + Differential(R2.x*Differential(R2.r*R2.dr))
|
| 238 |
+
metric = TensorProduct(R2.dx, R2.dx) + TensorProduct(R2.dy, R2.dy)
|
| 239 |
+
metric_ambig = TensorProduct(R2.dx, R2.dx) + TensorProduct(R2.dr, R2.dr)
|
| 240 |
+
misform_a = TensorProduct(R2.dr, R2.dr) + R2.dr
|
| 241 |
+
misform_b = R2.dr**4
|
| 242 |
+
misform_c = R2.dx*R2.dy
|
| 243 |
+
twoform_not_sym = TensorProduct(R2.dx, R2.dx) + TensorProduct(R2.dx, R2.dy)
|
| 244 |
+
twoform_not_TP = WedgeProduct(R2.dx, R2.dy)
|
| 245 |
+
|
| 246 |
+
one_vector = R2.e_x + R2.e_y
|
| 247 |
+
two_vector = TensorProduct(R2.e_x, R2.e_y)
|
| 248 |
+
three_vector = TensorProduct(R2.e_x, R2.e_y, R2.e_x)
|
| 249 |
+
two_wp = WedgeProduct(R2.e_x,R2.e_y)
|
| 250 |
+
|
| 251 |
+
assert covariant_order(one_form) == 1
|
| 252 |
+
assert covariant_order(two_form) == 2
|
| 253 |
+
assert covariant_order(three_form) == 3
|
| 254 |
+
assert covariant_order(two_form + metric) == 2
|
| 255 |
+
assert covariant_order(two_form + metric_ambig) == 2
|
| 256 |
+
assert covariant_order(two_form + twoform_not_sym) == 2
|
| 257 |
+
assert covariant_order(two_form + twoform_not_TP) == 2
|
| 258 |
+
|
| 259 |
+
assert contravariant_order(one_vector) == 1
|
| 260 |
+
assert contravariant_order(two_vector) == 2
|
| 261 |
+
assert contravariant_order(three_vector) == 3
|
| 262 |
+
assert contravariant_order(two_vector + two_wp) == 2
|
| 263 |
+
|
| 264 |
+
raises(ValueError, lambda: covariant_order(misform_a))
|
| 265 |
+
raises(ValueError, lambda: covariant_order(misform_b))
|
| 266 |
+
raises(ValueError, lambda: covariant_order(misform_c))
|
| 267 |
+
|
| 268 |
+
assert twoform_to_matrix(metric) == Matrix([[1, 0], [0, 1]])
|
| 269 |
+
assert twoform_to_matrix(twoform_not_sym) == Matrix([[1, 0], [1, 0]])
|
| 270 |
+
assert twoform_to_matrix(twoform_not_TP) == Matrix([[0, -1], [1, 0]])
|
| 271 |
+
|
| 272 |
+
raises(ValueError, lambda: twoform_to_matrix(one_form))
|
| 273 |
+
raises(ValueError, lambda: twoform_to_matrix(three_form))
|
| 274 |
+
raises(ValueError, lambda: twoform_to_matrix(metric_ambig))
|
| 275 |
+
|
| 276 |
+
raises(ValueError, lambda: metric_to_Christoffel_1st(twoform_not_sym))
|
| 277 |
+
raises(ValueError, lambda: metric_to_Christoffel_2nd(twoform_not_sym))
|
| 278 |
+
raises(ValueError, lambda: metric_to_Riemann_components(twoform_not_sym))
|
| 279 |
+
raises(ValueError, lambda: metric_to_Ricci_components(twoform_not_sym))
|
| 280 |
+
|
| 281 |
+
|
| 282 |
+
def test_correct_arguments():
|
| 283 |
+
raises(ValueError, lambda: R2.e_x(R2.e_x))
|
| 284 |
+
raises(ValueError, lambda: R2.e_x(R2.dx))
|
| 285 |
+
|
| 286 |
+
raises(ValueError, lambda: Commutator(R2.e_x, R2.x))
|
| 287 |
+
raises(ValueError, lambda: Commutator(R2.dx, R2.e_x))
|
| 288 |
+
|
| 289 |
+
raises(ValueError, lambda: Differential(Differential(R2.e_x)))
|
| 290 |
+
|
| 291 |
+
raises(ValueError, lambda: R2.dx(R2.x))
|
| 292 |
+
|
| 293 |
+
raises(ValueError, lambda: LieDerivative(R2.dx, R2.dx))
|
| 294 |
+
raises(ValueError, lambda: LieDerivative(R2.x, R2.dx))
|
| 295 |
+
|
| 296 |
+
raises(ValueError, lambda: CovarDerivativeOp(R2.dx, []))
|
| 297 |
+
raises(ValueError, lambda: CovarDerivativeOp(R2.x, []))
|
| 298 |
+
|
| 299 |
+
a = Symbol('a')
|
| 300 |
+
raises(ValueError, lambda: intcurve_series(R2.dx, a, R2_r.point([1, 2])))
|
| 301 |
+
raises(ValueError, lambda: intcurve_series(R2.x, a, R2_r.point([1, 2])))
|
| 302 |
+
|
| 303 |
+
raises(ValueError, lambda: intcurve_diffequ(R2.dx, a, R2_r.point([1, 2])))
|
| 304 |
+
raises(ValueError, lambda: intcurve_diffequ(R2.x, a, R2_r.point([1, 2])))
|
| 305 |
+
|
| 306 |
+
raises(ValueError, lambda: contravariant_order(R2.e_x + R2.dx))
|
| 307 |
+
raises(ValueError, lambda: covariant_order(R2.e_x + R2.dx))
|
| 308 |
+
|
| 309 |
+
raises(ValueError, lambda: contravariant_order(R2.e_x*R2.e_y))
|
| 310 |
+
raises(ValueError, lambda: covariant_order(R2.dx*R2.dy))
|
| 311 |
+
|
| 312 |
+
def test_simplify():
|
| 313 |
+
x, y = R2_r.coord_functions()
|
| 314 |
+
dx, dy = R2_r.base_oneforms()
|
| 315 |
+
ex, ey = R2_r.base_vectors()
|
| 316 |
+
assert simplify(x) == x
|
| 317 |
+
assert simplify(x*y) == x*y
|
| 318 |
+
assert simplify(dx*dy) == dx*dy
|
| 319 |
+
assert simplify(ex*ey) == ex*ey
|
| 320 |
+
assert ((1-x)*dx)/(1-x)**2 == dx/(1-x)
|
| 321 |
+
|
| 322 |
+
|
| 323 |
+
def test_issue_17917():
|
| 324 |
+
X = R2.x*R2.e_x - R2.y*R2.e_y
|
| 325 |
+
Y = (R2.x**2 + R2.y**2)*R2.e_x - R2.x*R2.y*R2.e_y
|
| 326 |
+
assert LieDerivative(X, Y).expand() == (
|
| 327 |
+
R2.x**2*R2.e_x - 3*R2.y**2*R2.e_x - R2.x*R2.y*R2.e_y)
|
| 328 |
+
|
| 329 |
+
def test_deprecations():
|
| 330 |
+
m = Manifold('M', 2)
|
| 331 |
+
p = Patch('P', m)
|
| 332 |
+
with warns_deprecated_sympy():
|
| 333 |
+
CoordSystem('Car2d', p, names=['x', 'y'])
|
| 334 |
+
|
| 335 |
+
with warns_deprecated_sympy():
|
| 336 |
+
c = CoordSystem('Car2d', p, ['x', 'y'])
|
| 337 |
+
|
| 338 |
+
with warns_deprecated_sympy():
|
| 339 |
+
list(m.patches)
|
| 340 |
+
|
| 341 |
+
with warns_deprecated_sympy():
|
| 342 |
+
list(c.transforms)
|
evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/test_function_diffgeom_book.py
ADDED
|
@@ -0,0 +1,145 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from sympy.diffgeom.rn import R2, R2_p, R2_r, R3_r
|
| 2 |
+
from sympy.diffgeom import intcurve_series, Differential, WedgeProduct
|
| 3 |
+
from sympy.core import symbols, Function, Derivative
|
| 4 |
+
from sympy.simplify import trigsimp, simplify
|
| 5 |
+
from sympy.functions import sqrt, atan2, sin, cos
|
| 6 |
+
from sympy.matrices import Matrix
|
| 7 |
+
|
| 8 |
+
# Most of the functionality is covered in the
|
| 9 |
+
# test_functional_diffgeom_ch* tests which are based on the
|
| 10 |
+
# example from the paper of Sussman and Wisdom.
|
| 11 |
+
# If they do not cover something, additional tests are added in other test
|
| 12 |
+
# functions.
|
| 13 |
+
|
| 14 |
+
# From "Functional Differential Geometry" as of 2011
|
| 15 |
+
# by Sussman and Wisdom.
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def test_functional_diffgeom_ch2():
|
| 19 |
+
x0, y0, r0, theta0 = symbols('x0, y0, r0, theta0', real=True)
|
| 20 |
+
x, y = symbols('x, y', real=True)
|
| 21 |
+
f = Function('f')
|
| 22 |
+
|
| 23 |
+
assert (R2_p.point_to_coords(R2_r.point([x0, y0])) ==
|
| 24 |
+
Matrix([sqrt(x0**2 + y0**2), atan2(y0, x0)]))
|
| 25 |
+
assert (R2_r.point_to_coords(R2_p.point([r0, theta0])) ==
|
| 26 |
+
Matrix([r0*cos(theta0), r0*sin(theta0)]))
|
| 27 |
+
|
| 28 |
+
assert R2_p.jacobian(R2_r, [r0, theta0]) == Matrix(
|
| 29 |
+
[[cos(theta0), -r0*sin(theta0)], [sin(theta0), r0*cos(theta0)]])
|
| 30 |
+
|
| 31 |
+
field = f(R2.x, R2.y)
|
| 32 |
+
p1_in_rect = R2_r.point([x0, y0])
|
| 33 |
+
p1_in_polar = R2_p.point([sqrt(x0**2 + y0**2), atan2(y0, x0)])
|
| 34 |
+
assert field.rcall(p1_in_rect) == f(x0, y0)
|
| 35 |
+
assert field.rcall(p1_in_polar) == f(x0, y0)
|
| 36 |
+
|
| 37 |
+
p_r = R2_r.point([x0, y0])
|
| 38 |
+
p_p = R2_p.point([r0, theta0])
|
| 39 |
+
assert R2.x(p_r) == x0
|
| 40 |
+
assert R2.x(p_p) == r0*cos(theta0)
|
| 41 |
+
assert R2.r(p_p) == r0
|
| 42 |
+
assert R2.r(p_r) == sqrt(x0**2 + y0**2)
|
| 43 |
+
assert R2.theta(p_r) == atan2(y0, x0)
|
| 44 |
+
|
| 45 |
+
h = R2.x*R2.r**2 + R2.y**3
|
| 46 |
+
assert h.rcall(p_r) == x0*(x0**2 + y0**2) + y0**3
|
| 47 |
+
assert h.rcall(p_p) == r0**3*sin(theta0)**3 + r0**3*cos(theta0)
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def test_functional_diffgeom_ch3():
|
| 51 |
+
x0, y0 = symbols('x0, y0', real=True)
|
| 52 |
+
x, y, t = symbols('x, y, t', real=True)
|
| 53 |
+
f = Function('f')
|
| 54 |
+
b1 = Function('b1')
|
| 55 |
+
b2 = Function('b2')
|
| 56 |
+
p_r = R2_r.point([x0, y0])
|
| 57 |
+
|
| 58 |
+
s_field = f(R2.x, R2.y)
|
| 59 |
+
v_field = b1(R2.x)*R2.e_x + b2(R2.y)*R2.e_y
|
| 60 |
+
assert v_field.rcall(s_field).rcall(p_r).doit() == b1(
|
| 61 |
+
x0)*Derivative(f(x0, y0), x0) + b2(y0)*Derivative(f(x0, y0), y0)
|
| 62 |
+
|
| 63 |
+
assert R2.e_x(R2.r**2).rcall(p_r) == 2*x0
|
| 64 |
+
v = R2.e_x + 2*R2.e_y
|
| 65 |
+
s = R2.r**2 + 3*R2.x
|
| 66 |
+
assert v.rcall(s).rcall(p_r).doit() == 2*x0 + 4*y0 + 3
|
| 67 |
+
|
| 68 |
+
circ = -R2.y*R2.e_x + R2.x*R2.e_y
|
| 69 |
+
series = intcurve_series(circ, t, R2_r.point([1, 0]), coeffs=True)
|
| 70 |
+
series_x, series_y = zip(*series)
|
| 71 |
+
assert all(
|
| 72 |
+
term == cos(t).taylor_term(i, t) for i, term in enumerate(series_x))
|
| 73 |
+
assert all(
|
| 74 |
+
term == sin(t).taylor_term(i, t) for i, term in enumerate(series_y))
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def test_functional_diffgeom_ch4():
|
| 78 |
+
x0, y0, theta0 = symbols('x0, y0, theta0', real=True)
|
| 79 |
+
x, y, r, theta = symbols('x, y, r, theta', real=True)
|
| 80 |
+
r0 = symbols('r0', positive=True)
|
| 81 |
+
f = Function('f')
|
| 82 |
+
b1 = Function('b1')
|
| 83 |
+
b2 = Function('b2')
|
| 84 |
+
p_r = R2_r.point([x0, y0])
|
| 85 |
+
p_p = R2_p.point([r0, theta0])
|
| 86 |
+
|
| 87 |
+
f_field = b1(R2.x, R2.y)*R2.dx + b2(R2.x, R2.y)*R2.dy
|
| 88 |
+
assert f_field.rcall(R2.e_x).rcall(p_r) == b1(x0, y0)
|
| 89 |
+
assert f_field.rcall(R2.e_y).rcall(p_r) == b2(x0, y0)
|
| 90 |
+
|
| 91 |
+
s_field_r = f(R2.x, R2.y)
|
| 92 |
+
df = Differential(s_field_r)
|
| 93 |
+
assert df(R2.e_x).rcall(p_r).doit() == Derivative(f(x0, y0), x0)
|
| 94 |
+
assert df(R2.e_y).rcall(p_r).doit() == Derivative(f(x0, y0), y0)
|
| 95 |
+
|
| 96 |
+
s_field_p = f(R2.r, R2.theta)
|
| 97 |
+
df = Differential(s_field_p)
|
| 98 |
+
assert trigsimp(df(R2.e_x).rcall(p_p).doit()) == (
|
| 99 |
+
cos(theta0)*Derivative(f(r0, theta0), r0) -
|
| 100 |
+
sin(theta0)*Derivative(f(r0, theta0), theta0)/r0)
|
| 101 |
+
assert trigsimp(df(R2.e_y).rcall(p_p).doit()) == (
|
| 102 |
+
sin(theta0)*Derivative(f(r0, theta0), r0) +
|
| 103 |
+
cos(theta0)*Derivative(f(r0, theta0), theta0)/r0)
|
| 104 |
+
|
| 105 |
+
assert R2.dx(R2.e_x).rcall(p_r) == 1
|
| 106 |
+
assert R2.dx(R2.e_x) == 1
|
| 107 |
+
assert R2.dx(R2.e_y).rcall(p_r) == 0
|
| 108 |
+
assert R2.dx(R2.e_y) == 0
|
| 109 |
+
|
| 110 |
+
circ = -R2.y*R2.e_x + R2.x*R2.e_y
|
| 111 |
+
assert R2.dx(circ).rcall(p_r).doit() == -y0
|
| 112 |
+
assert R2.dy(circ).rcall(p_r) == x0
|
| 113 |
+
assert R2.dr(circ).rcall(p_r) == 0
|
| 114 |
+
assert simplify(R2.dtheta(circ).rcall(p_r)) == 1
|
| 115 |
+
|
| 116 |
+
assert (circ - R2.e_theta).rcall(s_field_r).rcall(p_r) == 0
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
def test_functional_diffgeom_ch6():
    """Tests following chapter 6: wedge products of coordinate one-forms in
    2D and 3D, and the exterior derivative of a generic one-form.
    """
    u0, u1, u2, v0, v1, v2, w0, w1, w2 = symbols('u0:3, v0:3, w0:3', real=True)

    # In 2D, dx^dy applied to two vectors yields their signed area.
    vec_u = u0*R2.e_x + u1*R2.e_y
    vec_v = v0*R2.e_x + v1*R2.e_y
    area_form = WedgeProduct(R2.dx, R2.dy)
    assert area_form(vec_u, vec_v) == u0*v1 - u1*v0

    # In 3D, dx^dy^dz applied to three vectors yields the signed volume,
    # i.e. the determinant of the component matrix.
    vec_u = u0*R3_r.e_x + u1*R3_r.e_y + u2*R3_r.e_z
    vec_v = v0*R3_r.e_x + v1*R3_r.e_y + v2*R3_r.e_z
    vec_w = w0*R3_r.e_x + w1*R3_r.e_y + w2*R3_r.e_z
    volume_form = WedgeProduct(R3_r.dx, R3_r.dy, R3_r.dz)
    components = Matrix(3, 3, [u0, u1, u2, v0, v1, v2, w0, w1, w2])
    assert volume_form(vec_u, vec_v, vec_w) == components.det()

    # d(a*dx + b*dy + c*dz) == da^dx + db^dy + dc^dz: the residual of the
    # two sides must vanish on any pair of frame fields.
    a, b, c = symbols('a, b, c', cls=Function)
    a_f = a(R3_r.x, R3_r.y, R3_r.z)
    b_f = b(R3_r.x, R3_r.y, R3_r.z)
    c_f = c(R3_r.x, R3_r.y, R3_r.z)
    one_form = a_f*R3_r.dx + b_f*R3_r.dy + c_f*R3_r.dz
    residual = (Differential(one_form)
                - WedgeProduct(Differential(a_f), R3_r.dx)
                - WedgeProduct(Differential(b_f), R3_r.dy)
                - WedgeProduct(Differential(c_f), R3_r.dz))
    assert residual.rcall(R3_r.e_x, R3_r.e_y) == 0
|
evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/test_hyperbolic_space.py
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
r'''
|
| 2 |
+
Unit test describing the hyperbolic half-plane with the Poincare metric. This
|
| 3 |
+
is a basic model of hyperbolic geometry on the (positive) half-space
|
| 4 |
+
|
| 5 |
+
{(x,y) \in R^2 | y > 0}
|
| 6 |
+
|
| 7 |
+
with the Riemannian metric
|
| 8 |
+
|
| 9 |
+
ds^2 = (dx^2 + dy^2)/y^2
|
| 10 |
+
|
| 11 |
+
It has constant negative scalar curvature = -2
|
| 12 |
+
|
| 13 |
+
https://en.wikipedia.org/wiki/Poincare_half-plane_model
|
| 14 |
+
'''
|
| 15 |
+
from sympy.matrices.dense import diag
|
| 16 |
+
from sympy.diffgeom import (twoform_to_matrix,
|
| 17 |
+
metric_to_Christoffel_1st, metric_to_Christoffel_2nd,
|
| 18 |
+
metric_to_Riemann_components, metric_to_Ricci_components)
|
| 19 |
+
import sympy.diffgeom.rn
|
| 20 |
+
from sympy.tensor.array import ImmutableDenseNDimArray
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def test_H2():
    """Check curvature quantities of the Poincare half-plane metric.

    The metric ds^2 = (dx^2 + dy^2)/y^2 on the upper half-plane has
    constant scalar curvature -2 (Gauss curvature -1).  The Christoffel
    symbols, Riemann tensor and Ricci tensor are verified component by
    component against their known closed forms.
    """
    TP = sympy.diffgeom.TensorProduct
    R2 = sympy.diffgeom.rn.R2
    y = R2.y
    dy = R2.dy
    dx = R2.dx
    g = (TP(dx, dx) + TP(dy, dy))*y**(-2)
    # The two-form must correspond to the matrix diag(1/y**2, 1/y**2).
    assert twoform_to_matrix(g) == diag(y**(-2), y**(-2))

    # First-kind Christoffel symbols: all eight components checked.
    gamma1 = metric_to_Christoffel_1st(g)
    expected1 = {
        (0, 0, 0): 0, (0, 0, 1): -y**(-3), (0, 1, 0): -y**(-3), (0, 1, 1): 0,
        (1, 0, 0): y**(-3), (1, 0, 1): 0, (1, 1, 0): 0, (1, 1, 1): -y**(-3),
    }
    for idx, value in expected1.items():
        assert gamma1[idx] == value

    # Second-kind Christoffel symbols.
    gamma2 = metric_to_Christoffel_2nd(g)
    expected2 = {
        (0, 0, 0): 0, (0, 0, 1): -y**(-1), (0, 1, 0): -y**(-1), (0, 1, 1): 0,
        (1, 0, 0): y**(-1), (1, 0, 1): 0, (1, 1, 0): 0, (1, 1, 1): -y**(-1),
    }
    for idx, value in expected2.items():
        assert gamma2[idx] == value

    # Riemann tensor: only the R^0_{101}-type components are nonzero.
    Rm = metric_to_Riemann_components(g)
    nonzero = {
        (0, 1, 0, 1): -y**(-2), (0, 1, 1, 0): y**(-2),
        (1, 0, 0, 1): y**(-2), (1, 0, 1, 0): -y**(-2),
    }
    for i in range(2):
        for j in range(2):
            for k in range(2):
                for m in range(2):
                    assert Rm[i, j, k, m] == nonzero.get((i, j, k, m), 0)

    Ric = metric_to_Ricci_components(g)
    assert Ric[0, 0] == -y**(-2)
    assert Ric[0, 1] == 0
    assert Ric[1, 0] == 0
    # The original test repeated the [0, 0] check here; [1, 1] is the
    # intended component (confirmed by the full-array comparison below).
    assert Ric[1, 1] == -y**(-2)

    assert Ric == ImmutableDenseNDimArray([-y**(-2), 0, 0, -y**(-2)], (2, 2))

    # Scalar curvature is -2: contract Ricci with the inverse metric
    # y**2 * delta.  TODO - it would be nice to have index contraction
    # built-in.
    R = (Ric[0, 0] + Ric[1, 1])*y**2
    assert R == -2

    # Gauss curvature is -1 (half the scalar curvature in 2D).
    assert R/2 == -1
|
evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/__init__.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""A module to manipulate symbolic objects with indices including tensors
|
| 2 |
+
|
| 3 |
+
"""
|
| 4 |
+
from .indexed import IndexedBase, Idx, Indexed
|
| 5 |
+
from .index_methods import get_contraction_structure, get_indices
|
| 6 |
+
from .functions import shape
|
| 7 |
+
from .array import (MutableDenseNDimArray, ImmutableDenseNDimArray,
|
| 8 |
+
MutableSparseNDimArray, ImmutableSparseNDimArray, NDimArray, tensorproduct,
|
| 9 |
+
tensorcontraction, tensordiagonal, derive_by_array, permutedims, Array,
|
| 10 |
+
DenseNDimArray, SparseNDimArray,)
|
| 11 |
+
|
| 12 |
+
__all__ = [
|
| 13 |
+
'IndexedBase', 'Idx', 'Indexed',
|
| 14 |
+
|
| 15 |
+
'get_contraction_structure', 'get_indices',
|
| 16 |
+
|
| 17 |
+
'shape',
|
| 18 |
+
|
| 19 |
+
'MutableDenseNDimArray', 'ImmutableDenseNDimArray',
|
| 20 |
+
'MutableSparseNDimArray', 'ImmutableSparseNDimArray', 'NDimArray',
|
| 21 |
+
'tensorproduct', 'tensorcontraction', 'tensordiagonal', 'derive_by_array', 'permutedims',
|
| 22 |
+
'Array', 'DenseNDimArray', 'SparseNDimArray',
|
| 23 |
+
]
|
evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/functions.py
ADDED
|
@@ -0,0 +1,154 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections.abc import Iterable
|
| 2 |
+
from functools import singledispatch
|
| 3 |
+
|
| 4 |
+
from sympy.core.expr import Expr
|
| 5 |
+
from sympy.core.mul import Mul
|
| 6 |
+
from sympy.core.singleton import S
|
| 7 |
+
from sympy.core.sympify import sympify
|
| 8 |
+
from sympy.core.parameters import global_parameters
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class TensorProduct(Expr):
    """
    Generic class for tensor products.
    """
    # A tensor product is array-valued, never a plain number.
    is_number = False

    def __new__(cls, *args, **kwargs):
        # Imported locally to avoid circular imports at module load time.
        from sympy.tensor.array import NDimArray, tensorproduct, Array
        from sympy.matrices.expressions.matexpr import MatrixExpr
        from sympy.matrices.matrixbase import MatrixBase
        from sympy.strategies import flatten

        args = [sympify(arg) for arg in args]
        evaluate = kwargs.get("evaluate", global_parameters.evaluate)

        # With evaluate=False, store the arguments completely untouched.
        if not evaluate:
            obj = Expr.__new__(cls, *args)
            return obj

        # Partition the arguments: explicit arrays/matrices are combined
        # eagerly, symbolic matrix expressions stay unevaluated, and
        # everything else is folded into a scalar coefficient.
        arrays = []
        other = []
        scalar = S.One
        for arg in args:
            if isinstance(arg, (Iterable, MatrixBase, NDimArray)):
                arrays.append(Array(arg))
            elif isinstance(arg, (MatrixExpr,)):
                other.append(arg)
            else:
                scalar *= arg

        # Explicit arrays collapse into a single array scaled by the scalars.
        coeff = scalar*tensorproduct(*arrays)
        if len(other) == 0:
            return coeff
        # A coefficient of 1 is not stored among the arguments.
        if coeff != 1:
            newargs = [coeff] + other
        else:
            newargs = other
        obj = Expr.__new__(cls, *newargs, **kwargs)
        # flatten merges nested TensorProduct nodes into a single node.
        return flatten(obj)

    def rank(self):
        """Return the rank (number of dimensions) of the tensor product."""
        return len(self.shape)

    def _get_args_shapes(self):
        # Wrap arguments without a ``shape`` attribute in Array to obtain one.
        from sympy.tensor.array import Array
        return [i.shape if hasattr(i, "shape") else Array(i).shape for i in self.args]

    @property
    def shape(self):
        """Shape of the product: concatenation of the factors' shapes."""
        shape_list = self._get_args_shapes()
        return sum(shape_list, ())

    def __getitem__(self, index):
        # Consume the requested index positions factor by factor: each
        # factor takes as many entries of ``index`` as its own rank, and
        # the element is the product of the per-factor elements.
        index = iter(index)
        return Mul.fromiter(
            arg.__getitem__(tuple(next(index) for i in shp))
            for arg, shp in zip(self.args, self._get_args_shapes())
        )
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
@singledispatch
def shape(expr):
    """
    Return the shape of the *expr* as a tuple. *expr* should represent
    suitable object such as matrix or array.

    Parameters
    ==========

    expr : SymPy object having ``MatrixKind`` or ``ArrayKind``.

    Raises
    ======

    NoShapeError : Raised when object with wrong kind is passed.

    Examples
    ========

    This function returns the shape of any object representing matrix or array.

    >>> from sympy import shape, Array, ImmutableDenseMatrix, Integral
    >>> from sympy.abc import x
    >>> A = Array([1, 2])
    >>> shape(A)
    (2,)
    >>> shape(Integral(A, x))
    (2,)
    >>> M = ImmutableDenseMatrix([1, 2])
    >>> shape(M)
    (2, 1)
    >>> shape(Integral(M, x))
    (2, 1)

    You can support new type by dispatching.

    >>> from sympy import Expr
    >>> class NewExpr(Expr):
    ...     pass
    >>> @shape.register(NewExpr)
    ... def _(expr):
    ...     return shape(expr.args[0])
    >>> shape(NewExpr(M))
    (2, 1)

    If unsuitable expression is passed, ``NoShapeError()`` will be raised.

    >>> shape(Integral(x, x))
    Traceback (most recent call last):
      ...
    sympy.tensor.functions.NoShapeError: shape() called on non-array object: Integral(x, x)

    Notes
    =====

    Array-like classes (such as ``Matrix`` or ``NDimArray``) has ``shape``
    property which returns its shape, but it cannot be used for non-array
    classes containing array. This function returns the shape of any
    registered object representing array.

    """
    # Default implementation: delegate to the object's own ``shape``
    # attribute; singledispatch registrations handle wrapper types.
    if hasattr(expr, "shape"):
        return expr.shape
    # Message fixed to match the documented behavior: the doctests here and
    # in NoShapeError both expect "shape() called on non-array object: ...".
    raise NoShapeError(
        "shape() called on non-array object: %s" % expr)
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
class NoShapeError(Exception):
    """
    Raised when ``shape()`` is called on non-array object.

    This error can be imported from ``sympy.tensor.functions``.

    Examples
    ========

    >>> from sympy import shape
    >>> from sympy.abc import x
    >>> shape(x)
    Traceback (most recent call last):
      ...
    sympy.tensor.functions.NoShapeError: shape() called on non-array object: x
    """
    # No behavior of its own; exists only as a distinct exception type.
    pass
|
evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/index_methods.py
ADDED
|
@@ -0,0 +1,469 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Module with functions operating on IndexedBase, Indexed and Idx objects
|
| 2 |
+
|
| 3 |
+
- Check shape conformance
|
| 4 |
+
- Determine indices in resulting expression
|
| 5 |
+
|
| 6 |
+
etc.
|
| 7 |
+
|
| 8 |
+
Methods in this module could be implemented by calling methods on Expr
|
| 9 |
+
objects instead. When things stabilize this could be a useful
|
| 10 |
+
refactoring.
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
from functools import reduce
|
| 14 |
+
|
| 15 |
+
from sympy.core.function import Function
|
| 16 |
+
from sympy.functions import exp, Piecewise
|
| 17 |
+
from sympy.tensor.indexed import Idx, Indexed
|
| 18 |
+
from sympy.utilities import sift
|
| 19 |
+
|
| 20 |
+
from collections import OrderedDict
|
| 21 |
+
|
| 22 |
+
class IndexConformanceException(Exception):
    """Raised when terms of a sum do not carry compatible outer indices."""
    pass
|
| 24 |
+
|
| 25 |
+
def _unique_and_repeated(inds):
|
| 26 |
+
"""
|
| 27 |
+
Returns the unique and repeated indices. Also note, from the examples given below
|
| 28 |
+
that the order of indices is maintained as given in the input.
|
| 29 |
+
|
| 30 |
+
Examples
|
| 31 |
+
========
|
| 32 |
+
|
| 33 |
+
>>> from sympy.tensor.index_methods import _unique_and_repeated
|
| 34 |
+
>>> _unique_and_repeated([2, 3, 1, 3, 0, 4, 0])
|
| 35 |
+
([2, 1, 4], [3, 0])
|
| 36 |
+
"""
|
| 37 |
+
uniq = OrderedDict()
|
| 38 |
+
for i in inds:
|
| 39 |
+
if i in uniq:
|
| 40 |
+
uniq[i] = 0
|
| 41 |
+
else:
|
| 42 |
+
uniq[i] = 1
|
| 43 |
+
return sift(uniq, lambda x: uniq[x], binary=True)
|
| 44 |
+
|
| 45 |
+
def _remove_repeated(inds):
    """
    Removes repeated objects from sequences

    Returns a set of the unique objects and a tuple of all that have been
    removed.

    Examples
    ========

    >>> from sympy.tensor.index_methods import _remove_repeated
    >>> l1 = [1, 2, 3, 2]
    >>> _remove_repeated(l1)
    ({1, 3}, (2,))

    """
    # Partition into single-occurrence and multiply-occurring indices, then
    # convert to the container types callers expect.
    unique, repeated = _unique_and_repeated(inds)
    return set(unique), tuple(repeated)
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def _get_indices_Mul(expr, return_dummies=False):
    """Determine the outer indices of a Mul object.

    Examples
    ========

    >>> from sympy.tensor.index_methods import _get_indices_Mul
    >>> from sympy.tensor.indexed import IndexedBase, Idx
    >>> i, j, k = map(Idx, ['i', 'j', 'k'])
    >>> x = IndexedBase('x')
    >>> y = IndexedBase('y')
    >>> _get_indices_Mul(x[i, k]*y[j, k])
    ({i, j}, {})
    >>> _get_indices_Mul(x[i, k]*y[j, k], return_dummies=True)
    ({i, j}, {}, (k,))

    """
    # Collect (indices, symmetries) for every factor of the product.
    per_factor = [get_indices(factor) for factor in expr.args]
    index_sets, sym_dicts = zip(*per_factor)

    # Indices repeated across factors are summation (dummy) indices; the
    # rest are the outer indices of the product.
    all_inds = [ind for index_set in index_sets for ind in index_set]
    outer, dummies = _remove_repeated(all_inds)

    # Merge symmetry information multiplicatively across the factors.
    symmetry = {}
    for sym in sym_dicts:
        for pair, value in sym.items():
            if pair in symmetry:
                symmetry[pair] *= value
            else:
                symmetry[pair] = value

    if return_dummies:
        return outer, symmetry, dummies
    return outer, symmetry
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def _get_indices_Pow(expr):
    """Determine outer indices of a power or an exponential.

    A power is considered a universal function, so that the indices of a Pow is
    just the collection of indices present in the expression. This may be
    viewed as a bit inconsistent in the special case:

        x[i]**2 = x[i]*x[i]                                                  (1)

    The above expression could have been interpreted as the contraction of x[i]
    with itself, but we choose instead to interpret it as a function

        lambda y: y**2

    applied to each element of x (a universal function in numpy terms). In
    order to allow an interpretation of (1) as a contraction, we need
    contravariant and covariant Idx subclasses. (FIXME: this is not yet
    implemented)

    Expressions in the base or exponent are subject to contraction as usual,
    but an index that is present in the exponent, will not be considered
    contractable with its own base. Note however, that indices in the same
    exponent can be contracted with each other.

    Examples
    ========

    >>> from sympy.tensor.index_methods import _get_indices_Pow
    >>> from sympy import Pow, exp, IndexedBase, Idx
    >>> A = IndexedBase('A')
    >>> x = IndexedBase('x')
    >>> i, j, k = map(Idx, ['i', 'j', 'k'])
    >>> _get_indices_Pow(exp(A[i, j]*x[j]))
    ({i}, {})
    >>> _get_indices_Pow(Pow(x[i], x[i]))
    ({i}, {})
    >>> _get_indices_Pow(Pow(A[i, j]*x[j], x[i]))
    ({i}, {})

    """
    # ``exponent`` (not ``exp``) so the local does not shadow the ``exp``
    # function imported at module level from sympy.functions.
    base, exponent = expr.as_base_exp()
    binds, bsyms = get_indices(base)
    einds, esyms = get_indices(exponent)

    # Base and exponent indices are never contracted with each other.
    inds = binds | einds

    # FIXME: symmetries from power needs to check special cases, else nothing
    symmetries = {}

    return inds, symmetries
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
def _get_indices_Add(expr):
    """Determine outer indices of an Add object.

    In a sum, each term must have the same set of outer indices. A valid
    expression could be

        x(i)*y(j) - x(j)*y(i)

    But we do not allow expressions like:

        x(i)*y(j) - z(j)*z(j)

    FIXME: Add support for Numpy broadcasting

    Examples
    ========

    >>> from sympy.tensor.index_methods import _get_indices_Add
    >>> from sympy.tensor.indexed import IndexedBase, Idx
    >>> i, j, k = map(Idx, ['i', 'j', 'k'])
    >>> x = IndexedBase('x')
    >>> y = IndexedBase('y')
    >>> _get_indices_Add(x[i] + x[k]*y[i, k])
    ({i}, {})

    """

    # Gather (indices, symmetries) for every term of the sum.
    inds = list(map(get_indices, expr.args))
    inds, syms = list(zip(*inds))

    # allow broadcast of scalars
    non_scalars = [x for x in inds if x != set()]
    if not non_scalars:
        return set(), {}

    # All non-scalar terms must agree on their outer indices.
    if not all(x == non_scalars[0] for x in non_scalars[1:]):
        raise IndexConformanceException("Indices are not consistent: %s" % expr)
    # NOTE(review): this reduce folds the symmetry dicts with ``x != y or y``;
    # it only comes out falsy when every dict is equal *and* empty, in which
    # case ``syms[0]`` is the empty dict anyway.  It looks like an intended
    # "all symmetries equal" check that does not fully work -- confirm
    # upstream before relying on it.
    if not reduce(lambda x, y: x != y or y, syms):
        symmetries = syms[0]
    else:
        # FIXME: search for symmetries
        symmetries = {}

    return non_scalars[0], symmetries
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
def get_indices(expr):
    """Determine the outer indices of expression ``expr``

    By *outer* we mean indices that are not summation indices. Returns a set
    and a dict. The set contains outer indices and the dict contains
    information about index symmetries.

    Examples
    ========

    >>> from sympy.tensor.index_methods import get_indices
    >>> from sympy import symbols
    >>> from sympy.tensor import IndexedBase
    >>> x, y, A = map(IndexedBase, ['x', 'y', 'A'])
    >>> i, j, a, z = symbols('i j a z', integer=True)

    The indices of the total expression is determined, Repeated indices imply a
    summation, for instance the trace of a matrix A:

    >>> get_indices(A[i, i])
    (set(), {})

    In the case of many terms, the terms are required to have identical
    outer indices. Else an IndexConformanceException is raised.

    >>> get_indices(x[i] + A[i, j]*y[j])
    ({i}, {})

    :Exceptions:

    An IndexConformanceException means that the terms are not compatible, e.g.

    >>> get_indices(x[i] + y[j]) #doctest: +SKIP
    (...)
    IndexConformanceException: Indices are not consistent: x(i) + y(j)

    .. warning::
       The concept of *outer* indices applies recursively, starting on the deepest
       level. This implies that dummies inside parenthesis are assumed to be
       summed first, so that the following expression is handled gracefully:

       >>> get_indices((x[i] + A[i, j]*y[j])*x[j])
       ({i, j}, {})

       This is correct and may appear convenient, but you need to be careful
       with this as SymPy will happily .expand() the product, if requested. The
       resulting expression would mix the outer ``j`` with the dummies inside
       the parenthesis, which makes it a different expression. To be on the
       safe side, it is best to avoid such ambiguities by using unique indices
       for all contractions that should be held separate.

    """
    # We call ourself recursively to determine indices of sub expressions.

    # break recursion
    if isinstance(expr, Indexed):
        c = expr.indices
        inds, dummies = _remove_repeated(c)
        return inds, {}
    elif expr is None:
        return set(), {}
    elif isinstance(expr, Idx):
        return {expr}, {}
    elif expr.is_Atom:
        return set(), {}


    # recurse via specialized functions
    else:
        if expr.is_Mul:
            return _get_indices_Mul(expr)
        elif expr.is_Add:
            return _get_indices_Add(expr)
        elif expr.is_Pow or isinstance(expr, exp):
            return _get_indices_Pow(expr)

        elif isinstance(expr, Piecewise):
            # FIXME: No support for Piecewise yet
            return set(), {}
        elif isinstance(expr, Function):
            # Support ufunc like behaviour by returning indices from arguments.
            # Functions do not interpret repeated indices across arguments
            # as summation
            ind0 = set()
            # Initialize ``sym``: a zero-argument Function would otherwise
            # leave it unbound at the return below (UnboundLocalError).
            sym = {}
            for arg in expr.args:
                ind, sym = get_indices(arg)
                ind0 |= ind
            return ind0, sym

        # this test is expensive, so it should be at the end
        elif not expr.has(Indexed):
            return set(), {}
        raise NotImplementedError(
            "FIXME: No specialized handling of type %s" % type(expr))
|
| 296 |
+
|
| 297 |
+
|
| 298 |
+
def get_contraction_structure(expr):
    """Determine dummy indices of ``expr`` and describe its structure

    By *dummy* we mean indices that are summation indices.

    The structure of the expression is determined and described as follows:

    1) A conforming summation of Indexed objects is described with a dict where
       the keys are summation indices and the corresponding values are sets
       containing all terms for which the summation applies. All Add objects
       in the SymPy expression tree are described like this.

    2) For all nodes in the SymPy expression tree that are *not* of type Add, the
       following applies:

       If a node discovers contractions in one of its arguments, the node
       itself will be stored as a key in the dict. For that key, the
       corresponding value is a list of dicts, each of which is the result of a
       recursive call to get_contraction_structure(). The list contains only
       dicts for the non-trivial deeper contractions, omitting dicts with None
       as the one and only key.

    .. Note:: The presence of expressions among the dictionary keys indicates
       multiple levels of index contractions. A nested dict displays nested
       contractions and may itself contain dicts from a deeper level. In
       practical calculations the summation in the deepest nested level must be
       calculated first so that the outer expression can access the resulting
       indexed object.

    Examples
    ========

    >>> from sympy.tensor.index_methods import get_contraction_structure
    >>> from sympy import default_sort_key
    >>> from sympy.tensor import IndexedBase, Idx
    >>> x, y, A = map(IndexedBase, ['x', 'y', 'A'])
    >>> i, j, k, l = map(Idx, ['i', 'j', 'k', 'l'])
    >>> get_contraction_structure(x[i]*y[i] + A[j, j])
    {(i,): {x[i]*y[i]}, (j,): {A[j, j]}}
    >>> get_contraction_structure(x[i]*y[j])
    {None: {x[i]*y[j]}}

    A multiplication of contracted factors results in nested dicts representing
    the internal contractions.

    >>> d = get_contraction_structure(x[i, i]*y[j, j])
    >>> sorted(d.keys(), key=default_sort_key)
    [None, x[i, i]*y[j, j]]

    In this case, the product has no contractions:

    >>> d[None]
    {x[i, i]*y[j, j]}

    Factors are contracted "first":

    >>> sorted(d[x[i, i]*y[j, j]], key=default_sort_key)
    [{(i,): {x[i, i]}}, {(j,): {y[j, j]}}]

    A parenthesized Add object is also returned as a nested dictionary. The
    term containing the parenthesis is a Mul with a contraction among the
    arguments, so it will be found as a key in the result. It stores the
    dictionary resulting from a recursive call on the Add expression.

    >>> d = get_contraction_structure(x[i]*(y[i] + A[i, j]*x[j]))
    >>> sorted(d.keys(), key=default_sort_key)
    [(A[i, j]*x[j] + y[i])*x[i], (i,)]
    >>> d[(i,)]
    {(A[i, j]*x[j] + y[i])*x[i]}
    >>> d[x[i]*(A[i, j]*x[j] + y[i])]
    [{None: {y[i]}, (j,): {A[i, j]*x[j]}}]

    Powers with contractions in either base or exponent will also be found as
    keys in the dictionary, mapping to a list of results from recursive calls:

    >>> d = get_contraction_structure(A[j, j]**A[i, i])
    >>> d[None]
    {A[j, j]**A[i, i]}
    >>> nested_contractions = d[A[j, j]**A[i, i]]
    >>> nested_contractions[0]
    {(j,): {A[j, j]}}
    >>> nested_contractions[1]
    {(i,): {A[i, i]}}

    The description of the contraction structure may appear complicated when
    represented with a string in the above examples, but it is easy to iterate
    over:

    >>> from sympy import Expr
    >>> for key in d:
    ...     if isinstance(key, Expr):
    ...         continue
    ...     for term in d[key]:
    ...         if term in d:
    ...             # treat deepest contraction first
    ...             pass
    ...     # treat outermost contactions here

    """

    # We call ourself recursively to inspect sub expressions.

    if isinstance(expr, Indexed):
        junk, key = _remove_repeated(expr.indices)
        return {key or None: {expr}}
    elif expr.is_Atom:
        return {None: {expr}}
    elif expr.is_Mul:
        junk, junk, key = _get_indices_Mul(expr, return_dummies=True)
        result = {key or None: {expr}}
        # recurse on every factor
        nested = []
        for fac in expr.args:
            facd = get_contraction_structure(fac)
            if not (None in facd and len(facd) == 1):
                nested.append(facd)
        if nested:
            result[expr] = nested
        return result
    elif expr.is_Pow or isinstance(expr, exp):
        # recurse in base and exp separately. If either has internal
        # contractions we must include ourselves as a key in the returned dict
        b, e = expr.as_base_exp()
        dbase = get_contraction_structure(b)
        dexp = get_contraction_structure(e)

        dicts = []
        for d in dbase, dexp:
            if not (None in d and len(d) == 1):
                dicts.append(d)
        result = {None: {expr}}
        if dicts:
            result[expr] = dicts
        return result
    elif expr.is_Add:
        # Note: we just collect all terms with identical summation indices, We
        # do nothing to identify equivalent terms here, as this would require
        # substitutions or pattern matching in expressions of unknown
        # complexity.
        result = {}
        for term in expr.args:
            # recurse on every term
            d = get_contraction_structure(term)
            for key in d:
                if key in result:
                    result[key] |= d[key]
                else:
                    result[key] = d[key]
        return result

    elif isinstance(expr, Piecewise):
        # FIXME: No support for Piecewise yet
        # Fixed to wrap the expression in a set: every other branch returns
        # ``{None: {expr}}``, and the Add branch above merges dict values
        # with ``|=``, which requires them to be sets.
        return {None: {expr}}
    elif isinstance(expr, Function):
        # Collect non-trivial contraction structures in each argument
        # We do not report repeated indices in separate arguments as a
        # contraction
        deeplist = []
        for arg in expr.args:
            deep = get_contraction_structure(arg)
            if not (None in deep and len(deep) == 1):
                deeplist.append(deep)
        d = {None: {expr}}
        if deeplist:
            d[expr] = deeplist
        return d

    # this test is expensive, so it should be at the end
    elif not expr.has(Indexed):
        return {None: {expr}}
    raise NotImplementedError(
        "FIXME: No specialized handling of type %s" % type(expr))
|
evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/indexed.py
ADDED
|
@@ -0,0 +1,793 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
r"""Module that defines indexed objects.
|
| 2 |
+
|
| 3 |
+
The classes ``IndexedBase``, ``Indexed``, and ``Idx`` represent a
|
| 4 |
+
matrix element ``M[i, j]`` as in the following diagram::
|
| 5 |
+
|
| 6 |
+
1) The Indexed class represents the entire indexed object.
|
| 7 |
+
|
|
| 8 |
+
___|___
|
| 9 |
+
' '
|
| 10 |
+
M[i, j]
|
| 11 |
+
/ \__\______
|
| 12 |
+
| |
|
| 13 |
+
| |
|
| 14 |
+
| 2) The Idx class represents indices; each Idx can
|
| 15 |
+
| optionally contain information about its range.
|
| 16 |
+
|
|
| 17 |
+
3) IndexedBase represents the 'stem' of an indexed object, here `M`.
|
| 18 |
+
The stem used by itself is usually taken to represent the entire
|
| 19 |
+
array.
|
| 20 |
+
|
| 21 |
+
There can be any number of indices on an Indexed object. No
|
| 22 |
+
transformation properties are implemented in these Base objects, but
|
| 23 |
+
implicit contraction of repeated indices is supported.
|
| 24 |
+
|
| 25 |
+
Note that the support for complicated (i.e. non-atomic) integer
|
| 26 |
+
expressions as indices is limited. (This should be improved in
|
| 27 |
+
future releases.)
|
| 28 |
+
|
| 29 |
+
Examples
|
| 30 |
+
========
|
| 31 |
+
|
| 32 |
+
To express the above matrix element example you would write:
|
| 33 |
+
|
| 34 |
+
>>> from sympy import symbols, IndexedBase, Idx
|
| 35 |
+
>>> M = IndexedBase('M')
|
| 36 |
+
>>> i, j = symbols('i j', cls=Idx)
|
| 37 |
+
>>> M[i, j]
|
| 38 |
+
M[i, j]
|
| 39 |
+
|
| 40 |
+
Repeated indices in a product implies a summation, so to express a
|
| 41 |
+
matrix-vector product in terms of Indexed objects:
|
| 42 |
+
|
| 43 |
+
>>> x = IndexedBase('x')
|
| 44 |
+
>>> M[i, j]*x[j]
|
| 45 |
+
M[i, j]*x[j]
|
| 46 |
+
|
| 47 |
+
If the indexed objects will be converted to component based arrays, e.g.
|
| 48 |
+
with the code printers or the autowrap framework, you also need to provide
|
| 49 |
+
(symbolic or numerical) dimensions. This can be done by passing an
|
| 50 |
+
optional shape parameter to IndexedBase upon construction:
|
| 51 |
+
|
| 52 |
+
>>> dim1, dim2 = symbols('dim1 dim2', integer=True)
|
| 53 |
+
>>> A = IndexedBase('A', shape=(dim1, 2*dim1, dim2))
|
| 54 |
+
>>> A.shape
|
| 55 |
+
(dim1, 2*dim1, dim2)
|
| 56 |
+
>>> A[i, j, 3].shape
|
| 57 |
+
(dim1, 2*dim1, dim2)
|
| 58 |
+
|
| 59 |
+
If an IndexedBase object has no shape information, it is assumed that the
|
| 60 |
+
array is as large as the ranges of its indices:
|
| 61 |
+
|
| 62 |
+
>>> n, m = symbols('n m', integer=True)
|
| 63 |
+
>>> i = Idx('i', m)
|
| 64 |
+
>>> j = Idx('j', n)
|
| 65 |
+
>>> M[i, j].shape
|
| 66 |
+
(m, n)
|
| 67 |
+
>>> M[i, j].ranges
|
| 68 |
+
[(0, m - 1), (0, n - 1)]
|
| 69 |
+
|
| 70 |
+
The above can be compared with the following:
|
| 71 |
+
|
| 72 |
+
>>> A[i, 2, j].shape
|
| 73 |
+
(dim1, 2*dim1, dim2)
|
| 74 |
+
>>> A[i, 2, j].ranges
|
| 75 |
+
[(0, m - 1), None, (0, n - 1)]
|
| 76 |
+
|
| 77 |
+
To analyze the structure of indexed expressions, you can use the methods
|
| 78 |
+
get_indices() and get_contraction_structure():
|
| 79 |
+
|
| 80 |
+
>>> from sympy.tensor import get_indices, get_contraction_structure
|
| 81 |
+
>>> get_indices(A[i, j, j])
|
| 82 |
+
({i}, {})
|
| 83 |
+
>>> get_contraction_structure(A[i, j, j])
|
| 84 |
+
{(j,): {A[i, j, j]}}
|
| 85 |
+
|
| 86 |
+
See the appropriate docstrings for a detailed explanation of the output.
|
| 87 |
+
"""
|
| 88 |
+
|
| 89 |
+
# TODO: (some ideas for improvement)
|
| 90 |
+
#
|
| 91 |
+
# o test and guarantee numpy compatibility
|
| 92 |
+
# - implement full support for broadcasting
|
| 93 |
+
# - strided arrays
|
| 94 |
+
#
|
| 95 |
+
# o more functions to analyze indexed expressions
|
| 96 |
+
# - identify standard constructs, e.g matrix-vector product in a subexpression
|
| 97 |
+
#
|
| 98 |
+
# o functions to generate component based arrays (numpy and sympy.Matrix)
|
| 99 |
+
# - generate a single array directly from Indexed
|
| 100 |
+
# - convert simple sub-expressions
|
| 101 |
+
#
|
| 102 |
+
# o sophisticated indexing (possibly in subclasses to preserve simplicity)
|
| 103 |
+
# - Idx with range smaller than dimension of Indexed
|
| 104 |
+
# - Idx with stepsize != 1
|
| 105 |
+
# - Idx with step determined by function call
|
| 106 |
+
from collections.abc import Iterable
|
| 107 |
+
|
| 108 |
+
from sympy.core.numbers import Number
|
| 109 |
+
from sympy.core.assumptions import StdFactKB
|
| 110 |
+
from sympy.core import Expr, Tuple, sympify, S
|
| 111 |
+
from sympy.core.symbol import _filter_assumptions, Symbol
|
| 112 |
+
from sympy.core.logic import fuzzy_bool, fuzzy_not
|
| 113 |
+
from sympy.core.sympify import _sympify
|
| 114 |
+
from sympy.functions.special.tensor_functions import KroneckerDelta
|
| 115 |
+
from sympy.multipledispatch import dispatch
|
| 116 |
+
from sympy.utilities.iterables import is_sequence, NotIterable
|
| 117 |
+
from sympy.utilities.misc import filldedent
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
class IndexException(Exception):
    """Raised when an indexing operation is structurally invalid.

    Used by :class:`Indexed` and :class:`IndexedBase` for problems such as
    a missing index, a rank mismatch, or an index whose range cannot be
    determined.
    """
    pass
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
class Indexed(Expr):
    """Represents a mathematical object with indices.

    >>> from sympy import Indexed, IndexedBase, Idx, symbols
    >>> i, j = symbols('i j', cls=Idx)
    >>> Indexed('A', i, j)
    A[i, j]

    It is recommended that ``Indexed`` objects be created by indexing ``IndexedBase``:
    ``IndexedBase('A')[i, j]`` instead of ``Indexed(IndexedBase('A'), i, j)``.

    >>> A = IndexedBase('A')
    >>> a_ij = A[i, j]           # Prefer this,
    >>> b_ij = Indexed(A, i, j)  # over this.
    >>> a_ij == b_ij
    True

    """
    is_Indexed = True
    is_symbol = True
    is_Atom = True

    def __new__(cls, base, *args, **kw_args):
        from sympy.tensor.array.ndim_array import NDimArray
        from sympy.matrices.matrixbase import MatrixBase

        if not args:
            raise IndexException("Indexed needs at least one index.")
        if isinstance(base, (str, Symbol)):
            base = IndexedBase(base)
        elif not hasattr(base, '__getitem__') and not isinstance(base, IndexedBase):
            raise TypeError(filldedent("""
                The base can only be replaced with a string, Symbol,
                IndexedBase or an object with a method for getting
                items (i.e. an object with a `__getitem__` method).
                """))
        args = list(map(sympify, args))
        # Fast path: if the base is a concrete container and every index is
        # a number, return the actual element instead of a symbolic Indexed.
        if isinstance(base, (NDimArray, Iterable, Tuple, MatrixBase)) and all(i.is_number for i in args):
            if len(args) == 1:
                return base[args[0]]
            else:
                return base[args]

        base = _sympify(base)

        obj = Expr.__new__(cls, base, *args, **kw_args)

        # Propagate the base's assumptions (real, integer, ...) onto the
        # element, so e.g. IndexedBase('A', real=True)[i].is_real is True.
        IndexedBase._set_assumptions(obj, base.assumptions0)

        return obj

    def _hashable_content(self):
        # Assumptions participate in hashing/equality so that otherwise
        # identical Indexed objects with different assumptions are distinct.
        return super()._hashable_content() + tuple(sorted(self.assumptions0.items()))

    @property
    def name(self):
        return str(self)

    @property
    def _diff_wrt(self):
        """Allow derivatives with respect to an ``Indexed`` object."""
        return True

    def _eval_derivative(self, wrt):
        from sympy.tensor.array.ndim_array import NDimArray

        if isinstance(wrt, Indexed) and wrt.base == self.base:
            if len(self.indices) != len(wrt.indices):
                msg = "Different # of indices: d({!s})/d({!s})".format(self,
                                                                       wrt)
                raise IndexException(msg)
            # d(A[i])/d(A[j]) == KroneckerDelta(i, j), extended per-index.
            result = S.One
            for index1, index2 in zip(self.indices, wrt.indices):
                result *= KroneckerDelta(index1, index2)
            return result
        elif isinstance(self.base, NDimArray):
            from sympy.tensor.array import derive_by_array
            return Indexed(derive_by_array(self.base, wrt), *self.args[1:])
        else:
            # Differentiating an element w.r.t. one of its own indices is
            # undefined; w.r.t. anything else the element is constant.
            if Tuple(self.indices).has(wrt):
                return S.NaN
            return S.Zero

    @property
    def assumptions0(self):
        return {k: v for k, v in self._assumptions.items() if v is not None}

    @property
    def base(self):
        """Returns the ``IndexedBase`` of the ``Indexed`` object.

        Examples
        ========

        >>> from sympy import Indexed, IndexedBase, Idx, symbols
        >>> i, j = symbols('i j', cls=Idx)
        >>> Indexed('A', i, j).base
        A
        >>> B = IndexedBase('B')
        >>> B == B[i, j].base
        True

        """
        return self.args[0]

    @property
    def indices(self):
        """
        Returns the indices of the ``Indexed`` object.

        Examples
        ========

        >>> from sympy import Indexed, Idx, symbols
        >>> i, j = symbols('i j', cls=Idx)
        >>> Indexed('A', i, j).indices
        (i, j)

        """
        return self.args[1:]

    @property
    def rank(self):
        """
        Returns the rank of the ``Indexed`` object.

        Examples
        ========

        >>> from sympy import Indexed, Idx, symbols
        >>> i, j, k, l, m = symbols('i:m', cls=Idx)
        >>> Indexed('A', i, j).rank
        2
        >>> q = Indexed('A', i, j, k, l, m)
        >>> q.rank
        5
        >>> q.rank == len(q.indices)
        True

        """
        # args[0] is the base, so the number of indices is len(args) - 1.
        return len(self.args) - 1

    @property
    def shape(self):
        """Returns a list with dimensions of each index.

        Dimensions is a property of the array, not of the indices. Still, if
        the ``IndexedBase`` does not define a shape attribute, it is assumed
        that the ranges of the indices correspond to the shape of the array.

        >>> from sympy import IndexedBase, Idx, symbols
        >>> n, m = symbols('n m', integer=True)
        >>> i = Idx('i', m)
        >>> j = Idx('j', m)
        >>> A = IndexedBase('A', shape=(n, n))
        >>> B = IndexedBase('B')
        >>> A[i, j].shape
        (n, n)
        >>> B[i, j].shape
        (m, m)
        """
        # An explicit shape on the base always wins over index ranges.
        if self.base.shape:
            return self.base.shape
        sizes = []
        for i in self.indices:
            upper = getattr(i, 'upper', None)
            lower = getattr(i, 'lower', None)
            if None in (upper, lower):
                raise IndexException(filldedent("""
                    Range is not defined for all indices in: %s""" % self))
            try:
                size = upper - lower + 1
            except TypeError:
                raise IndexException(filldedent("""
                    Shape cannot be inferred from Idx with
                    undefined range: %s""" % self))
            sizes.append(size)
        return Tuple(*sizes)

    @property
    def ranges(self):
        """Returns a list of tuples with lower and upper range of each index.

        If an index does not define the data members upper and lower, the
        corresponding slot in the list contains ``None`` instead of a tuple.

        Examples
        ========

        >>> from sympy import Indexed,Idx, symbols
        >>> Indexed('A', Idx('i', 2), Idx('j', 4), Idx('k', 8)).ranges
        [(0, 1), (0, 3), (0, 7)]
        >>> Indexed('A', Idx('i', 3), Idx('j', 3), Idx('k', 3)).ranges
        [(0, 2), (0, 2), (0, 2)]
        >>> x, y, z = symbols('x y z', integer=True)
        >>> Indexed('A', x, y, z).ranges
        [None, None, None]

        """
        ranges = []
        # Sentinel distinguishes "attribute missing" from a bound that is
        # legitimately None.
        sentinel = object()
        for i in self.indices:
            upper = getattr(i, 'upper', sentinel)
            lower = getattr(i, 'lower', sentinel)
            if sentinel not in (upper, lower):
                ranges.append((lower, upper))
            else:
                ranges.append(None)
        return ranges

    def _sympystr(self, p):
        indices = list(map(p.doprint, self.indices))
        return "%s[%s]" % (p.doprint(self.base), ", ".join(indices))

    @property
    def free_symbols(self):
        base_free_symbols = self.base.free_symbols
        indices_free_symbols = {
            fs for i in self.indices for fs in i.free_symbols}
        # Include self only when the base itself contributes free symbols;
        # otherwise the indices alone determine the free symbols.
        if base_free_symbols:
            return {self} | base_free_symbols | indices_free_symbols
        else:
            return indices_free_symbols

    @property
    def expr_free_symbols(self):
        from sympy.utilities.exceptions import sympy_deprecation_warning
        sympy_deprecation_warning("""
        The expr_free_symbols property is deprecated. Use free_symbols to get
        the free symbols of an expression.
        """,
            deprecated_since_version="1.9",
            active_deprecations_target="deprecated-expr-free-symbols")

        return {self}
|
| 360 |
+
|
| 361 |
+
|
| 362 |
+
class IndexedBase(Expr, NotIterable):
    """Represent the base or stem of an indexed object

    The IndexedBase class represent an array that contains elements. The main purpose
    of this class is to allow the convenient creation of objects of the Indexed
    class. The __getitem__ method of IndexedBase returns an instance of
    Indexed. Alone, without indices, the IndexedBase class can be used as a
    notation for e.g. matrix equations, resembling what you could do with the
    Symbol class. But, the IndexedBase class adds functionality that is not
    available for Symbol instances:

    - An IndexedBase object can optionally store shape information. This can
      be used in to check array conformance and conditions for numpy
      broadcasting. (TODO)
    - An IndexedBase object implements syntactic sugar that allows easy symbolic
      representation of array operations, using implicit summation of
      repeated indices.
    - The IndexedBase object symbolizes a mathematical structure equivalent
      to arrays, and is recognized as such for code generation and automatic
      compilation and wrapping.

    >>> from sympy.tensor import IndexedBase, Idx
    >>> from sympy import symbols
    >>> A = IndexedBase('A'); A
    A
    >>> type(A)
    <class 'sympy.tensor.indexed.IndexedBase'>

    When an IndexedBase object receives indices, it returns an array with named
    axes, represented by an Indexed object:

    >>> i, j = symbols('i j', integer=True)
    >>> A[i, j, 2]
    A[i, j, 2]
    >>> type(A[i, j, 2])
    <class 'sympy.tensor.indexed.Indexed'>

    The IndexedBase constructor takes an optional shape argument. If given,
    it overrides any shape information in the indices. (But not the index
    ranges!)

    >>> m, n, o, p = symbols('m n o p', integer=True)
    >>> i = Idx('i', m)
    >>> j = Idx('j', n)
    >>> A[i, j].shape
    (m, n)
    >>> B = IndexedBase('B', shape=(o, p))
    >>> B[i, j].shape
    (o, p)

    Assumptions can be specified with keyword arguments the same way as for Symbol:

    >>> A_real = IndexedBase('A', real=True)
    >>> A_real.is_real
    True
    >>> A != A_real
    True

    Assumptions can also be inherited if a Symbol is used to initialize the IndexedBase:

    >>> I = symbols('I', integer=True)
    >>> C_inherit = IndexedBase(I)
    >>> C_explicit = IndexedBase('I', integer=True)
    >>> C_inherit == C_explicit
    True
    """
    is_symbol = True
    is_Atom = True

    @staticmethod
    def _set_assumptions(obj, assumptions):
        """Set assumptions on obj, making sure to apply consistent values."""
        tmp_asm_copy = assumptions.copy()
        # Commutativity must be resolved to a definite fuzzy value before
        # building the fact knowledge base.
        is_commutative = fuzzy_bool(assumptions.get('commutative', True))
        assumptions['commutative'] = is_commutative
        obj._assumptions = StdFactKB(assumptions)
        obj._assumptions._generator = tmp_asm_copy  # Issue #8873

    def __new__(cls, label, shape=None, *, offset=S.Zero, strides=None, **kw_args):
        from sympy.matrices.matrixbase import MatrixBase
        from sympy.tensor.array.ndim_array import NDimArray

        assumptions, kw_args = _filter_assumptions(kw_args)
        # Normalize the label: strings become integer-free Symbols carrying
        # the given assumptions; existing Symbols contribute their own.
        if isinstance(label, str):
            label = Symbol(label, **assumptions)
        elif isinstance(label, Symbol):
            assumptions = label._merge(assumptions)
        elif isinstance(label, (MatrixBase, NDimArray)):
            # Concrete arrays are already indexable; return them unchanged.
            return label
        elif isinstance(label, Iterable):
            return _sympify(label)
        else:
            label = _sympify(label)

        if is_sequence(shape):
            shape = Tuple(*shape)
        elif shape is not None:
            # A scalar shape means a rank-1 array of that dimension.
            shape = Tuple(shape)

        if shape is not None:
            obj = Expr.__new__(cls, label, shape)
        else:
            obj = Expr.__new__(cls, label)
        obj._shape = shape
        obj._offset = offset
        obj._strides = strides
        obj._name = str(label)

        IndexedBase._set_assumptions(obj, assumptions)
        return obj

    @property
    def name(self):
        return self._name

    def _hashable_content(self):
        # Assumptions participate in hashing/equality, so A and
        # IndexedBase('A', real=True) compare unequal.
        return super()._hashable_content() + tuple(sorted(self.assumptions0.items()))

    @property
    def assumptions0(self):
        return {k: v for k, v in self._assumptions.items() if v is not None}

    def __getitem__(self, indices, **kw_args):
        if is_sequence(indices):
            # Special case needed because M[*my_tuple] is a syntax error.
            if self.shape and len(self.shape) != len(indices):
                raise IndexException("Rank mismatch.")
            return Indexed(self, *indices, **kw_args)
        else:
            if self.shape and len(self.shape) != 1:
                raise IndexException("Rank mismatch.")
            return Indexed(self, indices, **kw_args)

    @property
    def shape(self):
        """Returns the shape of the ``IndexedBase`` object.

        Examples
        ========

        >>> from sympy import IndexedBase, Idx
        >>> from sympy.abc import x, y
        >>> IndexedBase('A', shape=(x, y)).shape
        (x, y)

        Note: If the shape of the ``IndexedBase`` is specified, it will override
        any shape information given by the indices.

        >>> A = IndexedBase('A', shape=(x, y))
        >>> B = IndexedBase('B')
        >>> i = Idx('i', 2)
        >>> j = Idx('j', 1)
        >>> A[i, j].shape
        (x, y)
        >>> B[i, j].shape
        (2, 1)

        """
        return self._shape

    @property
    def strides(self):
        """Returns the strided scheme for the ``IndexedBase`` object.

        Normally this is a tuple denoting the number of
        steps to take in the respective dimension when traversing
        an array. For code generation purposes strides='C' and
        strides='F' can also be used.

        strides='C' would mean that code printer would unroll
        in row-major order and 'F' means unroll in column major
        order.

        """
        return self._strides

    @property
    def offset(self):
        """Returns the offset for the ``IndexedBase`` object.

        This is the value added to the resulting index when the
        2D Indexed object is unrolled to a 1D form. Used in code
        generation.

        Examples
        ==========
        >>> from sympy.printing import ccode
        >>> from sympy.tensor import IndexedBase, Idx
        >>> from sympy import symbols
        >>> l, m, n, o = symbols('l m n o', integer=True)
        >>> A = IndexedBase('A', strides=(l, m, n), offset=o)
        >>> i, j, k = map(Idx, 'ijk')
        >>> ccode(A[i, j, k])
        'A[l*i + m*j + n*k + o]'

        """
        return self._offset

    @property
    def label(self):
        """Returns the label of the ``IndexedBase`` object.

        Examples
        ========

        >>> from sympy import IndexedBase
        >>> from sympy.abc import x, y
        >>> IndexedBase('A', shape=(x, y)).label
        A

        """
        return self.args[0]

    def _sympystr(self, p):
        return p.doprint(self.label)
|
| 578 |
+
|
| 579 |
+
|
| 580 |
+
class Idx(Expr):
    """Represents an integer index as an ``Integer`` or integer expression.

    There are a number of ways to create an ``Idx`` object. The constructor
    takes two arguments:

    ``label``
        An integer or a symbol that labels the index.
    ``range``
        Optionally you can specify a range as either

        * ``Symbol`` or integer: This is interpreted as a dimension. Lower and
          upper bounds are set to ``0`` and ``range - 1``, respectively.
        * ``tuple``: The two elements are interpreted as the lower and upper
          bounds of the range, respectively.

    Note: bounds of the range are assumed to be either integer or infinite (oo
    and -oo are allowed to specify an unbounded range). If ``n`` is given as a
    bound, then ``n.is_integer`` must not return false.

    For convenience, if the label is given as a string it is automatically
    converted to an integer symbol. (Note: this conversion is not done for
    range or dimension arguments.)

    Examples
    ========

    >>> from sympy import Idx, symbols, oo
    >>> n, i, L, U = symbols('n i L U', integer=True)

    If a string is given for the label an integer ``Symbol`` is created and the
    bounds are both ``None``:

    >>> idx = Idx('qwerty'); idx
    qwerty
    >>> idx.lower, idx.upper
    (None, None)

    Both upper and lower bounds can be specified:

    >>> idx = Idx(i, (L, U)); idx
    i
    >>> idx.lower, idx.upper
    (L, U)

    When only a single bound is given it is interpreted as the dimension
    and the lower bound defaults to 0:

    >>> idx = Idx(i, n); idx.lower, idx.upper
    (0, n - 1)
    >>> idx = Idx(i, 4); idx.lower, idx.upper
    (0, 3)
    >>> idx = Idx(i, oo); idx.lower, idx.upper
    (0, oo)

    """

    is_integer = True
    is_finite = True
    is_real = True
    is_symbol = True
    is_Atom = True
    _diff_wrt = True

    def __new__(cls, label, range=None, **kw_args):

        if isinstance(label, str):
            label = Symbol(label, integer=True)
        label, range = list(map(sympify, (label, range)))

        # A purely numeric label collapses to the number itself; no Idx
        # wrapper is created.
        if label.is_Number:
            if not label.is_integer:
                raise TypeError("Index is not an integer number.")
            return label

        if not label.is_integer:
            raise TypeError("Idx object requires an integer label.")

        elif is_sequence(range):
            if len(range) != 2:
                raise ValueError(filldedent("""
                    Idx range tuple must have length 2, but got %s""" % len(range)))
            for bound in range:
                # oo/-oo are explicitly allowed as unbounded endpoints.
                if (bound.is_integer is False and bound is not S.Infinity
                        and bound is not S.NegativeInfinity):
                    raise TypeError("Idx object requires integer bounds.")
            args = label, Tuple(*range)
        elif isinstance(range, Expr):
            if range is not S.Infinity and fuzzy_not(range.is_integer):
                raise TypeError("Idx object requires an integer dimension.")
            # A single expression is a dimension: range [0, range - 1].
            args = label, Tuple(0, range - 1)
        elif range:
            raise TypeError(filldedent("""
                The range must be an ordered iterable or
                integer SymPy expression."""))
        else:
            args = label,

        obj = Expr.__new__(cls, *args, **kw_args)
        # Class-level flags are not enough for the fact KB; set explicitly.
        obj._assumptions["finite"] = True
        obj._assumptions["real"] = True
        return obj

    @property
    def label(self):
        """Returns the label (Integer or integer expression) of the Idx object.

        Examples
        ========

        >>> from sympy import Idx, Symbol
        >>> x = Symbol('x', integer=True)
        >>> Idx(x).label
        x
        >>> j = Symbol('j', integer=True)
        >>> Idx(j).label
        j
        >>> Idx(j + 1).label
        j + 1

        """
        return self.args[0]

    @property
    def lower(self):
        """Returns the lower bound of the ``Idx``.

        Examples
        ========

        >>> from sympy import Idx
        >>> Idx('j', 2).lower
        0
        >>> Idx('j', 5).lower
        0
        >>> Idx('j').lower is None
        True

        """
        try:
            return self.args[1][0]
        except IndexError:
            # No range was given at construction time.
            return

    @property
    def upper(self):
        """Returns the upper bound of the ``Idx``.

        Examples
        ========

        >>> from sympy import Idx
        >>> Idx('j', 2).upper
        1
        >>> Idx('j', 5).upper
        4
        >>> Idx('j').upper is None
        True

        """
        try:
            return self.args[1][1]
        except IndexError:
            # No range was given at construction time.
            return

    def _sympystr(self, p):
        return p.doprint(self.label)

    @property
    def name(self):
        return self.label.name if self.label.is_Symbol else str(self.label)

    @property
    def free_symbols(self):
        return {self}
|
| 755 |
+
|
| 756 |
+
|
| 757 |
+
@dispatch(Idx, Idx)
|
| 758 |
+
def _eval_is_ge(lhs, rhs): # noqa:F811
|
| 759 |
+
|
| 760 |
+
other_upper = rhs if rhs.upper is None else rhs.upper
|
| 761 |
+
other_lower = rhs if rhs.lower is None else rhs.lower
|
| 762 |
+
|
| 763 |
+
if lhs.lower is not None and (lhs.lower >= other_upper) == True:
|
| 764 |
+
return True
|
| 765 |
+
if lhs.upper is not None and (lhs.upper < other_lower) == True:
|
| 766 |
+
return False
|
| 767 |
+
return None
|
| 768 |
+
|
| 769 |
+
|
| 770 |
+
@dispatch(Idx, Number) # type:ignore
|
| 771 |
+
def _eval_is_ge(lhs, rhs): # noqa:F811
|
| 772 |
+
|
| 773 |
+
other_upper = rhs
|
| 774 |
+
other_lower = rhs
|
| 775 |
+
|
| 776 |
+
if lhs.lower is not None and (lhs.lower >= other_upper) == True:
|
| 777 |
+
return True
|
| 778 |
+
if lhs.upper is not None and (lhs.upper < other_lower) == True:
|
| 779 |
+
return False
|
| 780 |
+
return None
|
| 781 |
+
|
| 782 |
+
|
| 783 |
+
@dispatch(Number, Idx) # type:ignore
|
| 784 |
+
def _eval_is_ge(lhs, rhs): # noqa:F811
|
| 785 |
+
|
| 786 |
+
other_upper = lhs
|
| 787 |
+
other_lower = lhs
|
| 788 |
+
|
| 789 |
+
if rhs.upper is not None and (rhs.upper <= other_lower) == True:
|
| 790 |
+
return True
|
| 791 |
+
if rhs.lower is not None and (rhs.lower > other_upper) == True:
|
| 792 |
+
return False
|
| 793 |
+
return None
|
evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/tensor.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/tests/test_printing.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead
|
| 2 |
+
from sympy import I
|
| 3 |
+
|
| 4 |
+
def test_printing_TensMul():
|
| 5 |
+
R3 = TensorIndexType('R3', dim=3)
|
| 6 |
+
p, q = tensor_indices("p q", R3)
|
| 7 |
+
K = TensorHead("K", [R3])
|
| 8 |
+
|
| 9 |
+
assert repr(2*K(p)) == "2*K(p)"
|
| 10 |
+
assert repr(-K(p)) == "-K(p)"
|
| 11 |
+
assert repr(-2*K(p)*K(q)) == "-2*K(p)*K(q)"
|
| 12 |
+
assert repr(-I*K(p)) == "-I*K(p)"
|
| 13 |
+
assert repr(I*K(p)) == "I*K(p)"
|
evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/toperators.py
ADDED
|
@@ -0,0 +1,256 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from sympy import permutedims
|
| 2 |
+
from sympy.core.numbers import Number
|
| 3 |
+
from sympy.core.singleton import S
|
| 4 |
+
from sympy.core.symbol import Symbol
|
| 5 |
+
from sympy.core.sympify import sympify
|
| 6 |
+
from sympy.tensor.tensor import Tensor, TensExpr, TensAdd, TensMul
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class PartialDerivative(TensExpr):
|
| 10 |
+
"""
|
| 11 |
+
Partial derivative for tensor expressions.
|
| 12 |
+
|
| 13 |
+
Examples
|
| 14 |
+
========
|
| 15 |
+
|
| 16 |
+
>>> from sympy.tensor.tensor import TensorIndexType, TensorHead
|
| 17 |
+
>>> from sympy.tensor.toperators import PartialDerivative
|
| 18 |
+
>>> from sympy import symbols
|
| 19 |
+
>>> L = TensorIndexType("L")
|
| 20 |
+
>>> A = TensorHead("A", [L])
|
| 21 |
+
>>> B = TensorHead("B", [L])
|
| 22 |
+
>>> i, j, k = symbols("i j k")
|
| 23 |
+
|
| 24 |
+
>>> expr = PartialDerivative(A(i), A(j))
|
| 25 |
+
>>> expr
|
| 26 |
+
PartialDerivative(A(i), A(j))
|
| 27 |
+
|
| 28 |
+
The ``PartialDerivative`` object behaves like a tensorial expression:
|
| 29 |
+
|
| 30 |
+
>>> expr.get_indices()
|
| 31 |
+
[i, -j]
|
| 32 |
+
|
| 33 |
+
Notice that the deriving variables have opposite valence than the
|
| 34 |
+
printed one: ``A(j)`` is printed as covariant, but the index of the
|
| 35 |
+
derivative is actually contravariant, i.e. ``-j``.
|
| 36 |
+
|
| 37 |
+
Indices can be contracted:
|
| 38 |
+
|
| 39 |
+
>>> expr = PartialDerivative(A(i), A(i))
|
| 40 |
+
>>> expr
|
| 41 |
+
PartialDerivative(A(L_0), A(L_0))
|
| 42 |
+
>>> expr.get_indices()
|
| 43 |
+
[L_0, -L_0]
|
| 44 |
+
|
| 45 |
+
The method ``.get_indices()`` always returns all indices (even the
|
| 46 |
+
contracted ones). If only uncontracted indices are needed, call
|
| 47 |
+
``.get_free_indices()``:
|
| 48 |
+
|
| 49 |
+
>>> expr.get_free_indices()
|
| 50 |
+
[]
|
| 51 |
+
|
| 52 |
+
Nested partial derivatives are flattened:
|
| 53 |
+
|
| 54 |
+
>>> expr = PartialDerivative(PartialDerivative(A(i), A(j)), A(k))
|
| 55 |
+
>>> expr
|
| 56 |
+
PartialDerivative(A(i), A(j), A(k))
|
| 57 |
+
>>> expr.get_indices()
|
| 58 |
+
[i, -j, -k]
|
| 59 |
+
|
| 60 |
+
Replace a derivative with array values:
|
| 61 |
+
|
| 62 |
+
>>> from sympy.abc import x, y
|
| 63 |
+
>>> from sympy import sin, log
|
| 64 |
+
>>> compA = [sin(x), log(x)*y**3]
|
| 65 |
+
>>> compB = [x, y]
|
| 66 |
+
>>> expr = PartialDerivative(A(i), B(j))
|
| 67 |
+
>>> expr.replace_with_arrays({A(i): compA, B(i): compB})
|
| 68 |
+
[[cos(x), 0], [y**3/x, 3*y**2*log(x)]]
|
| 69 |
+
|
| 70 |
+
The returned array is indexed by `(i, -j)`.
|
| 71 |
+
|
| 72 |
+
Be careful that other SymPy modules put the indices of the deriving
|
| 73 |
+
variables before the indices of the derivand in the derivative result.
|
| 74 |
+
For example:
|
| 75 |
+
|
| 76 |
+
>>> expr.get_free_indices()
|
| 77 |
+
[i, -j]
|
| 78 |
+
|
| 79 |
+
>>> from sympy import Matrix, Array
|
| 80 |
+
>>> Matrix(compA).diff(Matrix(compB)).reshape(2, 2)
|
| 81 |
+
[[cos(x), y**3/x], [0, 3*y**2*log(x)]]
|
| 82 |
+
>>> Array(compA).diff(Array(compB))
|
| 83 |
+
[[cos(x), y**3/x], [0, 3*y**2*log(x)]]
|
| 84 |
+
|
| 85 |
+
These are the transpose of the result of ``PartialDerivative``,
|
| 86 |
+
as the matrix and the array modules put the index `-j` before `i` in the
|
| 87 |
+
derivative result. An array read with index order `(-j, i)` is indeed the
|
| 88 |
+
transpose of the same array read with index order `(i, -j)`. By specifying
|
| 89 |
+
the index order to ``.replace_with_arrays`` one can get a compatible
|
| 90 |
+
expression:
|
| 91 |
+
|
| 92 |
+
>>> expr.replace_with_arrays({A(i): compA, B(i): compB}, [-j, i])
|
| 93 |
+
[[cos(x), y**3/x], [0, 3*y**2*log(x)]]
|
| 94 |
+
"""
|
| 95 |
+
|
| 96 |
+
def __new__(cls, expr, *variables):
|
| 97 |
+
|
| 98 |
+
# Flatten:
|
| 99 |
+
if isinstance(expr, PartialDerivative):
|
| 100 |
+
variables = expr.variables + variables
|
| 101 |
+
expr = expr.expr
|
| 102 |
+
|
| 103 |
+
args, indices, free, dum = cls._contract_indices_for_derivative(
|
| 104 |
+
S(expr), variables)
|
| 105 |
+
|
| 106 |
+
obj = TensExpr.__new__(cls, *args)
|
| 107 |
+
|
| 108 |
+
obj._indices = indices
|
| 109 |
+
obj._free = free
|
| 110 |
+
obj._dum = dum
|
| 111 |
+
return obj
|
| 112 |
+
|
| 113 |
+
@property
|
| 114 |
+
def coeff(self):
|
| 115 |
+
return S.One
|
| 116 |
+
|
| 117 |
+
@property
|
| 118 |
+
def nocoeff(self):
|
| 119 |
+
return self
|
| 120 |
+
|
| 121 |
+
@classmethod
|
| 122 |
+
def _contract_indices_for_derivative(cls, expr, variables):
|
| 123 |
+
variables_opposite_valence = []
|
| 124 |
+
|
| 125 |
+
for i in variables:
|
| 126 |
+
if isinstance(i, Tensor):
|
| 127 |
+
i_free_indices = i.get_free_indices()
|
| 128 |
+
variables_opposite_valence.append(
|
| 129 |
+
i.xreplace({k: -k for k in i_free_indices}))
|
| 130 |
+
elif isinstance(i, Symbol):
|
| 131 |
+
variables_opposite_valence.append(i)
|
| 132 |
+
|
| 133 |
+
args, indices, free, dum = TensMul._tensMul_contract_indices(
|
| 134 |
+
[expr] + variables_opposite_valence, replace_indices=True)
|
| 135 |
+
|
| 136 |
+
for i in range(1, len(args)):
|
| 137 |
+
args_i = args[i]
|
| 138 |
+
if isinstance(args_i, Tensor):
|
| 139 |
+
i_indices = args[i].get_free_indices()
|
| 140 |
+
args[i] = args[i].xreplace({k: -k for k in i_indices})
|
| 141 |
+
|
| 142 |
+
return args, indices, free, dum
|
| 143 |
+
|
| 144 |
+
def doit(self, **hints):
|
| 145 |
+
args, indices, free, dum = self._contract_indices_for_derivative(self.expr, self.variables)
|
| 146 |
+
|
| 147 |
+
obj = self.func(*args)
|
| 148 |
+
obj._indices = indices
|
| 149 |
+
obj._free = free
|
| 150 |
+
obj._dum = dum
|
| 151 |
+
|
| 152 |
+
return obj
|
| 153 |
+
|
| 154 |
+
def _expand_partial_derivative(self):
|
| 155 |
+
args, indices, free, dum = self._contract_indices_for_derivative(self.expr, self.variables)
|
| 156 |
+
|
| 157 |
+
obj = self.func(*args)
|
| 158 |
+
obj._indices = indices
|
| 159 |
+
obj._free = free
|
| 160 |
+
obj._dum = dum
|
| 161 |
+
|
| 162 |
+
result = obj
|
| 163 |
+
|
| 164 |
+
if not args[0].free_symbols:
|
| 165 |
+
return S.Zero
|
| 166 |
+
elif isinstance(obj.expr, TensAdd):
|
| 167 |
+
# take care of sums of multi PDs
|
| 168 |
+
result = obj.expr.func(*[
|
| 169 |
+
self.func(a, *obj.variables)._expand_partial_derivative()
|
| 170 |
+
for a in result.expr.args])
|
| 171 |
+
elif isinstance(obj.expr, TensMul):
|
| 172 |
+
# take care of products of multi PDs
|
| 173 |
+
if len(obj.variables) == 1:
|
| 174 |
+
# derivative with respect to single variable
|
| 175 |
+
terms = []
|
| 176 |
+
mulargs = list(obj.expr.args)
|
| 177 |
+
for ind in range(len(mulargs)):
|
| 178 |
+
if not isinstance(sympify(mulargs[ind]), Number):
|
| 179 |
+
# a number coefficient is not considered for
|
| 180 |
+
# expansion of PartialDerivative
|
| 181 |
+
d = self.func(mulargs[ind], *obj.variables)._expand_partial_derivative()
|
| 182 |
+
terms.append(TensMul(*(mulargs[:ind]
|
| 183 |
+
+ [d]
|
| 184 |
+
+ mulargs[(ind + 1):])))
|
| 185 |
+
result = TensAdd.fromiter(terms)
|
| 186 |
+
else:
|
| 187 |
+
# derivative with respect to multiple variables
|
| 188 |
+
# decompose:
|
| 189 |
+
# partial(expr, (u, v))
|
| 190 |
+
# = partial(partial(expr, u).doit(), v).doit()
|
| 191 |
+
result = obj.expr # init with expr
|
| 192 |
+
for v in obj.variables:
|
| 193 |
+
result = self.func(result, v)._expand_partial_derivative()
|
| 194 |
+
# then throw PD on it
|
| 195 |
+
|
| 196 |
+
return result
|
| 197 |
+
|
| 198 |
+
def _perform_derivative(self):
|
| 199 |
+
result = self.expr
|
| 200 |
+
for v in self.variables:
|
| 201 |
+
if isinstance(result, TensExpr):
|
| 202 |
+
result = result._eval_partial_derivative(v)
|
| 203 |
+
else:
|
| 204 |
+
if v._diff_wrt:
|
| 205 |
+
result = result._eval_derivative(v)
|
| 206 |
+
else:
|
| 207 |
+
result = S.Zero
|
| 208 |
+
return result
|
| 209 |
+
|
| 210 |
+
def get_indices(self):
|
| 211 |
+
return self._indices
|
| 212 |
+
|
| 213 |
+
def get_free_indices(self):
|
| 214 |
+
free = sorted(self._free, key=lambda x: x[1])
|
| 215 |
+
return [i[0] for i in free]
|
| 216 |
+
|
| 217 |
+
def _replace_indices(self, repl):
|
| 218 |
+
expr = self.expr.xreplace(repl)
|
| 219 |
+
mirrored = {-k: -v for k, v in repl.items()}
|
| 220 |
+
variables = [i.xreplace(mirrored) for i in self.variables]
|
| 221 |
+
return self.func(expr, *variables)
|
| 222 |
+
|
| 223 |
+
@property
|
| 224 |
+
def expr(self):
|
| 225 |
+
return self.args[0]
|
| 226 |
+
|
| 227 |
+
@property
|
| 228 |
+
def variables(self):
|
| 229 |
+
return self.args[1:]
|
| 230 |
+
|
| 231 |
+
def _extract_data(self, replacement_dict):
|
| 232 |
+
from .array import derive_by_array, tensorcontraction
|
| 233 |
+
indices, array = self.expr._extract_data(replacement_dict)
|
| 234 |
+
for variable in self.variables:
|
| 235 |
+
var_indices, var_array = variable._extract_data(replacement_dict)
|
| 236 |
+
var_indices = [-i for i in var_indices]
|
| 237 |
+
coeff_array, var_array = zip(*[i.as_coeff_Mul() for i in var_array])
|
| 238 |
+
dim_before = len(array.shape)
|
| 239 |
+
array = derive_by_array(array, var_array)
|
| 240 |
+
dim_after = len(array.shape)
|
| 241 |
+
dim_increase = dim_after - dim_before
|
| 242 |
+
array = permutedims(array, [i + dim_increase for i in range(dim_before)] + list(range(dim_increase)))
|
| 243 |
+
array = array.as_mutable()
|
| 244 |
+
varindex = var_indices[0]
|
| 245 |
+
# Remove coefficients of base vector:
|
| 246 |
+
coeff_index = [0] + [slice(None) for i in range(len(indices))]
|
| 247 |
+
for i, coeff in enumerate(coeff_array):
|
| 248 |
+
coeff_index[0] = i
|
| 249 |
+
array[tuple(coeff_index)] /= coeff
|
| 250 |
+
if -varindex in indices:
|
| 251 |
+
pos = indices.index(-varindex)
|
| 252 |
+
array = tensorcontraction(array, (0, pos+1))
|
| 253 |
+
indices.pop(pos)
|
| 254 |
+
else:
|
| 255 |
+
indices.append(varindex)
|
| 256 |
+
return indices, array
|
evalkit_tf437/lib/python3.10/site-packages/accelerate/__init__.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__version__ = "0.23.0"
|
| 2 |
+
|
| 3 |
+
from .accelerator import Accelerator
|
| 4 |
+
from .big_modeling import (
|
| 5 |
+
cpu_offload,
|
| 6 |
+
cpu_offload_with_hook,
|
| 7 |
+
disk_offload,
|
| 8 |
+
dispatch_model,
|
| 9 |
+
init_empty_weights,
|
| 10 |
+
init_on_device,
|
| 11 |
+
load_checkpoint_and_dispatch,
|
| 12 |
+
)
|
| 13 |
+
from .data_loader import skip_first_batches
|
| 14 |
+
from .launchers import debug_launcher, notebook_launcher
|
| 15 |
+
from .state import PartialState
|
| 16 |
+
from .utils import (
|
| 17 |
+
AutocastKwargs,
|
| 18 |
+
DeepSpeedPlugin,
|
| 19 |
+
DistributedDataParallelKwargs,
|
| 20 |
+
DistributedType,
|
| 21 |
+
FullyShardedDataParallelPlugin,
|
| 22 |
+
GradScalerKwargs,
|
| 23 |
+
InitProcessGroupKwargs,
|
| 24 |
+
find_executable_batch_size,
|
| 25 |
+
infer_auto_device_map,
|
| 26 |
+
is_rich_available,
|
| 27 |
+
load_checkpoint_in_model,
|
| 28 |
+
synchronize_rng_states,
|
| 29 |
+
)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
if is_rich_available():
|
| 33 |
+
from .utils import rich
|
evalkit_tf437/lib/python3.10/site-packages/accelerate/accelerator.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
evalkit_tf437/lib/python3.10/site-packages/accelerate/hooks.py
ADDED
|
@@ -0,0 +1,597 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2022 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import functools
|
| 16 |
+
from typing import Dict, List, Mapping, Optional, Union
|
| 17 |
+
|
| 18 |
+
import torch
|
| 19 |
+
import torch.nn as nn
|
| 20 |
+
|
| 21 |
+
from .state import PartialState
|
| 22 |
+
from .utils import (
|
| 23 |
+
PrefixedDataset,
|
| 24 |
+
find_device,
|
| 25 |
+
named_module_tensors,
|
| 26 |
+
send_to_device,
|
| 27 |
+
set_module_tensor_to_device,
|
| 28 |
+
)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class ModelHook:
|
| 32 |
+
"""
|
| 33 |
+
A hook that contains callbacks to be executed just before and after the forward method of a model. The difference
|
| 34 |
+
with PyTorch existing hooks is that they get passed along the kwargs.
|
| 35 |
+
|
| 36 |
+
Class attribute:
|
| 37 |
+
- **no_grad** (`bool`, *optional*, defaults to `False`) -- Whether or not to execute the actual forward pass under
|
| 38 |
+
the `torch.no_grad()` context manager.
|
| 39 |
+
"""
|
| 40 |
+
|
| 41 |
+
no_grad = False
|
| 42 |
+
|
| 43 |
+
def init_hook(self, module):
|
| 44 |
+
"""
|
| 45 |
+
To be executed when the hook is attached to the module.
|
| 46 |
+
|
| 47 |
+
Args:
|
| 48 |
+
module (`torch.nn.Module`): The module attached to this hook.
|
| 49 |
+
"""
|
| 50 |
+
return module
|
| 51 |
+
|
| 52 |
+
def pre_forward(self, module, *args, **kwargs):
|
| 53 |
+
"""
|
| 54 |
+
To be executed just before the forward method of the model.
|
| 55 |
+
|
| 56 |
+
Args:
|
| 57 |
+
module (`torch.nn.Module`): The module whose forward pass will be executed just after this event.
|
| 58 |
+
args (`Tuple[Any]`): The positional arguments passed to the module.
|
| 59 |
+
kwargs (`Dict[Str, Any]`): The keyword arguments passed to the module.
|
| 60 |
+
|
| 61 |
+
Returns:
|
| 62 |
+
`Tuple[Tuple[Any], Dict[Str, Any]]`: A tuple with the treated `args` and `kwargs`.
|
| 63 |
+
"""
|
| 64 |
+
return args, kwargs
|
| 65 |
+
|
| 66 |
+
def post_forward(self, module, output):
|
| 67 |
+
"""
|
| 68 |
+
To be executed just after the forward method of the model.
|
| 69 |
+
|
| 70 |
+
Args:
|
| 71 |
+
module (`torch.nn.Module`): The module whose forward pass been executed just before this event.
|
| 72 |
+
output (`Any`): The output of the module.
|
| 73 |
+
|
| 74 |
+
Returns:
|
| 75 |
+
`Any`: The processed `output`.
|
| 76 |
+
"""
|
| 77 |
+
return output
|
| 78 |
+
|
| 79 |
+
def detach_hook(self, module):
|
| 80 |
+
"""
|
| 81 |
+
To be executed when the hook is detached from a module.
|
| 82 |
+
|
| 83 |
+
Args:
|
| 84 |
+
module (`torch.nn.Module`): The module detached from this hook.
|
| 85 |
+
"""
|
| 86 |
+
return module
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
class SequentialHook(ModelHook):
|
| 90 |
+
"""
|
| 91 |
+
A hook that can contain several hooks and iterates through them at each event.
|
| 92 |
+
"""
|
| 93 |
+
|
| 94 |
+
def __init__(self, *hooks):
|
| 95 |
+
self.hooks = hooks
|
| 96 |
+
|
| 97 |
+
def init_hook(self, module):
|
| 98 |
+
for hook in self.hooks:
|
| 99 |
+
module = hook.init_hook(module)
|
| 100 |
+
return module
|
| 101 |
+
|
| 102 |
+
def pre_forward(self, module, *args, **kwargs):
|
| 103 |
+
for hook in self.hooks:
|
| 104 |
+
args, kwargs = hook.pre_forward(module, *args, **kwargs)
|
| 105 |
+
return args, kwargs
|
| 106 |
+
|
| 107 |
+
def post_forward(self, module, output):
|
| 108 |
+
for hook in self.hooks:
|
| 109 |
+
output = hook.post_forward(module, output)
|
| 110 |
+
return output
|
| 111 |
+
|
| 112 |
+
def detach_hook(self, module):
|
| 113 |
+
for hook in self.hooks:
|
| 114 |
+
module = hook.detach_hook(module)
|
| 115 |
+
return module
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
def add_hook_to_module(module: nn.Module, hook: ModelHook, append: bool = False):
|
| 119 |
+
"""
|
| 120 |
+
Adds a hook to a given module. This will rewrite the `forward` method of the module to include the hook, to remove
|
| 121 |
+
this behavior and restore the original `forward` method, use `remove_hook_from_module`.
|
| 122 |
+
|
| 123 |
+
<Tip warning={true}>
|
| 124 |
+
|
| 125 |
+
If the module already contains a hook, this will replace it with the new hook passed by default. To chain two hooks
|
| 126 |
+
together, pass `append=True`, so it chains the current and new hook into an instance of the `SequentialHook` class.
|
| 127 |
+
|
| 128 |
+
</Tip>
|
| 129 |
+
|
| 130 |
+
Args:
|
| 131 |
+
module (`torch.nn.Module`):
|
| 132 |
+
The module to attach a hook to.
|
| 133 |
+
hook (`ModelHook`):
|
| 134 |
+
The hook to attach.
|
| 135 |
+
append (`bool`, *optional*, defaults to `False`):
|
| 136 |
+
Whether the hook should be chained with an existing one (if module already contains a hook) or not.
|
| 137 |
+
|
| 138 |
+
Returns:
|
| 139 |
+
`torch.nn.Module`: The same module, with the hook attached (the module is modified in place, so the result can
|
| 140 |
+
be discarded).
|
| 141 |
+
"""
|
| 142 |
+
|
| 143 |
+
if append and (getattr(module, "_hf_hook", None) is not None):
|
| 144 |
+
old_hook = module._hf_hook
|
| 145 |
+
remove_hook_from_module(module)
|
| 146 |
+
hook = SequentialHook(old_hook, hook)
|
| 147 |
+
|
| 148 |
+
if hasattr(module, "_hf_hook") and hasattr(module, "_old_forward"):
|
| 149 |
+
# If we already put some hook on this module, we replace it with the new one.
|
| 150 |
+
old_forward = module._old_forward
|
| 151 |
+
else:
|
| 152 |
+
old_forward = module.forward
|
| 153 |
+
module._old_forward = old_forward
|
| 154 |
+
|
| 155 |
+
module = hook.init_hook(module)
|
| 156 |
+
module._hf_hook = hook
|
| 157 |
+
|
| 158 |
+
@functools.wraps(old_forward)
|
| 159 |
+
def new_forward(*args, **kwargs):
|
| 160 |
+
args, kwargs = module._hf_hook.pre_forward(module, *args, **kwargs)
|
| 161 |
+
if module._hf_hook.no_grad:
|
| 162 |
+
with torch.no_grad():
|
| 163 |
+
output = old_forward(*args, **kwargs)
|
| 164 |
+
else:
|
| 165 |
+
output = old_forward(*args, **kwargs)
|
| 166 |
+
return module._hf_hook.post_forward(module, output)
|
| 167 |
+
|
| 168 |
+
module.forward = new_forward
|
| 169 |
+
return module
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
def remove_hook_from_module(module: nn.Module, recurse=False):
|
| 173 |
+
"""
|
| 174 |
+
Removes any hook attached to a module via `add_hook_to_module`.
|
| 175 |
+
|
| 176 |
+
Args:
|
| 177 |
+
module (`torch.nn.Module`): The module to attach a hook to.
|
| 178 |
+
recurse (`bool`, **optional**): Whether to remove the hooks recursively
|
| 179 |
+
|
| 180 |
+
Returns:
|
| 181 |
+
`torch.nn.Module`: The same module, with the hook detached (the module is modified in place, so the result can
|
| 182 |
+
be discarded).
|
| 183 |
+
"""
|
| 184 |
+
|
| 185 |
+
if hasattr(module, "_hf_hook"):
|
| 186 |
+
module._hf_hook.detach_hook(module)
|
| 187 |
+
delattr(module, "_hf_hook")
|
| 188 |
+
|
| 189 |
+
if hasattr(module, "_old_forward"):
|
| 190 |
+
module.forward = module._old_forward
|
| 191 |
+
delattr(module, "_old_forward")
|
| 192 |
+
|
| 193 |
+
if recurse:
|
| 194 |
+
for child in module.children():
|
| 195 |
+
remove_hook_from_module(child, recurse)
|
| 196 |
+
|
| 197 |
+
return module
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
class AlignDevicesHook(ModelHook):
|
| 201 |
+
"""
|
| 202 |
+
A generic `ModelHook` that ensures inputs and model weights are on the same device for the forward pass of the
|
| 203 |
+
associated module, potentially offloading the weights after the forward pass.
|
| 204 |
+
|
| 205 |
+
Args:
|
| 206 |
+
execution_device (`torch.device`, *optional*):
|
| 207 |
+
The device on which inputs and model weights should be placed before the forward pass.
|
| 208 |
+
offload (`bool`, *optional*, defaults to `False`):
|
| 209 |
+
Whether or not the weights should be offloaded after the forward pass.
|
| 210 |
+
io_same_device (`bool`, *optional*, defaults to `False`):
|
| 211 |
+
Whether or not the output should be placed on the same device as the input was.
|
| 212 |
+
weights_map (`Mapping[str, torch.Tensor]`, *optional*):
|
| 213 |
+
When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values.
|
| 214 |
+
offload_buffers (`bool`, *optional*, defaults to `False`):
|
| 215 |
+
Whether or not to include the associated module's buffers when offloading.
|
| 216 |
+
place_submodules (`bool`, *optional*, defaults to `False`):
|
| 217 |
+
Whether to place the submodules on `execution_device` during the `init_hook` event.
|
| 218 |
+
"""
|
| 219 |
+
|
| 220 |
+
def __init__(
|
| 221 |
+
self,
|
| 222 |
+
execution_device: Optional[Union[int, str, torch.device]] = None,
|
| 223 |
+
offload: bool = False,
|
| 224 |
+
io_same_device: bool = False,
|
| 225 |
+
weights_map: Optional[Mapping] = None,
|
| 226 |
+
offload_buffers: bool = False,
|
| 227 |
+
place_submodules: bool = False,
|
| 228 |
+
skip_keys: Optional[Union[str, List[str]]] = None,
|
| 229 |
+
):
|
| 230 |
+
self.execution_device = execution_device
|
| 231 |
+
self.offload = offload
|
| 232 |
+
self.io_same_device = io_same_device
|
| 233 |
+
self.weights_map = weights_map
|
| 234 |
+
self.offload_buffers = offload_buffers
|
| 235 |
+
self.place_submodules = place_submodules
|
| 236 |
+
self.skip_keys = skip_keys
|
| 237 |
+
|
| 238 |
+
# Will contain the input device when `io_same_device=True`.
|
| 239 |
+
self.input_device = None
|
| 240 |
+
self.param_original_devices = {}
|
| 241 |
+
self.buffer_original_devices = {}
|
| 242 |
+
|
| 243 |
+
def __repr__(self):
|
| 244 |
+
return (
|
| 245 |
+
f"AlignDevicesHook(execution_device={self.execution_device}, offload={self.offload}, "
|
| 246 |
+
f"io_same_device={self.io_same_device}, offload_buffers={self.offload_buffers}, "
|
| 247 |
+
f"place_submodules={self.place_submodules}, skip_keys={repr(self.skip_keys)})"
|
| 248 |
+
)
|
| 249 |
+
|
| 250 |
+
def init_hook(self, module):
|
| 251 |
+
if not self.offload and self.execution_device is not None:
|
| 252 |
+
for name, _ in named_module_tensors(module, recurse=self.place_submodules):
|
| 253 |
+
set_module_tensor_to_device(module, name, self.execution_device)
|
| 254 |
+
elif self.offload:
|
| 255 |
+
self.original_devices = {
|
| 256 |
+
name: param.device for name, param in named_module_tensors(module, recurse=self.place_submodules)
|
| 257 |
+
}
|
| 258 |
+
if self.weights_map is None:
|
| 259 |
+
self.weights_map = {
|
| 260 |
+
name: param.to("cpu")
|
| 261 |
+
for name, param in named_module_tensors(
|
| 262 |
+
module, include_buffers=self.offload_buffers, recurse=self.place_submodules
|
| 263 |
+
)
|
| 264 |
+
}
|
| 265 |
+
|
| 266 |
+
for name, _ in named_module_tensors(
|
| 267 |
+
module, include_buffers=self.offload_buffers, recurse=self.place_submodules
|
| 268 |
+
):
|
| 269 |
+
set_module_tensor_to_device(module, name, "meta")
|
| 270 |
+
if not self.offload_buffers and self.execution_device is not None:
|
| 271 |
+
for name, _ in module.named_buffers(recurse=self.place_submodules):
|
| 272 |
+
set_module_tensor_to_device(module, name, self.execution_device)
|
| 273 |
+
return module
|
| 274 |
+
|
| 275 |
+
def pre_forward(self, module, *args, **kwargs):
|
| 276 |
+
if self.io_same_device:
|
| 277 |
+
self.input_device = find_device([args, kwargs])
|
| 278 |
+
if self.offload:
|
| 279 |
+
for name, _ in named_module_tensors(
|
| 280 |
+
module, include_buffers=self.offload_buffers, recurse=self.place_submodules
|
| 281 |
+
):
|
| 282 |
+
fp16_statistics = None
|
| 283 |
+
if "weight" in name and name.replace("weight", "SCB") in self.weights_map.keys():
|
| 284 |
+
if self.weights_map[name].dtype == torch.int8:
|
| 285 |
+
fp16_statistics = self.weights_map[name.replace("weight", "SCB")]
|
| 286 |
+
set_module_tensor_to_device(
|
| 287 |
+
module, name, self.execution_device, value=self.weights_map[name], fp16_statistics=fp16_statistics
|
| 288 |
+
)
|
| 289 |
+
|
| 290 |
+
return send_to_device(args, self.execution_device), send_to_device(
|
| 291 |
+
kwargs, self.execution_device, skip_keys=self.skip_keys
|
| 292 |
+
)
|
| 293 |
+
|
| 294 |
+
def post_forward(self, module, output):
    """
    After the forward pass: offload the module's tensors back to the meta device (when
    offloading) and, if requested, move the output back to the device the inputs came from.

    Args:
        module (`torch.nn.Module`): The module whose forward pass just ran.
        output: The output of the forward pass.

    Returns:
        The (possibly device-moved) output.
    """
    if self.offload:
        for name, _ in named_module_tensors(
            module, include_buffers=self.offload_buffers, recurse=self.place_submodules
        ):
            set_module_tensor_to_device(module, name, "meta")
            # bitsandbytes `Linear8bitLt` caches quantization state on the module; clear
            # it so stale statistics are not reused once the weights are offloaded.
            if type(module).__name__ == "Linear8bitLt":
                module.state.SCB = None
                module.state.CxB = None

    if self.io_same_device and self.input_device is not None:
        output = send_to_device(output, self.input_device, skip_keys=self.skip_keys)

    return output
|
| 308 |
+
|
| 309 |
+
def detach_hook(self, module):
    """
    Undo the offloading performed by this hook: restore every tensor that originally lived
    on a real (non-meta) device, pulling its value from the weights map.

    Args:
        module (`torch.nn.Module`): The module the hook is being detached from.

    Returns:
        The same module, with its tensors restored.
    """
    if not self.offload:
        return module
    meta_device = torch.device("meta")
    for tensor_name, original_device in self.original_devices.items():
        # Tensors that were already on "meta" before the hook have nothing to restore.
        if original_device != meta_device:
            set_module_tensor_to_device(
                module, tensor_name, original_device, value=self.weights_map.get(tensor_name, None)
            )
    return module
|
| 315 |
+
|
| 316 |
+
|
| 317 |
+
def attach_execution_device_hook(
    module: torch.nn.Module,
    execution_device: Union[int, str, torch.device],
    skip_keys: Optional[Union[str, List[str]]] = None,
    preload_module_classes: Optional[List[str]] = None,
):
    """
    Recursively attaches `AlignDevicesHook` to all submodules of a given model to make sure they have the right
    execution device

    Args:
        module (`torch.nn.Module`):
            The module where we want to attach the hooks.
        execution_device (`int`, `str` or `torch.device`):
            The device on which inputs and model weights should be placed before the forward pass.
        skip_keys (`str` or `List[str]`, *optional*):
            A list of keys to ignore when moving inputs or outputs between devices.
        preload_module_classes (`List[str]`, *optional*):
            A list of classes whose instances should load all their weights (even in the submodules) at the beginning
            of the forward. This should only be used for classes that have submodules which are registered but not
            called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
            `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
    """
    # Only hook modules that actually own tensors and are not hooked already.
    if not hasattr(module, "_hf_hook") and len(module.state_dict()) > 0:
        add_hook_to_module(module, AlignDevicesHook(execution_device, skip_keys=skip_keys))

    # Break the recursion if we get to a preload module.
    if preload_module_classes is not None and module.__class__.__name__ in preload_module_classes:
        return

    for child in module.children():
        # Bug fix: the recursive call previously dropped `skip_keys` and
        # `preload_module_classes`, so children lost the skip-keys behavior and preload
        # classes below the top level never stopped the recursion.
        attach_execution_device_hook(
            child, execution_device, skip_keys=skip_keys, preload_module_classes=preload_module_classes
        )
|
| 349 |
+
|
| 350 |
+
|
| 351 |
+
def attach_align_device_hook(
    module: torch.nn.Module,
    execution_device: Optional[torch.device] = None,
    offload: bool = False,
    weights_map: Optional[Mapping] = None,
    offload_buffers: bool = False,
    module_name: str = "",
    skip_keys: Optional[Union[str, List[str]]] = None,
    preload_module_classes: Optional[List[str]] = None,
):
    """
    Recursively attaches `AlignDevicesHook` to all submodules of a given model that have direct parameters and/or
    buffers.

    Args:
        module (`torch.nn.Module`):
            The module where we want to attach the hooks.
        execution_device (`torch.device`, *optional*):
            The device on which inputs and model weights should be placed before the forward pass.
        offload (`bool`, *optional*, defaults to `False`):
            Whether or not the weights should be offloaded after the forward pass.
        weights_map (`Mapping[str, torch.Tensor]`, *optional*):
            When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values.
        offload_buffers (`bool`, *optional*, defaults to `False`):
            Whether or not to include the associated module's buffers when offloading.
        module_name (`str`, *optional*, defaults to `""`):
            The name of the module.
        skip_keys (`str` or `List[str]`, *optional*):
            A list of keys to ignore when moving inputs or outputs between devices.
        preload_module_classes (`List[str]`, *optional*):
            A list of classes whose instances should load all their weights (even in the submodules) at the beginning
            of the forward. This should only be used for classes that have submodules which are registered but not
            called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
            `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
    """
    # Attach the hook on this module if it has any direct tensor.
    directs = named_module_tensors(module)
    # "Full offload" means this module's class is in `preload_module_classes`, so the hook
    # must place/offload the whole subtree at once instead of per-submodule.
    full_offload = (
        offload and preload_module_classes is not None and module.__class__.__name__ in preload_module_classes
    )

    if len(list(directs)) > 0 or full_offload:
        if weights_map is not None:
            # Scope the (possibly lazy) weights map to this module's name prefix so the
            # hook can look weights up by their local names.
            prefix = f"{module_name}." if len(module_name) > 0 else ""
            prefixed_weights_map = PrefixedDataset(weights_map, prefix)
        else:
            prefixed_weights_map = None
        hook = AlignDevicesHook(
            execution_device=execution_device,
            offload=offload,
            weights_map=prefixed_weights_map,
            offload_buffers=offload_buffers,
            place_submodules=full_offload,
            skip_keys=skip_keys,
        )
        add_hook_to_module(module, hook, append=True)

    # We stop the recursion in case we hit the full offload.
    if full_offload:
        return

    # Recurse on all children of the module.
    for child_name, child in module.named_children():
        child_name = f"{module_name}.{child_name}" if len(module_name) > 0 else child_name
        attach_align_device_hook(
            child,
            execution_device=execution_device,
            offload=offload,
            weights_map=weights_map,
            offload_buffers=offload_buffers,
            module_name=child_name,
            preload_module_classes=preload_module_classes,
            skip_keys=skip_keys,
        )
|
| 425 |
+
|
| 426 |
+
|
| 427 |
+
def remove_hook_from_submodules(module: nn.Module):
    """
    Recursively removes all hooks attached on the submodules of a given model.

    Args:
        module (`torch.nn.Module`): The module on which to remove all hooks.
    """
    # Iterative preorder traversal: visit each module before its children, same order as
    # the recursive formulation.
    stack = [module]
    while stack:
        current = stack.pop()
        remove_hook_from_module(current)
        stack.extend(reversed(list(current.children())))
|
| 437 |
+
|
| 438 |
+
|
| 439 |
+
def attach_align_device_hook_on_blocks(
    module: nn.Module,
    execution_device: Optional[Union[torch.device, Dict[str, torch.device]]] = None,
    offload: Union[bool, Dict[str, bool]] = False,
    weights_map: Mapping = None,
    offload_buffers: bool = False,
    module_name: str = "",
    skip_keys: Optional[Union[str, List[str]]] = None,
    preload_module_classes: Optional[List[str]] = None,
):
    """
    Attaches `AlignDevicesHook` to all blocks of a given model as needed.

    Args:
        module (`torch.nn.Module`):
            The module where we want to attach the hooks.
        execution_device (`torch.device` or `Dict[str, torch.device]`, *optional*):
            The device on which inputs and model weights should be placed before the forward pass. It can be one device
            for the whole module, or a dictionary mapping module name to device.
        offload (`bool`, *optional*, defaults to `False`):
            Whether or not the weights should be offloaded after the forward pass. It can be one boolean for the whole
            module, or a dictionary mapping module name to boolean.
        weights_map (`Mapping[str, torch.Tensor]`, *optional*):
            When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values.
        offload_buffers (`bool`, *optional*, defaults to `False`):
            Whether or not to include the associated module's buffers when offloading.
        module_name (`str`, *optional*, defaults to `""`):
            The name of the module.
        skip_keys (`str` or `List[str]`, *optional*):
            A list of keys to ignore when moving inputs or outputs between devices.
        preload_module_classes (`List[str]`, *optional*):
            A list of classes whose instances should load all their weights (even in the submodules) at the beginning
            of the forward. This should only be used for classes that have submodules which are registered but not
            called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
            `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
    """
    # If one device and one offload, we've got one hook.
    if not isinstance(execution_device, Mapping) and not isinstance(offload, dict):
        if not offload:
            # Single device, no offload: one hook on the root handles the whole tree.
            hook = AlignDevicesHook(
                execution_device=execution_device, io_same_device=True, skip_keys=skip_keys, place_submodules=True
            )
            add_hook_to_module(module, hook)
        else:
            # Single device with offload: delegate to the per-submodule attachment.
            attach_align_device_hook(
                module,
                execution_device=execution_device,
                offload=True,
                weights_map=weights_map,
                offload_buffers=offload_buffers,
                module_name=module_name,
                skip_keys=skip_keys,
            )
        return

    # Normalize both arguments to dictionaries keyed by module name.
    if not isinstance(execution_device, Mapping):
        execution_device = {key: execution_device for key in offload.keys()}
    if not isinstance(offload, Mapping):
        offload = {key: offload for key in execution_device.keys()}

    if module_name in execution_device and module_name in offload and not offload[module_name]:
        # This block stays on its execution device: place the whole subtree there.
        hook = AlignDevicesHook(
            execution_device=execution_device[module_name],
            offload_buffers=offload_buffers,
            io_same_device=(module_name == ""),
            place_submodules=True,
            skip_keys=skip_keys,
        )
        add_hook_to_module(module, hook)
        attach_execution_device_hook(module, execution_device[module_name])
    elif module_name in execution_device and module_name in offload:
        # This block is offloaded: attach offloading hooks to its submodules.
        attach_align_device_hook(
            module,
            execution_device=execution_device[module_name],
            offload=True,
            weights_map=weights_map,
            offload_buffers=offload_buffers,
            module_name=module_name,
            skip_keys=skip_keys,
            preload_module_classes=preload_module_classes,
        )
        if not hasattr(module, "_hf_hook"):
            # The block itself got no hook above (no direct tensors): still add one so
            # its inputs land on the right device.
            hook = AlignDevicesHook(
                execution_device=execution_device[module_name], io_same_device=(module_name == ""), skip_keys=skip_keys
            )
            add_hook_to_module(module, hook)
            attach_execution_device_hook(
                module,
                execution_device[module_name],
                preload_module_classes=preload_module_classes,
                skip_keys=skip_keys,
            )
    elif module_name == "":
        # Root module without an explicit entry: only align input/output devices.
        hook = AlignDevicesHook(execution_device=execution_device.get(""), io_same_device=True, skip_keys=skip_keys)
        add_hook_to_module(module, hook)

    for child_name, child in module.named_children():
        child_name = f"{module_name}.{child_name}" if len(module_name) > 0 else child_name
        attach_align_device_hook_on_blocks(
            child,
            execution_device=execution_device,
            offload=offload,
            weights_map=weights_map,
            offload_buffers=offload_buffers,
            module_name=child_name,
            preload_module_classes=preload_module_classes,
            skip_keys=skip_keys,
        )
|
| 547 |
+
|
| 548 |
+
|
| 549 |
+
class CpuOffload(ModelHook):
    """
    Offloads a model on the CPU until its forward pass is called. The model will not be offloaded back to the CPU after
    the forward, the user needs to call the `init_hook` method again for this.

    Args:
        execution_device(`str`, `int` or `torch.device`, *optional*):
            The device on which the model should be executed. Will default to the MPS device if it's available, then
            GPU 0 if there is a GPU, and finally to the CPU.
        prev_module_hook (`UserCpuOffloadHook`, *optional*):
            The hook sent back by [`cpu_offload_with_hook`] for a previous model in the pipeline you are running. If
            passed, its offload method will be called just before the forward of the model to which this hook is
            attached.
    """

    def __init__(
        self,
        execution_device: Optional[Union[str, int, torch.device]] = None,
        prev_module_hook: Optional["UserCpuOffloadHook"] = None,
    ):
        self.prev_module_hook = prev_module_hook

        # Fall back to the current process's default device when none is given.
        self.execution_device = execution_device if execution_device is not None else PartialState().default_device

    def init_hook(self, module):
        # Park the model on CPU until its forward is actually needed.
        return module.to("cpu")

    def pre_forward(self, module, *args, **kwargs):
        if self.prev_module_hook is not None:
            # Offload the previous pipeline stage to free the execution device first.
            self.prev_module_hook.offload()
        module.to(self.execution_device)
        return send_to_device(args, self.execution_device), send_to_device(kwargs, self.execution_device)
|
| 581 |
+
|
| 582 |
+
|
| 583 |
+
class UserCpuOffloadHook:
    """
    Pairs a model with its `ModelHook`, exposing convenience methods to re-run the hook's
    initialization (offloading the model) or to detach the hook entirely.
    """

    def __init__(self, model, hook):
        # Keep both references so the user can offload or detach later on.
        self.model = model
        self.hook = hook

    def offload(self):
        # Re-running `init_hook` puts the model back in its initial (offloaded) placement.
        self.hook.init_hook(self.model)

    def remove(self):
        # Detach the hook and restore the model's original forward.
        remove_hook_from_module(self.model)
|
evalkit_tf437/lib/python3.10/site-packages/accelerate/logging.py
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2022 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import logging
|
| 16 |
+
import os
|
| 17 |
+
|
| 18 |
+
from .state import PartialState
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class MultiProcessAdapter(logging.LoggerAdapter):
    """
    An adapter to assist with logging in multiprocess.

    `log` takes in an additional `main_process_only` kwarg, which dictates whether it should be called on all processes
    or only the main executed one. Default is `main_process_only=True`.

    Does not require an `Accelerator` object to be created first.
    """

    @staticmethod
    def _should_log(main_process_only):
        "Check if log should be performed"
        state = PartialState()
        # Log everywhere when `main_process_only` is False, otherwise only on rank 0.
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        """
        Delegates logger call after checking if we should log.

        Accepts a new kwarg of `main_process_only`, which will dictate whether it will be logged across all processes
        or only the main executed one. Default is `True` if not passed

        Also accepts "in_order", which if `True` makes the processes log one by one, in order. This is much easier to
        read, but comes at the cost of sometimes needing to wait for the other processes. Default is `False` to not
        break with the previous behavior.

        `in_order` is ignored if `main_process_only` is passed.
        """
        # The process topology is only known once the accelerate state exists.
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        # Pop the custom kwargs so they are not forwarded to the underlying logger.
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)

            elif in_order:
                # Each process waits for its turn, producing rank-ordered output.
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def get_logger(name: str, log_level: str = None):
    """
    Returns a `logging.Logger` for `name` that can handle multiprocessing.

    If a log should be called on all processes, pass `main_process_only=False` If a log should be called on all
    processes and in order, also pass `in_order=True`

    Args:
        name (`str`):
            The name for the logger, such as `__file__`
        log_level (`str`, *optional*):
            The log level to use. If not passed, will default to the `ACCELERATE_LOG_LEVEL` environment variable, or
            leave the logger's level unchanged if that is not set either.

    Example:

    ```python
    >>> from accelerate.logging import get_logger

    >>> logger = get_logger(__name__)

    >>> logger.info("My log", main_process_only=False)
    >>> logger.debug("My log", main_process_only=True)

    >>> logger = get_logger(__name__, log_level="DEBUG")
    >>> logger.info("My log")
    >>> logger.debug("My second log")

    >>> from accelerate import Accelerator

    >>> accelerator = Accelerator()
    >>> array = ["a", "b", "c", "d"]
    >>> letter_at_rank = array[accelerator.process_index]
    >>> logger.info(letter_at_rank, in_order=True)
    ```
    """
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        # Also raise the root logger so handlers attached there emit at this level.
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
|
evalkit_tf437/lib/python3.10/site-packages/accelerate/memory_utils.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2022 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import warnings
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
# This module exists only for backward compatibility: its contents moved to
# `accelerate.utils.memory`. Importing it warns users toward the supported import path.
# Fix: the message previously told users to import the misspelled
# `find_executable_batchsize`, which does not exist.
warnings.warn(
    "memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: "
    "`from accelerate import find_executable_batch_size` to avoid this warning.",
    FutureWarning,
)
|
evalkit_tf437/lib/python3.10/site-packages/accelerate/optimizer.py
ADDED
|
@@ -0,0 +1,187 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2021 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import inspect
|
| 16 |
+
import warnings
|
| 17 |
+
|
| 18 |
+
import torch
|
| 19 |
+
|
| 20 |
+
from .state import AcceleratorState, GradientState
|
| 21 |
+
from .utils import DistributedType, honor_type, is_tpu_available
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
if is_tpu_available(check_device=False):
|
| 25 |
+
import torch_xla.core.xla_model as xm
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def move_to_device(state, device):
    """
    Recursively move every tensor found in `state` (a tensor, or a nested list/tuple/dict
    of them) to `device`, preserving container types. Non-tensor leaves pass through
    unchanged.
    """
    if isinstance(state, torch.Tensor):
        return state.to(device)
    if isinstance(state, dict):
        return type(state)({key: move_to_device(value, device) for key, value in state.items()})
    if isinstance(state, (list, tuple)):
        # `honor_type` rebuilds the original sequence type (e.g. namedtuples).
        return honor_type(state, (move_to_device(item, device) for item in state))
    return state
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class AcceleratedOptimizer(torch.optim.Optimizer):
    """
    Internal wrapper around a torch optimizer.

    Conditionally will perform `step` and `zero_grad` if gradients should be synchronized when performing gradient
    accumulation.

    Args:
        optimizer (`torch.optim.optimizer.Optimizer`):
            The optimizer to wrap.
        device_placement (`bool`, *optional*, defaults to `True`):
            Whether or not the optimizer should handle device placement. If so, it will place the state dictionary of
            `optimizer` on the right device.
        scaler (`torch.cuda.amp.grad_scaler.GradScaler`, *optional*):
            The scaler to use in the step function if training with mixed precision.
    """

    def __init__(self, optimizer, device_placement=True, scaler=None):
        self.optimizer = optimizer
        self.scaler = scaler
        self.accelerator_state = AcceleratorState()
        self.gradient_state = GradientState()
        self.device_placement = device_placement
        # Tracks whether the last scaled step was skipped due to gradient overflow.
        self._is_overflow = False

        if self.scaler is not None:
            # The patched step method lets us detect whether GradScaler actually ran the
            # underlying optimizer step or skipped it (overflow).
            self._accelerate_step_called = False
            self._optimizer_original_step_method = self.optimizer.step
            self._optimizer_patched_step_method = patch_optimizer_step(self, self.optimizer.step)

        # Handle device placement
        if device_placement:
            state_dict = self.optimizer.state_dict()
            if self.accelerator_state.distributed_type == DistributedType.TPU:
                xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device)
            else:
                state_dict = move_to_device(state_dict, self.accelerator_state.device)
            self.optimizer.load_state_dict(state_dict)

    # The following properties delegate to the wrapped optimizer so this wrapper behaves
    # like a drop-in `torch.optim.Optimizer`.
    @property
    def state(self):
        return self.optimizer.state

    @state.setter
    def state(self, state):
        self.optimizer.state = state

    @property
    def param_groups(self):
        return self.optimizer.param_groups

    @param_groups.setter
    def param_groups(self, param_groups):
        self.optimizer.param_groups = param_groups

    @property
    def defaults(self):
        return self.optimizer.defaults

    @defaults.setter
    def defaults(self, defaults):
        self.optimizer.defaults = defaults

    def add_param_group(self, param_group):
        self.optimizer.add_param_group(param_group)

    def load_state_dict(self, state_dict):
        if self.accelerator_state.distributed_type == DistributedType.TPU and self.device_placement:
            xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device)
        self.optimizer.load_state_dict(state_dict)

    def state_dict(self):
        return self.optimizer.state_dict()

    def zero_grad(self, set_to_none=None):
        # During gradient accumulation steps (sync_gradients is False) this is a no-op so
        # gradients keep accumulating.
        if self.gradient_state.sync_gradients:
            accept_arg = "set_to_none" in inspect.signature(self.optimizer.zero_grad).parameters
            if accept_arg:
                if set_to_none is None:
                    set_to_none = False
                self.optimizer.zero_grad(set_to_none=set_to_none)
            else:
                if set_to_none is not None:
                    raise ValueError("`set_to_none` for Optimizer.zero_grad` is not supported by this optimizer.")
                self.optimizer.zero_grad()

    def step(self, closure=None):
        # Skipped entirely while accumulating gradients.
        if self.gradient_state.sync_gradients:
            if self.accelerator_state.distributed_type == DistributedType.TPU:
                optimizer_args = {"closure": closure} if closure is not None else {}
                xm.optimizer_step(self.optimizer, optimizer_args=optimizer_args)
            elif self.scaler is not None:
                # Temporarily swap in the patched step so we can tell whether GradScaler
                # actually invoked it (it skips the step on gradient overflow).
                self.optimizer.step = self._optimizer_patched_step_method

                self.scaler.step(self.optimizer, closure)
                self.scaler.update()

                if not self._accelerate_step_called:
                    # If the optimizer step was skipped, gradient overflow was detected.
                    self._is_overflow = True
                else:
                    self._is_overflow = False
                # Reset the step method to the original one
                self.optimizer.step = self._optimizer_original_step_method
                # Reset the indicator
                self._accelerate_step_called = False
            else:
                self.optimizer.step(closure)

    def _switch_parameters(self, parameters_map):
        # Remap optimizer parameters (e.g. after model preparation replaced them).
        for param_group in self.optimizer.param_groups:
            param_group["params"] = [parameters_map.get(p, p) for p in param_group["params"]]

    @property
    def is_overflow(self):
        """Whether or not the optimizer step was done, or skipped because of gradient overflow."""
        warnings.warn(
            "The `is_overflow` property is deprecated and will be removed in version 1.0 of Accelerate use "
            "`optimizer.step_was_skipped` instead.",
            FutureWarning,
        )
        return self._is_overflow

    @property
    def step_was_skipped(self):
        """Whether or not the optimizer step was skipped."""
        return self._is_overflow

    def __getstate__(self):
        # The patched/original step bound methods are not picklable; drop them and
        # rebuild in `__setstate__`.
        _ignored_keys = [
            "_accelerate_step_called",
            "_optimizer_original_step_method",
            "_optimizer_patched_step_method",
        ]
        return {k: v for k, v in self.__dict__.items() if k not in _ignored_keys}

    def __setstate__(self, state):
        self.__dict__.update(state)
        if self.scaler is not None:
            self._accelerate_step_called = False
            self._optimizer_original_step_method = self.optimizer.step
            self._optimizer_patched_step_method = patch_optimizer_step(self, self.optimizer.step)
| 181 |
+
|
| 182 |
+
def patch_optimizer_step(accelerated_optimizer: AcceleratedOptimizer, method):
    """
    Wrap `method` so that calling it flips `accelerated_optimizer._accelerate_step_called`.

    Used with `GradScaler` to detect whether the real optimizer step ran or was skipped
    because of gradient overflow.
    """

    def patched_step(*args, **kwargs):
        # Record that the underlying step actually executed before delegating to it.
        accelerated_optimizer._accelerate_step_called = True
        return method(*args, **kwargs)

    return patched_step
|
evalkit_tf437/lib/python3.10/site-packages/accelerate/state.py
ADDED
|
@@ -0,0 +1,1046 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2021 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
from __future__ import annotations
|
| 16 |
+
|
| 17 |
+
import math
|
| 18 |
+
import os
|
| 19 |
+
import threading
|
| 20 |
+
import warnings
|
| 21 |
+
from contextlib import contextmanager
|
| 22 |
+
from functools import partial
|
| 23 |
+
from typing import Any, Callable, Optional
|
| 24 |
+
|
| 25 |
+
import torch
|
| 26 |
+
|
| 27 |
+
from .utils import (
|
| 28 |
+
DistributedType,
|
| 29 |
+
DynamoBackend,
|
| 30 |
+
GradientAccumulationPlugin,
|
| 31 |
+
get_ccl_version,
|
| 32 |
+
get_int_from_env,
|
| 33 |
+
is_ccl_available,
|
| 34 |
+
is_deepspeed_available,
|
| 35 |
+
is_fp8_available,
|
| 36 |
+
is_ipex_available,
|
| 37 |
+
is_mps_available,
|
| 38 |
+
is_npu_available,
|
| 39 |
+
is_tpu_available,
|
| 40 |
+
is_xpu_available,
|
| 41 |
+
parse_choice_from_env,
|
| 42 |
+
parse_flag_from_env,
|
| 43 |
+
)
|
| 44 |
+
from .utils.dataclasses import SageMakerDistributedType
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
if is_tpu_available(check_device=False):
|
| 48 |
+
import torch_xla.core.xla_model as xm
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
if is_npu_available(check_device=False):
|
| 52 |
+
import torch_npu # noqa: F401
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def is_initialized() -> bool:
    """Module-level counterpart of `AcceleratorState.initialized`.

    Returns `True` once an `Accelerator` has populated the shared state dict.
    """
    return bool(AcceleratorState._shared_state)
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
# Lambda function that does nothing
|
| 64 |
+
def do_nothing(*args, **kwargs):
    """Accept any arguments and return `None`; used as a no-op stand-in."""
    return None
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
class ThreadLocalSharedDict(threading.local):
    """Descriptor holding a per-thread dict shared by all instances of the owner class.

    Reading the attribute on either the class or an instance returns the same
    underlying storage dict, and assigning through an *instance* swaps the
    storage. Assigning to the *class* attribute would replace the descriptor
    object itself, so the storage should instead be mutated in place
    (e.g. `_shared_state.clear()`).

    Because the storage lives in `threading.local`, each thread sees its own
    dict — required for PyTorch/XLA with PJRT in multithreaded mode (TPU v2/v3):
    https://github.com/pytorch/xla/blob/r2.0/docs/pjrt.md#multithreading-on-tpu-v2v3

    See the Python descriptor HOWTO for descriptor semantics:
    https://docs.python.org/3/howto/descriptor.html
    """

    def __init__(self, thread_local: bool = False):
        self._storage = {}

    def __get__(self, obj, objtype=None):
        return self._storage

    def __set__(self, obj, value):
        self._storage = value
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
# Prefer global shared dictionary, except when using TPU.
# On TPU (PJRT multithreaded mode) each thread must keep its own state dict,
# so the thread-local descriptor is used instead of a plain module-global dict.
SharedDict = dict if not is_tpu_available(check_device=False) else ThreadLocalSharedDict
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
# Inspired by Alex Martelli's 'Borg'.
|
| 100 |
+
class PartialState:
|
| 101 |
+
"""
|
| 102 |
+
Singleton class that has information about the current training environment and functions to help with process
|
| 103 |
+
control. Designed to be used when only process control and device execution states are needed. Does *not* need to
|
| 104 |
+
be initialized from `Accelerator`.
|
| 105 |
+
|
| 106 |
+
**Available attributes:**
|
| 107 |
+
|
| 108 |
+
- **device** (`torch.device`) -- The device to use.
|
| 109 |
+
- **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently
|
| 110 |
+
in use.
|
| 111 |
+
- **local_process_index** (`int`) -- The index of the current process on the current server.
|
| 112 |
+
- **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type
|
| 113 |
+
of mixed precision being performed. (Choose from 'no','fp16','bf16 or 'fp8').
|
| 114 |
+
- **num_processes** (`int`) -- The number of processes currently launched in parallel.
|
| 115 |
+
- **process_index** (`int`) -- The index of the current process.
|
| 116 |
+
- **is_last_process** (`bool`) -- Whether or not the current process is the last one.
|
| 117 |
+
- **is_main_process** (`bool`) -- Whether or not the current process is the main one.
|
| 118 |
+
- **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node.
|
| 119 |
+
- **debug** (`bool`) -- Whether or not the current script is being run in debug mode.
|
| 120 |
+
"""
|
| 121 |
+
|
| 122 |
+
_shared_state = SharedDict()
|
| 123 |
+
|
| 124 |
+
def __init__(self, cpu: bool = False, **kwargs):
    """Set up the shared process state (Borg pattern: all instances share `__dict__`).

    On first construction this detects the launch environment (SageMaker DP,
    TPU, DeepSpeed, multi-GPU/NPU, MPI-launched CPU/XPU, or single process),
    initializes `torch.distributed` when needed, and records the process
    topology (`num_processes`, `process_index`, `local_process_index`) and the
    `device` to use. Later constructions are no-ops thanks to the shared state.

    Args:
        cpu (`bool`, defaults to `False`):
            Force CPU execution even when an accelerator or distributed
            backend is available.
        **kwargs:
            Forwarded to `torch.distributed.init_process_group`; a `backend`
            key is honored or overridden depending on the environment.
    """
    # Borg pattern: every instance aliases the class-level shared dict.
    self.__dict__ = self._shared_state
    if not self.initialized:
        self._cpu = cpu
        self.backend = None
        # An explicit device override, e.g. "cuda:1", set by the launcher.
        env_device = os.environ.get("ACCELERATE_TORCH_DEVICE", None)
        self.device = torch.device(env_device) if env_device is not None else None
        self.debug = parse_flag_from_env("ACCELERATE_DEBUG_MODE")
        use_sagemaker_dp = kwargs.pop("_use_sagemaker_dp", None)
        if use_sagemaker_dp is None:
            use_sagemaker_dp = (
                os.environ.get("ACCELERATE_USE_SAGEMAKER", "false") == "true"
                and os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") != SageMakerDistributedType.NO
            )

        if use_sagemaker_dp and not cpu:
            # NOTE(review): the inner condition is redundant (`use_sagemaker_dp`
            # is already truthy here); kept for parity with the original flow.
            if (
                os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") == SageMakerDistributedType.DATA_PARALLEL
            ) or use_sagemaker_dp:
                self.distributed_type = DistributedType.MULTI_GPU
                import smdistributed.dataparallel.torch.torch_smddp  # noqa

                if not torch.distributed.is_initialized():
                    torch.distributed.init_process_group(backend="smddp")
                    self.backend = "smddp"
                self.num_processes = torch.distributed.get_world_size()
                self.process_index = torch.distributed.get_rank()
                self.local_process_index = int(os.environ.get("LOCAL_RANK", -1))
                if self.device is None:
                    self.device = torch.device("cuda", self.local_process_index)
                torch.cuda.set_device(self.device)
        elif is_tpu_available() and not cpu:
            # XLA manages the process group itself; just record the topology.
            self.distributed_type = DistributedType.TPU
            self.num_processes = xm.xrt_world_size()
            self.process_index = xm.get_ordinal()
            self.local_process_index = xm.get_local_ordinal()
            self.device = xm.xla_device()
        elif (
            os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true"
            and int(os.environ.get("LOCAL_RANK", -1)) != -1
            and not cpu
        ):
            assert (
                is_deepspeed_available()
            ), "DeepSpeed is not available => install it using `pip3 install deepspeed` or build it from source"
            self.distributed_type = DistributedType.DEEPSPEED
            if not torch.distributed.is_initialized():
                from deepspeed import comm as dist

                # DeepSpeed always uses nccl
                kwargs.pop("backend", None)
                # Fixed: the original tested `is_xpu_available` (the function
                # object, which is always truthy) instead of calling it, which
                # forced the "ccl" backend on any machine with CCL installed,
                # including CUDA machines that need "nccl".
                if is_xpu_available() and is_ccl_available():
                    # Set DeepSpeed backend to ccl for xpu
                    self.backend = "ccl"
                else:
                    self.backend = "nccl"
                dist.init_distributed(dist_backend=self.backend, auto_mpi_discovery=False, **kwargs)

            self.num_processes = torch.distributed.get_world_size()
            self.process_index = torch.distributed.get_rank()
            self.local_process_index = int(os.environ.get("LOCAL_RANK", -1))
            if self.device is None:
                if is_xpu_available():
                    self.device = torch.device("xpu", self.local_process_index)
                    if self.device is not None:
                        torch.xpu.set_device(self.device)
                else:
                    self.device = torch.device("cuda", self.local_process_index)
                    if self.device is not None:
                        torch.cuda.set_device(self.device)
            self._mixed_precision = "no"  # deepspeed handles mixed_precision using deepspeed_config
        elif int(os.environ.get("LOCAL_RANK", -1)) != -1 and not cpu and torch.cuda.is_available():
            self.distributed_type = DistributedType.MULTI_GPU
            if not torch.distributed.is_initialized():
                self.backend = kwargs.pop("backend", "nccl")
                # Special case for `TrainingArguments`, where `backend` will be `None`
                if self.backend is None:
                    self.backend = "nccl"
                torch.distributed.init_process_group(backend=self.backend, **kwargs)
            self.num_processes = torch.distributed.get_world_size()
            self.process_index = torch.distributed.get_rank()
            self.local_process_index = int(os.environ.get("LOCAL_RANK", -1))
            if self.device is None:
                self.device = torch.device("cuda", self.local_process_index)
            torch.cuda.set_device(self.device)
        elif is_npu_available() and not cpu and int(os.environ.get("LOCAL_RANK", -1)) != -1:
            self.distributed_type = DistributedType.MULTI_NPU
            if not torch.distributed.is_initialized():
                # Backend is not set by the user, we set it here
                kwargs.pop("backend", None)
                self.backend = "hccl"
                torch.distributed.init_process_group(backend=self.backend, **kwargs)
            self.num_processes = torch.distributed.get_world_size()
            self.process_index = torch.distributed.get_rank()
            self.local_process_index = int(os.environ.get("LOCAL_RANK", -1))
            if self.device is None:
                self.device = torch.device("npu", self.local_process_index)
            torch.npu.set_device(self.device)
        elif get_int_from_env(["PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE", "WORLD_SIZE"], 1) > 1:
            # MPI-style launch (Intel MPI / OpenMPI / MVAPICH) on CPU or XPU.
            if not cpu and is_xpu_available():
                self.distributed_type = DistributedType.MULTI_XPU
            else:
                self.distributed_type = DistributedType.MULTI_CPU
            # Actually, CCL_WORKER_COUNT is a CPU only env var in CCL, no need to set it for XPU.
            if is_ccl_available() and (
                get_int_from_env(["CCL_WORKER_COUNT"], 0) > 0 or self.distributed_type == DistributedType.MULTI_XPU
            ):
                if get_ccl_version() >= "1.12":
                    import oneccl_bindings_for_pytorch  # noqa: F401
                else:
                    import torch_ccl  # noqa: F401

                backend = "ccl"
            elif torch.distributed.is_mpi_available():
                backend = "mpi"
            else:
                backend = "gloo"
            # Try to get launch configuration from environment variables set by MPI launcher - works for Intel MPI, OpenMPI and MVAPICH
            rank = get_int_from_env(["RANK", "PMI_RANK", "OMPI_COMM_WORLD_RANK", "MV2_COMM_WORLD_RANK"], 0)
            size = get_int_from_env(["WORLD_SIZE", "PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE"], 1)
            local_rank = get_int_from_env(
                ["LOCAL_RANK", "MPI_LOCALRANKID", "OMPI_COMM_WORLD_LOCAL_RANK", "MV2_COMM_WORLD_LOCAL_RANK"], 0
            )
            local_size = get_int_from_env(
                ["MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"], 1
            )
            self.local_process_index = local_rank
            # Re-export the resolved topology so torch.distributed and any
            # child libraries see consistent values.
            os.environ["RANK"] = str(rank)
            os.environ["WORLD_SIZE"] = str(size)
            os.environ["LOCAL_RANK"] = str(local_rank)
            if not os.environ.get("MASTER_PORT", None):
                os.environ["MASTER_PORT"] = "29500"
            if not os.environ.get("MASTER_ADDR", None):
                if local_size != size and backend != "mpi":
                    raise ValueError(
                        "Looks like distributed multinode run but MASTER_ADDR env not set, "
                        "please try exporting rank 0's hostname as MASTER_ADDR"
                    )
            if (
                self.distributed_type == DistributedType.MULTI_CPU
                and get_int_from_env(["OMP_NUM_THREADS", "MKL_NUM_THREADS"], 0) == 0
            ):
                import psutil

                # Default to one physical core's worth of threads per local rank.
                num_cpu_threads_per_process = int(psutil.cpu_count(logical=False) / local_size)
                if num_cpu_threads_per_process == 0:
                    num_cpu_threads_per_process = 1
                torch.set_num_threads(num_cpu_threads_per_process)
                warnings.warn(
                    f"OMP_NUM_THREADS/MKL_NUM_THREADS unset, we set it at {num_cpu_threads_per_process} to improve oob"
                    " performance."
                )
            if not torch.distributed.is_initialized():
                # Backend is not set by the user, we set it here
                kwargs.pop("backend", None)
                self.backend = backend
                torch.distributed.init_process_group(self.backend, rank=rank, world_size=size, **kwargs)
            self.num_processes = torch.distributed.get_world_size()
            self.process_index = torch.distributed.get_rank()
            if cpu:
                self.device = torch.device("cpu")
            elif is_xpu_available():
                self.device = torch.device("xpu", self.local_process_index)
                torch.xpu.set_device(self.device)
            else:
                self.device = self.default_device
        else:
            # Single-process run: no process group is created.
            self.distributed_type = DistributedType.NO
            self.num_processes = 1
            self.process_index = self.local_process_index = 0

            if self.device is None:
                self.device = torch.device("cpu") if cpu else self.default_device

        self.fork_launched = parse_flag_from_env("FORK_LAUNCHED", 0)
|
| 298 |
+
|
| 299 |
+
def __repr__(self) -> str:
    """Human-readable summary of the process and distributed configuration."""
    backend_part = f" Backend: {self.backend}" if self.backend else ""
    lines = [
        f"Distributed environment: {self.distributed_type}{backend_part}",
        f"Num processes: {self.num_processes}",
        f"Process index: {self.process_index}",
        f"Local process index: {self.local_process_index}",
        f"Device: {self.device}",
    ]
    return "\n".join(lines) + "\n"
|
| 307 |
+
|
| 308 |
+
@staticmethod
|
| 309 |
+
def _reset_state():
|
| 310 |
+
"Resets `_shared_state`, is used internally and should not be called"
|
| 311 |
+
PartialState._shared_state.clear()
|
| 312 |
+
|
| 313 |
+
@property
def initialized(self) -> bool:
    """Whether this `PartialState` singleton has already been set up."""
    return len(self._shared_state) > 0
|
| 317 |
+
|
| 318 |
+
@property
def use_distributed(self):
    """
    Whether more than one process is being coordinated (a true distributed run).
    """
    is_distributed = self.distributed_type != DistributedType.NO
    return is_distributed and self.num_processes > 1
|
| 324 |
+
|
| 325 |
+
@property
def is_last_process(self) -> bool:
    """`True` only on the process with the highest index."""
    last_index = self.num_processes - 1
    return self.process_index == last_index
|
| 329 |
+
|
| 330 |
+
@property
def is_main_process(self) -> bool:
    """`True` on the "main" process: rank 0 normally, the last rank under Megatron-LM."""
    if self.distributed_type == DistributedType.MEGATRON_LM:
        return self.is_last_process
    return self.process_index == 0
|
| 336 |
+
|
| 337 |
+
@property
def is_local_main_process(self) -> bool:
    """`True` on each node's local main process (local rank 0), or on the
    last process under Megatron-LM."""
    if self.distributed_type == DistributedType.MEGATRON_LM:
        return self.is_last_process
    return self.local_process_index == 0
|
| 345 |
+
|
| 346 |
+
def wait_for_everyone(self):
    """Block the current process until every other process reaches this call.

    A no-op when the script runs in a single process. Useful e.g. before
    saving a model so that no process reads a half-written checkpoint.

    Example:

    ```python
    >>> # Assuming two GPU processes
    >>> import time
    >>> from accelerate.state import PartialState

    >>> state = PartialState()
    >>> if state.is_main_process:
    ...     time.sleep(2)
    >>> else:
    ...     print("I'm waiting for the main process to finish its sleep...")
    >>> state.wait_for_everyone()
    >>> # Should print on every process at the same time
    >>> print("Everyone is here")
    ```
    """
    barrier_types = (
        DistributedType.MULTI_GPU,
        DistributedType.MULTI_NPU,
        DistributedType.MULTI_XPU,
        DistributedType.MULTI_CPU,
        DistributedType.DEEPSPEED,
        DistributedType.FSDP,
    )
    if self.distributed_type in barrier_types:
        torch.distributed.barrier()
    elif self.distributed_type == DistributedType.TPU:
        xm.rendezvous("accelerate.utils.wait_for_everyone")
|
| 379 |
+
|
| 380 |
+
def _goes_first(self, is_main: bool):
    """Generator backing the `*_process_first` context managers.

    Non-priority processes block on a barrier before yielding; the priority
    process barriers after yielding, releasing the rest once it is done.
    """
    if not is_main:
        self.wait_for_everyone()
    yield
    if is_main:
        self.wait_for_everyone()
|
| 388 |
+
|
| 389 |
+
@contextmanager
def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False):
    """
    Splits `input` between `self.num_processes` quickly and can be then used on that process. Useful when doing
    distributed inference, such as with different prompts.

    Note that when using a `dict`, all keys need to have the same number of elements.

    Args:
        inputs (`list`, `tuple`, `torch.Tensor`, or `dict` of `list`/`tuple`/`torch.Tensor`):
            The input to split between processes.
        apply_padding (`bool`, `optional`, defaults to `False`):
            Whether to apply padding by repeating the last element of the input so that all processes have the same
            number of elements. Useful when trying to perform actions such as `gather()` on the outputs or passing
            in less inputs than there are processes. If so, just remember to drop the padded elements afterwards.


    Example:

    ```python
    # Assume there are two processes
    from accelerate import PartialState

    state = PartialState()
    with state.split_between_processes(["A", "B", "C"]) as inputs:
        print(inputs)
    # Process 0
    ["A", "B"]
    # Process 1
    ["C"]

    with state.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs:
        print(inputs)
    # Process 0
    ["A", "B"]
    # Process 1
    ["C", "C"]
    ```
    """
    # Fast path: nothing to split in a single-process run.
    if self.num_processes == 1:
        yield inputs
        return
    length = len(inputs)
    # Nested dictionary of any types
    # For a dict, the split length is taken from the values (all values must
    # hold the same number of elements), not from the number of keys.
    if isinstance(inputs, dict):
        length = len(inputs[list(inputs.keys())[0]])
        if not all(len(v) == length for v in inputs.values()):
            raise ValueError("All values in the dictionary must have the same length")
    # Each process takes a contiguous chunk of ceil(length / num_processes)
    # elements; the last process absorbs the remainder when the split is uneven.
    num_samples_per_process = math.ceil(length / self.num_processes)
    start_index = self.process_index * num_samples_per_process
    end_index = start_index + num_samples_per_process
    if (len(inputs) % self.num_processes != 0) and (self.process_index == self.num_processes - 1):
        end_index = length

    def _split_values(inputs, start_index, end_index):
        # Recursive helper: slices sequences/tensors, recurses into dict
        # values (in place), and passes anything else through unchanged.
        if isinstance(inputs, (list, tuple, torch.Tensor)):
            if start_index >= len(inputs):
                # This process's chunk starts past the end: fall back to the
                # last element so padding still has something to repeat.
                result = inputs[-1:]
            else:
                result = inputs[start_index:end_index]
            if apply_padding:
                if isinstance(result, torch.Tensor):
                    from accelerate.utils import pad_across_processes, send_to_device

                    # The tensor needs to be on the device before we can pad it
                    tensorized_result = send_to_device(result, self.device)
                    result = pad_across_processes(tensorized_result, pad_index=inputs[-1])
                else:
                    # Repeat the last element until every process holds the
                    # same number of items.
                    result += [result[-1]] * (num_samples_per_process - len(result))
            return result
        elif isinstance(inputs, dict):
            for key in inputs.keys():
                inputs[key] = _split_values(inputs[key], start_index, end_index)
            return inputs
        else:
            return inputs

    yield _split_values(inputs, start_index, end_index)
|
| 467 |
+
|
| 468 |
+
@contextmanager
def main_process_first(self):
    """
    Context manager under which the main process runs the body first; every
    other process enters the block only once the main process has exited it.

    Example:

    ```python
    >>> from accelerate import Accelerator

    >>> accelerator = Accelerator()
    >>> with accelerator.main_process_first():
    ...     # This will be printed first by process 0 then in a seemingly
    ...     # random order by the other processes.
    ...     print(f"This will be printed by process {accelerator.process_index}")
    ```
    """
    # Delegate to the shared priority generator; `yield from` keeps exception
    # propagation semantics intact for the contextmanager machinery.
    yield from self._goes_first(self.is_main_process)
|
| 488 |
+
|
| 489 |
+
@contextmanager
def local_main_process_first(self):
    """
    Context manager under which each node's local main process runs the body
    first; the node's other processes enter only after it has exited the block.

    Example:

    ```python
    >>> from accelerate.state import PartialState

    >>> state = PartialState()
    >>> with state.local_main_process_first():
    ...     # This will be printed first by local process 0 then in a seemingly
    ...     # random order by the other processes.
    ...     print(f"This will be printed by process {state.local_process_index}")
    ```
    """
    # Same barrier dance as `main_process_first`, keyed on the local rank.
    yield from self._goes_first(self.is_local_main_process)
|
| 509 |
+
|
| 510 |
+
def on_main_process(self, function: Callable[..., Any] = None):
    """
    Decorator: return *function* unchanged on the main process (or in a
    non-distributed run); every other process receives a no-op instead.

    Args:
        function (`Callable`): The function to decorate.

    Raises:
        ValueError: if called before the `PartialState`/`Accelerator` exists.

    Example:

    ```python
    >>> from accelerate.state import PartialState

    >>> state = PartialState()


    >>> @state.on_main_process
    ... def print_something():
    ...     print("This will be printed by process 0 only.")


    >>> print_something()
    "This will be printed by process 0 only"
    ```
    """
    if not self.initialized:
        raise ValueError("The `PartialState` or `Accelerator` must be initialized before calling this function.")
    if self.is_main_process or not self.use_distributed:
        return function
    return do_nothing
|
| 539 |
+
|
| 540 |
+
def on_local_main_process(self, function: Callable[..., Any] = None):
    """
    Decorator: return *function* unchanged on each node's local main process
    (or in a non-distributed run); other processes receive a no-op instead.

    Args:
        function (`Callable`): The function to decorate.

    Example:
    ```python
    # Assume we have 2 servers with 4 processes each.
    from accelerate.state import PartialState

    state = PartialState()


    @state.on_local_main_process
    def print_something():
        print("This will be printed by process 0 only on each server.")


    print_something()
    # On server 1:
    "This will be printed by process 0 only"
    # On server 2:
    "This will be printed by process 0 only"
    ```
    """
    if self.is_local_main_process or not self.use_distributed:
        return function
    return do_nothing
|
| 570 |
+
|
| 571 |
+
def on_last_process(self, function: Callable[..., Any]):
|
| 572 |
+
"""
|
| 573 |
+
Decorator that only runs the decorated function on the last process.
|
| 574 |
+
|
| 575 |
+
Args:
|
| 576 |
+
function (`Callable`): The function to decorate.
|
| 577 |
+
|
| 578 |
+
Example:
|
| 579 |
+
```python
|
| 580 |
+
# Assume we have 4 processes.
|
| 581 |
+
from accelerate.state import PartialState
|
| 582 |
+
|
| 583 |
+
state = PartialState()
|
| 584 |
+
|
| 585 |
+
|
| 586 |
+
@state.on_last_process
|
| 587 |
+
def print_something():
|
| 588 |
+
print(f"Printed on process {state.process_index}")
|
| 589 |
+
|
| 590 |
+
|
| 591 |
+
print_something()
|
| 592 |
+
"Printed on process 3"
|
| 593 |
+
```
|
| 594 |
+
"""
|
| 595 |
+
if self.is_last_process or not self.use_distributed:
|
| 596 |
+
return function
|
| 597 |
+
return do_nothing
|
| 598 |
+
|
| 599 |
+
def on_process(self, function: Callable[..., Any] = None, process_index: int = None):
|
| 600 |
+
"""
|
| 601 |
+
Decorator that only runs the decorated function on the process with the given index.
|
| 602 |
+
|
| 603 |
+
Args:
|
| 604 |
+
function (`Callable`, `optional`):
|
| 605 |
+
The function to decorate.
|
| 606 |
+
process_index (`int`, `optional`):
|
| 607 |
+
The index of the process on which to run the function.
|
| 608 |
+
|
| 609 |
+
Example:
|
| 610 |
+
```python
|
| 611 |
+
# Assume we have 4 processes.
|
| 612 |
+
from accelerate.state import PartialState
|
| 613 |
+
|
| 614 |
+
state = PartialState()
|
| 615 |
+
|
| 616 |
+
|
| 617 |
+
@state.on_process(process_index=2)
|
| 618 |
+
def print_something():
|
| 619 |
+
print(f"Printed on process {state.process_index}")
|
| 620 |
+
|
| 621 |
+
|
| 622 |
+
print_something()
|
| 623 |
+
"Printed on process 2"
|
| 624 |
+
```
|
| 625 |
+
"""
|
| 626 |
+
if function is None:
|
| 627 |
+
return partial(self.on_process, process_index=process_index)
|
| 628 |
+
if (self.process_index == process_index) or (not self.use_distributed):
|
| 629 |
+
return function
|
| 630 |
+
return do_nothing
|
| 631 |
+
|
| 632 |
+
def on_local_process(self, function: Callable[..., Any] = None, local_process_index: int = None):
|
| 633 |
+
"""
|
| 634 |
+
Decorator that only runs the decorated function on the process with the given index on the current node.
|
| 635 |
+
|
| 636 |
+
Args:
|
| 637 |
+
function (`Callable`, *optional*):
|
| 638 |
+
The function to decorate.
|
| 639 |
+
local_process_index (`int`, *optional*):
|
| 640 |
+
The index of the local process on which to run the function.
|
| 641 |
+
|
| 642 |
+
Example:
|
| 643 |
+
```python
|
| 644 |
+
# Assume we have 2 servers with 4 processes each.
|
| 645 |
+
from accelerate import Accelerator
|
| 646 |
+
|
| 647 |
+
accelerator = Accelerator()
|
| 648 |
+
|
| 649 |
+
|
| 650 |
+
@accelerator.on_local_process(local_process_index=2)
|
| 651 |
+
def print_something():
|
| 652 |
+
print(f"Printed on process {accelerator.local_process_index}")
|
| 653 |
+
|
| 654 |
+
|
| 655 |
+
print_something()
|
| 656 |
+
# On server 1:
|
| 657 |
+
"Printed on process 2"
|
| 658 |
+
# On server 2:
|
| 659 |
+
"Printed on process 2"
|
| 660 |
+
```
|
| 661 |
+
"""
|
| 662 |
+
if function is None:
|
| 663 |
+
return partial(self.on_local_process, local_process_index=local_process_index)
|
| 664 |
+
if (self.local_process_index == local_process_index) or (not self.use_distributed):
|
| 665 |
+
return function
|
| 666 |
+
return do_nothing
|
| 667 |
+
|
| 668 |
+
def print(self, *args, **kwargs):
|
| 669 |
+
if self.is_local_main_process:
|
| 670 |
+
print(*args, **kwargs)
|
| 671 |
+
|
| 672 |
+
@property
|
| 673 |
+
def default_device(self) -> torch.device:
|
| 674 |
+
"""
|
| 675 |
+
Returns the default device which is:
|
| 676 |
+
- MPS if `torch.backends.mps.is_available()` and `torch.backends.mps.is_built()` both return True.
|
| 677 |
+
- CUDA if `torch.cuda.is_available()`
|
| 678 |
+
- NPU if `is_npu_available()`
|
| 679 |
+
- CPU otherwise
|
| 680 |
+
"""
|
| 681 |
+
if is_mps_available():
|
| 682 |
+
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
|
| 683 |
+
return torch.device("mps")
|
| 684 |
+
elif torch.cuda.is_available():
|
| 685 |
+
return torch.device("cuda")
|
| 686 |
+
elif is_xpu_available():
|
| 687 |
+
return torch.device("xpu:0")
|
| 688 |
+
elif is_npu_available():
|
| 689 |
+
return torch.device("npu")
|
| 690 |
+
else:
|
| 691 |
+
return torch.device("cpu")
|
| 692 |
+
|
| 693 |
+
|
| 694 |
+
class AcceleratorState:
|
| 695 |
+
"""
|
| 696 |
+
Singleton class that has information about the current training environment.
|
| 697 |
+
|
| 698 |
+
**Available attributes:**
|
| 699 |
+
|
| 700 |
+
- **device** (`torch.device`) -- The device to use.
|
| 701 |
+
- **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently
|
| 702 |
+
in use.
|
| 703 |
+
- **initialized** (`bool`) -- Whether or not the `AcceleratorState` has been initialized from `Accelerator`.
|
| 704 |
+
- **local_process_index** (`int`) -- The index of the current process on the current server.
|
| 705 |
+
- **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type
|
| 706 |
+
of mixed precision being performed. (Choose from 'no','fp16','bf16 or 'fp8').
|
| 707 |
+
- **num_processes** (`int`) -- The number of processes currently launched in parallel.
|
| 708 |
+
- **process_index** (`int`) -- The index of the current process.
|
| 709 |
+
- **is_last_process** (`bool`) -- Whether or not the current process is the last one.
|
| 710 |
+
- **is_main_process** (`bool`) -- Whether or not the current process is the main one.
|
| 711 |
+
- **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node.
|
| 712 |
+
- **debug** (`bool`) -- Whether or not the current script is being run in debug mode.
|
| 713 |
+
"""
|
| 714 |
+
|
| 715 |
+
_shared_state = SharedDict()
|
| 716 |
+
|
| 717 |
+
def __init__(
|
| 718 |
+
self,
|
| 719 |
+
mixed_precision: str = None,
|
| 720 |
+
cpu: bool = False,
|
| 721 |
+
dynamo_plugin=None,
|
| 722 |
+
deepspeed_plugin=None,
|
| 723 |
+
fsdp_plugin=None,
|
| 724 |
+
megatron_lm_plugin=None,
|
| 725 |
+
_from_accelerator: bool = False,
|
| 726 |
+
**kwargs,
|
| 727 |
+
):
|
| 728 |
+
self.__dict__ = self._shared_state
|
| 729 |
+
if parse_flag_from_env("ACCELERATE_USE_CPU"):
|
| 730 |
+
cpu = True
|
| 731 |
+
if PartialState._shared_state == {}:
|
| 732 |
+
PartialState(cpu, **kwargs)
|
| 733 |
+
self.__dict__.update(PartialState._shared_state)
|
| 734 |
+
self._check_initialized(mixed_precision, cpu)
|
| 735 |
+
if not self.initialized:
|
| 736 |
+
self.deepspeed_plugin = None
|
| 737 |
+
self.use_ipex = None
|
| 738 |
+
mixed_precision = (
|
| 739 |
+
parse_choice_from_env("ACCELERATE_MIXED_PRECISION", "no")
|
| 740 |
+
if mixed_precision is None
|
| 741 |
+
else mixed_precision.lower()
|
| 742 |
+
)
|
| 743 |
+
if mixed_precision == "fp8" and not is_fp8_available():
|
| 744 |
+
raise ValueError("Using `fp8` precision requires `transformer_engine` to be installed.")
|
| 745 |
+
self.dynamo_plugin = dynamo_plugin
|
| 746 |
+
if not _from_accelerator:
|
| 747 |
+
raise ValueError(
|
| 748 |
+
"Please make sure to properly initialize your accelerator via `accelerator = Accelerator()` "
|
| 749 |
+
"before using any functionality from the `accelerate` library."
|
| 750 |
+
)
|
| 751 |
+
# deepspeed handles mixed_precision using deepspeed_config
|
| 752 |
+
self._mixed_precision = "no" if self.distributed_type == DistributedType.DEEPSPEED else mixed_precision
|
| 753 |
+
if self.distributed_type == DistributedType.TPU:
|
| 754 |
+
if mixed_precision == "bf16":
|
| 755 |
+
if os.environ.get("ACCELERATE_DOWNCAST_BF16"):
|
| 756 |
+
os.environ["XLA_USE_BF16"] = str(0)
|
| 757 |
+
os.environ["XLA_DOWNCAST_BF16"] = str(1)
|
| 758 |
+
self.downcast_bfloat = True
|
| 759 |
+
else:
|
| 760 |
+
os.environ["XLA_USE_BF16"] = str(1)
|
| 761 |
+
os.environ["XLA_DOWNCAST_BF16"] = str(0)
|
| 762 |
+
self.downcast_bfloat = False
|
| 763 |
+
elif os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true" and not cpu:
|
| 764 |
+
self.deepspeed_plugin = deepspeed_plugin
|
| 765 |
+
elif self.distributed_type == DistributedType.MULTI_GPU:
|
| 766 |
+
if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true":
|
| 767 |
+
self.distributed_type = DistributedType.FSDP
|
| 768 |
+
if self._mixed_precision != "no":
|
| 769 |
+
fsdp_plugin.set_mixed_precision(self._mixed_precision)
|
| 770 |
+
self.fsdp_plugin = fsdp_plugin
|
| 771 |
+
if os.environ.get("ACCELERATE_USE_MEGATRON_LM", "false") == "true":
|
| 772 |
+
self.distributed_type = DistributedType.MEGATRON_LM
|
| 773 |
+
megatron_lm_plugin.set_mixed_precision(self._mixed_precision)
|
| 774 |
+
self.megatron_lm_plugin = megatron_lm_plugin
|
| 775 |
+
elif self.distributed_type == DistributedType.MULTI_NPU:
|
| 776 |
+
if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true":
|
| 777 |
+
self.distributed_type = DistributedType.FSDP
|
| 778 |
+
if self._mixed_precision != "no":
|
| 779 |
+
fsdp_plugin.set_mixed_precision(self._mixed_precision)
|
| 780 |
+
self.fsdp_plugin = fsdp_plugin
|
| 781 |
+
elif self.distributed_type in [DistributedType.MULTI_CPU, DistributedType.MULTI_XPU, DistributedType.NO]:
|
| 782 |
+
if is_ipex_available():
|
| 783 |
+
"check if user disables it explicitly"
|
| 784 |
+
self.use_ipex = parse_flag_from_env("ACCELERATE_USE_IPEX", default=True)
|
| 785 |
+
else:
|
| 786 |
+
self.use_ipex = False
|
| 787 |
+
if self.distributed_type == DistributedType.MULTI_XPU:
|
| 788 |
+
if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true":
|
| 789 |
+
self.distributed_type = DistributedType.FSDP
|
| 790 |
+
if self._mixed_precision != "no":
|
| 791 |
+
fsdp_plugin.set_mixed_precision(self._mixed_precision)
|
| 792 |
+
self.fsdp_plugin = fsdp_plugin
|
| 793 |
+
|
| 794 |
+
if (
|
| 795 |
+
self.dynamo_plugin.backend != DynamoBackend.NO
|
| 796 |
+
and self._mixed_precision == "no"
|
| 797 |
+
and self.device.type == "cuda"
|
| 798 |
+
):
|
| 799 |
+
torch.backends.cuda.matmul.allow_tf32 = True
|
| 800 |
+
PartialState._shared_state["distributed_type"] = self.distributed_type
|
| 801 |
+
|
| 802 |
+
@property
|
| 803 |
+
def initialized(self) -> bool:
|
| 804 |
+
return self._shared_state != PartialState._shared_state
|
| 805 |
+
|
| 806 |
+
def __repr__(self):
|
| 807 |
+
repr = PartialState().__repr__() + f"\nMixed precision type: {self.mixed_precision}\n"
|
| 808 |
+
if self.distributed_type == DistributedType.DEEPSPEED:
|
| 809 |
+
repr += f"ds_config: {self.deepspeed_plugin.deepspeed_config}\n"
|
| 810 |
+
return repr
|
| 811 |
+
|
| 812 |
+
def _check_initialized(self, mixed_precision=None, cpu=None):
|
| 813 |
+
"Checks if a modification is trying to be made and the `AcceleratorState` has already been initialized"
|
| 814 |
+
if self.initialized:
|
| 815 |
+
err = "AcceleratorState has already been initialized and cannot be changed, restart your runtime completely and pass `{flag}` to `Accelerator()`."
|
| 816 |
+
if cpu and self.device.type != "cpu":
|
| 817 |
+
raise ValueError(err.format(flag="cpu=True"))
|
| 818 |
+
if (
|
| 819 |
+
mixed_precision is not None
|
| 820 |
+
and mixed_precision != self._mixed_precision
|
| 821 |
+
and self.distributed_type != DistributedType.DEEPSPEED
|
| 822 |
+
):
|
| 823 |
+
raise ValueError(err.format(flag=f"mixed_precision='{mixed_precision}'"))
|
| 824 |
+
|
| 825 |
+
# For backward compatibility
|
| 826 |
+
@property
|
| 827 |
+
def use_fp16(self):
|
| 828 |
+
warnings.warn(
|
| 829 |
+
"The `use_fp16` property is deprecated and will be removed in version 1.0 of Accelerate use "
|
| 830 |
+
"`AcceleratorState.mixed_precision == 'fp16'` instead.",
|
| 831 |
+
FutureWarning,
|
| 832 |
+
)
|
| 833 |
+
return self._mixed_precision != "no"
|
| 834 |
+
|
| 835 |
+
@property
|
| 836 |
+
def mixed_precision(self):
|
| 837 |
+
if self.distributed_type == DistributedType.DEEPSPEED:
|
| 838 |
+
config = self.deepspeed_plugin.deepspeed_config
|
| 839 |
+
if config.get("fp16", {}).get("enabled", False):
|
| 840 |
+
mixed_precision = "fp16"
|
| 841 |
+
elif config.get("bf16", {}).get("enabled", False):
|
| 842 |
+
mixed_precision = "bf16"
|
| 843 |
+
else:
|
| 844 |
+
mixed_precision = "no"
|
| 845 |
+
else:
|
| 846 |
+
mixed_precision = self._mixed_precision
|
| 847 |
+
return mixed_precision
|
| 848 |
+
|
| 849 |
+
@staticmethod
|
| 850 |
+
def _reset_state(reset_partial_state: bool = False):
|
| 851 |
+
"Resets `_shared_state`, is used internally and should not be called"
|
| 852 |
+
AcceleratorState._shared_state.clear()
|
| 853 |
+
if reset_partial_state:
|
| 854 |
+
PartialState._reset_state()
|
| 855 |
+
|
| 856 |
+
@property
|
| 857 |
+
def use_distributed(self):
|
| 858 |
+
"""
|
| 859 |
+
Whether the Accelerator is configured for distributed training
|
| 860 |
+
"""
|
| 861 |
+
return PartialState().use_distributed
|
| 862 |
+
|
| 863 |
+
@property
|
| 864 |
+
def is_last_process(self) -> bool:
|
| 865 |
+
"Returns whether the current process is the last one"
|
| 866 |
+
return PartialState().is_last_process
|
| 867 |
+
|
| 868 |
+
@property
|
| 869 |
+
def is_main_process(self) -> bool:
|
| 870 |
+
"Returns whether the current process is the main process"
|
| 871 |
+
return PartialState().is_main_process
|
| 872 |
+
|
| 873 |
+
@property
|
| 874 |
+
def is_local_main_process(self) -> bool:
|
| 875 |
+
"Returns whether the current process is the main process on the local node"
|
| 876 |
+
return PartialState().is_local_main_process
|
| 877 |
+
|
| 878 |
+
def wait_for_everyone(self):
|
| 879 |
+
PartialState().wait_for_everyone()
|
| 880 |
+
|
| 881 |
+
@contextmanager
|
| 882 |
+
def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False):
|
| 883 |
+
"""
|
| 884 |
+
Splits `input` between `self.num_processes` quickly and can be then used on that process. Useful when doing
|
| 885 |
+
distributed inference, such as with different prompts.
|
| 886 |
+
|
| 887 |
+
Note that when using a `dict`, all keys need to have the same number of elements.
|
| 888 |
+
|
| 889 |
+
Args:
|
| 890 |
+
inputs (`list`, `tuple`, `torch.Tensor`, or `dict` of `list`/`tuple`/`torch.Tensor`):
|
| 891 |
+
The input to split between processes.
|
| 892 |
+
apply_padding (`bool`, `optional`, defaults to `False`):
|
| 893 |
+
Whether to apply padding by repeating the last element of the input so that all processes have the same
|
| 894 |
+
number of elements. Useful when trying to perform actions such as `gather()` on the outputs or passing
|
| 895 |
+
in less inputs than there are processes. If so, just remember to drop the padded elements afterwards.
|
| 896 |
+
|
| 897 |
+
|
| 898 |
+
Example:
|
| 899 |
+
|
| 900 |
+
```python
|
| 901 |
+
# Assume there are two processes
|
| 902 |
+
from accelerate.state import AcceleratorState
|
| 903 |
+
|
| 904 |
+
state = AcceleratorState()
|
| 905 |
+
with state.split_between_processes(["A", "B", "C"]) as inputs:
|
| 906 |
+
print(inputs)
|
| 907 |
+
# Process 0
|
| 908 |
+
["A", "B"]
|
| 909 |
+
# Process 1
|
| 910 |
+
["C"]
|
| 911 |
+
|
| 912 |
+
with state.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs:
|
| 913 |
+
print(inputs)
|
| 914 |
+
# Process 0
|
| 915 |
+
["A", "B"]
|
| 916 |
+
# Process 1
|
| 917 |
+
["C", "C"]
|
| 918 |
+
```
|
| 919 |
+
"""
|
| 920 |
+
with PartialState().split_between_processes(inputs, apply_padding=apply_padding) as inputs:
|
| 921 |
+
yield inputs
|
| 922 |
+
|
| 923 |
+
@contextmanager
|
| 924 |
+
def main_process_first(self):
|
| 925 |
+
"""
|
| 926 |
+
Lets the main process go first inside a with block.
|
| 927 |
+
|
| 928 |
+
The other processes will enter the with block after the main process exits.
|
| 929 |
+
"""
|
| 930 |
+
with PartialState().main_process_first():
|
| 931 |
+
yield
|
| 932 |
+
|
| 933 |
+
@contextmanager
|
| 934 |
+
def local_main_process_first(self):
|
| 935 |
+
"""
|
| 936 |
+
Lets the local main process go inside a with block.
|
| 937 |
+
|
| 938 |
+
The other processes will enter the with block after the main process exits.
|
| 939 |
+
"""
|
| 940 |
+
with PartialState().local_main_process_first():
|
| 941 |
+
yield
|
| 942 |
+
|
| 943 |
+
def print(self, *args, **kwargs):
|
| 944 |
+
PartialState().print(*args, **kwargs)
|
| 945 |
+
|
| 946 |
+
|
| 947 |
+
class GradientState:
|
| 948 |
+
"""
|
| 949 |
+
Singleton class that has information related to gradient synchronization for gradient accumulation
|
| 950 |
+
|
| 951 |
+
**Available attributes:**
|
| 952 |
+
|
| 953 |
+
- **end_of_dataloader** (`bool`) -- Whether we have reached the end the current dataloader
|
| 954 |
+
- **remainder** (`int`) -- The number of extra samples that were added from padding the dataloader
|
| 955 |
+
- **sync_gradients** (`bool`) -- Whether the gradients should be synced across all devices
|
| 956 |
+
- **active_dataloader** (`Optional[DataLoader]`) -- The dataloader that is currently being iterated over
|
| 957 |
+
- **dataloader_references** (`List[Optional[DataLoader]]`) -- A list of references to the dataloaders that are
|
| 958 |
+
being iterated over
|
| 959 |
+
- **num_steps** (`int`) -- The number of steps to accumulate over
|
| 960 |
+
- **adjust_scheduler** (`bool`) -- Whether the scheduler should be adjusted to account for the gradient
|
| 961 |
+
accumulation
|
| 962 |
+
- **sync_with_dataloader** (`bool`) -- Whether the gradients should be synced at the end of the dataloader
|
| 963 |
+
iteration and the number of total steps reset
|
| 964 |
+
"""
|
| 965 |
+
|
| 966 |
+
_shared_state = SharedDict()
|
| 967 |
+
|
| 968 |
+
def __init__(self, gradient_accumulation_plugin: Optional[GradientAccumulationPlugin] = None):
|
| 969 |
+
self.__dict__ = self._shared_state
|
| 970 |
+
if not self.initialized:
|
| 971 |
+
self.sync_gradients = True
|
| 972 |
+
self.active_dataloader = None
|
| 973 |
+
self.dataloader_references = [None]
|
| 974 |
+
self.plugin_kwargs = (
|
| 975 |
+
gradient_accumulation_plugin.to_kwargs() if gradient_accumulation_plugin is not None else {}
|
| 976 |
+
)
|
| 977 |
+
|
| 978 |
+
# Plugin args are different and can be updated
|
| 979 |
+
if gradient_accumulation_plugin is not None and self.plugin_kwargs != gradient_accumulation_plugin.to_kwargs():
|
| 980 |
+
self.plugin_kwargs = gradient_accumulation_plugin.to_kwargs()
|
| 981 |
+
|
| 982 |
+
@property
|
| 983 |
+
def num_steps(self) -> int:
|
| 984 |
+
"Returns the number of steps to accumulate over"
|
| 985 |
+
return self.plugin_kwargs.get("num_steps", 1)
|
| 986 |
+
|
| 987 |
+
@property
|
| 988 |
+
def adjust_scheduler(self) -> bool:
|
| 989 |
+
"Returns whether the scheduler should be adjusted"
|
| 990 |
+
return self.plugin_kwargs.get("adjust_scheduler", False)
|
| 991 |
+
|
| 992 |
+
@property
|
| 993 |
+
def sync_with_dataloader(self) -> bool:
|
| 994 |
+
"Returns whether the gradients should be synced at the end of the dataloader iteration and the number of total steps reset"
|
| 995 |
+
return self.plugin_kwargs.get("sync_with_dataloader", True)
|
| 996 |
+
|
| 997 |
+
@property
|
| 998 |
+
def initialized(self) -> bool:
|
| 999 |
+
"Returns whether the `GradientState` has been initialized"
|
| 1000 |
+
return GradientState._shared_state != {}
|
| 1001 |
+
|
| 1002 |
+
@property
|
| 1003 |
+
def end_of_dataloader(self) -> bool:
|
| 1004 |
+
"Returns whether we have reached the end of the current dataloader"
|
| 1005 |
+
if not self.in_dataloader:
|
| 1006 |
+
return False
|
| 1007 |
+
return self.active_dataloader.end_of_dataloader
|
| 1008 |
+
|
| 1009 |
+
@property
|
| 1010 |
+
def remainder(self) -> int:
|
| 1011 |
+
"Returns the number of extra samples that were added from padding the dataloader"
|
| 1012 |
+
if not self.in_dataloader:
|
| 1013 |
+
return -1
|
| 1014 |
+
return self.active_dataloader.remainder
|
| 1015 |
+
|
| 1016 |
+
def __repr__(self):
|
| 1017 |
+
return (
|
| 1018 |
+
f"Sync Gradients: {self.sync_gradients}\n"
|
| 1019 |
+
f"At end of current dataloader: {self.end_of_dataloader}\n"
|
| 1020 |
+
f"Extra samples added: {self.remainder}\n"
|
| 1021 |
+
f"Gradient accumulation plugin: {self.plugin_kwargs}\n"
|
| 1022 |
+
)
|
| 1023 |
+
|
| 1024 |
+
def _set_sync_gradients(self, sync_gradients):
|
| 1025 |
+
"Private function that sets whether gradients should be synchronized. Users should not have to call this."
|
| 1026 |
+
self.sync_gradients = sync_gradients
|
| 1027 |
+
|
| 1028 |
+
def _add_dataloader(self, dataloader):
|
| 1029 |
+
"Private function that adds a dataloader to `self.dataloader_references` and sets `in_dataloader` to `True`. Users should not have to call this."
|
| 1030 |
+
self.active_dataloader = dataloader
|
| 1031 |
+
self.dataloader_references.append(self.active_dataloader)
|
| 1032 |
+
|
| 1033 |
+
def _remove_dataloader(self, dataloader):
|
| 1034 |
+
"Private function that removes a dataloader from `self.dataloader_references` and sets `in_dataloader` to `False` if there are no more dataloaders. Users should not have to call this."
|
| 1035 |
+
self.dataloader_references.remove(dataloader)
|
| 1036 |
+
self.active_dataloader = self.dataloader_references[-1]
|
| 1037 |
+
|
| 1038 |
+
@property
|
| 1039 |
+
def in_dataloader(self) -> bool:
|
| 1040 |
+
"Returns whether the current process is in a dataloader"
|
| 1041 |
+
return self.active_dataloader is not None
|
| 1042 |
+
|
| 1043 |
+
@staticmethod
|
| 1044 |
+
def _reset_state():
|
| 1045 |
+
"Resets `_shared_state`, is used internally and should not be called"
|
| 1046 |
+
GradientState._shared_state.clear()
|
evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/__init__.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .testing import (
|
| 2 |
+
are_the_same_tensors,
|
| 3 |
+
assert_exception,
|
| 4 |
+
execute_subprocess_async,
|
| 5 |
+
require_bnb,
|
| 6 |
+
require_cpu,
|
| 7 |
+
require_cuda,
|
| 8 |
+
require_huggingface_suite,
|
| 9 |
+
require_mps,
|
| 10 |
+
require_multi_gpu,
|
| 11 |
+
require_multi_xpu,
|
| 12 |
+
require_safetensors,
|
| 13 |
+
require_single_gpu,
|
| 14 |
+
require_single_xpu,
|
| 15 |
+
require_torch_min_version,
|
| 16 |
+
require_tpu,
|
| 17 |
+
require_xpu,
|
| 18 |
+
skip,
|
| 19 |
+
slow,
|
| 20 |
+
)
|
| 21 |
+
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
from .scripts import test_script, test_sync, test_ops # isort: skip
|
evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/__init__.py
ADDED
|
File without changes
|
evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_notebook.cpython-310.pyc
ADDED
|
Binary file (648 Bytes). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_script.cpython-310.pyc
ADDED
|
Binary file (14.1 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_sync.cpython-310.pyc
ADDED
|
Binary file (8.68 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__init__.py
ADDED
|
File without changes
|
evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_peak_memory_usage.cpython-310.pyc
ADDED
|
Binary file (7.04 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_checkpointing.py
ADDED
|
@@ -0,0 +1,269 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
import argparse
|
| 16 |
+
import json
|
| 17 |
+
import os
|
| 18 |
+
|
| 19 |
+
import evaluate
|
| 20 |
+
import torch
|
| 21 |
+
from datasets import load_dataset
|
| 22 |
+
from torch.optim import AdamW
|
| 23 |
+
from torch.utils.data import DataLoader
|
| 24 |
+
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
| 25 |
+
|
| 26 |
+
from accelerate import Accelerator, DistributedType
|
| 27 |
+
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
MAX_GPU_BATCH_SIZE = 16
|
| 31 |
+
EVAL_BATCH_SIZE = 32
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
|
| 35 |
+
"""
|
| 36 |
+
Creates a set of `DataLoader`s for the `glue` dataset.
|
| 37 |
+
|
| 38 |
+
Args:
|
| 39 |
+
accelerator (`Accelerator`):
|
| 40 |
+
An `Accelerator` object
|
| 41 |
+
batch_size (`int`, *optional*):
|
| 42 |
+
The batch size for the train and validation DataLoaders.
|
| 43 |
+
model_name (`str`, *optional*):
|
| 44 |
+
"""
|
| 45 |
+
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
| 46 |
+
datasets = load_dataset("glue", "mrpc")
|
| 47 |
+
|
| 48 |
+
def tokenize_function(examples):
|
| 49 |
+
# max_length=None => use the model max length (it's actually the default)
|
| 50 |
+
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
|
| 51 |
+
return outputs
|
| 52 |
+
|
| 53 |
+
# Apply the method we just defined to all the examples in all the splits of the dataset
|
| 54 |
+
tokenized_datasets = datasets.map(
|
| 55 |
+
tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
|
| 56 |
+
)
|
| 57 |
+
|
| 58 |
+
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
|
| 59 |
+
# transformers library
|
| 60 |
+
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
|
| 61 |
+
|
| 62 |
+
def collate_fn(examples):
|
| 63 |
+
# On TPU it's best to pad everything to the same length or training will be very slow.
|
| 64 |
+
if accelerator.distributed_type == DistributedType.TPU:
|
| 65 |
+
return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
|
| 66 |
+
return tokenizer.pad(examples, padding="longest", return_tensors="pt")
|
| 67 |
+
|
| 68 |
+
# Instantiate dataloaders.
|
| 69 |
+
train_dataloader = DataLoader(
|
| 70 |
+
tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
|
| 71 |
+
)
|
| 72 |
+
eval_dataloader = DataLoader(
|
| 73 |
+
tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
|
| 74 |
+
)
|
| 75 |
+
|
| 76 |
+
return train_dataloader, eval_dataloader
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def evaluation_loop(accelerator, model, eval_dataloader, metric):
|
| 80 |
+
model.eval()
|
| 81 |
+
samples_seen = 0
|
| 82 |
+
for step, batch in enumerate(eval_dataloader):
|
| 83 |
+
# We could avoid this line since we set the accelerator with `device_placement=True`.
|
| 84 |
+
batch.to(accelerator.device)
|
| 85 |
+
with torch.no_grad():
|
| 86 |
+
outputs = model(**batch)
|
| 87 |
+
predictions = outputs.logits.argmax(dim=-1)
|
| 88 |
+
# It is slightly faster to call this once, than multiple times
|
| 89 |
+
predictions, references = accelerator.gather(
|
| 90 |
+
(predictions, batch["labels"])
|
| 91 |
+
) # If we are in a multiprocess environment, the last batch has duplicates
|
| 92 |
+
if accelerator.use_distributed:
|
| 93 |
+
if step == len(eval_dataloader) - 1:
|
| 94 |
+
predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
|
| 95 |
+
references = references[: len(eval_dataloader.dataset) - samples_seen]
|
| 96 |
+
else:
|
| 97 |
+
samples_seen += references.shape[0]
|
| 98 |
+
metric.add_batch(
|
| 99 |
+
predictions=predictions,
|
| 100 |
+
references=references,
|
| 101 |
+
)
|
| 102 |
+
|
| 103 |
+
eval_metric = metric.compute()
|
| 104 |
+
return eval_metric["accuracy"]
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def training_function(config, args):
    """Train an MRPC classifier, checkpointing per epoch, and verify resumption.

    Two modes driven by ``args``:
      * ``--partial_train_epoch N``: train only up to epoch ``N`` (exclusive),
        saving a checkpoint and a ``state_{epoch}.json`` metrics file each epoch.
      * ``--resume_from_checkpoint DIR``: reload the checkpoint, re-evaluate, and
        assert accuracy / scheduler lr / optimizer lr / epoch all match the
        previously written state file, then return without further training.
    """
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer: a real AdamW unless DeepSpeed's config already
    # declares an optimizer, in which case a placeholder is handed to `prepare`.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    # Mirror DeepSpeed's gradient accumulation setting so the manual stepping
    # below stays consistent with the engine's schedule.
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler (real linear warmup unless DeepSpeed owns it)
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        # Recover the epoch number from the checkpoint directory name
        # ("...epoch_<N>..."): collect leading digits after "epoch_".
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        # Compare against the metrics recorded when the checkpoint was written.
        with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            # NOTE(review): this steps on step 0 (i.e. after the *first*
            # micro-batch) rather than after a full accumulation window
            # ((step + 1) % gradient_accumulation_steps == 0). Kept as-is:
            # the resume test only needs deterministic, repeatable behavior.
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1
        # Checkpoint + evaluate at the end of every epoch.
        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        # Only the main process writes the per-epoch state file.
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
def main():
    """Parse CLI arguments and launch the checkpointing test run."""
    arg_parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    arg_parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    arg_parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    arg_parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    arg_parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    arg_parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    parsed_args = arg_parser.parse_args()
    # Fixed hyper-parameters except for the epoch count, which is CLI-driven.
    hyperparams = {"lr": 2e-5, "num_epochs": parsed_args.num_epochs, "seed": 42, "batch_size": 16}

    training_function(hyperparams, parsed_args)
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
# Script entry point: run the checkpointing test when executed directly.
if __name__ == "__main__":
    main()
|
evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_metrics.py
ADDED
|
@@ -0,0 +1,266 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2022 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import logging
|
| 16 |
+
import math
|
| 17 |
+
import os
|
| 18 |
+
from copy import deepcopy
|
| 19 |
+
|
| 20 |
+
import datasets
|
| 21 |
+
import evaluate
|
| 22 |
+
import torch
|
| 23 |
+
import transformers
|
| 24 |
+
from datasets import load_dataset
|
| 25 |
+
from torch.utils.data import DataLoader, IterableDataset
|
| 26 |
+
from transformers import AutoModelForSequenceClassification, AutoTokenizer
|
| 27 |
+
|
| 28 |
+
from accelerate import Accelerator
|
| 29 |
+
from accelerate.data_loader import DataLoaderDispatcher
|
| 30 |
+
from accelerate.test_utils import RegressionDataset, RegressionModel
|
| 31 |
+
from accelerate.utils import is_tpu_available, set_seed
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
class ListHandler(logging.Handler):
    """Logging handler that collects every emitted record in memory.

    Used by the tests to assert that no warnings were logged during
    `gather_for_metrics` calls.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Records accumulate here in emission order; one list per instance.
        self.logs = []

    def emit(self, record):
        self.logs.append(record)
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    # Keep an unwrapped reference copy before `prepare` wraps the other one.
    reference_model = RegressionModel()
    wrapped_model = deepcopy(reference_model)
    regression_data = RegressionDataset(length=num_samples)
    loader = DataLoader(regression_data, batch_size=batch_size)
    reference_model.to(accelerator.device)
    wrapped_model, loader = accelerator.prepare(wrapped_model, loader)
    return reference_model, wrapped_model, loader
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def get_dataloader(accelerator: Accelerator, use_longest=False):
    """Build the MRPC validation `DataLoader` used by the metric tests.

    `use_longest=True` pads each batch to its longest member; otherwise every
    batch is padded to a fixed length of 128 tokens.
    """
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def _tokenize(examples):
        return tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)

    # Tokenize on the main process first so the cache is shared, not raced.
    with accelerator.main_process_first():
        tokenized = dataset.map(
            _tokenize,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized = tokenized.rename_column("label", "labels")

    def _collate(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized, shuffle=False, collate_fn=_collate, batch_size=16)
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def get_mrpc_setup(dispatch_batches, split_batches):
    """Return paired distributed ("ddp") and local ("no") model/dataloader setups.

    The "no" entry is the unprepared baseline; the "ddp" entry went through
    `accelerator.prepare`. The baseline is pinned to "cuda:0" by the caller.
    """
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    # With dispatch_batches the loader pads to a fixed length; otherwise pad to longest.
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    setups = {
        "ddp": [ddp_model, ddp_dataloader, "cuda:0"],
        "no": [model, dataloader, accelerator.device],
    }
    return setups, accelerator
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def generate_predictions(model, dataloader, accelerator):
    """Run `model` over `dataloader`, gathering outputs across processes.

    Returns the concatenated (logits, targets) tensors, deduplicated across
    processes by `gather_for_metrics`.
    """
    all_logits = []
    all_targets = []
    for batch in dataloader:
        # Each batch is a mapping with exactly two entries: features, labels.
        features, labels = batch.values()
        with torch.no_grad():
            outputs = model(features)
        outputs, labels = accelerator.gather_for_metrics((outputs, labels))
        all_logits.append(outputs)
        all_targets.append(labels)
    return torch.cat(all_logits), torch.cat(all_targets)
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    """Check `gather_for_metrics` yields exactly `num_samples` rows end-to-end.

    NOTE(review): `dispatch_batches`, `split_batches` and `batch_size` (beyond
    its forwarding) are not used to build the accelerator here — the caller
    constructs the Accelerator itself; confirm the extra parameters are kept
    only for call-site symmetry.
    """
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    # After dedup by gather_for_metrics, the gathered logits must match the
    # dataset length exactly (no duplicated last-batch padding).
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(logits)}"
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    """Assert distributed metric computation matches a single-process baseline.

    Runs the same MRPC evaluation twice — once locally, once through the
    prepared (distributed) model/dataloader with `gather_for_metrics` — and
    requires accuracy and F1 to agree.
    """
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
            preds = outputs.logits.argmax(dim=-1)
            metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
            preds = outputs.logits.argmax(dim=-1)
            references = batch["labels"]
            # Deduplicates the padded last batch across processes.
            preds, references = accelerator.gather_for_metrics((preds, references))
            metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
def test_gather_for_metrics_with_non_tensor_objects_iterable_dataset():
    """`gather_for_metrics` on an iterable dataset of plain Python ints.

    Verifies all 30 elements are gathered and that no warnings were logged
    by the accelerator while gathering (via the in-memory ListHandler).
    """
    class DummyIterableDataset(IterableDataset):
        def __init__(self, data):
            self.data = data

        def __len__(self):
            return len(self.data)

        def __iter__(self):
            for element in self.data:
                yield element

    # Plain ints (not tensors) exercise the non-tensor gather path.
    iterable_dataset = DummyIterableDataset([n for n in range(30)])
    dataloader = DataLoader(iterable_dataset, batch_size=4)
    accelerator = Accelerator()
    prepared_dataloader = accelerator.prepare(dataloader)

    if accelerator.is_main_process:
        # Capture anything the accelerator logger emits during gathering.
        logger = logging.root.manager.loggerDict["accelerate.accelerator"]
        list_handler = ListHandler()
        logger.addHandler(list_handler)

    batches_for_metrics = []
    for batch in prepared_dataloader:
        batches_for_metrics.append(accelerator.gather_for_metrics(batch))

    # All 30 samples must come back, with no last-batch duplication.
    assert torch.cat(batches_for_metrics).size(0) == 30

    if accelerator.is_main_process:
        # No warnings/errors should have been logged while gathering.
        assert len(list_handler.logs) == 0
        logger.removeHandler(list_handler)
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
def test_gather_for_metrics_with_iterable_dataset():
    """`gather_for_metrics` on an iterable dataset of tensors.

    Also asserts the prepared loader is a `DataLoaderDispatcher` and that
    gathering logs no warnings.
    """
    class DummyIterableDataset(IterableDataset):
        def __init__(self, data):
            self.data = data

        def __len__(self):
            return len(self.data)

        def __iter__(self):
            for element in self.data:
                yield element

    iterable_dataset = DummyIterableDataset(torch.as_tensor(range(30)))
    dataloader = DataLoader(iterable_dataset, batch_size=4)

    accelerator = Accelerator()
    prepared_dataloader = accelerator.prepare(dataloader)

    # Iterable datasets must be wrapped by the dispatcher variant.
    assert isinstance(prepared_dataloader, DataLoaderDispatcher)

    if accelerator.is_main_process:
        # Capture anything the accelerator logger emits during gathering.
        logger = logging.root.manager.loggerDict["accelerate.accelerator"]
        list_handler = ListHandler()
        logger.addHandler(list_handler)

    batches_for_metrics = []
    for batch in prepared_dataloader:
        batches_for_metrics.append(accelerator.gather_for_metrics(batch))

    # All 30 samples must come back, with no last-batch duplication.
    assert torch.cat(batches_for_metrics).size(0) == 30

    if accelerator.is_main_process:
        # No warnings/errors should have been logged while gathering.
        assert len(list_handler.logs) == 0

        logger.removeHandler(list_handler)
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
def main():
    """Drive all metric tests across split/dispatch batch configurations.

    Quiets third-party logging on non-main processes, runs the MRPC
    comparison tests (GPU/TPU only), the iterable-dataset gather tests,
    and the synthetic torch-metric tests, resetting accelerator state
    between configurations.
    """
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                # Each configuration needs a fresh AcceleratorState.
                accelerator.state._reset_state()
    print("test_gather_for_metrics_with_iterable_dataset")
    test_gather_for_metrics_with_iterable_dataset()
    print("test gather_for_metrics_with_non_tensor_objects_iterable_dataset")
    test_gather_for_metrics_with_non_tensor_objects_iterable_dataset()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    # 512 divides evenly into the batch size, so nothing should be trimmed.
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
def _mp_fn(index):
    # For xla_spawn (TPUs): per-process entry point; the index is unused.
    main()
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
# Script entry point: run the metric tests when executed directly.
if __name__ == "__main__":
    main()
|
evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py
ADDED
|
@@ -0,0 +1,277 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
import argparse
|
| 16 |
+
import gc
|
| 17 |
+
import json
|
| 18 |
+
import os
|
| 19 |
+
|
| 20 |
+
import torch
|
| 21 |
+
from datasets import load_dataset
|
| 22 |
+
from torch.optim import AdamW
|
| 23 |
+
from torch.utils.data import DataLoader
|
| 24 |
+
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
| 25 |
+
|
| 26 |
+
from accelerate import Accelerator, DistributedType
|
| 27 |
+
from accelerate.utils import is_npu_available, is_xpu_available
|
| 28 |
+
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
MAX_GPU_BATCH_SIZE = 16
|
| 32 |
+
EVAL_BATCH_SIZE = 32
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
# Converting Bytes to Megabytes
|
| 36 |
+
# Converting Bytes to Megabytes
def b2mb(x):
    """Convert a byte count to whole mebibytes, truncating toward zero."""
    bytes_per_mib = 1 << 20  # 2**20
    return int(x / bytes_per_mib)
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
# This context manager is used to track the peak memory usage of the process
|
| 41 |
+
# This context manager is used to track the peak memory usage of the process
class TorchTracemalloc:
    """Measure accelerator memory consumed inside a ``with`` block.

    After exit:
      * ``self.used``   — MB delta between exit and entry allocations.
      * ``self.peaked`` — MB delta between the peak and the entry allocation.

    Supports CUDA, NPU and XPU backends. Fix over the original: on a host
    with none of those accelerators, ``begin``/``end``/``peak`` were never
    assigned and ``__exit__`` raised ``AttributeError``; all gauges now
    default to zero so CPU-only runs report 0 instead of crashing.
    """

    def __enter__(self):
        gc.collect()
        # Default so __exit__ never reads an unset attribute on CPU-only runs.
        self.begin = 0
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
            self.begin = torch.cuda.memory_allocated()
        elif is_npu_available():
            torch.npu.empty_cache()
            torch.npu.reset_max_memory_allocated()  # reset the peak gauge to zero
            self.begin = torch.npu.memory_allocated()
        elif is_xpu_available():
            torch.xpu.empty_cache()
            torch.xpu.reset_max_memory_allocated()  # reset the peak gauge to zero
            self.begin = torch.xpu.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        # Defaults for the CPU-only case (no accelerator branch taken below).
        self.end = 0
        self.peak = 0
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            self.end = torch.cuda.memory_allocated()
            self.peak = torch.cuda.max_memory_allocated()
        elif is_npu_available():
            torch.npu.empty_cache()
            self.end = torch.npu.memory_allocated()
            self.peak = torch.npu.max_memory_allocated()
        elif is_xpu_available():
            torch.xpu.empty_cache()
            self.end = torch.xpu.memory_allocated()
            self.peak = torch.xpu.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def get_dataloaders(
    accelerator: Accelerator,
    batch_size: int = 16,
    model_name: str = "bert-base-cased",
    n_train: int = 320,
    n_val: int = 160,
):
    """
    Creates a set of `DataLoader`s for the `glue` dataset.

    Args:
        accelerator (`Accelerator`):
            An `Accelerator` object
        batch_size (`int`, *optional*):
            The batch size for the train and validation DataLoaders.
        model_name (`str`, *optional*):
            The name of the model to use.
        n_train (`int`, *optional*):
            The number of training examples to use.
        n_val (`int`, *optional*):
            The number of validation examples to use.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    # Slice the splits so the memory test runs on a small, fixed subset.
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # (load_from_cache_file=False forces re-tokenization each run).
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
def training_function(config, args):
    """Train for a few epochs while recording per-epoch peak accelerator memory.

    Writes the collected peaks to ``peak_memory_utilization.json`` in
    ``args.output_dir`` (main process only) and, if
    ``args.peak_memory_upper_bound`` is set, asserts each epoch stayed
    under that bound.
    """
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer: a real AdamW unless DeepSpeed's config already
    # declares an optimizer, in which case a placeholder is handed to `prepare`.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    # Mirror DeepSpeed's gradient accumulation setting so the manual stepping
    # below stays consistent with the engine's schedule.
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler (real linear warmup unless DeepSpeed owns it)
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        # The context manager snapshots accelerator memory around the epoch.
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                # NOTE(review): steps on step 0 (after the first micro-batch)
                # rather than (step + 1) % gradient_accumulation_steps == 0;
                # kept as-is since the test only measures memory.
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + b2mb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    # Only the main process writes the JSON report.
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
|
| 230 |
+
|
| 231 |
+
|
| 232 |
+
def main():
    """Parse command-line options and kick off the peak-memory-tracking training run."""
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        required=False,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument("--n_train", type=int, default=320, help="Number of training examples to use.")
    parser.add_argument("--n_val", type=int, default=160, help="Number of validation examples to use.")
    parser.add_argument("--num_epochs", type=int, default=1, help="Number of train epochs.")
    args = parser.parse_args()
    # Fixed hyper-parameters; only the epoch count is user-configurable via the CLI.
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
|
evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_performance.py
ADDED
|
@@ -0,0 +1,231 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
import argparse
|
| 16 |
+
import json
|
| 17 |
+
import os
|
| 18 |
+
|
| 19 |
+
import evaluate
|
| 20 |
+
import torch
|
| 21 |
+
from datasets import load_dataset
|
| 22 |
+
from torch.optim import AdamW
|
| 23 |
+
from torch.utils.data import DataLoader
|
| 24 |
+
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
|
| 25 |
+
|
| 26 |
+
from accelerate import Accelerator, DistributedType
|
| 27 |
+
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
MAX_GPU_BATCH_SIZE = 16
|
| 31 |
+
EVAL_BATCH_SIZE = 32
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    """
    Creates a set of `DataLoader`s for the `glue` dataset.

    Args:
        accelerator (`Accelerator`):
            An `Accelerator` object
        batch_size (`int`, *optional*):
            The batch size for the train and validation DataLoaders.
        model_name (`str`, *optional*):
            Name of (or path to) the pretrained checkpoint whose tokenizer is used.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders. Training shuffles; evaluation keeps order and a fixed batch size.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def training_function(config, args):
    """Train and evaluate a GLUE/MRPC sequence-classification model.

    Runs `config["num_epochs"]` epochs of training followed by per-epoch
    evaluation, records the eval accuracy per epoch, optionally asserts the
    best accuracy reaches ``args.performance_lower_bound``, and (main process
    only) dumps the per-epoch accuracies to ``all_results.json`` in
    ``args.output_dir``.

    Args:
        config: dict with keys "lr", "num_epochs", "seed" and "batch_size".
        args: parsed CLI namespace (model_name_or_path, output_dir,
            performance_lower_bound, num_epochs).
    """
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer: use the DeepSpeed dummy optimizer when the DeepSpeed
    # config provides its own optimizer, otherwise plain AdamW.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    # Gradient accumulation is driven by the DeepSpeed config when present.
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler: real warmup scheduler unless DeepSpeed supplies one.
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            # Scale the loss so accumulated gradients average over the accumulation window.
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            # NOTE(review): fires on step 0 as well — optimizer steps on the first
            # batch of every accumulation window; confirm this is intended.
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            # NOTE(review): return value is discarded — assumes `batch.to` moves tensors
            # in place; confirm against the batch type's semantics.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                # Drop the padded duplicates from the final gathered batch so the
                # metric only sees each validation sample once.
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    # Optional regression gate: fail loudly if accuracy fell below the given bound.
    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
def main():
    """Parse CLI options and run the performance-tracking training function."""
    # NOTE(review): description string appears copied from the memory-tracking
    # script — confirm the intended wording.
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        required=False,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument("--num_epochs", type=int, default=3, help="Number of train epochs.")
    args = parser.parse_args()
    # Fixed hyper-parameters; only the epoch count is user-configurable via the CLI.
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
|
evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_cli.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def main():
    """Report how many CUDA devices this process can see."""
    gpu_count = torch.cuda.device_count() if torch.cuda.is_available() else 0
    print(f"Successfully ran on {gpu_count} GPUs")


if __name__ == "__main__":
    main()
|
evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_distributed_data_loop.py
ADDED
|
@@ -0,0 +1,238 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python
|
| 2 |
+
|
| 3 |
+
# Copyright 2021 The HuggingFace Team. All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
# See the License for the specific language governing permissions and
|
| 15 |
+
# limitations under the License.
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
import warnings
|
| 19 |
+
from typing import List
|
| 20 |
+
from unittest.mock import Mock
|
| 21 |
+
|
| 22 |
+
import torch
|
| 23 |
+
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
|
| 24 |
+
|
| 25 |
+
from accelerate.accelerator import Accelerator
|
| 26 |
+
from accelerate.utils.dataclasses import DistributedType
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class DummyIterableDataset(IterableDataset):
    """Minimal iterable-style dataset that replays a fixed sequence of elements."""

    def __init__(self, data):
        # Keep a reference to the backing sequence; iteration yields it in order.
        self.data = data

    def __iter__(self):
        yield from self.data
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def create_accelerator(even_batches=True):
    """Build an `Accelerator` for these tests, checking the expected world size of 2."""
    acc = Accelerator(even_batches=even_batches)
    assert acc.num_processes == 2, "this script expects that two GPUs are available"
    return acc
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    """
    Create a simple DataLoader to use during the test cases
    """
    values = torch.as_tensor(range(dataset_size))
    # Map-style by default; iterable-style on request.
    dataset = DummyIterableDataset(values) if iterable else TensorDataset(values)
    return accelerator.prepare(DataLoader(dataset, batch_size=batch_size))
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    """Check the per-process batch sizes produced by a prepared dataloader."""
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)

    observed = [len(batch[0]) for batch in dl]

    expectations = {0: process_0_expected_batch_sizes, 1: process_1_expected_batch_sizes}
    expected = expectations.get(accelerator.process_index)
    if expected is not None:
        assert observed == expected
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def test_default_ensures_even_batch_sizes():
    """With the default `even_batches=True`, both processes see identical batch shapes."""
    accelerator = create_accelerator()

    # (dataset_size, batch_size, expected on process 0, expected on process 1)
    cases = [
        # without padding, we would expect a different number of batches
        (3, 1, [1, 1], [1, 1]),
        # without padding, we would expect the same number of batches, but different sizes
        (7, 2, [2, 2], [2, 2]),
    ]
    for dataset_size, batch_size, expected_0, expected_1 in cases:
        verify_dataloader_batch_sizes(
            accelerator,
            dataset_size=dataset_size,
            batch_size=batch_size,
            process_0_expected_batch_sizes=expected_0,
            process_1_expected_batch_sizes=expected_1,
        )
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def test_can_disable_even_batches():
    """With `even_batches=False`, the second process may receive fewer/smaller batches."""
    accelerator = create_accelerator(even_batches=False)

    # (dataset_size, batch_size, expected on process 0, expected on process 1)
    cases = [
        (3, 1, [1, 1], [1]),
        (7, 2, [2, 2], [2, 1]),
    ]
    for dataset_size, batch_size, expected_0, expected_1 in cases:
        verify_dataloader_batch_sizes(
            accelerator,
            dataset_size=dataset_size,
            batch_size=batch_size,
            process_0_expected_batch_sizes=expected_0,
            process_1_expected_batch_sizes=expected_1,
        )
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def test_can_join_uneven_inputs():
    """`join_uneven_inputs` lets each process run its own (unequal) number of steps."""
    accelerator = create_accelerator(even_batches=False)

    ddp_model = accelerator.prepare(torch.nn.Linear(1, 1))

    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    seen_batches = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            # Forward + backward so DDP's join hooks actually engage.
            ddp_model(batch[0].float()).sum().backward()
            seen_batches.append(batch_idx)

    accelerator.wait_for_everyone()

    expected = {0: [0, 1], 1: [0]}
    if accelerator.process_index in expected:
        assert seen_batches == expected[accelerator.process_index]
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    """Joining under a non-DDP distributed type should emit a UserWarning."""
    with warnings.catch_warnings(record=True) as caught:
        with accelerator.join_uneven_inputs([Mock()]):
            pass

    last_warning = caught[-1]
    assert issubclass(last_warning.category, UserWarning)
    assert "only supported for multi-GPU" in str(last_warning.message)
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
def test_join_can_override_even_batches():
    """`join_uneven_inputs(even_batches=...)` overrides all prepared dataloaders
    inside the context and restores the accelerator default afterwards."""
    base_setting = True
    override_setting = False
    accelerator = create_accelerator(even_batches=base_setting)
    ddp_model = accelerator.prepare(torch.nn.Linear(1, 1))
    loaders = [
        create_dataloader(accelerator, dataset_size=3, batch_size=1),
        create_dataloader(accelerator, dataset_size=3, batch_size=1),
    ]

    with accelerator.join_uneven_inputs([ddp_model], even_batches=override_setting):
        inside_values = [dl.batch_sampler.even_batches for dl in loaders]

    # Inside the context every loader was overridden ...
    assert all(value == override_setting for value in inside_values)
    # ... and the original setting is restored once the context exits.
    assert all(dl.batch_sampler.even_batches == base_setting for dl in loaders)
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
def test_join_can_override_for_mixed_type_dataloaders():
    """Overriding `even_batches` must still work (and not raise) when an
    iterable-style dataloader was also prepared alongside a map-style one."""
    base_setting = True
    override_setting = False
    accelerator = create_accelerator(even_batches=base_setting)
    ddp_model = accelerator.prepare(torch.nn.Linear(1, 1))
    # Prepared but unused: its mere presence must not break the override below.
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=override_setting):
                observed_inside = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError

    assert observed_inside == override_setting
    assert batch_dl.batch_sampler.even_batches == base_setting
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    """Overriding `even_batches` with an iterable dataloader prepared emits a warning."""
    accelerator = create_accelerator()
    ddp_model = accelerator.prepare(torch.nn.Linear(1, 1))
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)

    with warnings.catch_warnings(record=True) as caught:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass

    last_warning = caught[-1]
    assert issubclass(last_warning.category, UserWarning)
    assert "only supported for map-style datasets" in str(last_warning.message)
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
def main():
    """Run every even_batches / join_uneven_inputs test, announcing each step."""
    accelerator = create_accelerator()

    # (banner message, test callable) pairs run in order.
    steps = [
        ("Test that even_batches variable ensures uniform batches across processes", test_default_ensures_even_batch_sizes),
        ("Run tests with even_batches disabled", test_can_disable_even_batches),
        ("Test joining uneven inputs", test_can_join_uneven_inputs),
        ("Test overriding even_batches when joining uneven inputs", test_join_can_override_even_batches),
        ("Test overriding even_batches for mixed dataloader types", test_join_can_override_for_mixed_type_dataloaders),
        (
            "Test overriding even_batches raises a warning for iterable dataloaders",
            test_join_raises_warning_for_iterable_when_overriding_even_batches,
        ),
    ]
    for message, test_fn in steps:
        accelerator.print(message)
        test_fn()

    accelerator.print("Test join with non DDP distributed raises warning")
    # Temporarily masquerade as FSDP to exercise the non-DDP warning path,
    # then restore the real distributed type.
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state


if __name__ == "__main__":
    main()
|
evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_notebook.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Test file to ensure that in general certain situational setups for notebooks work.
|
| 2 |
+
import argparse
|
| 3 |
+
|
| 4 |
+
from accelerate import PartialState, notebook_launcher
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
# Parse CLI arguments at import time: `--num_processes` controls how many
# workers `notebook_launcher` spawns in the __main__ guard below.
parser = argparse.ArgumentParser()
parser.add_argument("--num_processes", type=int, default=1)
args = parser.parse_args()
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def function():
|
| 13 |
+
print(f"PartialState:\n{PartialState()}")
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
if __name__ == "__main__":
|
| 17 |
+
notebook_launcher(function, num_processes=int(args.num_processes))
|
evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_ops.py
ADDED
|
@@ -0,0 +1,159 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python
|
| 2 |
+
|
| 3 |
+
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
# See the License for the specific language governing permissions and
|
| 15 |
+
# limitations under the License.
|
| 16 |
+
|
| 17 |
+
import torch
|
| 18 |
+
|
| 19 |
+
from accelerate import PartialState
|
| 20 |
+
from accelerate.test_utils.testing import assert_exception
|
| 21 |
+
from accelerate.utils.dataclasses import DistributedType
|
| 22 |
+
from accelerate.utils.operations import (
|
| 23 |
+
DistributedOperationException,
|
| 24 |
+
broadcast,
|
| 25 |
+
gather,
|
| 26 |
+
gather_object,
|
| 27 |
+
pad_across_processes,
|
| 28 |
+
reduce,
|
| 29 |
+
)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def create_tensor(state):
    """Return this process's slice of consecutive values on `state.device`.

    Process ``p`` gets ``p * num_processes + 1 .. (p + 1) * num_processes``.
    """
    offset = 1.0 + state.num_processes * state.process_index
    return (torch.arange(state.num_processes) + offset).to(state.device)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def test_gather(state):
    """All-gather each rank's tensor; the concatenation must be 1..N^2."""
    gathered = gather(create_tensor(state))
    assert gathered.tolist() == list(range(1, state.num_processes**2 + 1))
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def test_gather_object(state):
    """Gather one Python object per rank and verify count and ordering."""
    collected = gather_object([state.process_index])
    expected = list(range(state.num_processes))
    assert len(collected) == state.num_processes, f"{collected}, {len(collected)} != {state.num_processes}"
    assert collected == expected, f"{collected} != {expected}"
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def test_gather_non_contigous(state):
    """`gather` must accept tensors whose memory layout is non-contiguous."""
    # Create a non-contiguous tensor: a transpose is a strided view, not a copy.
    tensor = torch.arange(12).view(4, 3).t().to(state.device)
    assert not tensor.is_contiguous()
    # Shouldn't error out
    _ = gather(tensor)
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def test_broadcast(state):
    """After broadcast, every rank holds rank 0's tensor (values 1..N)."""
    result = broadcast(create_tensor(state))
    assert result.shape == torch.Size([state.num_processes])
    assert result.tolist() == list(range(1, state.num_processes + 1))
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def test_pad_across_processes(state):
    """Shorter ranks must be zero-padded up to the longest rank's length."""
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    length = state.num_processes + 1 if state.is_main_process else state.num_processes
    tensor = torch.arange(length).to(state.device)
    padded = pad_across_processes(tensor)
    assert padded.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        # Non-main ranks were one element short, so they gain a trailing zero.
        assert padded.tolist() == list(range(0, state.num_processes)) + [0]
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def test_reduce_sum(state):
    """Element-wise sum-reduce across two ranks: [1,2] + [3,4] == [4,6]."""
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    reduced_tensor = reduce(create_tensor(state), "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def test_reduce_mean(state):
    """Element-wise mean-reduce across two ranks: mean([1,2],[3,4]) == [2,3]."""
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    reduced_tensor = reduce(create_tensor(state), "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def test_op_checker(state):
|
| 98 |
+
# Must be in a distributed state
|
| 99 |
+
if state.distributed_type == DistributedType.NO:
|
| 100 |
+
return
|
| 101 |
+
state.debug = True
|
| 102 |
+
# `pad_across_processes`
|
| 103 |
+
if state.process_index == 0:
|
| 104 |
+
data = {"tensor": torch.tensor([[0.0, 1, 2, 3, 4]]).to(state.device)}
|
| 105 |
+
else:
|
| 106 |
+
data = {"tensor": torch.tensor([[[0.0, 1, 2, 3, 4, 5]]]).to(state.device)}
|
| 107 |
+
|
| 108 |
+
with assert_exception(DistributedOperationException):
|
| 109 |
+
pad_across_processes(data, dim=0)
|
| 110 |
+
|
| 111 |
+
# `reduce`
|
| 112 |
+
if state.process_index == 0:
|
| 113 |
+
data = {"tensor": torch.tensor([[0.0, 1, 2, 3, 4]]).to(state.device)}
|
| 114 |
+
else:
|
| 115 |
+
data = {"tensor": torch.tensor([[[0.0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]]).to(state.device)}
|
| 116 |
+
|
| 117 |
+
with assert_exception(DistributedOperationException):
|
| 118 |
+
reduce(data)
|
| 119 |
+
|
| 120 |
+
# `broadcast`
|
| 121 |
+
if state.process_index == 0:
|
| 122 |
+
data = {"tensor": torch.tensor([[0.0, 1, 2, 3, 4]]).to(state.device)}
|
| 123 |
+
else:
|
| 124 |
+
data = {"tensor": torch.tensor([[[0.0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]]).to(state.device)}
|
| 125 |
+
|
| 126 |
+
with assert_exception(DistributedOperationException):
|
| 127 |
+
broadcast(data)
|
| 128 |
+
|
| 129 |
+
state.debug = False
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
def _mp_fn(index):
    """Entry point for `xla_spawn` (TPUs); each spawned process runs the suite."""
    # `index` is the spawned process rank, required by the spawn API but unused.
    main()
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
def main():
    """Run every distributed-ops test in sequence, announcing each one.

    `state.print` only emits on the main process, so logs are not duplicated.
    """
    state = PartialState()
    state.print(f"State: {state}")
    # Ordered (label, test_fn) pairs; labels feed the progress log only.
    # NOTE: the log label fixes the original "non-contigous" typo; the function
    # name itself is kept as-is for backward compatibility with callers.
    checks = (
        ("gather", test_gather),
        ("gather_object", test_gather_object),
        ("gather non-contiguous", test_gather_non_contigous),
        ("broadcast", test_broadcast),
        ("pad_across_processes", test_pad_across_processes),
        ("reduce_sum", test_reduce_sum),
        ("reduce_mean", test_reduce_mean),
        ("op_checker", test_op_checker),
    )
    for label, test_fn in checks:
        state.print(f"testing {label}")
        test_fn(state)
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
# Allow direct invocation (e.g. `python test_ops.py` or via `accelerate launch`).
if __name__ == "__main__":
    main()
|
evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_script.py
ADDED
|
@@ -0,0 +1,616 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python
|
| 2 |
+
|
| 3 |
+
# Copyright 2021 The HuggingFace Team. All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
# See the License for the specific language governing permissions and
|
| 15 |
+
# limitations under the License.
|
| 16 |
+
|
| 17 |
+
import contextlib
|
| 18 |
+
import io
|
| 19 |
+
import math
|
| 20 |
+
import time
|
| 21 |
+
from copy import deepcopy
|
| 22 |
+
from pathlib import Path
|
| 23 |
+
|
| 24 |
+
import torch
|
| 25 |
+
from torch.utils.data import DataLoader
|
| 26 |
+
|
| 27 |
+
from accelerate import Accelerator
|
| 28 |
+
from accelerate.data_loader import prepare_data_loader
|
| 29 |
+
from accelerate.state import AcceleratorState
|
| 30 |
+
from accelerate.test_utils import RegressionDataset, are_the_same_tensors
|
| 31 |
+
from accelerate.utils import (
|
| 32 |
+
DistributedType,
|
| 33 |
+
gather,
|
| 34 |
+
is_bf16_available,
|
| 35 |
+
is_ipex_available,
|
| 36 |
+
is_npu_available,
|
| 37 |
+
is_xpu_available,
|
| 38 |
+
set_seed,
|
| 39 |
+
synchronize_rng_states,
|
| 40 |
+
)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
if is_xpu_available():
    # XPU's ccl backend cannot yet broadcast empty buffers, so a dedicated
    # model variant is used. TODO: remove RegressionModel4XPU once ccl
    # supports empty buffers in broadcasting.
    from accelerate.test_utils import RegressionModel4XPU as RegressionModel
else:
    from accelerate.test_utils import RegressionModel
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def print_main(state):
    """Log line emitted by the main process; used via `on_main_process`."""
    print(f"Printing from the main process {state.process_index}")
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def print_local_main(state):
    """Log line emitted by the local main process; used via `on_local_main_process`."""
    print(f"Printing from the local main process {state.local_process_index}")
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def print_last(state):
    """Log line emitted by the last process; used via `on_last_process`."""
    print(f"Printing from the last process {state.process_index}")
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def print_on(state, process_idx):
    """Log line emitted by one targeted process; used via `on_process`."""
    print(f"Printing from process {process_idx}: {state.process_index}")
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def process_execution_check():
    """Exercise `main_process_first` ordering and the per-process decorators.

    Writes a marker file to prove the main process entered the context first,
    then redirects stdout to verify each decorator only runs on its target rank.
    """
    accelerator = Accelerator()
    num_processes = accelerator.num_processes

    # --- main_process_first: main must write before every other rank ---
    marker = Path("check_main_process_first.txt")
    with accelerator.main_process_first():
        if accelerator.is_main_process:
            time.sleep(0.1)  # ensure main process takes longest
            with open(marker, "a+") as handle:
                handle.write("Currently in the main process\n")
        else:
            with open(marker, "a+") as handle:
                handle.write("Now on another process\n")
    accelerator.wait_for_everyone()

    if accelerator.is_main_process:
        with open(marker, "r") as handle:
            text = "".join(handle.readlines())
        try:
            assert text.startswith("Currently in the main process\n"), "Main process was not first"
            if num_processes > 1:
                assert text.endswith("Now on another process\n"), "Main process was not first"
                assert (
                    text.count("Now on another process\n") == accelerator.num_processes - 1
                ), f"Only wrote to file {text.count('Now on another process') + 1} times, not {accelerator.num_processes}"
        except AssertionError:
            # Clean up the marker file before surfacing the failure.
            marker.unlink()
            raise

    if accelerator.is_main_process and marker.exists():
        marker.unlink()
    accelerator.wait_for_everyone()

    # --- decorators: only the targeted process should print ---
    stream = io.StringIO()

    def reset_stream():
        # Rewind and clear the capture buffer between decorator checks.
        stream.truncate(0)
        stream.seek(0)

    with contextlib.redirect_stdout(stream):
        accelerator.on_main_process(print_main)(accelerator.state)
    result = stream.getvalue().rstrip()
    if accelerator.is_main_process:
        assert result == "Printing from the main process 0", f"{result} != Printing from the main process 0"
    else:
        assert stream.getvalue().rstrip() == "", f'{result} != ""'
    reset_stream()

    with contextlib.redirect_stdout(stream):
        accelerator.on_local_main_process(print_local_main)(accelerator.state)
    if accelerator.is_local_main_process:
        assert stream.getvalue().rstrip() == "Printing from the local main process 0"
    else:
        assert stream.getvalue().rstrip() == ""
    reset_stream()

    with contextlib.redirect_stdout(stream):
        accelerator.on_last_process(print_last)(accelerator.state)
    if accelerator.is_last_process:
        assert stream.getvalue().rstrip() == f"Printing from the last process {accelerator.state.num_processes - 1}"
    else:
        assert stream.getvalue().rstrip() == ""
    reset_stream()

    for process_idx in range(num_processes):
        with contextlib.redirect_stdout(stream):
            accelerator.on_process(print_on, process_index=process_idx)(accelerator.state, process_idx)
        if accelerator.process_index == process_idx:
            assert stream.getvalue().rstrip() == f"Printing from process {process_idx}: {accelerator.process_index}"
        else:
            assert stream.getvalue().rstrip() == ""
        reset_stream()
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
def init_state_check():
    """`AcceleratorState` is a singleton: instantiating it again must be safe."""
    # Test we can instantiate this twice in a row (the Accelerator in `main`
    # already created one).
    state = AcceleratorState()
    if state.local_process_index == 0:
        print("Testing, testing. 1, 2, 3.")
        print(state)
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
def rng_sync_check():
    """Verify RNG synchronization on CPU, the active accelerator, and a generator."""
    state = AcceleratorState()
    synchronize_rng_states(["torch"])
    assert are_the_same_tensors(torch.get_rng_state()), "RNG states improperly synchronized on CPU."
    # Only one device family is active at a time; sync its RNG as well.
    if state.distributed_type == DistributedType.MULTI_GPU:
        synchronize_rng_states(["cuda"])
        assert are_the_same_tensors(torch.cuda.get_rng_state()), "RNG states improperly synchronized on GPU."
    elif state.distributed_type == DistributedType.MULTI_XPU:
        synchronize_rng_states(["xpu"])
        assert are_the_same_tensors(torch.xpu.get_rng_state()), "RNG states improperly synchronized on XPU."
    gen = torch.Generator()
    synchronize_rng_states(["generator"], generator=gen)
    assert are_the_same_tensors(gen.get_state()), "RNG states improperly synchronized in generator."

    if state.local_process_index == 0:
        print("All rng are properly synched.")
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
def dl_preparation_check():
    """Check that prepared DataLoaders shard data correctly, shuffled or not."""
    state = AcceleratorState()
    length = 32 * state.num_processes

    def collect(loader):
        # Gather every sharded batch from all processes into one flat tensor.
        return torch.cat([gather(batch) for batch in loader])

    # Plain sharding: concatenated output must be the original range, in order.
    loader = prepare_data_loader(
        DataLoader(range(length), batch_size=8),
        state.device,
        state.num_processes,
        state.process_index,
        put_on_device=True,
    )
    result = collect(loader)

    print(state.process_index, result, type(loader))
    assert torch.equal(result.cpu(), torch.arange(0, length).long()), "Wrong non-shuffled dataloader result."

    # Same check with batches split across processes instead of sharded.
    loader = prepare_data_loader(
        DataLoader(range(length), batch_size=8),
        state.device,
        state.num_processes,
        state.process_index,
        put_on_device=True,
        split_batches=True,
    )
    result = collect(loader)
    assert torch.equal(result.cpu(), torch.arange(0, length).long()), "Wrong non-shuffled dataloader result."

    if state.process_index == 0:
        print("Non-shuffled dataloader passing.")

    # Shuffled: order is arbitrary but the multiset of values must be complete.
    loader = prepare_data_loader(
        DataLoader(range(length), batch_size=8, shuffle=True),
        state.device,
        state.num_processes,
        state.process_index,
        put_on_device=True,
    )
    values = collect(loader).tolist()
    values.sort()
    assert values == list(range(length)), "Wrong shuffled dataloader result."

    loader = prepare_data_loader(
        DataLoader(range(length), batch_size=8, shuffle=True),
        state.device,
        state.num_processes,
        state.process_index,
        put_on_device=True,
        split_batches=True,
    )
    values = collect(loader).tolist()
    values.sort()
    assert values == list(range(length)), "Wrong shuffled dataloader result."

    if state.local_process_index == 0:
        print("Shuffled dataloader passing.")
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
def central_dl_preparation_check():
    """Same checks as `dl_preparation_check`, with centrally dispatched batches."""
    state = AcceleratorState()
    length = 32 * state.num_processes

    def collect(loader):
        # Gather every dispatched batch from all processes into one flat tensor.
        return torch.cat([gather(batch) for batch in loader])

    loader = prepare_data_loader(
        DataLoader(range(length), batch_size=8),
        state.device,
        state.num_processes,
        state.process_index,
        put_on_device=True,
        dispatch_batches=True,
    )
    result = collect(loader)
    assert torch.equal(result.cpu(), torch.arange(0, length).long()), "Wrong non-shuffled dataloader result."

    loader = prepare_data_loader(
        DataLoader(range(length), batch_size=8),
        state.device,
        state.num_processes,
        state.process_index,
        put_on_device=True,
        split_batches=True,
        dispatch_batches=True,
    )
    result = collect(loader)
    assert torch.equal(result.cpu(), torch.arange(0, length).long()), "Wrong non-shuffled dataloader result."

    if state.process_index == 0:
        print("Non-shuffled central dataloader passing.")

    # Shuffled: order is arbitrary but every value must appear exactly once.
    loader = prepare_data_loader(
        DataLoader(range(length), batch_size=8, shuffle=True),
        state.device,
        state.num_processes,
        state.process_index,
        put_on_device=True,
        dispatch_batches=True,
    )
    values = collect(loader).tolist()
    values.sort()
    assert values == list(range(length)), "Wrong shuffled dataloader result."

    loader = prepare_data_loader(
        DataLoader(range(length), batch_size=8, shuffle=True),
        state.device,
        state.num_processes,
        state.process_index,
        put_on_device=True,
        split_batches=True,
        dispatch_batches=True,
    )
    values = collect(loader).tolist()
    values.sort()
    assert values == list(range(length)), "Wrong shuffled dataloader result."

    if state.local_process_index == 0:
        print("Shuffled central dataloader passing.")
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
def mock_training(length, batch_size, generator):
    """Train a RegressionModel for 3 epochs without Accelerate.

    Seeds everything to 42 so the result is a deterministic reference, and
    returns ``(train_set, model)`` for distributed runs to compare against.
    """
    set_seed(42)
    generator.manual_seed(42)
    train_set = RegressionDataset(length=length, seed=42)
    loader = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)
    model = RegressionModel()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    for _ in range(3):
        for batch in loader:
            model.zero_grad()
            prediction = model(batch["x"])
            loss = torch.nn.functional.mse_loss(prediction, batch["y"])
            loss.backward()
            optimizer.step()
    return train_set, model
|
| 306 |
+
|
| 307 |
+
|
| 308 |
+
def _train_with_accelerator(accelerator, train_set, batch_size, generator):
    """Prepare a fresh model/optimizer/dataloader with `accelerator`, train it
    for 3 epochs (seeded to 42, matching `mock_training`), and return the
    unwrapped model moved to CPU for comparison against the reference run."""
    train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)
    model = RegressionModel()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

    train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer)
    set_seed(42)
    generator.manual_seed(42)
    for _ in range(3):
        for batch in train_dl:
            model.zero_grad()
            output = model(batch["x"])
            loss = torch.nn.functional.mse_loss(output, batch["y"])
            accelerator.backward(loss)
            optimizer.step()
    return accelerator.unwrap_model(model).cpu()


def training_check():
    """End-to-end training parity checks against a single-process reference run.

    Trains the same seeded model under several Accelerator configurations
    (plain, split batches, fp16, bf16, ipex, xpu) and asserts the learned
    parameters match the non-distributed reference from `mock_training`.
    """
    state = AcceleratorState()
    generator = torch.Generator()
    batch_size = 8
    length = batch_size * 4 * state.num_processes

    # Reference: plain training on the full (per-step) batch size.
    train_set, old_model = mock_training(length, batch_size * state.num_processes, generator)
    assert are_the_same_tensors(old_model.a), "Did not obtain the same model on both processes."
    assert are_the_same_tensors(old_model.b), "Did not obtain the same model on both processes."

    accelerator = Accelerator()
    model = _train_with_accelerator(accelerator, train_set, batch_size, generator)
    assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training."
    assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training."

    accelerator.print("Training yielded the same results on one CPU or distributed setup with no batch split.")

    # With split_batches the dataloader carries the full batch, split per rank.
    accelerator = Accelerator(split_batches=True)
    model = _train_with_accelerator(accelerator, train_set, batch_size * state.num_processes, generator)
    assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training."
    assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training."

    # Fixed "distributes setup" typo from the original message.
    accelerator.print("Training yielded the same results on one CPU or distributed setup with batch split.")

    if torch.cuda.is_available() or is_npu_available():
        # Mostly a test that FP16 doesn't crash as the operation inside the model is not converted to FP16
        print("FP16 training check.")
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16")
        model = _train_with_accelerator(accelerator, train_set, batch_size, generator)
        assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training."
        assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training."

    if torch.cuda.is_available():
        # Mostly a test that model.forward will have autocast when running unwrap_model(model, keep_fp32_wrapper=True)
        print("Keep fp32 wrapper check.")
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16")

        model = torch.nn.Linear(2, 4)
        model = accelerator.prepare(model)
        model_with_fp32_wrapper = accelerator.unwrap_model(model, keep_fp32_wrapper=True)

        # Run forward with fp16 as input.
        # When the model is with mixed precision wrapper, no error will be raised.
        input_tensor = torch.Tensor([1, 2]).to(dtype=torch.float16, device=accelerator.device)
        output = model_with_fp32_wrapper(input_tensor)

    # BF16 support is only for CPU + TPU, and some GPU
    if is_bf16_available():
        # Mostly a test that BF16 doesn't crash as the operation inside the model is not converted to BF16
        print("BF16 training check.")
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="bf16")
        model = _train_with_accelerator(accelerator, train_set, batch_size, generator)
        assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training."
        assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training."

    # IPEX support is only for CPU
    if is_ipex_available():
        print("ipex BF16 training check.")
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="bf16", cpu=True)
        model = _train_with_accelerator(accelerator, train_set, batch_size, generator)
        assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training."
        assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training."

    # XPU support is only for XPU
    if is_xpu_available():
        print("xpu BF16 training check.")
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="bf16", cpu=False)
        model = _train_with_accelerator(accelerator, train_set, batch_size, generator)
        assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on XPU or distributed training."
        assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on XPU or distributed training."
|
| 473 |
+
|
| 474 |
+
|
| 475 |
+
def test_split_between_processes_list():
    """Splitting a plain list across processes, evenly and with padding."""
    state = AcceleratorState()
    # Exactly two items per process when split evenly.
    items = list(range(0, 2 * state.num_processes))
    with state.split_between_processes(items) as shard:
        assert (
            len(shard) == 2
        ), f"Each process did not have two items. Process index: {state.process_index}; Length: {len(shard)}"

    # One item short of an even split; padding should fill the gap.
    items = list(range(0, (3 * state.num_processes) - 1))
    with state.split_between_processes(items, apply_padding=True) as shard:
        if state.is_last_process:
            # The last process should receive the extra (padded) item(s).
            expected_len = math.ceil(len(items) / state.num_processes)
            assert (
                len(shard) == expected_len
            ), f"Last process did not get the extra item(s). Process index: {state.process_index}; Length: {len(shard)}"
    state.wait_for_everyone()
|
| 492 |
+
|
| 493 |
+
|
| 494 |
+
def test_split_between_processes_nested_dict():
    """Splitting a dict of lists/tensors: each process gets its slice per key."""
    state = AcceleratorState()
    a = [1, 2, 3, 4, 5, 6, 7, 8]
    b = ["a", "b", "c", "d", "e", "f", "g", "h"]
    c = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8])
    # Only world sizes that divide 8 evenly are checked.
    if state.num_processes in (1, 2, 4):
        payload = {"a": a, "b": b, "c": c}
        reference = deepcopy(payload)
        # NOTE(review): the `elif state.num_processes == 2` branches (rather
        # than `process_index == 1`) and the "process 2"/"process 4" message
        # labels look inconsistent but are preserved as-is — confirm upstream.
        with state.split_between_processes(payload) as shard:
            if state.process_index == 0:
                assert shard["a"] == reference["a"][: 8 // state.num_processes]
            elif state.num_processes == 2:
                assert shard["a"] == reference["a"][4:]
            elif state.process_index == 3:
                # We return a list each time
                assert shard["a"] == reference["a"][-2:], f'Expected: {reference["a"][-2]}, Actual: {shard["a"]}'
            if state.process_index == 0:
                assert shard["b"] == reference["b"][: 8 // state.num_processes]
            elif state.num_processes == 2:
                assert shard["b"] == reference["b"][4:]
            elif state.process_index == 3:
                assert shard["b"] == reference["b"][-2:]
            if state.process_index == 0:
                assert torch.allclose(
                    shard["c"], reference["c"][: 8 // state.num_processes]
                ), f"Did not obtain expected values on process 0, expected `{payload['c'][:8 // state.num_processes]}`, received: {shard['c']}"
            elif state.num_processes == 2:
                assert torch.allclose(
                    shard["c"], reference["c"][4:]
                ), f"Did not obtain expected values on process 2, expected `{payload['c'][4:]}`, received: {shard['c']}"
            elif state.process_index == 3:
                assert torch.allclose(
                    shard["c"], reference["c"][-2:]
                ), f"Did not obtain expected values on process 4, expected `{payload['c'][-2:]}`, received: {shard['c']}"

        state.wait_for_everyone()
|
| 530 |
+
|
| 531 |
+
|
| 532 |
+
def test_split_between_processes_tensor():
    """Splitting a 2-row tensor across processes: each rank gets one row."""
    state = AcceleratorState()
    if state.num_processes > 1:
        matrix = torch.tensor([[0, 1, 2, 3], [4, 5, 6, 7]]).to(state.device)
        with state.split_between_processes(matrix) as shard:
            if state.process_index == 0:
                expected = torch.tensor([0, 1, 2, 3]).to(state.device)
            else:
                expected = torch.tensor([4, 5, 6, 7]).to(state.device)
            assert torch.allclose(shard, expected)
    state.wait_for_everyone()
|
| 542 |
+
|
| 543 |
+
|
| 544 |
+
def test_trigger():
    """A trigger set on one rank must be observed (then cleared) by all ranks."""
    acc = Accelerator()
    # Should start out unset.
    assert acc.check_trigger() is False

    # Set a breakpoint on the main process only.
    if acc.is_main_process:
        acc.set_trigger()

    # `check_trigger` performs an `all_reduce`, so every rank sees the flag.
    assert acc.check_trigger() is True

    # The same call resets the flag after the sync.
    assert acc.check_trigger() is False
|
| 559 |
+
|
| 560 |
+
|
| 561 |
+
def main():
    """Run the distributed-state sanity suite; ordering matters for process synchronization."""
    accelerator = Accelerator()
    state = accelerator.state
    is_local_main = state.local_process_index == 0
    is_global_main = state.process_index == 0

    if is_local_main:
        print("**Initialization**")
    init_state_check()
    state.wait_for_everyone()

    if state.distributed_type == DistributedType.MULTI_GPU:
        num_processes_per_node = torch.cuda.device_count()
    else:
        num_processes_per_node = state.num_processes

    # We only run this test on non-multinode
    if num_processes_per_node == state.num_processes:
        for banner, check in (
            ("\n**Test process execution**", process_execution_check),
            ("\n**Test split between processes as a list**", test_split_between_processes_list),
            ("\n**Test split between processes as a dict**", test_split_between_processes_nested_dict),
            ("\n**Test split between processes as a tensor**", test_split_between_processes_tensor),
        ):
            if is_global_main:
                print(banner)
            check()

    if is_local_main:
        print("\n**Test random number generator synchronization**")
    rng_sync_check()

    if is_local_main:
        print("\n**DataLoader integration test**")
    dl_preparation_check()
    if state.distributed_type != DistributedType.TPU:
        central_dl_preparation_check()

    # Trainings are not exactly the same in DeepSpeed and CPU mode
    if state.distributed_type == DistributedType.DEEPSPEED:
        return

    if is_local_main:
        print("\n**Training integration test**")
    training_check()

    if is_local_main:
        print("\n**Breakpoint trigger test**")
    test_trigger()
|
| 613 |
+
|
| 614 |
+
|
| 615 |
+
# Allow running the test script directly (it is normally launched via `accelerate launch`).
if __name__ == "__main__":
    main()
|
evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_sync.py
ADDED
|
@@ -0,0 +1,367 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2022 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
from copy import deepcopy
|
| 16 |
+
|
| 17 |
+
import torch
|
| 18 |
+
import torch.nn.functional as F
|
| 19 |
+
from torch.optim import AdamW
|
| 20 |
+
from torch.optim.lr_scheduler import LambdaLR
|
| 21 |
+
from torch.utils.data import DataLoader
|
| 22 |
+
|
| 23 |
+
from accelerate.accelerator import Accelerator
|
| 24 |
+
from accelerate.state import GradientState
|
| 25 |
+
from accelerate.test_utils import RegressionDataset, RegressionModel
|
| 26 |
+
from accelerate.utils import DistributedType, is_torch_version, set_seed
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def check_model_parameters(model_a, model_b, did_step, iteration):
    """Assert that gradients of the two models are in sync iff an optimizer step happened.

    Parameters without `requires_grad` are ignored. Raises `AssertionError` on mismatch.
    """
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        grads_match = torch.allclose(param.grad, grad_param.grad)
        if did_step:
            # Grads should be in sync
            assert (
                grads_match
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
        else:
            # Grads should not be in sync
            assert (
                not grads_match
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def step_model(model, input, target, accelerator, do_backward=True):
    """Run one forward/backward pass of `model` on `(input, target)` with MSE loss.

    When `do_backward` is False the loss is scaled by the accumulation steps and
    backpropagated manually instead of going through `accelerator.backward`.
    """
    model.train()
    prediction = model(input)
    loss = F.mse_loss(prediction, target.to(prediction.device))
    if do_backward:
        accelerator.backward(loss)
    else:
        # Manual gradient-accumulation path: scale the loss ourselves.
        loss = loss / accelerator.gradient_accumulation_steps
        loss.backward()
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def get_training_setup(accelerator, sched=False):
    """Return everything needed for a basic training run.

    With `sched=False` returns `(model, ddp_model, dataloader)`; with `sched=True`
    returns `(model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)`, where
    the `ddp_*` objects have been passed through `accelerator.prepare`.
    """
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)  # identical starting weights for the "DDP" copy
    dataloader = DataLoader(RegressionDataset(length=80), batch_size=16)
    model.to(accelerator.device)
    if not sched:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
        return model, ddp_model, dataloader
    opt = AdamW(params=model.parameters(), lr=1e-3)
    ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
    sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
    ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def test_noop_sync(accelerator):
    """On a single CPU/GPU, `no_sync` must be a no-op: grads stay in sync every step."""
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Reuse a single batch throughout.
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs/targets so the reference model sees the full batch.
        full_input, full_target = accelerator.gather((ddp_input, ddp_target))
        full_input = full_input.to(accelerator.device)
        full_target = full_target.to(accelerator.device)
        # Ground-truth step outside "DDP".
        step_model(model, full_input, full_target, accelerator)
        # Alternate between "accumulating" under `no_sync` and a plain step.
        if iteration % 2 == 0:
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop here, grads must always match the reference model.
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def test_distributed_sync(accelerator):
    """In a real distributed setup, `no_sync` must hold grads out of sync until a synced step."""
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Reuse a single batch throughout.
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs/targets so the reference model sees the full batch.
        full_input, full_target = accelerator.gather((ddp_input, ddp_target))
        full_input = full_input.to(accelerator.device)
        full_target = full_target.to(accelerator.device)
        # Ground-truth step outside "DDP".
        step_model(model, full_input, full_target, accelerator)
        # Even iterations accumulate under `no_sync`; odd iterations sync.
        accumulating = iteration % 2 == 0
        if accumulating:
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Grads must match the reference model exactly when (and only when) we synced.
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            in_sync = torch.allclose(param.grad, ddp_param.grad)
            if accumulating:
                assert (
                    not in_sync
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                assert (
                    in_sync
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
def test_distributed_sync_multiple_fwd(accelerator):
    """Several forwards queued under `no_sync`: grads may only sync on the final backward."""
    model, ddp_model, dataloader = get_training_setup(accelerator)
    losses = []
    num_iterations = 3
    # Queue up forwards without any backward (note: `next(iter(...))` yields the same first batch each time).
    for _ in range(num_iterations):
        ddp_input, ddp_target = next(iter(dataloader)).values()

        full_input, full_target = accelerator.gather((ddp_input, ddp_target))
        full_input = full_input.to(accelerator.device)
        full_target = full_target.to(accelerator.device)

        # Ground-truth step outside "DDP".
        step_model(model, full_input, full_target, accelerator)

        # Forward only, under `no_sync`, keeping the loss for later backwards.
        with accelerator.no_sync(ddp_model):
            ddp_output = ddp_model(ddp_input)
            losses.append(F.mse_loss(ddp_output, ddp_target.to(ddp_output.device)))

    # Run the backwards; only the last one is allowed to trigger the sync.
    for step, loss in enumerate(losses):
        if step < num_iterations - 1:
            accelerator.backward(loss)

            for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
                if not param.requires_grad:
                    continue
                assert not torch.allclose(
                    param.grad, ddp_param.grad
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
        else:
            with accelerator.trigger_sync_in_backward(ddp_model):
                accelerator.backward(loss)

            for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
                if not param.requires_grad:
                    continue
                assert torch.allclose(
                    param.grad, ddp_param.grad
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    """`accelerator.accumulate` must sync grads every 2nd step (and on the final batch)."""
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs/targets so the reference model sees the full batch.
        full_input, full_target = accelerator.gather((ddp_input, ddp_target))
        full_input = full_input.to(accelerator.device)
        full_target = full_target.to(accelerator.device)
        # Ground-truth step outside "DDP" with manual loss scaling.
        step_model(model, full_input, full_target, accelerator, False)
        # "DDP" step under the accumulation wrapper.
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Sync is expected every second step and on the last batch of the dataloader.
        expect_sync = ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            in_sync = torch.allclose(param.grad, ddp_param.grad)
            if expect_sync:
                assert (
                    in_sync
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                assert (
                    not in_sync
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    """Check `accelerator.accumulate` against a manually-stepped optimizer/scheduler pair.

    The reference (`model`/`opt`/`sched`) is stepped by hand every iteration, while the
    prepared copy (`ddp_*`) is stepped under `accelerator.accumulate`; learning rates must
    stay aligned and (in multi-process runs) gradients must (de)sync at the right steps.
    """
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()

        # Reference scheduler only advances on "real" (non-accumulating) steps.
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                # One scheduler step per process mirrors what the prepared scheduler does internally.
                for _ in range(accelerator.num_processes):
                    sched.step()
            opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
|
| 286 |
+
|
| 287 |
+
|
| 288 |
+
def test_dataloader_break():
    """`GradientState` must track which dataloader is active, even when iteration nests."""
    accelerator = Accelerator()

    outer_dset = RegressionDataset(length=80)
    inner_dset = RegressionDataset(length=96)
    outer_loader, inner_loader = accelerator.prepare(
        DataLoader(outer_dset, batch_size=16), DataLoader(inner_dset, batch_size=16)
    )
    gradient_state = accelerator.gradient_state
    assert gradient_state.active_dataloader is None
    for iteration, _ in enumerate(outer_loader):
        assert id(gradient_state.active_dataloader) == id(outer_loader)
        if iteration < len(outer_loader) - 1:
            assert not gradient_state.end_of_dataloader
            if iteration == 1:
                # Fully consume a second dataloader mid-way through the first one.
                for batch_num, _ in enumerate(inner_loader):
                    assert id(gradient_state.active_dataloader) == id(inner_loader)
                    if batch_num < len(inner_loader) - 1:
                        assert not gradient_state.end_of_dataloader
                    else:
                        assert gradient_state.end_of_dataloader
        else:
            assert gradient_state.end_of_dataloader
    assert gradient_state.active_dataloader is None
|
| 311 |
+
|
| 312 |
+
|
| 313 |
+
def main():
    """Drive the gradient-synchronization tests, selecting cases by distributed setup."""
    accelerator = Accelerator()
    state = accelerator.state
    is_local_main = state.local_process_index == 0
    multi_device = (DistributedType.MULTI_GPU, DistributedType.MULTI_NPU)

    if is_local_main:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()

    if state.distributed_type == DistributedType.NO:
        if is_local_main:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)

    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_CPU):
        if is_local_main:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
        if is_local_main:
            print("**Test Distributed `no_sync` context manager with multiple forwards**")
        test_distributed_sync_multiple_fwd(accelerator)

    if state.distributed_type in multi_device:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if is_local_main:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if is_local_main:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
        if state.distributed_type in multi_device:
            for split_batch in [True, False]:
                for dispatch_batches in [True, False]:
                    if not split_batch and not dispatch_batches:
                        continue
                    if is_local_main:
                        print(
                            "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                            f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                        )
                    test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
|
| 359 |
+
|
| 360 |
+
|
| 361 |
+
def _mp_fn(index):
    """Entry point for `xla_spawn` (TPUs); `index` is the spawned process ordinal (unused)."""
    main()
|
| 364 |
+
|
| 365 |
+
|
| 366 |
+
# Allow running the test script directly (it is normally launched via `accelerate launch`).
if __name__ == "__main__":
    main()
|
evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/testing.py
ADDED
|
@@ -0,0 +1,452 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2021 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import asyncio
|
| 16 |
+
import os
|
| 17 |
+
import shutil
|
| 18 |
+
import subprocess
|
| 19 |
+
import sys
|
| 20 |
+
import tempfile
|
| 21 |
+
import unittest
|
| 22 |
+
from contextlib import contextmanager
|
| 23 |
+
from functools import partial
|
| 24 |
+
from pathlib import Path
|
| 25 |
+
from typing import List, Union
|
| 26 |
+
from unittest import mock
|
| 27 |
+
|
| 28 |
+
import torch
|
| 29 |
+
|
| 30 |
+
from ..state import AcceleratorState, PartialState
|
| 31 |
+
from ..utils import (
|
| 32 |
+
gather,
|
| 33 |
+
is_bnb_available,
|
| 34 |
+
is_comet_ml_available,
|
| 35 |
+
is_datasets_available,
|
| 36 |
+
is_deepspeed_available,
|
| 37 |
+
is_mps_available,
|
| 38 |
+
is_safetensors_available,
|
| 39 |
+
is_tensorboard_available,
|
| 40 |
+
is_timm_available,
|
| 41 |
+
is_torch_version,
|
| 42 |
+
is_tpu_available,
|
| 43 |
+
is_transformers_available,
|
| 44 |
+
is_wandb_available,
|
| 45 |
+
is_xpu_available,
|
| 46 |
+
str_to_bool,
|
| 47 |
+
)
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def parse_flag_from_env(key, default=False):
    """Read boolean flag `key` from the environment, returning `default` when unset.

    Raises `ValueError` when the variable is set to something `str_to_bool` rejects.
    """
    if key not in os.environ:
        # KEY isn't set, default to `default`.
        return default
    # KEY is set, convert it to True or False.
    try:
        return str_to_bool(os.environ[key])
    except ValueError:
        # More values are supported, but let's keep the message simple.
        raise ValueError(f"If set, {key} must be yes or no.")
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
# Module-level switch: tests marked `@slow` only run when the RUN_SLOW env var is truthy.
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def skip(test_case):
    """Decorator that skips a test unconditionally."""
    decorator = unittest.skip("Test was skipped")
    return decorator(test_case)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def slow(test_case):
    """
    Decorator marking a test as slow. Slow tests are skipped by default. Set the RUN_SLOW environment variable to a
    truthy value to run them.
    """
    decorator = unittest.skipUnless(_run_slow_tests, "test is slow")
    return decorator(test_case)
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def require_cpu(test_case):
    """
    Decorator marking a test that must be only ran on the CPU. These tests are skipped when a GPU is available.
    """
    no_gpu = not torch.cuda.is_available()
    return unittest.skipUnless(no_gpu, "test requires only a CPU")(test_case)
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def require_cuda(test_case):
    """
    Decorator marking a test that requires CUDA. These tests are skipped when there are no GPU available.
    """
    decorator = unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")
    return decorator(test_case)
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def require_xpu(test_case):
    """
    Decorator marking a test that requires XPU. These tests are skipped when there are no XPU available.
    """
    decorator = unittest.skipUnless(is_xpu_available(), "test requires a XPU")
    return decorator(test_case)
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def require_mps(test_case):
    """
    Decorator marking a test that requires MPS backend. These tests are skipped when torch doesn't support `mps`
    backend.
    """
    decorator = unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")
    return decorator(test_case)
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def require_huggingface_suite(test_case):
    """
    Decorator marking a test that requires transformers and datasets. These tests are skipped when they are not.
    """
    suite_ready = is_transformers_available() and is_datasets_available()
    return unittest.skipUnless(suite_ready, "test requires the Hugging Face suite")(test_case)
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def require_transformers(test_case):
    """
    Decorator marking a test that requires transformers. These tests are skipped when they are not.
    """
    decorator = unittest.skipUnless(is_transformers_available(), "test requires the transformers library")
    return decorator(test_case)
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
def require_timm(test_case):
    """
    Decorator marking a test that requires timm. These tests are skipped when timm is not installed.
    """
    return unittest.skipUnless(is_timm_available(), "test requires the timm library")(test_case)
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
def require_bnb(test_case):
    """
    Decorator marking a test that requires bitsandbytes. These tests are skipped when they are not.
    """
    decorator = unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")
    return decorator(test_case)
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
def require_tpu(test_case):
    """
    Decorator marking a test that requires TPUs. These tests are skipped when there are no TPUs available.
    """
    decorator = unittest.skipUnless(is_tpu_available(), "test requires TPU")
    return decorator(test_case)
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
def require_single_gpu(test_case):
    """
    Decorator marking a test that requires CUDA on a single GPU. These tests are skipped when there are no GPU
    available or number of GPUs is more than one.
    """
    exactly_one_gpu = torch.cuda.device_count() == 1
    return unittest.skipUnless(exactly_one_gpu, "test requires a GPU")(test_case)
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
def require_single_xpu(test_case):
    """
    Decorator marking a test that requires a single XPU. These tests are skipped when there is no XPU
    available or when more than one XPU is present.
    """
    # Fixed: docstring previously said "requires CUDA on a single XPU"; this decorator gates on XPUs only.
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
def require_multi_gpu(test_case):
    """
    Decorator marking a test that requires a multi-GPU setup. These tests are skipped on a machine without
    multiple GPUs.
    """
    has_multiple_gpus = torch.cuda.device_count() > 1
    return unittest.skipUnless(has_multiple_gpus, "test requires multiple GPUs")(test_case)
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
def require_multi_xpu(test_case):
    """
    Decorator marking a test that requires a multi-XPU setup. These tests are skipped on a machine without
    multiple XPUs.
    """
    has_multiple_xpus = torch.xpu.device_count() > 1
    return unittest.skipUnless(has_multiple_xpus, "test requires multiple XPUs")(test_case)
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
def require_safetensors(test_case):
    """
    Decorator marking a test that requires safetensors installed. These tests are skipped when safetensors
    is not installed.
    """
    skip_unless_available = unittest.skipUnless(is_safetensors_available(), "test requires safetensors")
    return skip_unless_available(test_case)
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
def require_deepspeed(test_case):
    """
    Decorator marking a test that requires DeepSpeed installed. These tests are skipped when DeepSpeed is not
    installed.
    """
    skip_unless_available = unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")
    return skip_unless_available(test_case)
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
def require_fsdp(test_case):
    """
    Decorator marking a test that requires FSDP support, i.e. torch version >= 1.12.0. These tests are
    skipped on older torch versions.
    """
    # Fixed: docstring previously claimed it checks that "FSDP is installed"; FSDP ships with torch,
    # so the actual gate (below) is the torch version.
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
def require_torch_min_version(test_case=None, version=None):
    """
    Decorator marking that a test requires a particular torch version to be tested. These tests are skipped
    when the installed torch version is less than the required one.
    """
    # Called as `@require_torch_min_version(version=...)`: defer until the test case is supplied.
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    message = f"test requires torch version >= {version}"
    return unittest.skipUnless(is_torch_version(">=", version), message)(test_case)
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
def require_tensorboard(test_case):
    """
    Decorator marking a test that requires tensorboard installed. These tests are skipped when tensorboard
    is not installed.
    """
    skip_unless_available = unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")
    return skip_unless_available(test_case)
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
def require_wandb(test_case):
    """
    Decorator marking a test that requires wandb installed. These tests are skipped when wandb is not installed.
    """
    skip_unless_available = unittest.skipUnless(is_wandb_available(), "test requires wandb")
    return skip_unless_available(test_case)
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+
def require_comet_ml(test_case):
    """
    Decorator marking a test that requires comet_ml installed. These tests are skipped when comet_ml is not
    installed.
    """
    skip_unless_available = unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")
    return skip_unless_available(test_case)
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
# True when at least one of wandb/tensorboard is importable AND comet_ml is not installed.
# NOTE(review): comet_ml presence disqualifies the machine — presumably because it interferes with
# the other trackers in these tests (see the skip reason in `require_trackers`) — confirm.
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
def require_trackers(test_case):
    """
    Decorator marking that a test requires at least one tracking library installed. These tests are skipped
    when none are installed.
    """
    reason = "test requires at least one tracker to be available and for `comet_ml` to not be installed"
    return unittest.skipUnless(_atleast_one_tracker_available, reason)(test_case)
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
class TempDirTestCase(unittest.TestCase):
    """
    A TestCase class that keeps a single `tempfile.TemporaryDirectory` open for the duration of the class,
    wipes its data at the start of a test, and then destroys it at the end of the TestCase.

    Useful for when a class or API requires a single constant folder throughout its use, such as
    Weights and Biases.

    The temporary directory location will be stored in `self.tmpdir`.
    """

    # When True (the default), `setUp` empties `tmpdir` before each test.
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        "Creates a `tempfile.TemporaryDirectory` and stores it in `cls.tmpdir`"
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        "Remove `cls.tmpdir` after test suite has finished"
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        "Destroy all contents in `self.tmpdir`, but not `self.tmpdir`"
        if not self.clear_on_setup:
            return
        for entry in Path(self.tmpdir).glob("**/*"):
            if entry.is_dir():
                shutil.rmtree(entry)
            elif entry.is_file():
                entry.unlink()
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
class AccelerateTestCase(unittest.TestCase):
    """
    A TestCase class that will reset the accelerator state at the end of every test. Every test that checks
    or utilizes the `AcceleratorState` class should inherit from this to avoid silent failures due to state
    being shared between tests.
    """

    def tearDown(self):
        super().tearDown()
        # Wipe both singletons so the next test starts from a clean slate.
        for state_cls in (AcceleratorState, PartialState):
            state_cls._reset_state()
|
| 295 |
+
|
| 296 |
+
|
| 297 |
+
class MockingTestCase(unittest.TestCase):
    """
    A TestCase class designed to dynamically add various mockers that should be used in every test,
    mimicking the behavior of a class-wide mock when defining one normally will not do.

    Useful when a mock requires specific information available only initialized after `TestCase.setUpClass`,
    such as setting an environment variable with that information.

    The `add_mocks` function should be ran at the end of a `TestCase`'s `setUp` function, after a call to
    `super().setUp()` such as:
    ```python
    def setUp(self):
        super().setUp()
        mocks = mock.patch.dict(os.environ, {"SOME_ENV_VAR", "SOME_VALUE"})
        self.add_mocks(mocks)
    ```
    """

    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        """
        Add custom mocks for tests that should be repeated on each test. Should be called during
        `MockingTestCase.setUp`, after `super().setUp()`.

        Args:
            mocks (`mock.Mock` or list of `mock.Mock`):
                Mocks that should be added to the `TestCase` after `TestCase.setUpClass` has been run
        """
        if not isinstance(mocks, (tuple, list)):
            mocks = [mocks]
        self.mocks = mocks
        for mocker in self.mocks:
            mocker.start()
            # Ensure every started mock is stopped when the test finishes, pass or fail.
            self.addCleanup(mocker.stop)
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
def are_the_same_tensors(tensor):
    """
    Gather `tensor` across processes and return True if every gathered slice equals the local copy.
    """
    state = AcceleratorState()
    local = tensor[None].clone().to(state.device)
    gathered = gather(local).cpu()
    reference = local[0].cpu()
    return all(torch.equal(gathered[i], reference) for i in range(gathered.shape[0]))
|
| 339 |
+
|
| 340 |
+
|
| 341 |
+
class _RunOutput:
    """Container for a finished subprocess: exit code plus captured stdout/stderr line lists."""

    def __init__(self, returncode, stdout, stderr):
        # Exit status of the process.
        self.returncode = returncode
        # Captured stdout lines (decoded strings; see `_stream_subprocess.tee`).
        self.stdout = stdout
        # Captured stderr lines (decoded strings).
        self.stderr = stderr
|
| 346 |
+
|
| 347 |
+
|
| 348 |
+
async def _read_stream(stream, callback):
    """Feed every line read from `stream` to `callback`, stopping at EOF (empty read)."""
    while True:
        chunk = await stream.readline()
        if not chunk:
            break
        callback(chunk)
|
| 355 |
+
|
| 356 |
+
|
| 357 |
+
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    """
    Run `cmd` as an asyncio subprocess, capturing stdout/stderr line-by-line while (unless `quiet`)
    mirroring them to this process's own streams with "stdout:"/"stderr:" labels.

    Args:
        cmd: Argument list; `cmd[0]` is the executable.
        env: Optional environment mapping for the child process.
        stdin: Optional stdin passed to the child process.
        timeout: Forwarded to `asyncio.wait` while draining the pipes (see XXX note below).
        quiet: When True, captured lines are not echoed to sys.stdout/sys.stderr.
        echo: When True, print the command line before running it.

    Returns:
        `_RunOutput` with the exit code and the captured stdout/stderr lines.
    """
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        # Decode, strip the trailing newline, record the line, and optionally mirror it to our own stream.
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
|
| 396 |
+
|
| 397 |
+
|
| 398 |
+
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    """
    Run `cmd` through `_stream_subprocess` on the event loop, raising a `RuntimeError` that includes the
    combined worker stderr when the command exits with a non-zero return code.
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    if result.returncode > 0:
        cmd_str = " ".join(cmd)
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )
    return result
|
| 413 |
+
|
| 414 |
+
|
| 415 |
+
class SubprocessCallException(Exception):
    """Raised by `run_command` when the spawned command exits with an error."""

    pass
|
| 417 |
+
|
| 418 |
+
|
| 419 |
+
def run_command(command: List[str], return_stdout=False):
    """
    Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also
    properly capture if an error occurred while running `command`.
    """
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        # Surface the failing command and its combined output in a single, readable exception.
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
    if not return_stdout:
        return None
    if hasattr(output, "decode"):
        output = output.decode("utf-8")
    return output
|
| 434 |
+
|
| 435 |
+
|
| 436 |
+
@contextmanager
def assert_exception(exception_class: type, msg: str = None):
    """
    Context manager to assert that the right `Exception` class was raised.

    If `msg` is provided, will check that the message is contained in the raised exception.

    Args:
        exception_class (`type`): The exception class expected to be raised inside the block.
        msg (`str`, *optional*): Substring that must appear in the raised exception's message.

    Raises:
        AssertionError: If no exception is raised, the wrong type is raised, or `msg` is not found in
            the exception message.
    """
    # Fixed: the annotations previously declared `exception_class: Exception` (an instance, not the
    # class object callers actually pass) and `-> bool` (the function yields nothing and returns None).
    was_ran = False
    try:
        yield
        # Only reached when the block completed without raising.
        was_ran = True
    except Exception as e:
        assert isinstance(e, exception_class), f"Expected exception of type {exception_class} but got {type(e)}"
        if msg is not None:
            assert msg in str(e), f"Expected message '{msg}' to be in exception but got '{str(e)}'"
    if was_ran:
        raise AssertionError(f"Expected exception of type {exception_class} but ran without issue.")
|
evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/training.py
ADDED
|
@@ -0,0 +1,101 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2021 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import numpy as np
|
| 16 |
+
import torch
|
| 17 |
+
from torch.utils.data import DataLoader
|
| 18 |
+
|
| 19 |
+
from accelerate.utils.dataclasses import DistributedType
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class RegressionDataset:
    """
    Synthetic dataset of `length` noisy float32 samples on the line ``y = a * x + b``.

    Args:
        a, b: Line coefficients.
        length: Number of samples.
        seed: Optional seed for reproducible sampling.
    """

    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        # Draw x first, then noise, so the sampling order (and thus the data) is seed-stable.
        self.x = rng.normal(size=(length,)).astype(np.float32)
        noise = rng.normal(scale=0.1, size=(length,)).astype(np.float32)
        self.y = a * self.x + b + noise

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        sample = {"x": self.x[i], "y": self.y[i]}
        return sample
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class RegressionModel4XPU(torch.nn.Module):
    """
    Linear model used in XPU tests. Note: the `a`/`b` constructor arguments are ignored — both parameters
    are always initialized to the tensor [2., 3.], and only index 0 of each is used in `forward`.
    """

    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        # Tracks whether `forward` has run yet, so dtypes are logged exactly once.
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return self.a[0] * x + self.b[0]
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
class RegressionModel(torch.nn.Module):
    """Minimal linear model ``y = a * x + b`` with scalar learnable parameters initialized from `a`, `b`."""

    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        # Tracks whether `forward` has run yet, so dtypes are logged exactly once.
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return self.a * x + self.b
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def mocked_dataloaders(accelerator, batch_size: int = 16):
    """
    Build train/eval `DataLoader`s over the bundled MRPC CSV samples, tokenized with `bert-base-cased`.

    NOTE(review): the `batch_size` argument is currently unused — the dataloaders below hardcode batch
    sizes of 2 (train) and 1 (eval); confirm whether this is intentional.

    Args:
        accelerator: Only its `distributed_type` is read, to select TPU-specific padding in `collate_fn`.
        batch_size (`int`, *optional*, defaults to 16): Currently ignored (see note above).

    Returns:
        A `(train_dataloader, eval_dataloader)` tuple.
    """
    # Imported lazily so this module can be imported without transformers/datasets installed.
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    # Map each distinct label value to an integer id.
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
|