diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/__pycache__/__init__.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/sympy/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a15ed546975f41b1b3b09bf531d521e438fb7b70 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/sympy/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/__pycache__/abc.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/sympy/__pycache__/abc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..044628b3dac27d77e5d24a565fab09b547be82fd Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/sympy/__pycache__/abc.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/__pycache__/conftest.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/sympy/__pycache__/conftest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e675f46df59c092f6c8ea31e76456c055bf14169 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/sympy/__pycache__/conftest.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/__pycache__/galgebra.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/sympy/__pycache__/galgebra.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91d425658ee03c949d5bfc4fc684afea622c2a8b Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/sympy/__pycache__/galgebra.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/__pycache__/release.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/sympy/__pycache__/release.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ce3731146f8196dae1e3647868c1a16d5468a4b Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/sympy/__pycache__/release.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/__pycache__/this.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/sympy/__pycache__/this.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df50f2548eea7dc01c30def4225a054e2f12ffc2 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/sympy/__pycache__/this.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/__pycache__/__init__.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f03c741ae7b7729b95e116d178454be8cfa4e15 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/__pycache__/diffgeom.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/__pycache__/diffgeom.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7286d648f31dcf56dda2e7857cf5b1fe59141111 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/__pycache__/diffgeom.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/__pycache__/rn.cpython-310.pyc 
b/evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/__pycache__/rn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..461ec0d37a93699ecf32f5de88bceef594175e24 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/__pycache__/rn.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/__init__.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c31a86fe00e5b34ec7f657abebbd19a4e043858d Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_class_structure.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_class_structure.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f2bd08577bcea509ef4ef16fd81c2c2451f5527 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_class_structure.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_diffgeom.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_diffgeom.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3fb36c263a665e7a3ed841c6de670c7908e73901 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_diffgeom.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_function_diffgeom_book.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_function_diffgeom_book.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3dbeef2619c3335d83c77ce400d433c9c010f9d5 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_function_diffgeom_book.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_hyperbolic_space.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_hyperbolic_space.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..072afc1daf8a0e91d412b06442dc586421a32012 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/__pycache__/test_hyperbolic_space.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/test_diffgeom.py b/evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/test_diffgeom.py new file mode 100644 index 0000000000000000000000000000000000000000..7c3c9265785896b8f4ffa3a2b41816ca90579758 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/test_diffgeom.py @@ -0,0 +1,342 @@ +from sympy.core import Lambda, Symbol, symbols +from sympy.diffgeom.rn import R2, R2_p, R2_r, R3_r, R3_c, R3_s, R2_origin +from sympy.diffgeom import (Manifold, Patch, CoordSystem, Commutator, Differential, TensorProduct, + WedgeProduct, BaseCovarDerivativeOp, CovarDerivativeOp, LieDerivative, 
+ covariant_order, contravariant_order, twoform_to_matrix, metric_to_Christoffel_1st, + metric_to_Christoffel_2nd, metric_to_Riemann_components, + metric_to_Ricci_components, intcurve_diffequ, intcurve_series) +from sympy.simplify import trigsimp, simplify +from sympy.functions import sqrt, atan2, sin +from sympy.matrices import Matrix +from sympy.testing.pytest import raises, nocache_fail +from sympy.testing.pytest import warns_deprecated_sympy + +TP = TensorProduct + + +def test_coordsys_transform(): + # test inverse transforms + p, q, r, s = symbols('p q r s') + rel = {('first', 'second'): [(p, q), (q, -p)]} + R2_pq = CoordSystem('first', R2_origin, [p, q], rel) + R2_rs = CoordSystem('second', R2_origin, [r, s], rel) + r, s = R2_rs.symbols + assert R2_rs.transform(R2_pq) == Matrix([[-s], [r]]) + + # inverse transform impossible case + a, b = symbols('a b', positive=True) + rel = {('first', 'second'): [(a,), (-a,)]} + R2_a = CoordSystem('first', R2_origin, [a], rel) + R2_b = CoordSystem('second', R2_origin, [b], rel) + # This transformation is uninvertible because there is no positive a, b satisfying a = -b + with raises(NotImplementedError): + R2_b.transform(R2_a) + + # inverse transform ambiguous case + c, d = symbols('c d') + rel = {('first', 'second'): [(c,), (c**2,)]} + R2_c = CoordSystem('first', R2_origin, [c], rel) + R2_d = CoordSystem('second', R2_origin, [d], rel) + # The transform method should throw if it finds multiple inverses for a coordinate transformation. + with raises(ValueError): + R2_d.transform(R2_c) + + # test indirect transformation + a, b, c, d, e, f = symbols('a, b, c, d, e, f') + rel = {('C1', 'C2'): [(a, b), (2*a, 3*b)], + ('C2', 'C3'): [(c, d), (3*c, 2*d)]} + C1 = CoordSystem('C1', R2_origin, (a, b), rel) + C2 = CoordSystem('C2', R2_origin, (c, d), rel) + C3 = CoordSystem('C3', R2_origin, (e, f), rel) + a, b = C1.symbols + c, d = C2.symbols + e, f = C3.symbols + assert C2.transform(C1) == Matrix([c/2, d/3]) + assert C1.transform(C3) == Matrix([6*a, 6*b]) + assert C3.transform(C1) == Matrix([e/6, f/6]) + assert C3.transform(C2) == Matrix([e/3, f/2]) + + a, b, c, d, e, f = symbols('a, b, c, d, e, f') + rel = {('C1', 'C2'): [(a, b), (2*a, 3*b + 1)], + ('C3', 'C2'): [(e, f), (-e - 2, 2*f)]} + C1 = CoordSystem('C1', R2_origin, (a, b), rel) + C2 = CoordSystem('C2', R2_origin, (c, d), rel) + C3 = CoordSystem('C3', R2_origin, (e, f), rel) + a, b = C1.symbols + c, d = C2.symbols + e, f = C3.symbols + assert C2.transform(C1) == Matrix([c/2, (d - 1)/3]) + assert C1.transform(C3) == Matrix([-2*a - 2, (3*b + 1)/2]) + assert C3.transform(C1) == Matrix([-e/2 - 1, (2*f - 1)/3]) + assert C3.transform(C2) == Matrix([-e - 2, 2*f]) + + # old signature uses Lambda + a, b, c, d, e, f = symbols('a, b, c, d, e, f') + rel = {('C1', 'C2'): Lambda((a, b), (2*a, 3*b + 1)), + ('C3', 'C2'): Lambda((e, f), (-e - 2, 2*f))} + C1 = CoordSystem('C1', R2_origin, (a, b), rel) + C2 = CoordSystem('C2', R2_origin, (c, d), rel) + C3 = CoordSystem('C3', R2_origin, (e, f), rel) + a, b = C1.symbols + c, d = C2.symbols + e, f = C3.symbols + assert C2.transform(C1) == Matrix([c/2, (d - 1)/3]) + assert C1.transform(C3) == Matrix([-2*a - 2, (3*b + 1)/2]) + assert C3.transform(C1) == Matrix([-e/2 - 1, (2*f - 1)/3]) + assert C3.transform(C2) == Matrix([-e - 2, 2*f]) + + +def test_R2(): + x0, y0, r0, theta0 = symbols('x0, y0, r0, theta0', real=True) + point_r = R2_r.point([x0, y0]) + point_p = R2_p.point([r0, theta0]) + + # r**2 = x**2 + y**2 + assert (R2.r**2 - R2.x**2 - R2.y**2).rcall(point_r) == 0 + assert 
trigsimp( (R2.r**2 - R2.x**2 - R2.y**2).rcall(point_p) ) == 0 + assert trigsimp(R2.e_r(R2.x**2 + R2.y**2).rcall(point_p).doit()) == 2*r0 + + # polar->rect->polar == Id + a, b = symbols('a b', positive=True) + m = Matrix([[a], [b]]) + + #TODO assert m == R2_r.transform(R2_p, R2_p.transform(R2_r, [a, b])).applyfunc(simplify) + assert m == R2_p.transform(R2_r, R2_r.transform(R2_p, m)).applyfunc(simplify) + + # deprecated method + with warns_deprecated_sympy(): + assert m == R2_p.coord_tuple_transform_to( + R2_r, R2_r.coord_tuple_transform_to(R2_p, m)).applyfunc(simplify) + + +def test_R3(): + a, b, c = symbols('a b c', positive=True) + m = Matrix([[a], [b], [c]]) + + assert m == R3_c.transform(R3_r, R3_r.transform(R3_c, m)).applyfunc(simplify) + #TODO assert m == R3_r.transform(R3_c, R3_c.transform(R3_r, m)).applyfunc(simplify) + assert m == R3_s.transform( + R3_r, R3_r.transform(R3_s, m)).applyfunc(simplify) + #TODO assert m == R3_r.transform(R3_s, R3_s.transform(R3_r, m)).applyfunc(simplify) + assert m == R3_s.transform( + R3_c, R3_c.transform(R3_s, m)).applyfunc(simplify) + #TODO assert m == R3_c.transform(R3_s, R3_s.transform(R3_c, m)).applyfunc(simplify) + + with warns_deprecated_sympy(): + assert m == R3_c.coord_tuple_transform_to( + R3_r, R3_r.coord_tuple_transform_to(R3_c, m)).applyfunc(simplify) + #TODO assert m == R3_r.coord_tuple_transform_to(R3_c, R3_c.coord_tuple_transform_to(R3_r, m)).applyfunc(simplify) + assert m == R3_s.coord_tuple_transform_to( + R3_r, R3_r.coord_tuple_transform_to(R3_s, m)).applyfunc(simplify) + #TODO assert m == R3_r.coord_tuple_transform_to(R3_s, R3_s.coord_tuple_transform_to(R3_r, m)).applyfunc(simplify) + assert m == R3_s.coord_tuple_transform_to( + R3_c, R3_c.coord_tuple_transform_to(R3_s, m)).applyfunc(simplify) + #TODO assert m == R3_c.coord_tuple_transform_to(R3_s, R3_s.coord_tuple_transform_to(R3_c, m)).applyfunc(simplify) + + +def test_CoordinateSymbol(): + x, y = R2_r.symbols + r, theta = R2_p.symbols + assert y.rewrite(R2_p) == r*sin(theta) + + +def test_point(): + x, y = symbols('x, y') + p = R2_r.point([x, y]) + assert p.free_symbols == {x, y} + assert p.coords(R2_r) == p.coords() == Matrix([x, y]) + assert p.coords(R2_p) == Matrix([sqrt(x**2 + y**2), atan2(y, x)]) + + +def test_commutator(): + assert Commutator(R2.e_x, R2.e_y) == 0 + assert Commutator(R2.x*R2.e_x, R2.x*R2.e_x) == 0 + assert Commutator(R2.x*R2.e_x, R2.x*R2.e_y) == R2.x*R2.e_y + c = Commutator(R2.e_x, R2.e_r) + assert c(R2.x) == R2.y*(R2.x**2 + R2.y**2)**(-1)*sin(R2.theta) + + +def test_differential(): + xdy = R2.x*R2.dy + dxdy = Differential(xdy) + assert xdy.rcall(None) == xdy + assert dxdy(R2.e_x, R2.e_y) == 1 + assert dxdy(R2.e_x, R2.x*R2.e_y) == R2.x + assert Differential(dxdy) == 0 + + +def test_products(): + assert TensorProduct( + R2.dx, R2.dy)(R2.e_x, R2.e_y) == R2.dx(R2.e_x)*R2.dy(R2.e_y) == 1 + assert TensorProduct(R2.dx, R2.dy)(None, R2.e_y) == R2.dx + assert TensorProduct(R2.dx, R2.dy)(R2.e_x, None) == R2.dy + assert TensorProduct(R2.dx, R2.dy)(R2.e_x) == R2.dy + assert TensorProduct(R2.x, R2.dx) == R2.x*R2.dx + assert TensorProduct( + R2.e_x, R2.e_y)(R2.x, R2.y) == R2.e_x(R2.x) * R2.e_y(R2.y) == 1 + assert TensorProduct(R2.e_x, R2.e_y)(None, R2.y) == R2.e_x + assert TensorProduct(R2.e_x, R2.e_y)(R2.x, None) == R2.e_y + assert TensorProduct(R2.e_x, R2.e_y)(R2.x) == R2.e_y + assert TensorProduct(R2.x, R2.e_x) == R2.x * R2.e_x + assert TensorProduct( + R2.dx, R2.e_y)(R2.e_x, R2.y) == R2.dx(R2.e_x) * R2.e_y(R2.y) == 1 + assert TensorProduct(R2.dx, R2.e_y)(None, R2.y) 
== R2.dx + assert TensorProduct(R2.dx, R2.e_y)(R2.e_x, None) == R2.e_y + assert TensorProduct(R2.dx, R2.e_y)(R2.e_x) == R2.e_y + assert TensorProduct(R2.x, R2.e_x) == R2.x * R2.e_x + assert TensorProduct( + R2.e_x, R2.dy)(R2.x, R2.e_y) == R2.e_x(R2.x) * R2.dy(R2.e_y) == 1 + assert TensorProduct(R2.e_x, R2.dy)(None, R2.e_y) == R2.e_x + assert TensorProduct(R2.e_x, R2.dy)(R2.x, None) == R2.dy + assert TensorProduct(R2.e_x, R2.dy)(R2.x) == R2.dy + assert TensorProduct(R2.e_y,R2.e_x)(R2.x**2 + R2.y**2,R2.x**2 + R2.y**2) == 4*R2.x*R2.y + + assert WedgeProduct(R2.dx, R2.dy)(R2.e_x, R2.e_y) == 1 + assert WedgeProduct(R2.e_x, R2.e_y)(R2.x, R2.y) == 1 + + +def test_lie_derivative(): + assert LieDerivative(R2.e_x, R2.y) == R2.e_x(R2.y) == 0 + assert LieDerivative(R2.e_x, R2.x) == R2.e_x(R2.x) == 1 + assert LieDerivative(R2.e_x, R2.e_x) == Commutator(R2.e_x, R2.e_x) == 0 + assert LieDerivative(R2.e_x, R2.e_r) == Commutator(R2.e_x, R2.e_r) + assert LieDerivative(R2.e_x + R2.e_y, R2.x) == 1 + assert LieDerivative( + R2.e_x, TensorProduct(R2.dx, R2.dy))(R2.e_x, R2.e_y) == 0 + + +@nocache_fail +def test_covar_deriv(): + ch = metric_to_Christoffel_2nd(TP(R2.dx, R2.dx) + TP(R2.dy, R2.dy)) + cvd = BaseCovarDerivativeOp(R2_r, 0, ch) + assert cvd(R2.x) == 1 + # This line fails if the cache is disabled: + assert cvd(R2.x*R2.e_x) == R2.e_x + cvd = CovarDerivativeOp(R2.x*R2.e_x, ch) + assert cvd(R2.x) == R2.x + assert cvd(R2.x*R2.e_x) == R2.x*R2.e_x + + +def test_intcurve_diffequ(): + t = symbols('t') + start_point = R2_r.point([1, 0]) + vector_field = -R2.y*R2.e_x + R2.x*R2.e_y + equations, init_cond = intcurve_diffequ(vector_field, t, start_point) + assert str(equations) == '[f_1(t) + Derivative(f_0(t), t), -f_0(t) + Derivative(f_1(t), t)]' + assert str(init_cond) == '[f_0(0) - 1, f_1(0)]' + equations, init_cond = intcurve_diffequ(vector_field, t, start_point, R2_p) + assert str( + equations) == '[Derivative(f_0(t), t), Derivative(f_1(t), t) - 1]' + assert str(init_cond) == '[f_0(0) - 1, f_1(0)]' + + +def test_helpers_and_coordinate_dependent(): + one_form = R2.dr + R2.dx + two_form = Differential(R2.x*R2.dr + R2.r*R2.dx) + three_form = Differential( + R2.y*two_form) + Differential(R2.x*Differential(R2.r*R2.dr)) + metric = TensorProduct(R2.dx, R2.dx) + TensorProduct(R2.dy, R2.dy) + metric_ambig = TensorProduct(R2.dx, R2.dx) + TensorProduct(R2.dr, R2.dr) + misform_a = TensorProduct(R2.dr, R2.dr) + R2.dr + misform_b = R2.dr**4 + misform_c = R2.dx*R2.dy + twoform_not_sym = TensorProduct(R2.dx, R2.dx) + TensorProduct(R2.dx, R2.dy) + twoform_not_TP = WedgeProduct(R2.dx, R2.dy) + + one_vector = R2.e_x + R2.e_y + two_vector = TensorProduct(R2.e_x, R2.e_y) + three_vector = TensorProduct(R2.e_x, R2.e_y, R2.e_x) + two_wp = WedgeProduct(R2.e_x,R2.e_y) + + assert covariant_order(one_form) == 1 + assert covariant_order(two_form) == 2 + assert covariant_order(three_form) == 3 + assert covariant_order(two_form + metric) == 2 + assert covariant_order(two_form + metric_ambig) == 2 + assert covariant_order(two_form + twoform_not_sym) == 2 + assert covariant_order(two_form + twoform_not_TP) == 2 + + assert contravariant_order(one_vector) == 1 + assert contravariant_order(two_vector) == 2 + assert contravariant_order(three_vector) == 3 + assert contravariant_order(two_vector + two_wp) == 2 + + raises(ValueError, lambda: covariant_order(misform_a)) + raises(ValueError, lambda: covariant_order(misform_b)) + raises(ValueError, lambda: covariant_order(misform_c)) + + assert twoform_to_matrix(metric) == Matrix([[1, 0], [0, 1]]) + 
assert twoform_to_matrix(twoform_not_sym) == Matrix([[1, 0], [1, 0]]) + assert twoform_to_matrix(twoform_not_TP) == Matrix([[0, -1], [1, 0]]) + + raises(ValueError, lambda: twoform_to_matrix(one_form)) + raises(ValueError, lambda: twoform_to_matrix(three_form)) + raises(ValueError, lambda: twoform_to_matrix(metric_ambig)) + + raises(ValueError, lambda: metric_to_Christoffel_1st(twoform_not_sym)) + raises(ValueError, lambda: metric_to_Christoffel_2nd(twoform_not_sym)) + raises(ValueError, lambda: metric_to_Riemann_components(twoform_not_sym)) + raises(ValueError, lambda: metric_to_Ricci_components(twoform_not_sym)) + + +def test_correct_arguments(): + raises(ValueError, lambda: R2.e_x(R2.e_x)) + raises(ValueError, lambda: R2.e_x(R2.dx)) + + raises(ValueError, lambda: Commutator(R2.e_x, R2.x)) + raises(ValueError, lambda: Commutator(R2.dx, R2.e_x)) + + raises(ValueError, lambda: Differential(Differential(R2.e_x))) + + raises(ValueError, lambda: R2.dx(R2.x)) + + raises(ValueError, lambda: LieDerivative(R2.dx, R2.dx)) + raises(ValueError, lambda: LieDerivative(R2.x, R2.dx)) + + raises(ValueError, lambda: CovarDerivativeOp(R2.dx, [])) + raises(ValueError, lambda: CovarDerivativeOp(R2.x, [])) + + a = Symbol('a') + raises(ValueError, lambda: intcurve_series(R2.dx, a, R2_r.point([1, 2]))) + raises(ValueError, lambda: intcurve_series(R2.x, a, R2_r.point([1, 2]))) + + raises(ValueError, lambda: intcurve_diffequ(R2.dx, a, R2_r.point([1, 2]))) + raises(ValueError, lambda: intcurve_diffequ(R2.x, a, R2_r.point([1, 2]))) + + raises(ValueError, lambda: contravariant_order(R2.e_x + R2.dx)) + raises(ValueError, lambda: covariant_order(R2.e_x + R2.dx)) + + raises(ValueError, lambda: contravariant_order(R2.e_x*R2.e_y)) + raises(ValueError, lambda: covariant_order(R2.dx*R2.dy)) + +def test_simplify(): + x, y = R2_r.coord_functions() + dx, dy = R2_r.base_oneforms() + ex, ey = R2_r.base_vectors() + assert simplify(x) == x + assert simplify(x*y) == x*y + assert simplify(dx*dy) == dx*dy + assert simplify(ex*ey) == ex*ey + assert ((1-x)*dx)/(1-x)**2 == dx/(1-x) + + +def test_issue_17917(): + X = R2.x*R2.e_x - R2.y*R2.e_y + Y = (R2.x**2 + R2.y**2)*R2.e_x - R2.x*R2.y*R2.e_y + assert LieDerivative(X, Y).expand() == ( + R2.x**2*R2.e_x - 3*R2.y**2*R2.e_x - R2.x*R2.y*R2.e_y) + +def test_deprecations(): + m = Manifold('M', 2) + p = Patch('P', m) + with warns_deprecated_sympy(): + CoordSystem('Car2d', p, names=['x', 'y']) + + with warns_deprecated_sympy(): + c = CoordSystem('Car2d', p, ['x', 'y']) + + with warns_deprecated_sympy(): + list(m.patches) + + with warns_deprecated_sympy(): + list(c.transforms) diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/test_function_diffgeom_book.py b/evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/test_function_diffgeom_book.py new file mode 100644 index 0000000000000000000000000000000000000000..44d9623bc34ab73c7d575d9d9fd5b6d84f8e4a94 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/test_function_diffgeom_book.py @@ -0,0 +1,145 @@ +from sympy.diffgeom.rn import R2, R2_p, R2_r, R3_r +from sympy.diffgeom import intcurve_series, Differential, WedgeProduct +from sympy.core import symbols, Function, Derivative +from sympy.simplify import trigsimp, simplify +from sympy.functions import sqrt, atan2, sin, cos +from sympy.matrices import Matrix + +# Most of the functionality is covered in the +# test_functional_diffgeom_ch* tests which are based on the +# example from the paper of Sussman and Wisdom. 
+# If they do not cover something, additional tests are added in other test +# functions. + +# From "Functional Differential Geometry" as of 2011 +# by Sussman and Wisdom. + + +def test_functional_diffgeom_ch2(): + x0, y0, r0, theta0 = symbols('x0, y0, r0, theta0', real=True) + x, y = symbols('x, y', real=True) + f = Function('f') + + assert (R2_p.point_to_coords(R2_r.point([x0, y0])) == + Matrix([sqrt(x0**2 + y0**2), atan2(y0, x0)])) + assert (R2_r.point_to_coords(R2_p.point([r0, theta0])) == + Matrix([r0*cos(theta0), r0*sin(theta0)])) + + assert R2_p.jacobian(R2_r, [r0, theta0]) == Matrix( + [[cos(theta0), -r0*sin(theta0)], [sin(theta0), r0*cos(theta0)]]) + + field = f(R2.x, R2.y) + p1_in_rect = R2_r.point([x0, y0]) + p1_in_polar = R2_p.point([sqrt(x0**2 + y0**2), atan2(y0, x0)]) + assert field.rcall(p1_in_rect) == f(x0, y0) + assert field.rcall(p1_in_polar) == f(x0, y0) + + p_r = R2_r.point([x0, y0]) + p_p = R2_p.point([r0, theta0]) + assert R2.x(p_r) == x0 + assert R2.x(p_p) == r0*cos(theta0) + assert R2.r(p_p) == r0 + assert R2.r(p_r) == sqrt(x0**2 + y0**2) + assert R2.theta(p_r) == atan2(y0, x0) + + h = R2.x*R2.r**2 + R2.y**3 + assert h.rcall(p_r) == x0*(x0**2 + y0**2) + y0**3 + assert h.rcall(p_p) == r0**3*sin(theta0)**3 + r0**3*cos(theta0) + + +def test_functional_diffgeom_ch3(): + x0, y0 = symbols('x0, y0', real=True) + x, y, t = symbols('x, y, t', real=True) + f = Function('f') + b1 = Function('b1') + b2 = Function('b2') + p_r = R2_r.point([x0, y0]) + + s_field = f(R2.x, R2.y) + v_field = b1(R2.x)*R2.e_x + b2(R2.y)*R2.e_y + assert v_field.rcall(s_field).rcall(p_r).doit() == b1( + x0)*Derivative(f(x0, y0), x0) + b2(y0)*Derivative(f(x0, y0), y0) + + assert R2.e_x(R2.r**2).rcall(p_r) == 2*x0 + v = R2.e_x + 2*R2.e_y + s = R2.r**2 + 3*R2.x + assert v.rcall(s).rcall(p_r).doit() == 2*x0 + 4*y0 + 3 + + circ = -R2.y*R2.e_x + R2.x*R2.e_y + series = intcurve_series(circ, t, R2_r.point([1, 0]), coeffs=True) + series_x, series_y = zip(*series) + assert all( + term == cos(t).taylor_term(i, t) for i, term in enumerate(series_x)) + assert all( + term == sin(t).taylor_term(i, t) for i, term in enumerate(series_y)) + + +def test_functional_diffgeom_ch4(): + x0, y0, theta0 = symbols('x0, y0, theta0', real=True) + x, y, r, theta = symbols('x, y, r, theta', real=True) + r0 = symbols('r0', positive=True) + f = Function('f') + b1 = Function('b1') + b2 = Function('b2') + p_r = R2_r.point([x0, y0]) + p_p = R2_p.point([r0, theta0]) + + f_field = b1(R2.x, R2.y)*R2.dx + b2(R2.x, R2.y)*R2.dy + assert f_field.rcall(R2.e_x).rcall(p_r) == b1(x0, y0) + assert f_field.rcall(R2.e_y).rcall(p_r) == b2(x0, y0) + + s_field_r = f(R2.x, R2.y) + df = Differential(s_field_r) + assert df(R2.e_x).rcall(p_r).doit() == Derivative(f(x0, y0), x0) + assert df(R2.e_y).rcall(p_r).doit() == Derivative(f(x0, y0), y0) + + s_field_p = f(R2.r, R2.theta) + df = Differential(s_field_p) + assert trigsimp(df(R2.e_x).rcall(p_p).doit()) == ( + cos(theta0)*Derivative(f(r0, theta0), r0) - + sin(theta0)*Derivative(f(r0, theta0), theta0)/r0) + assert trigsimp(df(R2.e_y).rcall(p_p).doit()) == ( + sin(theta0)*Derivative(f(r0, theta0), r0) + + cos(theta0)*Derivative(f(r0, theta0), theta0)/r0) + + assert R2.dx(R2.e_x).rcall(p_r) == 1 + assert R2.dx(R2.e_x) == 1 + assert R2.dx(R2.e_y).rcall(p_r) == 0 + assert R2.dx(R2.e_y) == 0 + + circ = -R2.y*R2.e_x + R2.x*R2.e_y + assert R2.dx(circ).rcall(p_r).doit() == -y0 + assert R2.dy(circ).rcall(p_r) == x0 + assert R2.dr(circ).rcall(p_r) == 0 + assert simplify(R2.dtheta(circ).rcall(p_r)) == 1 + + assert (circ 
- R2.e_theta).rcall(s_field_r).rcall(p_r) == 0 + + +def test_functional_diffgeom_ch6(): + u0, u1, u2, v0, v1, v2, w0, w1, w2 = symbols('u0:3, v0:3, w0:3', real=True) + + u = u0*R2.e_x + u1*R2.e_y + v = v0*R2.e_x + v1*R2.e_y + wp = WedgeProduct(R2.dx, R2.dy) + assert wp(u, v) == u0*v1 - u1*v0 + + u = u0*R3_r.e_x + u1*R3_r.e_y + u2*R3_r.e_z + v = v0*R3_r.e_x + v1*R3_r.e_y + v2*R3_r.e_z + w = w0*R3_r.e_x + w1*R3_r.e_y + w2*R3_r.e_z + wp = WedgeProduct(R3_r.dx, R3_r.dy, R3_r.dz) + assert wp( + u, v, w) == Matrix(3, 3, [u0, u1, u2, v0, v1, v2, w0, w1, w2]).det() + + a, b, c = symbols('a, b, c', cls=Function) + a_f = a(R3_r.x, R3_r.y, R3_r.z) + b_f = b(R3_r.x, R3_r.y, R3_r.z) + c_f = c(R3_r.x, R3_r.y, R3_r.z) + theta = a_f*R3_r.dx + b_f*R3_r.dy + c_f*R3_r.dz + dtheta = Differential(theta) + da = Differential(a_f) + db = Differential(b_f) + dc = Differential(c_f) + expr = dtheta - WedgeProduct( + da, R3_r.dx) - WedgeProduct(db, R3_r.dy) - WedgeProduct(dc, R3_r.dz) + assert expr.rcall(R3_r.e_x, R3_r.e_y) == 0 diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/test_hyperbolic_space.py b/evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/test_hyperbolic_space.py new file mode 100644 index 0000000000000000000000000000000000000000..48ddc7f8065f2b69bcd8eca4726a21c5901514ec --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/diffgeom/tests/test_hyperbolic_space.py @@ -0,0 +1,91 @@ +r''' +unit test describing the hyperbolic half-plane with the Poincare metric. This +is a basic model of hyperbolic geometry on the (positive) half-space + +{(x,y) \in R^2 | y > 0} + +with the Riemannian metric + +ds^2 = (dx^2 + dy^2)/y^2 + +It has constant negative scalar curvature = -2 + +https://en.wikipedia.org/wiki/Poincare_half-plane_model +''' +from sympy.matrices.dense import diag +from sympy.diffgeom import (twoform_to_matrix, + metric_to_Christoffel_1st, metric_to_Christoffel_2nd, + metric_to_Riemann_components, metric_to_Ricci_components) +import sympy.diffgeom.rn +from sympy.tensor.array import ImmutableDenseNDimArray + + +def test_H2(): + TP = sympy.diffgeom.TensorProduct + R2 = sympy.diffgeom.rn.R2 + y = R2.y + dy = R2.dy + dx = R2.dx + g = (TP(dx, dx) + TP(dy, dy))*y**(-2) + automat = twoform_to_matrix(g) + mat = diag(y**(-2), y**(-2)) + assert mat == automat + + gamma1 = metric_to_Christoffel_1st(g) + assert gamma1[0, 0, 0] == 0 + assert gamma1[0, 0, 1] == -y**(-3) + assert gamma1[0, 1, 0] == -y**(-3) + assert gamma1[0, 1, 1] == 0 + + assert gamma1[1, 1, 1] == -y**(-3) + assert gamma1[1, 1, 0] == 0 + assert gamma1[1, 0, 1] == 0 + assert gamma1[1, 0, 0] == y**(-3) + + gamma2 = metric_to_Christoffel_2nd(g) + assert gamma2[0, 0, 0] == 0 + assert gamma2[0, 0, 1] == -y**(-1) + assert gamma2[0, 1, 0] == -y**(-1) + assert gamma2[0, 1, 1] == 0 + + assert gamma2[1, 1, 1] == -y**(-1) + assert gamma2[1, 1, 0] == 0 + assert gamma2[1, 0, 1] == 0 + assert gamma2[1, 0, 0] == y**(-1) + + Rm = metric_to_Riemann_components(g) + assert Rm[0, 0, 0, 0] == 0 + assert Rm[0, 0, 0, 1] == 0 + assert Rm[0, 0, 1, 0] == 0 + assert Rm[0, 0, 1, 1] == 0 + + assert Rm[0, 1, 0, 0] == 0 + assert Rm[0, 1, 0, 1] == -y**(-2) + assert Rm[0, 1, 1, 0] == y**(-2) + assert Rm[0, 1, 1, 1] == 0 + + assert Rm[1, 0, 0, 0] == 0 + assert Rm[1, 0, 0, 1] == y**(-2) + assert Rm[1, 0, 1, 0] == -y**(-2) + assert Rm[1, 0, 1, 1] == 0 + + assert Rm[1, 1, 0, 0] == 0 + assert Rm[1, 1, 0, 1] == 0 + assert Rm[1, 1, 1, 0] == 0 + assert Rm[1, 1, 1, 1] == 0 + + Ric = metric_to_Ricci_components(g) + assert 
Ric[0, 0] == -y**(-2) + assert Ric[0, 1] == 0 + assert Ric[1, 0] == 0 + assert Ric[0, 0] == -y**(-2) + + assert Ric == ImmutableDenseNDimArray([-y**(-2), 0, 0, -y**(-2)], (2, 2)) + + ## scalar curvature is -2 + #TODO - it would be nice to have index contraction built-in + R = (Ric[0, 0] + Ric[1, 1])*y**2 + assert R == -2 + + ## Gauss curvature is -1 + assert R/2 == -1 diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/__init__.py b/evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a832614b1d48e26bf01e16f040f34dd412e8e32b --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/__init__.py @@ -0,0 +1,23 @@ +"""A module to manipulate symbolic objects with indices including tensors + +""" +from .indexed import IndexedBase, Idx, Indexed +from .index_methods import get_contraction_structure, get_indices +from .functions import shape +from .array import (MutableDenseNDimArray, ImmutableDenseNDimArray, + MutableSparseNDimArray, ImmutableSparseNDimArray, NDimArray, tensorproduct, + tensorcontraction, tensordiagonal, derive_by_array, permutedims, Array, + DenseNDimArray, SparseNDimArray,) + +__all__ = [ + 'IndexedBase', 'Idx', 'Indexed', + + 'get_contraction_structure', 'get_indices', + + 'shape', + + 'MutableDenseNDimArray', 'ImmutableDenseNDimArray', + 'MutableSparseNDimArray', 'ImmutableSparseNDimArray', 'NDimArray', + 'tensorproduct', 'tensorcontraction', 'tensordiagonal', 'derive_by_array', 'permutedims', + 'Array', 'DenseNDimArray', 'SparseNDimArray', +] diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/functions.py b/evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/functions.py new file mode 100644 index 0000000000000000000000000000000000000000..f14599d69152db1713f21c9dd785683901c5eeb9 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/functions.py @@ -0,0 +1,154 @@ +from collections.abc import Iterable +from functools import singledispatch + +from sympy.core.expr import Expr +from sympy.core.mul import Mul +from sympy.core.singleton import S +from sympy.core.sympify import sympify +from sympy.core.parameters import global_parameters + + +class TensorProduct(Expr): + """ + Generic class for tensor products. 
+ """ + is_number = False + + def __new__(cls, *args, **kwargs): + from sympy.tensor.array import NDimArray, tensorproduct, Array + from sympy.matrices.expressions.matexpr import MatrixExpr + from sympy.matrices.matrixbase import MatrixBase + from sympy.strategies import flatten + + args = [sympify(arg) for arg in args] + evaluate = kwargs.get("evaluate", global_parameters.evaluate) + + if not evaluate: + obj = Expr.__new__(cls, *args) + return obj + + arrays = [] + other = [] + scalar = S.One + for arg in args: + if isinstance(arg, (Iterable, MatrixBase, NDimArray)): + arrays.append(Array(arg)) + elif isinstance(arg, (MatrixExpr,)): + other.append(arg) + else: + scalar *= arg + + coeff = scalar*tensorproduct(*arrays) + if len(other) == 0: + return coeff + if coeff != 1: + newargs = [coeff] + other + else: + newargs = other + obj = Expr.__new__(cls, *newargs, **kwargs) + return flatten(obj) + + def rank(self): + return len(self.shape) + + def _get_args_shapes(self): + from sympy.tensor.array import Array + return [i.shape if hasattr(i, "shape") else Array(i).shape for i in self.args] + + @property + def shape(self): + shape_list = self._get_args_shapes() + return sum(shape_list, ()) + + def __getitem__(self, index): + index = iter(index) + return Mul.fromiter( + arg.__getitem__(tuple(next(index) for i in shp)) + for arg, shp in zip(self.args, self._get_args_shapes()) + ) + + +@singledispatch +def shape(expr): + """ + Return the shape of the *expr* as a tuple. *expr* should represent + suitable object such as matrix or array. + + Parameters + ========== + + expr : SymPy object having ``MatrixKind`` or ``ArrayKind``. + + Raises + ====== + + NoShapeError : Raised when object with wrong kind is passed. + + Examples + ======== + + This function returns the shape of any object representing matrix or array. + + >>> from sympy import shape, Array, ImmutableDenseMatrix, Integral + >>> from sympy.abc import x + >>> A = Array([1, 2]) + >>> shape(A) + (2,) + >>> shape(Integral(A, x)) + (2,) + >>> M = ImmutableDenseMatrix([1, 2]) + >>> shape(M) + (2, 1) + >>> shape(Integral(M, x)) + (2, 1) + + You can support new type by dispatching. + + >>> from sympy import Expr + >>> class NewExpr(Expr): + ... pass + >>> @shape.register(NewExpr) + ... def _(expr): + ... return shape(expr.args[0]) + >>> shape(NewExpr(M)) + (2, 1) + + If unsuitable expression is passed, ``NoShapeError()`` will be raised. + + >>> shape(Integral(x, x)) + Traceback (most recent call last): + ... + sympy.tensor.functions.NoShapeError: shape() called on non-array object: Integral(x, x) + + Notes + ===== + + Array-like classes (such as ``Matrix`` or ``NDimArray``) has ``shape`` + property which returns its shape, but it cannot be used for non-array + classes containing array. This function returns the shape of any + registered object representing array. + + """ + if hasattr(expr, "shape"): + return expr.shape + raise NoShapeError( + "%s does not have shape, or its type is not registered to shape()." % expr) + + +class NoShapeError(Exception): + """ + Raised when ``shape()`` is called on non-array object. + + This error can be imported from ``sympy.tensor.functions``. + + Examples + ======== + + >>> from sympy import shape + >>> from sympy.abc import x + >>> shape(x) + Traceback (most recent call last): + ... 
+ sympy.tensor.functions.NoShapeError: shape() called on non-array object: x + """ + pass diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/index_methods.py b/evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/index_methods.py new file mode 100644 index 0000000000000000000000000000000000000000..12f707b60b4ad0bcadc35a222d9abe0cc5e033fc --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/index_methods.py @@ -0,0 +1,469 @@ +"""Module with functions operating on IndexedBase, Indexed and Idx objects + + - Check shape conformance + - Determine indices in resulting expression + + etc. + + Methods in this module could be implemented by calling methods on Expr + objects instead. When things stabilize this could be a useful + refactoring. +""" + +from functools import reduce + +from sympy.core.function import Function +from sympy.functions import exp, Piecewise +from sympy.tensor.indexed import Idx, Indexed +from sympy.utilities import sift + +from collections import OrderedDict + +class IndexConformanceException(Exception): + pass + +def _unique_and_repeated(inds): + """ + Returns the unique and repeated indices. Also note, from the examples given below + that the order of indices is maintained as given in the input. + + Examples + ======== + + >>> from sympy.tensor.index_methods import _unique_and_repeated + >>> _unique_and_repeated([2, 3, 1, 3, 0, 4, 0]) + ([2, 1, 4], [3, 0]) + """ + uniq = OrderedDict() + for i in inds: + if i in uniq: + uniq[i] = 0 + else: + uniq[i] = 1 + return sift(uniq, lambda x: uniq[x], binary=True) + +def _remove_repeated(inds): + """ + Removes repeated objects from sequences + + Returns a set of the unique objects and a tuple of all that have been + removed. + + Examples + ======== + + >>> from sympy.tensor.index_methods import _remove_repeated + >>> l1 = [1, 2, 3, 2] + >>> _remove_repeated(l1) + ({1, 3}, (2,)) + + """ + u, r = _unique_and_repeated(inds) + return set(u), tuple(r) + + +def _get_indices_Mul(expr, return_dummies=False): + """Determine the outer indices of a Mul object. + + Examples + ======== + + >>> from sympy.tensor.index_methods import _get_indices_Mul + >>> from sympy.tensor.indexed import IndexedBase, Idx + >>> i, j, k = map(Idx, ['i', 'j', 'k']) + >>> x = IndexedBase('x') + >>> y = IndexedBase('y') + >>> _get_indices_Mul(x[i, k]*y[j, k]) + ({i, j}, {}) + >>> _get_indices_Mul(x[i, k]*y[j, k], return_dummies=True) + ({i, j}, {}, (k,)) + + """ + + inds = list(map(get_indices, expr.args)) + inds, syms = list(zip(*inds)) + + inds = list(map(list, inds)) + inds = list(reduce(lambda x, y: x + y, inds)) + inds, dummies = _remove_repeated(inds) + + symmetry = {} + for s in syms: + for pair in s: + if pair in symmetry: + symmetry[pair] *= s[pair] + else: + symmetry[pair] = s[pair] + + if return_dummies: + return inds, symmetry, dummies + else: + return inds, symmetry + + +def _get_indices_Pow(expr): + """Determine outer indices of a power or an exponential. + + A power is considered a universal function, so that the indices of a Pow is + just the collection of indices present in the expression. This may be + viewed as a bit inconsistent in the special case: + + x[i]**2 = x[i]*x[i] (1) + + The above expression could have been interpreted as the contraction of x[i] + with itself, but we choose instead to interpret it as a function + + lambda y: y**2 + + applied to each element of x (a universal function in numpy terms). 
In + order to allow an interpretation of (1) as a contraction, we need + contravariant and covariant Idx subclasses. (FIXME: this is not yet + implemented) + + Expressions in the base or exponent are subject to contraction as usual, + but an index that is present in the exponent will not be considered + contractable with its own base. Note, however, that indices in the same + exponent can be contracted with each other. + + Examples + ======== + + >>> from sympy.tensor.index_methods import _get_indices_Pow + >>> from sympy import Pow, exp, IndexedBase, Idx + >>> A = IndexedBase('A') + >>> x = IndexedBase('x') + >>> i, j, k = map(Idx, ['i', 'j', 'k']) + >>> _get_indices_Pow(exp(A[i, j]*x[j])) + ({i}, {}) + >>> _get_indices_Pow(Pow(x[i], x[i])) + ({i}, {}) + >>> _get_indices_Pow(Pow(A[i, j]*x[j], x[i])) + ({i}, {}) + + """ + base, exp = expr.as_base_exp() + binds, bsyms = get_indices(base) + einds, esyms = get_indices(exp) + + inds = binds | einds + + # FIXME: symmetries from power needs to check special cases, else nothing + symmetries = {} + + return inds, symmetries + + +def _get_indices_Add(expr): + """Determine outer indices of an Add object. + + In a sum, each term must have the same set of outer indices. A valid + expression could be + + x(i)*y(j) - x(j)*y(i) + + But we do not allow expressions like: + + x(i)*y(j) - z(j)*z(j) + + FIXME: Add support for Numpy broadcasting + + Examples + ======== + + >>> from sympy.tensor.index_methods import _get_indices_Add + >>> from sympy.tensor.indexed import IndexedBase, Idx + >>> i, j, k = map(Idx, ['i', 'j', 'k']) + >>> x = IndexedBase('x') + >>> y = IndexedBase('y') + >>> _get_indices_Add(x[i] + x[k]*y[i, k]) + ({i}, {}) + + """ + + inds = list(map(get_indices, expr.args)) + inds, syms = list(zip(*inds)) + + # allow broadcast of scalars + non_scalars = [x for x in inds if x != set()] + if not non_scalars: + return set(), {} + + if not all(x == non_scalars[0] for x in non_scalars[1:]): + raise IndexConformanceException("Indices are not consistent: %s" % expr) + if not reduce(lambda x, y: x != y or y, syms): + symmetries = syms[0] + else: + # FIXME: search for symmetries + symmetries = {} + + return non_scalars[0], symmetries + + +def get_indices(expr): + """Determine the outer indices of expression ``expr`` + + By *outer* we mean indices that are not summation indices. Returns a set + and a dict. The set contains outer indices and the dict contains + information about index symmetries. + + Examples + ======== + + >>> from sympy.tensor.index_methods import get_indices + >>> from sympy import symbols + >>> from sympy.tensor import IndexedBase + >>> x, y, A = map(IndexedBase, ['x', 'y', 'A']) + >>> i, j, a, z = symbols('i j a z', integer=True) + + The indices of the total expression are determined. Repeated indices imply a + summation, for instance the trace of a matrix A: + + >>> get_indices(A[i, i]) + (set(), {}) + + In the case of many terms, the terms are required to have identical + outer indices. Otherwise an IndexConformanceException is raised. + + >>> get_indices(x[i] + A[i, j]*y[j]) + ({i}, {}) + + :Exceptions: + + An IndexConformanceException means that the terms are not compatible, e.g. + + >>> get_indices(x[i] + y[j]) #doctest: +SKIP + (...) + IndexConformanceException: Indices are not consistent: x(i) + y(j) + + .. warning:: + The concept of *outer* indices applies recursively, starting on the deepest + level.
This implies that dummies inside parenthesis are assumed to be + summed first, so that the following expression is handled gracefully: + + >>> get_indices((x[i] + A[i, j]*y[j])*x[j]) + ({i, j}, {}) + + This is correct and may appear convenient, but you need to be careful + with this as SymPy will happily .expand() the product, if requested. The + resulting expression would mix the outer ``j`` with the dummies inside + the parenthesis, which makes it a different expression. To be on the + safe side, it is best to avoid such ambiguities by using unique indices + for all contractions that should be held separate. + + """ + # We call ourself recursively to determine indices of sub expressions. + + # break recursion + if isinstance(expr, Indexed): + c = expr.indices + inds, dummies = _remove_repeated(c) + return inds, {} + elif expr is None: + return set(), {} + elif isinstance(expr, Idx): + return {expr}, {} + elif expr.is_Atom: + return set(), {} + + + # recurse via specialized functions + else: + if expr.is_Mul: + return _get_indices_Mul(expr) + elif expr.is_Add: + return _get_indices_Add(expr) + elif expr.is_Pow or isinstance(expr, exp): + return _get_indices_Pow(expr) + + elif isinstance(expr, Piecewise): + # FIXME: No support for Piecewise yet + return set(), {} + elif isinstance(expr, Function): + # Support ufunc like behaviour by returning indices from arguments. + # Functions do not interpret repeated indices across arguments + # as summation + ind0 = set() + for arg in expr.args: + ind, sym = get_indices(arg) + ind0 |= ind + return ind0, sym + + # this test is expensive, so it should be at the end + elif not expr.has(Indexed): + return set(), {} + raise NotImplementedError( + "FIXME: No specialized handling of type %s" % type(expr)) + + +def get_contraction_structure(expr): + """Determine dummy indices of ``expr`` and describe its structure + + By *dummy* we mean indices that are summation indices. + + The structure of the expression is determined and described as follows: + + 1) A conforming summation of Indexed objects is described with a dict where + the keys are summation indices and the corresponding values are sets + containing all terms for which the summation applies. All Add objects + in the SymPy expression tree are described like this. + + 2) For all nodes in the SymPy expression tree that are *not* of type Add, the + following applies: + + If a node discovers contractions in one of its arguments, the node + itself will be stored as a key in the dict. For that key, the + corresponding value is a list of dicts, each of which is the result of a + recursive call to get_contraction_structure(). The list contains only + dicts for the non-trivial deeper contractions, omitting dicts with None + as the one and only key. + + .. Note:: The presence of expressions among the dictionary keys indicates + multiple levels of index contractions. A nested dict displays nested + contractions and may itself contain dicts from a deeper level. In + practical calculations the summation in the deepest nested level must be + calculated first so that the outer expression can access the resulting + indexed object. 
+ + Examples + ======== + + >>> from sympy.tensor.index_methods import get_contraction_structure + >>> from sympy import default_sort_key + >>> from sympy.tensor import IndexedBase, Idx + >>> x, y, A = map(IndexedBase, ['x', 'y', 'A']) + >>> i, j, k, l = map(Idx, ['i', 'j', 'k', 'l']) + >>> get_contraction_structure(x[i]*y[i] + A[j, j]) + {(i,): {x[i]*y[i]}, (j,): {A[j, j]}} + >>> get_contraction_structure(x[i]*y[j]) + {None: {x[i]*y[j]}} + + A multiplication of contracted factors results in nested dicts representing + the internal contractions. + + >>> d = get_contraction_structure(x[i, i]*y[j, j]) + >>> sorted(d.keys(), key=default_sort_key) + [None, x[i, i]*y[j, j]] + + In this case, the product has no contractions: + + >>> d[None] + {x[i, i]*y[j, j]} + + Factors are contracted "first": + + >>> sorted(d[x[i, i]*y[j, j]], key=default_sort_key) + [{(i,): {x[i, i]}}, {(j,): {y[j, j]}}] + + A parenthesized Add object is also returned as a nested dictionary. The + term containing the parenthesis is a Mul with a contraction among the + arguments, so it will be found as a key in the result. It stores the + dictionary resulting from a recursive call on the Add expression. + + >>> d = get_contraction_structure(x[i]*(y[i] + A[i, j]*x[j])) + >>> sorted(d.keys(), key=default_sort_key) + [(A[i, j]*x[j] + y[i])*x[i], (i,)] + >>> d[(i,)] + {(A[i, j]*x[j] + y[i])*x[i]} + >>> d[x[i]*(A[i, j]*x[j] + y[i])] + [{None: {y[i]}, (j,): {A[i, j]*x[j]}}] + + Powers with contractions in either base or exponent will also be found as + keys in the dictionary, mapping to a list of results from recursive calls: + + >>> d = get_contraction_structure(A[j, j]**A[i, i]) + >>> d[None] + {A[j, j]**A[i, i]} + >>> nested_contractions = d[A[j, j]**A[i, i]] + >>> nested_contractions[0] + {(j,): {A[j, j]}} + >>> nested_contractions[1] + {(i,): {A[i, i]}} + + The description of the contraction structure may appear complicated when + represented with a string in the above examples, but it is easy to iterate + over: + + >>> from sympy import Expr + >>> for key in d: + ... if isinstance(key, Expr): + ... continue + ... for term in d[key]: + ... if term in d: + ... # treat deepest contraction first + ... pass + ... # treat outermost contractions here + + """ + + # We call ourself recursively to inspect sub expressions. + + if isinstance(expr, Indexed): + junk, key = _remove_repeated(expr.indices) + return {key or None: {expr}} + elif expr.is_Atom: + return {None: {expr}} + elif expr.is_Mul: + junk, junk, key = _get_indices_Mul(expr, return_dummies=True) + result = {key or None: {expr}} + # recurse on every factor + nested = [] + for fac in expr.args: + facd = get_contraction_structure(fac) + if not (None in facd and len(facd) == 1): + nested.append(facd) + if nested: + result[expr] = nested + return result + elif expr.is_Pow or isinstance(expr, exp): + # recurse in base and exp separately. If either has internal + # contractions we must include ourselves as a key in the returned dict + b, e = expr.as_base_exp() + dbase = get_contraction_structure(b) + dexp = get_contraction_structure(e) + + dicts = [] + for d in dbase, dexp: + if not (None in d and len(d) == 1): + dicts.append(d) + result = {None: {expr}} + if dicts: + result[expr] = dicts + return result + elif expr.is_Add: + # Note: we just collect all terms with identical summation indices. We + # do nothing to identify equivalent terms here, as this would require + # substitutions or pattern matching in expressions of unknown + # complexity.
+ result = {} + for term in expr.args: + # recurse on every term + d = get_contraction_structure(term) + for key in d: + if key in result: + result[key] |= d[key] + else: + result[key] = d[key] + return result + + elif isinstance(expr, Piecewise): + # FIXME: No support for Piecewise yet + return {None: expr} + elif isinstance(expr, Function): + # Collect non-trivial contraction structures in each argument + # We do not report repeated indices in separate arguments as a + # contraction + deeplist = [] + for arg in expr.args: + deep = get_contraction_structure(arg) + if not (None in deep and len(deep) == 1): + deeplist.append(deep) + d = {None: {expr}} + if deeplist: + d[expr] = deeplist + return d + + # this test is expensive, so it should be at the end + elif not expr.has(Indexed): + return {None: {expr}} + raise NotImplementedError( + "FIXME: No specialized handling of type %s" % type(expr)) diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/indexed.py b/evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/indexed.py new file mode 100644 index 0000000000000000000000000000000000000000..feddad21e52bbab2e1243beafdb11f30b2eded4d --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/indexed.py @@ -0,0 +1,793 @@ +r"""Module that defines indexed objects. + +The classes ``IndexedBase``, ``Indexed``, and ``Idx`` represent a +matrix element ``M[i, j]`` as in the following diagram:: + + 1) The Indexed class represents the entire indexed object. + | + ___|___ + ' ' + M[i, j] + / \__\______ + | | + | | + | 2) The Idx class represents indices; each Idx can + | optionally contain information about its range. + | + 3) IndexedBase represents the 'stem' of an indexed object, here `M`. + The stem used by itself is usually taken to represent the entire + array. + +There can be any number of indices on an Indexed object. No +transformation properties are implemented in these Base objects, but +implicit contraction of repeated indices is supported. + +Note that the support for complicated (i.e. non-atomic) integer +expressions as indices is limited. (This should be improved in +future releases.) + +Examples +======== + +To express the above matrix element example you would write: + +>>> from sympy import symbols, IndexedBase, Idx +>>> M = IndexedBase('M') +>>> i, j = symbols('i j', cls=Idx) +>>> M[i, j] +M[i, j] + +Repeated indices in a product implies a summation, so to express a +matrix-vector product in terms of Indexed objects: + +>>> x = IndexedBase('x') +>>> M[i, j]*x[j] +M[i, j]*x[j] + +If the indexed objects will be converted to component based arrays, e.g. +with the code printers or the autowrap framework, you also need to provide +(symbolic or numerical) dimensions. 
This can be done by passing an +optional shape parameter to IndexedBase upon construction: + +>>> dim1, dim2 = symbols('dim1 dim2', integer=True) +>>> A = IndexedBase('A', shape=(dim1, 2*dim1, dim2)) +>>> A.shape +(dim1, 2*dim1, dim2) +>>> A[i, j, 3].shape +(dim1, 2*dim1, dim2) + +If an IndexedBase object has no shape information, it is assumed that the +array is as large as the ranges of its indices: + +>>> n, m = symbols('n m', integer=True) +>>> i = Idx('i', m) +>>> j = Idx('j', n) +>>> M[i, j].shape +(m, n) +>>> M[i, j].ranges +[(0, m - 1), (0, n - 1)] + +The above can be compared with the following: + +>>> A[i, 2, j].shape +(dim1, 2*dim1, dim2) +>>> A[i, 2, j].ranges +[(0, m - 1), None, (0, n - 1)] + +To analyze the structure of indexed expressions, you can use the methods +get_indices() and get_contraction_structure(): + +>>> from sympy.tensor import get_indices, get_contraction_structure +>>> get_indices(A[i, j, j]) +({i}, {}) +>>> get_contraction_structure(A[i, j, j]) +{(j,): {A[i, j, j]}} + +See the appropriate docstrings for a detailed explanation of the output. +""" + +# TODO: (some ideas for improvement) +# +# o test and guarantee numpy compatibility +# - implement full support for broadcasting +# - strided arrays +# +# o more functions to analyze indexed expressions +# - identify standard constructs, e.g matrix-vector product in a subexpression +# +# o functions to generate component based arrays (numpy and sympy.Matrix) +# - generate a single array directly from Indexed +# - convert simple sub-expressions +# +# o sophisticated indexing (possibly in subclasses to preserve simplicity) +# - Idx with range smaller than dimension of Indexed +# - Idx with stepsize != 1 +# - Idx with step determined by function call +from collections.abc import Iterable + +from sympy.core.numbers import Number +from sympy.core.assumptions import StdFactKB +from sympy.core import Expr, Tuple, sympify, S +from sympy.core.symbol import _filter_assumptions, Symbol +from sympy.core.logic import fuzzy_bool, fuzzy_not +from sympy.core.sympify import _sympify +from sympy.functions.special.tensor_functions import KroneckerDelta +from sympy.multipledispatch import dispatch +from sympy.utilities.iterables import is_sequence, NotIterable +from sympy.utilities.misc import filldedent + + +class IndexException(Exception): + pass + + +class Indexed(Expr): + """Represents a mathematical object with indices. + + >>> from sympy import Indexed, IndexedBase, Idx, symbols + >>> i, j = symbols('i j', cls=Idx) + >>> Indexed('A', i, j) + A[i, j] + + It is recommended that ``Indexed`` objects be created by indexing ``IndexedBase``: + ``IndexedBase('A')[i, j]`` instead of ``Indexed(IndexedBase('A'), i, j)``. + + >>> A = IndexedBase('A') + >>> a_ij = A[i, j] # Prefer this, + >>> b_ij = Indexed(A, i, j) # over this. + >>> a_ij == b_ij + True + + """ + is_Indexed = True + is_symbol = True + is_Atom = True + + def __new__(cls, base, *args, **kw_args): + from sympy.tensor.array.ndim_array import NDimArray + from sympy.matrices.matrixbase import MatrixBase + + if not args: + raise IndexException("Indexed needs at least one index.") + if isinstance(base, (str, Symbol)): + base = IndexedBase(base) + elif not hasattr(base, '__getitem__') and not isinstance(base, IndexedBase): + raise TypeError(filldedent(""" + The base can only be replaced with a string, Symbol, + IndexedBase or an object with a method for getting + items (i.e. an object with a `__getitem__` method). 
+ """)) + args = list(map(sympify, args)) + if isinstance(base, (NDimArray, Iterable, Tuple, MatrixBase)) and all(i.is_number for i in args): + if len(args) == 1: + return base[args[0]] + else: + return base[args] + + base = _sympify(base) + + obj = Expr.__new__(cls, base, *args, **kw_args) + + IndexedBase._set_assumptions(obj, base.assumptions0) + + return obj + + def _hashable_content(self): + return super()._hashable_content() + tuple(sorted(self.assumptions0.items())) + + @property + def name(self): + return str(self) + + @property + def _diff_wrt(self): + """Allow derivatives with respect to an ``Indexed`` object.""" + return True + + def _eval_derivative(self, wrt): + from sympy.tensor.array.ndim_array import NDimArray + + if isinstance(wrt, Indexed) and wrt.base == self.base: + if len(self.indices) != len(wrt.indices): + msg = "Different # of indices: d({!s})/d({!s})".format(self, + wrt) + raise IndexException(msg) + result = S.One + for index1, index2 in zip(self.indices, wrt.indices): + result *= KroneckerDelta(index1, index2) + return result + elif isinstance(self.base, NDimArray): + from sympy.tensor.array import derive_by_array + return Indexed(derive_by_array(self.base, wrt), *self.args[1:]) + else: + if Tuple(self.indices).has(wrt): + return S.NaN + return S.Zero + + @property + def assumptions0(self): + return {k: v for k, v in self._assumptions.items() if v is not None} + + @property + def base(self): + """Returns the ``IndexedBase`` of the ``Indexed`` object. + + Examples + ======== + + >>> from sympy import Indexed, IndexedBase, Idx, symbols + >>> i, j = symbols('i j', cls=Idx) + >>> Indexed('A', i, j).base + A + >>> B = IndexedBase('B') + >>> B == B[i, j].base + True + + """ + return self.args[0] + + @property + def indices(self): + """ + Returns the indices of the ``Indexed`` object. + + Examples + ======== + + >>> from sympy import Indexed, Idx, symbols + >>> i, j = symbols('i j', cls=Idx) + >>> Indexed('A', i, j).indices + (i, j) + + """ + return self.args[1:] + + @property + def rank(self): + """ + Returns the rank of the ``Indexed`` object. + + Examples + ======== + + >>> from sympy import Indexed, Idx, symbols + >>> i, j, k, l, m = symbols('i:m', cls=Idx) + >>> Indexed('A', i, j).rank + 2 + >>> q = Indexed('A', i, j, k, l, m) + >>> q.rank + 5 + >>> q.rank == len(q.indices) + True + + """ + return len(self.args) - 1 + + @property + def shape(self): + """Returns a list with dimensions of each index. + + Dimensions is a property of the array, not of the indices. Still, if + the ``IndexedBase`` does not define a shape attribute, it is assumed + that the ranges of the indices correspond to the shape of the array. + + >>> from sympy import IndexedBase, Idx, symbols + >>> n, m = symbols('n m', integer=True) + >>> i = Idx('i', m) + >>> j = Idx('j', m) + >>> A = IndexedBase('A', shape=(n, n)) + >>> B = IndexedBase('B') + >>> A[i, j].shape + (n, n) + >>> B[i, j].shape + (m, m) + """ + + if self.base.shape: + return self.base.shape + sizes = [] + for i in self.indices: + upper = getattr(i, 'upper', None) + lower = getattr(i, 'lower', None) + if None in (upper, lower): + raise IndexException(filldedent(""" + Range is not defined for all indices in: %s""" % self)) + try: + size = upper - lower + 1 + except TypeError: + raise IndexException(filldedent(""" + Shape cannot be inferred from Idx with + undefined range: %s""" % self)) + sizes.append(size) + return Tuple(*sizes) + + @property + def ranges(self): + """Returns a list of tuples with lower and upper range of each index. 
+ + If an index does not define the data members upper and lower, the + corresponding slot in the list contains ``None`` instead of a tuple. + + Examples + ======== + + >>> from sympy import Indexed,Idx, symbols + >>> Indexed('A', Idx('i', 2), Idx('j', 4), Idx('k', 8)).ranges + [(0, 1), (0, 3), (0, 7)] + >>> Indexed('A', Idx('i', 3), Idx('j', 3), Idx('k', 3)).ranges + [(0, 2), (0, 2), (0, 2)] + >>> x, y, z = symbols('x y z', integer=True) + >>> Indexed('A', x, y, z).ranges + [None, None, None] + + """ + ranges = [] + sentinel = object() + for i in self.indices: + upper = getattr(i, 'upper', sentinel) + lower = getattr(i, 'lower', sentinel) + if sentinel not in (upper, lower): + ranges.append((lower, upper)) + else: + ranges.append(None) + return ranges + + def _sympystr(self, p): + indices = list(map(p.doprint, self.indices)) + return "%s[%s]" % (p.doprint(self.base), ", ".join(indices)) + + @property + def free_symbols(self): + base_free_symbols = self.base.free_symbols + indices_free_symbols = { + fs for i in self.indices for fs in i.free_symbols} + if base_free_symbols: + return {self} | base_free_symbols | indices_free_symbols + else: + return indices_free_symbols + + @property + def expr_free_symbols(self): + from sympy.utilities.exceptions import sympy_deprecation_warning + sympy_deprecation_warning(""" + The expr_free_symbols property is deprecated. Use free_symbols to get + the free symbols of an expression. + """, + deprecated_since_version="1.9", + active_deprecations_target="deprecated-expr-free-symbols") + + return {self} + + +class IndexedBase(Expr, NotIterable): + """Represent the base or stem of an indexed object + + The IndexedBase class represent an array that contains elements. The main purpose + of this class is to allow the convenient creation of objects of the Indexed + class. The __getitem__ method of IndexedBase returns an instance of + Indexed. Alone, without indices, the IndexedBase class can be used as a + notation for e.g. matrix equations, resembling what you could do with the + Symbol class. But, the IndexedBase class adds functionality that is not + available for Symbol instances: + + - An IndexedBase object can optionally store shape information. This can + be used in to check array conformance and conditions for numpy + broadcasting. (TODO) + - An IndexedBase object implements syntactic sugar that allows easy symbolic + representation of array operations, using implicit summation of + repeated indices. + - The IndexedBase object symbolizes a mathematical structure equivalent + to arrays, and is recognized as such for code generation and automatic + compilation and wrapping. + + >>> from sympy.tensor import IndexedBase, Idx + >>> from sympy import symbols + >>> A = IndexedBase('A'); A + A + >>> type(A) + + + When an IndexedBase object receives indices, it returns an array with named + axes, represented by an Indexed object: + + >>> i, j = symbols('i j', integer=True) + >>> A[i, j, 2] + A[i, j, 2] + >>> type(A[i, j, 2]) + + + The IndexedBase constructor takes an optional shape argument. If given, + it overrides any shape information in the indices. (But not the index + ranges!) 
+ + >>> m, n, o, p = symbols('m n o p', integer=True) + >>> i = Idx('i', m) + >>> j = Idx('j', n) + >>> A[i, j].shape + (m, n) + >>> B = IndexedBase('B', shape=(o, p)) + >>> B[i, j].shape + (o, p) + + Assumptions can be specified with keyword arguments the same way as for Symbol: + + >>> A_real = IndexedBase('A', real=True) + >>> A_real.is_real + True + >>> A != A_real + True + + Assumptions can also be inherited if a Symbol is used to initialize the IndexedBase: + + >>> I = symbols('I', integer=True) + >>> C_inherit = IndexedBase(I) + >>> C_explicit = IndexedBase('I', integer=True) + >>> C_inherit == C_explicit + True + """ + is_symbol = True + is_Atom = True + + @staticmethod + def _set_assumptions(obj, assumptions): + """Set assumptions on obj, making sure to apply consistent values.""" + tmp_asm_copy = assumptions.copy() + is_commutative = fuzzy_bool(assumptions.get('commutative', True)) + assumptions['commutative'] = is_commutative + obj._assumptions = StdFactKB(assumptions) + obj._assumptions._generator = tmp_asm_copy # Issue #8873 + + def __new__(cls, label, shape=None, *, offset=S.Zero, strides=None, **kw_args): + from sympy.matrices.matrixbase import MatrixBase + from sympy.tensor.array.ndim_array import NDimArray + + assumptions, kw_args = _filter_assumptions(kw_args) + if isinstance(label, str): + label = Symbol(label, **assumptions) + elif isinstance(label, Symbol): + assumptions = label._merge(assumptions) + elif isinstance(label, (MatrixBase, NDimArray)): + return label + elif isinstance(label, Iterable): + return _sympify(label) + else: + label = _sympify(label) + + if is_sequence(shape): + shape = Tuple(*shape) + elif shape is not None: + shape = Tuple(shape) + + if shape is not None: + obj = Expr.__new__(cls, label, shape) + else: + obj = Expr.__new__(cls, label) + obj._shape = shape + obj._offset = offset + obj._strides = strides + obj._name = str(label) + + IndexedBase._set_assumptions(obj, assumptions) + return obj + + @property + def name(self): + return self._name + + def _hashable_content(self): + return super()._hashable_content() + tuple(sorted(self.assumptions0.items())) + + @property + def assumptions0(self): + return {k: v for k, v in self._assumptions.items() if v is not None} + + def __getitem__(self, indices, **kw_args): + if is_sequence(indices): + # Special case needed because M[*my_tuple] is a syntax error. + if self.shape and len(self.shape) != len(indices): + raise IndexException("Rank mismatch.") + return Indexed(self, *indices, **kw_args) + else: + if self.shape and len(self.shape) != 1: + raise IndexException("Rank mismatch.") + return Indexed(self, indices, **kw_args) + + @property + def shape(self): + """Returns the shape of the ``IndexedBase`` object. + + Examples + ======== + + >>> from sympy import IndexedBase, Idx + >>> from sympy.abc import x, y + >>> IndexedBase('A', shape=(x, y)).shape + (x, y) + + Note: If the shape of the ``IndexedBase`` is specified, it will override + any shape information given by the indices. + + >>> A = IndexedBase('A', shape=(x, y)) + >>> B = IndexedBase('B') + >>> i = Idx('i', 2) + >>> j = Idx('j', 1) + >>> A[i, j].shape + (x, y) + >>> B[i, j].shape + (2, 1) + + """ + return self._shape + + @property + def strides(self): + """Returns the strided scheme for the ``IndexedBase`` object. + + Normally this is a tuple denoting the number of + steps to take in the respective dimension when traversing + an array. For code generation purposes strides='C' and + strides='F' can also be used. 
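As a sketch of the rank checking performed by __getitem__ above (illustrative names; IndexException is the exception class defined earlier in this module):

from sympy import IndexedBase, symbols
from sympy.tensor.indexed import IndexException

i, j = symbols('i j', integer=True)
A = IndexedBase('A', shape=(3, 3))

A[i, j]              # fine: two indices for a rank-2 shape
try:
    A[i]             # only one index for a rank-2 shape
except IndexException as exc:
    print(exc)       # Rank mismatch.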
+ + strides='C' would mean that code printer would unroll + in row-major order and 'F' means unroll in column major + order. + + """ + + return self._strides + + @property + def offset(self): + """Returns the offset for the ``IndexedBase`` object. + + This is the value added to the resulting index when the + 2D Indexed object is unrolled to a 1D form. Used in code + generation. + + Examples + ========== + >>> from sympy.printing import ccode + >>> from sympy.tensor import IndexedBase, Idx + >>> from sympy import symbols + >>> l, m, n, o = symbols('l m n o', integer=True) + >>> A = IndexedBase('A', strides=(l, m, n), offset=o) + >>> i, j, k = map(Idx, 'ijk') + >>> ccode(A[i, j, k]) + 'A[l*i + m*j + n*k + o]' + + """ + return self._offset + + @property + def label(self): + """Returns the label of the ``IndexedBase`` object. + + Examples + ======== + + >>> from sympy import IndexedBase + >>> from sympy.abc import x, y + >>> IndexedBase('A', shape=(x, y)).label + A + + """ + return self.args[0] + + def _sympystr(self, p): + return p.doprint(self.label) + + +class Idx(Expr): + """Represents an integer index as an ``Integer`` or integer expression. + + There are a number of ways to create an ``Idx`` object. The constructor + takes two arguments: + + ``label`` + An integer or a symbol that labels the index. + ``range`` + Optionally you can specify a range as either + + * ``Symbol`` or integer: This is interpreted as a dimension. Lower and + upper bounds are set to ``0`` and ``range - 1``, respectively. + * ``tuple``: The two elements are interpreted as the lower and upper + bounds of the range, respectively. + + Note: bounds of the range are assumed to be either integer or infinite (oo + and -oo are allowed to specify an unbounded range). If ``n`` is given as a + bound, then ``n.is_integer`` must not return false. + + For convenience, if the label is given as a string it is automatically + converted to an integer symbol. (Note: this conversion is not done for + range or dimension arguments.) 
+ + Examples + ======== + + >>> from sympy import Idx, symbols, oo + >>> n, i, L, U = symbols('n i L U', integer=True) + + If a string is given for the label an integer ``Symbol`` is created and the + bounds are both ``None``: + + >>> idx = Idx('qwerty'); idx + qwerty + >>> idx.lower, idx.upper + (None, None) + + Both upper and lower bounds can be specified: + + >>> idx = Idx(i, (L, U)); idx + i + >>> idx.lower, idx.upper + (L, U) + + When only a single bound is given it is interpreted as the dimension + and the lower bound defaults to 0: + + >>> idx = Idx(i, n); idx.lower, idx.upper + (0, n - 1) + >>> idx = Idx(i, 4); idx.lower, idx.upper + (0, 3) + >>> idx = Idx(i, oo); idx.lower, idx.upper + (0, oo) + + """ + + is_integer = True + is_finite = True + is_real = True + is_symbol = True + is_Atom = True + _diff_wrt = True + + def __new__(cls, label, range=None, **kw_args): + + if isinstance(label, str): + label = Symbol(label, integer=True) + label, range = list(map(sympify, (label, range))) + + if label.is_Number: + if not label.is_integer: + raise TypeError("Index is not an integer number.") + return label + + if not label.is_integer: + raise TypeError("Idx object requires an integer label.") + + elif is_sequence(range): + if len(range) != 2: + raise ValueError(filldedent(""" + Idx range tuple must have length 2, but got %s""" % len(range))) + for bound in range: + if (bound.is_integer is False and bound is not S.Infinity + and bound is not S.NegativeInfinity): + raise TypeError("Idx object requires integer bounds.") + args = label, Tuple(*range) + elif isinstance(range, Expr): + if range is not S.Infinity and fuzzy_not(range.is_integer): + raise TypeError("Idx object requires an integer dimension.") + args = label, Tuple(0, range - 1) + elif range: + raise TypeError(filldedent(""" + The range must be an ordered iterable or + integer SymPy expression.""")) + else: + args = label, + + obj = Expr.__new__(cls, *args, **kw_args) + obj._assumptions["finite"] = True + obj._assumptions["real"] = True + return obj + + @property + def label(self): + """Returns the label (Integer or integer expression) of the Idx object. + + Examples + ======== + + >>> from sympy import Idx, Symbol + >>> x = Symbol('x', integer=True) + >>> Idx(x).label + x + >>> j = Symbol('j', integer=True) + >>> Idx(j).label + j + >>> Idx(j + 1).label + j + 1 + + """ + return self.args[0] + + @property + def lower(self): + """Returns the lower bound of the ``Idx``. + + Examples + ======== + + >>> from sympy import Idx + >>> Idx('j', 2).lower + 0 + >>> Idx('j', 5).lower + 0 + >>> Idx('j').lower is None + True + + """ + try: + return self.args[1][0] + except IndexError: + return + + @property + def upper(self): + """Returns the upper bound of the ``Idx``. 
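A brief sketch of the label validation in Idx.__new__ above (expected outcomes noted in comments):

from sympy import Idx, symbols

x = symbols('x')             # no integer assumption on the label
try:
    Idx(x)
except TypeError as exc:
    print(exc)               # Idx object requires an integer label.

Idx(3)                       # expected: 3 -- a purely numeric label is returned as-is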
+ + Examples + ======== + + >>> from sympy import Idx + >>> Idx('j', 2).upper + 1 + >>> Idx('j', 5).upper + 4 + >>> Idx('j').upper is None + True + + """ + try: + return self.args[1][1] + except IndexError: + return + + def _sympystr(self, p): + return p.doprint(self.label) + + @property + def name(self): + return self.label.name if self.label.is_Symbol else str(self.label) + + @property + def free_symbols(self): + return {self} + + +@dispatch(Idx, Idx) +def _eval_is_ge(lhs, rhs): # noqa:F811 + + other_upper = rhs if rhs.upper is None else rhs.upper + other_lower = rhs if rhs.lower is None else rhs.lower + + if lhs.lower is not None and (lhs.lower >= other_upper) == True: + return True + if lhs.upper is not None and (lhs.upper < other_lower) == True: + return False + return None + + +@dispatch(Idx, Number) # type:ignore +def _eval_is_ge(lhs, rhs): # noqa:F811 + + other_upper = rhs + other_lower = rhs + + if lhs.lower is not None and (lhs.lower >= other_upper) == True: + return True + if lhs.upper is not None and (lhs.upper < other_lower) == True: + return False + return None + + +@dispatch(Number, Idx) # type:ignore +def _eval_is_ge(lhs, rhs): # noqa:F811 + + other_upper = lhs + other_lower = lhs + + if rhs.upper is not None and (rhs.upper <= other_lower) == True: + return True + if rhs.lower is not None and (rhs.lower > other_upper) == True: + return False + return None diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/tensor.py b/evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..67e287db8625f09ebeb813b89a1424b1787bc5fb --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/tensor.py @@ -0,0 +1,4979 @@ +""" +This module defines tensors with abstract index notation. + +The abstract index notation has been first formalized by Penrose. + +Tensor indices are formal objects, with a tensor type; there is no +notion of index range, it is only possible to assign the dimension, +used to trace the Kronecker delta; the dimension can be a Symbol. + +The Einstein summation convention is used. +The covariant indices are indicated with a minus sign in front of the index. + +For instance the tensor ``t = p(a)*A(b,c)*q(-c)`` has the index ``c`` +contracted. + +A tensor expression ``t`` can be called; called with its +indices in sorted order it is equal to itself: +in the above example ``t(a, b) == t``; +one can call ``t`` with different indices; ``t(c, d) == p(c)*A(d,a)*q(-a)``. + +The contracted indices are dummy indices, internally they have no name, +the indices being represented by a graph-like structure. + +Tensors are put in canonical form using ``canon_bp``, which uses +the Butler-Portugal algorithm for canonicalization using the monoterm +symmetries of the tensors. + +If there is a (anti)symmetric metric, the indices can be raised and +lowered when the tensor is put in canonical form. 
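A minimal end-to-end sketch of the workflow described in this module docstring (the names Lorentz, p, A and the indices are illustrative; printed forms are not asserted):

from sympy.tensor.tensor import (TensorIndexType, tensor_indices,
                                 TensorHead, TensorSymmetry)

Lorentz = TensorIndexType('Lorentz', dummy_name='L')
a, b, c = tensor_indices('a b c', Lorentz)

p = TensorHead('p', [Lorentz])
A = TensorHead('A', [Lorentz, Lorentz], TensorSymmetry.fully_symmetric(-2))

t = p(a)*A(b, c)*p(-c)     # c appears both up and down, so it is contracted
t.get_free_indices()       # the free indices, here a and b
t.canon_bp()               # canonical form via the Butler-Portugal algorithm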
+""" + +from __future__ import annotations +from typing import Any +from functools import reduce +from math import prod + +from abc import abstractmethod, ABC +from collections import defaultdict +import operator +import itertools +from sympy.core.numbers import (Integer, Rational) +from sympy.combinatorics import Permutation +from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, \ + bsgs_direct_product, canonicalize, riemann_bsgs +from sympy.core import Basic, Expr, sympify, Add, Mul, S +from sympy.core.cache import clear_cache +from sympy.core.containers import Tuple, Dict +from sympy.core.sorting import default_sort_key +from sympy.core.symbol import Symbol, symbols +from sympy.core.sympify import CantSympify, _sympify +from sympy.core.operations import AssocOp +from sympy.external.gmpy import SYMPY_INTS +from sympy.matrices import eye +from sympy.utilities.exceptions import (sympy_deprecation_warning, + SymPyDeprecationWarning, + ignore_warnings) +from sympy.utilities.decorator import memoize_property, deprecated +from sympy.utilities.iterables import sift + + +def deprecate_data(): + sympy_deprecation_warning( + """ + The data attribute of TensorIndexType is deprecated. Use The + replace_with_arrays() method instead. + """, + deprecated_since_version="1.4", + active_deprecations_target="deprecated-tensorindextype-attrs", + stacklevel=4, + ) + +def deprecate_fun_eval(): + sympy_deprecation_warning( + """ + The Tensor.fun_eval() method is deprecated. Use + Tensor.substitute_indices() instead. + """, + deprecated_since_version="1.5", + active_deprecations_target="deprecated-tensor-fun-eval", + stacklevel=4, + ) + + +def deprecate_call(): + sympy_deprecation_warning( + """ + Calling a tensor like Tensor(*indices) is deprecated. Use + Tensor.substitute_indices() instead. + """, + deprecated_since_version="1.5", + active_deprecations_target="deprecated-tensor-fun-eval", + stacklevel=4, + ) + + +class _IndexStructure(CantSympify): + """ + This class handles the indices (free and dummy ones). It contains the + algorithms to manage the dummy indices replacements and contractions of + free indices under multiplications of tensor expressions, as well as stuff + related to canonicalization sorting, getting the permutation of the + expression and so on. It also includes tools to get the ``TensorIndex`` + objects corresponding to the given index structure. + """ + + def __init__(self, free, dum, index_types, indices, canon_bp=False): + self.free = free + self.dum = dum + self.index_types = index_types + self.indices = indices + self._ext_rank = len(self.free) + 2*len(self.dum) + self.dum.sort(key=lambda x: x[0]) + + @staticmethod + def from_indices(*indices): + """ + Create a new ``_IndexStructure`` object from a list of ``indices``. + + Explanation + =========== + + ``indices`` ``TensorIndex`` objects, the indices. Contractions are + detected upon construction. 
+ + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, _IndexStructure + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> m0, m1, m2, m3 = tensor_indices('m0,m1,m2,m3', Lorentz) + >>> _IndexStructure.from_indices(m0, m1, -m1, m3) + _IndexStructure([(m0, 0), (m3, 3)], [(1, 2)], [Lorentz, Lorentz, Lorentz, Lorentz]) + """ + + free, dum = _IndexStructure._free_dum_from_indices(*indices) + index_types = [i.tensor_index_type for i in indices] + indices = _IndexStructure._replace_dummy_names(indices, free, dum) + return _IndexStructure(free, dum, index_types, indices) + + @staticmethod + def from_components_free_dum(components, free, dum): + index_types = [] + for component in components: + index_types.extend(component.index_types) + indices = _IndexStructure.generate_indices_from_free_dum_index_types(free, dum, index_types) + return _IndexStructure(free, dum, index_types, indices) + + @staticmethod + def _free_dum_from_indices(*indices): + """ + Convert ``indices`` into ``free``, ``dum`` for single component tensor. + + Explanation + =========== + + ``free`` list of tuples ``(index, pos, 0)``, + where ``pos`` is the position of index in + the list of indices formed by the component tensors + + ``dum`` list of tuples ``(pos_contr, pos_cov, 0, 0)`` + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, \ + _IndexStructure + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> m0, m1, m2, m3 = tensor_indices('m0,m1,m2,m3', Lorentz) + >>> _IndexStructure._free_dum_from_indices(m0, m1, -m1, m3) + ([(m0, 0), (m3, 3)], [(1, 2)]) + """ + n = len(indices) + if n == 1: + return [(indices[0], 0)], [] + + # find the positions of the free indices and of the dummy indices + free = [True]*len(indices) + index_dict = {} + dum = [] + for i, index in enumerate(indices): + name = index.name + typ = index.tensor_index_type + contr = index.is_up + if (name, typ) in index_dict: + # found a pair of dummy indices + is_contr, pos = index_dict[(name, typ)] + # check consistency and update free + if is_contr: + if contr: + raise ValueError('two equal contravariant indices in slots %d and %d' %(pos, i)) + else: + free[pos] = False + free[i] = False + else: + if contr: + free[pos] = False + free[i] = False + else: + raise ValueError('two equal covariant indices in slots %d and %d' %(pos, i)) + if contr: + dum.append((i, pos)) + else: + dum.append((pos, i)) + else: + index_dict[(name, typ)] = index.is_up, i + + free = [(index, i) for i, index in enumerate(indices) if free[i]] + free.sort() + return free, dum + + def get_indices(self): + """ + Get a list of indices, creating new tensor indices to complete dummy indices. 
+ """ + return self.indices[:] + + @staticmethod + def generate_indices_from_free_dum_index_types(free, dum, index_types): + indices = [None]*(len(free)+2*len(dum)) + for idx, pos in free: + indices[pos] = idx + + generate_dummy_name = _IndexStructure._get_generator_for_dummy_indices(free) + for pos1, pos2 in dum: + typ1 = index_types[pos1] + indname = generate_dummy_name(typ1) + indices[pos1] = TensorIndex(indname, typ1, True) + indices[pos2] = TensorIndex(indname, typ1, False) + + return _IndexStructure._replace_dummy_names(indices, free, dum) + + @staticmethod + def _get_generator_for_dummy_indices(free): + cdt = defaultdict(int) + # if the free indices have names with dummy_name, start with an + # index higher than those for the dummy indices + # to avoid name collisions + for indx, ipos in free: + if indx.name.split('_')[0] == indx.tensor_index_type.dummy_name: + cdt[indx.tensor_index_type] = max(cdt[indx.tensor_index_type], int(indx.name.split('_')[1]) + 1) + + def dummy_name_gen(tensor_index_type): + nd = str(cdt[tensor_index_type]) + cdt[tensor_index_type] += 1 + return tensor_index_type.dummy_name + '_' + nd + + return dummy_name_gen + + @staticmethod + def _replace_dummy_names(indices, free, dum): + dum.sort(key=lambda x: x[0]) + new_indices = list(indices) + assert len(indices) == len(free) + 2*len(dum) + generate_dummy_name = _IndexStructure._get_generator_for_dummy_indices(free) + for ipos1, ipos2 in dum: + typ1 = new_indices[ipos1].tensor_index_type + indname = generate_dummy_name(typ1) + new_indices[ipos1] = TensorIndex(indname, typ1, True) + new_indices[ipos2] = TensorIndex(indname, typ1, False) + return new_indices + + def get_free_indices(self) -> list[TensorIndex]: + """ + Get a list of free indices. + """ + # get sorted indices according to their position: + free = sorted(self.free, key=lambda x: x[1]) + return [i[0] for i in free] + + def __str__(self): + return "_IndexStructure({}, {}, {})".format(self.free, self.dum, self.index_types) + + def __repr__(self): + return self.__str__() + + def _get_sorted_free_indices_for_canon(self): + sorted_free = self.free[:] + sorted_free.sort(key=lambda x: x[0]) + return sorted_free + + def _get_sorted_dum_indices_for_canon(self): + return sorted(self.dum, key=lambda x: x[0]) + + def _get_lexicographically_sorted_index_types(self): + permutation = self.indices_canon_args()[0] + index_types = [None]*self._ext_rank + for i, it in enumerate(self.index_types): + index_types[permutation(i)] = it + return index_types + + def _get_lexicographically_sorted_indices(self): + permutation = self.indices_canon_args()[0] + indices = [None]*self._ext_rank + for i, it in enumerate(self.indices): + indices[permutation(i)] = it + return indices + + def perm2tensor(self, g, is_canon_bp=False): + """ + Returns a ``_IndexStructure`` instance corresponding to the permutation ``g``. 
+ + Explanation + =========== + + ``g`` permutation corresponding to the tensor in the representation + used in canonicalization + + ``is_canon_bp`` if True, then ``g`` is the permutation + corresponding to the canonical form of the tensor + """ + sorted_free = [i[0] for i in self._get_sorted_free_indices_for_canon()] + lex_index_types = self._get_lexicographically_sorted_index_types() + lex_indices = self._get_lexicographically_sorted_indices() + nfree = len(sorted_free) + rank = self._ext_rank + dum = [[None]*2 for i in range((rank - nfree)//2)] + free = [] + + index_types = [None]*rank + indices = [None]*rank + for i in range(rank): + gi = g[i] + index_types[i] = lex_index_types[gi] + indices[i] = lex_indices[gi] + if gi < nfree: + ind = sorted_free[gi] + assert index_types[i] == sorted_free[gi].tensor_index_type + free.append((ind, i)) + else: + j = gi - nfree + idum, cov = divmod(j, 2) + if cov: + dum[idum][1] = i + else: + dum[idum][0] = i + dum = [tuple(x) for x in dum] + + return _IndexStructure(free, dum, index_types, indices) + + def indices_canon_args(self): + """ + Returns ``(g, dummies, msym, v)``, the entries of ``canonicalize`` + + See ``canonicalize`` in ``tensor_can.py`` in combinatorics module. + """ + # to be called after sorted_components + from sympy.combinatorics.permutations import _af_new + n = self._ext_rank + g = [None]*n + [n, n+1] + + # Converts the symmetry of the metric into msym from .canonicalize() + # method in the combinatorics module + def metric_symmetry_to_msym(metric): + if metric is None: + return None + sym = metric.symmetry + if sym == TensorSymmetry.fully_symmetric(2): + return 0 + if sym == TensorSymmetry.fully_symmetric(-2): + return 1 + return None + + # ordered indices: first the free indices, ordered by types + # then the dummy indices, ordered by types and contravariant before + # covariant + # g[position in tensor] = position in ordered indices + for i, (indx, ipos) in enumerate(self._get_sorted_free_indices_for_canon()): + g[ipos] = i + pos = len(self.free) + j = len(self.free) + dummies = [] + prev = None + a = [] + msym = [] + for ipos1, ipos2 in self._get_sorted_dum_indices_for_canon(): + g[ipos1] = j + g[ipos2] = j + 1 + j += 2 + typ = self.index_types[ipos1] + if typ != prev: + if a: + dummies.append(a) + a = [pos, pos + 1] + prev = typ + msym.append(metric_symmetry_to_msym(typ.metric)) + else: + a.extend([pos, pos + 1]) + pos += 2 + if a: + dummies.append(a) + + return _af_new(g), dummies, msym + + +def components_canon_args(components): + numtyp = [] + prev = None + for t in components: + if t == prev: + numtyp[-1][1] += 1 + else: + prev = t + numtyp.append([prev, 1]) + v = [] + for h, n in numtyp: + if h.comm in (0, 1): + comm = h.comm + else: + comm = TensorManager.get_comm(h.comm, h.comm) + v.append((h.symmetry.base, h.symmetry.generators, n, comm)) + return v + + +class _TensorDataLazyEvaluator(CantSympify): + """ + EXPERIMENTAL: do not rely on this class, it may change without deprecation + warnings in future versions of SymPy. + + Explanation + =========== + + This object contains the logic to associate components data to a tensor + expression. Components data are set via the ``.data`` property of tensor + expressions, is stored inside this class as a mapping between the tensor + expression and the ``ndarray``. 
+ + Computations are executed lazily: whereas the tensor expressions can have + contractions, tensor products, and additions, components data are not + computed until they are accessed by reading the ``.data`` property + associated to the tensor expression. + """ + _substitutions_dict: dict[Any, Any] = {} + _substitutions_dict_tensmul: dict[Any, Any] = {} + + def __getitem__(self, key): + dat = self._get(key) + if dat is None: + return None + + from .array import NDimArray + if not isinstance(dat, NDimArray): + return dat + + if dat.rank() == 0: + return dat[()] + elif dat.rank() == 1 and len(dat) == 1: + return dat[0] + return dat + + def _get(self, key): + """ + Retrieve ``data`` associated with ``key``. + + Explanation + =========== + + This algorithm looks into ``self._substitutions_dict`` for all + ``TensorHead`` in the ``TensExpr`` (or just ``TensorHead`` if key is a + TensorHead instance). It reconstructs the components data that the + tensor expression should have by performing on components data the + operations that correspond to the abstract tensor operations applied. + + Metric tensor is handled in a different manner: it is pre-computed in + ``self._substitutions_dict_tensmul``. + """ + if key in self._substitutions_dict: + return self._substitutions_dict[key] + + if isinstance(key, TensorHead): + return None + + if isinstance(key, Tensor): + # special case to handle metrics. Metric tensors cannot be + # constructed through contraction by the metric, their + # components show if they are a matrix or its inverse. + signature = tuple([i.is_up for i in key.get_indices()]) + srch = (key.component,) + signature + if srch in self._substitutions_dict_tensmul: + return self._substitutions_dict_tensmul[srch] + array_list = [self.data_from_tensor(key)] + return self.data_contract_dum(array_list, key.dum, key.ext_rank) + + if isinstance(key, TensMul): + tensmul_args = key.args + if len(tensmul_args) == 1 and len(tensmul_args[0].components) == 1: + # special case to handle metrics. Metric tensors cannot be + # constructed through contraction by the metric, their + # components show if they are a matrix or its inverse. 
+ signature = tuple([i.is_up for i in tensmul_args[0].get_indices()]) + srch = (tensmul_args[0].components[0],) + signature + if srch in self._substitutions_dict_tensmul: + return self._substitutions_dict_tensmul[srch] + #data_list = [self.data_from_tensor(i) for i in tensmul_args if isinstance(i, TensExpr)] + data_list = [self.data_from_tensor(i) if isinstance(i, Tensor) else i.data for i in tensmul_args if isinstance(i, TensExpr)] + coeff = prod([i for i in tensmul_args if not isinstance(i, TensExpr)]) + if all(i is None for i in data_list): + return None + if any(i is None for i in data_list): + raise ValueError("Mixing tensors with associated components "\ + "data with tensors without components data") + data_result = self.data_contract_dum(data_list, key.dum, key.ext_rank) + return coeff*data_result + + if isinstance(key, TensAdd): + data_list = [] + free_args_list = [] + for arg in key.args: + if isinstance(arg, TensExpr): + data_list.append(arg.data) + free_args_list.append([x[0] for x in arg.free]) + else: + data_list.append(arg) + free_args_list.append([]) + if all(i is None for i in data_list): + return None + if any(i is None for i in data_list): + raise ValueError("Mixing tensors with associated components "\ + "data with tensors without components data") + + sum_list = [] + from .array import permutedims + for data, free_args in zip(data_list, free_args_list): + if len(free_args) < 2: + sum_list.append(data) + else: + free_args_pos = {y: x for x, y in enumerate(free_args)} + axes = [free_args_pos[arg] for arg in key.free_args] + sum_list.append(permutedims(data, axes)) + return reduce(lambda x, y: x+y, sum_list) + + return None + + @staticmethod + def data_contract_dum(ndarray_list, dum, ext_rank): + from .array import tensorproduct, tensorcontraction, MutableDenseNDimArray + arrays = list(map(MutableDenseNDimArray, ndarray_list)) + prodarr = tensorproduct(*arrays) + return tensorcontraction(prodarr, *dum) + + def data_tensorhead_from_tensmul(self, data, tensmul, tensorhead): + """ + This method is used when assigning components data to a ``TensMul`` + object, it converts components data to a fully contravariant ndarray, + which is then stored according to the ``TensorHead`` key. + """ + if data is None: + return None + + return self._correct_signature_from_indices( + data, + tensmul.get_indices(), + tensmul.free, + tensmul.dum, + True) + + def data_from_tensor(self, tensor): + """ + This method corrects the components data to the right signature + (covariant/contravariant) using the metric associated with each + ``TensorIndexType``. + """ + tensorhead = tensor.component + + if tensorhead.data is None: + return None + + return self._correct_signature_from_indices( + tensorhead.data, + tensor.get_indices(), + tensor.free, + tensor.dum) + + def _assign_data_to_tensor_expr(self, key, data): + if isinstance(key, TensAdd): + raise ValueError('cannot assign data to TensAdd') + # here it is assumed that `key` is a `TensMul` instance. 
+ if len(key.components) != 1: + raise ValueError('cannot assign data to TensMul with multiple components') + tensorhead = key.components[0] + newdata = self.data_tensorhead_from_tensmul(data, key, tensorhead) + return tensorhead, newdata + + def _check_permutations_on_data(self, tens, data): + from .array import permutedims + from .array.arrayop import Flatten + + if isinstance(tens, TensorHead): + rank = tens.rank + generators = tens.symmetry.generators + elif isinstance(tens, Tensor): + rank = tens.rank + generators = tens.components[0].symmetry.generators + elif isinstance(tens, TensorIndexType): + rank = tens.metric.rank + generators = tens.metric.symmetry.generators + + # Every generator is a permutation, check that by permuting the array + # by that permutation, the array will be the same, except for a + # possible sign change if the permutation admits it. + for gener in generators: + sign_change = +1 if (gener(rank) == rank) else -1 + data_swapped = data + last_data = data + permute_axes = list(map(gener, range(rank))) + # the order of a permutation is the number of times to get the + # identity by applying that permutation. + for i in range(gener.order()-1): + data_swapped = permutedims(data_swapped, permute_axes) + # if any value in the difference array is non-zero, raise an error: + if any(Flatten(last_data - sign_change*data_swapped)): + raise ValueError("Component data symmetry structure error") + last_data = data_swapped + + def __setitem__(self, key, value): + """ + Set the components data of a tensor object/expression. + + Explanation + =========== + + Components data are transformed to the all-contravariant form and stored + with the corresponding ``TensorHead`` object. If a ``TensorHead`` object + cannot be uniquely identified, it will raise an error. + """ + data = _TensorDataLazyEvaluator.parse_data(value) + self._check_permutations_on_data(key, data) + + # TensorHead and TensorIndexType can be assigned data directly, while + # TensMul must first convert data to a fully contravariant form, and + # assign it to its corresponding TensorHead single component. + if not isinstance(key, (TensorHead, TensorIndexType)): + key, data = self._assign_data_to_tensor_expr(key, data) + + if isinstance(key, TensorHead): + for dim, indextype in zip(data.shape, key.index_types): + if indextype.data is None: + raise ValueError("index type {} has no components data"\ + " associated (needed to raise/lower index)".format(indextype)) + if not indextype.dim.is_number: + continue + if dim != indextype.dim: + raise ValueError("wrong dimension of ndarray") + self._substitutions_dict[key] = data + + def __delitem__(self, key): + del self._substitutions_dict[key] + + def __contains__(self, key): + return key in self._substitutions_dict + + def add_metric_data(self, metric, data): + """ + Assign data to the ``metric`` tensor. The metric tensor behaves in an + anomalous way when raising and lowering indices. + + Explanation + =========== + + A fully covariant metric is the inverse transpose of the fully + contravariant metric (it is meant matrix inverse). If the metric is + symmetric, the transpose is not necessary and mixed + covariant/contravariant metrics are Kronecker deltas. + """ + # hard assignment, data should not be added to `TensorHead` for metric: + # the problem with `TensorHead` is that the metric is anomalous, i.e. + # raising and lowering the index means considering the metric or its + # inverse, this is not the case for other tensors. 
+ self._substitutions_dict_tensmul[metric, True, True] = data + inverse_transpose = self.inverse_transpose_matrix(data) + # in symmetric spaces, the transpose is the same as the original matrix, + # the full covariant metric tensor is the inverse transpose, so this + # code will be able to handle non-symmetric metrics. + self._substitutions_dict_tensmul[metric, False, False] = inverse_transpose + # now mixed cases, these are identical to the unit matrix if the metric + # is symmetric. + m = data.tomatrix() + invt = inverse_transpose.tomatrix() + self._substitutions_dict_tensmul[metric, True, False] = m * invt + self._substitutions_dict_tensmul[metric, False, True] = invt * m + + @staticmethod + def _flip_index_by_metric(data, metric, pos): + from .array import tensorproduct, tensorcontraction + + mdim = metric.rank() + ddim = data.rank() + + if pos == 0: + data = tensorcontraction( + tensorproduct( + metric, + data + ), + (1, mdim+pos) + ) + else: + data = tensorcontraction( + tensorproduct( + data, + metric + ), + (pos, ddim) + ) + return data + + @staticmethod + def inverse_matrix(ndarray): + m = ndarray.tomatrix().inv() + return _TensorDataLazyEvaluator.parse_data(m) + + @staticmethod + def inverse_transpose_matrix(ndarray): + m = ndarray.tomatrix().inv().T + return _TensorDataLazyEvaluator.parse_data(m) + + @staticmethod + def _correct_signature_from_indices(data, indices, free, dum, inverse=False): + """ + Utility function to correct the values inside the components data + ndarray according to whether indices are covariant or contravariant. + + It uses the metric matrix to lower values of covariant indices. + """ + # change the ndarray values according covariantness/contravariantness of the indices + # use the metric + for i, indx in enumerate(indices): + if not indx.is_up and not inverse: + data = _TensorDataLazyEvaluator._flip_index_by_metric(data, indx.tensor_index_type.data, i) + elif not indx.is_up and inverse: + data = _TensorDataLazyEvaluator._flip_index_by_metric( + data, + _TensorDataLazyEvaluator.inverse_matrix(indx.tensor_index_type.data), + i + ) + return data + + @staticmethod + def _sort_data_axes(old, new): + from .array import permutedims + + new_data = old.data.copy() + + old_free = [i[0] for i in old.free] + new_free = [i[0] for i in new.free] + + for i in range(len(new_free)): + for j in range(i, len(old_free)): + if old_free[j] == new_free[i]: + old_free[i], old_free[j] = old_free[j], old_free[i] + new_data = permutedims(new_data, (i, j)) + break + return new_data + + @staticmethod + def add_rearrange_tensmul_parts(new_tensmul, old_tensmul): + def sorted_compo(): + return _TensorDataLazyEvaluator._sort_data_axes(old_tensmul, new_tensmul) + + _TensorDataLazyEvaluator._substitutions_dict[new_tensmul] = sorted_compo() + + @staticmethod + def parse_data(data): + """ + Transform ``data`` to array. The parameter ``data`` may + contain data in various formats, e.g. nested lists, SymPy ``Matrix``, + and so on. 
+ + Examples + ======== + + >>> from sympy.tensor.tensor import _TensorDataLazyEvaluator + >>> _TensorDataLazyEvaluator.parse_data([1, 3, -6, 12]) + [1, 3, -6, 12] + + >>> _TensorDataLazyEvaluator.parse_data([[1, 2], [4, 7]]) + [[1, 2], [4, 7]] + """ + from .array import MutableDenseNDimArray + + if not isinstance(data, MutableDenseNDimArray): + if len(data) == 2 and hasattr(data[0], '__call__'): + data = MutableDenseNDimArray(data[0], data[1]) + else: + data = MutableDenseNDimArray(data) + return data + +_tensor_data_substitution_dict = _TensorDataLazyEvaluator() + + +class _TensorManager: + """ + Class to manage tensor properties. + + Notes + ===== + + Tensors belong to tensor commutation groups; each group has a label + ``comm``; there are predefined labels: + + ``0`` tensors commuting with any other tensor + + ``1`` tensors anticommuting among themselves + + ``2`` tensors not commuting, apart with those with ``comm=0`` + + Other groups can be defined using ``set_comm``; tensors in those + groups commute with those with ``comm=0``; by default they + do not commute with any other group. + """ + def __init__(self): + self._comm_init() + + def _comm_init(self): + self._comm = [{} for i in range(3)] + for i in range(3): + self._comm[0][i] = 0 + self._comm[i][0] = 0 + self._comm[1][1] = 1 + self._comm[2][1] = None + self._comm[1][2] = None + self._comm_symbols2i = {0:0, 1:1, 2:2} + self._comm_i2symbol = {0:0, 1:1, 2:2} + + @property + def comm(self): + return self._comm + + def comm_symbols2i(self, i): + """ + Get the commutation group number corresponding to ``i``. + + ``i`` can be a symbol or a number or a string. + + If ``i`` is not already defined its commutation group number + is set. + """ + if i not in self._comm_symbols2i: + n = len(self._comm) + self._comm.append({}) + self._comm[n][0] = 0 + self._comm[0][n] = 0 + self._comm_symbols2i[i] = n + self._comm_i2symbol[n] = i + return n + return self._comm_symbols2i[i] + + def comm_i2symbol(self, i): + """ + Returns the symbol corresponding to the commutation group number. + """ + return self._comm_i2symbol[i] + + def set_comm(self, i, j, c): + """ + Set the commutation parameter ``c`` for commutation groups ``i, j``. + + Parameters + ========== + + i, j : symbols representing commutation groups + + c : group commutation number + + Notes + ===== + + ``i, j`` can be symbols, strings or numbers, + apart from ``0, 1`` and ``2`` which are reserved respectively + for commuting, anticommuting tensors and tensors not commuting + with any other group apart with the commuting tensors. + For the remaining cases, use this method to set the commutation rules; + by default ``c=None``. + + The group commutation number ``c`` is assigned in correspondence + to the group commutation symbols; it can be + + 0 commuting + + 1 anticommuting + + None no commutation property + + Examples + ======== + + ``G`` and ``GH`` do not commute with themselves and commute with + each other; A is commuting. 
+ + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead, TensorManager, TensorSymmetry + >>> Lorentz = TensorIndexType('Lorentz') + >>> i0,i1,i2,i3,i4 = tensor_indices('i0:5', Lorentz) + >>> A = TensorHead('A', [Lorentz]) + >>> G = TensorHead('G', [Lorentz], TensorSymmetry.no_symmetry(1), 'Gcomm') + >>> GH = TensorHead('GH', [Lorentz], TensorSymmetry.no_symmetry(1), 'GHcomm') + >>> TensorManager.set_comm('Gcomm', 'GHcomm', 0) + >>> (GH(i1)*G(i0)).canon_bp() + G(i0)*GH(i1) + >>> (G(i1)*G(i0)).canon_bp() + G(i1)*G(i0) + >>> (G(i1)*A(i0)).canon_bp() + A(i0)*G(i1) + """ + if c not in (0, 1, None): + raise ValueError('`c` can assume only the values 0, 1 or None') + + i = sympify(i) + j = sympify(j) + + if i not in self._comm_symbols2i: + n = len(self._comm) + self._comm.append({}) + self._comm[n][0] = 0 + self._comm[0][n] = 0 + self._comm_symbols2i[i] = n + self._comm_i2symbol[n] = i + if j not in self._comm_symbols2i: + n = len(self._comm) + self._comm.append({}) + self._comm[0][n] = 0 + self._comm[n][0] = 0 + self._comm_symbols2i[j] = n + self._comm_i2symbol[n] = j + ni = self._comm_symbols2i[i] + nj = self._comm_symbols2i[j] + self._comm[ni][nj] = c + self._comm[nj][ni] = c + + """ + Cached sympy functions (e.g. expand) may have cached the results of + expressions involving tensors, but those results may not be valid after + changing the commutation properties. To stay on the safe side, we clear + the cache of all functions. + """ + clear_cache() + + def set_comms(self, *args): + """ + Set the commutation group numbers ``c`` for symbols ``i, j``. + + Parameters + ========== + + args : sequence of ``(i, j, c)`` + """ + for i, j, c in args: + self.set_comm(i, j, c) + + def get_comm(self, i, j): + """ + Return the commutation parameter for commutation group numbers ``i, j`` + + see ``_TensorManager.set_comm`` + """ + return self._comm[i].get(j, 0 if i == 0 or j == 0 else None) + + def clear(self): + """ + Clear the TensorManager. + """ + self._comm_init() + + +TensorManager = _TensorManager() + + +class TensorIndexType(Basic): + """ + A TensorIndexType is characterized by its name and its metric. + + Parameters + ========== + + name : name of the tensor type + dummy_name : name of the head of dummy indices + dim : dimension, it can be a symbol or an integer or ``None`` + eps_dim : dimension of the epsilon tensor + metric_symmetry : integer that denotes metric symmetry or ``None`` for no metric + metric_name : string with the name of the metric tensor + + Attributes + ========== + + ``metric`` : the metric tensor + ``delta`` : ``Kronecker delta`` + ``epsilon`` : the ``Levi-Civita epsilon`` tensor + ``data`` : (deprecated) a property to add ``ndarray`` values, to work in a specified basis. + + Notes + ===== + + The possible values of the ``metric_symmetry`` parameter are: + + ``1`` : metric tensor is fully symmetric + ``0`` : metric tensor possesses no index symmetry + ``-1`` : metric tensor is fully antisymmetric + ``None``: there is no metric tensor (metric equals to ``None``) + + The metric is assumed to be symmetric by default. It can also be set + to a custom tensor by the ``.set_metric()`` method. + + If there is a metric the metric is used to raise and lower indices. 
+ + In the case of non-symmetric metric, the following raising and + lowering conventions will be adopted: + + ``psi(a) = g(a, b)*psi(-b); chi(-a) = chi(b)*g(-b, -a)`` + + From these it is easy to find: + + ``g(-a, b) = delta(-a, b)`` + + where ``delta(-a, b) = delta(b, -a)`` is the ``Kronecker delta`` + (see ``TensorIndex`` for the conventions on indices). + For antisymmetric metrics there is also the following equality: + + ``g(a, -b) = -delta(a, -b)`` + + If there is no metric it is not possible to raise or lower indices; + e.g. the index of the defining representation of ``SU(N)`` + is 'covariant' and the conjugate representation is + 'contravariant'; for ``N > 2`` they are linearly independent. + + ``eps_dim`` is by default equal to ``dim``, if the latter is an integer; + else it can be assigned (for use in naive dimensional regularization); + if ``eps_dim`` is not an integer ``epsilon`` is ``None``. + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> Lorentz.metric + metric(Lorentz,Lorentz) + """ + + def __new__(cls, name, dummy_name=None, dim=None, eps_dim=None, + metric_symmetry=1, metric_name='metric', **kwargs): + if 'dummy_fmt' in kwargs: + dummy_fmt = kwargs['dummy_fmt'] + sympy_deprecation_warning( + f""" + The dummy_fmt keyword to TensorIndexType is deprecated. Use + dummy_name={dummy_fmt} instead. + """, + deprecated_since_version="1.5", + active_deprecations_target="deprecated-tensorindextype-dummy-fmt", + ) + dummy_name = dummy_fmt + + if isinstance(name, str): + name = Symbol(name) + + if dummy_name is None: + dummy_name = str(name)[0] + if isinstance(dummy_name, str): + dummy_name = Symbol(dummy_name) + + if dim is None: + dim = Symbol("dim_" + dummy_name.name) + else: + dim = sympify(dim) + + if eps_dim is None: + eps_dim = dim + else: + eps_dim = sympify(eps_dim) + + metric_symmetry = sympify(metric_symmetry) + + if isinstance(metric_name, str): + metric_name = Symbol(metric_name) + + if 'metric' in kwargs: + SymPyDeprecationWarning( + """ + The 'metric' keyword argument to TensorIndexType is + deprecated. Use the 'metric_symmetry' keyword argument or the + TensorIndexType.set_metric() method instead. 
+ """, + deprecated_since_version="1.5", + active_deprecations_target="deprecated-tensorindextype-metric", + ) + metric = kwargs.get('metric') + if metric is not None: + if metric in (True, False, 0, 1): + metric_name = 'metric' + #metric_antisym = metric + else: + metric_name = metric.name + #metric_antisym = metric.antisym + + if metric: + metric_symmetry = -1 + else: + metric_symmetry = 1 + + obj = Basic.__new__(cls, name, dummy_name, dim, eps_dim, + metric_symmetry, metric_name) + + obj._autogenerated = [] + return obj + + @property + def name(self): + return self.args[0].name + + @property + def dummy_name(self): + return self.args[1].name + + @property + def dim(self): + return self.args[2] + + @property + def eps_dim(self): + return self.args[3] + + @memoize_property + def metric(self): + metric_symmetry = self.args[4] + metric_name = self.args[5] + if metric_symmetry is None: + return None + + if metric_symmetry == 0: + symmetry = TensorSymmetry.no_symmetry(2) + elif metric_symmetry == 1: + symmetry = TensorSymmetry.fully_symmetric(2) + elif metric_symmetry == -1: + symmetry = TensorSymmetry.fully_symmetric(-2) + + return TensorHead(metric_name, [self]*2, symmetry) + + @memoize_property + def delta(self): + return TensorHead('KD', [self]*2, TensorSymmetry.fully_symmetric(2)) + + @memoize_property + def epsilon(self): + if not isinstance(self.eps_dim, (SYMPY_INTS, Integer)): + return None + symmetry = TensorSymmetry.fully_symmetric(-self.eps_dim) + return TensorHead('Eps', [self]*self.eps_dim, symmetry) + + def set_metric(self, tensor): + self._metric = tensor + + def __lt__(self, other): + return self.name < other.name + + def __str__(self): + return self.name + + __repr__ = __str__ + + # Everything below this line is deprecated + + @property + def data(self): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + return _tensor_data_substitution_dict[self] + + @data.setter + def data(self, data): + deprecate_data() + # This assignment is a bit controversial, should metric components be assigned + # to the metric only or also to the TensorIndexType object? The advantage here + # is the ability to assign a 1D array and transform it to a 2D diagonal array. 
+ from .array import MutableDenseNDimArray + + data = _TensorDataLazyEvaluator.parse_data(data) + if data.rank() > 2: + raise ValueError("data have to be of rank 1 (diagonal metric) or 2.") + if data.rank() == 1: + if self.dim.is_number: + nda_dim = data.shape[0] + if nda_dim != self.dim: + raise ValueError("Dimension mismatch") + + dim = data.shape[0] + newndarray = MutableDenseNDimArray.zeros(dim, dim) + for i, val in enumerate(data): + newndarray[i, i] = val + data = newndarray + dim1, dim2 = data.shape + if dim1 != dim2: + raise ValueError("Non-square matrix tensor.") + if self.dim.is_number: + if self.dim != dim1: + raise ValueError("Dimension mismatch") + _tensor_data_substitution_dict[self] = data + _tensor_data_substitution_dict.add_metric_data(self.metric, data) + with ignore_warnings(SymPyDeprecationWarning): + delta = self.get_kronecker_delta() + i1 = TensorIndex('i1', self) + i2 = TensorIndex('i2', self) + with ignore_warnings(SymPyDeprecationWarning): + delta(i1, -i2).data = _TensorDataLazyEvaluator.parse_data(eye(dim1)) + + @data.deleter + def data(self): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + if self in _tensor_data_substitution_dict: + del _tensor_data_substitution_dict[self] + if self.metric in _tensor_data_substitution_dict: + del _tensor_data_substitution_dict[self.metric] + + @deprecated( + """ + The TensorIndexType.get_kronecker_delta() method is deprecated. Use + the TensorIndexType.delta attribute instead. + """, + deprecated_since_version="1.5", + active_deprecations_target="deprecated-tensorindextype-methods", + ) + def get_kronecker_delta(self): + sym2 = TensorSymmetry(get_symmetric_group_sgs(2)) + delta = TensorHead('KD', [self]*2, sym2) + return delta + + @deprecated( + """ + The TensorIndexType.get_epsilon() method is deprecated. Use + the TensorIndexType.epsilon attribute instead. + """, + deprecated_since_version="1.5", + active_deprecations_target="deprecated-tensorindextype-methods", + ) + def get_epsilon(self): + if not isinstance(self._eps_dim, (SYMPY_INTS, Integer)): + return None + sym = TensorSymmetry(get_symmetric_group_sgs(self._eps_dim, 1)) + epsilon = TensorHead('Eps', [self]*self._eps_dim, sym) + return epsilon + + def _components_data_full_destroy(self): + """ + EXPERIMENTAL: do not rely on this API method. 
+ + This destroys components data associated to the ``TensorIndexType``, if + any, specifically: + + * metric tensor data + * Kronecker tensor data + """ + if self in _tensor_data_substitution_dict: + del _tensor_data_substitution_dict[self] + + def delete_tensmul_data(key): + if key in _tensor_data_substitution_dict._substitutions_dict_tensmul: + del _tensor_data_substitution_dict._substitutions_dict_tensmul[key] + + # delete metric data: + delete_tensmul_data((self.metric, True, True)) + delete_tensmul_data((self.metric, True, False)) + delete_tensmul_data((self.metric, False, True)) + delete_tensmul_data((self.metric, False, False)) + + # delete delta tensor data: + delta = self.get_kronecker_delta() + if delta in _tensor_data_substitution_dict: + del _tensor_data_substitution_dict[delta] + + +class TensorIndex(Basic): + """ + Represents a tensor index + + Parameters + ========== + + name : name of the index, or ``True`` if you want it to be automatically assigned + tensor_index_type : ``TensorIndexType`` of the index + is_up : flag for contravariant index (is_up=True by default) + + Attributes + ========== + + ``name`` + ``tensor_index_type`` + ``is_up`` + + Notes + ===== + + Tensor indices are contracted with the Einstein summation convention. + + An index can be in contravariant or in covariant form; in the latter + case it is represented prepending a ``-`` to the index name. Adding + ``-`` to a covariant (is_up=False) index makes it contravariant. + + Dummy indices have a name with head given by + ``tensor_inde_type.dummy_name`` with underscore and a number. + + Similar to ``symbols`` multiple contravariant indices can be created + at once using ``tensor_indices(s, typ)``, where ``s`` is a string + of names. + + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, TensorIndex, TensorHead, tensor_indices + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> mu = TensorIndex('mu', Lorentz, is_up=False) + >>> nu, rho = tensor_indices('nu, rho', Lorentz) + >>> A = TensorHead('A', [Lorentz, Lorentz]) + >>> A(mu, nu) + A(-mu, nu) + >>> A(-mu, -rho) + A(mu, -rho) + >>> A(mu, -mu) + A(-L_0, L_0) + """ + def __new__(cls, name, tensor_index_type, is_up=True): + if isinstance(name, str): + name_symbol = Symbol(name) + elif isinstance(name, Symbol): + name_symbol = name + elif name is True: + name = "_i{}".format(len(tensor_index_type._autogenerated)) + name_symbol = Symbol(name) + tensor_index_type._autogenerated.append(name_symbol) + else: + raise ValueError("invalid name") + + is_up = sympify(is_up) + return Basic.__new__(cls, name_symbol, tensor_index_type, is_up) + + @property + def name(self): + return self.args[0].name + + @property + def tensor_index_type(self): + return self.args[1] + + @property + def is_up(self): + return self.args[2] + + def _print(self): + s = self.name + if not self.is_up: + s = '-%s' % s + return s + + def __lt__(self, other): + return ((self.tensor_index_type, self.name) < + (other.tensor_index_type, other.name)) + + def __neg__(self): + t1 = TensorIndex(self.name, self.tensor_index_type, + (not self.is_up)) + return t1 + + +def tensor_indices(s, typ): + """ + Returns list of tensor indices given their names and their types. 
+ + Parameters + ========== + + s : string of comma separated names of indices + + typ : ``TensorIndexType`` of the indices + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> a, b, c, d = tensor_indices('a,b,c,d', Lorentz) + """ + if isinstance(s, str): + a = [x.name for x in symbols(s, seq=True)] + else: + raise ValueError('expecting a string') + + tilist = [TensorIndex(i, typ) for i in a] + if len(tilist) == 1: + return tilist[0] + return tilist + + +class TensorSymmetry(Basic): + """ + Monoterm symmetry of a tensor (i.e. any symmetric or anti-symmetric + index permutation). For the relevant terminology see ``tensor_can.py`` + section of the combinatorics module. + + Parameters + ========== + + bsgs : tuple ``(base, sgs)`` BSGS of the symmetry of the tensor + + Attributes + ========== + + ``base`` : base of the BSGS + ``generators`` : generators of the BSGS + ``rank`` : rank of the tensor + + Notes + ===== + + A tensor can have an arbitrary monoterm symmetry provided by its BSGS. + Multiterm symmetries, like the cyclic symmetry of the Riemann tensor + (i.e., Bianchi identity), are not covered. See combinatorics module for + information on how to generate BSGS for a general index permutation group. + Simple symmetries can be generated using built-in methods. + + See Also + ======== + + sympy.combinatorics.tensor_can.get_symmetric_group_sgs + + Examples + ======== + + Define a symmetric tensor of rank 2 + + >>> from sympy.tensor.tensor import TensorIndexType, TensorSymmetry, get_symmetric_group_sgs, TensorHead + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> sym = TensorSymmetry(get_symmetric_group_sgs(2)) + >>> T = TensorHead('T', [Lorentz]*2, sym) + + Note, that the same can also be done using built-in TensorSymmetry methods + + >>> sym2 = TensorSymmetry.fully_symmetric(2) + >>> sym == sym2 + True + """ + def __new__(cls, *args, **kw_args): + if len(args) == 1: + base, generators = args[0] + elif len(args) == 2: + base, generators = args + else: + raise TypeError("bsgs required, either two separate parameters or one tuple") + + if not isinstance(base, Tuple): + base = Tuple(*base) + if not isinstance(generators, Tuple): + generators = Tuple(*generators) + + return Basic.__new__(cls, base, generators, **kw_args) + + @property + def base(self): + return self.args[0] + + @property + def generators(self): + return self.args[1] + + @property + def rank(self): + return self.generators[0].size - 2 + + @classmethod + def fully_symmetric(cls, rank): + """ + Returns a fully symmetric (antisymmetric if ``rank``<0) + TensorSymmetry object for ``abs(rank)`` indices. + """ + if rank > 0: + bsgs = get_symmetric_group_sgs(rank, False) + elif rank < 0: + bsgs = get_symmetric_group_sgs(-rank, True) + elif rank == 0: + bsgs = ([], [Permutation(1)]) + return TensorSymmetry(bsgs) + + @classmethod + def direct_product(cls, *args): + """ + Returns a TensorSymmetry object that is being a direct product of + fully (anti-)symmetric index permutation groups. 
+ + Notes + ===== + + Some examples for different values of ``(*args)``: + ``(1)`` vector, equivalent to ``TensorSymmetry.fully_symmetric(1)`` + ``(2)`` tensor with 2 symmetric indices, equivalent to ``.fully_symmetric(2)`` + ``(-2)`` tensor with 2 antisymmetric indices, equivalent to ``.fully_symmetric(-2)`` + ``(2, -2)`` tensor with the first 2 indices commuting and the last 2 anticommuting + ``(1, 1, 1)`` tensor with 3 indices without any symmetry + """ + base, sgs = [], [Permutation(1)] + for arg in args: + if arg > 0: + bsgs2 = get_symmetric_group_sgs(arg, False) + elif arg < 0: + bsgs2 = get_symmetric_group_sgs(-arg, True) + else: + continue + base, sgs = bsgs_direct_product(base, sgs, *bsgs2) + + return TensorSymmetry(base, sgs) + + @classmethod + def riemann(cls): + """ + Returns a monoterm symmetry of the Riemann tensor + """ + return TensorSymmetry(riemann_bsgs) + + @classmethod + def no_symmetry(cls, rank): + """ + TensorSymmetry object for ``rank`` indices with no symmetry + """ + return TensorSymmetry([], [Permutation(rank+1)]) + + +@deprecated( + """ + The tensorsymmetry() function is deprecated. Use the TensorSymmetry + constructor instead. + """, + deprecated_since_version="1.5", + active_deprecations_target="deprecated-tensorsymmetry", +) +def tensorsymmetry(*args): + """ + Returns a ``TensorSymmetry`` object. This method is deprecated, use + ``TensorSymmetry.direct_product()`` or ``.riemann()`` instead. + + Explanation + =========== + + One can represent a tensor with any monoterm slot symmetry group + using a BSGS. + + ``args`` can be a BSGS + ``args[0]`` base + ``args[1]`` sgs + + Usually tensors are in (direct products of) representations + of the symmetric group; + ``args`` can be a list of lists representing the shapes of Young tableaux + + Notes + ===== + + For instance: + ``[[1]]`` vector + ``[[1]*n]`` symmetric tensor of rank ``n`` + ``[[n]]`` antisymmetric tensor of rank ``n`` + ``[[2, 2]]`` monoterm slot symmetry of the Riemann tensor + ``[[1],[1]]`` vector*vector + ``[[2],[1],[1]]`` (antisymmetric tensor)*vector*vector + + Notice that with the shape ``[2, 2]`` we associate only the monoterm + symmetries of the Riemann tensor; this is an abuse of notation, + since the shape ``[2, 2]`` usually corresponds to the irreducible + representation characterized by the monoterm symmetries and by the + cyclic symmetry. + """ + from sympy.combinatorics import Permutation + + def tableau2bsgs(a): + if len(a) == 1: + # antisymmetric tensor of rank a[0] + n = a[0] + bsgs = get_symmetric_group_sgs(n, 1) + else: + if all(x == 1 for x in a): + # symmetric tensor of rank len(a) + n = len(a) + bsgs = get_symmetric_group_sgs(n) + elif a == [2, 2]: + bsgs = riemann_bsgs + else: + raise NotImplementedError + return bsgs + + if not args: + return TensorSymmetry(Tuple(), Tuple(Permutation(1))) + + if len(args) == 2 and isinstance(args[1][0], Permutation): + return TensorSymmetry(args) + base, sgs = tableau2bsgs(args[0]) + for a in args[1:]: + basex, sgsx = tableau2bsgs(a) + base, sgs = bsgs_direct_product(base, sgs, basex, sgsx) + return TensorSymmetry(Tuple(base, sgs)) + +@deprecated( + "TensorType is deprecated. Use tensor_heads() instead.", + deprecated_since_version="1.5", + active_deprecations_target="deprecated-tensortype", +) +class TensorType(Basic): + """ + Class of tensor types. Deprecated, use tensor_heads() instead.
+ + Parameters + ========== + + index_types : list of ``TensorIndexType`` of the tensor indices + symmetry : ``TensorSymmetry`` of the tensor + + Attributes + ========== + + ``index_types`` + ``symmetry`` + ``types`` : list of ``TensorIndexType`` without repetitions + """ + is_commutative = False + + def __new__(cls, index_types, symmetry, **kw_args): + assert symmetry.rank == len(index_types) + obj = Basic.__new__(cls, Tuple(*index_types), symmetry, **kw_args) + return obj + + @property + def index_types(self): + return self.args[0] + + @property + def symmetry(self): + return self.args[1] + + @property + def types(self): + return sorted(set(self.index_types), key=lambda x: x.name) + + def __str__(self): + return 'TensorType(%s)' % ([str(x) for x in self.index_types]) + + def __call__(self, s, comm=0): + """ + Return a TensorHead object or a list of TensorHead objects. + + Parameters + ========== + + s : name or string of names. + + comm : Commutation group. + + see ``_TensorManager.set_comm`` + """ + if isinstance(s, str): + names = [x.name for x in symbols(s, seq=True)] + else: + raise ValueError('expecting a string') + if len(names) == 1: + return TensorHead(names[0], self.index_types, self.symmetry, comm) + else: + return [TensorHead(name, self.index_types, self.symmetry, comm) for name in names] + + +@deprecated( + """ + The tensorhead() function is deprecated. Use tensor_heads() instead. + """, + deprecated_since_version="1.5", + active_deprecations_target="deprecated-tensorhead", +) +def tensorhead(name, typ, sym=None, comm=0): + """ + Function generating tensorhead(s). This method is deprecated, + use TensorHead constructor or tensor_heads() instead. + + Parameters + ========== + + name : name or sequence of names (as in ``symbols``) + + typ : index types + + sym : same as ``*args`` in ``tensorsymmetry`` + + comm : commutation group number + see ``_TensorManager.set_comm`` + """ + if sym is None: + sym = [[1] for i in range(len(typ))] + with ignore_warnings(SymPyDeprecationWarning): + sym = tensorsymmetry(*sym) + return TensorHead(name, typ, sym, comm) + + +class TensorHead(Basic): + """ + Tensor head of the tensor. + + Parameters + ========== + + name : name of the tensor + index_types : list of TensorIndexType + symmetry : TensorSymmetry of the tensor + comm : commutation group number + + Attributes + ========== + + ``name`` + ``index_types`` + ``rank`` : total number of indices + ``symmetry`` + ``comm`` : commutation group + + Notes + ===== + + Similar to ``symbols`` multiple TensorHeads can be created using + ``tensorhead(s, typ, sym=None, comm=0)`` function, where ``s`` + is the string of names and ``sym`` is the monoterm tensor symmetry + (see ``tensorsymmetry``). + + A ``TensorHead`` belongs to a commutation group, defined by a + symbol on number ``comm`` (see ``_TensorManager.set_comm``); + tensors in a commutation group have the same commutation properties; + by default ``comm`` is ``0``, the group of the commuting tensors. + + Examples + ======== + + Define a fully antisymmetric tensor of rank 2: + + >>> from sympy.tensor.tensor import TensorIndexType, TensorHead, TensorSymmetry + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> asym2 = TensorSymmetry.fully_symmetric(-2) + >>> A = TensorHead('A', [Lorentz, Lorentz], asym2) + + Examples with ndarray values, the components data assigned to the + ``TensorHead`` object are assumed to be in a fully-contravariant + representation. 
In case it is necessary to assign components data which + represents the values of a non-fully covariant tensor, see the other + examples. + + >>> from sympy.tensor.tensor import tensor_indices + >>> from sympy import diag + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> i0, i1 = tensor_indices('i0:2', Lorentz) + + Specify a replacement dictionary to keep track of the arrays to use for + replacements in the tensorial expression. The ``TensorIndexType`` is + associated to the metric used for contractions (in fully covariant form): + + >>> repl = {Lorentz: diag(1, -1, -1, -1)} + + Let's see some examples of working with components with the electromagnetic + tensor: + + >>> from sympy import symbols + >>> Ex, Ey, Ez, Bx, By, Bz = symbols('E_x E_y E_z B_x B_y B_z') + >>> c = symbols('c', positive=True) + + Let's define `F`, an antisymmetric tensor: + + >>> F = TensorHead('F', [Lorentz, Lorentz], asym2) + + Let's update the dictionary to contain the matrix to use in the + replacements: + + >>> repl.update({F(-i0, -i1): [ + ... [0, Ex/c, Ey/c, Ez/c], + ... [-Ex/c, 0, -Bz, By], + ... [-Ey/c, Bz, 0, -Bx], + ... [-Ez/c, -By, Bx, 0]]}) + + Now it is possible to retrieve the contravariant form of the Electromagnetic + tensor: + + >>> F(i0, i1).replace_with_arrays(repl, [i0, i1]) + [[0, -E_x/c, -E_y/c, -E_z/c], [E_x/c, 0, -B_z, B_y], [E_y/c, B_z, 0, -B_x], [E_z/c, -B_y, B_x, 0]] + + and the mixed contravariant-covariant form: + + >>> F(i0, -i1).replace_with_arrays(repl, [i0, -i1]) + [[0, E_x/c, E_y/c, E_z/c], [E_x/c, 0, B_z, -B_y], [E_y/c, -B_z, 0, B_x], [E_z/c, B_y, -B_x, 0]] + + Energy-momentum of a particle may be represented as: + + >>> from sympy import symbols + >>> P = TensorHead('P', [Lorentz], TensorSymmetry.no_symmetry(1)) + >>> E, px, py, pz = symbols('E p_x p_y p_z', positive=True) + >>> repl.update({P(i0): [E, px, py, pz]}) + + The contravariant and covariant components are, respectively: + + >>> P(i0).replace_with_arrays(repl, [i0]) + [E, p_x, p_y, p_z] + >>> P(-i0).replace_with_arrays(repl, [-i0]) + [E, -p_x, -p_y, -p_z] + + The contraction of a 1-index tensor by itself: + + >>> expr = P(i0)*P(-i0) + >>> expr.replace_with_arrays(repl, []) + E**2 - p_x**2 - p_y**2 - p_z**2 + """ + is_commutative = False + + def __new__(cls, name, index_types, symmetry=None, comm=0): + if isinstance(name, str): + name_symbol = Symbol(name) + elif isinstance(name, Symbol): + name_symbol = name + else: + raise ValueError("invalid name") + + if symmetry is None: + symmetry = TensorSymmetry.no_symmetry(len(index_types)) + else: + assert symmetry.rank == len(index_types) + + obj = Basic.__new__(cls, name_symbol, Tuple(*index_types), symmetry, sympify(comm)) + return obj + + @property + def name(self): + return self.args[0].name + + @property + def index_types(self): + return list(self.args[1]) + + @property + def symmetry(self): + return self.args[2] + + @property + def comm(self): + return TensorManager.comm_symbols2i(self.args[3]) + + @property + def rank(self): + return len(self.index_types) + + def __lt__(self, other): + return (self.name, self.index_types) < (other.name, other.index_types) + + def commutes_with(self, other): + """ + Returns ``0`` if ``self`` and ``other`` commute, ``1`` if they anticommute. + + Returns ``None`` if ``self`` and ``other`` neither commute nor anticommute. 
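+
+ Examples
+ ========
+
+ A minimal illustrative sketch: two heads left in the default
+ commutation group ``0`` are expected to commute, so the result is ``0``.
+
+ >>> from sympy.tensor.tensor import TensorIndexType, TensorHead
+ >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
+ >>> A = TensorHead('A', [Lorentz])
+ >>> B = TensorHead('B', [Lorentz])
+ >>> A.commutes_with(B)
+ 0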
+ """ + r = TensorManager.get_comm(self.comm, other.comm) + return r + + def _print(self): + return '%s(%s)' %(self.name, ','.join([str(x) for x in self.index_types])) + + def __call__(self, *indices, **kw_args): + """ + Returns a tensor with indices. + + Explanation + =========== + + There is a special behavior in case of indices denoted by ``True``, + they are considered auto-matrix indices, their slots are automatically + filled, and confer to the tensor the behavior of a matrix or vector + upon multiplication with another tensor containing auto-matrix indices + of the same ``TensorIndexType``. This means indices get summed over the + same way as in matrix multiplication. For matrix behavior, define two + auto-matrix indices, for vector behavior define just one. + + Indices can also be strings, in which case the attribute + ``index_types`` is used to convert them to proper ``TensorIndex``. + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorSymmetry, TensorHead + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> a, b = tensor_indices('a,b', Lorentz) + >>> A = TensorHead('A', [Lorentz]*2, TensorSymmetry.no_symmetry(2)) + >>> t = A(a, -b) + >>> t + A(a, -b) + + """ + + updated_indices = [] + for idx, typ in zip(indices, self.index_types): + if isinstance(idx, str): + idx = idx.strip().replace(" ", "") + if idx.startswith('-'): + updated_indices.append(TensorIndex(idx[1:], typ, + is_up=False)) + else: + updated_indices.append(TensorIndex(idx, typ)) + else: + updated_indices.append(idx) + + updated_indices += indices[len(updated_indices):] + + tensor = Tensor(self, updated_indices, **kw_args) + return tensor.doit() + + # Everything below this line is deprecated + + def __pow__(self, other): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + if self.data is None: + raise ValueError("No power on abstract tensors.") + from .array import tensorproduct, tensorcontraction + metrics = [_.data for _ in self.index_types] + + marray = self.data + marraydim = marray.rank() + for metric in metrics: + marray = tensorproduct(marray, metric, marray) + marray = tensorcontraction(marray, (0, marraydim), (marraydim+1, marraydim+2)) + + return marray ** (other * S.Half) + + @property + def data(self): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + return _tensor_data_substitution_dict[self] + + @data.setter + def data(self, data): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + _tensor_data_substitution_dict[self] = data + + @data.deleter + def data(self): + deprecate_data() + if self in _tensor_data_substitution_dict: + del _tensor_data_substitution_dict[self] + + def __iter__(self): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + return self.data.__iter__() + + def _components_data_full_destroy(self): + """ + EXPERIMENTAL: do not rely on this API method. + + Destroy components data associated to the ``TensorHead`` object, this + checks for attached components data, and destroys components data too. + """ + # do not garbage collect Kronecker tensor (it should be done by + # ``TensorIndexType`` garbage collection) + deprecate_data() + if self.name == "KD": + return + + # the data attached to a tensor must be deleted only by the TensorHead + # destructor. If the TensorHead is deleted, it means that there are no + # more instances of that tensor anywhere. 
+ if self in _tensor_data_substitution_dict: + del _tensor_data_substitution_dict[self] + + +def tensor_heads(s, index_types, symmetry=None, comm=0): + """ + Returns a sequence of TensorHeads from a string `s` + """ + if isinstance(s, str): + names = [x.name for x in symbols(s, seq=True)] + else: + raise ValueError('expecting a string') + + thlist = [TensorHead(name, index_types, symmetry, comm) for name in names] + if len(thlist) == 1: + return thlist[0] + return thlist + + +class TensExpr(Expr, ABC): + """ + Abstract base class for tensor expressions + + Notes + ===== + + A tensor expression is an expression formed by tensors; + currently the sums of tensors are distributed. + + A ``TensExpr`` can be a ``TensAdd`` or a ``TensMul``. + + ``TensMul`` objects are formed by products of component tensors, + and include a coefficient, which is a SymPy expression. + + + In the internal representation contracted indices are represented + by ``(ipos1, ipos2, icomp1, icomp2)``, where ``icomp1`` is the position + of the component tensor with contravariant index, ``ipos1`` is the + slot which the index occupies in that component tensor. + + Contracted indices are therefore nameless in the internal representation. + """ + + _op_priority = 12.0 + is_commutative = False + + def __neg__(self): + return self*S.NegativeOne + + def __abs__(self): + raise NotImplementedError + + def __add__(self, other): + return TensAdd(self, other).doit() + + def __radd__(self, other): + return TensAdd(other, self).doit() + + def __sub__(self, other): + return TensAdd(self, -other).doit() + + def __rsub__(self, other): + return TensAdd(other, -self).doit() + + def __mul__(self, other): + """ + Multiply two tensors using Einstein summation convention. + + Explanation + =========== + + If the two tensors have an index in common, one contravariant + and the other covariant, in their product the indices are summed + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz) + >>> g = Lorentz.metric + >>> p, q = tensor_heads('p,q', [Lorentz]) + >>> t1 = p(m0) + >>> t2 = q(-m0) + >>> t1*t2 + p(L_0)*q(-L_0) + """ + return TensMul(self, other).doit() + + def __rmul__(self, other): + return TensMul(other, self).doit() + + def __truediv__(self, other): + other = _sympify(other) + if isinstance(other, TensExpr): + raise ValueError('cannot divide by a tensor') + return TensMul(self, S.One/other).doit() + + def __rtruediv__(self, other): + raise ValueError('cannot divide by a tensor') + + def __pow__(self, other): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + if self.data is None: + raise ValueError("No power without ndarray data.") + from .array import tensorproduct, tensorcontraction + free = self.free + marray = self.data + mdim = marray.rank() + for metric in free: + marray = tensorcontraction( + tensorproduct( + marray, + metric[0].tensor_index_type.data, + marray), + (0, mdim), (mdim+1, mdim+2) + ) + return marray ** (other * S.Half) + + def __rpow__(self, other): + raise NotImplementedError + + @property + @abstractmethod + def nocoeff(self): + raise NotImplementedError("abstract method") + + @property + @abstractmethod + def coeff(self): + raise NotImplementedError("abstract method") + + @abstractmethod + def get_indices(self): + raise NotImplementedError("abstract method") + + @abstractmethod + def get_free_indices(self) -> list[TensorIndex]: 
+ raise NotImplementedError("abstract method") + + @abstractmethod + def _replace_indices(self, repl: dict[TensorIndex, TensorIndex]) -> TensExpr: + raise NotImplementedError("abstract method") + + def fun_eval(self, *index_tuples): + deprecate_fun_eval() + return self.substitute_indices(*index_tuples) + + def get_matrix(self): + """ + DEPRECATED: do not use. + + Returns ndarray components data as a matrix, if components data are + available and ndarray dimension does not exceed 2. + """ + from sympy.matrices.dense import Matrix + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + if 0 < self.rank <= 2: + rows = self.data.shape[0] + columns = self.data.shape[1] if self.rank == 2 else 1 + if self.rank == 2: + mat_list = [] * rows + for i in range(rows): + mat_list.append([]) + for j in range(columns): + mat_list[i].append(self[i, j]) + else: + mat_list = [None] * rows + for i in range(rows): + mat_list[i] = self[i] + return Matrix(mat_list) + else: + raise NotImplementedError( + "missing multidimensional reduction to matrix.") + + @staticmethod + def _get_indices_permutation(indices1, indices2): + return [indices1.index(i) for i in indices2] + + def expand(self, **hints): + return _expand(self, **hints).doit() + + def _expand(self, **kwargs): + return self + + def _get_free_indices_set(self): + indset = set() + for arg in self.args: + if isinstance(arg, TensExpr): + indset.update(arg._get_free_indices_set()) + return indset + + def _get_dummy_indices_set(self): + indset = set() + for arg in self.args: + if isinstance(arg, TensExpr): + indset.update(arg._get_dummy_indices_set()) + return indset + + def _get_indices_set(self): + indset = set() + for arg in self.args: + if isinstance(arg, TensExpr): + indset.update(arg._get_indices_set()) + return indset + + @property + def _iterate_dummy_indices(self): + dummy_set = self._get_dummy_indices_set() + + def recursor(expr, pos): + if isinstance(expr, TensorIndex): + if expr in dummy_set: + yield (expr, pos) + elif isinstance(expr, (Tuple, TensExpr)): + for p, arg in enumerate(expr.args): + yield from recursor(arg, pos+(p,)) + + return recursor(self, ()) + + @property + def _iterate_free_indices(self): + free_set = self._get_free_indices_set() + + def recursor(expr, pos): + if isinstance(expr, TensorIndex): + if expr in free_set: + yield (expr, pos) + elif isinstance(expr, (Tuple, TensExpr)): + for p, arg in enumerate(expr.args): + yield from recursor(arg, pos+(p,)) + + return recursor(self, ()) + + @property + def _iterate_indices(self): + def recursor(expr, pos): + if isinstance(expr, TensorIndex): + yield (expr, pos) + elif isinstance(expr, (Tuple, TensExpr)): + for p, arg in enumerate(expr.args): + yield from recursor(arg, pos+(p,)) + + return recursor(self, ()) + + @staticmethod + def _contract_and_permute_with_metric(metric, array, pos, dim): + # TODO: add possibility of metric after (spinors) + from .array import tensorcontraction, tensorproduct, permutedims + + array = tensorcontraction(tensorproduct(metric, array), (1, 2+pos)) + permu = list(range(dim)) + permu[0], permu[pos] = permu[pos], permu[0] + return permutedims(array, permu) + + @staticmethod + def _match_indices_with_other_tensor(array, free_ind1, free_ind2, replacement_dict): + from .array import permutedims + + index_types1 = [i.tensor_index_type for i in free_ind1] + + # Check if variance of indices needs to be fixed: + pos2up = [] + pos2down = [] + free2remaining = free_ind2[:] + for pos1, index1 in enumerate(free_ind1): + if index1 in free2remaining: + pos2 = 
free2remaining.index(index1) + free2remaining[pos2] = None + continue + if -index1 in free2remaining: + pos2 = free2remaining.index(-index1) + free2remaining[pos2] = None + free_ind2[pos2] = index1 + if index1.is_up: + pos2up.append(pos2) + else: + pos2down.append(pos2) + else: + index2 = free2remaining[pos1] + if index2 is None: + raise ValueError("incompatible indices: %s and %s" % (free_ind1, free_ind2)) + free2remaining[pos1] = None + free_ind2[pos1] = index1 + if index1.is_up ^ index2.is_up: + if index1.is_up: + pos2up.append(pos1) + else: + pos2down.append(pos1) + + if len(set(free_ind1) & set(free_ind2)) < len(free_ind1): + raise ValueError("incompatible indices: %s and %s" % (free_ind1, free_ind2)) + + # Raise indices: + for pos in pos2up: + index_type_pos = index_types1[pos] + if index_type_pos not in replacement_dict: + raise ValueError("No metric provided to lower index") + metric = replacement_dict[index_type_pos] + metric_inverse = _TensorDataLazyEvaluator.inverse_matrix(metric) + array = TensExpr._contract_and_permute_with_metric(metric_inverse, array, pos, len(free_ind1)) + # Lower indices: + for pos in pos2down: + index_type_pos = index_types1[pos] + if index_type_pos not in replacement_dict: + raise ValueError("No metric provided to lower index") + metric = replacement_dict[index_type_pos] + array = TensExpr._contract_and_permute_with_metric(metric, array, pos, len(free_ind1)) + + if free_ind1: + permutation = TensExpr._get_indices_permutation(free_ind2, free_ind1) + array = permutedims(array, permutation) + + if hasattr(array, "rank") and array.rank() == 0: + array = array[()] + + return free_ind2, array + + def replace_with_arrays(self, replacement_dict, indices=None): + """ + Replace the tensorial expressions with arrays. The final array will + correspond to the N-dimensional array with indices arranged according + to ``indices``. + + Parameters + ========== + + replacement_dict + dictionary containing the replacement rules for tensors. + indices + the index order with respect to which the array is read. The + original index order will be used if no value is passed. 
+ + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices + >>> from sympy.tensor.tensor import TensorHead + >>> from sympy import symbols, diag + + >>> L = TensorIndexType("L") + >>> i, j = tensor_indices("i j", L) + >>> A = TensorHead("A", [L]) + >>> A(i).replace_with_arrays({A(i): [1, 2]}, [i]) + [1, 2] + + Since ``indices`` is optional, ``replace_with_arrays`` can also be called + this way when no specific index order is needed: + + >>> A(i).replace_with_arrays({A(i): [1, 2]}) + [1, 2] + + >>> expr = A(i)*A(j) + >>> expr.replace_with_arrays({A(i): [1, 2]}) + [[1, 2], [2, 4]] + + For contractions, specify the metric of the ``TensorIndexType``, which + in this case is ``L``, in its covariant form: + + >>> expr = A(i)*A(-i) + >>> expr.replace_with_arrays({A(i): [1, 2], L: diag(1, -1)}) + -3 + + Symmetrization of an array: + + >>> H = TensorHead("H", [L, L]) + >>> a, b, c, d = symbols("a b c d") + >>> expr = H(i, j)/2 + H(j, i)/2 + >>> expr.replace_with_arrays({H(i, j): [[a, b], [c, d]]}) + [[a, b/2 + c/2], [b/2 + c/2, d]] + + Anti-symmetrization of an array: + + >>> expr = H(i, j)/2 - H(j, i)/2 + >>> repl = {H(i, j): [[a, b], [c, d]]} + >>> expr.replace_with_arrays(repl) + [[0, b/2 - c/2], [-b/2 + c/2, 0]] + + The same expression can be read as the transpose by inverting ``i`` and + ``j``: + + >>> expr.replace_with_arrays(repl, [j, i]) + [[0, -b/2 + c/2], [b/2 - c/2, 0]] + """ + from .array import Array + + indices = indices or [] + remap = {k.args[0] if k.is_up else -k.args[0]: k for k in self.get_free_indices()} + for i, index in enumerate(indices): + if isinstance(index, (Symbol, Mul)): + if index in remap: + indices[i] = remap[index] + else: + indices[i] = -remap[-index] + + replacement_dict = {tensor: Array(array) for tensor, array in replacement_dict.items()} + + # Check dimensions of replaced arrays: + for tensor, array in replacement_dict.items(): + if isinstance(tensor, TensorIndexType): + expected_shape = [tensor.dim for i in range(2)] + else: + expected_shape = [index_type.dim for index_type in tensor.index_types] + if len(expected_shape) != array.rank() or (not all(dim1 == dim2 if + dim1.is_number else True for dim1, dim2 in zip(expected_shape, + array.shape))): + raise ValueError("shapes for tensor %s expected to be %s, "\ + "replacement array shape is %s" % (tensor, expected_shape, + array.shape)) + + ret_indices, array = self._extract_data(replacement_dict) + + last_indices, array = self._match_indices_with_other_tensor(array, indices, ret_indices, replacement_dict) + return array + + def _check_add_Sum(self, expr, index_symbols): + from sympy.concrete.summations import Sum + indices = self.get_indices() + dum = self.dum + sum_indices = [ (index_symbols[i], 0, + indices[i].tensor_index_type.dim-1) for i, j in dum] + if sum_indices: + expr = Sum(expr, *sum_indices) + return expr + + def _expand_partial_derivative(self): + # simply delegate the _expand_partial_derivative() to + # its arguments to expand a possibly found PartialDerivative + return self.func(*[ + a._expand_partial_derivative() + if isinstance(a, TensExpr) else a + for a in self.args]) + + +class TensAdd(TensExpr, AssocOp): + """ + Sum of tensors.
+ + Parameters + ========== + + args : list of addends + + Attributes + ========== + + ``args`` : tuple of addends + ``rank`` : rank of the tensor + ``free_args`` : list of the free indices in sorted order + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_heads, tensor_indices + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> a, b = tensor_indices('a,b', Lorentz) + >>> p, q = tensor_heads('p,q', [Lorentz]) + >>> t = p(a) + q(a); t + p(a) + q(a) + + Examples with components data added to the tensor expression: + + >>> from sympy import symbols, diag + >>> x, y, z, t = symbols("x y z t") + >>> repl = {} + >>> repl[Lorentz] = diag(1, -1, -1, -1) + >>> repl[p(a)] = [1, 2, 3, 4] + >>> repl[q(a)] = [x, y, z, t] + + The sum of the two vectors is evaluated component by component: + + >>> expr = p(a) + q(a) + >>> expr.replace_with_arrays(repl, [a]) + [x + 1, y + 2, z + 3, t + 4] + """ + + def __new__(cls, *args, **kw_args): + args = [_sympify(x) for x in args if x] + args = TensAdd._tensAdd_flatten(args) + args.sort(key=default_sort_key) + if not args: + return S.Zero + if len(args) == 1: + return args[0] + + return Basic.__new__(cls, *args, **kw_args) + + @property + def coeff(self): + return S.One + + @property + def nocoeff(self): + return self + + def get_free_indices(self) -> list[TensorIndex]: + return self.free_indices + + def _replace_indices(self, repl: dict[TensorIndex, TensorIndex]) -> TensExpr: + newargs = [arg._replace_indices(repl) if isinstance(arg, TensExpr) else arg for arg in self.args] + return self.func(*newargs) + + @memoize_property + def rank(self): + if isinstance(self.args[0], TensExpr): + return self.args[0].rank + else: + return 0 + + @memoize_property + def free_args(self): + if isinstance(self.args[0], TensExpr): + return self.args[0].free_args + else: + return [] + + @memoize_property + def free_indices(self): + if isinstance(self.args[0], TensExpr): + return self.args[0].get_free_indices() + else: + return set() + + def doit(self, **hints): + deep = hints.get('deep', True) + if deep: + args = [arg.doit(**hints) for arg in self.args] + else: + args = self.args + + # if any of the args are zero (after doit), drop them. Otherwise, _tensAdd_check will complain about non-matching indices, even though the TensAdd is correctly formed.
+ args = [arg for arg in args if arg != S.Zero] + + if len(args) == 0: + return S.Zero + elif len(args) == 1: + return args[0] + + # now check that all addends have the same indices: + TensAdd._tensAdd_check(args) + + # Collect terms appearing more than once, differing by their coefficients: + args = TensAdd._tensAdd_collect_terms(args) + + # collect canonicalized terms + def sort_key(t): + if not isinstance(t, TensExpr): + return [], [], [] + if hasattr(t, "_index_structure") and hasattr(t, "components"): + x = get_index_structure(t) + return t.components, x.free, x.dum + return [], [], [] + args.sort(key=sort_key) + + if not args: + return S.Zero + # it there is only a component tensor return it + if len(args) == 1: + return args[0] + + obj = self.func(*args) + return obj + + @staticmethod + def _tensAdd_flatten(args): + # flatten TensAdd, coerce terms which are not tensors to tensors + a = [] + for x in args: + if isinstance(x, (Add, TensAdd)): + a.extend(list(x.args)) + else: + a.append(x) + args = [x for x in a if x.coeff] + return args + + @staticmethod + def _tensAdd_check(args): + # check that all addends have the same free indices + + def get_indices_set(x: Expr) -> set[TensorIndex]: + if isinstance(x, TensExpr): + return set(x.get_free_indices()) + return set() + + indices0 = get_indices_set(args[0]) + list_indices = [get_indices_set(arg) for arg in args[1:]] + if not all(x == indices0 for x in list_indices): + raise ValueError('all tensors must have the same indices') + + @staticmethod + def _tensAdd_collect_terms(args): + # collect TensMul terms differing at most by their coefficient + terms_dict = defaultdict(list) + scalars = S.Zero + if isinstance(args[0], TensExpr): + free_indices = set(args[0].get_free_indices()) + else: + free_indices = set() + + for arg in args: + if not isinstance(arg, TensExpr): + if free_indices != set(): + raise ValueError("wrong valence") + scalars += arg + continue + if free_indices != set(arg.get_free_indices()): + raise ValueError("wrong valence") + # TODO: what is the part which is not a coeff? + # needs an implementation similar to .as_coeff_Mul() + terms_dict[arg.nocoeff].append(arg.coeff) + + new_args = [TensMul(Add(*coeff), t).doit() for t, coeff in terms_dict.items() if Add(*coeff) != 0] + if isinstance(scalars, Add): + new_args = list(scalars.args) + new_args + elif scalars != 0: + new_args = [scalars] + new_args + return new_args + + def get_indices(self): + indices = [] + for arg in self.args: + indices.extend([i for i in get_indices(arg) if i not in indices]) + return indices + + def _expand(self, **hints): + return TensAdd(*[_expand(i, **hints) for i in self.args]) + + def __call__(self, *indices): + deprecate_call() + free_args = self.free_args + indices = list(indices) + if [x.tensor_index_type for x in indices] != [x.tensor_index_type for x in free_args]: + raise ValueError('incompatible types') + if indices == free_args: + return self + index_tuples = list(zip(free_args, indices)) + a = [x.func(*x.substitute_indices(*index_tuples).args) for x in self.args] + res = TensAdd(*a).doit() + return res + + def canon_bp(self): + """ + Canonicalize using the Butler-Portugal algorithm for canonicalization + under monoterm symmetries. 
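+
+ Examples
+ ========
+
+ An illustrative sketch (assuming an antisymmetric ``TensorHead``, so the
+ symmetrized sum is expected to collapse to zero):
+
+ >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead, TensorSymmetry
+ >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L')
+ >>> m0, m1 = tensor_indices('m0,m1', Lorentz)
+ >>> A = TensorHead('A', [Lorentz]*2, TensorSymmetry.fully_symmetric(-2))
+ >>> (A(m0, m1) + A(m1, m0)).canon_bp()
+ 0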
+ """ + expr = self.expand() + args = [canon_bp(x) for x in expr.args] + res = TensAdd(*args).doit() + return res + + def equals(self, other): + other = _sympify(other) + if isinstance(other, TensMul) and other.coeff == 0: + return all(x.coeff == 0 for x in self.args) + if isinstance(other, TensExpr): + if self.rank != other.rank: + return False + if isinstance(other, TensAdd): + if set(self.args) != set(other.args): + return False + else: + return True + t = self - other + if not isinstance(t, TensExpr): + return t == 0 + else: + if isinstance(t, TensMul): + return t.coeff == 0 + else: + return all(x.coeff == 0 for x in t.args) + + def __getitem__(self, item): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + return self.data[item] + + def contract_delta(self, delta): + args = [x.contract_delta(delta) for x in self.args] + t = TensAdd(*args).doit() + return canon_bp(t) + + def contract_metric(self, g): + """ + Raise or lower indices with the metric ``g``. + + Parameters + ========== + + g : metric + + contract_all : if True, eliminate all ``g`` which are contracted + + Notes + ===== + + see the ``TensorIndexType`` docstring for the contraction conventions + """ + + args = [contract_metric(x, g) for x in self.args] + t = TensAdd(*args).doit() + return canon_bp(t) + + def substitute_indices(self, *index_tuples): + new_args = [] + for arg in self.args: + if isinstance(arg, TensExpr): + arg = arg.substitute_indices(*index_tuples) + new_args.append(arg) + return TensAdd(*new_args).doit() + + def _print(self): + a = [] + args = self.args + for x in args: + a.append(str(x)) + s = ' + '.join(a) + s = s.replace('+ -', '- ') + return s + + def _extract_data(self, replacement_dict): + from sympy.tensor.array import Array, permutedims + args_indices, arrays = zip(*[ + arg._extract_data(replacement_dict) if + isinstance(arg, TensExpr) else ([], arg) for arg in self.args + ]) + arrays = [Array(i) for i in arrays] + ref_indices = args_indices[0] + for i in range(1, len(args_indices)): + indices = args_indices[i] + array = arrays[i] + permutation = TensMul._get_indices_permutation(indices, ref_indices) + arrays[i] = permutedims(array, permutation) + return ref_indices, sum(arrays, Array.zeros(*array.shape)) + + @property + def data(self): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + return _tensor_data_substitution_dict[self.expand()] + + @data.setter + def data(self, data): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + _tensor_data_substitution_dict[self] = data + + @data.deleter + def data(self): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + if self in _tensor_data_substitution_dict: + del _tensor_data_substitution_dict[self] + + def __iter__(self): + deprecate_data() + if not self.data: + raise ValueError("No iteration on abstract tensors") + return self.data.flatten().__iter__() + + def _eval_rewrite_as_Indexed(self, *args, **kwargs): + return Add.fromiter(args) + + def _eval_partial_derivative(self, s): + # Evaluation like Add + list_addends = [] + for a in self.args: + if isinstance(a, TensExpr): + list_addends.append(a._eval_partial_derivative(s)) + # do not call diff if s is no symbol + elif s._diff_wrt: + list_addends.append(a._eval_derivative(s)) + + return self.func(*list_addends) + + +class Tensor(TensExpr): + """ + Base tensor class, i.e. this represents a tensor, the single unit to be + put into an expression. 
+ + Explanation + =========== + + This object is usually created from a ``TensorHead``, by attaching indices + to it. Indices preceded by a minus sign are considered covariant, + otherwise contravariant. + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead + >>> Lorentz = TensorIndexType("Lorentz", dummy_name="L") + >>> mu, nu = tensor_indices('mu nu', Lorentz) + >>> A = TensorHead("A", [Lorentz, Lorentz]) + >>> A(mu, -nu) + A(mu, -nu) + >>> A(mu, -mu) + A(L_0, -L_0) + + It is also possible to use symbols instead of indices (appropriate indices + are then generated automatically). + + >>> from sympy import Symbol + >>> x = Symbol('x') + >>> A(x, mu) + A(x, mu) + >>> A(x, -x) + A(L_0, -L_0) + + """ + + is_commutative = False + + _index_structure = None # type: _IndexStructure + args: tuple[TensorHead, Tuple] + + def __new__(cls, tensor_head, indices, *, is_canon_bp=False, **kw_args): + indices = cls._parse_indices(tensor_head, indices) + obj = Basic.__new__(cls, tensor_head, Tuple(*indices), **kw_args) + obj._index_structure = _IndexStructure.from_indices(*indices) + obj._free = obj._index_structure.free[:] + obj._dum = obj._index_structure.dum[:] + obj._ext_rank = obj._index_structure._ext_rank + obj._coeff = S.One + obj._nocoeff = obj + obj._component = tensor_head + obj._components = [tensor_head] + if tensor_head.rank != len(indices): + raise ValueError("wrong number of indices") + obj.is_canon_bp = is_canon_bp + obj._index_map = Tensor._build_index_map(indices, obj._index_structure) + return obj + + @property + def free(self): + return self._free + + @property + def dum(self): + return self._dum + + @property + def ext_rank(self): + return self._ext_rank + + @property + def coeff(self): + return self._coeff + + @property + def nocoeff(self): + return self._nocoeff + + @property + def component(self): + return self._component + + @property + def components(self): + return self._components + + @property + def head(self): + return self.args[0] + + @property + def indices(self): + return self.args[1] + + @property + def free_indices(self): + return set(self._index_structure.get_free_indices()) + + @property + def index_types(self): + return self.head.index_types + + @property + def rank(self): + return len(self.free_indices) + + @staticmethod + def _build_index_map(indices, index_structure): + index_map = {} + for idx in indices: + index_map[idx] = (indices.index(idx),) + return index_map + + def doit(self, **hints): + args, indices, free, dum = TensMul._tensMul_contract_indices([self]) + return args[0] + + @staticmethod + def _parse_indices(tensor_head, indices): + if not isinstance(indices, (tuple, list, Tuple)): + raise TypeError("indices should be an array, got %s" % type(indices)) + indices = list(indices) + for i, index in enumerate(indices): + if isinstance(index, Symbol): + indices[i] = TensorIndex(index, tensor_head.index_types[i], True) + elif isinstance(index, Mul): + c, e = index.as_coeff_Mul() + if c == -1 and isinstance(e, Symbol): + indices[i] = TensorIndex(e, tensor_head.index_types[i], False) + else: + raise ValueError("index not understood: %s" % index) + elif not isinstance(index, TensorIndex): + raise TypeError("wrong type for index: %s is %s" % (index, type(index))) + return indices + + def _set_new_index_structure(self, im, is_canon_bp=False): + indices = im.get_indices() + return self._set_indices(*indices, is_canon_bp=is_canon_bp) + + def _set_indices(self, *indices, is_canon_bp=False, **kw_args): + if
len(indices) != self.ext_rank: + raise ValueError("indices length mismatch") + return self.func(self.args[0], indices, is_canon_bp=is_canon_bp).doit() + + def _get_free_indices_set(self): + return {i[0] for i in self._index_structure.free} + + def _get_dummy_indices_set(self): + dummy_pos = set(itertools.chain(*self._index_structure.dum)) + return {idx for i, idx in enumerate(self.args[1]) if i in dummy_pos} + + def _get_indices_set(self): + return set(self.args[1].args) + + @property + def free_in_args(self): + return [(ind, pos, 0) for ind, pos in self.free] + + @property + def dum_in_args(self): + return [(p1, p2, 0, 0) for p1, p2 in self.dum] + + @property + def free_args(self): + return sorted([x[0] for x in self.free]) + + def commutes_with(self, other): + """ + :param other: + :return: + 0 commute + 1 anticommute + None neither commute nor anticommute + """ + if not isinstance(other, TensExpr): + return 0 + elif isinstance(other, Tensor): + return self.component.commutes_with(other.component) + return NotImplementedError + + def perm2tensor(self, g, is_canon_bp=False): + """ + Returns the tensor corresponding to the permutation ``g``. + + For further details, see the method in ``TIDS`` with the same name. + """ + return perm2tensor(self, g, is_canon_bp) + + def canon_bp(self): + if self.is_canon_bp: + return self + expr = self.expand() + g, dummies, msym = expr._index_structure.indices_canon_args() + v = components_canon_args([expr.component]) + can = canonicalize(g, dummies, msym, *v) + if can == 0: + return S.Zero + tensor = self.perm2tensor(can, True) + return tensor + + def split(self): + return [self] + + def _expand(self, **kwargs): + return self + + def sorted_components(self): + return self + + def get_indices(self) -> list[TensorIndex]: + """ + Get a list of indices, corresponding to those of the tensor. + """ + return list(self.args[1]) + + def get_free_indices(self) -> list[TensorIndex]: + """ + Get a list of free indices, corresponding to those of the tensor. + """ + return self._index_structure.get_free_indices() + + def _replace_indices(self, repl: dict[TensorIndex, TensorIndex]) -> TensExpr: + # TODO: this could be optimized by only swapping the indices + # instead of visiting the whole expression tree: + return self.xreplace(repl) + + def as_base_exp(self): + return self, S.One + + def substitute_indices(self, *index_tuples): + """ + Return a tensor with free indices substituted according to ``index_tuples``. + + ``index_tuples`` : list of tuples ``(old_index, new_index)``. + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads, TensorSymmetry + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> i, j, k, l = tensor_indices('i,j,k,l', Lorentz) + >>> A, B = tensor_heads('A,B', [Lorentz]*2, TensorSymmetry.fully_symmetric(2)) + >>> t = A(i, k)*B(-k, -j); t + A(i, L_0)*B(-L_0, -j) + >>> t.substitute_indices((i, k),(-j, l)) + A(k, L_0)*B(-L_0, l) + """ + indices = [] + for index in self.indices: + for ind_old, ind_new in index_tuples: + if (index.name == ind_old.name and index.tensor_index_type == + ind_old.tensor_index_type): + if index.is_up == ind_old.is_up: + indices.append(ind_new) + else: + indices.append(-ind_new) + break + else: + indices.append(index) + return self.head(*indices) + + def _get_symmetrized_forms(self): + """ + Return a set giving all possible permutations of ``self`` that are allowed by its symmetries.
+ """ + comp = self.component + gens = comp.symmetry.generators + rank = comp.rank + + old_perms = None + new_perms = {self} + while new_perms != old_perms: + old_perms = new_perms.copy() + for tens in old_perms: + for gen in gens: + inds = tens.get_indices() + per = [gen.apply(i) for i in range(0,rank)] + sign = (-1)**(gen.apply(rank) - rank) + ind_map = dict(zip(inds, [inds[i] for i in per])) + new_perms.add( sign * tens._replace_indices(ind_map) ) + + return new_perms + + def matches(self, expr, repl_dict=None, old=False): + expr = sympify(expr) + + if repl_dict is None: + repl_dict = {} + else: + repl_dict = repl_dict.copy() + + #simple checks + if self == expr: + return repl_dict + if not isinstance(expr, Tensor): + return None + if self.head != expr.head: + return None + + #Now consider all index symmetries of expr, and see if any of them allow a match. + for new_expr in expr._get_symmetrized_forms(): + m = self._matches(new_expr, repl_dict, old=old) + if m is not None: + repl_dict.update(m) + return repl_dict + + return None + + def _matches(self, expr, repl_dict=None, old=False): + """ + This does not account for index symmetries of expr + """ + expr = sympify(expr) + + if repl_dict is None: + repl_dict = {} + else: + repl_dict = repl_dict.copy() + + #simple checks + if self == expr: + return repl_dict + if not isinstance(expr, Tensor): + return None + if self.head != expr.head: + return None + + s_indices = self.get_indices() + e_indices = expr.get_indices() + + if len(s_indices) != len(e_indices): + return None + + for i in range(len(s_indices)): + s_ind = s_indices[i] + m = s_ind.matches(e_indices[i]) + if m is None: + return None + elif -s_ind in repl_dict.keys() and -repl_dict[-s_ind] != m[s_ind]: + return None + else: + repl_dict.update(m) + + return repl_dict + + def __call__(self, *indices): + deprecate_call() + free_args = self.free_args + indices = list(indices) + if [x.tensor_index_type for x in indices] != [x.tensor_index_type for x in free_args]: + raise ValueError('incompatible types') + if indices == free_args: + return self + t = self.substitute_indices(*list(zip(free_args, indices))) + + # object is rebuilt in order to make sure that all contracted indices + # get recognized as dummies, but only if there are contracted indices. + if len({i if i.is_up else -i for i in indices}) != len(indices): + return t.func(*t.args) + return t + + # TODO: put this into TensExpr? + def __iter__(self): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + return self.data.__iter__() + + # TODO: put this into TensExpr? + def __getitem__(self, item): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + return self.data[item] + + def _extract_data(self, replacement_dict): + from .array import Array + for k, v in replacement_dict.items(): + if isinstance(k, Tensor) and k.args[0] == self.args[0]: + other = k + array = v + break + else: + raise ValueError("%s not found in %s" % (self, replacement_dict)) + + # TODO: inefficient, this should be done at root level only: + replacement_dict = {k: Array(v) for k, v in replacement_dict.items()} + array = Array(array) + + dum1 = self.dum + dum2 = other.dum + + if len(dum2) > 0: + for pair in dum2: + # allow `dum2` if the contained values are also in `dum1`. 
+ if pair not in dum1: + raise NotImplementedError("%s with contractions is not implemented" % other) + # Remove elements in `dum2` from `dum1`: + dum1 = [pair for pair in dum1 if pair not in dum2] + if len(dum1) > 0: + indices1 = self.get_indices() + indices2 = other.get_indices() + repl = {} + for p1, p2 in dum1: + repl[indices2[p2]] = -indices2[p1] + for pos in (p1, p2): + if indices1[pos].is_up ^ indices2[pos].is_up: + metric = replacement_dict[indices1[pos].tensor_index_type] + if indices1[pos].is_up: + metric = _TensorDataLazyEvaluator.inverse_matrix(metric) + array = self._contract_and_permute_with_metric(metric, array, pos, len(indices2)) + other = other.xreplace(repl).doit() + array = _TensorDataLazyEvaluator.data_contract_dum([array], dum1, len(indices2)) + + free_ind1 = self.get_free_indices() + free_ind2 = other.get_free_indices() + + return self._match_indices_with_other_tensor(array, free_ind1, free_ind2, replacement_dict) + + @property + def data(self): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + return _tensor_data_substitution_dict[self] + + @data.setter + def data(self, data): + deprecate_data() + # TODO: check data compatibility with properties of tensor. + with ignore_warnings(SymPyDeprecationWarning): + _tensor_data_substitution_dict[self] = data + + @data.deleter + def data(self): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + if self in _tensor_data_substitution_dict: + del _tensor_data_substitution_dict[self] + if self.metric in _tensor_data_substitution_dict: + del _tensor_data_substitution_dict[self.metric] + + def _print(self): + indices = [str(ind) for ind in self.indices] + component = self.component + if component.rank > 0: + return ('%s(%s)' % (component.name, ', '.join(indices))) + else: + return ('%s' % component.name) + + def equals(self, other): + if other == 0: + return self.coeff == 0 + other = _sympify(other) + if not isinstance(other, TensExpr): + assert not self.components + return S.One == other + + def _get_compar_comp(self): + t = self.canon_bp() + r = (t.coeff, tuple(t.components), \ + tuple(sorted(t.free)), tuple(sorted(t.dum))) + return r + + return _get_compar_comp(self) == _get_compar_comp(other) + + def contract_metric(self, g): + # if metric is not the same, ignore this step: + if self.component != g: + return self + # in case there are free components, do not perform anything: + if len(self.free) != 0: + return self + + #antisym = g.index_types[0].metric_antisym + if g.symmetry == TensorSymmetry.fully_symmetric(-2): + antisym = 1 + elif g.symmetry == TensorSymmetry.fully_symmetric(2): + antisym = 0 + elif g.symmetry == TensorSymmetry.no_symmetry(2): + antisym = None + else: + raise NotImplementedError + sign = S.One + typ = g.index_types[0] + + if not antisym: + # g(i, -i) + sign = sign*typ.dim + else: + # g(i, -i) + sign = sign*typ.dim + + dp0, dp1 = self.dum[0] + if dp0 < dp1: + # g(i, -i) = -D with antisymmetric metric + sign = -sign + + return sign + + def contract_delta(self, metric): + return self.contract_metric(metric) + + def _eval_rewrite_as_Indexed(self, tens, indices, **kwargs): + from sympy.tensor.indexed import Indexed + # TODO: replace .args[0] with .name: + index_symbols = [i.args[0] for i in self.get_indices()] + expr = Indexed(tens.args[0], *index_symbols) + return self._check_add_Sum(expr, index_symbols) + + def _eval_partial_derivative(self, s): # type: (Tensor) -> Expr + + if not isinstance(s, Tensor): + return S.Zero + else: + + # @a_i/@a_k = delta_i^k + # @a_i/@a^k = 
g_ij delta^j_k + # @a^i/@a^k = delta^i_k + # @a^i/@a_k = g^ij delta_j^k + # TODO: if there is no metric present, the derivative should be zero? + + if self.head != s.head: + return S.Zero + + # if heads are the same, provide delta and/or metric products + # for every free index pair in the appropriate tensor + # it is assumed that the free indices are in the proper order + # A contravariant index in the derivative becomes covariant + # after performing the derivative and vice versa + + kronecker_delta_list = [1] + + # this does not guarantee a correct index order + + for (count, (iself, iother)) in enumerate(zip(self.get_free_indices(), s.get_free_indices())): + if iself.tensor_index_type != iother.tensor_index_type: + raise ValueError("index types not compatible") + else: + tensor_index_type = iself.tensor_index_type + tensor_metric = tensor_index_type.metric + dummy = TensorIndex("d_" + str(count), tensor_index_type, + is_up=iself.is_up) + if iself.is_up == iother.is_up: + kroneckerdelta = tensor_index_type.delta(iself, -iother) + else: + kroneckerdelta = ( + TensMul(tensor_metric(iself, dummy), + tensor_index_type.delta(-dummy, -iother)) + ) + kronecker_delta_list.append(kroneckerdelta) + return TensMul.fromiter(kronecker_delta_list).doit() + # doit necessary to rename dummy indices accordingly + + +class TensMul(TensExpr, AssocOp): + """ + Product of tensors. + + Parameters + ========== + + coeff : SymPy coefficient of the tensor + args : the tensors of the product + + Attributes + ========== + + ``components`` : list of ``TensorHead`` of the component tensors + ``types`` : list of nonrepeated ``TensorIndexType`` + ``free`` : list of ``(ind, ipos, icomp)``, see Notes + ``dum`` : list of ``(ipos1, ipos2, icomp1, icomp2)``, see Notes + ``ext_rank`` : rank of the tensor counting the dummy indices + ``rank`` : rank of the tensor + ``coeff`` : SymPy coefficient of the tensor + ``free_args`` : list of the free indices in sorted order + ``is_canon_bp`` : ``True`` if the tensor is in canonical form + + Notes + ===== + + ``args[0]`` list of ``TensorHead`` of the component tensors. + + ``args[1]`` list of ``(ind, ipos, icomp)`` + where ``ind`` is a free index, ``ipos`` is the slot position + of ``ind`` in the ``icomp``-th component tensor. + + ``args[2]`` list of tuples representing dummy indices. + ``(ipos1, ipos2, icomp1, icomp2)`` indicates that the contravariant + dummy index is the ``ipos1``-th slot position in the ``icomp1``-th + component tensor; the corresponding covariant index is + in the ``ipos2`` slot position in the ``icomp2``-th component tensor. + + """ + identity = S.One + + _index_structure = None # type: _IndexStructure + + def __new__(cls, *args, **kw_args): + is_canon_bp = kw_args.get('is_canon_bp', False) + args = list(map(_sympify, args)) + + """ + If the internal dummy indices in one arg conflict with the free indices + of the remaining args, we need to rename those internal dummy indices.
+ """ + free = [get_free_indices(arg) for arg in args] + free = set(itertools.chain(*free)) #flatten free + newargs = [] + for arg in args: + dum_this = set(get_dummy_indices(arg)) + dum_other = [get_dummy_indices(a) for a in newargs] + dum_other = set(itertools.chain(*dum_other)) #flatten dum_other + free_this = set(get_free_indices(arg)) + if len(dum_this.intersection(free)) > 0: + exclude = free_this.union(free, dum_other) + newarg = TensMul._dedupe_indices(arg, exclude) + else: + newarg = arg + newargs.append(newarg) + + args = newargs + + # Flatten: + args = [i for arg in args for i in (arg.args if isinstance(arg, (TensMul, Mul)) else [arg])] + + args, indices, free, dum = TensMul._tensMul_contract_indices(args, replace_indices=False) + + # Data for indices: + index_types = [i.tensor_index_type for i in indices] + index_structure = _IndexStructure(free, dum, index_types, indices, canon_bp=is_canon_bp) + + obj = TensExpr.__new__(cls, *args) + obj._indices = indices + obj._index_types = index_types[:] + obj._index_structure = index_structure + obj._free = index_structure.free[:] + obj._dum = index_structure.dum[:] + obj._free_indices = {x[0] for x in obj.free} + obj._rank = len(obj.free) + obj._ext_rank = len(obj._index_structure.free) + 2*len(obj._index_structure.dum) + obj._coeff = S.One + obj._is_canon_bp = is_canon_bp + return obj + + index_types = property(lambda self: self._index_types) + free = property(lambda self: self._free) + dum = property(lambda self: self._dum) + free_indices = property(lambda self: self._free_indices) + rank = property(lambda self: self._rank) + ext_rank = property(lambda self: self._ext_rank) + + @staticmethod + def _indices_to_free_dum(args_indices): + free2pos1 = {} + free2pos2 = {} + dummy_data = [] + indices = [] + + # Notation for positions (to better understand the code): + # `pos1`: position in the `args`. + # `pos2`: position in the indices. + + # Example: + # A(i, j)*B(k, m, n)*C(p) + # `pos1` of `n` is 1 because it's in `B` (second `args` of TensMul). + # `pos2` of `n` is 4 because it's the fifth overall index. 
+ + # Counter for the index position wrt the whole expression: + pos2 = 0 + + for pos1, arg_indices in enumerate(args_indices): + + for index in arg_indices: + if not isinstance(index, TensorIndex): + raise TypeError("expected TensorIndex") + if -index in free2pos1: + # Dummy index detected: + other_pos1 = free2pos1.pop(-index) + other_pos2 = free2pos2.pop(-index) + if index.is_up: + dummy_data.append((index, pos1, other_pos1, pos2, other_pos2)) + else: + dummy_data.append((-index, other_pos1, pos1, other_pos2, pos2)) + indices.append(index) + elif index in free2pos1: + raise ValueError("Repeated index: %s" % index) + else: + free2pos1[index] = pos1 + free2pos2[index] = pos2 + indices.append(index) + pos2 += 1 + + free = list(free2pos2.items()) + free_names = [i.name for i in free2pos2.keys()] + + dummy_data.sort(key=lambda x: x[3]) + return indices, free, free_names, dummy_data + + @staticmethod + def _dummy_data_to_dum(dummy_data): + return [(p2a, p2b) for (i, p1a, p1b, p2a, p2b) in dummy_data] + + @staticmethod + def _tensMul_contract_indices(args, replace_indices=True): + replacements = [{} for _ in args] + + #_index_order = all(_has_index_order(arg) for arg in args) + + args_indices = [get_indices(arg) for arg in args] + indices, free, free_names, dummy_data = TensMul._indices_to_free_dum(args_indices) + + cdt = defaultdict(int) + + def dummy_name_gen(tensor_index_type): + nd = str(cdt[tensor_index_type]) + cdt[tensor_index_type] += 1 + return tensor_index_type.dummy_name + '_' + nd + + if replace_indices: + for old_index, pos1cov, pos1contra, pos2cov, pos2contra in dummy_data: + index_type = old_index.tensor_index_type + while True: + dummy_name = dummy_name_gen(index_type) + if dummy_name not in free_names: + break + dummy = TensorIndex(dummy_name, index_type, True) + replacements[pos1cov][old_index] = dummy + replacements[pos1contra][-old_index] = -dummy + indices[pos2cov] = dummy + indices[pos2contra] = -dummy + args = [ + arg._replace_indices(repl) if isinstance(arg, TensExpr) else arg + for arg, repl in zip(args, replacements)] + + dum = TensMul._dummy_data_to_dum(dummy_data) + return args, indices, free, dum + + @staticmethod + def _get_components_from_args(args): + """ + Get a list of ``Tensor`` objects having the same ``TIDS`` if multiplied + by one another. + """ + components = [] + for arg in args: + if not isinstance(arg, TensExpr): + continue + if isinstance(arg, TensAdd): + continue + components.extend(arg.components) + return components + + @staticmethod + def _rebuild_tensors_list(args, index_structure): + indices = index_structure.get_indices() + #tensors = [None for i in components] # pre-allocate list + ind_pos = 0 + for i, arg in enumerate(args): + if not isinstance(arg, TensExpr): + continue + prev_pos = ind_pos + ind_pos += arg.ext_rank + args[i] = Tensor(arg.component, indices[prev_pos:ind_pos]) + + def doit(self, **hints): + is_canon_bp = self._is_canon_bp + deep = hints.get('deep', True) + if deep: + args = [arg.doit(**hints) for arg in self.args] + + """ + There may now be conflicts between dummy indices of different args + (each arg's doit method does not have any information about which + dummy indices are already used in the other args), so we + deduplicate them. 
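+ For instance, if two factors each introduce an internal dummy named
+ ``L_0`` by their own ``doit``, one occurrence is relabeled before the
+ product is rebuilt (an illustrative reading of the deduplication step
+ below).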
+ """ + rule = dict(zip(self.args, args)) + rule = self._dedupe_indices_in_rule(rule) + args = [rule[a] for a in self.args] + + else: + args = self.args + + args = [arg for arg in args if arg != self.identity] + + # Extract non-tensor coefficients: + coeff = reduce(lambda a, b: a*b, [arg for arg in args if not isinstance(arg, TensExpr)], S.One) + args = [arg for arg in args if isinstance(arg, TensExpr)] + + if len(args) == 0: + return coeff + + if coeff != self.identity: + args = [coeff] + args + if coeff == 0: + return S.Zero + + if len(args) == 1: + return args[0] + + args, indices, free, dum = TensMul._tensMul_contract_indices(args) + + # Data for indices: + index_types = [i.tensor_index_type for i in indices] + index_structure = _IndexStructure(free, dum, index_types, indices, canon_bp=is_canon_bp) + + obj = self.func(*args) + obj._index_types = index_types + obj._index_structure = index_structure + obj._ext_rank = len(obj._index_structure.free) + 2*len(obj._index_structure.dum) + obj._coeff = coeff + obj._is_canon_bp = is_canon_bp + return obj + + # TODO: this method should be private + # TODO: should this method be renamed _from_components_free_dum ? + @staticmethod + def from_data(coeff, components, free, dum, **kw_args): + return TensMul(coeff, *TensMul._get_tensors_from_components_free_dum(components, free, dum), **kw_args).doit() + + @staticmethod + def _get_tensors_from_components_free_dum(components, free, dum): + """ + Get a list of ``Tensor`` objects by distributing ``free`` and ``dum`` indices on the ``components``. + """ + index_structure = _IndexStructure.from_components_free_dum(components, free, dum) + indices = index_structure.get_indices() + tensors = [None for i in components] # pre-allocate list + + # distribute indices on components to build a list of tensors: + ind_pos = 0 + for i, component in enumerate(components): + prev_pos = ind_pos + ind_pos += component.rank + tensors[i] = Tensor(component, indices[prev_pos:ind_pos]) + return tensors + + def _get_free_indices_set(self): + return {i[0] for i in self.free} + + def _get_dummy_indices_set(self): + dummy_pos = set(itertools.chain(*self.dum)) + return {idx for i, idx in enumerate(self._index_structure.get_indices()) if i in dummy_pos} + + def _get_position_offset_for_indices(self): + arg_offset = [None for i in range(self.ext_rank)] + counter = 0 + for arg in self.args: + if not isinstance(arg, TensExpr): + continue + for j in range(arg.ext_rank): + arg_offset[j + counter] = counter + counter += arg.ext_rank + return arg_offset + + @property + def free_args(self): + return sorted([x[0] for x in self.free]) + + @property + def components(self): + return self._get_components_from_args(self.args) + + @property + def free_in_args(self): + arg_offset = self._get_position_offset_for_indices() + argpos = self._get_indices_to_args_pos() + return [(ind, pos-arg_offset[pos], argpos[pos]) for (ind, pos) in self.free] + + @property + def coeff(self): + # return Mul.fromiter([c for c in self.args if not isinstance(c, TensExpr)]) + return self._coeff + + @property + def nocoeff(self): + return self.func(*[t for t in self.args if isinstance(t, TensExpr)]).doit() + + @property + def dum_in_args(self): + arg_offset = self._get_position_offset_for_indices() + argpos = self._get_indices_to_args_pos() + return [(p1-arg_offset[p1], p2-arg_offset[p2], argpos[p1], argpos[p2]) for p1, p2 in self.dum] + + def equals(self, other): + if other == 0: + return self.coeff == 0 + other = _sympify(other) + if not isinstance(other, TensExpr): + 
assert not self.components + return self.coeff == other + + return self.canon_bp() == other.canon_bp() + + def get_indices(self): + """ + Returns the list of indices of the tensor. + + Explanation + =========== + + The indices are listed in the order in which they appear in the + component tensors. + The dummy indices are given a name which does not collide with + the names of the free indices. + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz) + >>> g = Lorentz.metric + >>> p, q = tensor_heads('p,q', [Lorentz]) + >>> t = p(m1)*g(m0,m2) + >>> t.get_indices() + [m1, m0, m2] + >>> t2 = p(m1)*g(-m1, m2) + >>> t2.get_indices() + [L_0, -L_0, m2] + """ + return self._indices + + def get_free_indices(self) -> list[TensorIndex]: + """ + Returns the list of free indices of the tensor. + + Explanation + =========== + + The indices are listed in the order in which they appear in the + component tensors. + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz) + >>> g = Lorentz.metric + >>> p, q = tensor_heads('p,q', [Lorentz]) + >>> t = p(m1)*g(m0,m2) + >>> t.get_free_indices() + [m1, m0, m2] + >>> t2 = p(m1)*g(-m1, m2) + >>> t2.get_free_indices() + [m2] + """ + return self._index_structure.get_free_indices() + + def _replace_indices(self, repl: dict[TensorIndex, TensorIndex]) -> TensExpr: + return self.func(*[arg._replace_indices(repl) if isinstance(arg, TensExpr) else arg for arg in self.args]) + + def split(self): + """ + Returns a list of tensors, whose product is ``self``. + + Explanation + =========== + + Dummy indices contracted among different tensor components + become free indices with the same name as the one used to + represent the dummy indices. + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads, TensorSymmetry + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> a, b, c, d = tensor_indices('a,b,c,d', Lorentz) + >>> A, B = tensor_heads('A,B', [Lorentz]*2, TensorSymmetry.fully_symmetric(2)) + >>> t = A(a,b)*B(-b,c) + >>> t + A(a, L_0)*B(-L_0, c) + >>> t.split() + [A(a, L_0), B(-L_0, c)] + """ + if self.args == (): + return [self] + splitp = [] + res = 1 + for arg in self.args: + if isinstance(arg, Tensor): + splitp.append(res*arg) + res = 1 + else: + res *= arg + return splitp + + def _expand(self, **hints): + # TODO: temporary solution, in the future this should be linked to + # `Expr.expand`. 
+ args = [_expand(arg, **hints) for arg in self.args] + args1 = [arg.args if isinstance(arg, (Add, TensAdd)) else (arg,) for arg in args] + return TensAdd(*[ + TensMul(*i) for i in itertools.product(*args1)] + ) + + def __neg__(self): + return TensMul(S.NegativeOne, self, is_canon_bp=self._is_canon_bp).doit() + + def __getitem__(self, item): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + return self.data[item] + + def _get_args_for_traditional_printer(self): + args = list(self.args) + if self.coeff.could_extract_minus_sign(): + # expressions like "-A(a)" + sign = "-" + if args[0] == S.NegativeOne: + args = args[1:] + else: + args[0] = -args[0] + else: + sign = "" + return sign, args + + def _sort_args_for_sorted_components(self): + """ + Returns the ``args`` sorted according to the components commutation + properties. + + Explanation + =========== + + The sorting is done taking into account the commutation group + of the component tensors. + """ + cv = [arg for arg in self.args if isinstance(arg, TensExpr)] + sign = 1 + n = len(cv) - 1 + for i in range(n): + for j in range(n, i, -1): + c = cv[j-1].commutes_with(cv[j]) + # if `c` is `None`, it does neither commute nor anticommute, skip: + if c not in (0, 1): + continue + typ1 = sorted(set(cv[j-1].component.index_types), key=lambda x: x.name) + typ2 = sorted(set(cv[j].component.index_types), key=lambda x: x.name) + if (typ1, cv[j-1].component.name) > (typ2, cv[j].component.name): + cv[j-1], cv[j] = cv[j], cv[j-1] + # if `c` is 1, the anticommute, so change sign: + if c: + sign = -sign + + coeff = sign * self.coeff + if coeff != 1: + return [coeff] + cv + return cv + + def sorted_components(self): + """ + Returns a tensor product with sorted components. + """ + return TensMul(*self._sort_args_for_sorted_components()).doit() + + def perm2tensor(self, g, is_canon_bp=False): + """ + Returns the tensor corresponding to the permutation ``g`` + + For further details, see the method in ``TIDS`` with the same name. + """ + return perm2tensor(self, g, is_canon_bp=is_canon_bp) + + def canon_bp(self): + """ + Canonicalize using the Butler-Portugal algorithm for canonicalization + under monoterm symmetries. + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead, TensorSymmetry + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz) + >>> A = TensorHead('A', [Lorentz]*2, TensorSymmetry.fully_symmetric(-2)) + >>> t = A(m0,-m1)*A(m1,-m0) + >>> t.canon_bp() + -A(L_0, L_1)*A(-L_0, -L_1) + >>> t = A(m0,-m1)*A(m1,-m2)*A(m2,-m0) + >>> t.canon_bp() + 0 + """ + if self._is_canon_bp: + return self + expr = self.expand() + if isinstance(expr, TensAdd): + return expr.canon_bp() + if not expr.components: + return expr + t = expr.sorted_components() + g, dummies, msym = t._index_structure.indices_canon_args() + v = components_canon_args(t.components) + can = canonicalize(g, dummies, msym, *v) + if can == 0: + return S.Zero + tmul = t.perm2tensor(can, True) + return tmul + + def contract_delta(self, delta): + t = self.contract_metric(delta) + return t + + def _get_indices_to_args_pos(self): + """ + Get a dict mapping the index position to TensMul's argument number. 
+ """ + pos_map = {} + pos_counter = 0 + for arg_i, arg in enumerate(self.args): + if not isinstance(arg, TensExpr): + continue + assert isinstance(arg, Tensor) + for i in range(arg.ext_rank): + pos_map[pos_counter] = arg_i + pos_counter += 1 + return pos_map + + def contract_metric(self, g): + """ + Raise or lower indices with the metric ``g``. + + Parameters + ========== + + g : metric + + Notes + ===== + + See the ``TensorIndexType`` docstring for the contraction conventions. + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensor_heads + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> m0, m1, m2 = tensor_indices('m0,m1,m2', Lorentz) + >>> g = Lorentz.metric + >>> p, q = tensor_heads('p,q', [Lorentz]) + >>> t = p(m0)*q(m1)*g(-m0, -m1) + >>> t.canon_bp() + metric(L_0, L_1)*p(-L_0)*q(-L_1) + >>> t.contract_metric(g).canon_bp() + p(L_0)*q(-L_0) + """ + expr = self.expand() + if self != expr: + expr = canon_bp(expr) + return contract_metric(expr, g) + pos_map = self._get_indices_to_args_pos() + args = list(self.args) + + #antisym = g.index_types[0].metric_antisym + if g.symmetry == TensorSymmetry.fully_symmetric(-2): + antisym = 1 + elif g.symmetry == TensorSymmetry.fully_symmetric(2): + antisym = 0 + elif g.symmetry == TensorSymmetry.no_symmetry(2): + antisym = None + else: + raise NotImplementedError + + # list of positions of the metric ``g`` inside ``args`` + gpos = [i for i, x in enumerate(self.args) if isinstance(x, Tensor) and x.component == g] + if not gpos: + return self + + # Sign is either 1 or -1, to correct the sign after metric contraction + # (for spinor indices). + sign = 1 + dum = self.dum[:] + free = self.free[:] + elim = set() + for gposx in gpos: + if gposx in elim: + continue + free1 = [x for x in free if pos_map[x[1]] == gposx] + dum1 = [x for x in dum if pos_map[x[0]] == gposx or pos_map[x[1]] == gposx] + if not dum1: + continue + elim.add(gposx) + # subs with the multiplication neutral element, that is, remove it: + args[gposx] = 1 + if len(dum1) == 2: + if not antisym: + dum10, dum11 = dum1 + if pos_map[dum10[1]] == gposx: + # the index with pos p0 contravariant + p0 = dum10[0] + else: + # the index with pos p0 is covariant + p0 = dum10[1] + if pos_map[dum11[1]] == gposx: + # the index with pos p1 is contravariant + p1 = dum11[0] + else: + # the index with pos p1 is covariant + p1 = dum11[1] + + dum.append((p0, p1)) + else: + dum10, dum11 = dum1 + # change the sign to bring the indices of the metric to contravariant + # form; change the sign if dum10 has the metric index in position 0 + if pos_map[dum10[1]] == gposx: + # the index with pos p0 is contravariant + p0 = dum10[0] + if dum10[1] == 1: + sign = -sign + else: + # the index with pos p0 is covariant + p0 = dum10[1] + if dum10[0] == 0: + sign = -sign + if pos_map[dum11[1]] == gposx: + # the index with pos p1 is contravariant + p1 = dum11[0] + sign = -sign + else: + # the index with pos p1 is covariant + p1 = dum11[1] + + dum.append((p0, p1)) + + elif len(dum1) == 1: + if not antisym: + dp0, dp1 = dum1[0] + if pos_map[dp0] == pos_map[dp1]: + # g(i, -i) + typ = g.index_types[0] + sign = sign*typ.dim + + else: + # g(i0, i1)*p(-i1) + if pos_map[dp0] == gposx: + p1 = dp1 + else: + p1 = dp0 + + ind, p = free1[0] + free.append((ind, p1)) + else: + dp0, dp1 = dum1[0] + if pos_map[dp0] == pos_map[dp1]: + # g(i, -i) + typ = g.index_types[0] + sign = sign*typ.dim + + if dp0 < dp1: + # g(i, -i) = -D with antisymmetric metric + sign = -sign + else: + # g(i0, 
i1)*p(-i1) + if pos_map[dp0] == gposx: + p1 = dp1 + if dp0 == 0: + sign = -sign + else: + p1 = dp0 + ind, p = free1[0] + free.append((ind, p1)) + dum = [x for x in dum if x not in dum1] + free = [x for x in free if x not in free1] + + # shift positions: + shift = 0 + shifts = [0]*len(args) + for i in range(len(args)): + if i in elim: + shift += 2 + continue + shifts[i] = shift + free = [(ind, p - shifts[pos_map[p]]) for (ind, p) in free if pos_map[p] not in elim] + dum = [(p0 - shifts[pos_map[p0]], p1 - shifts[pos_map[p1]]) for p0, p1 in dum if pos_map[p0] not in elim and pos_map[p1] not in elim] + + res = sign*TensMul(*args).doit() + if not isinstance(res, TensExpr): + return res + im = _IndexStructure.from_components_free_dum(res.components, free, dum) + return res._set_new_index_structure(im) + + def _set_new_index_structure(self, im, is_canon_bp=False): + indices = im.get_indices() + return self._set_indices(*indices, is_canon_bp=is_canon_bp) + + def _set_indices(self, *indices, is_canon_bp=False, **kw_args): + if len(indices) != self.ext_rank: + raise ValueError("indices length mismatch") + args = list(self.args)[:] + pos = 0 + for i, arg in enumerate(args): + if not isinstance(arg, TensExpr): + continue + assert isinstance(arg, Tensor) + ext_rank = arg.ext_rank + args[i] = arg._set_indices(*indices[pos:pos+ext_rank]) + pos += ext_rank + return TensMul(*args, is_canon_bp=is_canon_bp).doit() + + @staticmethod + def _index_replacement_for_contract_metric(args, free, dum): + for arg in args: + if not isinstance(arg, TensExpr): + continue + assert isinstance(arg, Tensor) + + def substitute_indices(self, *index_tuples): + new_args = [] + for arg in self.args: + if isinstance(arg, TensExpr): + arg = arg.substitute_indices(*index_tuples) + new_args.append(arg) + return TensMul(*new_args).doit() + + def __call__(self, *indices): + deprecate_call() + free_args = self.free_args + indices = list(indices) + if [x.tensor_index_type for x in indices] != [x.tensor_index_type for x in free_args]: + raise ValueError('incompatible types') + if indices == free_args: + return self + t = self.substitute_indices(*list(zip(free_args, indices))) + + # object is rebuilt in order to make sure that all contracted indices + # get recognized as dummies, but only if there are contracted indices. 
+ if len({i if i.is_up else -i for i in indices}) != len(indices): + return t.func(*t.args) + return t + + def _extract_data(self, replacement_dict): + args_indices, arrays = zip(*[arg._extract_data(replacement_dict) for arg in self.args if isinstance(arg, TensExpr)]) + coeff = reduce(operator.mul, [a for a in self.args if not isinstance(a, TensExpr)], S.One) + indices, free, free_names, dummy_data = TensMul._indices_to_free_dum(args_indices) + dum = TensMul._dummy_data_to_dum(dummy_data) + ext_rank = self.ext_rank + free.sort(key=lambda x: x[1]) + free_indices = [i[0] for i in free] + return free_indices, coeff*_TensorDataLazyEvaluator.data_contract_dum(arrays, dum, ext_rank) + + @property + def data(self): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + dat = _tensor_data_substitution_dict[self.expand()] + return dat + + @data.setter + def data(self, data): + deprecate_data() + raise ValueError("Not possible to set component data to a tensor expression") + + @data.deleter + def data(self): + deprecate_data() + raise ValueError("Not possible to delete component data to a tensor expression") + + def __iter__(self): + deprecate_data() + with ignore_warnings(SymPyDeprecationWarning): + if self.data is None: + raise ValueError("No iteration on abstract tensors") + return self.data.__iter__() + + @staticmethod + def _dedupe_indices(new, exclude): + """ + exclude: set + new: TensExpr + + If ``new`` has any dummy indices that are in ``exclude``, return a version + of new with those indices replaced. If no replacements are needed, + return None + + """ + exclude = set(exclude) + dums_new = set(get_dummy_indices(new)) + free_new = set(get_free_indices(new)) + + conflicts = dums_new.intersection(exclude) + if len(conflicts) == 0: + return None + + """ + ``exclude_for_gen`` is to be passed to ``_IndexStructure._get_generator_for_dummy_indices()``. + Since the latter does not use the index position for anything, we just + set it as ``None`` here. + """ + exclude.update(dums_new) + exclude.update(free_new) + exclude_for_gen = [(i, None) for i in exclude] + gen = _IndexStructure._get_generator_for_dummy_indices(exclude_for_gen) + repl = {} + for d in conflicts: + if -d in repl.keys(): + continue + newname = gen(d.tensor_index_type) + new_d = d.func(newname, *d.args[1:]) + repl[d] = new_d + repl[-d] = -new_d + + if len(repl) == 0: + return None + + new_renamed = new._replace_indices(repl) + return new_renamed + + def _dedupe_indices_in_rule(self, rule): + """ + rule: dict + + This applies TensMul._dedupe_indices on all values of rule. 
+ + """ + index_rules = {k:v for k,v in rule.items() if isinstance(k, TensorIndex)} + other_rules = {k:v for k,v in rule.items() if k not in index_rules.keys()} + exclude = set(self.get_indices()) + + newrule = {} + newrule.update(index_rules) + exclude.update(index_rules.keys()) + exclude.update(index_rules.values()) + for old, new in other_rules.items(): + new_renamed = TensMul._dedupe_indices(new, exclude) + if old == new or new_renamed is None: + newrule[old] = new + else: + newrule[old] = new_renamed + exclude.update(get_indices(new_renamed)) + return newrule + + def _eval_rewrite_as_Indexed(self, *args, **kwargs): + from sympy.concrete.summations import Sum + index_symbols = [i.args[0] for i in self.get_indices()] + args = [arg.args[0] if isinstance(arg, Sum) else arg for arg in args] + expr = Mul.fromiter(args) + return self._check_add_Sum(expr, index_symbols) + + def _eval_partial_derivative(self, s): + # Evaluation like Mul + terms = [] + for i, arg in enumerate(self.args): + # checking whether some tensor instance is differentiated + # or some other thing is necessary, but ugly + if isinstance(arg, TensExpr): + d = arg._eval_partial_derivative(s) + else: + # do not call diff is s is no symbol + if s._diff_wrt: + d = arg._eval_derivative(s) + else: + d = S.Zero + if d: + terms.append(TensMul.fromiter(self.args[:i] + (d,) + self.args[i + 1:])) + return TensAdd.fromiter(terms) + + +class TensorElement(TensExpr): + """ + Tensor with evaluated components. + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, TensorHead, TensorSymmetry + >>> from sympy import symbols + >>> L = TensorIndexType("L") + >>> i, j, k = symbols("i j k") + >>> A = TensorHead("A", [L, L], TensorSymmetry.fully_symmetric(2)) + >>> A(i, j).get_free_indices() + [i, j] + + If we want to set component ``i`` to a specific value, use the + ``TensorElement`` class: + + >>> from sympy.tensor.tensor import TensorElement + >>> te = TensorElement(A(i, j), {i: 2}) + + As index ``i`` has been accessed (``{i: 2}`` is the evaluation of its 3rd + element), the free indices will only contain ``j``: + + >>> te.get_free_indices() + [j] + """ + + def __new__(cls, expr, index_map): + if not isinstance(expr, Tensor): + # remap + if not isinstance(expr, TensExpr): + raise TypeError("%s is not a tensor expression" % expr) + return expr.func(*[TensorElement(arg, index_map) for arg in expr.args]) + expr_free_indices = expr.get_free_indices() + name_translation = {i.args[0]: i for i in expr_free_indices} + index_map = {name_translation.get(index, index): value for index, value in index_map.items()} + index_map = {index: value for index, value in index_map.items() if index in expr_free_indices} + if len(index_map) == 0: + return expr + free_indices = [i for i in expr_free_indices if i not in index_map.keys()] + index_map = Dict(index_map) + obj = TensExpr.__new__(cls, expr, index_map) + obj._free_indices = free_indices + return obj + + @property + def free(self): + return [(index, i) for i, index in enumerate(self.get_free_indices())] + + @property + def dum(self): + # TODO: inherit dummies from expr + return [] + + @property + def expr(self): + return self._args[0] + + @property + def index_map(self): + return self._args[1] + + @property + def coeff(self): + return S.One + + @property + def nocoeff(self): + return self + + def get_free_indices(self): + return self._free_indices + + def _replace_indices(self, repl: dict[TensorIndex, TensorIndex]) -> TensExpr: + # TODO: can be improved: + return self.xreplace(repl) 
+ + def get_indices(self): + return self.get_free_indices() + + def _extract_data(self, replacement_dict): + ret_indices, array = self.expr._extract_data(replacement_dict) + index_map = self.index_map + slice_tuple = tuple(index_map.get(i, slice(None)) for i in ret_indices) + ret_indices = [i for i in ret_indices if i not in index_map] + array = array.__getitem__(slice_tuple) + return ret_indices, array + + +class WildTensorHead(TensorHead): + """ + A wild object that is used to create ``WildTensor`` instances + + Explanation + =========== + + Examples + ======== + >>> from sympy.tensor.tensor import TensorHead, TensorIndex, WildTensorHead, TensorIndexType + >>> R3 = TensorIndexType('R3', dim=3) + >>> p = TensorIndex('p', R3) + >>> q = TensorIndex('q', R3) + + A WildTensorHead can be created without specifying a ``TensorIndexType`` + + >>> W = WildTensorHead("W") + + Calling it with a ``TensorIndex`` creates a ``WildTensor`` instance. + + >>> type(W(p)) + + + The ``TensorIndexType`` is automatically detected from the index that is passed + + >>> W(p).component + W(R3) + + Calling it with no indices returns an object that can match tensors with any number of indices. + + >>> K = TensorHead('K', [R3]) + >>> Q = TensorHead('Q', [R3, R3]) + >>> W().matches(K(p)) + {W: K(p)} + >>> W().matches(Q(p,q)) + {W: Q(p, q)} + + If you want to ignore the order of indices while matching, pass ``unordered_indices=True``. + + >>> U = WildTensorHead("U", unordered_indices=True) + >>> W(p,q).matches(Q(q,p)) + >>> U(p,q).matches(Q(q,p)) + {U(R3,R3): _WildTensExpr(Q(q, p))} + + Parameters + ========== + name : name of the tensor + unordered_indices : whether the order of the indices matters for matching + (default: False) + + See also + ======== + ``WildTensor`` + ``TensorHead`` + + """ + def __new__(cls, name, index_types=None, symmetry=None, comm=0, unordered_indices=False): + if isinstance(name, str): + name_symbol = Symbol(name) + elif isinstance(name, Symbol): + name_symbol = name + else: + raise ValueError("invalid name") + + if index_types is None: + index_types = [] + + if symmetry is None: + symmetry = TensorSymmetry.no_symmetry(len(index_types)) + else: + assert symmetry.rank == len(index_types) + + if symmetry != TensorSymmetry.no_symmetry(len(index_types)): + raise NotImplementedError("Wild matching based on symmetry is not implemented.") + + obj = Basic.__new__(cls, name_symbol, Tuple(*index_types), sympify(symmetry), sympify(comm), sympify(unordered_indices)) + + return obj + + @property + def unordered_indices(self): + return self.args[4] + + def __call__(self, *indices, **kwargs): + tensor = WildTensor(self, indices, **kwargs) + return tensor.doit() + + +class WildTensor(Tensor): + """ + A wild object which matches ``Tensor`` instances + + Explanation + =========== + This is instantiated by attaching indices to a ``WildTensorHead`` instance. + + Examples + ======== + >>> from sympy.tensor.tensor import TensorHead, TensorIndex, WildTensorHead, TensorIndexType + >>> W = WildTensorHead("W") + >>> R3 = TensorIndexType('R3', dim=3) + >>> p = TensorIndex('p', R3) + >>> q = TensorIndex('q', R3) + >>> K = TensorHead('K', [R3]) + >>> Q = TensorHead('Q', [R3, R3]) + + Matching also takes the indices into account + >>> W(p).matches(K(p)) + {W(R3): _WildTensExpr(K(p))} + >>> W(p).matches(K(q)) + >>> W(p).matches(K(-p)) + + If you want to match objects with any number of indices, just use a ``WildTensor`` with no indices. 
+ >>> W().matches(K(p)) + {W: K(p)} + >>> W().matches(Q(p,q)) + {W: Q(p, q)} + + See Also + ======== + ``WildTensorHead`` + ``Tensor`` + + """ + def __new__(cls, tensor_head, indices, **kw_args): + is_canon_bp = kw_args.pop("is_canon_bp", False) + + if tensor_head.func == TensorHead: + """ + If someone tried to call WildTensor by supplying a TensorHead (not a WildTensorHead), return a normal tensor instead. This is helpful when using subs on an expression to replace occurrences of a WildTensorHead with a TensorHead. + """ + return Tensor(tensor_head, indices, is_canon_bp=is_canon_bp, **kw_args) + elif tensor_head.func == _WildTensExpr: + return tensor_head(*indices) + + indices = cls._parse_indices(tensor_head, indices) + index_types = [ind.tensor_index_type for ind in indices] + tensor_head = tensor_head.func( + tensor_head.name, + index_types, + symmetry=None, + comm=tensor_head.comm, + unordered_indices=tensor_head.unordered_indices, + ) + + obj = Basic.__new__(cls, tensor_head, Tuple(*indices)) + obj.name = tensor_head.name + obj._index_structure = _IndexStructure.from_indices(*indices) + obj._free = obj._index_structure.free[:] + obj._dum = obj._index_structure.dum[:] + obj._ext_rank = obj._index_structure._ext_rank + obj._coeff = S.One + obj._nocoeff = obj + obj._component = tensor_head + obj._components = [tensor_head] + if tensor_head.rank != len(indices): + raise ValueError("wrong number of indices") + obj.is_canon_bp = is_canon_bp + obj._index_map = obj._build_index_map(indices, obj._index_structure) + + return obj + + + def matches(self, expr, repl_dict=None, old=False): + if not isinstance(expr, TensExpr) and expr != S(1): + return None + + if repl_dict is None: + repl_dict = {} + else: + repl_dict = repl_dict.copy() + + if len(self.indices) > 0: + if not hasattr(expr, "get_free_indices"): + return None + expr_indices = expr.get_free_indices() + if len(expr_indices) != len(self.indices): + return None + if self._component.unordered_indices: + m = self._match_indices_ignoring_order(expr) + if m is None: + return None + else: + repl_dict.update(m) + else: + for i in range(len(expr_indices)): + m = self.indices[i].matches(expr_indices[i]) + if m is None: + return None + else: + repl_dict.update(m) + + repl_dict[self.component] = _WildTensExpr(expr) + else: + #If no indices were passed to the WildTensor, it may match tensors with any number of indices. + repl_dict[self] = expr + + return repl_dict + + def _match_indices_ignoring_order(self, expr, repl_dict=None, old=False): + """ + Helper method for matches. Checks if the indices of self and expr + match disregarding index ordering. 
+ """ + if repl_dict is None: + repl_dict = {} + else: + repl_dict = repl_dict.copy() + + def siftkey(ind): + if isinstance(ind, WildTensorIndex): + if ind.ignore_updown: + return "wild, updown" + else: + return "wild" + else: + return "nonwild" + + indices_sifted = sift(self.indices, siftkey) + + matched_indices = [] + expr_indices_remaining = expr.get_indices() + for ind in indices_sifted["nonwild"]: + matched_this_ind = False + for e_ind in expr_indices_remaining: + if e_ind in matched_indices: + continue + m = ind.matches(e_ind) + if m is not None: + matched_this_ind = True + repl_dict.update(m) + matched_indices.append(e_ind) + break + if not matched_this_ind: + return None + + expr_indices_remaining = [i for i in expr_indices_remaining if i not in matched_indices] + for ind in indices_sifted["wild"]: + matched_this_ind = False + for e_ind in expr_indices_remaining: + m = ind.matches(e_ind) + if m is not None: + if -ind in repl_dict.keys() and -repl_dict[-ind] != m[ind]: + return None + matched_this_ind = True + repl_dict.update(m) + matched_indices.append(e_ind) + break + if not matched_this_ind: + return None + + expr_indices_remaining = [i for i in expr_indices_remaining if i not in matched_indices] + for ind in indices_sifted["wild, updown"]: + matched_this_ind = False + for e_ind in expr_indices_remaining: + m = ind.matches(e_ind) + if m is not None: + if -ind in repl_dict.keys() and -repl_dict[-ind] != m[ind]: + return None + matched_this_ind = True + repl_dict.update(m) + matched_indices.append(e_ind) + break + if not matched_this_ind: + return None + + if len(matched_indices) < len(self.indices): + return None + else: + return repl_dict + +class WildTensorIndex(TensorIndex): + """ + A wild object that matches TensorIndex instances. + + Examples + ======== + >>> from sympy.tensor.tensor import TensorIndex, TensorIndexType, WildTensorIndex + >>> R3 = TensorIndexType('R3', dim=3) + >>> p = TensorIndex("p", R3) + + By default, covariant indices only match with covariant indices (and + similarly for contravariant) + + >>> q = WildTensorIndex("q", R3) + >>> (q).matches(p) + {q: p} + >>> (q).matches(-p) + + If you want matching to ignore whether the index is co/contra-variant, set + ignore_updown=True + + >>> r = WildTensorIndex("r", R3, ignore_updown=True) + >>> (r).matches(-p) + {r: -p} + >>> (r).matches(p) + {r: p} + + Parameters + ========== + name : name of the index (string), or ``True`` if you want it to be + automatically assigned + tensor_index_type : ``TensorIndexType`` of the index + is_up : flag for contravariant index (is_up=True by default) + ignore_updown : bool, Whether this should match both co- and contra-variant + indices (default:False) + """ + def __new__(cls, name, tensor_index_type, is_up=True, ignore_updown=False): + if isinstance(name, str): + name_symbol = Symbol(name) + elif isinstance(name, Symbol): + name_symbol = name + elif name is True: + name = "_i{}".format(len(tensor_index_type._autogenerated)) + name_symbol = Symbol(name) + tensor_index_type._autogenerated.append(name_symbol) + else: + raise ValueError("invalid name") + + is_up = sympify(is_up) + ignore_updown = sympify(ignore_updown) + return Basic.__new__(cls, name_symbol, tensor_index_type, is_up, ignore_updown) + + @property + def ignore_updown(self): + return self.args[3] + + def __neg__(self): + t1 = WildTensorIndex(self.name, self.tensor_index_type, + (not self.is_up), self.ignore_updown) + return t1 + + def matches(self, expr, repl_dict=None, old=False): + if not isinstance(expr, 
TensorIndex): + return None + if self.tensor_index_type != expr.tensor_index_type: + return None + if not self.ignore_updown: + if self.is_up != expr.is_up: + return None + + if repl_dict is None: + repl_dict = {} + else: + repl_dict = repl_dict.copy() + + repl_dict[self] = expr + return repl_dict + + +class _WildTensExpr(Basic): + """ + INTERNAL USE ONLY + + This is an object that helps with replacement of WildTensors in expressions. + When this object is set as the tensor_head of a WildTensor, it replaces the + WildTensor by a TensExpr (passed when initializing this object). + + Examples + ======== + >>> from sympy.tensor.tensor import WildTensorHead, TensorIndex, TensorHead, TensorIndexType + >>> W = WildTensorHead("W") + >>> R3 = TensorIndexType('R3', dim=3) + >>> p = TensorIndex('p', R3) + >>> q = TensorIndex('q', R3) + >>> K = TensorHead('K', [R3]) + >>> print( ( K(p) ).replace( W(p), W(q)*W(-q)*W(p) ) ) + K(R_0)*K(-R_0)*K(p) + + """ + def __init__(self, expr): + if not isinstance(expr, TensExpr): + raise TypeError("_WildTensExpr expects a TensExpr as argument") + self.expr = expr + + def __call__(self, *indices): + return self.expr._replace_indices(dict(zip(self.expr.get_free_indices(), indices))) + + def __neg__(self): + return self.func(self.expr*S.NegativeOne) + + def __abs__(self): + raise NotImplementedError + + def __add__(self, other): + if other.func != self.func: + raise TypeError(f"Cannot add {self.func} to {other.func}") + return self.func(self.expr+other.expr) + + def __radd__(self, other): + if other.func != self.func: + raise TypeError(f"Cannot add {self.func} to {other.func}") + return self.func(other.expr+self.expr) + + def __sub__(self, other): + return self + (-other) + + def __rsub__(self, other): + return other + (-self) + + def __mul__(self, other): + raise NotImplementedError + + def __rmul__(self, other): + raise NotImplementedError + + def __truediv__(self, other): + raise NotImplementedError + + def __rtruediv__(self, other): + raise NotImplementedError + + def __pow__(self, other): + raise NotImplementedError + + def __rpow__(self, other): + raise NotImplementedError + + +def canon_bp(p): + """ + Butler-Portugal canonicalization. See ``tensor_can.py`` from the + combinatorics module for the details. + """ + if isinstance(p, TensExpr): + return p.canon_bp() + return p + + +def tensor_mul(*a): + """ + product of tensors + """ + if not a: + return TensMul.from_data(S.One, [], [], []) + t = a[0] + for tx in a[1:]: + t = t*tx + return t + + +def riemann_cyclic_replace(t_r): + """ + replace Riemann tensor with an equivalent expression + + ``R(m,n,p,q) -> 2/3*R(m,n,p,q) - 1/3*R(m,q,n,p) + 1/3*R(m,p,n,q)`` + + """ + free = sorted(t_r.free, key=lambda x: x[1]) + m, n, p, q = [x[0] for x in free] + t0 = t_r*Rational(2, 3) + t1 = -t_r.substitute_indices((m,m),(n,q),(p,n),(q,p))*Rational(1, 3) + t2 = t_r.substitute_indices((m,m),(n,p),(p,n),(q,q))*Rational(1, 3) + t3 = t0 + t1 + t2 + return t3 + +def riemann_cyclic(t2): + """ + Replace each Riemann tensor with an equivalent expression + satisfying the cyclic identity. + + This trick is discussed in the reference guide to Cadabra. 
+ + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead, riemann_cyclic, TensorSymmetry + >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') + >>> i, j, k, l = tensor_indices('i,j,k,l', Lorentz) + >>> R = TensorHead('R', [Lorentz]*4, TensorSymmetry.riemann()) + >>> t = R(i,j,k,l)*(R(-i,-j,-k,-l) - 2*R(-i,-k,-j,-l)) + >>> riemann_cyclic(t) + 0 + """ + t2 = t2.expand() + if isinstance(t2, (TensMul, Tensor)): + args = [t2] + else: + args = t2.args + a1 = [x.split() for x in args] + a2 = [[riemann_cyclic_replace(tx) for tx in y] for y in a1] + a3 = [tensor_mul(*v) for v in a2] + t3 = TensAdd(*a3).doit() + if not t3: + return t3 + else: + return canon_bp(t3) + + +def get_lines(ex, index_type): + """ + Returns ``(lines, traces, rest)`` for an index type, + where ``lines`` is the list of list of positions of a matrix line, + ``traces`` is the list of list of traced matrix lines, + ``rest`` is the rest of the elements of the tensor. + """ + def _join_lines(a): + i = 0 + while i < len(a): + x = a[i] + xend = x[-1] + xstart = x[0] + hit = True + while hit: + hit = False + for j in range(i + 1, len(a)): + if j >= len(a): + break + if a[j][0] == xend: + hit = True + x.extend(a[j][1:]) + xend = x[-1] + a.pop(j) + continue + if a[j][0] == xstart: + hit = True + a[i] = reversed(a[j][1:]) + x + x = a[i] + xstart = a[i][0] + a.pop(j) + continue + if a[j][-1] == xend: + hit = True + x.extend(reversed(a[j][:-1])) + xend = x[-1] + a.pop(j) + continue + if a[j][-1] == xstart: + hit = True + a[i] = a[j][:-1] + x + x = a[i] + xstart = x[0] + a.pop(j) + continue + i += 1 + return a + + arguments = ex.args + dt = {} + for c in ex.args: + if not isinstance(c, TensExpr): + continue + if c in dt: + continue + index_types = c.index_types + a = [] + for i in range(len(index_types)): + if index_types[i] is index_type: + a.append(i) + if len(a) > 2: + raise ValueError('at most two indices of type %s allowed' % index_type) + if len(a) == 2: + dt[c] = a + #dum = ex.dum + lines = [] + traces = [] + traces1 = [] + #indices_to_args_pos = ex._get_indices_to_args_pos() + # TODO: add a dum_to_components_map ? 
+ for p0, p1, c0, c1 in ex.dum_in_args: + if arguments[c0] not in dt: + continue + if c0 == c1: + traces.append([c0]) + continue + ta0 = dt[arguments[c0]] + ta1 = dt[arguments[c1]] + if p0 not in ta0: + continue + if ta0.index(p0) == ta1.index(p1): + # case gamma(i,s0,-s1) in c0, gamma(j,-s0,s2) in c1; + # to deal with this case one could add to the position + # a flag for transposition; + # one could write [(c0, False), (c1, True)] + raise NotImplementedError + # if p0 == ta0[1] then G in pos c0 is mult on the right by G in c1 + # if p0 == ta0[0] then G in pos c1 is mult on the right by G in c0 + ta0 = dt[arguments[c0]] + b0, b1 = (c0, c1) if p0 == ta0[1] else (c1, c0) + lines1 = lines[:] + for line in lines: + if line[-1] == b0: + if line[0] == b1: + n = line.index(min(line)) + traces1.append(line) + traces.append(line[n:] + line[:n]) + else: + line.append(b1) + break + elif line[0] == b1: + line.insert(0, b0) + break + else: + lines1.append([b0, b1]) + + lines = [x for x in lines1 if x not in traces1] + lines = _join_lines(lines) + rest = [] + for line in lines: + for y in line: + rest.append(y) + for line in traces: + for y in line: + rest.append(y) + rest = [x for x in range(len(arguments)) if x not in rest] + + return lines, traces, rest + + +def get_free_indices(t): + if not isinstance(t, TensExpr): + return () + return t.get_free_indices() + + +def get_indices(t): + if not isinstance(t, TensExpr): + return () + return t.get_indices() + +def get_dummy_indices(t): + if not isinstance(t, TensExpr): + return () + inds = t.get_indices() + free = t.get_free_indices() + return [i for i in inds if i not in free] + +def get_index_structure(t): + if isinstance(t, TensExpr): + return t._index_structure + return _IndexStructure([], [], [], []) + + +def get_coeff(t): + if isinstance(t, Tensor): + return S.One + if isinstance(t, TensMul): + return t.coeff + if isinstance(t, TensExpr): + raise ValueError("no coefficient associated to this tensor expression") + return t + +def contract_metric(t, g): + if isinstance(t, TensExpr): + return t.contract_metric(g) + return t + + +def perm2tensor(t, g, is_canon_bp=False): + """ + Returns the tensor corresponding to the permutation ``g`` + + For further details, see the method in ``TIDS`` with the same name. 
+ """ + if not isinstance(t, TensExpr): + return t + elif isinstance(t, (Tensor, TensMul)): + nim = get_index_structure(t).perm2tensor(g, is_canon_bp=is_canon_bp) + res = t._set_new_index_structure(nim, is_canon_bp=is_canon_bp) + if g[-1] != len(g) - 1: + return -res + + return res + raise NotImplementedError() + + +def substitute_indices(t, *index_tuples): + if not isinstance(t, TensExpr): + return t + return t.substitute_indices(*index_tuples) + + +def _expand(expr, **kwargs): + if isinstance(expr, TensExpr): + return expr._expand(**kwargs) + else: + return expr.expand(**kwargs) + + +def get_postprocessor(cls): + def _postprocessor(expr): + tens_class = {Mul: TensMul, Add: TensAdd}[cls] + if any(isinstance(a, TensExpr) for a in expr.args): + return tens_class(*expr.args) + else: + return expr + + return _postprocessor + +Basic._constructor_postprocessor_mapping[TensExpr] = { + "Mul": [get_postprocessor(Mul)], +} diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/tests/test_printing.py b/evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/tests/test_printing.py new file mode 100644 index 0000000000000000000000000000000000000000..9f3cf7f0591a7012c93354ab7b8d7e010def38bb --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/tests/test_printing.py @@ -0,0 +1,13 @@ +from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorHead +from sympy import I + +def test_printing_TensMul(): + R3 = TensorIndexType('R3', dim=3) + p, q = tensor_indices("p q", R3) + K = TensorHead("K", [R3]) + + assert repr(2*K(p)) == "2*K(p)" + assert repr(-K(p)) == "-K(p)" + assert repr(-2*K(p)*K(q)) == "-2*K(p)*K(q)" + assert repr(-I*K(p)) == "-I*K(p)" + assert repr(I*K(p)) == "I*K(p)" diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/toperators.py b/evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/toperators.py new file mode 100644 index 0000000000000000000000000000000000000000..1bdd67c4f4a7e86b9821ee55b1d2f9bde29c96a8 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/toperators.py @@ -0,0 +1,256 @@ +from sympy import permutedims +from sympy.core.numbers import Number +from sympy.core.singleton import S +from sympy.core.symbol import Symbol +from sympy.core.sympify import sympify +from sympy.tensor.tensor import Tensor, TensExpr, TensAdd, TensMul + + +class PartialDerivative(TensExpr): + """ + Partial derivative for tensor expressions. + + Examples + ======== + + >>> from sympy.tensor.tensor import TensorIndexType, TensorHead + >>> from sympy.tensor.toperators import PartialDerivative + >>> from sympy import symbols + >>> L = TensorIndexType("L") + >>> A = TensorHead("A", [L]) + >>> B = TensorHead("B", [L]) + >>> i, j, k = symbols("i j k") + + >>> expr = PartialDerivative(A(i), A(j)) + >>> expr + PartialDerivative(A(i), A(j)) + + The ``PartialDerivative`` object behaves like a tensorial expression: + + >>> expr.get_indices() + [i, -j] + + Notice that the deriving variables have opposite valence than the + printed one: ``A(j)`` is printed as covariant, but the index of the + derivative is actually contravariant, i.e. ``-j``. + + Indices can be contracted: + + >>> expr = PartialDerivative(A(i), A(i)) + >>> expr + PartialDerivative(A(L_0), A(L_0)) + >>> expr.get_indices() + [L_0, -L_0] + + The method ``.get_indices()`` always returns all indices (even the + contracted ones). 
If only uncontracted indices are needed, call + ``.get_free_indices()``: + + >>> expr.get_free_indices() + [] + + Nested partial derivatives are flattened: + + >>> expr = PartialDerivative(PartialDerivative(A(i), A(j)), A(k)) + >>> expr + PartialDerivative(A(i), A(j), A(k)) + >>> expr.get_indices() + [i, -j, -k] + + Replace a derivative with array values: + + >>> from sympy.abc import x, y + >>> from sympy import sin, log + >>> compA = [sin(x), log(x)*y**3] + >>> compB = [x, y] + >>> expr = PartialDerivative(A(i), B(j)) + >>> expr.replace_with_arrays({A(i): compA, B(i): compB}) + [[cos(x), 0], [y**3/x, 3*y**2*log(x)]] + + The returned array is indexed by `(i, -j)`. + + Be careful that other SymPy modules put the indices of the deriving + variables before the indices of the derivand in the derivative result. + For example: + + >>> expr.get_free_indices() + [i, -j] + + >>> from sympy import Matrix, Array + >>> Matrix(compA).diff(Matrix(compB)).reshape(2, 2) + [[cos(x), y**3/x], [0, 3*y**2*log(x)]] + >>> Array(compA).diff(Array(compB)) + [[cos(x), y**3/x], [0, 3*y**2*log(x)]] + + These are the transpose of the result of ``PartialDerivative``, + as the matrix and the array modules put the index `-j` before `i` in the + derivative result. An array read with index order `(-j, i)` is indeed the + transpose of the same array read with index order `(i, -j)`. By specifying + the index order to ``.replace_with_arrays`` one can get a compatible + expression: + + >>> expr.replace_with_arrays({A(i): compA, B(i): compB}, [-j, i]) + [[cos(x), y**3/x], [0, 3*y**2*log(x)]] + """ + + def __new__(cls, expr, *variables): + + # Flatten: + if isinstance(expr, PartialDerivative): + variables = expr.variables + variables + expr = expr.expr + + args, indices, free, dum = cls._contract_indices_for_derivative( + S(expr), variables) + + obj = TensExpr.__new__(cls, *args) + + obj._indices = indices + obj._free = free + obj._dum = dum + return obj + + @property + def coeff(self): + return S.One + + @property + def nocoeff(self): + return self + + @classmethod + def _contract_indices_for_derivative(cls, expr, variables): + variables_opposite_valence = [] + + for i in variables: + if isinstance(i, Tensor): + i_free_indices = i.get_free_indices() + variables_opposite_valence.append( + i.xreplace({k: -k for k in i_free_indices})) + elif isinstance(i, Symbol): + variables_opposite_valence.append(i) + + args, indices, free, dum = TensMul._tensMul_contract_indices( + [expr] + variables_opposite_valence, replace_indices=True) + + for i in range(1, len(args)): + args_i = args[i] + if isinstance(args_i, Tensor): + i_indices = args[i].get_free_indices() + args[i] = args[i].xreplace({k: -k for k in i_indices}) + + return args, indices, free, dum + + def doit(self, **hints): + args, indices, free, dum = self._contract_indices_for_derivative(self.expr, self.variables) + + obj = self.func(*args) + obj._indices = indices + obj._free = free + obj._dum = dum + + return obj + + def _expand_partial_derivative(self): + args, indices, free, dum = self._contract_indices_for_derivative(self.expr, self.variables) + + obj = self.func(*args) + obj._indices = indices + obj._free = free + obj._dum = dum + + result = obj + + if not args[0].free_symbols: + return S.Zero + elif isinstance(obj.expr, TensAdd): + # take care of sums of multi PDs + result = obj.expr.func(*[ + self.func(a, *obj.variables)._expand_partial_derivative() + for a in result.expr.args]) + elif isinstance(obj.expr, TensMul): + # take care of products of multi PDs + if 
len(obj.variables) == 1: + # derivative with respect to single variable + terms = [] + mulargs = list(obj.expr.args) + for ind in range(len(mulargs)): + if not isinstance(sympify(mulargs[ind]), Number): + # a number coefficient is not considered for + # expansion of PartialDerivative + d = self.func(mulargs[ind], *obj.variables)._expand_partial_derivative() + terms.append(TensMul(*(mulargs[:ind] + + [d] + + mulargs[(ind + 1):]))) + result = TensAdd.fromiter(terms) + else: + # derivative with respect to multiple variables + # decompose: + # partial(expr, (u, v)) + # = partial(partial(expr, u).doit(), v).doit() + result = obj.expr # init with expr + for v in obj.variables: + result = self.func(result, v)._expand_partial_derivative() + # then throw PD on it + + return result + + def _perform_derivative(self): + result = self.expr + for v in self.variables: + if isinstance(result, TensExpr): + result = result._eval_partial_derivative(v) + else: + if v._diff_wrt: + result = result._eval_derivative(v) + else: + result = S.Zero + return result + + def get_indices(self): + return self._indices + + def get_free_indices(self): + free = sorted(self._free, key=lambda x: x[1]) + return [i[0] for i in free] + + def _replace_indices(self, repl): + expr = self.expr.xreplace(repl) + mirrored = {-k: -v for k, v in repl.items()} + variables = [i.xreplace(mirrored) for i in self.variables] + return self.func(expr, *variables) + + @property + def expr(self): + return self.args[0] + + @property + def variables(self): + return self.args[1:] + + def _extract_data(self, replacement_dict): + from .array import derive_by_array, tensorcontraction + indices, array = self.expr._extract_data(replacement_dict) + for variable in self.variables: + var_indices, var_array = variable._extract_data(replacement_dict) + var_indices = [-i for i in var_indices] + coeff_array, var_array = zip(*[i.as_coeff_Mul() for i in var_array]) + dim_before = len(array.shape) + array = derive_by_array(array, var_array) + dim_after = len(array.shape) + dim_increase = dim_after - dim_before + array = permutedims(array, [i + dim_increase for i in range(dim_before)] + list(range(dim_increase))) + array = array.as_mutable() + varindex = var_indices[0] + # Remove coefficients of base vector: + coeff_index = [0] + [slice(None) for i in range(len(indices))] + for i, coeff in enumerate(coeff_array): + coeff_index[0] = i + array[tuple(coeff_index)] /= coeff + if -varindex in indices: + pos = indices.index(-varindex) + array = tensorcontraction(array, (0, pos+1)) + indices.pop(pos) + else: + indices.append(varindex) + return indices, array diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/accelerate/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c234ea6b12084a209c6148162a28a4db2d4cde98 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/accelerate/__init__.py @@ -0,0 +1,33 @@ +__version__ = "0.23.0" + +from .accelerator import Accelerator +from .big_modeling import ( + cpu_offload, + cpu_offload_with_hook, + disk_offload, + dispatch_model, + init_empty_weights, + init_on_device, + load_checkpoint_and_dispatch, +) +from .data_loader import skip_first_batches +from .launchers import debug_launcher, notebook_launcher +from .state import PartialState +from .utils import ( + AutocastKwargs, + DeepSpeedPlugin, + DistributedDataParallelKwargs, + DistributedType, + FullyShardedDataParallelPlugin, + GradScalerKwargs, + InitProcessGroupKwargs, + 
find_executable_batch_size, + infer_auto_device_map, + is_rich_available, + load_checkpoint_in_model, + synchronize_rng_states, +) + + +if is_rich_available(): + from .utils import rich diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/accelerator.py b/evalkit_tf437/lib/python3.10/site-packages/accelerate/accelerator.py new file mode 100644 index 0000000000000000000000000000000000000000..77e9c6ef962e9bd41f73f5e460bd233a22d3d9fa --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/accelerate/accelerator.py @@ -0,0 +1,3209 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import collections +import contextlib +import functools +import json +import math +import os +import re +import shutil +import sys +import warnings +from collections import OrderedDict +from contextlib import contextmanager +from functools import partial +from types import MethodType +from typing import Any, Callable, Union + +import torch +import torch.utils.hooks as hooks + +from .checkpointing import load_accelerator_state, load_custom_state, save_accelerator_state, save_custom_state +from .data_loader import DataLoaderDispatcher, prepare_data_loader, skip_first_batches +from .logging import get_logger +from .optimizer import AcceleratedOptimizer +from .scheduler import AcceleratedScheduler +from .state import AcceleratorState, GradientState, PartialState +from .tracking import LOGGER_TYPE_TO_CLASS, GeneralTracker, filter_trackers +from .utils import ( + MODEL_NAME, + SAFE_WEIGHTS_INDEX_NAME, + SAFE_WEIGHTS_NAME, + WEIGHTS_INDEX_NAME, + WEIGHTS_NAME, + AutocastKwargs, + DeepSpeedPlugin, + DistributedDataParallelKwargs, + DistributedType, + DynamoBackend, + FP8RecipeKwargs, + FullyShardedDataParallelPlugin, + GradientAccumulationPlugin, + GradScalerKwargs, + InitProcessGroupKwargs, + KwargsHandler, + LoggerType, + MegatronLMPlugin, + PrecisionType, + ProjectConfiguration, + RNGType, + TorchDynamoPlugin, + compare_versions, + convert_model, + convert_outputs_to_fp32, + extract_model_from_parallel, + gather, + gather_object, + get_mixed_precision_context_manager, + get_pretty_name, + has_transformer_engine_layers, + id_tensor_storage, + is_bf16_available, + is_deepspeed_available, + is_fp8_available, + is_ipex_available, + is_megatron_lm_available, + is_npu_available, + is_safetensors_available, + is_torch_version, + is_tpu_available, + is_xpu_available, + load_fsdp_model, + load_fsdp_optimizer, + pad_across_processes, + parse_choice_from_env, + recursively_apply, + reduce, + release_memory, + save, + save_fsdp_model, + save_fsdp_optimizer, + shard_checkpoint, + wait_for_everyone, +) +from .utils.constants import FSDP_PYTORCH_VERSION +from .utils.other import is_compiled_module + + +if is_deepspeed_available(): + from .utils import ( + DeepSpeedEngineWrapper, + DeepSpeedOptimizerWrapper, + DeepSpeedSchedulerWrapper, + DummyOptim, + DummyScheduler, + ) + +if is_fp8_available(): + import 
transformer_engine.common.recipe as te_recipe + from transformer_engine.pytorch import fp8_autocast + + +if is_megatron_lm_available(): + from .utils import ( + MegatronEngine, + MegatronLMDummyDataLoader, + MegatronLMDummyScheduler, + MegatronLMOptimizerWrapper, + MegatronLMSchedulerWrapper, + megatron_lm_initialize, + megatron_lm_prepare_data_loader, + megatron_lm_prepare_model, + megatron_lm_prepare_optimizer, + megatron_lm_prepare_scheduler, + ) + +from torch.distributed.algorithms.join import Join + + +if is_tpu_available(check_device=False): + import torch_xla.core.xla_model as xm + import torch_xla.distributed.xla_multiprocessing as xmp + + +if is_npu_available(check_device=False): + import torch_npu # noqa: F401 + + +try: + from torch.optim.lr_scheduler import LRScheduler +except ImportError: + from torch.optim.lr_scheduler import _LRScheduler as LRScheduler + +logger = get_logger(__name__) + + +class Accelerator: + """ + Creates an instance of an accelerator for distributed training (on multi-GPU, TPU) or mixed precision training. + + Args: + device_placement (`bool`, *optional*, defaults to `True`): + Whether or not the accelerator should put objects on device (tensors yielded by the dataloader, model, + etc...). + split_batches (`bool`, *optional*, defaults to `False`): + Whether or not the accelerator should split the batches yielded by the dataloaders across the devices. If + `True` the actual batch size used will be the same on any kind of distributed processes, but it must be a + round multiple of the `num_processes` you are using. If `False`, actual batch size used will be the one set + in your script multiplied by the number of processes. + mixed_precision (`str`, *optional*): + Whether or not to use mixed precision training. Choose from 'no','fp16','bf16 or 'fp8'. Will default to the + value in the environment variable `ACCELERATE_MIXED_PRECISION`, which will use the default value in the + accelerate config of the current system or the flag passed with the `accelerate.launch` command. 'fp8' + requires the installation of transformers-engine. + gradient_accumulation_steps (`int`, *optional*, default to 1): + The number of steps that should pass before gradients are accumulated. A number > 1 should be combined with + `Accelerator.accumulate`. If not passed, will default to the value in the environment variable + `ACCELERATE_GRADIENT_ACCUMULATION_STEPS`. Can also be configured through a `GradientAccumulationPlugin`. + cpu (`bool`, *optional*): + Whether or not to force the script to execute on CPU. Will ignore GPU available if set to `True` and force + the execution on one process only. + deepspeed_plugin (`DeepSpeedPlugin`, *optional*): + Tweak your DeepSpeed related args using this argument. This argument is optional and can be configured + directly using *accelerate config* + fsdp_plugin (`FullyShardedDataParallelPlugin`, *optional*): + Tweak your FSDP related args using this argument. This argument is optional and can be configured directly + using *accelerate config* + megatron_lm_plugin (`MegatronLMPlugin`, *optional*): + Tweak your MegatronLM related args using this argument. This argument is optional and can be configured + directly using *accelerate config* + rng_types (list of `str` or [`~utils.RNGType`]): + The list of random number generators to synchronize at the beginning of each iteration in your prepared + dataloaders. 
Should be one or several of: + + - `"torch"`: the base torch random number generator + - `"cuda"`: the CUDA random number generator (GPU only) + - `"xla"`: the XLA random number generator (TPU only) + - `"generator"`: the `torch.Generator` of the sampler (or batch sampler if there is no sampler in your + dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type. + + Will default to `["torch"]` for PyTorch versions <=1.5.1 and `["generator"]` for PyTorch versions >= 1.6. + log_with (list of `str`, [`~utils.LoggerType`] or [`~tracking.GeneralTracker`], *optional*): + A list of loggers to be setup for experiment tracking. Should be one or several of: + + - `"all"` + - `"tensorboard"` + - `"wandb"` + - `"comet_ml"` + If `"all"` is selected, will pick up all available trackers in the environment and initialize them. Can + also accept implementations of `GeneralTracker` for custom trackers, and can be combined with `"all"`. + project_config (`ProjectConfiguration`, *optional*): + A configuration for how saving the state can be handled. + project_dir (`str`, `os.PathLike`, *optional*): + A path to a directory for storing data such as logs of locally-compatible loggers and potentially saved + checkpoints. + dispatch_batches (`bool`, *optional*): + If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process + and then the batches are split and broadcast to each process. Will default to `True` for `DataLoader` whose + underlying dataset is an `IterableDataset`, `False` otherwise. + even_batches (`bool`, *optional*, defaults to `True`): + If set to `True`, in cases where the total batch size across all processes does not exactly divide the + dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among + all workers. + step_scheduler_with_optimizer (`bool`, *optional`, defaults to `True`): + Set `True` if the learning rate scheduler is stepped at the same time as the optimizer, `False` if only + done under certain circumstances (at the end of each epoch, for instance). + kwargs_handlers (`list[KwargHandler]`, *optional*) + A list of `KwargHandler` to customize how the objects related to distributed training or mixed precision + are created. See [kwargs](kwargs) for more information. + dynamo_backend (`str` or `DynamoBackend`, *optional*, defaults to `"no"`): + Set to one of the possible dynamo backends to optimize your training with torch dynamo. + gradient_accumulation_plugin (`GradientAccumulationPlugin`, *optional*): + A configuration for how gradient accumulation should be handled, if more tweaking than just the + `gradient_accumulation_steps` is needed. + + **Available attributes:** + + - **device** (`torch.device`) -- The device to use. + - **distributed_type** ([`~utils.DistributedType`]) -- The distributed training configuration. + - **local_process_index** (`int`) -- The process index on the current machine. + - **mixed_precision** (`str`) -- The configured mixed precision mode. + - **num_processes** (`int`) -- The total number of processes used for training. + - **optimizer_step_was_skipped** (`bool`) -- Whether or not the optimizer update was skipped (because of + gradient overflow in mixed precision), in which + case the learning rate should not be changed. + - **process_index** (`int`) -- The overall index of the current process among all processes. + - **state** ([`~state.AcceleratorState`]) -- The distributed setup state. 
+ - **sync_gradients** (`bool`) -- Whether the gradients are currently being synced across all processes. + - **use_distributed** (`bool`) -- Whether the current configuration is for distributed training. + """ + + def __init__( + self, + device_placement: bool = True, + split_batches: bool = False, + mixed_precision: PrecisionType | str | None = None, + gradient_accumulation_steps: int = 1, + cpu: bool = False, + deepspeed_plugin: DeepSpeedPlugin | None = None, + fsdp_plugin: FullyShardedDataParallelPlugin | None = None, + megatron_lm_plugin: MegatronLMPlugin | None = None, + rng_types: list[str | RNGType] | None = None, + log_with: str | LoggerType | GeneralTracker | list[str | LoggerType | GeneralTracker] | None = None, + project_dir: str | os.PathLike | None = None, + project_config: ProjectConfiguration | None = None, + gradient_accumulation_plugin: GradientAccumulationPlugin | None = None, + dispatch_batches: bool | None = None, + even_batches: bool = True, + step_scheduler_with_optimizer: bool = True, + kwargs_handlers: list[KwargsHandler] | None = None, + dynamo_backend: DynamoBackend | str | None = None, + ): + if project_config is not None: + self.project_configuration = project_config + else: + self.project_configuration = ProjectConfiguration(project_dir=project_dir) + if project_dir is not None and self.project_dir is None: + self.project_configuration.set_directories(project_dir) + if mixed_precision is not None: + mixed_precision = str(mixed_precision) + if mixed_precision not in PrecisionType: + raise ValueError( + f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}" + ) + + dynamo_plugin = TorchDynamoPlugin() if dynamo_backend is None else TorchDynamoPlugin(backend=dynamo_backend) + + if deepspeed_plugin is None: # init from env variables + deepspeed_plugin = ( + DeepSpeedPlugin() if os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true" else None + ) + else: + assert isinstance( + deepspeed_plugin, DeepSpeedPlugin + ), "`deepspeed_plugin` must be an `accelerate.utils.DeepSpeedPlugin` object." + os.environ["ACCELERATE_USE_DEEPSPEED"] = "true" # use DeepSpeed if plugin is provided + if deepspeed_plugin: + if not is_deepspeed_available(): + raise ImportError("DeepSpeed is not installed => run `pip install deepspeed` or build it from source.") + if compare_versions("deepspeed", "<", "0.9.3"): + raise ImportError("DeepSpeed version must be >= 0.9.3. 
Please update DeepSpeed.") + + mixed_precision = ( + os.environ.get("ACCELERATE_MIXED_PRECISION", "no") if mixed_precision is None else mixed_precision + ) + deepspeed_plugin.set_mixed_precision(mixed_precision) + deepspeed_plugin.set_deepspeed_weakref() + + if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true" or isinstance( + fsdp_plugin, FullyShardedDataParallelPlugin + ): + if is_torch_version("<", FSDP_PYTORCH_VERSION): + raise ValueError(f"FSDP requires PyTorch >= {FSDP_PYTORCH_VERSION}") + + if fsdp_plugin is None: # init from env variables + fsdp_plugin = ( + FullyShardedDataParallelPlugin() if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true" else None + ) + else: + if not isinstance(fsdp_plugin, FullyShardedDataParallelPlugin): + raise TypeError("`fsdp_plugin` must be a FullyShardedDataParallelPlugin object.") + os.environ["ACCELERATE_USE_FSDP"] = "true" # use FSDP if plugin is provided + + if megatron_lm_plugin is None: # init from env variables + megatron_lm_plugin = ( + MegatronLMPlugin() if os.environ.get("ACCELERATE_USE_MEGATRON_LM", "false") == "true" else None + ) + else: + if not isinstance(megatron_lm_plugin, MegatronLMPlugin): + raise TypeError("`megatron_lm_plugin` must be a MegatronLMPlugin object.") + os.environ["ACCELERATE_USE_MEGATRON_LM"] = "true" # use MegatronLM if plugin is provided + + if megatron_lm_plugin: + if not is_megatron_lm_available(): + raise ImportError("Megatron is not installed. please build it from source.") + + # Kwargs handlers + self.ddp_handler = None + self.scaler_handler = None + self.init_handler = None + self.fp8_recipe_handler = None + self.autocast_handler = None + if kwargs_handlers is not None: + for handler in kwargs_handlers: + assert isinstance( + handler, KwargsHandler + ), f"Unsupported kwargs handler passed: {handler}, must be one that inherits `accelerate.utils.KwargsHandler`." 
+                if isinstance(handler, DistributedDataParallelKwargs):
+                    if self.ddp_handler is not None:
+                        raise ValueError("You can only pass one `DistributedDataParallelKwargs` in `kwargs_handler`.")
+                    else:
+                        self.ddp_handler = handler
+                elif isinstance(handler, GradScalerKwargs):
+                    if self.scaler_handler is not None:
+                        raise ValueError("You can only pass one `GradScalerKwargs` in `kwargs_handler`.")
+                    else:
+                        self.scaler_handler = handler
+                elif isinstance(handler, InitProcessGroupKwargs):
+                    if self.init_handler is not None:
+                        raise ValueError("You can only pass one `InitProcessGroupKwargs` in `kwargs_handler`.")
+                    else:
+                        self.init_handler = handler
+                elif isinstance(handler, FP8RecipeKwargs):
+                    if self.fp8_recipe_handler is not None:
+                        raise ValueError("You can only pass one `FP8RecipeKwargs` in `kwargs_handler`.")
+                    else:
+                        self.fp8_recipe_handler = handler
+                elif isinstance(handler, AutocastKwargs):
+                    if self.autocast_handler is not None:
+                        raise ValueError("You can only pass one `AutocastKwargs` in `kwargs_handler`.")
+                    else:
+                        self.autocast_handler = handler
+
+        kwargs = self.init_handler.to_kwargs() if self.init_handler is not None else {}
+        self.state = AcceleratorState(
+            mixed_precision=mixed_precision,
+            cpu=cpu,
+            dynamo_plugin=dynamo_plugin,
+            deepspeed_plugin=deepspeed_plugin,
+            fsdp_plugin=fsdp_plugin,
+            megatron_lm_plugin=megatron_lm_plugin,
+            _from_accelerator=True,
+            **kwargs,
+        )
+
+        trackers = filter_trackers(log_with, self.logging_dir)
+        if len(trackers) < 1 and log_with is not None:
+            warnings.warn(f"`log_with={log_with}` was passed but no supported trackers are currently installed.")
+        self.log_with = trackers
+
+        if (
+            (mixed_precision != "bf16")
+            and getattr(self.state, "downcast_bfloat", False)
+            and (self.state.distributed_type != DistributedType.TPU)
+        ):
+            raise ValueError("Can only use `downcast_bf16` when using `mixed_precision='bf16'` and on a TPU")
+
+        if gradient_accumulation_plugin is not None:
+            if gradient_accumulation_steps != 1:
+                raise ValueError(
+                    "You can only pass one of `gradient_accumulation_steps` and `gradient_accumulation_plugin`. Please only pass in the created `GradientAccumulationPlugin` object."
+                )
+        else:
+            gradient_accumulation_steps = int(
+                parse_choice_from_env("ACCELERATE_GRADIENT_ACCUMULATION_STEPS", gradient_accumulation_steps)
+            )
+            gradient_accumulation_plugin = GradientAccumulationPlugin(num_steps=gradient_accumulation_steps)
+        self.gradient_state = GradientState(
+            gradient_accumulation_plugin=gradient_accumulation_plugin,
+        )
+        if self.state.distributed_type == DistributedType.TPU:
+            if self.gradient_state.num_steps != 1:
+                raise ValueError(
+                    "Gradient accumulation is not supported on TPU. Please set `gradient_accumulation_steps` to 1 and don't pass in a `GradientAccumulationPlugin` object."
+ ) + + self.device_placement = device_placement + self.split_batches = split_batches + self.dispatch_batches = dispatch_batches + self.even_batches = even_batches + self.step_scheduler_with_optimizer = step_scheduler_with_optimizer + + # Mixed precision attributes + self.scaler = None + self.native_amp = False + err = "{mode} mixed precision requires {requirement}" + if ( + self.state.mixed_precision == "fp16" + and self.device.type != "cpu" + and self.distributed_type not in (DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM) + ): + self.native_amp = True + if self.device.type not in ("xpu", "cuda", "mps", "npu"): + raise ValueError(err.format(mode="fp16", requirement="a GPU")) + kwargs = self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {} + if self.distributed_type == DistributedType.FSDP: + from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler + + self.scaler = ShardedGradScaler(**kwargs) + elif is_npu_available(): + self.scaler = torch.npu.amp.GradScaler(**kwargs) + else: + self.scaler = torch.cuda.amp.GradScaler(**kwargs) + + elif self.state.mixed_precision == "bf16" and self.distributed_type not in ( + DistributedType.DEEPSPEED, + DistributedType.MEGATRON_LM, + ): + if self.device.type in ["cpu", "xpu"]: + self.native_amp = True + else: + self.native_amp = is_bf16_available(True) + if mixed_precision == "bf16" and not self.native_amp and not is_tpu_available(): + raise ValueError(err.format(mode="bf16", requirement="PyTorch >= 1.10 and a supported device.")) + + # Start of internal step tracking + self.step = 0 + + # Internal references to the training objects + self._optimizers = [] + self._models = [] + self._schedulers = [] + self._dataloaders = [] + self._custom_objects = [] + + # Hooks + self._load_model_state_pre_hook = OrderedDict() + self._save_model_state_pre_hook = OrderedDict() + + # RNG Types + self.rng_types = rng_types + if self.rng_types is None: + self.rng_types = ["generator"] + + # Set a flag tensor for early stopping and other breakpoints + self.flag_tensor = None + + @property + def use_distributed(self): + """ + Whether the Accelerator is configured for distributed training + """ + return self.state.use_distributed + + @property + def distributed_type(self): + return self.state.distributed_type + + @property + def num_processes(self): + return self.state.num_processes + + @property + def process_index(self): + return self.state.process_index + + @property + def local_process_index(self): + return self.state.local_process_index + + @property + def device(self): + return self.state.device + + @property + def project_dir(self): + return self.project_configuration.project_dir + + @property + def logging_dir(self): + return self.project_configuration.logging_dir + + @property + def save_iteration(self): + return self.project_configuration.iteration + + @property + def is_main_process(self): + """True for one process only.""" + return self.state.is_main_process + + @property + def is_local_main_process(self): + """True for one process per server.""" + return self.state.is_local_main_process + + @property + def use_fp16(self): + warnings.warn( + "The `use_fp16` property is deprecated and will be removed in version 1.0 of Accelerate use " + "`Accelerator.mixed_precision == 'fp16'` instead.", + FutureWarning, + ) + return self.mixed_precision != "no" + + @property + def is_last_process(self): + return self.process_index == self.num_processes - 1 + + @property + def mixed_precision(self): + return self.state.mixed_precision + 
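+    # For illustration, these read-only properties are the usual way to branch per process
+    # once an `Accelerator` exists (minimal sketch):
+    #   accelerator = Accelerator()
+    #   if accelerator.is_main_process:
+    #       print(f"{accelerator.num_processes} processes, running on {accelerator.device}")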
+ @contextmanager + def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False): + """ + Splits `input` between `self.num_processes` quickly and can be then used on that process. Useful when doing + distributed inference, such as with different prompts. + + Note that when using a `dict`, all keys need to have the same number of elements. + + Args: + inputs (`list`, `tuple`, `torch.Tensor`, or `dict` of `list`/`tuple`/`torch.Tensor`): + The input to split between processes. + apply_padding (`bool`, `optional`, defaults to `False`): + Whether to apply padding by repeating the last element of the input so that all processes have the same + number of elements. Useful when trying to perform actions such as `Accelerator.gather()` on the outputs + or passing in less inputs than there are processes. If so, just remember to drop the padded elements + afterwards. + + Example: + + ```python + # Assume there are two processes + from accelerate import Accelerator + + accelerator = Accelerator() + with accelerator.split_between_processes(["A", "B", "C"]) as inputs: + print(inputs) + # Process 0 + ["A", "B"] + # Process 1 + ["C"] + + with accelerator.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs: + print(inputs) + # Process 0 + ["A", "B"] + # Process 1 + ["C", "C"] + ``` + """ + with PartialState().split_between_processes(inputs, apply_padding=apply_padding) as inputs: + yield inputs + + def on_main_process(self, function: Callable[..., Any] = None): + """ + A decorator that will run the decorated function on the main process only. Can also be called using the + `PartialState` class. + + Args: + function (`Callable`): The function to decorate. + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + + + >>> @accelerator.on_main_process + ... def print_something(): + ... print("This will be printed by process 0 only.") + + + >>> print_something() + "This will be printed by process 0 only" + ``` + """ + # For times when the `Accelerator` object itself utilizes this decorator. + if function is None: + if "Accelerator." in self.__qualname__: + function = self + else: + raise ValueError( + "The `on_main_process` decorator must be called with a function on an instantiated `Accelerator` object." + ) + + def _inner(*args, **kwargs): + return PartialState().on_main_process(function)(*args, **kwargs) + + return _inner + + def on_local_main_process(self, function: Callable[..., Any] = None): + """ + A decorator that will run the decorated function on the local main process only. Can also be called using the + `PartialState` class. + + Args: + function (`Callable`): The function to decorate. + + Example: + ```python + # Assume we have 2 servers with 4 processes each. + from accelerate import Accelerator + + accelerator = Accelerator() + + + @accelerator.on_local_main_process + def print_something(): + print("This will be printed by process 0 only on each server.") + + + print_something() + # On server 1: + "This will be printed by process 0 only" + # On server 2: + "This will be printed by process 0 only" + ``` + """ + # For times when the `Accelerator` object itself utilizes this decorator. + if function is None: + if "Accelerator." in self.__qualname__: + function = self + else: + raise ValueError( + "The `on_local_main_process` decorator must be called with a function on an instantiated `Accelerator` object." 
+ ) + + def _inner(*args, **kwargs): + return PartialState().on_local_main_process(function)(*args, **kwargs) + + return _inner + + def on_last_process(self, function: Callable[..., Any]): + """ + A decorator that will run the decorated function on the last process only. Can also be called using the + `PartialState` class. + + Args: + function (`Callable`): The function to decorate. + + Example: + ```python + # Assume we have 4 processes. + from accelerate import Accelerator + + accelerator = Accelerator() + + + @accelerator.on_last_process + def print_something(): + print(f"Printed on process {accelerator.process_index}") + + + print_something() + "Printed on process 3" + ``` + """ + # For times when the `Accelerator` object itself utilizes this decorator. + if function is None: + if "Accelerator." in self.__qualname__: + function = self + else: + raise ValueError( + "The `on_last_process` decorator must be called with a function on an instantiated `Accelerator` object." + ) + + def _inner(*args, **kwargs): + return PartialState().on_last_process(function)(*args, **kwargs) + + return _inner + + def on_process(self, function: Callable[..., Any] = None, process_index: int = None): + """ + A decorator that will run the decorated function on a given process index only. Can also be called using the + `PartialState` class. + + Args: + function (`Callable`, `optional`): + The function to decorate. + process_index (`int`, `optional`): + The index of the process on which to run the function. + + Example: + ```python + # Assume we have 4 processes. + from accelerate import Accelerator + + accelerator = Accelerator() + + + @accelerator.on_process(process_index=2) + def print_something(): + print(f"Printed on process {accelerator.process_index}") + + + print_something() + "Printed on process 2" + ``` + """ + # Initial construction of the decorator. + if (self is not None) and (process_index is not None) and (function is None): + return partial(self.on_process, process_index=process_index) + # For times when the `Accelerator` object itself utilizes this decorator. + if function is None: + if "Accelerator." in self.__qualname__: + function = self + else: + raise ValueError( + "The `on_main_process` decorator must be called with a function on an instantiated `Accelerator` object." + ) + + def _inner(*args, **kwargs): + return PartialState().on_process(function, process_index)(*args, **kwargs) + + return _inner + + def on_local_process(self, function: Callable[..., Any] = None, local_process_index: int = None): + """ + A decorator that will run the decorated function on a given local process index only. Can also be called using + the `PartialState` class. + + Args: + function (`Callable`, *optional*): + The function to decorate. + local_process_index (`int`, *optional*): + The index of the local process on which to run the function. + + Example: + ```python + # Assume we have 2 servers with 4 processes each. + from accelerate import Accelerator + + accelerator = Accelerator() + + + @accelerator.on_local_process(local_process_index=2) + def print_something(): + print(f"Printed on process {accelerator.local_process_index}") + + + print_something() + # On server 1: + "Printed on process 2" + # On server 2: + "Printed on process 2" + ``` + """ + # Initial construction of the decorator. 
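+        # Decoration with arguments happens in two steps (minimal sketch):
+        #   decorator = accelerator.on_local_process(local_process_index=2)  # returns a functools.partial
+        #   wrapped = decorator(print_something)  # second call supplies the function and returns `_inner`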
+        if (self is not None) and (local_process_index is not None) and (function is None):
+            return partial(self.on_local_process, local_process_index=local_process_index)
+        # For times when the `Accelerator` object itself utilizes this decorator.
+        if function is None:
+            if "Accelerator." in self.__qualname__:
+                function = self
+            else:
+                raise ValueError(
+                    "The `on_local_process` decorator must be called with a function on an instantiated `Accelerator` object."
+                )
+
+        def _inner(*args, **kwargs):
+            return PartialState().on_local_process(function, local_process_index)(*args, **kwargs)
+
+        return _inner
+
+    @contextmanager
+    def main_process_first(self):
+        """
+        Lets the main process go first inside a with block.
+
+        The other processes will enter the with block after the main process exits.
+
+        Example:
+
+        ```python
+        >>> from accelerate import Accelerator
+
+        >>> accelerator = Accelerator()
+        >>> with accelerator.main_process_first():
+        ...     # This will be printed first by process 0 then in a seemingly
+        ...     # random order by the other processes.
+        ...     print(f"This will be printed by process {accelerator.process_index}")
+        ```
+        """
+        with self.state.main_process_first():
+            yield
+
+    @contextmanager
+    def local_main_process_first(self):
+        """
+        Lets the local main process go first inside a with block.
+
+        The other processes will enter the with block after the main process exits.
+
+        Example:
+
+        ```python
+        >>> from accelerate import Accelerator
+
+        >>> accelerator = Accelerator()
+        >>> with accelerator.local_main_process_first():
+        ...     # This will be printed first by local process 0 then in a seemingly
+        ...     # random order by the other processes.
+        ...     print(f"This will be printed by process {accelerator.local_process_index}")
+        ```
+        """
+        with self.state.local_main_process_first():
+            yield
+
+    @contextmanager
+    def no_sync(self, model):
+        """
+        A context manager to disable gradient synchronizations across DDP processes by calling
+        `torch.nn.parallel.DistributedDataParallel.no_sync`.
+
+        If `model` is not in DDP, this context manager does nothing.
+
+        Args:
+            model (`torch.nn.Module`):
+                PyTorch Module that was prepared with `Accelerator.prepare`
+
+        Example:
+
+        ```python
+        >>> from accelerate import Accelerator
+
+        >>> accelerator = Accelerator()
+        >>> dataloader, model, optimizer = accelerator.prepare(dataloader, model, optimizer)
+        >>> input_a = next(iter(dataloader))
+        >>> input_b = next(iter(dataloader))
+
+        >>> with accelerator.no_sync(model):
+        ...     outputs = model(input_a)
+        ...     loss = loss_func(outputs)
+        ...     accelerator.backward(loss)
+        ...     # No synchronization across processes, only accumulate gradients
+        >>> outputs = model(input_b)
+        >>> accelerator.backward(loss)
+        >>> # Synchronization across all processes
+        >>> optimizer.step()
+        >>> optimizer.zero_grad()
+        ```
+        """
+        context = contextlib.nullcontext
+        if self.use_distributed:
+            context = getattr(model, "no_sync", context)
+
+        with context():
+            yield
+
+    @staticmethod
+    @contextmanager
+    def trigger_sync_in_backward(model):
+        """Trigger the sync of the gradients in the next backward pass of the model after multiple forward passes under
+        `Accelerator.no_sync` (only applicable in multi-GPU scenarios).
+
+        If the script is not launched in distributed mode, this context manager does nothing.
+
+        Args:
+            model (`torch.nn.Module`):
+                The model for which to trigger the gradient synchronization.
+ + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> dataloader, model, optimizer = accelerator.prepare(dataloader, model, optimizer) + + >>> with accelerator.no_sync(): + ... loss_a = loss_func(model(input_a)) # first forward pass + ... loss_b = loss_func(model(input_b)) # second forward pass + >>> accelerator.backward(loss_a) # No synchronization across processes, only accumulate gradients + >>> with accelerator.trigger_sync_in_backward(model): + ... accelerator.backward(loss_b) # Synchronization across all processes + >>> optimizer.step() + >>> optimizer.zero_grad() + ``` + """ + if not isinstance(model, torch.nn.parallel.DistributedDataParallel): + yield + return + + old_require_backward_grad_sync = model.require_backward_grad_sync + old_require_forward_param_sync = model.require_forward_param_sync + + # EXPERIMENTAL: This will force grad sync during `backward()`, but it is unknown if it breaks other DDP features. + # https://github.com/pytorch/pytorch/blob/e1502c0cdbfd17548c612f25d5a65b1e4b86224d/torch/nn/parallel/distributed.py#L1453-L1466 + model.require_backward_grad_sync = True + model.require_forward_param_sync = True + # https://github.com/pytorch/pytorch/blob/e1502c0cdbfd17548c612f25d5a65b1e4b86224d/torch/csrc/distributed/c10d/reducer.cpp#L1371-L1402 + model.reducer.prepare_for_backward([]) + try: + yield + finally: + model.require_backward_grad_sync = old_require_backward_grad_sync + model.require_forward_param_sync = old_require_forward_param_sync + + def _do_sync(self): + "Sets the right `sync_gradients` context and either resets or increases `self.step`" + if self.gradient_state.sync_with_dataloader and self.gradient_state.end_of_dataloader: + self.step = 0 + self.gradient_state._set_sync_gradients(True) + else: + self.step += 1 + self.gradient_state._set_sync_gradients((self.step % self.gradient_state.num_steps) == 0) + + @property + def sync_gradients(self): + return self.gradient_state.sync_gradients + + @sync_gradients.setter + def sync_gradients(self, sync_gradients): + self.gradient_state.sync_gradients = sync_gradients + + @property + def gradient_accumulation_steps(self): + return self.gradient_state.num_steps + + @gradient_accumulation_steps.setter + def gradient_accumulation_steps(self, gradient_accumulation_steps): + self.gradient_state.plugin_kwargs.update({"num_steps": gradient_accumulation_steps}) + + @contextmanager + def accumulate(self, *models): + """ + A context manager that will lightly wrap around and perform gradient accumulation automatically + + Args: + *models (list of `torch.nn.Module`): + PyTorch Modules that was prepared with `Accelerator.prepare`. Models passed to `accumulate()` will skip + gradient syncing during backward pass in distributed training + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator(gradient_accumulation_steps=1) + >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler) + + >>> for input, output in dataloader: + ... with accelerator.accumulate(model): + ... outputs = model(input) + ... loss = loss_func(outputs) + ... loss.backward() + ... optimizer.step() + ... scheduler.step() + ... 
optimizer.zero_grad() + ``` + """ + self._do_sync() + with contextlib.ExitStack() as cm_stack: + for m in models: + cm_stack.enter_context(contextlib.nullcontext() if self.sync_gradients else self.no_sync(m)) + yield + + @contextmanager + def join_uneven_inputs(self, joinables, even_batches=None): + """ + A context manager that facilitates distributed training or evaluation on uneven inputs, which acts as a wrapper + around `torch.distributed.algorithms.join`. This is useful when the total batch size does not evenly divide the + length of the dataset. + + Args: + joinables (`list[torch.distributed.algorithms.Joinable]`): + A list of models or optimizers that subclass `torch.distributed.algorithms.Joinable`. Most commonly, a + PyTorch Module that was prepared with `Accelerator.prepare` for DistributedDataParallel training. + even_batches (`bool`, *optional*) + If set, this will override the value of `even_batches` set in the `Accelerator`. If it is not provided, + the default `Accelerator` value wil be used. + + + + `join_uneven_inputs` is only supported for Distributed Data Parallel training on multiple GPUs. For any other + configuration, this method will have no effect. + + + + + + Overidding `even_batches` will not affect iterable-style data loaders. + + + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator(even_batches=True) + >>> ddp_model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader) + + >>> with accelerator.join_uneven_inputs([ddp_model], even_batches=False): + ... for input, output in dataloader: + ... outputs = model(input) + ... loss = loss_func(outputs) + ... loss.backward() + ... optimizer.step() + ... optimizer.zero_grad() + ``` + """ + if self.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU): + dl_even_batches_values = [] + + if even_batches is not None: + iterable_dl_seen = False + # override value in batch sampler for map-style datasets + for dl_idx, dl in enumerate(self._dataloaders): + if isinstance(dl, DataLoaderDispatcher): + iterable_dl_seen = True + continue + dl_even_batches_values.append((dl_idx, dl.batch_sampler.even_batches)) + dl.batch_sampler.even_batches = even_batches + + if iterable_dl_seen: + warnings.warn( + "Overridding even_batches is only supported for map-style datasets, yet some dataloaders given were iterable" + ) + else: + even_batches = self.even_batches + + enable_join = False if even_batches else True + try: + with Join(joinables, enable=enable_join, throw_on_early_termination=False): + yield + finally: + # reset any batch samplers that have been modified + for dl_idx, even_batches_value in dl_even_batches_values: + self._dataloaders[dl_idx].batch_sampler.even_batches = even_batches_value + else: + # Even when disabled, Join expects models to subclass Joinable, so skip entirely for single process runs + if self.distributed_type != DistributedType.NO: + warnings.warn( + "Joining uneven inputs is only supported for multi-GPU training, as a result `join_uneven_inputs` will have no effect." + ) + + with contextlib.nullcontext(joinables): + yield + + def print(self, *args, **kwargs): + """ + Drop in replacement of `print()` to only print once per server. 
+ + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> accelerator.print("Hello world!") + ``` + """ + self.state.print(*args, **kwargs) + + def _prepare_one(self, obj, first_pass=False, device_placement=None): + # First pass of preparation: DataLoader, model, optimizer + if first_pass: + if isinstance(obj, torch.utils.data.DataLoader): + return self.prepare_data_loader(obj, device_placement=device_placement) + elif isinstance(obj, torch.nn.Module): + return self.prepare_model(obj, device_placement=device_placement) + elif isinstance(obj, torch.optim.Optimizer): + optimizer = self.prepare_optimizer(obj, device_placement=device_placement) + return optimizer + # Second pass of preparation: LR scheduler (which need the full list of optimizers) + elif isinstance(obj, LRScheduler): + scheduler = self.prepare_scheduler(obj) + return scheduler + # Return the unprocessed object if previous criteria was not met + return obj + + def _prepare_fsdp(self, *args): + result = [] + for obj in args: + if isinstance(obj, torch.nn.Module): + model = obj + break + optimizers = [] + + self._schedulers = [] + self._models = [] + intermediate_result = [] + for obj in args: + if isinstance(obj, torch.optim.Optimizer): + if len(obj.param_groups) > 1: + logger.warning( + "FSDP Warning: When using FSDP, several parameter groups will be conflated into " + "a single one due to nested module wrapping and parameter flattening." + ) + try: + optimizer = obj.optimizer.__class__(model.parameters(), **obj.optimizer.defaults) + except TypeError: + if "differentiable" in obj.optimizer.defaults: + # https://github.com/huggingface/accelerate/issues/801 + defaults = {k: v for k, v in obj.optimizer.defaults.items() if k != "differentiable"} + optimizer = obj.optimizer.__class__(model.parameters(), **defaults) + else: + raise + obj = self.prepare_optimizer(optimizer) + optimizers.append(obj) + elif isinstance(obj, torch.nn.Module): + self._models.append(obj) + intermediate_result.append(obj) + + for obj in intermediate_result: + if isinstance(obj, AcceleratedScheduler): + obj.optimizer = optimizers + for i, opt in enumerate(self._optimizers): + if getattr(obj.scheduler, "optimizer", None) == opt.optimizer: + obj.scheduler.optimizer = optimizers[i] + obj.optimizers = [optimizers[i]] + break + self._schedulers.append(obj) + result.append(obj) + self._optimizers = optimizers + return tuple(result) + + def prepare(self, *args, device_placement=None): + """ + Prepare all objects passed in `args` for distributed training and mixed precision, then return them in the same + order. + + Args: + *args (list of objects): + Any of the following type of objects: + + - `torch.utils.data.DataLoader`: PyTorch Dataloader + - `torch.nn.Module`: PyTorch Module + - `torch.optim.Optimizer`: PyTorch Optimizer + - `torch.optim.lr_scheduler.LRScheduler`: PyTorch LR Scheduler + + device_placement (`list[bool]`, *optional*): + Used to customize whether automatic device placement should be performed for each object passed. Needs + to be a list of the same length as `args`. Not compatible with DeepSpeed or FSDP. 
+
+
+
+        You don't need to prepare a model if you only use it for inference without any kind of mixed precision
+
+
+
+        Examples:
+
+        ```python
+        >>> from accelerate import Accelerator
+
+        >>> accelerator = Accelerator()
+        >>> # Assume a model, optimizer, data_loader and scheduler are defined
+        >>> model, optimizer, data_loader, scheduler = accelerator.prepare(model, optimizer, data_loader, scheduler)
+        ```
+
+        ```python
+        >>> from accelerate import Accelerator
+
+        >>> accelerator = Accelerator()
+        >>> # Assume a model, optimizer, data_loader and scheduler are defined
+        >>> device_placement = [True, True, False, False]
+        >>> # Will place the first two items passed in automatically to the right device but not the last two.
+        >>> model, optimizer, data_loader, scheduler = accelerator.prepare(
+        ...     model, optimizer, data_loader, scheduler, device_placement=device_placement
+        ... )
+        ```
+        """
+        if device_placement is None:
+            device_placement = [None for _ in args]
+        elif self.distributed_type in (DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM):
+            raise ValueError("You can't customize device placements with DeepSpeed or Megatron-LM.")
+        elif len(device_placement) != len(args):
+            raise ValueError(
+                f"`device_placement` should be a list with {len(args)} elements (the number of objects passed)."
+            )
+
+        for obj in args:
+            # TODO: Look at enabling native TP training directly with a proper config
+            if (
+                isinstance(obj, torch.nn.Module)
+                and self.verify_device_map(obj)
+                and self.distributed_type != DistributedType.NO
+                and os.environ.get("ACCELERATE_BYPASS_DEVICE_MAP", "false") != "true"
+            ):
+                raise ValueError(
+                    "You can't train a model that has been loaded with `device_map='auto'` in any distributed mode."
+                    " Please rerun your script specifying `--num_processes=1` or by launching with `python {{myscript.py}}`."
+                )
+
+        if self.distributed_type == DistributedType.FSDP:
+            from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
+
+            model_count = 0
+            optimizer_present = False
+            is_type_fsdp = False
+            for obj in args:
+                if isinstance(obj, torch.nn.Module):
+                    model_count += 1
+                    # if the model is compiled using PyTorch 2.0,
+                    # check that the wrapped model is FSDP or not;
+                    # else check if it is FSDP or not;
+                    is_type_fsdp = isinstance(obj, FSDP) or (
+                        is_compiled_module(obj) and isinstance(obj._orig_mod, FSDP)
+                    )
+                if isinstance(obj, torch.optim.Optimizer):
+                    optimizer_present = True
+            if model_count > 1 and optimizer_present:
+                raise ValueError(
+                    "For FSDP to work with multiple models (>1), "
+                    "prepare must be called for all the models before optimizers are created. "
+                    "Then pass the optimizers to the prepare call in the same order as corresponding models."
+                )
+            elif model_count == 1 and not is_type_fsdp and optimizer_present:
+                logger.warning(
+                    "FSDP Warning: When using FSDP, "
+                    "it is efficient and recommended to call prepare for the model before creating the optimizer"
+                )
+
+        if self.distributed_type == DistributedType.DEEPSPEED:
+            model_count = 0
+            for obj in args:
+                if isinstance(obj, torch.nn.Module):
+                    model_count += 1
+            if model_count > 1:
+                raise AssertionError(
+                    "You can't use same `Accelerator()` instance with multiple models when using DeepSpeed"
+                )
+
+        # On TPUs, putting the model on the XLA device will create new parameters, so the corresponding optimizer will
+        # have parameters disconnected from the model (so no training :-( ).
+        # If the model and optimizer have parameters on different devices we raise an error.
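+        # For example (illustrative sketch; `MyModel` and `xla_device` are hypothetical names):
+        #   model = MyModel()
+        #   optimizer = torch.optim.Adam(model.parameters())  # optimizer references the CPU parameters
+        #   model.to(xla_device)  # moving to the XLA device creates new parameter tensors
+        #   # -> the optimizer would keep updating tensors the model no longer uses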
+ if self.distributed_type == DistributedType.TPU: + model_device, optimizer_device = self._get_devices() + if model_device is not None and optimizer_device is not None and model_device != optimizer_device: + raise ValueError( + "The model and the optimizer parameters are not on the same device, which probably means you " + "created an optimizer around your model **before** putting on the device. Make sure the line " + "model.to(device) is before the optimizer creation in your script or remove it entirely and use " + "the flag default value for `device_placement` in your `Accelerator` to let it handle that " + "part for you." + ) + + # If we're dealing with device placement, this deals with that by... + tpu_should_fix_optimizer = self.device_placement and self.distributed_type == DistributedType.TPU + if tpu_should_fix_optimizer or self.mixed_precision == "fp8": + # 1. grabbing old model parameters + old_named_params = self._get_named_parameters(*args) + + if self.distributed_type in [DistributedType.MULTI_CPU, DistributedType.MULTI_XPU, DistributedType.NO]: + if self.device.type == "cpu" and self.state.use_ipex: + args = self._prepare_ipex(*args) + elif self.device.type == "xpu" and is_xpu_available(): + args = self._prepare_ipex(*args) + if self.distributed_type == DistributedType.DEEPSPEED: + result = self._prepare_deepspeed(*args) + elif self.distributed_type == DistributedType.MEGATRON_LM: + result = self._prepare_megatron_lm(*args) + else: + result = tuple( + self._prepare_one(obj, first_pass=True, device_placement=d) for obj, d in zip(args, device_placement) + ) + result = tuple(self._prepare_one(obj, device_placement=d) for obj, d in zip(result, device_placement)) + + if tpu_should_fix_optimizer or self.mixed_precision == "fp8": + # 2. grabbing new model parameters + new_named_params = self._get_named_parameters(*result) + # 3. building a map from the first to the second + mapping = {p: new_named_params[n] for n, p in old_named_params.items()} + # 4. using that map to update the parameters of the optimizer + for obj in result: + if isinstance(obj, torch.optim.Optimizer): + obj._switch_parameters(mapping) + + if ( + self.distributed_type == DistributedType.FSDP + and model_count == 1 + and not is_type_fsdp + and optimizer_present + ): + result = self._prepare_fsdp(*result) + + for item in result: + if any( + item in container + for container in (self._dataloaders, self._models, self._optimizers, self._schedulers) + ): + setattr(item, "_is_accelerate_prepared", True) + + return result if len(result) > 1 else result[0] + + def prepare_model(self, model: torch.nn.Module, device_placement: bool = None, evaluation_mode: bool = False): + """ + Prepares a PyTorch model for training in any distributed setup. It is recommended to use + [`Accelerator.prepare`] instead. + + Args: + model (`torch.nn.Module`): + A PyTorch model to prepare. You don't need to prepare a model if it is used only for inference without + any kind of mixed precision + device_placement (`bool`, *optional*): + Whether or not to place the model on the proper device. Will default to `self.device_placement`. + evaluation_mode (`bool`, *optional*, defaults to `False`): + Whether or not to set the model for evaluation only, by just applying mixed precision and + `torch.compile` (if configured in the `Accelerator` object). 
+ + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> # Assume a model is defined + >>> model = accelerator.prepare_model(model) + ``` + """ + if device_placement is None: + device_placement = self.device_placement and self.distributed_type != DistributedType.FSDP + self._models.append(model) + + # TODO: Look at enabling native TP training directly with a proper config + if ( + self.verify_device_map(model) + and self.distributed_type != DistributedType.NO + and os.environ.get("ACCELERATE_BYPASS_DEVICE_MAP", "false") != "true" + ): + raise ValueError( + "You can't train a model that has been loaded with `device_map='auto'` in any distributed mode." + " Please rerun your script specifying `--num_processes=1` or by launching with `python {{myscript.py}}`." + ) + + if (getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False)) and getattr( + model, "hf_device_map", False + ): + model_devices = set(model.hf_device_map.values()) + if len(model_devices) > 1 and self.distributed_type != DistributedType.NO: + raise ValueError( + "You can't train a model that has been loaded in 8-bit precision on multiple devices in any distributed mode." + " In order to use 8-bit models that have been loaded across multiple GPUs the solution is to use Naive Pipeline Parallelism." + " Therefore you should not specify that you are under any distributed regime in your accelerate config." + ) + current_device = list(model_devices)[0] + current_device_index = current_device.index if isinstance(current_device, torch.device) else current_device + + if torch.device(current_device_index) != self.device: + # if on the first device (GPU 0) we don't care + if (self.device.index is not None) or (current_device_index != 0): + raise ValueError( + "You can't train a model that has been loaded in 8-bit precision on a different device than the one " + "you're training on. Make sure you loaded the model on the correct device using for example `device_map={'':torch.cuda.current_device()}" + "you're training on. Make sure you loaded the model on the correct device using for example `device_map={'':torch.cuda.current_device() or device_map={'':torch.xpu.current_device()}" + ) + + if "cpu" in model_devices or "disk" in model_devices: + raise ValueError( + "You can't train a model that has been loaded in 8-bit precision with CPU or disk offload." 
+ ) + elif device_placement and not self.verify_device_map(model): + model = model.to(self.device) + + if self.native_amp: + model._original_forward = model.forward + model_forward_func = model.forward.__func__ if hasattr(model.forward, "__func__") else model.forward + autocast_context = get_mixed_precision_context_manager(self.native_amp, self.autocast_handler) + new_forward = autocast_context(model_forward_func) + if hasattr(model.forward, "__func__"): + model.forward = MethodType(new_forward, model) + model.forward = MethodType(convert_outputs_to_fp32(model.forward.__func__), model) + else: + model.forward = convert_outputs_to_fp32(new_forward) + elif self.mixed_precision == "fp8": + if not has_transformer_engine_layers(model): + with torch.no_grad(): + convert_model(model) + model._converted_to_transformer_engine = True + model._original_forward = model.forward + + kwargs = self.fp8_recipe_handler.to_kwargs() if self.fp8_recipe_handler is not None else {} + if "fp8_format" in kwargs: + kwargs["fp8_format"] = getattr(te_recipe.Format, kwargs["fp8_format"]) + fp8_recipe = te_recipe.DelayedScaling(**kwargs) + cuda_device_capacity = torch.cuda.get_device_capability() + fp8_enabled = cuda_device_capacity >= (8, 9) + if not fp8_enabled: + logger.warn( + f"The current device has compute capability of {cuda_device_capacity} which is " + "insufficient for FP8 mixed precision training (requires a GPU Hopper/Ada Lovelace " + "or higher, compute capability of 8.9 or higher). Will use FP16 instead." + ) + model.forward = fp8_autocast(enabled=fp8_enabled, fp8_recipe=fp8_recipe)(model.forward) + if not evaluation_mode: + if self.distributed_type in ( + DistributedType.MULTI_GPU, + DistributedType.MULTI_NPU, + DistributedType.MULTI_XPU, + ): + if any(p.requires_grad for p in model.parameters()): + kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {} + # TODO: Look at enabling native TP training directly with a proper config + if os.environ.get("ACCELERATE_BYPASS_DEVICE_MAP", "false") != "true": + device_ids, output_device = [self.local_process_index], self.local_process_index + else: + device_ids, output_device = None, None + + model = torch.nn.parallel.DistributedDataParallel( + model, device_ids=device_ids, output_device=output_device, **kwargs + ) + elif self.distributed_type == DistributedType.FSDP: + from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP + + # Check if the model is already a FSDP model due to `Manual Wrapping` and if so, + # don't wrap it again + # In case the model is already compiled using PyTorch 2.0 and the wrapped model in it + # is a FSDP model, don't wrap it again + is_type_fsdp = isinstance(model, FSDP) or ( + is_compiled_module(model) and isinstance(model._orig_mod, FSDP) + ) + + if not is_type_fsdp: + self.state.fsdp_plugin.set_auto_wrap_policy(model) + fsdp_plugin = self.state.fsdp_plugin + kwargs = { + "sharding_strategy": fsdp_plugin.sharding_strategy, + "cpu_offload": fsdp_plugin.cpu_offload, + "auto_wrap_policy": fsdp_plugin.auto_wrap_policy, + "mixed_precision": fsdp_plugin.mixed_precision_policy, + "sync_module_states": fsdp_plugin.sync_module_states, + "backward_prefetch": fsdp_plugin.backward_prefetch, + "forward_prefetch": fsdp_plugin.forward_prefetch, + "use_orig_params": fsdp_plugin.use_orig_params, + "param_init_fn": fsdp_plugin.param_init_fn, + "ignored_modules": fsdp_plugin.ignored_modules, + "limit_all_gathers": fsdp_plugin.limit_all_gathers, + "device_id": self.device, + } + model = 
FSDP(model, **kwargs) + if fsdp_plugin.activation_checkpointing: + from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( + CheckpointImpl, + apply_activation_checkpointing, + checkpoint_wrapper, + ) + + apply_activation_checkpointing( + model, + checkpoint_wrapper_fn=functools.partial( + checkpoint_wrapper, + checkpoint_impl=CheckpointImpl.NO_REENTRANT, + ), + auto_wrap_policy=fsdp_plugin.auto_wrap_policy, + ) + # if the previous and current models are same, delete the previous one + if len(self._models) > 1 and (self._models[-2] is self._models[-1]): + del self._models[-2] + self._models[-1] = model + elif self.distributed_type == DistributedType.MULTI_CPU: + kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {} + model = torch.nn.parallel.DistributedDataParallel(model, **kwargs) + elif self.distributed_type == DistributedType.TPU and self.state.fork_launched: + model = xmp.MpModelWrapper(model).to(self.device) + # torch.compile should be called last and only if the model isn't already compiled. + if self.state.dynamo_plugin.backend != DynamoBackend.NO and not is_compiled_module(model): + if not is_torch_version(">=", "2.0"): + raise ValueError("Using `torch.compile` requires PyTorch 2.0 or higher.") + model = torch.compile(model, **self.state.dynamo_plugin.to_kwargs()) + return model + + def _prepare_deepspeed(self, *args): + import deepspeed + + deepspeed_plugin = self.state.deepspeed_plugin + + is_dataloader_present = any(isinstance(obj, torch.utils.data.DataLoader) for obj in args) + if deepspeed_plugin.deepspeed_config["train_micro_batch_size_per_gpu"] == "auto" or is_dataloader_present: + result = [ + self._prepare_one(obj, first_pass=True) if isinstance(obj, torch.utils.data.DataLoader) else obj + for obj in args + ] + + batch_sizes = [obj.batch_size for obj in args if hasattr(obj, "batch_size")] + if self.split_batches: + batch_sizes = [batch_size // self.num_processes for batch_size in batch_sizes] + + if any(bs is None for bs in batch_sizes): + raise ValueError( + "At least one of the dataloaders passed to `accelerate.prepare()` has `None` as batch size." + "Please set an integer value in `train_micro_batch_size_per_gpu` in the deepspeed config file" + "or assign integer value to `AcceleratorState().deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu']`." + ) + if len(batch_sizes) == 0: + raise ValueError( + "When using DeepSpeed `accelerate.prepare()` requires you to pass at least one of training or evaluation dataloaders " + "or alternatively set an integer value in `train_micro_batch_size_per_gpu` in the deepspeed config file" + "or assign integer value to `AcceleratorState().deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu']`." + ) + + batch_size_per_device = min(batch_sizes) if deepspeed_plugin.is_train_batch_min else max(batch_sizes) + if len(batch_sizes) > 1: + logger.info( + "Since you passed both train and evaluation dataloader, `is_train_batch_min` (here " + f"{deepspeed_plugin.is_train_batch_min} will decide the `train_batch_size` ({batch_size_per_device})." 
+ ) + else: + batch_size_per_device = deepspeed_plugin.deepspeed_config["train_micro_batch_size_per_gpu"] + result = [obj for obj in args] + + # handle `gradient_accumulation_steps` when the value is `auto` + deepspeed_plugin.fill_match( + "gradient_accumulation_steps", + must_match=False, + gradient_accumulation_steps=self.gradient_accumulation_steps, + ) + + config_kwargs = { + "train_micro_batch_size_per_gpu": batch_size_per_device, + "train_batch_size": batch_size_per_device + * deepspeed_plugin.deepspeed_config["gradient_accumulation_steps"] + * self.num_processes, + "gradient_clipping": 1.0, + "zero_optimization.stage3_gather_16bit_weights_on_model_save": False, + } + + model = None + optimizer = None + scheduler = None + for obj in result: + if isinstance(obj, torch.nn.Module): + model = obj + elif isinstance(obj, (torch.optim.Optimizer, DummyOptim)): + optimizer = obj + elif (isinstance(obj, (LRScheduler, DummyScheduler))) or ( + type(obj).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES + ): + scheduler = obj + + if optimizer is not None: + if "optimizer" in deepspeed_plugin.deepspeed_config and not isinstance(optimizer, (DummyOptim)): + raise ValueError( + "You cannot specify an optimizer in the config file and in the code at the same time. " + "Please remove the optimizer from the config file or " + "create `accelerate.utils.DummyOptim` in the code." + ) + elif "optimizer" not in deepspeed_plugin.deepspeed_config and isinstance(optimizer, (DummyOptim)): + raise ValueError( + "You cannot create a `DummyOptim` without specifying an optimizer in the config file." + ) + + if isinstance(optimizer, (torch.optim.Optimizer)): + deepspeed_plugin.deepspeed_config["zero_allow_untested_optimizer"] = True + + if scheduler is not None: + if "scheduler" in deepspeed_plugin.deepspeed_config and not isinstance(scheduler, (DummyScheduler)): + raise ValueError( + "You cannot specify a scheduler in the config file and in the code at the same time. " + "Please remove the scheduler from the config file or " + "create `accelerate.utils.DummyScheduler` in the code." + ) + elif ( + "scheduler" not in deepspeed_plugin.deepspeed_config + and isinstance(scheduler, (DummyScheduler)) + and scheduler.lr_scheduler_callable is None + ): + raise ValueError( + "Either specify a scheduler in the config file or " + "pass in the `lr_scheduler_callable` parameter when using `accelerate.utils.DummyScheduler`." + ) + + if optimizer is not None and scheduler is not None: + if isinstance(optimizer, (DummyOptim)) and not isinstance(scheduler, (DummyScheduler)): + raise ValueError( + "You can only specify `accelerate.utils.DummyScheduler` in the code when using " + "`accelerate.utils.DummyOptim`." 
+ ) + + if model is not None: + if hasattr(model, "config"): + hidden_size = ( + max(model.config.hidden_sizes) + if getattr(model.config, "hidden_sizes", None) + else getattr(model.config, "hidden_size", None) + ) + if hidden_size is not None: + config_kwargs.update( + { + "zero_optimization.reduce_bucket_size": hidden_size * hidden_size, + "zero_optimization.stage3_prefetch_bucket_size": 0.9 * hidden_size * hidden_size, + "zero_optimization.stage3_param_persistence_threshold": 10 * hidden_size, + } + ) + + if isinstance(optimizer, (DummyOptim)): + config_kwargs.update( + {"optimizer.params.lr": optimizer.lr, "optimizer.params.weight_decay": optimizer.weight_decay} + ) + if isinstance(scheduler, (DummyScheduler)) and scheduler.lr_scheduler_callable is None: + max_lr = ( + getattr(scheduler.optimizer, "lr", None) + if getattr(scheduler.optimizer, "defaults", None) is None + else scheduler.optimizer.defaults["lr"] + ) + config_kwargs.update( + { + "scheduler.params.warmup_min_lr": 0, + "scheduler.params.warmup_max_lr": max_lr, + "scheduler.params.warmup_num_steps": scheduler.warmup_num_steps, + } + ) + if scheduler.total_num_steps is not None: + config_kwargs["scheduler.params.total_num_steps"] = ( + math.ceil(scheduler.total_num_steps / self.num_processes) + if not self.split_batches + else scheduler.total_num_steps + ) + deepspeed_plugin.deepspeed_config_process(must_match=False, **config_kwargs) + self.deepspeed_config = deepspeed_plugin.deepspeed_config + kwargs = dict(model=model, config_params=self.deepspeed_config) + if optimizer is not None: + if isinstance(optimizer, (DummyOptim)): + kwargs["model_parameters"] = optimizer.params + if isinstance(scheduler, (DummyScheduler)) and scheduler.lr_scheduler_callable is not None: + kwargs["lr_scheduler"] = scheduler.lr_scheduler_callable + else: + if self.deepspeed_config["zero_optimization"].get("offload_optimizer", {}).get( + "device", "none" + ) != "none" and self.deepspeed_config.get("zero_force_ds_cpu_optimizer", True): + from deepspeed.ops.adam import DeepSpeedCPUAdam + + defaults = {k: v for k, v in optimizer.defaults.items() if k in ["lr", "weight_decay"]} + optimizer = DeepSpeedCPUAdam(optimizer.param_groups, **defaults) + kwargs["optimizer"] = optimizer + if scheduler is not None: + if ( + isinstance(scheduler, LRScheduler) + or type(scheduler).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES + ): + kwargs["lr_scheduler"] = scheduler + + engine, optimizer, _, lr_scheduler = deepspeed.initialize(**kwargs) + if optimizer is not None: + optimizer = DeepSpeedOptimizerWrapper(optimizer) + if scheduler is not None: + if lr_scheduler is None: + scheduler = AcceleratedScheduler( + scheduler, + optimizer, + step_with_optimizer=self.step_scheduler_with_optimizer, + split_batches=self.split_batches, + ) + else: + scheduler = DeepSpeedSchedulerWrapper(lr_scheduler, optimizer) + + for i in range(len(result)): + if isinstance(result[i], torch.nn.Module): + result[i] = engine + elif isinstance(result[i], (torch.optim.Optimizer, DummyOptim)): + result[i] = optimizer + elif (isinstance(result[i], (LRScheduler, DummyScheduler))) or ( + type(result[i]).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES + ): + result[i] = scheduler + # pointing for deepspeed_engine_wrapped.backward() + self.deepspeed_engine_wrapped = DeepSpeedEngineWrapper(engine) + self._models.append(engine) + if optimizer is not None: + self._optimizers.append(optimizer) + if scheduler is not None: + self._schedulers.append(scheduler) + if len(self._models) 
> 1: + raise AssertionError( + "You can't use same `Accelerator()` instance with multiple models when using DeepSpeed" + ) + return tuple(result) + + def _prepare_megatron_lm(self, *args): + megatron_lm_plugin = self.state.megatron_lm_plugin + if not megatron_lm_plugin.megatron_dataset_flag: + batch_sizes = [obj.batch_size for obj in args if hasattr(obj, "batch_size")] + if len(batch_sizes) == 0: + raise ValueError( + "You must specify a training or evaluation dataloader in `accelerate.prepare()` when using Megatron-LM." + ) + + micro_batch_size = min(batch_sizes) if megatron_lm_plugin.is_train_batch_min else max(batch_sizes) + if len(batch_sizes) > 1: + logger.info( + "Since you passed both train and evaluation dataloader, `is_train_batch_min` (here " + f"{megatron_lm_plugin.is_train_batch_min} will decide the `train_batch_size` ({micro_batch_size})." + ) + else: + for obj in args: + if isinstance(obj, MegatronLMDummyDataLoader): + micro_batch_size = obj.dataset_args["micro_batch_size"] + break + + dp_degree = self.num_processes // (megatron_lm_plugin.tp_degree * megatron_lm_plugin.pp_degree) + megatron_lm_plugin.set_training_args(micro_batch_size, dp_degree) + + model = None + optimizer = None + scheduler = None + is_dummy_scheduler = False + batch_data = None + for obj in args: + if isinstance(obj, torch.utils.data.DataLoader) and batch_data is None: + batch_data = next(iter(obj)) + if isinstance(obj, torch.nn.Module): + model = obj + elif isinstance(obj, (torch.optim.Optimizer)): + optimizer = obj + elif isinstance(obj, (LRScheduler, MegatronLMDummyScheduler)): + scheduler = obj + + if model is not None: + megatron_lm_plugin.set_network_size_args(model, batch_data) + if optimizer is not None: + megatron_lm_plugin.set_optimizer_type(optimizer) + if scheduler is not None: + is_dummy_scheduler = isinstance(scheduler, MegatronLMDummyScheduler) + if not is_dummy_scheduler: + raise ValueError( + "You can't use a custom scheduler with Megatron-LM. Please use the `accelerate.utils.MegatronLMDummyScheduler` instead." 
+ ) + megatron_lm_plugin.set_scheduler_args(scheduler) + + # initialize megatron-lm + megatron_lm_initialize(self, args_defaults=megatron_lm_plugin.megatron_lm_default_args) + counter = 0 + result = [] + for obj in args: + if isinstance(obj, torch.utils.data.DataLoader): + result.append(megatron_lm_prepare_data_loader(self, obj)) + counter += 1 + elif isinstance(obj, MegatronLMDummyDataLoader): + if counter == 0: + obj.set_megatron_data_args() + dataloaders = megatron_lm_prepare_data_loader(self, obj) + result.append(dataloaders[counter]) + counter += 1 + else: + result.append(obj) + + if model is not None: + model = megatron_lm_prepare_model(self) + if optimizer is not None: + optimizer = megatron_lm_prepare_optimizer(self, model) + if scheduler is not None: + scheduler = megatron_lm_prepare_scheduler(self, optimizer, scheduler) + + if model is not None: + model = MegatronEngine(self, model, optimizer, scheduler) + if optimizer is not None: + optimizer = MegatronLMOptimizerWrapper(optimizer) + if scheduler is not None: + scheduler = MegatronLMSchedulerWrapper(scheduler, optimizer) + + for i in range(len(result)): + if isinstance(result[i], torch.nn.Module): + result[i] = model + elif isinstance(result[i], torch.optim.Optimizer): + result[i] = optimizer + elif isinstance(result[i], MegatronLMDummyScheduler): + result[i] = scheduler + if model is not None: + self._models.append(model) + if optimizer is not None: + self._optimizers.append(optimizer) + if scheduler is not None: + self._schedulers.append(scheduler) + if len(self._models) > 1: + raise AssertionError( + "You can't use same `Accelerator()` instance with multiple models when using Megatron-LM" + ) + return tuple(result) + + def _prepare_ipex(self, *args): + if not is_ipex_available(): + raise ImportError( + "IPEX is not installed or IPEX's version does not match current PyTorch version. Please refer" + " to https://github.com/intel/intel-extension-for-pytorch." + ) + else: + import intel_extension_for_pytorch as ipex + + model = None + optimizer = None + result = [obj for obj in args] + for obj in result: + if isinstance(obj, torch.nn.Module): + model = obj + elif isinstance(obj, (torch.optim.Optimizer)): + optimizer = obj + if optimizer is not None and model is not None: + dtype = torch.bfloat16 if self.state.mixed_precision == "bf16" else torch.float32 + if self.device.type == "xpu" and is_xpu_available(): + model = model.to(self.device) + model, optimizer = torch.xpu.optimize( + model, optimizer=optimizer, dtype=dtype, inplace=True, level="O1" + ) + else: + model, optimizer = ipex.optimize(model, optimizer=optimizer, dtype=dtype, inplace=True, level="O1") + for i in range(len(result)): + if isinstance(result[i], torch.nn.Module): + result[i] = model + elif isinstance(result[i], (torch.optim.Optimizer)): + result[i] = optimizer + return tuple(result) + + def prepare_data_loader( + self, data_loader: torch.utils.data.DataLoader, device_placement=None, slice_fn_for_dispatch=None + ): + """ + Prepares a PyTorch DataLoader for training in any distributed setup. It is recommended to use + [`Accelerator.prepare`] instead. + + Args: + data_loader (`torch.utils.data.DataLoader`): + A vanilla PyTorch DataLoader to prepare + device_placement (`bool`, *optional*): + Whether or not to place the batches on the proper device in the prepared dataloader. Will default to + `self.device_placement`. + slice_fn_for_dispatch (`Callable`, *optional*`): + If passed, this function will be used to slice tensors across `num_processes`. 
Will default to + [`~utils.slice_tensors`]. This argument is used only when `dispatch_batches` is set to `True` and will + be ignored otherwise. + + Example: + + ```python + >>> import torch + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> data_loader = torch.utils.data.DataLoader(...) + >>> data_loader = accelerator.prepare_data_loader(data_loader, device_placement=True) + ``` + """ + # Ensure we can't double wrap a DataLoader due to `find_batch_size` + if getattr(data_loader, "_is_accelerate_prepared", False): + if data_loader not in self._dataloaders: + self._dataloaders.append(data_loader) + return data_loader + if device_placement is None: + device_placement = self.device_placement if self.distributed_type != DistributedType.TPU else False + prepared_data_loader = prepare_data_loader( + data_loader, + self.device, + num_processes=self.num_processes, + process_index=self.process_index, + split_batches=self.split_batches, + put_on_device=device_placement, + rng_types=self.rng_types.copy(), + dispatch_batches=self.dispatch_batches, + even_batches=self.even_batches, + slice_fn_for_dispatch=slice_fn_for_dispatch, + ) + self._dataloaders.append(prepared_data_loader) + return prepared_data_loader + + def prepare_optimizer(self, optimizer: torch.optim.Optimizer, device_placement=None): + """ + Prepares a PyTorch Optimizer for training in any distributed setup. It is recommended to use + [`Accelerator.prepare`] instead. + + Args: + optimizer (`torch.optim.Optimizer`): + A vanilla PyTorch optimizer to prepare + device_placement (`bool`, *optional*): + Whether or not to place the optimizer on the proper device. Will default to `self.device_placement`. + + Example: + + ```python + >>> import torch + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> optimizer = torch.optim.Adam(...) + >>> optimizer = accelerator.prepare_optimizer(optimizer, device_placement=True) + ``` + """ + # Ensure we can't double wrap an optimizer due to `find_batch_size` + if getattr(optimizer, "_is_accelerate_prepared", False): + if optimizer not in self._optimizers: + self._optimizers.append(optimizer) + return optimizer + if device_placement is None: + device_placement = self.device_placement + optimizer = AcceleratedOptimizer(optimizer, device_placement=device_placement, scaler=self.scaler) + self._optimizers.append(optimizer) + return optimizer + + def prepare_scheduler(self, scheduler: LRScheduler): + """ + Prepares a PyTorch Scheduler for training in any distributed setup. It is recommended to use + [`Accelerator.prepare`] instead. + + Args: + scheduler (`torch.optim.lr_scheduler.LRScheduler`): + A vanilla PyTorch scheduler to prepare + + Example: + + ```python + >>> import torch + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> optimizer = torch.optim.Adam(...) + >>> scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, ...) + >>> scheduler = accelerator.prepare_scheduler(scheduler) + ``` + """ + # Ensure we can't double wrap a scheduler due to `find_batch_size` + if getattr(scheduler, "_is_accelerate_prepared", False): + if scheduler not in self._schedulers: + self._schedulers.append(scheduler) + return scheduler + # We try to find the optimizer associated with `scheduler`, the default is the full list. 
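+        # (Matching compares `scheduler.optimizer` with the inner optimizer wrapped by each prepared
+        # `AcceleratedOptimizer`; if no match is found, the scheduler stays associated with the full
+        # list of prepared optimizers.)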
+ optimizer = self._optimizers + for opt in self._optimizers: + if getattr(scheduler, "optimizer", None) == opt.optimizer: + optimizer = opt + break + scheduler = AcceleratedScheduler( + scheduler, + optimizer, + step_with_optimizer=self.step_scheduler_with_optimizer, + split_batches=self.split_batches, + ) + self._schedulers.append(scheduler) + return scheduler + + def backward(self, loss, **kwargs): + """ + Scales the gradients in accordance to the `GradientAccumulationPlugin` and calls the correct `backward()` based + on the configuration. + + Should be used in lieu of `loss.backward()`. + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator(gradient_accumulation_steps=2) + >>> outputs = model(inputs) + >>> loss = loss_fn(outputs, labels) + >>> accelerator.backward(loss) + ``` + """ + if self.distributed_type != DistributedType.DEEPSPEED: + # deepspeed handles loss scaling by gradient_accumulation_steps in its `backward` + loss = loss / self.gradient_accumulation_steps + if self.distributed_type == DistributedType.DEEPSPEED: + self.deepspeed_engine_wrapped.backward(loss, **kwargs) + elif self.distributed_type == DistributedType.MEGATRON_LM: + return + elif self.scaler is not None: + self.scaler.scale(loss).backward(**kwargs) + else: + loss.backward(**kwargs) + + def set_trigger(self): + """ + Sets the internal trigger tensor to 1 on the current process. A latter check should follow using this which + will check across all processes. + + Note: + Does not require `wait_for_everyone()` + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> # Assume later in the training script + >>> # `should_do_breakpoint` is a custom function to monitor when to break, + >>> # e.g. when the loss is NaN + >>> if should_do_breakpoint(loss): + ... accelerator.set_trigger() + >>> # Assume later in the training script + >>> if accelerator.check_breakpoint(): + ... break + ``` + """ + self.flag_tensor = torch.tensor(1, device=self.device) + + def check_trigger(self): + """ + Checks if the internal trigger tensor has been set to 1 in any of the processes. If so, will return `True` and + reset the trigger tensor to 0. + + Note: + Does not require `wait_for_everyone()` + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> # Assume later in the training script + >>> # `should_do_breakpoint` is a custom function to monitor when to break, + >>> # e.g. when the loss is NaN + >>> if should_do_breakpoint(loss): + ... accelerator.set_trigger() + >>> # Assume later in the training script + >>> if accelerator.check_trigger(): + ... break + ``` + """ + # Now that we are outside `__init__`, we can initialize it if it is `None` on device + if self.flag_tensor is None: + self.flag_tensor = torch.tensor(0, device=self.device) + flag_tensor = self.reduce(self.flag_tensor) + if flag_tensor.item() >= 1: + self.flag_tensor = torch.tensor(0, device=self.device) + return True + return False + + def unscale_gradients(self, optimizer=None): + """ + Unscale the gradients in mixed precision training with AMP. This is a noop in all other settings. + + Likely should be called through [`Accelerator.clip_grad_norm_`] or [`Accelerator.clip_grad_value_`] + + Args: + optimizer (`torch.optim.Optimizer` or `list[torch.optim.Optimizer]`, *optional*): + The optimizer(s) for which to unscale gradients. 
If not set, will unscale gradients on all optimizers + that were passed to [`~Accelerator.prepare`]. + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> model, optimizer = accelerator.prepare(model, optimizer) + >>> outputs = model(inputs) + >>> loss = loss_fn(outputs, labels) + >>> accelerator.backward(loss) + >>> accelerator.unscale_gradients(optimizer=optimizer) + ``` + """ + if self.native_amp and self.mixed_precision == "fp16": + if optimizer is None: + # TODO: this unscales all optimizers where we should only unscale the one where parameters are. + optimizer = self._optimizers + elif not isinstance(optimizer, (tuple, list)): + optimizer = [optimizer] + for opt in optimizer: + while isinstance(opt, AcceleratedOptimizer): + opt = opt.optimizer + # Reduce gradients first for XLA + if self.distributed_type == DistributedType.TPU: + gradients = xm._fetch_gradients(opt) + self.reduce(gradients, scale=1.0 / self.num_processes) + self.scaler.unscale_(opt) + + def clip_grad_norm_(self, parameters, max_norm, norm_type=2): + """ + Should be used in place of `torch.nn.utils.clip_grad_norm_`. + + Returns: + `torch.Tensor`: Total norm of the parameter gradients (viewed as a single vector). + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator(gradient_accumulation_steps=2) + >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler) + + >>> for input, target in dataloader: + ... optimizer.zero_grad() + ... output = model(input) + ... loss = loss_func(output, target) + ... accelerator.backward(loss) + ... if accelerator.sync_gradients: + ... accelerator.clip_grad_norm_(model.parameters(), max_grad_norm) + ... optimizer.step() + ``` + """ + if self.distributed_type == DistributedType.FSDP: + self.unscale_gradients() + parameters = [p for p in parameters] + for model in self._models: + if parameters == [p for p in model.parameters()]: + return model.clip_grad_norm_(max_norm, norm_type) + elif self.distributed_type == DistributedType.DEEPSPEED: + # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed + # We cannot return the gradient norm because DeepSpeed does it. + return None + self.unscale_gradients() + return torch.nn.utils.clip_grad_norm_(parameters, max_norm, norm_type=norm_type) + + def clip_grad_value_(self, parameters, clip_value): + """ + Should be used in place of `torch.nn.utils.clip_grad_value_`. + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator(gradient_accumulation_steps=2) + >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler) + + >>> for input, target in dataloader: + ... optimizer.zero_grad() + ... output = model(input) + ... loss = loss_func(output, target) + ... accelerator.backward(loss) + ... if accelerator.sync_gradients: + ... accelerator.clip_grad_value_(model.parameters(), clip_value) + ... optimizer.step() + ``` + """ + if self.distributed_type in [DistributedType.DEEPSPEED, DistributedType.FSDP]: + raise Exception("DeepSpeed and FSDP do not support `clip_grad_value_`. Use `clip_grad_norm_` instead.") + self.unscale_gradients() + torch.nn.utils.clip_grad_value_(parameters, clip_value) + + def gather(self, tensor): + """ + Gather the values in *tensor* across all processes and concatenate them on the first dimension. 
Useful to + regroup the predictions from all processes when doing evaluation. + + Note: + This gather happens in all processes. + + Args: + tensor (`torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`): + The tensors to gather across all processes. + + Returns: + `torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`: The gathered tensor(s). Note that the + first dimension of the result is *num_processes* multiplied by the first dimension of the input tensors. + + Example: + + ```python + >>> # Assuming four processes + >>> import torch + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> process_tensor = torch.tensor([accelerator.process_index]) + >>> gathered_tensor = accelerator.gather(process_tensor) + >>> gathered_tensor + tensor([0, 1, 2, 3]) + ``` + """ + return gather(tensor) + + def gather_for_metrics(self, input_data): + """ + Gathers `input_data` and potentially drops duplicates in the last batch if on a distributed system. Should be + used for gathering the inputs and targets for metric calculation. + + Args: + input (`torch.Tensor`, `object`, a nested tuple/list/dictionary of `torch.Tensor`, or a nested tuple/list/dictionary of `object`): + The tensors or objects for calculating metrics across all processes + + Example: + + ```python + >>> # Assuming two processes, with a batch size of 5 on a dataset with 9 samples + >>> import torch + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> dataloader = torch.utils.data.DataLoader(range(9), batch_size=5) + >>> dataloader = accelerator.prepare(dataloader) + >>> batch = next(iter(dataloader)) + >>> gathered_items = accelerator.gather_for_metrics(batch) + >>> len(gathered_items) + 9 + ``` + """ + + try: + recursively_apply(lambda x: x, input_data, error_on_other_type=True) + all_tensors = True + except TypeError: + all_tensors = False + + if not all_tensors: + data = gather_object(input_data) + else: + data = self.gather(input_data) + + try: + if self.gradient_state.end_of_dataloader: + # at the end of a dataloader, `gather_for_metrics` regresses to + # `gather` unless the dataset has a remainder so log. + if self.gradient_state.remainder == -1: + logger.info( + "The used dataset had no length, returning gathered tensors. You should drop the remainder yourself." + ) + return data + elif self.gradient_state.remainder > 0: + # Last batch needs to be truncated on distributed systems as it contains additional samples + def _adjust_samples(tensor): + return tensor[: self.gradient_state.remainder] + + return recursively_apply(_adjust_samples, data) + else: # remainder is 0 + # no remainder even though at end of dataloader, so nothing to do. + return data + else: + # Not at the end of the dataloader, no need to adjust the tensors + return data + except Exception: + # Dataset had no length or raised an error + return data + + def reduce(self, tensor, reduction="sum", scale=1.0): + """ + Reduce the values in *tensor* across all processes based on *reduction*. + + Note: + All processes get the reduced value. + + Args: + tensor (`torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`): + The tensors to reduce across all processes. + reduction (`str`, *optional*, defaults to "sum"): + A reduction type, can be one of 'sum', 'mean', or 'none'. If 'none', will not perform any operation. + scale (`float`, *optional*, defaults to 1.0): + A default scaling value to be applied after the reduce, only valied on XLA. 
+ + Returns: + `torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`: + The reduced tensor(s). + + Example: + + ```python + >>> # Assuming two processes + >>> import torch + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> process_tensor = torch.arange(accelerator.num_processes) + 1 + (2 * accelerator.process_index) + >>> process_tensor = process_tensor.to(accelerator.device) + >>> reduced_tensor = accelerator.reduce(process_tensor, reduction="sum") + >>> reduced_tensor + tensor([4, 6]) + ``` + """ + return reduce(tensor, reduction, scale) + + def pad_across_processes(self, tensor, dim=0, pad_index=0, pad_first=False): + """ + Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so + they can safely be gathered. + + Args: + tensor (nested list/tuple/dictionary of `torch.Tensor`): + The data to gather. + dim (`int`, *optional*, defaults to 0): + The dimension on which to pad. + pad_index (`int`, *optional*, defaults to 0): + The value with which to pad. + pad_first (`bool`, *optional*, defaults to `False`): + Whether to pad at the beginning or the end. + + Returns: + `torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`: + The padded tensor(s). + + Example: + + ```python + >>> # Assuming two processes, with the first processes having a tensor of size 1 and the second of size 2 + >>> import torch + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> process_tensor = torch.arange(accelerator.process_index + 1).to(accelerator.device) + >>> padded_tensor = accelerator.pad_across_processes(process_tensor) + >>> padded_tensor.shape + torch.Size([2]) + ``` + """ + return pad_across_processes(tensor, dim=dim, pad_index=pad_index, pad_first=pad_first) + + def unwrap_model(self, model, keep_fp32_wrapper: bool = True): + """ + Unwraps the `model` from the additional layer possible added by [`~Accelerator.prepare`]. Useful before saving + the model. + + Args: + model (`torch.nn.Module`): + The model to unwrap. + keep_fp32_wrapper (`bool`, *optional*, defaults to `True`): + Whether to not remove the mixed precision hook if it was added. + + Returns: + `torch.nn.Module`: The unwrapped model. + + Example: + + ```python + >>> # Assuming two GPU processes + >>> from torch.nn.parallel import DistributedDataParallel + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> model = accelerator.prepare(MyModel()) + >>> print(model.__class__.__name__) + DistributedDataParallel + + >>> model = accelerator.unwrap_model(model) + >>> print(model.__class__.__name__) + MyModel + ``` + """ + return extract_model_from_parallel(model, keep_fp32_wrapper) + + def wait_for_everyone(self): + """ + Will stop the execution of the current process until every other process has reached that point (so this does + nothing when the script is only run in one process). Useful to do before saving a model. + + Example: + + ```python + >>> # Assuming two GPU processes + >>> import time + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> if accelerator.is_main_process: + ... time.sleep(2) + >>> else: + ... 
print("I'm waiting for the main process to finish its sleep...") + >>> accelerator.wait_for_everyone() + >>> # Should print on every process at the same time + >>> print("Everyone is here") + ``` + """ + wait_for_everyone() + + @on_main_process + def init_trackers(self, project_name: str, config: dict | None = None, init_kwargs: dict | None = {}): + """ + Initializes a run for all trackers stored in `self.log_with`, potentially with starting configurations + + Args: + project_name (`str`): + The name of the project. All trackers will save their data based on this + config (`dict`, *optional*): + Optional starting configuration to be logged. + init_kwargs (`dict`, *optional*): + A nested dictionary of kwargs to be passed to a specific tracker's `__init__` function. Should be + formatted like so: + ```python + {"wandb": {"tags": ["tag_a", "tag_b"]}} + ``` + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator(log_with="tensorboard") + >>> accelerator.init_trackers( + ... project_name="my_project", + ... config={"learning_rate": 0.001, "batch_size": 32}, + ... init_kwargs={"tensorboard": {"flush_secs": 60}}, + ... ) + ``` + """ + self.trackers = [] + for tracker in self.log_with: + if issubclass(type(tracker), GeneralTracker): + # Custom trackers are already initialized + self.trackers.append(tracker) + else: + tracker_init = LOGGER_TYPE_TO_CLASS[str(tracker)] + if getattr(tracker_init, "requires_logging_directory"): + # We can skip this check since it was done in `__init__` + self.trackers.append( + tracker_init(project_name, self.logging_dir, **init_kwargs.get(str(tracker), {})) + ) + else: + self.trackers.append(tracker_init(project_name, **init_kwargs.get(str(tracker), {}))) + if config is not None: + for tracker in self.trackers: + tracker.store_init_configuration(config) + + def get_tracker(self, name: str, unwrap: bool = False): + """ + Returns a `tracker` from `self.trackers` based on `name` on the main process only. + + Args: + name (`str`): + The name of a tracker, corresponding to the `.name` property. + unwrap (`bool`): + Whether to return the internal tracking mechanism or to return the wrapped tracker instead + (recommended). + + Returns: + `GeneralTracker`: The tracker corresponding to `name` if it exists. + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator(log_with="tensorboard") + >>> accelerator.init_trackers("my_project") + >>> tensorboard_tracker = accelerator.get_tracker("tensorboard") + ``` + """ + if len(getattr(self, "trackers", [])) > 0: + for tracker in self.trackers: + if tracker.name == name: + return tracker.tracker if unwrap else tracker + raise ValueError(f"{name} is not an available tracker stored inside the `Accelerator`.") + # Handle tracker only made on main process + return GeneralTracker(_blank=True) + + @on_main_process + def log(self, values: dict, step: int | None = None, log_kwargs: dict | None = {}): + """ + Logs `values` to all stored trackers in `self.trackers` on the main process only. + + Args: + values (`dict`): + Values should be a dictionary-like object containing only types `int`, `float`, or `str`. + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + log_kwargs (`dict`, *optional*): + A nested dictionary of kwargs to be passed to a specific tracker's `log` function. 
Should be formatted + like so: + ```python + {"wandb": {"tags": ["tag_a", "tag_b"]}} + ``` + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator(log_with="tensorboard") + >>> accelerator.init_trackers("my_project") + >>> accelerator.log({"loss": 0.5, "accuracy": 0.9}) + ``` + """ + for tracker in self.trackers: + tracker.log(values, step=step, **log_kwargs.get(tracker.name, {})) + + @on_main_process + def end_training(self): + """ + Runs any special end training behaviors, such as stopping trackers on the main process only. Should always be + called at the end of your script if using experiment tracking. + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator(log_with="tensorboard") + >>> accelerator.init_trackers("my_project") + >>> # Do training + >>> accelerator.end_training() + ``` + """ + for tracker in self.trackers: + tracker.finish() + + def save(self, obj, f, safe_serialization=False): + """ + Save the object passed to disk once per machine. Use in place of `torch.save`. + + Args: + obj (`object`): The object to save. + f (`str` or `os.PathLike`): Where to save the content of `obj`. + safe_serialization (`bool`, *optional*, defaults to `False`): Whether to save `obj` using `safetensors` + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> arr = [0, 1, 2, 3] + >>> accelerator.save(arr, "array.pkl") + ``` + """ + save(obj, f, safe_serialization=safe_serialization) + + def save_model( + self, + model: torch.nn.Module, + save_directory: Union[str, os.PathLike], + max_shard_size: Union[int, str] = "10GB", + safe_serialization: bool = False, + ): + """ + Save a model so that it can be re-loaded using load_checkpoint_in_model + + Arguments: + model: (`torch.nn.Module`): + Model to be saved. The model can be wrapped or unwraped. + save_directory (`str` or `os.PathLike`): + Directory to which to save. Will be created if it doesn't exist. + max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`): + The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size + lower than this size. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`). + + + + If a single weight of the model is bigger than `max_shard_size`, it will be in its own checkpoint shard + which will be bigger than `max_shard_size`. + + + + safe_serialization (`bool`, *optional*, defaults to `False`): + Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> model = ... + >>> accelerator.save_model(model, save_directory) + ``` + """ + + if safe_serialization and not is_safetensors_available(): + raise ImportError("`safe_serialization` requires the `safetensors library: `pip install safetensors`.") + + if os.path.isfile(save_directory): + logger.error(f"Provided path ({save_directory}) should be a directory, not a file") + return + + os.makedirs(save_directory, exist_ok=True) + + # get the state_dict of the model + state_dict = self.get_state_dict(model) + + if safe_serialization: + # Safetensors does not allow tensor aliasing. 
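+            # ("Aliasing" here means several state-dict keys sharing the same underlying storage, as
+            # commonly happens with tied weights, e.g. an embedding matrix reused as the output projection.)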
+ # We're going to remove aliases before saving + ptrs = collections.defaultdict(list) + # when bnb serialization is used the weights in the state dict can be strings + for name, tensor in state_dict.items(): + if not isinstance(tensor, str): + ptrs[id_tensor_storage(tensor)].append(name) + + # These are all the pointers of shared tensors. + shared_ptrs = {ptr: names for ptr, names in ptrs.items() if len(names) > 1} + warn_names = set() + for names in shared_ptrs.values(): + # When not all duplicates have been cleaned, still remove those keys, but put a clear warning. + # If the link between tensors was done at runtime then `from_pretrained` will not get + # the key back leading to random tensor. A proper warning will be shown + # during reload (if applicable), but since the file is not necessarily compatible with + # the config, better show a proper warning. + found = 0 + for name in names: + if name in state_dict: + found += 1 + if found > 1: + del state_dict[name] + warn_names.add(name) + if len(warn_names) > 0: + logger.warning( + f"Removed shared tensor {warn_names} while saving. This should be OK, but check by verifying that you don't receive any warning while reloading", + ) + + weights_name = SAFE_WEIGHTS_NAME if safe_serialization else WEIGHTS_NAME + + # Shard the model if it is too big. + shards, index = shard_checkpoint(state_dict, max_shard_size=max_shard_size, weights_name=weights_name) + + # Clean the folder from a previous save + for filename in os.listdir(save_directory): + full_filename = os.path.join(save_directory, filename) + # If we have a shard file that is not going to be replaced, we delete it, but only from the main process + # in distributed settings to avoid race conditions. + weights_no_suffix = weights_name.replace(".bin", "") + + # make sure that file to be deleted matches format of sharded file, e.g. pytorch_model-00001-of-00005 + filename_no_suffix = filename.replace(".bin", "") + reg = re.compile(r"(.*?)-\d{5}-of-\d{5}") + + if ( + filename.startswith(weights_no_suffix) + and os.path.isfile(full_filename) + and filename not in shards.keys() + and reg.fullmatch(filename_no_suffix) is not None + and PartialState().is_main_process + ): + os.remove(full_filename) + + # Save the model + for shard_file, shard in shards.items(): + self.save(shard, os.path.join(save_directory, shard_file), safe_serialization=safe_serialization) + + if index is None: + path_to_weights = os.path.join(save_directory, WEIGHTS_NAME) + logger.info(f"Model weights saved in {path_to_weights}") + else: + save_index_file = SAFE_WEIGHTS_INDEX_NAME if safe_serialization else WEIGHTS_INDEX_NAME + save_index_file = os.path.join(save_directory, save_index_file) + # Save the index as well + with open(save_index_file, "w", encoding="utf-8") as f: + content = json.dumps(index, indent=2, sort_keys=True) + "\n" + f.write(content) + logger.info( + f"The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be " + f"split in {len(shards)} checkpoint shards. You can find where each parameters has been saved in the " + f"index located at {save_index_file}." + ) + + def register_save_state_pre_hook(self, hook: Callable[..., None]) -> hooks.RemovableHandle: + """ + Registers a pre hook to be run before `save_checkpoint` is called in [`Accelerator.save_state`]. + + Args: + hook (`Callable`): + A function to be called in [`Accelerator.save_state`] before `save_checkpoint`. 
+ + The hook should have the following signature: + + `hook(models: list[torch.nn.Module], weights: list[dict[str, torch.Tensor]], input_dir: str) -> None` + + The `models` argument are the models as saved in the accelerator state under `accelerator._models`, `weigths` + argument are the state dicts of the `models`, and the `input_dir` argument is the `input_dir` argument passed + to [`Accelerator.load_state`]. + + + + Should only be used in conjunction with [`Accelerator.register_load_state_pre_hook`]. Can be useful to save + configurations in addition to model weights. Can also be used to overwrite model saving with a customized + method. In this case, make sure to remove already loaded weights from the weights list. + + + + Returns: + `torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling + `handle.remove()` + """ + handle = hooks.RemovableHandle(self._save_model_state_pre_hook) + self._save_model_state_pre_hook[handle.id] = hook + return handle + + def save_state(self, output_dir: str = None, **save_model_func_kwargs): + """ + Saves the current states of the model, optimizer, scaler, RNG generators, and registered objects to a folder. + + If a `ProjectConfiguration` was passed to the `Accelerator` object with `automatic_checkpoint_naming` enabled + then checkpoints will be saved to `self.project_dir/checkpoints`. If the number of current saves is greater + than `total_limit` then the oldest save is deleted. Each checkpoint is saved in seperate folders named + `checkpoint_`. + + Otherwise they are just saved to `output_dir`. + + + + Should only be used when wanting to save a checkpoint during training and restoring the state in the same + environment. + + + + Args: + output_dir (`str` or `os.PathLike`): + The name of the folder to save all relevant weights and states. + save_model_func_kwargs (`dict`, *optional*): + Additional keyword arguments for saving model which can be passed to the underlying save function, such + as optional arguments for DeepSpeed's `save_checkpoint` function. + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> model, optimizer, lr_scheduler = ... + >>> model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler) + >>> accelerator.save_state(output_dir="my_checkpoint") + ``` + """ + if self.project_configuration.automatic_checkpoint_naming: + output_dir = os.path.join(self.project_dir, "checkpoints") + os.makedirs(output_dir, exist_ok=True) + if self.project_configuration.automatic_checkpoint_naming: + folders = [os.path.join(output_dir, folder) for folder in os.listdir(output_dir)] + if ( + self.project_configuration.total_limit is not None + and (len(folders) + 1 > self.project_configuration.total_limit) + and self.is_main_process + ): + + def _inner(folder): + return list(map(int, re.findall(r"[\/]?([0-9]+)(?=[^\/]*$)", folder)))[0] + + folders.sort(key=_inner) + logger.warning( + f"Deleting {len(folders) + 1 - self.project_configuration.total_limit} checkpoints to make room for new checkpoint." + ) + for folder in folders[: len(folders) + 1 - self.project_configuration.total_limit]: + shutil.rmtree(folder) + output_dir = os.path.join(output_dir, f"checkpoint_{self.save_iteration}") + if os.path.exists(output_dir): + raise ValueError( + f"Checkpoint directory {output_dir} ({self.save_iteration}) already exists. Please manually override `self.save_iteration` with what iteration to start with." 
+ ) + self.wait_for_everyone() + os.makedirs(output_dir, exist_ok=True) + logger.info(f"Saving current state to {output_dir}") + + if self.distributed_type == DistributedType.TPU: + # Finish running the previous step before checkpointing + xm.mark_step() + + # Save the models taking care of FSDP and DeepSpeed nuances + weights = [] + for i, model in enumerate(self._models): + if self.distributed_type == DistributedType.FSDP: + logger.info("Saving FSDP model") + save_fsdp_model(self.state.fsdp_plugin, self, model, output_dir, i) + logger.info(f"FSDP Model saved to output dir {output_dir}") + elif self.distributed_type == DistributedType.DEEPSPEED: + logger.info("Saving DeepSpeed Model and Optimizer") + ckpt_id = f"{MODEL_NAME}" if i == 0 else f"{MODEL_NAME}_{i}" + model.save_checkpoint(output_dir, ckpt_id, **save_model_func_kwargs) + logger.info(f"DeepSpeed Model and Optimizer saved to output dir {os.path.join(output_dir, ckpt_id)}") + elif self.distributed_type == DistributedType.MEGATRON_LM: + logger.info("Saving Megatron-LM Model, Optimizer and Scheduler") + model.save_checkpoint(output_dir) + logger.info(f"Megatron-LM Model , Optimizer and Scheduler saved to output dir {output_dir}") + else: + weights.append(self.get_state_dict(model, unwrap=False)) + + # Save the optimizers taking care of FSDP and DeepSpeed nuances + optimizers = [] + if self.distributed_type == DistributedType.FSDP: + for opt in self._optimizers: + logger.info("Saving FSDP Optimizer") + save_fsdp_optimizer(self.state.fsdp_plugin, self, opt, self._models[i], output_dir, i) + logger.info(f"FSDP Optimizer saved to output dir {output_dir}") + elif self.distributed_type not in [DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]: + optimizers = self._optimizers + + # Save the lr schedulers taking care of DeepSpeed nuances + schedulers = [] + if self.distributed_type == DistributedType.DEEPSPEED: + for i, scheduler in enumerate(self._schedulers): + if isinstance(scheduler, DeepSpeedSchedulerWrapper): + continue + schedulers.append(scheduler) + elif self.distributed_type not in [DistributedType.MEGATRON_LM]: + schedulers = self._schedulers + + # Call model loading hooks that might have been registered with + # accelerator.register_model_state_hook + for hook in self._save_model_state_pre_hook.values(): + hook(self._models, weights, output_dir) + + save_location = save_accelerator_state( + output_dir, weights, optimizers, schedulers, self.state.process_index, self.scaler + ) + for i, obj in enumerate(self._custom_objects): + save_custom_state(obj, output_dir, i) + self.project_configuration.iteration += 1 + return save_location + + def register_load_state_pre_hook(self, hook: Callable[..., None]) -> hooks.RemovableHandle: + """ + Registers a pre hook to be run before [`load_checkpoint`] is called in [`Accelerator.load_state`]. + + Args: + hook (`Callable`): + A function to be called in [`Accelerator.load_state`] before `load_checkpoint`. + + The hook should have the following signature: + + `hook(models: list[torch.nn.Module], input_dir: str) -> None` + + The `models` argument are the models as saved in the accelerator state under `accelerator._models`, and the + `input_dir` argument is the `input_dir` argument passed to [`Accelerator.load_state`]. + + + + Should only be used in conjunction with [`Accelerator.register_save_state_pre_hook`]. Can be useful to load + configurations in addition to model weights. Can also be used to overwrite model loading with a customized + method. 
In this case, make sure to remove already loaded models from the models list. + + + + Returns: + `torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling + `handle.remove()` + """ + handle = hooks.RemovableHandle(self._load_model_state_pre_hook) + self._load_model_state_pre_hook[handle.id] = hook + return handle + + def load_state(self, input_dir: str = None, **load_model_func_kwargs): + """ + Loads the current states of the model, optimizer, scaler, RNG generators, and registered objects. + + + + Should only be used in conjunction with [`Accelerator.save_state`]. If a file is not registered for + checkpointing, it will not be loaded if stored in the directory. + + + + Args: + input_dir (`str` or `os.PathLike`): + The name of the folder all relevant weights and states were saved in. Can be `None` if + `automatic_checkpoint_naming` is used, and will pick up from the latest checkpoint. + load_model_func_kwargs (`dict`, *optional*): + Additional keyword arguments for loading model which can be passed to the underlying load function, + such as optional arguments for DeepSpeed's `load_checkpoint` function or a `map_location` to load the + model and optimizer on. + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> model, optimizer, lr_scheduler = ... + >>> model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler) + >>> accelerator.load_state("my_checkpoint") + ``` + """ + if input_dir is not None: + # Check if folder exists + input_dir = os.path.expanduser(input_dir) + if not os.path.isdir(input_dir): + raise ValueError(f"Tried to find {input_dir} but folder does not exist") + elif self.project_configuration.automatic_checkpoint_naming: + # Pick up from automatic checkpoint naming + input_dir = os.path.join(self.project_dir, "checkpoints") + folders = [os.path.join(input_dir, folder) for folder in os.listdir(input_dir)] + + def _inner(folder): + return list(map(int, re.findall(r"[\/]?([0-9]+)(?=[^\/]*$)", folder)))[0] + + folders.sort(key=_inner) + input_dir = os.path.join(input_dir, folders[-1]) + else: + raise ValueError("No input_dir provided and automatic checkpoint naming is disabled.") + logger.info(f"Loading states from {input_dir}") + + # Load the models taking care of FSDP and DeepSpeed nuances + models = [] + for i, model in enumerate(self._models): + if self.distributed_type == DistributedType.FSDP: + logger.info("Loading FSDP model") + load_fsdp_model(self.state.fsdp_plugin, self, model, input_dir, i) + logger.info(f"FSDP Model loaded from input dir {input_dir}") + elif self.distributed_type == DistributedType.DEEPSPEED: + logger.info("Loading DeepSpeed Model and Optimizer") + ckpt_id = f"{MODEL_NAME}" if i == 0 else f"{MODEL_NAME}_{i}" + model.load_checkpoint(input_dir, ckpt_id, **load_model_func_kwargs) + logger.info(f"DeepSpeed Model and Optimizer loaded from input dir {os.path.join(input_dir, ckpt_id)}") + elif self.distributed_type == DistributedType.MEGATRON_LM: + logger.info("Loading Megatron-LM Model, Optimizer and Scheduler") + model.load_checkpoint(input_dir) + logger.info(f"Megatron-LM Model , Optimizer and Scheduler loaded from input dir {input_dir}") + else: + models.append(model) + + # Load the optimizers taking care of FSDP and DeepSpeed nuances + optimizers = [] + if self.distributed_type == DistributedType.FSDP: + for i, opt in enumerate(self._optimizers): + logger.info("Loading FSDP Optimizer") + load_fsdp_optimizer(self.state.fsdp_plugin, 
self, opt, self._models[i], input_dir, i) + logger.info(f"FSDP Optimizer loaded from input dir {input_dir}") + elif self.distributed_type not in [DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]: + optimizers = self._optimizers + + # Load the lr schedulers taking care of DeepSpeed nuances + schedulers = [] + if self.distributed_type == DistributedType.DEEPSPEED: + for i, scheduler in enumerate(self._schedulers): + if isinstance(scheduler, DeepSpeedSchedulerWrapper): + continue + schedulers.append(scheduler) + elif self.distributed_type not in [DistributedType.MEGATRON_LM]: + schedulers = self._schedulers + + # Call model loading hooks that might have been registered with + # accelerator.register_model_state_hook + for hook in self._load_model_state_pre_hook.values(): + hook(models, input_dir) + + map_location = load_model_func_kwargs.pop("map_location", None) + if map_location is None: + if self.num_processes > 1 and self.distributed_type in ( + DistributedType.MULTI_GPU, + DistributedType.MULTI_NPU, + ): + map_location = "on_device" + else: + map_location = "cpu" + + load_accelerator_state( + input_dir, + models, + optimizers, + schedulers, + self.state.process_index, + self.scaler, + map_location, + **load_model_func_kwargs, + ) + custom_checkpoints = [ + f for f in os.listdir(input_dir) if re.search(r"^custom_checkpoint_\d+\.pkl$", f) is not None + ] + if len(custom_checkpoints) != len(self._custom_objects): + err = "Number of custom checkpoints in folder {input_dir} does not match the number of registered objects:" + err += f"\n\tFound checkpoints: {len(custom_checkpoints)}" + err += f"\n\tRegistered objects: {len(self._custom_objects)}\n" + err += "Please make sure to only load checkpoints from folders that were created with the same set of registered objects," + err += "or avoid using `custom_checkpoint` in the filename for files in that same directory and load them in manually." + raise RuntimeError(err) + else: + logger.info(f"Loading in {len(custom_checkpoints)} custom states") + for index, obj in enumerate(self._custom_objects): + load_custom_state(obj, input_dir, index) + + def free_memory(self): + """ + Will release all references to the internal objects stored and call the garbage collector. You should call this + method between two trainings with different models/optimizers. Also will reset `Accelerator.step` to 0. + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> model, optimizer, scheduler = ... + >>> model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler) + >>> accelerator.free_memory() + >>> del model, optimizer, scheduler + ``` + """ + self._schedulers = [] + self._optimizers = [] + self._models = [] + self._dataloaders = [] + self.deepspeed_engine_wrapped = None + self.step = 0 + release_memory() + + def clear(self): + """ + Alias for [`Accelerate.free_memory`], releases all references to the internal objects stored and call the + garbage collector. You should call this method between two trainings with different models/optimizers. + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> model, optimizer, scheduler = ... 
+ >>> model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler) + >>> accelerator.free_memory() + >>> del model, optimizer, scheduler + ``` + """ + self.free_memory() + + def _get_named_parameters(self, *args): + named_parameters = {} + for obj in args: + if isinstance(obj, torch.nn.Module): + obj = extract_model_from_parallel(obj) + named_parameters.update({n: p for n, p in obj.named_parameters()}) + return named_parameters + + def _get_devices(self, *args): + model_device = None + optimizer_device = None + for obj in args: + # Loop through model parameters and stop at the first once we have its device. + if isinstance(obj, torch.nn.Module): + for param in obj.parameters(): + model_device = param.device + break + # Loop through optimizer parameters groups and stop at the first once we have its device. + if isinstance(obj, torch.optim.Optimizer): + for param_group in obj.param_groups: + if len(param_group["params"]) > 0: + optimizer_device = param_group["params"][0].device + break + return (model_device, optimizer_device) + + def get_state_dict(self, model, unwrap=True): + """ + Returns the state dictionary of a model sent through [`Accelerator.prepare`] potentially without full + precision. + + Args: + model (`torch.nn.Module`): + A PyTorch model sent through [`Accelerator.prepare`] + unwrap (`bool`, *optional*, defaults to `True`): + Whether to return the original underlying state_dict of `model` or to return the wrapped state_dict + + Returns: + `dict`: The state dictionary of the model potentially without full precision. + + Example: + + ```python + >>> import torch + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> net = torch.nn.Linear(2, 2) + >>> net = accelerator.prepare(net) + >>> state_dict = accelerator.get_state_dict(net) + ``` + """ + + if self.distributed_type == DistributedType.DEEPSPEED: + if self.deepspeed_config["zero_optimization"]["stage"] == 3: + if model.zero_gather_16bit_weights_on_model_save(): + state_dict = model._zero3_consolidated_16bit_state_dict() + else: + raise ValueError( + "Cannot get 16bit model weights because `stage3_gather_16bit_weights_on_model_save` in DeepSpeed config is False. " + "To save the model weights in 16bit, set `stage3_gather_16bit_weights_on_model_save` to True in DeepSpeed config file or " + "set `zero3_save_16bit_model` to True when using `accelerate config`. " + "To save the full checkpoint, run `model.save_checkpoint(save_dir)` and use `zero_to_fp32.py` to recover weights." + ) + else: + from deepspeed.checkpoint.utils import clone_tensors_for_torch_save + + state_dict = clone_tensors_for_torch_save(self.unwrap_model(model).state_dict()) + else: + if unwrap: + model = self.unwrap_model(model) + state_dict = model.state_dict() + + return state_dict + + def register_for_checkpointing(self, *objects): + """ + Makes note of `objects` and will save or load them in during `save_state` or `load_state`. + + These should be utilized when the state is being loaded or saved in the same script. It is not designed to be + used in different scripts. + + + + Every `object` must have a `load_state_dict` and `state_dict` function to be stored. + + + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> # Assume `CustomObject` has a `state_dict` and `load_state_dict` function. 
+ >>> obj = CustomObject() + >>> accelerator.register_for_checkpointing(obj) + >>> accelerator.save_state("checkpoint.pt") + ``` + """ + invalid_objects = [] + for obj in objects: + if not hasattr(obj, "state_dict") or not hasattr(obj, "load_state_dict"): + invalid_objects.append(obj) + if len(invalid_objects) > 0: + err = "All `objects` must include a `state_dict` and `load_state_dict` function to be stored. The following inputs are invalid:" + for index, obj in enumerate(invalid_objects): + err += f"\n\t- Item at index {index}, `{get_pretty_name(obj)}`" + raise ValueError(err) + self._custom_objects.extend(objects) + + @contextmanager + def autocast(self, cache_enabled: bool = False, autocast_handler: AutocastKwargs = None): + """ + Will apply automatic mixed-precision inside the block inside this context manager, if it is enabled. Nothing + different will happen otherwise. + + A different `autocast_handler` can be passed in to override the one set in the `Accelerator` object. This is + useful in blocks under `autocast` where you want to revert to fp32. + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator(mixed_precision="fp16") + >>> with accelerator.autocast(): + ... train() + ``` + """ + if cache_enabled: + warnings.warn( + "Passing `cache_enabled=True` to `accelerator.autocast` is deprecated and will be removed in v0.23.0. " + "Please use the `AutocastKwargs` class instead and pass it to the `Accelerator` as a `kwarg_handler`.", + FutureWarning, + ) + if self.autocast_handler is not None: + self.autocast_handler.cache_enabled = True + else: + self.autocast_handler = AutocastKwargs(cache_enabled=True) + if autocast_handler is None: + autocast_handler = self.autocast_handler + autocast_context = get_mixed_precision_context_manager(self.native_amp, autocast_handler) + autocast_context.__enter__() + yield + autocast_context.__exit__(*sys.exc_info()) + + @property + def optimizer_step_was_skipped(self): + """ + Whether or not the optimizer update was skipped (because of gradient overflow in mixed precision), in which + case the learning rate should not be changed. + """ + for optimizer in self._optimizers: + if optimizer.step_was_skipped: + return True + return False + + def skip_first_batches(self, dataloader, num_batches: int = 0): + """ + Creates a new `torch.utils.data.DataLoader` that will efficiently skip the first `num_batches`. + + Args: + dataloader (`torch.utils.data.DataLoader`): The data loader in which to skip batches. + num_batches (`int`, *optional*, defaults to 0): The number of batches to skip + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler) + >>> skipped_dataloader = accelerator.skip_first_batches(dataloader, num_batches=2) + >>> # for the first epoch only + >>> for input, target in skipped_dataloader: + ... optimizer.zero_grad() + ... output = model(input) + ... loss = loss_func(output, target) + ... accelerator.backward(loss) + ... optimizer.step() + + >>> # subsequent epochs + >>> for input, target in dataloader: + ... optimizer.zero_grad() + ... ... 
+ ``` + """ + return skip_first_batches(dataloader, num_batches=num_batches) + + def __deepcopy__(self, memo): + logger.info("Deep copying the `Accelerator` object, note that this will point to the same original object.") + return self + + def verify_device_map(self, model: torch.nn.Module) -> bool: + """ + Verifies that `model` has not been prepared with big model inference with a device-map resembling `auto`. + """ + # Checks if any of the child modules has the attribute `hf_device_map` and this map has more than one entry. + for m in model.modules(): + if hasattr(m, "hf_device_map") and len(m.hf_device_map) > 1: + return True + + return False diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/hooks.py b/evalkit_tf437/lib/python3.10/site-packages/accelerate/hooks.py new file mode 100644 index 0000000000000000000000000000000000000000..b9bc2a86215b2d595657be0fe5916fb319e3b710 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/accelerate/hooks.py @@ -0,0 +1,597 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import functools +from typing import Dict, List, Mapping, Optional, Union + +import torch +import torch.nn as nn + +from .state import PartialState +from .utils import ( + PrefixedDataset, + find_device, + named_module_tensors, + send_to_device, + set_module_tensor_to_device, +) + + +class ModelHook: + """ + A hook that contains callbacks to be executed just before and after the forward method of a model. The difference + with PyTorch existing hooks is that they get passed along the kwargs. + + Class attribute: + - **no_grad** (`bool`, *optional*, defaults to `False`) -- Whether or not to execute the actual forward pass under + the `torch.no_grad()` context manager. + """ + + no_grad = False + + def init_hook(self, module): + """ + To be executed when the hook is attached to the module. + + Args: + module (`torch.nn.Module`): The module attached to this hook. + """ + return module + + def pre_forward(self, module, *args, **kwargs): + """ + To be executed just before the forward method of the model. + + Args: + module (`torch.nn.Module`): The module whose forward pass will be executed just after this event. + args (`Tuple[Any]`): The positional arguments passed to the module. + kwargs (`Dict[Str, Any]`): The keyword arguments passed to the module. + + Returns: + `Tuple[Tuple[Any], Dict[Str, Any]]`: A tuple with the treated `args` and `kwargs`. + """ + return args, kwargs + + def post_forward(self, module, output): + """ + To be executed just after the forward method of the model. + + Args: + module (`torch.nn.Module`): The module whose forward pass been executed just before this event. + output (`Any`): The output of the module. + + Returns: + `Any`: The processed `output`. + """ + return output + + def detach_hook(self, module): + """ + To be executed when the hook is detached from a module. + + Args: + module (`torch.nn.Module`): The module detached from this hook. 
+ """ + return module + + +class SequentialHook(ModelHook): + """ + A hook that can contain several hooks and iterates through them at each event. + """ + + def __init__(self, *hooks): + self.hooks = hooks + + def init_hook(self, module): + for hook in self.hooks: + module = hook.init_hook(module) + return module + + def pre_forward(self, module, *args, **kwargs): + for hook in self.hooks: + args, kwargs = hook.pre_forward(module, *args, **kwargs) + return args, kwargs + + def post_forward(self, module, output): + for hook in self.hooks: + output = hook.post_forward(module, output) + return output + + def detach_hook(self, module): + for hook in self.hooks: + module = hook.detach_hook(module) + return module + + +def add_hook_to_module(module: nn.Module, hook: ModelHook, append: bool = False): + """ + Adds a hook to a given module. This will rewrite the `forward` method of the module to include the hook, to remove + this behavior and restore the original `forward` method, use `remove_hook_from_module`. + + + + If the module already contains a hook, this will replace it with the new hook passed by default. To chain two hooks + together, pass `append=True`, so it chains the current and new hook into an instance of the `SequentialHook` class. + + + + Args: + module (`torch.nn.Module`): + The module to attach a hook to. + hook (`ModelHook`): + The hook to attach. + append (`bool`, *optional*, defaults to `False`): + Whether the hook should be chained with an existing one (if module already contains a hook) or not. + + Returns: + `torch.nn.Module`: The same module, with the hook attached (the module is modified in place, so the result can + be discarded). + """ + + if append and (getattr(module, "_hf_hook", None) is not None): + old_hook = module._hf_hook + remove_hook_from_module(module) + hook = SequentialHook(old_hook, hook) + + if hasattr(module, "_hf_hook") and hasattr(module, "_old_forward"): + # If we already put some hook on this module, we replace it with the new one. + old_forward = module._old_forward + else: + old_forward = module.forward + module._old_forward = old_forward + + module = hook.init_hook(module) + module._hf_hook = hook + + @functools.wraps(old_forward) + def new_forward(*args, **kwargs): + args, kwargs = module._hf_hook.pre_forward(module, *args, **kwargs) + if module._hf_hook.no_grad: + with torch.no_grad(): + output = old_forward(*args, **kwargs) + else: + output = old_forward(*args, **kwargs) + return module._hf_hook.post_forward(module, output) + + module.forward = new_forward + return module + + +def remove_hook_from_module(module: nn.Module, recurse=False): + """ + Removes any hook attached to a module via `add_hook_to_module`. + + Args: + module (`torch.nn.Module`): The module to attach a hook to. + recurse (`bool`, **optional**): Whether to remove the hooks recursively + + Returns: + `torch.nn.Module`: The same module, with the hook detached (the module is modified in place, so the result can + be discarded). 
+ """ + + if hasattr(module, "_hf_hook"): + module._hf_hook.detach_hook(module) + delattr(module, "_hf_hook") + + if hasattr(module, "_old_forward"): + module.forward = module._old_forward + delattr(module, "_old_forward") + + if recurse: + for child in module.children(): + remove_hook_from_module(child, recurse) + + return module + + +class AlignDevicesHook(ModelHook): + """ + A generic `ModelHook` that ensures inputs and model weights are on the same device for the forward pass of the + associated module, potentially offloading the weights after the forward pass. + + Args: + execution_device (`torch.device`, *optional*): + The device on which inputs and model weights should be placed before the forward pass. + offload (`bool`, *optional*, defaults to `False`): + Whether or not the weights should be offloaded after the forward pass. + io_same_device (`bool`, *optional*, defaults to `False`): + Whether or not the output should be placed on the same device as the input was. + weights_map (`Mapping[str, torch.Tensor]`, *optional*): + When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values. + offload_buffers (`bool`, *optional*, defaults to `False`): + Whether or not to include the associated module's buffers when offloading. + place_submodules (`bool`, *optional*, defaults to `False`): + Whether to place the submodules on `execution_device` during the `init_hook` event. + """ + + def __init__( + self, + execution_device: Optional[Union[int, str, torch.device]] = None, + offload: bool = False, + io_same_device: bool = False, + weights_map: Optional[Mapping] = None, + offload_buffers: bool = False, + place_submodules: bool = False, + skip_keys: Optional[Union[str, List[str]]] = None, + ): + self.execution_device = execution_device + self.offload = offload + self.io_same_device = io_same_device + self.weights_map = weights_map + self.offload_buffers = offload_buffers + self.place_submodules = place_submodules + self.skip_keys = skip_keys + + # Will contain the input device when `io_same_device=True`. 
+ self.input_device = None + self.param_original_devices = {} + self.buffer_original_devices = {} + + def __repr__(self): + return ( + f"AlignDevicesHook(execution_device={self.execution_device}, offload={self.offload}, " + f"io_same_device={self.io_same_device}, offload_buffers={self.offload_buffers}, " + f"place_submodules={self.place_submodules}, skip_keys={repr(self.skip_keys)})" + ) + + def init_hook(self, module): + if not self.offload and self.execution_device is not None: + for name, _ in named_module_tensors(module, recurse=self.place_submodules): + set_module_tensor_to_device(module, name, self.execution_device) + elif self.offload: + self.original_devices = { + name: param.device for name, param in named_module_tensors(module, recurse=self.place_submodules) + } + if self.weights_map is None: + self.weights_map = { + name: param.to("cpu") + for name, param in named_module_tensors( + module, include_buffers=self.offload_buffers, recurse=self.place_submodules + ) + } + + for name, _ in named_module_tensors( + module, include_buffers=self.offload_buffers, recurse=self.place_submodules + ): + set_module_tensor_to_device(module, name, "meta") + if not self.offload_buffers and self.execution_device is not None: + for name, _ in module.named_buffers(recurse=self.place_submodules): + set_module_tensor_to_device(module, name, self.execution_device) + return module + + def pre_forward(self, module, *args, **kwargs): + if self.io_same_device: + self.input_device = find_device([args, kwargs]) + if self.offload: + for name, _ in named_module_tensors( + module, include_buffers=self.offload_buffers, recurse=self.place_submodules + ): + fp16_statistics = None + if "weight" in name and name.replace("weight", "SCB") in self.weights_map.keys(): + if self.weights_map[name].dtype == torch.int8: + fp16_statistics = self.weights_map[name.replace("weight", "SCB")] + set_module_tensor_to_device( + module, name, self.execution_device, value=self.weights_map[name], fp16_statistics=fp16_statistics + ) + + return send_to_device(args, self.execution_device), send_to_device( + kwargs, self.execution_device, skip_keys=self.skip_keys + ) + + def post_forward(self, module, output): + if self.offload: + for name, _ in named_module_tensors( + module, include_buffers=self.offload_buffers, recurse=self.place_submodules + ): + set_module_tensor_to_device(module, name, "meta") + if type(module).__name__ == "Linear8bitLt": + module.state.SCB = None + module.state.CxB = None + + if self.io_same_device and self.input_device is not None: + output = send_to_device(output, self.input_device, skip_keys=self.skip_keys) + + return output + + def detach_hook(self, module): + if self.offload: + for name, device in self.original_devices.items(): + if device != torch.device("meta"): + set_module_tensor_to_device(module, name, device, value=self.weights_map.get(name, None)) + return module + + +def attach_execution_device_hook( + module: torch.nn.Module, + execution_device: Union[int, str, torch.device], + skip_keys: Optional[Union[str, List[str]]] = None, + preload_module_classes: Optional[List[str]] = None, +): + """ + Recursively attaches `AlignDevicesHook` to all submodules of a given model to make sure they have the right + execution device + + Args: + module (`torch.nn.Module`): + The module where we want to attach the hooks. + execution_device (`int`, `str` or `torch.device`): + The device on which inputs and model weights should be placed before the forward pass. 
+ skip_keys (`str` or `List[str]`, *optional*): + A list of keys to ignore when moving inputs or outputs between devices. + preload_module_classes (`List[str]`, *optional*): + A list of classes whose instances should load all their weights (even in the submodules) at the beginning + of the forward. This should only be used for classes that have submodules which are registered but not + called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, + `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. + """ + if not hasattr(module, "_hf_hook") and len(module.state_dict()) > 0: + add_hook_to_module(module, AlignDevicesHook(execution_device, skip_keys=skip_keys)) + + # Break the recursion if we get to a preload module. + if preload_module_classes is not None and module.__class__.__name__ in preload_module_classes: + return + + for child in module.children(): + attach_execution_device_hook(child, execution_device) + + +def attach_align_device_hook( + module: torch.nn.Module, + execution_device: Optional[torch.device] = None, + offload: bool = False, + weights_map: Optional[Mapping] = None, + offload_buffers: bool = False, + module_name: str = "", + skip_keys: Optional[Union[str, List[str]]] = None, + preload_module_classes: Optional[List[str]] = None, +): + """ + Recursively attaches `AlignDevicesHook` to all submodules of a given model that have direct parameters and/or + buffers. + + Args: + module (`torch.nn.Module`): + The module where we want to attach the hooks. + execution_device (`torch.device`, *optional*): + The device on which inputs and model weights should be placed before the forward pass. + offload (`bool`, *optional*, defaults to `False`): + Whether or not the weights should be offloaded after the forward pass. + weights_map (`Mapping[str, torch.Tensor]`, *optional*): + When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values. + offload_buffers (`bool`, *optional*, defaults to `False`): + Whether or not to include the associated module's buffers when offloading. + module_name (`str`, *optional*, defaults to `""`): + The name of the module. + skip_keys (`str` or `List[str]`, *optional*): + A list of keys to ignore when moving inputs or outputs between devices. + preload_module_classes (`List[str]`, *optional*): + A list of classes whose instances should load all their weights (even in the submodules) at the beginning + of the forward. This should only be used for classes that have submodules which are registered but not + called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, + `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. + """ + # Attach the hook on this module if it has any direct tensor. + directs = named_module_tensors(module) + full_offload = ( + offload and preload_module_classes is not None and module.__class__.__name__ in preload_module_classes + ) + + if len(list(directs)) > 0 or full_offload: + if weights_map is not None: + prefix = f"{module_name}." 
if len(module_name) > 0 else "" + prefixed_weights_map = PrefixedDataset(weights_map, prefix) + else: + prefixed_weights_map = None + hook = AlignDevicesHook( + execution_device=execution_device, + offload=offload, + weights_map=prefixed_weights_map, + offload_buffers=offload_buffers, + place_submodules=full_offload, + skip_keys=skip_keys, + ) + add_hook_to_module(module, hook, append=True) + + # We stop the recursion in case we hit the full offload. + if full_offload: + return + + # Recurse on all children of the module. + for child_name, child in module.named_children(): + child_name = f"{module_name}.{child_name}" if len(module_name) > 0 else child_name + attach_align_device_hook( + child, + execution_device=execution_device, + offload=offload, + weights_map=weights_map, + offload_buffers=offload_buffers, + module_name=child_name, + preload_module_classes=preload_module_classes, + skip_keys=skip_keys, + ) + + +def remove_hook_from_submodules(module: nn.Module): + """ + Recursively removes all hooks attached on the submodules of a given model. + + Args: + module (`torch.nn.Module`): The module on which to remove all hooks. + """ + remove_hook_from_module(module) + for child in module.children(): + remove_hook_from_submodules(child) + + +def attach_align_device_hook_on_blocks( + module: nn.Module, + execution_device: Optional[Union[torch.device, Dict[str, torch.device]]] = None, + offload: Union[bool, Dict[str, bool]] = False, + weights_map: Mapping = None, + offload_buffers: bool = False, + module_name: str = "", + skip_keys: Optional[Union[str, List[str]]] = None, + preload_module_classes: Optional[List[str]] = None, +): + """ + Attaches `AlignDevicesHook` to all blocks of a given model as needed. + + Args: + module (`torch.nn.Module`): + The module where we want to attach the hooks. + execution_device (`torch.device` or `Dict[str, torch.device]`, *optional*): + The device on which inputs and model weights should be placed before the forward pass. It can be one device + for the whole module, or a dictionary mapping module name to device. + offload (`bool`, *optional*, defaults to `False`): + Whether or not the weights should be offloaded after the forward pass. It can be one boolean for the whole + module, or a dictionary mapping module name to boolean. + weights_map (`Mapping[str, torch.Tensor]`, *optional*): + When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values. + offload_buffers (`bool`, *optional*, defaults to `False`): + Whether or not to include the associated module's buffers when offloading. + module_name (`str`, *optional*, defaults to `""`): + The name of the module. + skip_keys (`str` or `List[str]`, *optional*): + A list of keys to ignore when moving inputs or outputs between devices. + preload_module_classes (`List[str]`, *optional*): + A list of classes whose instances should load all their weights (even in the submodules) at the beginning + of the forward. This should only be used for classes that have submodules which are registered but not + called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, + `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. + """ + # If one device and one offload, we've got one hook. 
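Before the block-level variant continues below, a minimal sketch of how `attach_align_device_hook` and `remove_hook_from_submodules` are typically paired; the model, shapes, and device fallback are illustrative assumptions:

```python
import torch
from torch import nn

from accelerate.hooks import attach_align_device_hook, remove_hook_from_submodules

device = "cuda:0" if torch.cuda.is_available() else "cpu"
model = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 2))

# One AlignDevicesHook per submodule that owns tensors; with offload=True the weights
# are kept in the CPU map built in init_hook and only moved to `device` around each
# submodule's forward pass.
attach_align_device_hook(model, execution_device=device, offload=True)

out = model(torch.randn(1, 8))

remove_hook_from_submodules(model)  # detach every hook and restore the weights
```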
+ if not isinstance(execution_device, Mapping) and not isinstance(offload, dict): + if not offload: + hook = AlignDevicesHook( + execution_device=execution_device, io_same_device=True, skip_keys=skip_keys, place_submodules=True + ) + add_hook_to_module(module, hook) + else: + attach_align_device_hook( + module, + execution_device=execution_device, + offload=True, + weights_map=weights_map, + offload_buffers=offload_buffers, + module_name=module_name, + skip_keys=skip_keys, + ) + return + + if not isinstance(execution_device, Mapping): + execution_device = {key: execution_device for key in offload.keys()} + if not isinstance(offload, Mapping): + offload = {key: offload for key in execution_device.keys()} + + if module_name in execution_device and module_name in offload and not offload[module_name]: + hook = AlignDevicesHook( + execution_device=execution_device[module_name], + offload_buffers=offload_buffers, + io_same_device=(module_name == ""), + place_submodules=True, + skip_keys=skip_keys, + ) + add_hook_to_module(module, hook) + attach_execution_device_hook(module, execution_device[module_name]) + elif module_name in execution_device and module_name in offload: + attach_align_device_hook( + module, + execution_device=execution_device[module_name], + offload=True, + weights_map=weights_map, + offload_buffers=offload_buffers, + module_name=module_name, + skip_keys=skip_keys, + preload_module_classes=preload_module_classes, + ) + if not hasattr(module, "_hf_hook"): + hook = AlignDevicesHook( + execution_device=execution_device[module_name], io_same_device=(module_name == ""), skip_keys=skip_keys + ) + add_hook_to_module(module, hook) + attach_execution_device_hook( + module, + execution_device[module_name], + preload_module_classes=preload_module_classes, + skip_keys=skip_keys, + ) + elif module_name == "": + hook = AlignDevicesHook(execution_device=execution_device.get(""), io_same_device=True, skip_keys=skip_keys) + add_hook_to_module(module, hook) + + for child_name, child in module.named_children(): + child_name = f"{module_name}.{child_name}" if len(module_name) > 0 else child_name + attach_align_device_hook_on_blocks( + child, + execution_device=execution_device, + offload=offload, + weights_map=weights_map, + offload_buffers=offload_buffers, + module_name=child_name, + preload_module_classes=preload_module_classes, + skip_keys=skip_keys, + ) + + +class CpuOffload(ModelHook): + """ + Offloads a model on the CPU until its forward pass is called. The model will not be offloaded back to the CPU after + the forward, the user needs to call the `init_hook` method again for this. + + Args: + execution_device(`str`, `int` or `torch.device`, *optional*): + The device on which the model should be executed. Will default to the MPS device if it's available, then + GPU 0 if there is a GPU, and finally to the CPU. + prev_module_hook (`UserCpuOffloadHook`, *optional*): + The hook sent back by [`cpu_offload_with_hook`] for a previous model in the pipeline you are running. If + passed, its offload method will be called just before the forward of the model to which this hook is + attached. 
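The docstring above refers to `cpu_offload_with_hook`, which is the helper that attaches this hook and returns the matching `UserCpuOffloadHook`. A hedged sketch of the pipeline pattern it enables (the three stages and the device fallback are illustrative):

```python
import torch
from torch import nn

from accelerate import cpu_offload_with_hook

device = "cuda:0" if torch.cuda.is_available() else "cpu"

# Each stage stays on CPU until its forward is called; passing the previous stage's
# hook as `prev_module_hook` offloads that stage right before the next one runs.
stage1, stage2, stage3 = nn.Linear(4, 4), nn.Linear(4, 4), nn.Linear(4, 4)

stage1, hook1 = cpu_offload_with_hook(stage1, execution_device=device)
stage2, hook2 = cpu_offload_with_hook(stage2, execution_device=device, prev_module_hook=hook1)
stage3, hook3 = cpu_offload_with_hook(stage3, execution_device=device, prev_module_hook=hook2)

x = torch.randn(1, 4)
y = stage3(stage2(stage1(x)))

hook3.offload()  # put the last stage back on CPU when the pipeline is done
```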
+ """ + + def __init__( + self, + execution_device: Optional[Union[str, int, torch.device]] = None, + prev_module_hook: Optional["UserCpuOffloadHook"] = None, + ): + self.prev_module_hook = prev_module_hook + + self.execution_device = execution_device if execution_device is not None else PartialState().default_device + + def init_hook(self, module): + return module.to("cpu") + + def pre_forward(self, module, *args, **kwargs): + if self.prev_module_hook is not None: + self.prev_module_hook.offload() + module.to(self.execution_device) + return send_to_device(args, self.execution_device), send_to_device(kwargs, self.execution_device) + + +class UserCpuOffloadHook: + """ + A simple hook grouping a model and a `ModelHook`, which provides easy APIs for to call the init method of the hook + or remove it entirely. + """ + + def __init__(self, model, hook): + self.model = model + self.hook = hook + + def offload(self): + self.hook.init_hook(self.model) + + def remove(self): + remove_hook_from_module(self.model) diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/logging.py b/evalkit_tf437/lib/python3.10/site-packages/accelerate/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..baf89701b2e537ba3ac35eaec6eabda82a9a46ac --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/accelerate/logging.py @@ -0,0 +1,112 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import os + +from .state import PartialState + + +class MultiProcessAdapter(logging.LoggerAdapter): + """ + An adapter to assist with logging in multiprocess. + + `log` takes in an additional `main_process_only` kwarg, which dictates whether it should be called on all processes + or only the main executed one. Default is `main_process_only=True`. + + Does not require an `Accelerator` object to be created first. + """ + + @staticmethod + def _should_log(main_process_only): + "Check if log should be performed" + state = PartialState() + return not main_process_only or (main_process_only and state.is_main_process) + + def log(self, level, msg, *args, **kwargs): + """ + Delegates logger call after checking if we should log. + + Accepts a new kwarg of `main_process_only`, which will dictate whether it will be logged across all processes + or only the main executed one. Default is `True` if not passed + + Also accepts "in_order", which if `True` makes the processes log one by one, in order. This is much easier to + read, but comes at the cost of sometimes needing to wait for the other processes. Default is `False` to not + break with the previous behavior. + + `in_order` is ignored if `main_process_only` is passed. + """ + if PartialState._shared_state == {}: + raise RuntimeError( + "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." 
+ ) + main_process_only = kwargs.pop("main_process_only", True) + in_order = kwargs.pop("in_order", False) + + if self.isEnabledFor(level): + if self._should_log(main_process_only): + msg, kwargs = self.process(msg, kwargs) + self.logger.log(level, msg, *args, **kwargs) + + elif in_order: + state = PartialState() + for i in range(state.num_processes): + if i == state.process_index: + msg, kwargs = self.process(msg, kwargs) + self.logger.log(level, msg, *args, **kwargs) + state.wait_for_everyone() + + +def get_logger(name: str, log_level: str = None): + """ + Returns a `logging.Logger` for `name` that can handle multiprocessing. + + If a log should be called on all processes, pass `main_process_only=False` If a log should be called on all + processes and in order, also pass `in_order=True` + + Args: + name (`str`): + The name for the logger, such as `__file__` + log_level (`str`, *optional*): + The log level to use. If not passed, will default to the `LOG_LEVEL` environment variable, or `INFO` if not + + Example: + + ```python + >>> from accelerate.logging import get_logger + + >>> logger = get_logger(__name__) + + >>> logger.info("My log", main_process_only=False) + >>> logger.debug("My log", main_process_only=True) + + >>> logger = get_logger(__name__, log_level="DEBUG") + >>> logger.info("My log") + >>> logger.debug("My second log") + + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> array = ["a", "b", "c", "d"] + >>> letter_at_rank = array[accelerator.process_index] + >>> logger.info(letter_at_rank, in_order=True) + ``` + """ + if log_level is None: + log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None) + logger = logging.getLogger(name) + if log_level is not None: + logger.setLevel(log_level.upper()) + logger.root.setLevel(log_level.upper()) + return MultiProcessAdapter(logger, {}) diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/memory_utils.py b/evalkit_tf437/lib/python3.10/site-packages/accelerate/memory_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..fa2e2c8b9d7d0064c3e5e282737a7ad6919bde29 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/accelerate/memory_utils.py @@ -0,0 +1,22 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings + + +warnings.warn( + "memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: " + "`from accelerate import find_executable_batch_size` to avoid this warning.", + FutureWarning, +) diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/optimizer.py b/evalkit_tf437/lib/python3.10/site-packages/accelerate/optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..fa4f164b4cf139c02b961a0596e826e98b4f60a6 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/accelerate/optimizer.py @@ -0,0 +1,187 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. 
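The deprecation notice in `memory_utils.py` above points to the top-level import of `find_executable_batch_size`. A short sketch of how that utility is used, with a hypothetical training body standing in for real work:

```python
from accelerate import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def training_function(batch_size):
    # Hypothetical body: if this raises a CUDA out-of-memory error, the decorator
    # frees cached memory and retries with the batch size halved until it fits.
    print(f"Trying batch size {batch_size}")

training_function()  # the decorator injects `batch_size`, starting at 128
```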
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import warnings + +import torch + +from .state import AcceleratorState, GradientState +from .utils import DistributedType, honor_type, is_tpu_available + + +if is_tpu_available(check_device=False): + import torch_xla.core.xla_model as xm + + +def move_to_device(state, device): + if isinstance(state, (list, tuple)): + return honor_type(state, (move_to_device(t, device) for t in state)) + elif isinstance(state, dict): + return type(state)({k: move_to_device(v, device) for k, v in state.items()}) + elif isinstance(state, torch.Tensor): + return state.to(device) + return state + + +class AcceleratedOptimizer(torch.optim.Optimizer): + """ + Internal wrapper around a torch optimizer. + + Conditionally will perform `step` and `zero_grad` if gradients should be synchronized when performing gradient + accumulation. + + Args: + optimizer (`torch.optim.optimizer.Optimizer`): + The optimizer to wrap. + device_placement (`bool`, *optional*, defaults to `True`): + Whether or not the optimizer should handle device placement. If so, it will place the state dictionary of + `optimizer` on the right device. + scaler (`torch.cuda.amp.grad_scaler.GradScaler`, *optional*): + The scaler to use in the step function if training with mixed precision. 
+ """ + + def __init__(self, optimizer, device_placement=True, scaler=None): + self.optimizer = optimizer + self.scaler = scaler + self.accelerator_state = AcceleratorState() + self.gradient_state = GradientState() + self.device_placement = device_placement + self._is_overflow = False + + if self.scaler is not None: + self._accelerate_step_called = False + self._optimizer_original_step_method = self.optimizer.step + self._optimizer_patched_step_method = patch_optimizer_step(self, self.optimizer.step) + + # Handle device placement + if device_placement: + state_dict = self.optimizer.state_dict() + if self.accelerator_state.distributed_type == DistributedType.TPU: + xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device) + else: + state_dict = move_to_device(state_dict, self.accelerator_state.device) + self.optimizer.load_state_dict(state_dict) + + @property + def state(self): + return self.optimizer.state + + @state.setter + def state(self, state): + self.optimizer.state = state + + @property + def param_groups(self): + return self.optimizer.param_groups + + @param_groups.setter + def param_groups(self, param_groups): + self.optimizer.param_groups = param_groups + + @property + def defaults(self): + return self.optimizer.defaults + + @defaults.setter + def defaults(self, defaults): + self.optimizer.defaults = defaults + + def add_param_group(self, param_group): + self.optimizer.add_param_group(param_group) + + def load_state_dict(self, state_dict): + if self.accelerator_state.distributed_type == DistributedType.TPU and self.device_placement: + xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device) + self.optimizer.load_state_dict(state_dict) + + def state_dict(self): + return self.optimizer.state_dict() + + def zero_grad(self, set_to_none=None): + if self.gradient_state.sync_gradients: + accept_arg = "set_to_none" in inspect.signature(self.optimizer.zero_grad).parameters + if accept_arg: + if set_to_none is None: + set_to_none = False + self.optimizer.zero_grad(set_to_none=set_to_none) + else: + if set_to_none is not None: + raise ValueError("`set_to_none` for Optimizer.zero_grad` is not supported by this optimizer.") + self.optimizer.zero_grad() + + def step(self, closure=None): + if self.gradient_state.sync_gradients: + if self.accelerator_state.distributed_type == DistributedType.TPU: + optimizer_args = {"closure": closure} if closure is not None else {} + xm.optimizer_step(self.optimizer, optimizer_args=optimizer_args) + elif self.scaler is not None: + self.optimizer.step = self._optimizer_patched_step_method + + self.scaler.step(self.optimizer, closure) + self.scaler.update() + + if not self._accelerate_step_called: + # If the optimizer step was skipped, gradient overflow was detected. 
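Users normally never build this wrapper directly; `Accelerator.prepare` returns it in place of the raw optimizer. A sketch of that flow under mixed precision, where a step can be skipped when the scaler detects an overflow (model, data, and the CPU fallback are illustrative):

```python
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator

accelerator = Accelerator(mixed_precision="fp16" if torch.cuda.is_available() else "no")

model = nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
dataloader = DataLoader(TensorDataset(torch.randn(16, 4), torch.randn(16, 1)), batch_size=4)

# `prepare` wraps the optimizer in AcceleratedOptimizer: its state dict is moved to the
# right device and, under fp16, `step` is routed through the GradScaler as shown above.
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for x, y in dataloader:
    optimizer.zero_grad()
    loss = nn.functional.mse_loss(model(x), y)
    accelerator.backward(loss)
    optimizer.step()
    if optimizer.step_was_skipped:  # True when the scaler skipped the step on overflow
        print("optimizer step skipped due to gradient overflow")
```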
+ self._is_overflow = True + else: + self._is_overflow = False + # Reset the step method to the original one + self.optimizer.step = self._optimizer_original_step_method + # Reset the indicator + self._accelerate_step_called = False + else: + self.optimizer.step(closure) + + def _switch_parameters(self, parameters_map): + for param_group in self.optimizer.param_groups: + param_group["params"] = [parameters_map.get(p, p) for p in param_group["params"]] + + @property + def is_overflow(self): + """Whether or not the optimizer step was done, or skipped because of gradient overflow.""" + warnings.warn( + "The `is_overflow` property is deprecated and will be removed in version 1.0 of Accelerate use " + "`optimizer.step_was_skipped` instead.", + FutureWarning, + ) + return self._is_overflow + + @property + def step_was_skipped(self): + """Whether or not the optimizer step was skipped.""" + return self._is_overflow + + def __getstate__(self): + _ignored_keys = [ + "_accelerate_step_called", + "_optimizer_original_step_method", + "_optimizer_patched_step_method", + ] + return {k: v for k, v in self.__dict__.items() if k not in _ignored_keys} + + def __setstate__(self, state): + self.__dict__.update(state) + if self.scaler is not None: + self._accelerate_step_called = False + self._optimizer_original_step_method = self.optimizer.step + self._optimizer_patched_step_method = patch_optimizer_step(self, self.optimizer.step) + + +def patch_optimizer_step(accelerated_optimizer: AcceleratedOptimizer, method): + def patched_step(*args, **kwargs): + accelerated_optimizer._accelerate_step_called = True + return method(*args, **kwargs) + + return patched_step diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/state.py b/evalkit_tf437/lib/python3.10/site-packages/accelerate/state.py new file mode 100644 index 0000000000000000000000000000000000000000..6e427100ceef34e9de7eec91efed5bd6098b18df --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/accelerate/state.py @@ -0,0 +1,1046 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import math +import os +import threading +import warnings +from contextlib import contextmanager +from functools import partial +from typing import Any, Callable, Optional + +import torch + +from .utils import ( + DistributedType, + DynamoBackend, + GradientAccumulationPlugin, + get_ccl_version, + get_int_from_env, + is_ccl_available, + is_deepspeed_available, + is_fp8_available, + is_ipex_available, + is_mps_available, + is_npu_available, + is_tpu_available, + is_xpu_available, + parse_choice_from_env, + parse_flag_from_env, +) +from .utils.dataclasses import SageMakerDistributedType + + +if is_tpu_available(check_device=False): + import torch_xla.core.xla_model as xm + + +if is_npu_available(check_device=False): + import torch_npu # noqa: F401 + + +def is_initialized() -> bool: + """ + Checks if the `AcceleratorState` has been initialized from `Accelerator`. 
Same as `AcceleratorState.initialized`, + but works as a module method. + """ + return AcceleratorState._shared_state != {} + + +# Lambda function that does nothing +def do_nothing(*args, **kwargs): + return None + + +class ThreadLocalSharedDict(threading.local): + """ + Descriptor that holds a dict shared between instances of a class in the same thread. + + Note: Descriptors have slightly different semantics than just a dict field on its own. + `PartialState(...)._shared_state` and `PartialState._shared_state` (instance vs class) give the same value: the + underlying _storage dict. Likewise, `PartialState(...)._shared_state = {...}` overrides the _storage dict inside + the descriptor as you would expect. However, `PartialState._shared_state = {}` actually replaces the descriptor + object with a dict instead Thus, you should modify the _storage dict in-place (e.g. `_shared_state.clear()`). + + See Python documentation for an explanation of descriptors: https://docs.python.org/3/howto/descriptor.html + + This is required for using PyTorch/XLA with PJRT in multithreaded mode (required for TPU v2 and v3). + + See https://github.com/pytorch/xla/blob/r2.0/docs/pjrt.md#multithreading-on-tpu-v2v3 + """ + + def __init__(self, thread_local: bool = False): + self._storage = {} + + def __get__(self, obj, objtype=None): + return self._storage + + def __set__(self, obj, value): + self._storage = value + + +# Prefer global shared dictionary, except when using TPU. +SharedDict = dict if not is_tpu_available(check_device=False) else ThreadLocalSharedDict + + +# Inspired by Alex Martelli's 'Borg'. +class PartialState: + """ + Singleton class that has information about the current training environment and functions to help with process + control. Designed to be used when only process control and device execution states are needed. Does *not* need to + be initialized from `Accelerator`. + + **Available attributes:** + + - **device** (`torch.device`) -- The device to use. + - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently + in use. + - **local_process_index** (`int`) -- The index of the current process on the current server. + - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type + of mixed precision being performed. (Choose from 'no','fp16','bf16 or 'fp8'). + - **num_processes** (`int`) -- The number of processes currently launched in parallel. + - **process_index** (`int`) -- The index of the current process. + - **is_last_process** (`bool`) -- Whether or not the current process is the last one. + - **is_main_process** (`bool`) -- Whether or not the current process is the main one. + - **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node. + - **debug** (`bool`) -- Whether or not the current script is being run in debug mode. 
+ """ + + _shared_state = SharedDict() + + def __init__(self, cpu: bool = False, **kwargs): + self.__dict__ = self._shared_state + if not self.initialized: + self._cpu = cpu + self.backend = None + env_device = os.environ.get("ACCELERATE_TORCH_DEVICE", None) + self.device = torch.device(env_device) if env_device is not None else None + self.debug = parse_flag_from_env("ACCELERATE_DEBUG_MODE") + use_sagemaker_dp = kwargs.pop("_use_sagemaker_dp", None) + if use_sagemaker_dp is None: + use_sagemaker_dp = ( + os.environ.get("ACCELERATE_USE_SAGEMAKER", "false") == "true" + and os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") != SageMakerDistributedType.NO + ) + + if use_sagemaker_dp and not cpu: + if ( + os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") == SageMakerDistributedType.DATA_PARALLEL + ) or use_sagemaker_dp: + self.distributed_type = DistributedType.MULTI_GPU + import smdistributed.dataparallel.torch.torch_smddp # noqa + + if not torch.distributed.is_initialized(): + torch.distributed.init_process_group(backend="smddp") + self.backend = "smddp" + self.num_processes = torch.distributed.get_world_size() + self.process_index = torch.distributed.get_rank() + self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) + if self.device is None: + self.device = torch.device("cuda", self.local_process_index) + torch.cuda.set_device(self.device) + elif is_tpu_available() and not cpu: + self.distributed_type = DistributedType.TPU + self.num_processes = xm.xrt_world_size() + self.process_index = xm.get_ordinal() + self.local_process_index = xm.get_local_ordinal() + self.device = xm.xla_device() + elif ( + os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true" + and int(os.environ.get("LOCAL_RANK", -1)) != -1 + and not cpu + ): + assert ( + is_deepspeed_available() + ), "DeepSpeed is not available => install it using `pip3 install deepspeed` or build it from source" + self.distributed_type = DistributedType.DEEPSPEED + if not torch.distributed.is_initialized(): + from deepspeed import comm as dist + + # DeepSpeed always uses nccl + kwargs.pop("backend", None) + if is_xpu_available and is_ccl_available(): + # Set DeepSpeed backend to ccl for xpu + self.backend = "ccl" + else: + self.backend = "nccl" + dist.init_distributed(dist_backend=self.backend, auto_mpi_discovery=False, **kwargs) + + self.num_processes = torch.distributed.get_world_size() + self.process_index = torch.distributed.get_rank() + self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) + if self.device is None: + if is_xpu_available(): + self.device = torch.device("xpu", self.local_process_index) + if self.device is not None: + torch.xpu.set_device(self.device) + else: + self.device = torch.device("cuda", self.local_process_index) + if self.device is not None: + torch.cuda.set_device(self.device) + self._mixed_precision = "no" # deepspeed handles mixed_precision using deepspeed_config + elif int(os.environ.get("LOCAL_RANK", -1)) != -1 and not cpu and torch.cuda.is_available(): + self.distributed_type = DistributedType.MULTI_GPU + if not torch.distributed.is_initialized(): + self.backend = kwargs.pop("backend", "nccl") + # Special case for `TrainingArguments`, where `backend` will be `None` + if self.backend is None: + self.backend = "nccl" + torch.distributed.init_process_group(backend=self.backend, **kwargs) + self.num_processes = torch.distributed.get_world_size() + self.process_index = torch.distributed.get_rank() + self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) + if 
self.device is None: + self.device = torch.device("cuda", self.local_process_index) + torch.cuda.set_device(self.device) + elif is_npu_available() and not cpu and int(os.environ.get("LOCAL_RANK", -1)) != -1: + self.distributed_type = DistributedType.MULTI_NPU + if not torch.distributed.is_initialized(): + # Backend is not set by the user, we set it here + kwargs.pop("backend", None) + self.backend = "hccl" + torch.distributed.init_process_group(backend=self.backend, **kwargs) + self.num_processes = torch.distributed.get_world_size() + self.process_index = torch.distributed.get_rank() + self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) + if self.device is None: + self.device = torch.device("npu", self.local_process_index) + torch.npu.set_device(self.device) + elif get_int_from_env(["PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE", "WORLD_SIZE"], 1) > 1: + if not cpu and is_xpu_available(): + self.distributed_type = DistributedType.MULTI_XPU + else: + self.distributed_type = DistributedType.MULTI_CPU + # Actually, CCL_WORKER_COUNT is a CPU only env var in CCL, no need to set it for XPU. + if is_ccl_available() and ( + get_int_from_env(["CCL_WORKER_COUNT"], 0) > 0 or self.distributed_type == DistributedType.MULTI_XPU + ): + if get_ccl_version() >= "1.12": + import oneccl_bindings_for_pytorch # noqa: F401 + else: + import torch_ccl # noqa: F401 + backend = "ccl" + elif torch.distributed.is_mpi_available(): + backend = "mpi" + else: + backend = "gloo" + # Try to get launch configuration from environment variables set by MPI launcher - works for Intel MPI, OpenMPI and MVAPICH + rank = get_int_from_env(["RANK", "PMI_RANK", "OMPI_COMM_WORLD_RANK", "MV2_COMM_WORLD_RANK"], 0) + size = get_int_from_env(["WORLD_SIZE", "PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE"], 1) + local_rank = get_int_from_env( + ["LOCAL_RANK", "MPI_LOCALRANKID", "OMPI_COMM_WORLD_LOCAL_RANK", "MV2_COMM_WORLD_LOCAL_RANK"], 0 + ) + local_size = get_int_from_env( + ["MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"], 1 + ) + self.local_process_index = local_rank + os.environ["RANK"] = str(rank) + os.environ["WORLD_SIZE"] = str(size) + os.environ["LOCAL_RANK"] = str(local_rank) + if not os.environ.get("MASTER_PORT", None): + os.environ["MASTER_PORT"] = "29500" + if not os.environ.get("MASTER_ADDR", None): + if local_size != size and backend != "mpi": + raise ValueError( + "Looks like distributed multinode run but MASTER_ADDR env not set, " + "please try exporting rank 0's hostname as MASTER_ADDR" + ) + if ( + self.distributed_type == DistributedType.MULTI_CPU + and get_int_from_env(["OMP_NUM_THREADS", "MKL_NUM_THREADS"], 0) == 0 + ): + import psutil + + num_cpu_threads_per_process = int(psutil.cpu_count(logical=False) / local_size) + if num_cpu_threads_per_process == 0: + num_cpu_threads_per_process = 1 + torch.set_num_threads(num_cpu_threads_per_process) + warnings.warn( + f"OMP_NUM_THREADS/MKL_NUM_THREADS unset, we set it at {num_cpu_threads_per_process} to improve oob" + " performance." 
+ ) + if not torch.distributed.is_initialized(): + # Backend is not set by the user, we set it here + kwargs.pop("backend", None) + self.backend = backend + torch.distributed.init_process_group(self.backend, rank=rank, world_size=size, **kwargs) + self.num_processes = torch.distributed.get_world_size() + self.process_index = torch.distributed.get_rank() + if cpu: + self.device = torch.device("cpu") + elif is_xpu_available(): + self.device = torch.device("xpu", self.local_process_index) + torch.xpu.set_device(self.device) + else: + self.device = self.default_device + else: + self.distributed_type = DistributedType.NO + self.num_processes = 1 + self.process_index = self.local_process_index = 0 + + if self.device is None: + self.device = torch.device("cpu") if cpu else self.default_device + + self.fork_launched = parse_flag_from_env("FORK_LAUNCHED", 0) + + def __repr__(self) -> str: + return ( + f"Distributed environment: {self.distributed_type}{(' Backend: ' + self.backend) if self.backend else ''}\n" + f"Num processes: {self.num_processes}\n" + f"Process index: {self.process_index}\n" + f"Local process index: {self.local_process_index}\n" + f"Device: {self.device}\n" + ) + + @staticmethod + def _reset_state(): + "Resets `_shared_state`, is used internally and should not be called" + PartialState._shared_state.clear() + + @property + def initialized(self) -> bool: + "Returns whether the `PartialState` has been initialized" + return self._shared_state != {} + + @property + def use_distributed(self): + """ + Whether the Accelerator is configured for distributed training + """ + return self.distributed_type != DistributedType.NO and self.num_processes > 1 + + @property + def is_last_process(self) -> bool: + "Returns whether the current process is the last one" + return self.process_index == self.num_processes - 1 + + @property + def is_main_process(self) -> bool: + "Returns whether the current process is the main process" + return ( + self.process_index == 0 if self.distributed_type != DistributedType.MEGATRON_LM else self.is_last_process + ) + + @property + def is_local_main_process(self) -> bool: + "Returns whether the current process is the main process on the local node" + return ( + self.local_process_index == 0 + if self.distributed_type != DistributedType.MEGATRON_LM + else self.is_last_process + ) + + def wait_for_everyone(self): + """ + Will stop the execution of the current process until every other process has reached that point (so this does + nothing when the script is only run in one process). Useful to do before saving a model. + + Example: + + ```python + >>> # Assuming two GPU processes + >>> import time + >>> from accelerate.state import PartialState + + >>> state = PartialState() + >>> if state.is_main_process: + ... time.sleep(2) + >>> else: + ... 
print("I'm waiting for the main process to finish its sleep...") + >>> state.wait_for_everyone() + >>> # Should print on every process at the same time + >>> print("Everyone is here") + ``` + """ + if self.distributed_type in ( + DistributedType.MULTI_GPU, + DistributedType.MULTI_NPU, + DistributedType.MULTI_XPU, + DistributedType.MULTI_CPU, + DistributedType.DEEPSPEED, + DistributedType.FSDP, + ): + torch.distributed.barrier() + elif self.distributed_type == DistributedType.TPU: + xm.rendezvous("accelerate.utils.wait_for_everyone") + + def _goes_first(self, is_main: bool): + if not is_main: + self.wait_for_everyone() + + yield + + if is_main: + self.wait_for_everyone() + + @contextmanager + def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False): + """ + Splits `input` between `self.num_processes` quickly and can be then used on that process. Useful when doing + distributed inference, such as with different prompts. + + Note that when using a `dict`, all keys need to have the same number of elements. + + Args: + inputs (`list`, `tuple`, `torch.Tensor`, or `dict` of `list`/`tuple`/`torch.Tensor`): + The input to split between processes. + apply_padding (`bool`, `optional`, defaults to `False`): + Whether to apply padding by repeating the last element of the input so that all processes have the same + number of elements. Useful when trying to perform actions such as `gather()` on the outputs or passing + in less inputs than there are processes. If so, just remember to drop the padded elements afterwards. + + + Example: + + ```python + # Assume there are two processes + from accelerate import PartialState + + state = PartialState() + with state.split_between_processes(["A", "B", "C"]) as inputs: + print(inputs) + # Process 0 + ["A", "B"] + # Process 1 + ["C"] + + with state.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs: + print(inputs) + # Process 0 + ["A", "B"] + # Process 1 + ["C", "C"] + ``` + """ + if self.num_processes == 1: + yield inputs + return + length = len(inputs) + # Nested dictionary of any types + if isinstance(inputs, dict): + length = len(inputs[list(inputs.keys())[0]]) + if not all(len(v) == length for v in inputs.values()): + raise ValueError("All values in the dictionary must have the same length") + num_samples_per_process = math.ceil(length / self.num_processes) + start_index = self.process_index * num_samples_per_process + end_index = start_index + num_samples_per_process + if (len(inputs) % self.num_processes != 0) and (self.process_index == self.num_processes - 1): + end_index = length + + def _split_values(inputs, start_index, end_index): + if isinstance(inputs, (list, tuple, torch.Tensor)): + if start_index >= len(inputs): + result = inputs[-1:] + else: + result = inputs[start_index:end_index] + if apply_padding: + if isinstance(result, torch.Tensor): + from accelerate.utils import pad_across_processes, send_to_device + + # The tensor needs to be on the device before we can pad it + tensorized_result = send_to_device(result, self.device) + result = pad_across_processes(tensorized_result, pad_index=inputs[-1]) + else: + result += [result[-1]] * (num_samples_per_process - len(result)) + return result + elif isinstance(inputs, dict): + for key in inputs.keys(): + inputs[key] = _split_values(inputs[key], start_index, end_index) + return inputs + else: + return inputs + + yield _split_values(inputs, start_index, end_index) + + @contextmanager + def main_process_first(self): + """ + Lets the main 
process go first inside a with block. + + The other processes will enter the with block after the main process exits. + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> with accelerator.main_process_first(): + ... # This will be printed first by process 0 then in a seemingly + ... # random order by the other processes. + ... print(f"This will be printed by process {accelerator.process_index}") + ``` + """ + yield from self._goes_first(self.is_main_process) + + @contextmanager + def local_main_process_first(self): + """ + Lets the local main process go inside a with block. + + The other processes will enter the with block after the main process exits. + + Example: + + ```python + >>> from accelerate.state import PartialState + + >>> state = PartialState() + >>> with state.local_main_process_first(): + ... # This will be printed first by local process 0 then in a seemingly + ... # random order by the other processes. + ... print(f"This will be printed by process {state.local_process_index}") + ``` + """ + yield from self._goes_first(self.is_local_main_process) + + def on_main_process(self, function: Callable[..., Any] = None): + """ + Decorator that only runs the decorated function on the main process. + + Args: + function (`Callable`): The function to decorate. + + Example: + + ```python + >>> from accelerate.state import PartialState + + >>> state = PartialState() + + + >>> @state.on_main_process + ... def print_something(): + ... print("This will be printed by process 0 only.") + + + >>> print_something() + "This will be printed by process 0 only" + ``` + """ + if not self.initialized: + raise ValueError("The `PartialState` or `Accelerator` must be initialized before calling this function.") + if self.is_main_process or not self.use_distributed: + return function + return do_nothing + + def on_local_main_process(self, function: Callable[..., Any] = None): + """ + Decorator that only runs the decorated function on the local main process. + + Args: + function (`Callable`): The function to decorate. + + Example: + ```python + # Assume we have 2 servers with 4 processes each. + from accelerate.state import PartialState + + state = PartialState() + + + @state.on_local_main_process + def print_something(): + print("This will be printed by process 0 only on each server.") + + + print_something() + # On server 1: + "This will be printed by process 0 only" + # On server 2: + "This will be printed by process 0 only" + ``` + """ + if self.is_local_main_process or not self.use_distributed: + return function + return do_nothing + + def on_last_process(self, function: Callable[..., Any]): + """ + Decorator that only runs the decorated function on the last process. + + Args: + function (`Callable`): The function to decorate. + + Example: + ```python + # Assume we have 4 processes. + from accelerate.state import PartialState + + state = PartialState() + + + @state.on_last_process + def print_something(): + print(f"Printed on process {state.process_index}") + + + print_something() + "Printed on process 3" + ``` + """ + if self.is_last_process or not self.use_distributed: + return function + return do_nothing + + def on_process(self, function: Callable[..., Any] = None, process_index: int = None): + """ + Decorator that only runs the decorated function on the process with the given index. + + Args: + function (`Callable`, `optional`): + The function to decorate. 
+ process_index (`int`, `optional`): + The index of the process on which to run the function. + + Example: + ```python + # Assume we have 4 processes. + from accelerate.state import PartialState + + state = PartialState() + + + @state.on_process(process_index=2) + def print_something(): + print(f"Printed on process {state.process_index}") + + + print_something() + "Printed on process 2" + ``` + """ + if function is None: + return partial(self.on_process, process_index=process_index) + if (self.process_index == process_index) or (not self.use_distributed): + return function + return do_nothing + + def on_local_process(self, function: Callable[..., Any] = None, local_process_index: int = None): + """ + Decorator that only runs the decorated function on the process with the given index on the current node. + + Args: + function (`Callable`, *optional*): + The function to decorate. + local_process_index (`int`, *optional*): + The index of the local process on which to run the function. + + Example: + ```python + # Assume we have 2 servers with 4 processes each. + from accelerate import Accelerator + + accelerator = Accelerator() + + + @accelerator.on_local_process(local_process_index=2) + def print_something(): + print(f"Printed on process {accelerator.local_process_index}") + + + print_something() + # On server 1: + "Printed on process 2" + # On server 2: + "Printed on process 2" + ``` + """ + if function is None: + return partial(self.on_local_process, local_process_index=local_process_index) + if (self.local_process_index == local_process_index) or (not self.use_distributed): + return function + return do_nothing + + def print(self, *args, **kwargs): + if self.is_local_main_process: + print(*args, **kwargs) + + @property + def default_device(self) -> torch.device: + """ + Returns the default device which is: + - MPS if `torch.backends.mps.is_available()` and `torch.backends.mps.is_built()` both return True. + - CUDA if `torch.cuda.is_available()` + - NPU if `is_npu_available()` + - CPU otherwise + """ + if is_mps_available(): + os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" + return torch.device("mps") + elif torch.cuda.is_available(): + return torch.device("cuda") + elif is_xpu_available(): + return torch.device("xpu:0") + elif is_npu_available(): + return torch.device("npu") + else: + return torch.device("cpu") + + +class AcceleratorState: + """ + Singleton class that has information about the current training environment. + + **Available attributes:** + + - **device** (`torch.device`) -- The device to use. + - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently + in use. + - **initialized** (`bool`) -- Whether or not the `AcceleratorState` has been initialized from `Accelerator`. + - **local_process_index** (`int`) -- The index of the current process on the current server. + - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type + of mixed precision being performed. (Choose from 'no','fp16','bf16 or 'fp8'). + - **num_processes** (`int`) -- The number of processes currently launched in parallel. + - **process_index** (`int`) -- The index of the current process. + - **is_last_process** (`bool`) -- Whether or not the current process is the last one. + - **is_main_process** (`bool`) -- Whether or not the current process is the main one. + - **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node. 
+ - **debug** (`bool`) -- Whether or not the current script is being run in debug mode. + """ + + _shared_state = SharedDict() + + def __init__( + self, + mixed_precision: str = None, + cpu: bool = False, + dynamo_plugin=None, + deepspeed_plugin=None, + fsdp_plugin=None, + megatron_lm_plugin=None, + _from_accelerator: bool = False, + **kwargs, + ): + self.__dict__ = self._shared_state + if parse_flag_from_env("ACCELERATE_USE_CPU"): + cpu = True + if PartialState._shared_state == {}: + PartialState(cpu, **kwargs) + self.__dict__.update(PartialState._shared_state) + self._check_initialized(mixed_precision, cpu) + if not self.initialized: + self.deepspeed_plugin = None + self.use_ipex = None + mixed_precision = ( + parse_choice_from_env("ACCELERATE_MIXED_PRECISION", "no") + if mixed_precision is None + else mixed_precision.lower() + ) + if mixed_precision == "fp8" and not is_fp8_available(): + raise ValueError("Using `fp8` precision requires `transformer_engine` to be installed.") + self.dynamo_plugin = dynamo_plugin + if not _from_accelerator: + raise ValueError( + "Please make sure to properly initialize your accelerator via `accelerator = Accelerator()` " + "before using any functionality from the `accelerate` library." + ) + # deepspeed handles mixed_precision using deepspeed_config + self._mixed_precision = "no" if self.distributed_type == DistributedType.DEEPSPEED else mixed_precision + if self.distributed_type == DistributedType.TPU: + if mixed_precision == "bf16": + if os.environ.get("ACCELERATE_DOWNCAST_BF16"): + os.environ["XLA_USE_BF16"] = str(0) + os.environ["XLA_DOWNCAST_BF16"] = str(1) + self.downcast_bfloat = True + else: + os.environ["XLA_USE_BF16"] = str(1) + os.environ["XLA_DOWNCAST_BF16"] = str(0) + self.downcast_bfloat = False + elif os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true" and not cpu: + self.deepspeed_plugin = deepspeed_plugin + elif self.distributed_type == DistributedType.MULTI_GPU: + if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true": + self.distributed_type = DistributedType.FSDP + if self._mixed_precision != "no": + fsdp_plugin.set_mixed_precision(self._mixed_precision) + self.fsdp_plugin = fsdp_plugin + if os.environ.get("ACCELERATE_USE_MEGATRON_LM", "false") == "true": + self.distributed_type = DistributedType.MEGATRON_LM + megatron_lm_plugin.set_mixed_precision(self._mixed_precision) + self.megatron_lm_plugin = megatron_lm_plugin + elif self.distributed_type == DistributedType.MULTI_NPU: + if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true": + self.distributed_type = DistributedType.FSDP + if self._mixed_precision != "no": + fsdp_plugin.set_mixed_precision(self._mixed_precision) + self.fsdp_plugin = fsdp_plugin + elif self.distributed_type in [DistributedType.MULTI_CPU, DistributedType.MULTI_XPU, DistributedType.NO]: + if is_ipex_available(): + "check if user disables it explicitly" + self.use_ipex = parse_flag_from_env("ACCELERATE_USE_IPEX", default=True) + else: + self.use_ipex = False + if self.distributed_type == DistributedType.MULTI_XPU: + if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true": + self.distributed_type = DistributedType.FSDP + if self._mixed_precision != "no": + fsdp_plugin.set_mixed_precision(self._mixed_precision) + self.fsdp_plugin = fsdp_plugin + + if ( + self.dynamo_plugin.backend != DynamoBackend.NO + and self._mixed_precision == "no" + and self.device.type == "cuda" + ): + torch.backends.cuda.matmul.allow_tf32 = True + PartialState._shared_state["distributed_type"] = self.distributed_type + + 
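Since `AcceleratorState` can only be fully populated through `Accelerator` (see the `_from_accelerator` check above), a minimal sketch of inspecting it afterwards; the printed attributes are the ones listed in the docstring:

```python
from accelerate import Accelerator
from accelerate.state import AcceleratorState, PartialState

accelerator = Accelerator()  # initializes PartialState and AcceleratorState as a side effect

state = AcceleratorState()   # singleton: shares its state with accelerator.state
print(state.distributed_type, state.num_processes, state.mixed_precision)

# PartialState alone is enough when only process control is needed.
if PartialState().is_main_process:
    print("running on the main process")
```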
@property + def initialized(self) -> bool: + return self._shared_state != PartialState._shared_state + + def __repr__(self): + repr = PartialState().__repr__() + f"\nMixed precision type: {self.mixed_precision}\n" + if self.distributed_type == DistributedType.DEEPSPEED: + repr += f"ds_config: {self.deepspeed_plugin.deepspeed_config}\n" + return repr + + def _check_initialized(self, mixed_precision=None, cpu=None): + "Checks if a modification is trying to be made and the `AcceleratorState` has already been initialized" + if self.initialized: + err = "AcceleratorState has already been initialized and cannot be changed, restart your runtime completely and pass `{flag}` to `Accelerator()`." + if cpu and self.device.type != "cpu": + raise ValueError(err.format(flag="cpu=True")) + if ( + mixed_precision is not None + and mixed_precision != self._mixed_precision + and self.distributed_type != DistributedType.DEEPSPEED + ): + raise ValueError(err.format(flag=f"mixed_precision='{mixed_precision}'")) + + # For backward compatibility + @property + def use_fp16(self): + warnings.warn( + "The `use_fp16` property is deprecated and will be removed in version 1.0 of Accelerate use " + "`AcceleratorState.mixed_precision == 'fp16'` instead.", + FutureWarning, + ) + return self._mixed_precision != "no" + + @property + def mixed_precision(self): + if self.distributed_type == DistributedType.DEEPSPEED: + config = self.deepspeed_plugin.deepspeed_config + if config.get("fp16", {}).get("enabled", False): + mixed_precision = "fp16" + elif config.get("bf16", {}).get("enabled", False): + mixed_precision = "bf16" + else: + mixed_precision = "no" + else: + mixed_precision = self._mixed_precision + return mixed_precision + + @staticmethod + def _reset_state(reset_partial_state: bool = False): + "Resets `_shared_state`, is used internally and should not be called" + AcceleratorState._shared_state.clear() + if reset_partial_state: + PartialState._reset_state() + + @property + def use_distributed(self): + """ + Whether the Accelerator is configured for distributed training + """ + return PartialState().use_distributed + + @property + def is_last_process(self) -> bool: + "Returns whether the current process is the last one" + return PartialState().is_last_process + + @property + def is_main_process(self) -> bool: + "Returns whether the current process is the main process" + return PartialState().is_main_process + + @property + def is_local_main_process(self) -> bool: + "Returns whether the current process is the main process on the local node" + return PartialState().is_local_main_process + + def wait_for_everyone(self): + PartialState().wait_for_everyone() + + @contextmanager + def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False): + """ + Splits `input` between `self.num_processes` quickly and can be then used on that process. Useful when doing + distributed inference, such as with different prompts. + + Note that when using a `dict`, all keys need to have the same number of elements. + + Args: + inputs (`list`, `tuple`, `torch.Tensor`, or `dict` of `list`/`tuple`/`torch.Tensor`): + The input to split between processes. + apply_padding (`bool`, `optional`, defaults to `False`): + Whether to apply padding by repeating the last element of the input so that all processes have the same + number of elements. Useful when trying to perform actions such as `gather()` on the outputs or passing + in less inputs than there are processes. 
If so, just remember to drop the padded elements afterwards. + + + Example: + + ```python + # Assume there are two processes + from accelerate.state import AcceleratorState + + state = AcceleratorState() + with state.split_between_processes(["A", "B", "C"]) as inputs: + print(inputs) + # Process 0 + ["A", "B"] + # Process 1 + ["C"] + + with state.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs: + print(inputs) + # Process 0 + ["A", "B"] + # Process 1 + ["C", "C"] + ``` + """ + with PartialState().split_between_processes(inputs, apply_padding=apply_padding) as inputs: + yield inputs + + @contextmanager + def main_process_first(self): + """ + Lets the main process go first inside a with block. + + The other processes will enter the with block after the main process exits. + """ + with PartialState().main_process_first(): + yield + + @contextmanager + def local_main_process_first(self): + """ + Lets the local main process go inside a with block. + + The other processes will enter the with block after the main process exits. + """ + with PartialState().local_main_process_first(): + yield + + def print(self, *args, **kwargs): + PartialState().print(*args, **kwargs) + + +class GradientState: + """ + Singleton class that has information related to gradient synchronization for gradient accumulation + + **Available attributes:** + + - **end_of_dataloader** (`bool`) -- Whether we have reached the end the current dataloader + - **remainder** (`int`) -- The number of extra samples that were added from padding the dataloader + - **sync_gradients** (`bool`) -- Whether the gradients should be synced across all devices + - **active_dataloader** (`Optional[DataLoader]`) -- The dataloader that is currently being iterated over + - **dataloader_references** (`List[Optional[DataLoader]]`) -- A list of references to the dataloaders that are + being iterated over + - **num_steps** (`int`) -- The number of steps to accumulate over + - **adjust_scheduler** (`bool`) -- Whether the scheduler should be adjusted to account for the gradient + accumulation + - **sync_with_dataloader** (`bool`) -- Whether the gradients should be synced at the end of the dataloader + iteration and the number of total steps reset + """ + + _shared_state = SharedDict() + + def __init__(self, gradient_accumulation_plugin: Optional[GradientAccumulationPlugin] = None): + self.__dict__ = self._shared_state + if not self.initialized: + self.sync_gradients = True + self.active_dataloader = None + self.dataloader_references = [None] + self.plugin_kwargs = ( + gradient_accumulation_plugin.to_kwargs() if gradient_accumulation_plugin is not None else {} + ) + + # Plugin args are different and can be updated + if gradient_accumulation_plugin is not None and self.plugin_kwargs != gradient_accumulation_plugin.to_kwargs(): + self.plugin_kwargs = gradient_accumulation_plugin.to_kwargs() + + @property + def num_steps(self) -> int: + "Returns the number of steps to accumulate over" + return self.plugin_kwargs.get("num_steps", 1) + + @property + def adjust_scheduler(self) -> bool: + "Returns whether the scheduler should be adjusted" + return self.plugin_kwargs.get("adjust_scheduler", False) + + @property + def sync_with_dataloader(self) -> bool: + "Returns whether the gradients should be synced at the end of the dataloader iteration and the number of total steps reset" + return self.plugin_kwargs.get("sync_with_dataloader", True) + + @property + def initialized(self) -> bool: + "Returns whether the `GradientState` has been initialized" 
+ return GradientState._shared_state != {} + + @property + def end_of_dataloader(self) -> bool: + "Returns whether we have reached the end of the current dataloader" + if not self.in_dataloader: + return False + return self.active_dataloader.end_of_dataloader + + @property + def remainder(self) -> int: + "Returns the number of extra samples that were added from padding the dataloader" + if not self.in_dataloader: + return -1 + return self.active_dataloader.remainder + + def __repr__(self): + return ( + f"Sync Gradients: {self.sync_gradients}\n" + f"At end of current dataloader: {self.end_of_dataloader}\n" + f"Extra samples added: {self.remainder}\n" + f"Gradient accumulation plugin: {self.plugin_kwargs}\n" + ) + + def _set_sync_gradients(self, sync_gradients): + "Private function that sets whether gradients should be synchronized. Users should not have to call this." + self.sync_gradients = sync_gradients + + def _add_dataloader(self, dataloader): + "Private function that adds a dataloader to `self.dataloader_references` and sets `in_dataloader` to `True`. Users should not have to call this." + self.active_dataloader = dataloader + self.dataloader_references.append(self.active_dataloader) + + def _remove_dataloader(self, dataloader): + "Private function that removes a dataloader from `self.dataloader_references` and sets `in_dataloader` to `False` if there are no more dataloaders. Users should not have to call this." + self.dataloader_references.remove(dataloader) + self.active_dataloader = self.dataloader_references[-1] + + @property + def in_dataloader(self) -> bool: + "Returns whether the current process is in a dataloader" + return self.active_dataloader is not None + + @staticmethod + def _reset_state(): + "Resets `_shared_state`, is used internally and should not be called" + GradientState._shared_state.clear() diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f716bf6d25d1b422bc10603e8c2eb45f61ecc1a4 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/__init__.py @@ -0,0 +1,24 @@ +from .testing import ( + are_the_same_tensors, + assert_exception, + execute_subprocess_async, + require_bnb, + require_cpu, + require_cuda, + require_huggingface_suite, + require_mps, + require_multi_gpu, + require_multi_xpu, + require_safetensors, + require_single_gpu, + require_single_xpu, + require_torch_min_version, + require_tpu, + require_xpu, + skip, + slow, +) +from .training import RegressionDataset, RegressionModel, RegressionModel4XPU + + +from .scripts import test_script, test_sync, test_ops # isort: skip diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_notebook.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_notebook.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b3dfac219a905bb59e2882daa062dbba7297b575 Binary files /dev/null and 
b/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_notebook.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_script.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_script.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..207a1547e5e04b3b31caf9e7bbb2c370fc5e00ed Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_script.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_sync.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_sync.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de18e933d894068997ebd97e5ca2d6dc6fc09c83 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_sync.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_peak_memory_usage.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_peak_memory_usage.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..444345f4da88480634834d779919f9eecb9cebcf Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_peak_memory_usage.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_checkpointing.py b/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_checkpointing.py new file mode 100644 index 0000000000000000000000000000000000000000..d865435289bc529a0c45b88f8584725f3acf064a --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_checkpointing.py @@ -0,0 +1,269 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
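The `require_*` and `slow` helpers re-exported by the `accelerate.test_utils` package added earlier in this diff are decorators that skip a test when a capability (CUDA, multiple GPUs, slow-test opt-in, ...) is not available. A minimal sketch of how they are typically applied follows; the test case itself is a hypothetical illustration and is not part of this diff.

```python
# Hypothetical test case illustrating the require_* / slow markers exported above;
# not part of this diff.
import unittest

import torch

from accelerate.test_utils import require_cuda, require_multi_gpu, slow


class ExampleSuite(unittest.TestCase):
    @require_cuda
    def test_needs_a_gpu(self):
        # Skipped automatically on machines without a CUDA device.
        self.assertTrue(torch.cuda.is_available())

    @require_multi_gpu
    def test_needs_two_gpus(self):
        # Skipped unless more than one GPU is visible.
        self.assertGreater(torch.cuda.device_count(), 1)

    @slow
    def test_long_running(self):
        # Only collected when slow tests are explicitly enabled (e.g. RUN_SLOW=1).
        self.assertEqual(sum(range(10)), 45)
```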
+import argparse +import json +import os + +import evaluate +import torch +from datasets import load_dataset +from torch.optim import AdamW +from torch.utils.data import DataLoader +from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed + +from accelerate import Accelerator, DistributedType +from accelerate.utils.deepspeed import DummyOptim, DummyScheduler + + +MAX_GPU_BATCH_SIZE = 16 +EVAL_BATCH_SIZE = 32 + + +def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"): + """ + Creates a set of `DataLoader`s for the `glue` dataset. + + Args: + accelerator (`Accelerator`): + An `Accelerator` object + batch_size (`int`, *optional*): + The batch size for the train and validation DataLoaders. + model_name (`str`, *optional*): + """ + tokenizer = AutoTokenizer.from_pretrained(model_name) + datasets = load_dataset("glue", "mrpc") + + def tokenize_function(examples): + # max_length=None => use the model max length (it's actually the default) + outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) + return outputs + + # Apply the method we just defined to all the examples in all the splits of the dataset + tokenized_datasets = datasets.map( + tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False + ) + + # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the + # transformers library + tokenized_datasets = tokenized_datasets.rename_column("label", "labels") + + def collate_fn(examples): + # On TPU it's best to pad everything to the same length or training will be very slow. + if accelerator.distributed_type == DistributedType.TPU: + return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") + return tokenizer.pad(examples, padding="longest", return_tensors="pt") + + # Instantiate dataloaders. + train_dataloader = DataLoader( + tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size + ) + eval_dataloader = DataLoader( + tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE + ) + + return train_dataloader, eval_dataloader + + +def evaluation_loop(accelerator, model, eval_dataloader, metric): + model.eval() + samples_seen = 0 + for step, batch in enumerate(eval_dataloader): + # We could avoid this line since we set the accelerator with `device_placement=True`. 
+ batch.to(accelerator.device) + with torch.no_grad(): + outputs = model(**batch) + predictions = outputs.logits.argmax(dim=-1) + # It is slightly faster to call this once, than multiple times + predictions, references = accelerator.gather( + (predictions, batch["labels"]) + ) # If we are in a multiprocess environment, the last batch has duplicates + if accelerator.use_distributed: + if step == len(eval_dataloader) - 1: + predictions = predictions[: len(eval_dataloader.dataset) - samples_seen] + references = references[: len(eval_dataloader.dataset) - samples_seen] + else: + samples_seen += references.shape[0] + metric.add_batch( + predictions=predictions, + references=references, + ) + + eval_metric = metric.compute() + return eval_metric["accuracy"] + + +def training_function(config, args): + # Initialize accelerator + accelerator = Accelerator() + + # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs + lr = config["lr"] + num_epochs = int(config["num_epochs"]) + seed = int(config["seed"]) + batch_size = int(config["batch_size"]) + model_name = args.model_name_or_path + + set_seed(seed) + train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name) + + # Instantiate the model (we build the model here so that the seed also control new weights initialization) + model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True) + + # Instantiate optimizer + optimizer_cls = ( + AdamW + if accelerator.state.deepspeed_plugin is None + or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config + else DummyOptim + ) + optimizer = optimizer_cls(params=model.parameters(), lr=lr) + + if accelerator.state.deepspeed_plugin is not None: + gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[ + "gradient_accumulation_steps" + ] + else: + gradient_accumulation_steps = 1 + max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps + + # Instantiate scheduler + if ( + accelerator.state.deepspeed_plugin is None + or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config + ): + lr_scheduler = get_linear_schedule_with_warmup( + optimizer=optimizer, + num_warmup_steps=0, + num_training_steps=max_training_steps, + ) + else: + lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0) + + # Prepare everything + # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the + # prepare method. 
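The manual truncation in `evaluation_loop` above exists because, in a multi-process run, the prepared dataloader may duplicate samples so that every process receives a full final batch; after `accelerator.gather()` those duplicates must be trimmed before computing metrics. The sketch below shows the same loop written with `gather_for_metrics`, the higher-level helper used by the metrics test script later in this diff, which performs that bookkeeping automatically. It is an illustrative variant under that assumption, not part of the diff.

```python
# Hedged sketch: an evaluation loop equivalent to evaluation_loop above, but relying on
# accelerator.gather_for_metrics to drop the duplicated samples of the last batch.
import torch


def evaluation_loop_with_gather_for_metrics(accelerator, model, eval_dataloader, metric):
    model.eval()
    for batch in eval_dataloader:
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # gather_for_metrics trims the padded/duplicated samples of the final batch itself.
        predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
        metric.add_batch(predictions=predictions, references=references)
    return metric.compute()["accuracy"]
```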
+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler + ) + + # We need to keep track of how many total steps we have iterated over + overall_step = 0 + # We also need to keep track of the starting epoch so files are named properly + starting_epoch = 0 + metric = evaluate.load("glue", "mrpc") + ending_epoch = num_epochs + + if args.partial_train_epoch is not None: + ending_epoch = args.partial_train_epoch + + if args.resume_from_checkpoint: + accelerator.load_state(args.resume_from_checkpoint) + epoch_string = args.resume_from_checkpoint.split("epoch_")[1] + state_epoch_num = "" + for char in epoch_string: + if char.isdigit(): + state_epoch_num += char + else: + break + starting_epoch = int(state_epoch_num) + 1 + accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric) + accelerator.print("resumed checkpoint performance:", accuracy) + accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0]) + accelerator.print("resumed optimizer's lr:", optimizer.param_groups[0]["lr"]) + with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f: + resumed_state = json.load(f) + assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" + assert ( + resumed_state["lr"] == lr_scheduler.get_lr()[0] + ), "Scheduler learning rate mismatch, loading from checkpoint failed" + assert ( + resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] + ), "Optimizer learning rate mismatch, loading from checkpoint failed" + assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" + return + + # Now we train the model + state = {} + for epoch in range(starting_epoch, ending_epoch): + model.train() + for step, batch in enumerate(train_dataloader): + outputs = model(**batch) + loss = outputs.loss + loss = loss / gradient_accumulation_steps + accelerator.backward(loss) + if step % gradient_accumulation_steps == 0: + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + + overall_step += 1 + output_dir = f"epoch_{epoch}" + output_dir = os.path.join(args.output_dir, output_dir) + accelerator.save_state(output_dir) + accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric) + state["accuracy"] = accuracy + state["lr"] = lr_scheduler.get_lr()[0] + state["optimizer_lr"] = optimizer.param_groups[0]["lr"] + state["epoch"] = epoch + state["step"] = overall_step + accelerator.print(f"epoch {epoch}:", state) + + accelerator.wait_for_everyone() + if accelerator.is_main_process: + with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f: + json.dump(state, f) + + +def main(): + parser = argparse.ArgumentParser(description="Simple example of a training script handling checkpointing.") + parser.add_argument( + "--model_name_or_path", + type=str, + default="bert-base-cased", + help="Path to pretrained model or model identifier from huggingface.co/models.", + required=False, + ) + parser.add_argument( + "--output_dir", + type=str, + default=".", + help="Optional save directory where all checkpoint folders will be stored.
Default is the current working directory.", + ) + parser.add_argument( + "--resume_from_checkpoint", + type=str, + default=None, + help="If the training should continue from a checkpoint folder.", + ) + parser.add_argument( + "--partial_train_epoch", + type=int, + default=None, + help="If passed, the training will stop after this number of epochs.", + ) + parser.add_argument( + "--num_epochs", + type=int, + default=2, + help="Number of train epochs.", + ) + args = parser.parse_args() + config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} + + training_function(config, args) + + +if __name__ == "__main__": + main() diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_metrics.py b/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..cc02f969878700050a210b894ec09ca58a965761 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_metrics.py @@ -0,0 +1,266 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import math +import os +from copy import deepcopy + +import datasets +import evaluate +import torch +import transformers +from datasets import load_dataset +from torch.utils.data import DataLoader, IterableDataset +from transformers import AutoModelForSequenceClassification, AutoTokenizer + +from accelerate import Accelerator +from accelerate.data_loader import DataLoaderDispatcher +from accelerate.test_utils import RegressionDataset, RegressionModel +from accelerate.utils import is_tpu_available, set_seed + + +os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true" + + +class ListHandler(logging.Handler): + def __init__(self, *args, **kwargs): + super(ListHandler, self).__init__(*args, **kwargs) + self.logs = [] + + def emit(self, record): + self.logs.append(record) + + +def get_basic_setup(accelerator, num_samples=82, batch_size=16): + "Returns everything needed to perform basic training" + set_seed(42) + model = RegressionModel() + ddp_model = deepcopy(model) + dset = RegressionDataset(length=num_samples) + dataloader = DataLoader(dset, batch_size=batch_size) + model.to(accelerator.device) + ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader) + return model, ddp_model, dataloader + + +def get_dataloader(accelerator: Accelerator, use_longest=False): + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased") + dataset = load_dataset("glue", "mrpc", split="validation") + + def tokenize_function(examples): + outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) + return outputs + + with accelerator.main_process_first(): + tokenized_datasets = dataset.map( + tokenize_function, + batched=True, + remove_columns=["idx", "sentence1", "sentence2"], + ) + + tokenized_datasets = 
tokenized_datasets.rename_column("label", "labels") + + def collate_fn(examples): + if use_longest: + return tokenizer.pad(examples, padding="longest", return_tensors="pt") + return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") + + return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16) + + +def get_mrpc_setup(dispatch_batches, split_batches): + accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches) + dataloader = get_dataloader(accelerator, not dispatch_batches) + model = AutoModelForSequenceClassification.from_pretrained( + "hf-internal-testing/mrpc-bert-base-cased", return_dict=True + ) + ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader) + return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator + + +def generate_predictions(model, dataloader, accelerator): + logits_and_targets = [] + for batch in dataloader: + input, target = batch.values() + with torch.no_grad(): + logit = model(input) + logit, target = accelerator.gather_for_metrics((logit, target)) + logits_and_targets.append((logit, target)) + logits, targs = [], [] + for logit, targ in logits_and_targets: + logits.append(logit) + targs.append(targ) + logits, targs = torch.cat(logits), torch.cat(targs) + return logits, targs + + +def test_torch_metrics( + accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16 +): + model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size) + logits, targs = generate_predictions(ddp_model, dataloader, accelerator) + assert ( + len(logits) == num_samples + ), f"Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(logits)}" + + +def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False): + metric = evaluate.load("glue", "mrpc") + setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches) + # First do baseline + model, dataloader, device = setup["no"] + model.to(device) + model.eval() + for batch in dataloader: + batch.to(device) + with torch.inference_mode(): + outputs = model(**batch) + preds = outputs.logits.argmax(dim=-1) + metric.add_batch(predictions=preds, references=batch["labels"]) + baseline = metric.compute() + + # Then do distributed + model, dataloader, device = setup["ddp"] + model.eval() + for batch in dataloader: + with torch.inference_mode(): + outputs = model(**batch) + preds = outputs.logits.argmax(dim=-1) + references = batch["labels"] + preds, references = accelerator.gather_for_metrics((preds, references)) + metric.add_batch(predictions=preds, references=references) + distributed = metric.compute() + + for key in "accuracy f1".split(): + assert math.isclose( + baseline[key], distributed[key] + ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n" + + +def test_gather_for_metrics_with_non_tensor_objects_iterable_dataset(): + class DummyIterableDataset(IterableDataset): + def __init__(self, data): + self.data = data + + def __len__(self): + return len(self.data) + + def __iter__(self): + for element in self.data: + yield element + + iterable_dataset = DummyIterableDataset([n for n in range(30)]) + dataloader = DataLoader(iterable_dataset, batch_size=4) + accelerator = Accelerator() + prepared_dataloader = accelerator.prepare(dataloader) + + if accelerator.is_main_process: + logger = 
logging.root.manager.loggerDict["accelerate.accelerator"] + list_handler = ListHandler() + logger.addHandler(list_handler) + + batches_for_metrics = [] + for batch in prepared_dataloader: + batches_for_metrics.append(accelerator.gather_for_metrics(batch)) + + assert torch.cat(batches_for_metrics).size(0) == 30 + + if accelerator.is_main_process: + assert len(list_handler.logs) == 0 + logger.removeHandler(list_handler) + + +def test_gather_for_metrics_with_iterable_dataset(): + class DummyIterableDataset(IterableDataset): + def __init__(self, data): + self.data = data + + def __len__(self): + return len(self.data) + + def __iter__(self): + for element in self.data: + yield element + + iterable_dataset = DummyIterableDataset(torch.as_tensor(range(30))) + dataloader = DataLoader(iterable_dataset, batch_size=4) + + accelerator = Accelerator() + prepared_dataloader = accelerator.prepare(dataloader) + + assert isinstance(prepared_dataloader, DataLoaderDispatcher) + + if accelerator.is_main_process: + logger = logging.root.manager.loggerDict["accelerate.accelerator"] + list_handler = ListHandler() + logger.addHandler(list_handler) + + batches_for_metrics = [] + for batch in prepared_dataloader: + batches_for_metrics.append(accelerator.gather_for_metrics(batch)) + + assert torch.cat(batches_for_metrics).size(0) == 30 + + if accelerator.is_main_process: + assert len(list_handler.logs) == 0 + + logger.removeHandler(list_handler) + + +def main(): + accelerator = Accelerator(split_batches=False, dispatch_batches=False) + if accelerator.is_local_main_process: + datasets.utils.logging.set_verbosity_warning() + transformers.utils.logging.set_verbosity_warning() + else: + datasets.utils.logging.set_verbosity_error() + transformers.utils.logging.set_verbosity_error() + # These are a bit slower so they should only be ran on the GPU or TPU + if torch.cuda.is_available() or is_tpu_available(): + if accelerator.is_local_main_process: + print("**Testing gather_for_metrics**") + for split_batches in [True, False]: + for dispatch_batches in [True, False]: + if accelerator.is_local_main_process: + print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`") + test_mrpc(dispatch_batches, split_batches) + accelerator.state._reset_state() + print("test_gather_for_metrics_with_iterable_dataset") + test_gather_for_metrics_with_iterable_dataset() + print("test gather_for_metrics_with_non_tensor_objects_iterable_dataset") + test_gather_for_metrics_with_non_tensor_objects_iterable_dataset() + if accelerator.is_local_main_process: + print("**Test torch metrics**") + for split_batches in [True, False]: + for dispatch_batches in [True, False]: + accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches) + if accelerator.is_local_main_process: + print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99") + test_torch_metrics(accelerator, 99) + accelerator.state._reset_state() + if accelerator.is_local_main_process: + print("**Test last batch is not dropped when perfectly divisible**") + accelerator = Accelerator() + test_torch_metrics(accelerator, 512) + accelerator.state._reset_state() + + +def _mp_fn(index): + # For xla_spawn (TPUs) + main() + + +if __name__ == "__main__": + main() diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py b/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py new file 
mode 100644 index 0000000000000000000000000000000000000000..3236bbe79c67fd3d357ea7e3c67176cf609d82a4 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py @@ -0,0 +1,277 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import argparse +import gc +import json +import os + +import torch +from datasets import load_dataset +from torch.optim import AdamW +from torch.utils.data import DataLoader +from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed + +from accelerate import Accelerator, DistributedType +from accelerate.utils import is_npu_available, is_xpu_available +from accelerate.utils.deepspeed import DummyOptim, DummyScheduler + + +MAX_GPU_BATCH_SIZE = 16 +EVAL_BATCH_SIZE = 32 + + +# Converting Bytes to Megabytes +def b2mb(x): + return int(x / 2**20) + + +# This context manager is used to track the peak memory usage of the process +class TorchTracemalloc: + def __enter__(self): + gc.collect() + if torch.cuda.is_available(): + torch.cuda.empty_cache() + torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero + self.begin = torch.cuda.memory_allocated() + elif is_npu_available(): + torch.npu.empty_cache() + torch.npu.reset_max_memory_allocated() # reset the peak gauge to zero + self.begin = torch.npu.memory_allocated() + elif is_xpu_available(): + torch.xpu.empty_cache() + torch.xpu.reset_max_memory_allocated() # reset the peak gauge to zero + self.begin = torch.xpu.memory_allocated() + return self + + def __exit__(self, *exc): + gc.collect() + if torch.cuda.is_available(): + torch.cuda.empty_cache() + self.end = torch.cuda.memory_allocated() + self.peak = torch.cuda.max_memory_allocated() + elif is_npu_available(): + torch.npu.empty_cache() + self.end = torch.npu.memory_allocated() + self.peak = torch.npu.max_memory_allocated() + elif is_xpu_available(): + torch.xpu.empty_cache() + self.end = torch.xpu.memory_allocated() + self.peak = torch.xpu.max_memory_allocated() + self.used = b2mb(self.end - self.begin) + self.peaked = b2mb(self.peak - self.begin) + # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") + + +def get_dataloaders( + accelerator: Accelerator, + batch_size: int = 16, + model_name: str = "bert-base-cased", + n_train: int = 320, + n_val: int = 160, +): + """ + Creates a set of `DataLoader`s for the `glue` dataset. + + Args: + accelerator (`Accelerator`): + An `Accelerator` object + batch_size (`int`, *optional*): + The batch size for the train and validation DataLoaders. + model_name (`str`, *optional*): + The name of the model to use. + n_train (`int`, *optional*): + The number of training examples to use. + n_val (`int`, *optional*): + The number of validation examples to use. 
+ """ + tokenizer = AutoTokenizer.from_pretrained(model_name) + datasets = load_dataset( + "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"} + ) + + def tokenize_function(examples): + # max_length=None => use the model max length (it's actually the default) + outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) + return outputs + + # Apply the method we just defined to all the examples in all the splits of the dataset + tokenized_datasets = datasets.map( + tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False + ) + + # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the + # transformers library + tokenized_datasets = tokenized_datasets.rename_column("label", "labels") + + def collate_fn(examples): + # On TPU it's best to pad everything to the same length or training will be very slow. + if accelerator.distributed_type == DistributedType.TPU: + return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") + return tokenizer.pad(examples, padding="longest", return_tensors="pt") + + # Instantiate dataloaders. + train_dataloader = DataLoader( + tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size + ) + eval_dataloader = DataLoader( + tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE + ) + + return train_dataloader, eval_dataloader + + +def training_function(config, args): + # Initialize accelerator + accelerator = Accelerator() + + # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs + lr = config["lr"] + num_epochs = int(config["num_epochs"]) + seed = int(config["seed"]) + batch_size = int(config["batch_size"]) + model_name = args.model_name_or_path + + set_seed(seed) + train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val) + + # Instantiate the model (we build the model here so that the seed also control new weights initialization) + model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True) + + # Instantiate optimizer + optimizer_cls = ( + AdamW + if accelerator.state.deepspeed_plugin is None + or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config + else DummyOptim + ) + optimizer = optimizer_cls(params=model.parameters(), lr=lr) + + if accelerator.state.deepspeed_plugin is not None: + gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[ + "gradient_accumulation_steps" + ] + else: + gradient_accumulation_steps = 1 + max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps + + # Instantiate scheduler + if ( + accelerator.state.deepspeed_plugin is None + or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config + ): + lr_scheduler = get_linear_schedule_with_warmup( + optimizer=optimizer, + num_warmup_steps=0, + num_training_steps=max_training_steps, + ) + else: + lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0) + + # Prepare everything + # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the + # prepare method. 
+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler + ) + + # We need to keep track of how many total steps we have iterated over + overall_step = 0 + # We also need to keep track of the stating epoch so files are named properly + starting_epoch = 0 + + # Now we train the model + train_total_peak_memory = {} + for epoch in range(starting_epoch, num_epochs): + with TorchTracemalloc() as tracemalloc: + model.train() + for step, batch in enumerate(train_dataloader): + outputs = model(**batch) + loss = outputs.loss + loss = loss / gradient_accumulation_steps + accelerator.backward(loss) + if step % gradient_accumulation_steps == 0: + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + + overall_step += 1 + + # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage + accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin))) + accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used)) + accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked)) + accelerator.print( + "Total Peak Memory consumed during the train (max): {}".format( + tracemalloc.peaked + b2mb(tracemalloc.begin) + ) + ) + train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin) + if args.peak_memory_upper_bound is not None: + assert ( + train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound + ), "Peak memory usage exceeded the upper bound" + + accelerator.wait_for_everyone() + if accelerator.is_main_process: + with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f: + json.dump(train_total_peak_memory, f) + + +def main(): + parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.") + parser.add_argument( + "--model_name_or_path", + type=str, + default="bert-base-cased", + help="Path to pretrained model or model identifier from huggingface.co/models.", + required=False, + ) + parser.add_argument( + "--output_dir", + type=str, + default=".", + help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", + ) + parser.add_argument( + "--peak_memory_upper_bound", + type=float, + default=None, + help="The upper bound of peak memory usage in MB. 
If set, the training will throw an error if the peak memory usage exceeds this value.", + ) + parser.add_argument( + "--n_train", + type=int, + default=320, + help="Number of training examples to use.", + ) + parser.add_argument( + "--n_val", + type=int, + default=160, + help="Number of validation examples to use.", + ) + parser.add_argument( + "--num_epochs", + type=int, + default=1, + help="Number of train epochs.", + ) + args = parser.parse_args() + config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} + training_function(config, args) + + +if __name__ == "__main__": + main() diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_performance.py b/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_performance.py new file mode 100644 index 0000000000000000000000000000000000000000..93382026e5e24addcbc0ecf8dee1baedbcee9ff0 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_performance.py @@ -0,0 +1,231 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import argparse +import json +import os + +import evaluate +import torch +from datasets import load_dataset +from torch.optim import AdamW +from torch.utils.data import DataLoader +from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed + +from accelerate import Accelerator, DistributedType +from accelerate.utils.deepspeed import DummyOptim, DummyScheduler + + +MAX_GPU_BATCH_SIZE = 16 +EVAL_BATCH_SIZE = 32 + + +def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"): + """ + Creates a set of `DataLoader`s for the `glue` dataset. + + Args: + accelerator (`Accelerator`): + An `Accelerator` object + batch_size (`int`, *optional*): + The batch size for the train and validation DataLoaders. + model_name (`str`, *optional*): + """ + tokenizer = AutoTokenizer.from_pretrained(model_name) + datasets = load_dataset("glue", "mrpc") + + def tokenize_function(examples): + # max_length=None => use the model max length (it's actually the default) + outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) + return outputs + + # Apply the method we just defined to all the examples in all the splits of the dataset + tokenized_datasets = datasets.map( + tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False + ) + + # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the + # transformers library + tokenized_datasets = tokenized_datasets.rename_column("label", "labels") + + def collate_fn(examples): + # On TPU it's best to pad everything to the same length or training will be very slow. 
+ if accelerator.distributed_type == DistributedType.TPU: + return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") + return tokenizer.pad(examples, padding="longest", return_tensors="pt") + + # Instantiate dataloaders. + train_dataloader = DataLoader( + tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size + ) + eval_dataloader = DataLoader( + tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE + ) + + return train_dataloader, eval_dataloader + + +def training_function(config, args): + # Initialize accelerator + accelerator = Accelerator() + + # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs + lr = config["lr"] + num_epochs = int(config["num_epochs"]) + seed = int(config["seed"]) + batch_size = int(config["batch_size"]) + model_name = args.model_name_or_path + + set_seed(seed) + train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name) + + # Instantiate the model (we build the model here so that the seed also control new weights initialization) + model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True) + + # Instantiate optimizer + optimizer_cls = ( + AdamW + if accelerator.state.deepspeed_plugin is None + or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config + else DummyOptim + ) + optimizer = optimizer_cls(params=model.parameters(), lr=lr) + + if accelerator.state.deepspeed_plugin is not None: + gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[ + "gradient_accumulation_steps" + ] + else: + gradient_accumulation_steps = 1 + max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps + + # Instantiate scheduler + if ( + accelerator.state.deepspeed_plugin is None + or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config + ): + lr_scheduler = get_linear_schedule_with_warmup( + optimizer=optimizer, + num_warmup_steps=0, + num_training_steps=max_training_steps, + ) + else: + lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0) + + # Prepare everything + # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the + # prepare method. + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( + model, optimizer, train_dataloader, eval_dataloader, lr_scheduler + ) + + # We need to keep track of how many total steps we have iterated over + overall_step = 0 + # We also need to keep track of the stating epoch so files are named properly + starting_epoch = 0 + + # Now we train the model + metric = evaluate.load("glue", "mrpc") + best_performance = 0 + performance_metric = {} + for epoch in range(starting_epoch, num_epochs): + model.train() + for step, batch in enumerate(train_dataloader): + outputs = model(**batch) + loss = outputs.loss + loss = loss / gradient_accumulation_steps + accelerator.backward(loss) + if step % gradient_accumulation_steps == 0: + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + + overall_step += 1 + + model.eval() + samples_seen = 0 + for step, batch in enumerate(eval_dataloader): + # We could avoid this line since we set the accelerator with `device_placement=True`. 
+ batch.to(accelerator.device) + with torch.no_grad(): + outputs = model(**batch) + predictions = outputs.logits.argmax(dim=-1) + # It is slightly faster to call this once, than multiple times + predictions, references = accelerator.gather( + (predictions, batch["labels"]) + ) # If we are in a multiprocess environment, the last batch has duplicates + if accelerator.use_distributed: + if step == len(eval_dataloader) - 1: + predictions = predictions[: len(eval_dataloader.dataset) - samples_seen] + references = references[: len(eval_dataloader.dataset) - samples_seen] + else: + samples_seen += references.shape[0] + metric.add_batch( + predictions=predictions, + references=references, + ) + + eval_metric = metric.compute() + # Use accelerator.print to print only on the main process. + accelerator.print(f"epoch {epoch}:", eval_metric) + performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"] + + if best_performance < eval_metric["accuracy"]: + best_performance = eval_metric["accuracy"] + + if args.performance_lower_bound is not None: + assert ( + args.performance_lower_bound <= best_performance + ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}" + + accelerator.wait_for_everyone() + if accelerator.is_main_process: + with open(os.path.join(args.output_dir, "all_results.json"), "w") as f: + json.dump(performance_metric, f) + + +def main(): + parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.") + parser.add_argument( + "--model_name_or_path", + type=str, + default="bert-base-cased", + help="Path to pretrained model or model identifier from huggingface.co/models.", + required=False, + ) + parser.add_argument( + "--output_dir", + type=str, + default=".", + help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", + ) + parser.add_argument( + "--performance_lower_bound", + type=float, + default=None, + help="Optional lower bound for the performance metric. 
If set, the training will throw error when the performance metric drops below this value.", + ) + parser.add_argument( + "--num_epochs", + type=int, + default=3, + help="Number of train epochs.", + ) + args = parser.parse_args() + config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} + training_function(config, args) + + +if __name__ == "__main__": + main() diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_cli.py b/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_cli.py new file mode 100644 index 0000000000000000000000000000000000000000..491410e5fc33e663d977d70fdb6aef168ddcffc7 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_cli.py @@ -0,0 +1,13 @@ +import torch + + +def main(): + if torch.cuda.is_available(): + num_gpus = torch.cuda.device_count() + else: + num_gpus = 0 + print(f"Successfully ran on {num_gpus} GPUs") + + +if __name__ == "__main__": + main() diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_distributed_data_loop.py b/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_distributed_data_loop.py new file mode 100644 index 0000000000000000000000000000000000000000..850f7310f79f4214f21680487a5ac63c85d4d767 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_distributed_data_loop.py @@ -0,0 +1,238 @@ +#!/usr/bin/env python + +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import warnings +from typing import List +from unittest.mock import Mock + +import torch +from torch.utils.data import DataLoader, IterableDataset, TensorDataset + +from accelerate.accelerator import Accelerator +from accelerate.utils.dataclasses import DistributedType + + +class DummyIterableDataset(IterableDataset): + def __init__(self, data): + self.data = data + + def __iter__(self): + for element in self.data: + yield element + + +def create_accelerator(even_batches=True): + accelerator = Accelerator(even_batches=even_batches) + assert accelerator.num_processes == 2, "this script expects that two GPUs are available" + return accelerator + + +def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False): + """ + Create a simple DataLoader to use during the test cases + """ + if iterable: + dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size))) + else: + dataset = TensorDataset(torch.as_tensor(range(dataset_size))) + + dl = DataLoader(dataset, batch_size=batch_size) + dl = accelerator.prepare(dl) + + return dl + + +def verify_dataloader_batch_sizes( + accelerator: Accelerator, + dataset_size: int, + batch_size: int, + process_0_expected_batch_sizes: List[int], + process_1_expected_batch_sizes: List[int], +): + """ + A helper function for verifying the batch sizes coming from a prepared dataloader in each process + """ + dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size) + + batch_sizes = [len(batch[0]) for batch in dl] + + if accelerator.process_index == 0: + assert batch_sizes == process_0_expected_batch_sizes + elif accelerator.process_index == 1: + assert batch_sizes == process_1_expected_batch_sizes + + +def test_default_ensures_even_batch_sizes(): + accelerator = create_accelerator() + + # without padding, we would expect a different number of batches + verify_dataloader_batch_sizes( + accelerator, + dataset_size=3, + batch_size=1, + process_0_expected_batch_sizes=[1, 1], + process_1_expected_batch_sizes=[1, 1], + ) + + # without padding, we would expect the same number of batches, but different sizes + verify_dataloader_batch_sizes( + accelerator, + dataset_size=7, + batch_size=2, + process_0_expected_batch_sizes=[2, 2], + process_1_expected_batch_sizes=[2, 2], + ) + + +def test_can_disable_even_batches(): + accelerator = create_accelerator(even_batches=False) + + verify_dataloader_batch_sizes( + accelerator, + dataset_size=3, + batch_size=1, + process_0_expected_batch_sizes=[1, 1], + process_1_expected_batch_sizes=[1], + ) + + verify_dataloader_batch_sizes( + accelerator, + dataset_size=7, + batch_size=2, + process_0_expected_batch_sizes=[2, 2], + process_1_expected_batch_sizes=[2, 1], + ) + + +def test_can_join_uneven_inputs(): + accelerator = create_accelerator(even_batches=False) + + model = torch.nn.Linear(1, 1) + ddp_model = accelerator.prepare(model) + + dl = create_dataloader(accelerator, dataset_size=3, batch_size=1) + + batch_idxs = [] + with accelerator.join_uneven_inputs([ddp_model]): + for batch_idx, batch in enumerate(dl): + output = ddp_model(batch[0].float()) + loss = output.sum() + loss.backward() + batch_idxs.append(batch_idx) + + accelerator.wait_for_everyone() + + if accelerator.process_index == 0: + assert batch_idxs == [0, 1] + elif accelerator.process_index == 1: + assert batch_idxs == [0] + + +def test_join_raises_warning_for_non_ddp_distributed(accelerator): + with warnings.catch_warnings(record=True) as w: + with 
accelerator.join_uneven_inputs([Mock()]): + pass + + assert issubclass(w[-1].category, UserWarning) + assert "only supported for multi-GPU" in str(w[-1].message) + + +def test_join_can_override_even_batches(): + default_even_batches = True + overridden_even_batches = False + accelerator = create_accelerator(even_batches=default_even_batches) + model = torch.nn.Linear(1, 1) + ddp_model = accelerator.prepare(model) + train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1) + valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1) + + with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches): + train_dl_overridden_value = train_dl.batch_sampler.even_batches + valid_dl_overridden_value = valid_dl.batch_sampler.even_batches + + assert train_dl_overridden_value == overridden_even_batches + assert valid_dl_overridden_value == overridden_even_batches + assert train_dl.batch_sampler.even_batches == default_even_batches + assert valid_dl.batch_sampler.even_batches == default_even_batches + + +def test_join_can_override_for_mixed_type_dataloaders(): + default_even_batches = True + overridden_even_batches = False + accelerator = create_accelerator(even_batches=default_even_batches) + model = torch.nn.Linear(1, 1) + ddp_model = accelerator.prepare(model) + create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True) + batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1) + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + try: + with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches): + batch_dl_overridden_value = batch_dl.batch_sampler.even_batches + except AttributeError: + # ensure attribute error is not raised when processing iterable dl + raise AssertionError + + assert batch_dl_overridden_value == overridden_even_batches + assert batch_dl.batch_sampler.even_batches == default_even_batches + + +def test_join_raises_warning_for_iterable_when_overriding_even_batches(): + accelerator = create_accelerator() + model = torch.nn.Linear(1, 1) + ddp_model = accelerator.prepare(model) + create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True) + + with warnings.catch_warnings(record=True) as w: + with accelerator.join_uneven_inputs([ddp_model], even_batches=False): + pass + + assert issubclass(w[-1].category, UserWarning) + assert "only supported for map-style datasets" in str(w[-1].message) + + +def main(): + accelerator = create_accelerator() + + accelerator.print("Test that even_batches variable ensures uniform batches across processes") + test_default_ensures_even_batch_sizes() + + accelerator.print("Run tests with even_batches disabled") + test_can_disable_even_batches() + + accelerator.print("Test joining uneven inputs") + test_can_join_uneven_inputs() + + accelerator.print("Test overriding even_batches when joining uneven inputs") + test_join_can_override_even_batches() + + accelerator.print("Test overriding even_batches for mixed dataloader types") + test_join_can_override_for_mixed_type_dataloaders() + + accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders") + test_join_raises_warning_for_iterable_when_overriding_even_batches() + + accelerator.print("Test join with non DDP distributed raises warning") + original_state = accelerator.state.distributed_type + accelerator.state.distributed_type = DistributedType.FSDP + test_join_raises_warning_for_non_ddp_distributed(accelerator) + 
accelerator.state.distributed_type = original_state + + +if __name__ == "__main__": + main() diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_notebook.py b/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_notebook.py new file mode 100644 index 0000000000000000000000000000000000000000..8f215d8fd199163eafed5bf803b2d18383fcd25e --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_notebook.py @@ -0,0 +1,17 @@ +# Test file to ensure that in general certain situational setups for notebooks work. +import argparse + +from accelerate import PartialState, notebook_launcher + + +parser = argparse.ArgumentParser() +parser.add_argument("--num_processes", type=int, default=1) +args = parser.parse_args() + + +def function(): + print(f"PartialState:\n{PartialState()}") + + +if __name__ == "__main__": + notebook_launcher(function, num_processes=int(args.num_processes)) diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_ops.py b/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..734c05247ce3cd47da8b1ca66050a121fcfc389a --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_ops.py @@ -0,0 +1,159 @@ +#!/usr/bin/env python + +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import torch + +from accelerate import PartialState +from accelerate.test_utils.testing import assert_exception +from accelerate.utils.dataclasses import DistributedType +from accelerate.utils.operations import ( + DistributedOperationException, + broadcast, + gather, + gather_object, + pad_across_processes, + reduce, +) + + +def create_tensor(state): + return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device) + + +def test_gather(state): + tensor = create_tensor(state) + gathered_tensor = gather(tensor) + assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1)) + + +def test_gather_object(state): + obj = [state.process_index] + gathered_obj = gather_object(obj) + assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}" + assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}" + + +def test_gather_non_contigous(state): + # Create a non-contiguous tensor + tensor = torch.arange(12).view(4, 3).t().to(state.device) + assert not tensor.is_contiguous() + # Shouldn't error out + _ = gather(tensor) + + +def test_broadcast(state): + tensor = create_tensor(state) + broadcasted_tensor = broadcast(tensor) + assert broadcasted_tensor.shape == torch.Size([state.num_processes]) + assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1)) + + +def test_pad_across_processes(state): + # We need to pad the tensor with one more element if we are the main process + # to ensure that we can pad + if state.is_main_process: + tensor = torch.arange(state.num_processes + 1).to(state.device) + else: + tensor = torch.arange(state.num_processes).to(state.device) + padded_tensor = pad_across_processes(tensor) + assert padded_tensor.shape == torch.Size([state.num_processes + 1]) + if not state.is_main_process: + assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0] + + +def test_reduce_sum(state): + # For now runs on only two processes + if state.num_processes != 2: + return + tensor = create_tensor(state) + reduced_tensor = reduce(tensor, "sum") + truth_tensor = torch.tensor([4.0, 6]).to(state.device) + assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}" + + +def test_reduce_mean(state): + # For now runs on only two processes + if state.num_processes != 2: + return + tensor = create_tensor(state) + reduced_tensor = reduce(tensor, "mean") + truth_tensor = torch.tensor([2.0, 3]).to(state.device) + assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}" + + +def test_op_checker(state): + # Must be in a distributed state + if state.distributed_type == DistributedType.NO: + return + state.debug = True + # `pad_across_processes` + if state.process_index == 0: + data = {"tensor": torch.tensor([[0.0, 1, 2, 3, 4]]).to(state.device)} + else: + data = {"tensor": torch.tensor([[[0.0, 1, 2, 3, 4, 5]]]).to(state.device)} + + with assert_exception(DistributedOperationException): + pad_across_processes(data, dim=0) + + # `reduce` + if state.process_index == 0: + data = {"tensor": torch.tensor([[0.0, 1, 2, 3, 4]]).to(state.device)} + else: + data = {"tensor": torch.tensor([[[0.0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]]).to(state.device)} + + with assert_exception(DistributedOperationException): + reduce(data) + + # `broadcast` + if state.process_index == 0: + data = {"tensor": torch.tensor([[0.0, 1, 2, 3, 4]]).to(state.device)} + else: + data = 
{"tensor": torch.tensor([[[0.0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]]).to(state.device)} + + with assert_exception(DistributedOperationException): + broadcast(data) + + state.debug = False + + +def _mp_fn(index): + # For xla_spawn (TPUs) + main() + + +def main(): + state = PartialState() + state.print(f"State: {state}") + state.print("testing gather") + test_gather(state) + state.print("testing gather_object") + test_gather_object(state) + state.print("testing gather non-contigous") + test_gather_non_contigous(state) + state.print("testing broadcast") + test_broadcast(state) + state.print("testing pad_across_processes") + test_pad_across_processes(state) + state.print("testing reduce_sum") + test_reduce_sum(state) + state.print("testing reduce_mean") + test_reduce_mean(state) + state.print("testing op_checker") + test_op_checker(state) + + +if __name__ == "__main__": + main() diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_script.py b/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_script.py new file mode 100644 index 0000000000000000000000000000000000000000..9ee508f8be458ed1898484b3237ec159f3ea635c --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_script.py @@ -0,0 +1,616 @@ +#!/usr/bin/env python + +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import contextlib +import io +import math +import time +from copy import deepcopy +from pathlib import Path + +import torch +from torch.utils.data import DataLoader + +from accelerate import Accelerator +from accelerate.data_loader import prepare_data_loader +from accelerate.state import AcceleratorState +from accelerate.test_utils import RegressionDataset, are_the_same_tensors +from accelerate.utils import ( + DistributedType, + gather, + is_bf16_available, + is_ipex_available, + is_npu_available, + is_xpu_available, + set_seed, + synchronize_rng_states, +) + + +# TODO: remove RegressionModel4XPU once ccl support empty buffer in broadcasting. 
+if is_xpu_available(): + from accelerate.test_utils import RegressionModel4XPU as RegressionModel +else: + from accelerate.test_utils import RegressionModel + + +def print_main(state): + print(f"Printing from the main process {state.process_index}") + + +def print_local_main(state): + print(f"Printing from the local main process {state.local_process_index}") + + +def print_last(state): + print(f"Printing from the last process {state.process_index}") + + +def print_on(state, process_idx): + print(f"Printing from process {process_idx}: {state.process_index}") + + +def process_execution_check(): + accelerator = Accelerator() + num_processes = accelerator.num_processes + # Test main_process_first context manager + path = Path("check_main_process_first.txt") + with accelerator.main_process_first(): + if accelerator.is_main_process: + time.sleep(0.1) # ensure main process takes longest + with open(path, "a+") as f: + f.write("Currently in the main process\n") + else: + with open(path, "a+") as f: + f.write("Now on another process\n") + accelerator.wait_for_everyone() + + if accelerator.is_main_process: + with open(path, "r") as f: + text = "".join(f.readlines()) + try: + assert text.startswith("Currently in the main process\n"), "Main process was not first" + if num_processes > 1: + assert text.endswith("Now on another process\n"), "Main process was not first" + assert ( + text.count("Now on another process\n") == accelerator.num_processes - 1 + ), f"Only wrote to file {text.count('Now on another process') + 1} times, not {accelerator.num_processes}" + except AssertionError: + path.unlink() + raise + + if accelerator.is_main_process and path.exists(): + path.unlink() + accelerator.wait_for_everyone() + # Test the decorators + f = io.StringIO() + with contextlib.redirect_stdout(f): + accelerator.on_main_process(print_main)(accelerator.state) + result = f.getvalue().rstrip() + if accelerator.is_main_process: + assert result == "Printing from the main process 0", f"{result} != Printing from the main process 0" + else: + assert f.getvalue().rstrip() == "", f'{result} != ""' + f.truncate(0) + f.seek(0) + + with contextlib.redirect_stdout(f): + accelerator.on_local_main_process(print_local_main)(accelerator.state) + if accelerator.is_local_main_process: + assert f.getvalue().rstrip() == "Printing from the local main process 0" + else: + assert f.getvalue().rstrip() == "" + f.truncate(0) + f.seek(0) + + with contextlib.redirect_stdout(f): + accelerator.on_last_process(print_last)(accelerator.state) + if accelerator.is_last_process: + assert f.getvalue().rstrip() == f"Printing from the last process {accelerator.state.num_processes - 1}" + else: + assert f.getvalue().rstrip() == "" + f.truncate(0) + f.seek(0) + + for process_idx in range(num_processes): + with contextlib.redirect_stdout(f): + accelerator.on_process(print_on, process_index=process_idx)(accelerator.state, process_idx) + if accelerator.process_index == process_idx: + assert f.getvalue().rstrip() == f"Printing from process {process_idx}: {accelerator.process_index}" + else: + assert f.getvalue().rstrip() == "" + f.truncate(0) + f.seek(0) + + +def init_state_check(): + # Test we can instantiate this twice in a row. + state = AcceleratorState() + if state.local_process_index == 0: + print("Testing, testing. 1, 2, 3.") + print(state) + + +def rng_sync_check(): + state = AcceleratorState() + synchronize_rng_states(["torch"]) + assert are_the_same_tensors(torch.get_rng_state()), "RNG states improperly synchronized on CPU." 
+ if state.distributed_type == DistributedType.MULTI_GPU: + synchronize_rng_states(["cuda"]) + assert are_the_same_tensors(torch.cuda.get_rng_state()), "RNG states improperly synchronized on GPU." + elif state.distributed_type == DistributedType.MULTI_XPU: + synchronize_rng_states(["xpu"]) + assert are_the_same_tensors(torch.xpu.get_rng_state()), "RNG states improperly synchronized on XPU." + generator = torch.Generator() + synchronize_rng_states(["generator"], generator=generator) + assert are_the_same_tensors(generator.get_state()), "RNG states improperly synchronized in generator." + + if state.local_process_index == 0: + print("All rng are properly synched.") + + +def dl_preparation_check(): + state = AcceleratorState() + length = 32 * state.num_processes + + dl = DataLoader(range(length), batch_size=8) + dl = prepare_data_loader(dl, state.device, state.num_processes, state.process_index, put_on_device=True) + result = [] + for batch in dl: + result.append(gather(batch)) + result = torch.cat(result) + + print(state.process_index, result, type(dl)) + assert torch.equal(result.cpu(), torch.arange(0, length).long()), "Wrong non-shuffled dataloader result." + + dl = DataLoader(range(length), batch_size=8) + dl = prepare_data_loader( + dl, + state.device, + state.num_processes, + state.process_index, + put_on_device=True, + split_batches=True, + ) + result = [] + for batch in dl: + result.append(gather(batch)) + result = torch.cat(result) + assert torch.equal(result.cpu(), torch.arange(0, length).long()), "Wrong non-shuffled dataloader result." + + if state.process_index == 0: + print("Non-shuffled dataloader passing.") + + dl = DataLoader(range(length), batch_size=8, shuffle=True) + dl = prepare_data_loader(dl, state.device, state.num_processes, state.process_index, put_on_device=True) + result = [] + for batch in dl: + result.append(gather(batch)) + result = torch.cat(result).tolist() + result.sort() + assert result == list(range(length)), "Wrong shuffled dataloader result." + + dl = DataLoader(range(length), batch_size=8, shuffle=True) + dl = prepare_data_loader( + dl, + state.device, + state.num_processes, + state.process_index, + put_on_device=True, + split_batches=True, + ) + result = [] + for batch in dl: + result.append(gather(batch)) + result = torch.cat(result).tolist() + result.sort() + assert result == list(range(length)), "Wrong shuffled dataloader result." + + if state.local_process_index == 0: + print("Shuffled dataloader passing.") + + +def central_dl_preparation_check(): + state = AcceleratorState() + length = 32 * state.num_processes + + dl = DataLoader(range(length), batch_size=8) + dl = prepare_data_loader( + dl, state.device, state.num_processes, state.process_index, put_on_device=True, dispatch_batches=True + ) + result = [] + for batch in dl: + result.append(gather(batch)) + result = torch.cat(result) + assert torch.equal(result.cpu(), torch.arange(0, length).long()), "Wrong non-shuffled dataloader result." + + dl = DataLoader(range(length), batch_size=8) + dl = prepare_data_loader( + dl, + state.device, + state.num_processes, + state.process_index, + put_on_device=True, + split_batches=True, + dispatch_batches=True, + ) + result = [] + for batch in dl: + result.append(gather(batch)) + result = torch.cat(result) + assert torch.equal(result.cpu(), torch.arange(0, length).long()), "Wrong non-shuffled dataloader result." 
+ + if state.process_index == 0: + print("Non-shuffled central dataloader passing.") + + dl = DataLoader(range(length), batch_size=8, shuffle=True) + dl = prepare_data_loader( + dl, state.device, state.num_processes, state.process_index, put_on_device=True, dispatch_batches=True + ) + result = [] + for batch in dl: + result.append(gather(batch)) + result = torch.cat(result).tolist() + result.sort() + assert result == list(range(length)), "Wrong shuffled dataloader result." + + dl = DataLoader(range(length), batch_size=8, shuffle=True) + dl = prepare_data_loader( + dl, + state.device, + state.num_processes, + state.process_index, + put_on_device=True, + split_batches=True, + dispatch_batches=True, + ) + result = [] + for batch in dl: + result.append(gather(batch)) + result = torch.cat(result).tolist() + result.sort() + assert result == list(range(length)), "Wrong shuffled dataloader result." + + if state.local_process_index == 0: + print("Shuffled central dataloader passing.") + + +def mock_training(length, batch_size, generator): + set_seed(42) + generator.manual_seed(42) + train_set = RegressionDataset(length=length, seed=42) + train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator) + model = RegressionModel() + optimizer = torch.optim.SGD(model.parameters(), lr=0.1) + for epoch in range(3): + for batch in train_dl: + model.zero_grad() + output = model(batch["x"]) + loss = torch.nn.functional.mse_loss(output, batch["y"]) + loss.backward() + optimizer.step() + return train_set, model + + +def training_check(): + state = AcceleratorState() + generator = torch.Generator() + batch_size = 8 + length = batch_size * 4 * state.num_processes + + train_set, old_model = mock_training(length, batch_size * state.num_processes, generator) + assert are_the_same_tensors(old_model.a), "Did not obtain the same model on both processes." + assert are_the_same_tensors(old_model.b), "Did not obtain the same model on both processes." + + accelerator = Accelerator() + train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator) + model = RegressionModel() + optimizer = torch.optim.SGD(model.parameters(), lr=0.1) + + train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer) + set_seed(42) + generator.manual_seed(42) + for epoch in range(3): + for batch in train_dl: + model.zero_grad() + output = model(batch["x"]) + loss = torch.nn.functional.mse_loss(output, batch["y"]) + accelerator.backward(loss) + optimizer.step() + + model = accelerator.unwrap_model(model).cpu() + assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training." + assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training." 
+ + accelerator.print("Training yielded the same results on one CPU or distributed setup with no batch split.") + + accelerator = Accelerator(split_batches=True) + train_dl = DataLoader(train_set, batch_size=batch_size * state.num_processes, shuffle=True, generator=generator) + model = RegressionModel() + optimizer = torch.optim.SGD(model.parameters(), lr=0.1) + + train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer) + set_seed(42) + generator.manual_seed(42) + for _ in range(3): + for batch in train_dl: + model.zero_grad() + output = model(batch["x"]) + loss = torch.nn.functional.mse_loss(output, batch["y"]) + accelerator.backward(loss) + optimizer.step() + + model = accelerator.unwrap_model(model).cpu() + assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training." + assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training." + + accelerator.print("Training yielded the same results on one CPU or distributes setup with batch split.") + + if torch.cuda.is_available() or is_npu_available(): + # Mostly a test that FP16 doesn't crash as the operation inside the model is not converted to FP16 + print("FP16 training check.") + AcceleratorState._reset_state() + accelerator = Accelerator(mixed_precision="fp16") + train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator) + model = RegressionModel() + optimizer = torch.optim.SGD(model.parameters(), lr=0.1) + + train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer) + set_seed(42) + generator.manual_seed(42) + for _ in range(3): + for batch in train_dl: + model.zero_grad() + output = model(batch["x"]) + loss = torch.nn.functional.mse_loss(output, batch["y"]) + accelerator.backward(loss) + optimizer.step() + + model = accelerator.unwrap_model(model).cpu() + assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training." + assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training." + + if torch.cuda.is_available(): + # Mostly a test that model.forward will have autocast when running unwrap_model(model, keep_fp32_wrapper=True) + print("Keep fp32 wrapper check.") + AcceleratorState._reset_state() + accelerator = Accelerator(mixed_precision="fp16") + + model = torch.nn.Linear(2, 4) + model = accelerator.prepare(model) + model_with_fp32_wrapper = accelerator.unwrap_model(model, keep_fp32_wrapper=True) + + # Run forward with fp16 as input. + # When the model is with mixed precision wrapper, no error will be raised. 
+ input_tensor = torch.Tensor([1, 2]).to(dtype=torch.float16, device=accelerator.device) + output = model_with_fp32_wrapper(input_tensor) + + # BF16 support is only for CPU + TPU, and some GPU + if is_bf16_available(): + # Mostly a test that BF16 doesn't crash as the operation inside the model is not converted to BF16 + print("BF16 training check.") + AcceleratorState._reset_state() + accelerator = Accelerator(mixed_precision="bf16") + train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator) + model = RegressionModel() + optimizer = torch.optim.SGD(model.parameters(), lr=0.1) + + train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer) + set_seed(42) + generator.manual_seed(42) + for _ in range(3): + for batch in train_dl: + model.zero_grad() + output = model(batch["x"]) + loss = torch.nn.functional.mse_loss(output, batch["y"]) + accelerator.backward(loss) + optimizer.step() + + model = accelerator.unwrap_model(model).cpu() + assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training." + assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training." + + # IPEX support is only for CPU + if is_ipex_available(): + print("ipex BF16 training check.") + AcceleratorState._reset_state() + accelerator = Accelerator(mixed_precision="bf16", cpu=True) + train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator) + model = RegressionModel() + optimizer = torch.optim.SGD(model.parameters(), lr=0.1) + + train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer) + set_seed(42) + generator.manual_seed(42) + for _ in range(3): + for batch in train_dl: + model.zero_grad() + output = model(batch["x"]) + loss = torch.nn.functional.mse_loss(output, batch["y"]) + accelerator.backward(loss) + optimizer.step() + + model = accelerator.unwrap_model(model).cpu() + assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training." + assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training." + + # XPU support is only for XPU + if is_xpu_available(): + print("xpu BF16 training check.") + AcceleratorState._reset_state() + accelerator = Accelerator(mixed_precision="bf16", cpu=False) + train_dl = DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator) + model = RegressionModel() + optimizer = torch.optim.SGD(model.parameters(), lr=0.1) + + train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer) + set_seed(42) + generator.manual_seed(42) + for _ in range(3): + for batch in train_dl: + model.zero_grad() + output = model(batch["x"]) + loss = torch.nn.functional.mse_loss(output, batch["y"]) + accelerator.backward(loss) + optimizer.step() + + model = accelerator.unwrap_model(model).cpu() + assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on XPU or distributed training." + assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on XPU or distributed training." + + +def test_split_between_processes_list(): + state = AcceleratorState() + data = list(range(0, 2 * state.num_processes)) + with state.split_between_processes(data) as results: + assert ( + len(results) == 2 + ), f"Each process did not have two items. 
Process index: {state.process_index}; Length: {len(results)}" + + data = list(range(0, (3 * state.num_processes) - 1)) + with state.split_between_processes(data, apply_padding=True) as results: + if state.is_last_process: + # Test that the last process gets the extra item(s) + num_samples_per_device = math.ceil(len(data) / state.num_processes) + assert ( + len(results) == num_samples_per_device + ), f"Last process did not get the extra item(s). Process index: {state.process_index}; Length: {len(results)}" + state.wait_for_everyone() + + +def test_split_between_processes_nested_dict(): + state = AcceleratorState() + a = [1, 2, 3, 4, 5, 6, 7, 8] + b = ["a", "b", "c", "d", "e", "f", "g", "h"] + c = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8]) + if state.num_processes in (1, 2, 4): + data = {"a": a, "b": b, "c": c} + data_copy = deepcopy(data) + with state.split_between_processes(data) as results: + if state.process_index == 0: + assert results["a"] == data_copy["a"][: 8 // state.num_processes] + elif state.num_processes == 2: + assert results["a"] == data_copy["a"][4:] + elif state.process_index == 3: + # We return a list each time + assert results["a"] == data_copy["a"][-2:], f'Expected: {data_copy["a"][-2]}, Actual: {results["a"]}' + if state.process_index == 0: + assert results["b"] == data_copy["b"][: 8 // state.num_processes] + elif state.num_processes == 2: + assert results["b"] == data_copy["b"][4:] + elif state.process_index == 3: + assert results["b"] == data_copy["b"][-2:] + if state.process_index == 0: + assert torch.allclose( + results["c"], data_copy["c"][: 8 // state.num_processes] + ), f"Did not obtain expected values on process 0, expected `{data['c'][:8 // state.num_processes]}`, received: {results['c']}" + elif state.num_processes == 2: + assert torch.allclose( + results["c"], data_copy["c"][4:] + ), f"Did not obtain expected values on process 2, expected `{data['c'][4:]}`, received: {results['c']}" + elif state.process_index == 3: + assert torch.allclose( + results["c"], data_copy["c"][-2:] + ), f"Did not obtain expected values on process 4, expected `{data['c'][-2:]}`, received: {results['c']}" + + state.wait_for_everyone() + + +def test_split_between_processes_tensor(): + state = AcceleratorState() + if state.num_processes > 1: + data = torch.tensor([[0, 1, 2, 3], [4, 5, 6, 7]]).to(state.device) + with state.split_between_processes(data) as results: + if state.process_index == 0: + assert torch.allclose(results, torch.tensor([0, 1, 2, 3]).to(state.device)) + else: + assert torch.allclose(results, torch.tensor([4, 5, 6, 7]).to(state.device)) + state.wait_for_everyone() + + +def test_trigger(): + accelerator = Accelerator() + # should start with being false + assert accelerator.check_trigger() is False + + # set a breakpoint on the main process + if accelerator.is_main_process: + accelerator.set_trigger() + + # check it's been activated across all processes + # calls `all_reduce` and triggers a sync + assert accelerator.check_trigger() is True + + # check it's been reset after the sync + assert accelerator.check_trigger() is False + + +def main(): + accelerator = Accelerator() + state = accelerator.state + if state.local_process_index == 0: + print("**Initialization**") + init_state_check() + state.wait_for_everyone() + + if state.distributed_type == DistributedType.MULTI_GPU: + num_processes_per_node = torch.cuda.device_count() + else: + num_processes_per_node = state.num_processes + + # We only run this test on non-multinode + if num_processes_per_node == state.num_processes: + 
if state.process_index == 0: + print("\n**Test process execution**") + process_execution_check() + + if state.process_index == 0: + print("\n**Test split between processes as a list**") + test_split_between_processes_list() + + if state.process_index == 0: + print("\n**Test split between processes as a dict**") + test_split_between_processes_nested_dict() + + if state.process_index == 0: + print("\n**Test split between processes as a tensor**") + test_split_between_processes_tensor() + + if state.local_process_index == 0: + print("\n**Test random number generator synchronization**") + rng_sync_check() + + if state.local_process_index == 0: + print("\n**DataLoader integration test**") + dl_preparation_check() + if state.distributed_type != DistributedType.TPU: + central_dl_preparation_check() + + # Trainings are not exactly the same in DeepSpeed and CPU mode + if state.distributed_type == DistributedType.DEEPSPEED: + return + + if state.local_process_index == 0: + print("\n**Training integration test**") + training_check() + + if state.local_process_index == 0: + print("\n**Breakpoint trigger test**") + test_trigger() + + +if __name__ == "__main__": + main() diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_sync.py b/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_sync.py new file mode 100644 index 0000000000000000000000000000000000000000..18ebb05e9b575293edf5ae119cc1b22719252242 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_sync.py @@ -0,0 +1,367 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
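The test_sync.py script introduced just above checks gradient synchronization under `no_sync`, `trigger_sync_in_backward`, and `accumulate`. As a rough sketch of the user-facing pattern those tests exercise (single process, no launcher; the model and dataset are the regression helpers added elsewhere in this diff):

```python
import torch
from torch.utils.data import DataLoader

from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel

# Accumulate gradients over two batches; Accelerate only synchronizes gradients
# every `gradient_accumulation_steps` batches (or on the final batch of the
# dataloader), mirroring the assertions in the tests below.
accelerator = Accelerator(gradient_accumulation_steps=2)
model = RegressionModel()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
dataloader = DataLoader(RegressionDataset(length=32), batch_size=8)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for batch in dataloader:
    with accelerator.accumulate(model):
        loss = torch.nn.functional.mse_loss(model(batch["x"]), batch["y"])
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()
```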
+ +from copy import deepcopy + +import torch +import torch.nn.functional as F +from torch.optim import AdamW +from torch.optim.lr_scheduler import LambdaLR +from torch.utils.data import DataLoader + +from accelerate.accelerator import Accelerator +from accelerate.state import GradientState +from accelerate.test_utils import RegressionDataset, RegressionModel +from accelerate.utils import DistributedType, is_torch_version, set_seed + + +def check_model_parameters(model_a, model_b, did_step, iteration): + for param, grad_param in zip(model_a.parameters(), model_b.parameters()): + if not param.requires_grad: + continue + if not did_step: + # Grads should not be in sync + assert ( + torch.allclose(param.grad, grad_param.grad) is False + ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})" + else: + # Grads should be in sync + assert ( + torch.allclose(param.grad, grad_param.grad) is True + ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})" + + +def step_model(model, input, target, accelerator, do_backward=True): + model.train() + output = model(input) + loss = F.mse_loss(output, target.to(output.device)) + if not do_backward: + loss /= accelerator.gradient_accumulation_steps + loss.backward() + else: + accelerator.backward(loss) + + +def get_training_setup(accelerator, sched=False): + "Returns everything needed to perform basic training" + set_seed(42) + model = RegressionModel() + ddp_model = deepcopy(model) + dset = RegressionDataset(length=80) + dataloader = DataLoader(dset, batch_size=16) + model.to(accelerator.device) + if sched: + opt = AdamW(params=model.parameters(), lr=1e-3) + ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3) + sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65) + ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65) + # Make a copy of `model` + if sched: + ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader) + else: + ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader) + if sched: + return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) + return model, ddp_model, dataloader + + +def test_noop_sync(accelerator): + # Test when on a single CPU or GPU that the context manager does nothing + model, ddp_model, dataloader = get_training_setup(accelerator) + # Use a single batch + ddp_input, ddp_target = next(iter(dataloader)).values() + for iteration in range(3): + # Gather the distributed inputs and targs for the base model + input, target = accelerator.gather((ddp_input, ddp_target)) + input, target = input.to(accelerator.device), target.to(accelerator.device) + # Perform our initial ground truth step in non "DDP" + step_model(model, input, target, accelerator) + # Do "gradient accumulation" (noop) + if iteration % 2 == 0: + # Accumulate grads locally + with accelerator.no_sync(ddp_model): + step_model(ddp_model, ddp_input, ddp_target, accelerator) + else: + # Sync grads + step_model(ddp_model, ddp_input, ddp_target, accelerator) + + # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync + check_model_parameters(model, ddp_model, True, iteration) + for param, ddp_param in zip(model.parameters(), ddp_model.parameters()): + if not param.requires_grad: + continue + assert torch.allclose( + param.grad, ddp_param.grad + ), f"Gradients not in sync when they should 
be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" + + # Shuffle ddp_input on each iteration + torch.manual_seed(1337 + iteration) + ddp_input = ddp_input[torch.randperm(len(ddp_input))] + + +def test_distributed_sync(accelerator): + # Test on distributed setup that context manager behaves properly + model, ddp_model, dataloader = get_training_setup(accelerator) + # Use a single batch + ddp_input, ddp_target = next(iter(dataloader)).values() + for iteration in range(3): + # Gather the distributed inputs and targs for the base model + input, target = accelerator.gather((ddp_input, ddp_target)) + input, target = input.to(accelerator.device), target.to(accelerator.device) + # Perform our initial ground truth step in non "DDP" + step_model(model, input, target, accelerator) + # Do "gradient accumulation" (noop) + if iteration % 2 == 0: + # Accumulate grads locally + with accelerator.no_sync(ddp_model): + step_model(ddp_model, ddp_input, ddp_target, accelerator) + else: + # Sync grads + step_model(ddp_model, ddp_input, ddp_target, accelerator) + + # DDP model and model should only be in sync when not (iteration % 2 == 0) + for param, ddp_param in zip(model.parameters(), ddp_model.parameters()): + if not param.requires_grad: + continue + if iteration % 2 == 0: + # Grads should not be in sync + assert ( + torch.allclose(param.grad, ddp_param.grad) is False + ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})" + else: + # Grads should be in sync + assert ( + torch.allclose(param.grad, ddp_param.grad) is True + ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" + + # Shuffle ddp_input on each iteration + torch.manual_seed(1337 + iteration) + ddp_input = ddp_input[torch.randperm(len(ddp_input))] + + +def test_distributed_sync_multiple_fwd(accelerator): + # Test on distributed setup that context manager behaves properly when used with multiple forwards followed by multiple backwards + model, ddp_model, dataloader = get_training_setup(accelerator) + # Do multiple forwards + losses = [] + num_iterations = 3 + for iteration in range(num_iterations): + ddp_input, ddp_target = next(iter(dataloader)).values() + + # Gather the distributed inputs and targs for the base model + input, target = accelerator.gather((ddp_input, ddp_target)) + input, target = input.to(accelerator.device), target.to(accelerator.device) + + # Perform our initial ground truth step in non "DDP" + step_model(model, input, target, accelerator) + + # Accumulate grads locally + with accelerator.no_sync(ddp_model): + ddp_output = ddp_model(ddp_input) + loss = F.mse_loss(ddp_output, ddp_target.to(ddp_output.device)) + losses.append(loss) + + # Do multiple backwards and sync only at the last backward + for iteration in range(num_iterations): + loss = losses[iteration] + + if iteration < num_iterations - 1: + # Accumulate grads locally + accelerator.backward(loss) + + # DDP model and model should only be in sync after last backward + for param, ddp_param in zip(model.parameters(), ddp_model.parameters()): + if not param.requires_grad: + continue + # Grads should not be in sync + assert ( + torch.allclose(param.grad, ddp_param.grad) is False + ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})" + + else: + # Sync grads if last backward + with accelerator.trigger_sync_in_backward(ddp_model): + accelerator.backward(loss) + + # DDP model and model should only be in sync 
after last backward + for param, ddp_param in zip(model.parameters(), ddp_model.parameters()): + if not param.requires_grad: + continue + # Grads should be in sync + assert ( + torch.allclose(param.grad, ddp_param.grad) is True + ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" + + +def test_gradient_accumulation(split_batches=False, dispatch_batches=False): + accelerator = Accelerator( + split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2 + ) + # Test that context manager behaves properly + model, ddp_model, dataloader = get_training_setup(accelerator) + for iteration, batch in enumerate(dataloader): + ddp_input, ddp_target = batch.values() + # Gather the distributed inputs and targs for the base model + input, target = accelerator.gather((ddp_input, ddp_target)) + input, target = input.to(accelerator.device), target.to(accelerator.device) + # Perform our initial ground truth step in non "DDP" + step_model(model, input, target, accelerator, False) + # Do "gradient accumulation" (noop) + with accelerator.accumulate(ddp_model): + step_model(ddp_model, ddp_input, ddp_target, accelerator) + + # DDP model and model should only be in sync when not (iteration % 2 == 0) + for param, ddp_param in zip(model.parameters(), ddp_model.parameters()): + if not param.requires_grad: + continue + if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1): + # Grads should be in sync + assert ( + torch.allclose(param.grad, ddp_param.grad) is True + ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" + else: + # Grads should not be in sync + assert ( + torch.allclose(param.grad, ddp_param.grad) is False + ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})" + + # Shuffle ddp_input on each iteration + torch.manual_seed(1337 + iteration) + ddp_input = ddp_input[torch.randperm(len(ddp_input))] + GradientState._reset_state() + + +def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False): + accelerator = Accelerator( + split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2 + ) + # Test that context manager behaves properly + model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True) + for iteration, batch in enumerate(dataloader): + ddp_input, ddp_target = batch.values() + # Gather the distributed inputs and targs for the base model + input, target = accelerator.gather((ddp_input, ddp_target)) + input, target = input.to(accelerator.device), target.to(accelerator.device) + # Perform our initial ground truth step in non "DDP" + model.train() + ddp_model.train() + step_model(model, input, target, accelerator, False) + opt.step() + + if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)): + if split_batches: + sched.step() + else: + for _ in range(accelerator.num_processes): + sched.step() + opt.zero_grad() + # Perform gradient accumulation under wrapper + with accelerator.accumulate(ddp_model): + step_model(ddp_model, ddp_input, ddp_target, accelerator) + ddp_opt.step() + ddp_sched.step() + ddp_opt.zero_grad() + + # Learning rates should be the same + assert ( + opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] + ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP 
opt: {ddp_opt.param_groups[0]["lr"]}\n' + did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader)) + if accelerator.num_processes > 1: + check_model_parameters(model, ddp_model, did_step, iteration) + # Shuffle ddp_input on each iteration + torch.manual_seed(1337 + iteration) + GradientState._reset_state() + + +def test_dataloader_break(): + accelerator = Accelerator() + + first_dset = RegressionDataset(length=80) + first_dataloader = DataLoader(first_dset, batch_size=16) + second_dset = RegressionDataset(length=96) + second_dataloader = DataLoader(second_dset, batch_size=16) + first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader) + assert accelerator.gradient_state.active_dataloader is None + for iteration, _ in enumerate(first_dataloader): + assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader) + if iteration < len(first_dataloader) - 1: + assert not accelerator.gradient_state.end_of_dataloader + if iteration == 1: + for batch_num, _ in enumerate(second_dataloader): + assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader) + if batch_num < len(second_dataloader) - 1: + assert not accelerator.gradient_state.end_of_dataloader + else: + assert accelerator.gradient_state.end_of_dataloader + else: + assert accelerator.gradient_state.end_of_dataloader + assert accelerator.gradient_state.active_dataloader is None + + +def main(): + accelerator = Accelerator() + state = accelerator.state + if state.local_process_index == 0: + print("**Test `accumulate` gradient accumulation with dataloader break**") + test_dataloader_break() + if state.distributed_type == DistributedType.NO: + if state.local_process_index == 0: + print("**Test NOOP `no_sync` context manager**") + test_noop_sync(accelerator) + if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_CPU): + if state.local_process_index == 0: + print("**Test Distributed `no_sync` context manager**") + test_distributed_sync(accelerator) + if state.local_process_index == 0: + print("**Test Distributed `no_sync` context manager with multiple forwards**") + test_distributed_sync_multiple_fwd(accelerator) + if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_NPU): + for split_batch in [True, False]: + for dispatch_batches in [True, False]: + if state.local_process_index == 0: + print( + "**Test `accumulate` gradient accumulation, ", + f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**", + ) + test_gradient_accumulation(split_batch, dispatch_batches) + + # Currently will break on torch 2.0 +, need to investigate why + if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO: + if state.local_process_index == 0: + print( + "**Test `accumulate` gradient accumulation with optimizer and scheduler, ", + "`split_batches=False`, `dispatch_batches=False`**", + ) + test_gradient_accumulation_with_opt_and_scheduler() + if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_NPU): + for split_batch in [True, False]: + for dispatch_batches in [True, False]: + if not split_batch and not dispatch_batches: + continue + if state.local_process_index == 0: + print( + "**Test `accumulate` gradient accumulation with optimizer and scheduler, ", + f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**", + ) + test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches) + + +def 
_mp_fn(index): + # For xla_spawn (TPUs) + main() + + +if __name__ == "__main__": + main() diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/testing.py b/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/testing.py new file mode 100644 index 0000000000000000000000000000000000000000..40afdac48136f712db7c96a3ebbac4056a2cae13 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/testing.py @@ -0,0 +1,452 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import asyncio +import os +import shutil +import subprocess +import sys +import tempfile +import unittest +from contextlib import contextmanager +from functools import partial +from pathlib import Path +from typing import List, Union +from unittest import mock + +import torch + +from ..state import AcceleratorState, PartialState +from ..utils import ( + gather, + is_bnb_available, + is_comet_ml_available, + is_datasets_available, + is_deepspeed_available, + is_mps_available, + is_safetensors_available, + is_tensorboard_available, + is_timm_available, + is_torch_version, + is_tpu_available, + is_transformers_available, + is_wandb_available, + is_xpu_available, + str_to_bool, +) + + +def parse_flag_from_env(key, default=False): + try: + value = os.environ[key] + except KeyError: + # KEY isn't set, default to `default`. + _value = default + else: + # KEY is set, convert it to True or False. + try: + _value = str_to_bool(value) + except ValueError: + # More values are supported, but let's keep the message simple. + raise ValueError(f"If set, {key} must be yes or no.") + return _value + + +_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False) + + +def skip(test_case): + "Decorator that skips a test unconditionally" + return unittest.skip("Test was skipped")(test_case) + + +def slow(test_case): + """ + Decorator marking a test as slow. Slow tests are skipped by default. Set the RUN_SLOW environment variable to a + truthy value to run them. + """ + return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case) + + +def require_cpu(test_case): + """ + Decorator marking a test that must be only ran on the CPU. These tests are skipped when a GPU is available. + """ + return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case) + + +def require_cuda(test_case): + """ + Decorator marking a test that requires CUDA. These tests are skipped when there are no GPU available. + """ + return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case) + + +def require_xpu(test_case): + """ + Decorator marking a test that requires XPU. These tests are skipped when there are no XPU available. + """ + return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case) + + +def require_mps(test_case): + """ + Decorator marking a test that requires MPS backend. These tests are skipped when torch doesn't support `mps` + backend. 
+ """ + return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case) + + +def require_huggingface_suite(test_case): + """ + Decorator marking a test that requires transformers and datasets. These tests are skipped when they are not. + """ + return unittest.skipUnless( + is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite" + )(test_case) + + +def require_transformers(test_case): + """ + Decorator marking a test that requires transformers. These tests are skipped when they are not. + """ + return unittest.skipUnless(is_transformers_available(), "test requires the transformers library")(test_case) + + +def require_timm(test_case): + """ + Decorator marking a test that requires transformers. These tests are skipped when they are not. + """ + return unittest.skipUnless(is_timm_available(), "test requires the timm library")(test_case) + + +def require_bnb(test_case): + """ + Decorator marking a test that requires bitsandbytes. These tests are skipped when they are not. + """ + return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case) + + +def require_tpu(test_case): + """ + Decorator marking a test that requires TPUs. These tests are skipped when there are no TPUs available. + """ + return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case) + + +def require_single_gpu(test_case): + """ + Decorator marking a test that requires CUDA on a single GPU. These tests are skipped when there are no GPU + available or number of GPUs is more than one. + """ + return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case) + + +def require_single_xpu(test_case): + """ + Decorator marking a test that requires CUDA on a single XPU. These tests are skipped when there are no XPU + available or number of xPUs is more than one. + """ + return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case) + + +def require_multi_gpu(test_case): + """ + Decorator marking a test that requires a multi-GPU setup. These tests are skipped on a machine without multiple + GPUs. + """ + return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case) + + +def require_multi_xpu(test_case): + """ + Decorator marking a test that requires a multi-XPU setup. These tests are skipped on a machine without multiple + XPUs. + """ + return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case) + + +def require_safetensors(test_case): + """ + Decorator marking a test that requires safetensors installed. These tests are skipped when safetensors isn't + installed + """ + return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case) + + +def require_deepspeed(test_case): + """ + Decorator marking a test that requires DeepSpeed installed. These tests are skipped when DeepSpeed isn't installed + """ + return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case) + + +def require_fsdp(test_case): + """ + Decorator marking a test that requires FSDP installed. These tests are skipped when FSDP isn't installed + """ + return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case) + + +def require_torch_min_version(test_case=None, version=None): + """ + Decorator marking that a test requires a particular torch version to be tested. 
These tests are skipped when an + installed torch version is less than the required one. + """ + if test_case is None: + return partial(require_torch_min_version, version=version) + return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case) + + +def require_tensorboard(test_case): + """ + Decorator marking a test that requires tensorboard installed. These tests are skipped when tensorboard isn't + installed + """ + return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case) + + +def require_wandb(test_case): + """ + Decorator marking a test that requires wandb installed. These tests are skipped when wandb isn't installed + """ + return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case) + + +def require_comet_ml(test_case): + """ + Decorator marking a test that requires comet_ml installed. These tests are skipped when comet_ml isn't installed + """ + return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case) + + +_atleast_one_tracker_available = ( + any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() +) + + +def require_trackers(test_case): + """ + Decorator marking that a test requires at least one tracking library installed. These tests are skipped when none + are installed + """ + return unittest.skipUnless( + _atleast_one_tracker_available, + "test requires at least one tracker to be available and for `comet_ml` to not be installed", + )(test_case) + + +class TempDirTestCase(unittest.TestCase): + """ + A TestCase class that keeps a single `tempfile.TemporaryDirectory` open for the duration of the class, wipes its + data at the start of a test, and then destroyes it at the end of the TestCase. + + Useful for when a class or API requires a single constant folder throughout it's use, such as Weights and Biases + + The temporary directory location will be stored in `self.tmpdir` + """ + + clear_on_setup = True + + @classmethod + def setUpClass(cls): + "Creates a `tempfile.TemporaryDirectory` and stores it in `cls.tmpdir`" + cls.tmpdir = tempfile.mkdtemp() + + @classmethod + def tearDownClass(cls): + "Remove `cls.tmpdir` after test suite has finished" + if os.path.exists(cls.tmpdir): + shutil.rmtree(cls.tmpdir) + + def setUp(self): + "Destroy all contents in `self.tmpdir`, but not `self.tmpdir`" + if self.clear_on_setup: + for path in Path(self.tmpdir).glob("**/*"): + if path.is_file(): + path.unlink() + elif path.is_dir(): + shutil.rmtree(path) + + +class AccelerateTestCase(unittest.TestCase): + """ + A TestCase class that will reset the accelerator state at the end of every test. Every test that checks or utilizes + the `AcceleratorState` class should inherit from this to avoid silent failures due to state being shared between + tests. + """ + + def tearDown(self): + super().tearDown() + # Reset the state of the AcceleratorState singleton. + AcceleratorState._reset_state() + PartialState._reset_state() + + +class MockingTestCase(unittest.TestCase): + """ + A TestCase class designed to dynamically add various mockers that should be used in every test, mimicking the + behavior of a class-wide mock when defining one normally will not do. + + Useful when a mock requires specific information available only initialized after `TestCase.setUpClass`, such as + setting an environment variable with that information. 
+ + The `add_mocks` function should be ran at the end of a `TestCase`'s `setUp` function, after a call to + `super().setUp()` such as: + ```python + def setUp(self): + super().setUp() + mocks = mock.patch.dict(os.environ, {"SOME_ENV_VAR", "SOME_VALUE"}) + self.add_mocks(mocks) + ``` + """ + + def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]): + """ + Add custom mocks for tests that should be repeated on each test. Should be called during + `MockingTestCase.setUp`, after `super().setUp()`. + + Args: + mocks (`mock.Mock` or list of `mock.Mock`): + Mocks that should be added to the `TestCase` after `TestCase.setUpClass` has been run + """ + self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks] + for m in self.mocks: + m.start() + self.addCleanup(m.stop) + + +def are_the_same_tensors(tensor): + state = AcceleratorState() + tensor = tensor[None].clone().to(state.device) + tensors = gather(tensor).cpu() + tensor = tensor[0].cpu() + for i in range(tensors.shape[0]): + if not torch.equal(tensors[i], tensor): + return False + return True + + +class _RunOutput: + def __init__(self, returncode, stdout, stderr): + self.returncode = returncode + self.stdout = stdout + self.stderr = stderr + + +async def _read_stream(stream, callback): + while True: + line = await stream.readline() + if line: + callback(line) + else: + break + + +async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput: + if echo: + print("\nRunning: ", " ".join(cmd)) + + p = await asyncio.create_subprocess_exec( + cmd[0], + *cmd[1:], + stdin=stdin, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + env=env, + ) + + # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe + # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait + # + # If it starts hanging, will need to switch to the following code. The problem is that no data + # will be seen until it's done and if it hangs for example there will be no debug info. + # out, err = await p.communicate() + # return _RunOutput(p.returncode, out, err) + + out = [] + err = [] + + def tee(line, sink, pipe, label=""): + line = line.decode("utf-8").rstrip() + sink.append(line) + if not quiet: + print(label, line, file=pipe) + + # XXX: the timeout doesn't seem to make any difference here + await asyncio.wait( + [ + asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))), + asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))), + ], + timeout=timeout, + ) + return _RunOutput(await p.wait(), out, err) + + +def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput: + loop = asyncio.get_event_loop() + result = loop.run_until_complete( + _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo) + ) + + cmd_str = " ".join(cmd) + if result.returncode > 0: + stderr = "\n".join(result.stderr) + raise RuntimeError( + f"'{cmd_str}' failed with returncode {result.returncode}\n\n" + f"The combined stderr from workers follows:\n{stderr}" + ) + + return result + + +class SubprocessCallException(Exception): + pass + + +def run_command(command: List[str], return_stdout=False): + """ + Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. 
Will also properly capture + if an error occured while running `command` + """ + try: + output = subprocess.check_output(command, stderr=subprocess.STDOUT) + if return_stdout: + if hasattr(output, "decode"): + output = output.decode("utf-8") + return output + except subprocess.CalledProcessError as e: + raise SubprocessCallException( + f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}" + ) from e + + +@contextmanager +def assert_exception(exception_class: Exception, msg: str = None) -> bool: + """ + Context manager to assert that the right `Exception` class was raised. + + If `msg` is provided, will check that the message is contained in the raised exception. + """ + was_ran = False + try: + yield + was_ran = True + except Exception as e: + assert isinstance(e, exception_class), f"Expected exception of type {exception_class} but got {type(e)}" + if msg is not None: + assert msg in str(e), f"Expected message '{msg}' to be in exception but got '{str(e)}'" + if was_ran: + raise AssertionError(f"Expected exception of type {exception_class} but ran without issue.") diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/training.py b/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/training.py new file mode 100644 index 0000000000000000000000000000000000000000..7cda5927974b96519f6cd01d4e2111804422a993 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/accelerate/test_utils/training.py @@ -0,0 +1,101 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import numpy as np +import torch +from torch.utils.data import DataLoader + +from accelerate.utils.dataclasses import DistributedType + + +class RegressionDataset: + def __init__(self, a=2, b=3, length=64, seed=None): + rng = np.random.default_rng(seed) + self.length = length + self.x = rng.normal(size=(length,)).astype(np.float32) + self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32) + + def __len__(self): + return self.length + + def __getitem__(self, i): + return {"x": self.x[i], "y": self.y[i]} + + +class RegressionModel4XPU(torch.nn.Module): + def __init__(self, a=0, b=0, double_output=False): + super().__init__() + self.a = torch.nn.Parameter(torch.tensor([2, 3]).float()) + self.b = torch.nn.Parameter(torch.tensor([2, 3]).float()) + self.first_batch = True + + def forward(self, x=None): + if self.first_batch: + print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}") + self.first_batch = False + return x * self.a[0] + self.b[0] + + +class RegressionModel(torch.nn.Module): + def __init__(self, a=0, b=0, double_output=False): + super().__init__() + self.a = torch.nn.Parameter(torch.tensor(a).float()) + self.b = torch.nn.Parameter(torch.tensor(b).float()) + self.first_batch = True + + def forward(self, x=None): + if self.first_batch: + print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. 
Input dtype: {x.dtype}") + self.first_batch = False + return x * self.a + self.b + + +def mocked_dataloaders(accelerator, batch_size: int = 16): + from datasets import load_dataset + from transformers import AutoTokenizer + + tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") + data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"} + datasets = load_dataset("csv", data_files=data_files) + label_list = datasets["train"].unique("label") + + label_to_id = {v: i for i, v in enumerate(label_list)} + + def tokenize_function(examples): + # max_length=None => use the model max length (it's actually the default) + outputs = tokenizer( + examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length" + ) + if "label" in examples: + outputs["labels"] = [label_to_id[l] for l in examples["label"]] + return outputs + + # Apply the method we just defined to all the examples in all the splits of the dataset + tokenized_datasets = datasets.map( + tokenize_function, + batched=True, + remove_columns=["sentence1", "sentence2", "label"], + ) + + def collate_fn(examples): + # On TPU it's best to pad everything to the same length or training will be very slow. + if accelerator.distributed_type == DistributedType.TPU: + return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") + return tokenizer.pad(examples, padding="longest", return_tensors="pt") + + # Instantiate dataloaders. + train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2) + eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1) + + return train_dataloader, eval_dataloader diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/utils/deepspeed.py b/evalkit_tf437/lib/python3.10/site-packages/accelerate/utils/deepspeed.py new file mode 100644 index 0000000000000000000000000000000000000000..e22cd1080ee4e678fd7d1a6d6e6b08211b0db82d --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/accelerate/utils/deepspeed.py @@ -0,0 +1,272 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import base64 +import io +import json +import os +from copy import deepcopy + +from ..optimizer import AcceleratedOptimizer +from ..scheduler import AcceleratedScheduler + + +class HfDeepSpeedConfig: + """ + This object contains a DeepSpeed configuration dictionary and can be quickly queried for things like zero stage. + + A `weakref` of this object is stored in the module's globals to be able to access the config from areas where + things like the Trainer object is not available (e.g. `from_pretrained` and `_get_resized_embeddings`). Therefore + it's important that this object remains alive while the program is still running. + + [`Trainer`] uses the `HfTrainerDeepSpeedConfig` subclass instead. 
That subclass has logic to sync the configuration + with values of [`TrainingArguments`] by replacing special placeholder values: `"auto"`. Without this special logic + the DeepSpeed configuration is not modified in any way. + + Args: + config_file_or_dict (`Union[str, Dict]`): path to DeepSpeed config file or dict. + + """ + + def __init__(self, config_file_or_dict): + if isinstance(config_file_or_dict, dict): + # Don't modify user's data should they want to reuse it (e.g. in tests), because once we + # modified it, it will not be accepted here again, since `auto` values would have been overridden + config = deepcopy(config_file_or_dict) + elif os.path.exists(config_file_or_dict): + with io.open(config_file_or_dict, "r", encoding="utf-8") as f: + config = json.load(f) + else: + try: + config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8") + config = json.loads(config_decoded) + except (UnicodeDecodeError, AttributeError, ValueError): + raise ValueError( + f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}" + ) + + self.config = config + + self.set_stage_and_offload() + + def set_stage_and_offload(self): + # zero stage - this is done as early as possible, before model is created, to allow + # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object + # during ``zero.Init()`` which needs to know the dtype, and some other hparams. + self._stage = self.get_value("zero_optimization.stage", -1) + + # offload + self._offload = False + if self.is_zero2() or self.is_zero3(): + offload_devices_valid = set(["cpu", "nvme"]) + offload_devices = set( + [ + self.get_value("zero_optimization.offload_optimizer.device"), + self.get_value("zero_optimization.offload_param.device"), + ] + ) + if len(offload_devices & offload_devices_valid) > 0: + self._offload = True + + def find_config_node(self, ds_key_long): + config = self.config + + # find the config node of interest if it exists + nodes = ds_key_long.split(".") + ds_key = nodes.pop() + for node in nodes: + config = config.get(node) + if config is None: + return None, ds_key + + return config, ds_key + + def get_value(self, ds_key_long, default=None): + """ + Returns the set value or `default` if no value is set + """ + config, ds_key = self.find_config_node(ds_key_long) + if config is None: + return default + return config.get(ds_key, default) + + def del_config_sub_tree(self, ds_key_long, must_exist=False): + """ + Deletes a sub-section of the config file if it's found. + + Unless `must_exist` is `True` the section doesn't have to exist. + """ + config = self.config + + # find the config node of interest if it exists + nodes = ds_key_long.split(".") + for node in nodes: + parent_config = config + config = config.get(node) + if config is None: + if must_exist: + raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}") + else: + return + + # if found remove it + if parent_config is not None: + parent_config.pop(node) + + def is_true(self, ds_key_long): + """ + Returns `True`/``False` only if the value is set, always `False` otherwise. So use this method to ask the very + specific question of whether the value is set to `True` (and it's not set to `False`` or isn't set). + + """ + value = self.get_value(ds_key_long) + return False if value is None else bool(value) + + def is_false(self, ds_key_long): + """ + Returns `True`/``False` only if the value is set, always `False` otherwise. 
So use this method to ask the very + specific question of whether the value is set to `False` (and it's not set to `True`` or isn't set). + """ + value = self.get_value(ds_key_long) + return False if value is None else not bool(value) + + def is_zero2(self): + return self._stage == 2 + + def is_zero3(self): + return self._stage == 3 + + def is_offload(self): + return self._offload + + +class DeepSpeedEngineWrapper: + """ + Internal wrapper for deepspeed.runtime.engine.DeepSpeedEngine. This is used to follow conventional training loop. + + Args: + engine (deepspeed.runtime.engine.DeepSpeedEngine): deepspeed engine to wrap + """ + + def __init__(self, engine): + self.engine = engine + + def backward(self, loss, **kwargs): + # runs backpropagation and handles mixed precision + self.engine.backward(loss, **kwargs) + + # Deepspeed's `engine.step` performs the following operations: + # - gradient accumulation check + # - gradient clipping + # - optimizer step + # - zero grad + # - checking overflow + # - lr_scheduler step (only if engine.lr_scheduler is not None) + self.engine.step() + # and this plugin overrides the above calls with no-ops when Accelerate runs under + # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple + # training loop that works transparently under many training regimes. + + +class DeepSpeedOptimizerWrapper(AcceleratedOptimizer): + """ + Internal wrapper around a deepspeed optimizer. + + Args: + optimizer (`torch.optim.optimizer.Optimizer`): + The optimizer to wrap. + """ + + def __init__(self, optimizer): + super().__init__(optimizer, device_placement=False, scaler=None) + self.__has_overflow__ = hasattr(self.optimizer, "overflow") + + def zero_grad(self, set_to_none=None): + pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed + + def step(self): + pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed + + @property + def step_was_skipped(self): + """Whether or not the optimizer step was done, or skipped because of gradient overflow.""" + if self.__has_overflow__: + return self.optimizer.overflow + return False + + +class DeepSpeedSchedulerWrapper(AcceleratedScheduler): + """ + Internal wrapper around a deepspeed scheduler. + + Args: + scheduler (`torch.optim.lr_scheduler.LambdaLR`): + The scheduler to wrap. + optimizers (one or a list of `torch.optim.Optimizer`): + """ + + def __init__(self, scheduler, optimizers): + super().__init__(scheduler, optimizers) + + def step(self): + pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed + + +class DummyOptim: + """ + Dummy optimizer presents model parameters or param groups, this is primarily used to follow conventional training + loop when optimizer config is specified in the deepspeed config file. + + Args: + lr (float): + Learning rate. + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + weight_decay (float): + Weight decay. + **kwargs: + Other arguments. + """ + + def __init__(self, params, lr=0.001, weight_decay=0, **kwargs): + self.params = params + self.lr = lr + self.weight_decay = weight_decay + self.kwargs = kwargs + + +class DummyScheduler: + """ + Dummy scheduler presents model parameters or param groups, this is primarily used to follow conventional training + loop when scheduler config is specified in the deepspeed config file. 
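`HfDeepSpeedConfig.get_value` above resolves dotted keys such as `"zero_optimization.stage"` by walking the nested config dict one segment at a time and falling back to a default when any segment is missing. A self-contained sketch of that behaviour on a hand-written config (the values are made up for illustration, and this is a re-implementation, not the library code itself):

```python
config = {
    "zero_optimization": {
        "stage": 3,
        "offload_param": {"device": "cpu"},
    }
}


def get_value(cfg, ds_key_long, default=None):
    # Split "a.b.c" into intermediate nodes plus the final key, then walk the dict.
    *nodes, ds_key = ds_key_long.split(".")
    for node in nodes:
        cfg = cfg.get(node)
        if cfg is None:
            return default
    return cfg.get(ds_key, default)


assert get_value(config, "zero_optimization.stage", -1) == 3
assert get_value(config, "zero_optimization.offload_param.device") == "cpu"
assert get_value(config, "zero_optimization.offload_optimizer.device") is None
```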
+ + Args: + optimizer (`torch.optim.optimizer.Optimizer`): + The optimizer to wrap. + total_num_steps (int, *optional*): + Total number of steps. + warmup_num_steps (int, *optional*): + Number of steps for warmup. + lr_scheduler_callable (callable, *optional*): + A callable function that creates an LR Scheduler. It accepts only one argument `optimizer`. + **kwargs: + Other arguments. + """ + + def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, lr_scheduler_callable=None, **kwargs): + self.optimizer = optimizer + self.total_num_steps = total_num_steps + self.warmup_num_steps = warmup_num_steps + self.lr_scheduler_callable = lr_scheduler_callable + self.kwargs = kwargs diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/utils/offload.py b/evalkit_tf437/lib/python3.10/site-packages/accelerate/utils/offload.py new file mode 100644 index 0000000000000000000000000000000000000000..ca6efc080dcc070c8ed1636357479de1f2c3ee44 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/accelerate/utils/offload.py @@ -0,0 +1,211 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import os +from collections.abc import Mapping +from typing import Dict, List, Optional, Union + +import numpy as np +import torch + +from .imports import is_safetensors_available + + +def offload_weight(weight, weight_name, offload_folder, index=None): + dtype = None + # Check the string instead of the dtype to be compatible with versions of PyTorch that don't have bfloat16. + if str(weight.dtype) == "torch.bfloat16": + # Need to reinterpret the underlined data as int16 since NumPy does not handle bfloat16s. 
+ weight = weight.view(torch.int16) + dtype = "bfloat16" + array = weight.cpu().numpy() + tensor_file = os.path.join(offload_folder, f"{weight_name}.dat") + if index is not None: + if dtype is None: + dtype = str(array.dtype) + index[weight_name] = {"dtype": dtype, "shape": list(array.shape)} + if array.ndim == 0: + array = array[None] + file_array = np.memmap(tensor_file, dtype=array.dtype, mode="w+", shape=array.shape) + file_array[:] = array[:] + file_array.flush() + return index + + +def load_offloaded_weight(weight_file, weight_info): + shape = tuple(weight_info["shape"]) + if shape == (): + # NumPy memory-mapped arrays can't have 0 dims so it was saved as 1d tensor + shape = (1,) + + dtype = weight_info["dtype"] + if dtype == "bfloat16": + # NumPy does not support bfloat16 so this was saved as a int16 + dtype = "int16" + + weight = np.memmap(weight_file, dtype=dtype, shape=shape, mode="r") + + if len(weight_info["shape"]) == 0: + weight = weight[0] + weight = torch.tensor(weight) + if weight_info["dtype"] == "bfloat16": + weight = weight.view(torch.bfloat16) + + return weight + + +def save_offload_index(index, offload_folder): + if index is None or len(index) == 0: + # Nothing to save + return + + offload_index_file = os.path.join(offload_folder, "index.json") + if os.path.isfile(offload_index_file): + with open(offload_index_file, "r", encoding="utf-8") as f: + current_index = json.load(f) + else: + current_index = {} + current_index.update(index) + + with open(offload_index_file, "w", encoding="utf-8") as f: + json.dump(current_index, f, indent=2) + + +def offload_state_dict(save_dir: Union[str, os.PathLike], state_dict: Dict[str, torch.Tensor]): + """ + Offload a state dict in a given folder. + + Args: + save_dir (`str` or `os.PathLike`): + The directory in which to offload the state dict. + state_dict (`Dict[str, torch.Tensor]`): + The dictionary of tensors to offload. + """ + os.makedirs(save_dir, exist_ok=True) + index = {} + for name, parameter in state_dict.items(): + index = offload_weight(parameter, name, save_dir, index=index) + + # Update index + save_offload_index(index, save_dir) + + +class PrefixedDataset(Mapping): + """ + Will access keys in a given dataset by adding a prefix. + + Args: + dataset (`Mapping`): Any map with string keys. + prefix (`str`): A prefix to add when trying to access any element in the underlying dataset. + """ + + def __init__(self, dataset: Mapping, prefix: str): + self.dataset = dataset + self.prefix = prefix + + def __getitem__(self, key): + return self.dataset[f"{self.prefix}{key}"] + + def __iter__(self): + return iter([key for key in self.dataset if key.startswith(self.prefix)]) + + def __len__(self): + return len(self.dataset) + + +class OffloadedWeightsLoader(Mapping): + """ + A collection that loads weights stored in a given state dict or memory-mapped on disk. + + Args: + state_dict (`Dict[str, torch.Tensor]`, *optional*): + A dictionary parameter name to tensor. + save_folder (`str` or `os.PathLike`, *optional*): + The directory in which the weights are stored (by `offload_state_dict` for instance). + index (`Dict`, *optional*): + A dictionary from weight name to their information (`dtype`/ `shape` or safetensors filename). Will default + to the index saved in `save_folder`. 
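The bfloat16 handling above relies on bfloat16 and int16 sharing the same 16-bit width: the raw bits are reinterpreted as int16 so NumPy can memory-map them, then reinterpreted back to bfloat16 after loading. A standalone round-trip sketch, assuming a PyTorch version that supports same-width dtype views:

```python
import torch

t = torch.randn(3, dtype=torch.bfloat16)
as_int16 = t.view(torch.int16).numpy()              # reinterpret bits, no value conversion
restored = torch.from_numpy(as_int16).view(torch.bfloat16)
assert torch.equal(t, restored)                     # bit-for-bit identical
```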
+ """ + + def __init__( + self, + state_dict: Dict[str, torch.Tensor] = None, + save_folder: Optional[Union[str, os.PathLike]] = None, + index: Mapping = None, + device=None, + ): + if state_dict is None and save_folder is None and index is None: + raise ValueError("Need either a `state_dict`, a `save_folder` or an `index` containing offloaded weights.") + + self.state_dict = {} if state_dict is None else state_dict + self.save_folder = save_folder + if index is None and save_folder is not None: + with open(os.path.join(save_folder, "index.json")) as f: + index = json.load(f) + self.index = {} if index is None else index + self.all_keys = list(self.state_dict.keys()) + self.all_keys.extend([key for key in self.index if key not in self.all_keys]) + self.device = device + + def __getitem__(self, key: str): + # State dict gets priority + if key in self.state_dict: + return self.state_dict[key] + weight_info = self.index[key] + if weight_info.get("safetensors_file") is not None: + if not is_safetensors_available(): + raise ImportError("These offloaded weights require the use of safetensors: `pip install safetensors`.") + + from safetensors import safe_open + + device = "cpu" if self.device is None else self.device + with safe_open(weight_info["safetensors_file"], framework="pt", device=device) as f: + tensor = f.get_tensor(weight_info.get("weight_name", key)) + + if "dtype" in weight_info: + return tensor.to(getattr(torch, weight_info["dtype"])) + else: + return tensor + + weight_file = os.path.join(self.save_folder, f"{key}.dat") + return load_offloaded_weight(weight_file, weight_info) + + def __iter__(self): + return iter(self.all_keys) + + def __len__(self): + return len(self.all_keys) + + +def extract_submodules_state_dict(state_dict: Dict[str, torch.Tensor], submodule_names: List[str]): + """ + Extract the sub state-dict corresponding to a list of given submodules. + + Args: + state_dict (`Dict[str, torch.Tensor]`): The state dict to extract from. + submodule_names (`List[str]`): The list of submodule names we want to extract. + """ + result = {} + for module_name in submodule_names: + # We want to catch module_name parameter (module_name.xxx) or potentially module_name, but not any of the + # submodules that could being like module_name (transformers.h.1 and transformers.h.10 for instance) + result.update( + { + key: param + for key, param in state_dict.items() + if key == module_name or key.startswith(module_name + ".") + } + ) + return result diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/utils/other.py b/evalkit_tf437/lib/python3.10/site-packages/accelerate/utils/other.py new file mode 100644 index 0000000000000000000000000000000000000000..6d7608f086dc41245329e27849db483fdfcef22a --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/accelerate/utils/other.py @@ -0,0 +1,248 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
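As a usage sketch of the offload helpers defined above: `offload_state_dict` writes each tensor to a `.dat` memmap plus an `index.json`, and `OffloadedWeightsLoader` reads them back lazily. The folder and tensor names below are hypothetical:

```python
import tempfile

import torch

from accelerate.utils.offload import OffloadedWeightsLoader, offload_state_dict

state_dict = {"linear.weight": torch.randn(4, 4), "linear.bias": torch.zeros(4)}
offload_dir = tempfile.mkdtemp()          # throwaway folder for the memmapped weights
offload_state_dict(offload_dir, state_dict)

# The weights now live on disk; the loader materialises each one on access.
loader = OffloadedWeightsLoader(save_folder=offload_dir)
assert torch.allclose(loader["linear.weight"], state_dict["linear.weight"])
```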
+ +import os +import socket +from contextlib import contextmanager +from types import MethodType + +import torch + +from ..commands.config.default import write_basic_config # noqa: F401 +from ..state import PartialState +from .constants import FSDP_PYTORCH_VERSION +from .dataclasses import DistributedType +from .imports import is_deepspeed_available, is_safetensors_available, is_tpu_available +from .transformer_engine import convert_model +from .versions import is_torch_version + + +if is_tpu_available(check_device=False): + import torch_xla.core.xla_model as xm + +if is_safetensors_available(): + from safetensors.torch import save_file as safe_save_file + + +def is_compiled_module(module): + """ + Check whether the module was compiled with torch.compile() + """ + if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"): + return False + return isinstance(module, torch._dynamo.eval_frame.OptimizedModule) + + +def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True): + """ + Extract a model from its distributed containers. + + Args: + model (`torch.nn.Module`): + The model to extract. + keep_fp32_wrapper (`bool`, *optional*): + Whether to remove mixed precision hooks from the model. + + Returns: + `torch.nn.Module`: The extracted model. + """ + options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) + + is_compiled = is_compiled_module(model) + if is_compiled: + compiled_model = model + model = model._orig_mod + + if is_deepspeed_available(): + from deepspeed import DeepSpeedEngine + + options += (DeepSpeedEngine,) + + if is_torch_version(">=", FSDP_PYTORCH_VERSION): + from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP + + options += (FSDP,) + + while isinstance(model, options): + model = model.module + + if not keep_fp32_wrapper: + forward = getattr(model, "forward") + original_forward = model.__dict__.pop("_original_forward", None) + if original_forward is not None: + while hasattr(forward, "__wrapped__"): + forward = forward.__wrapped__ + if forward == original_forward: + break + model.forward = MethodType(forward, model) + if getattr(model, "_converted_to_transformer_engine", False): + convert_model(model, to_transformer_engine=False) + + if is_compiled: + compiled_model._orig_mod = model + model = compiled_model + + return model + + +def wait_for_everyone(): + """ + Introduces a blocking point in the script, making sure all processes have reached this point before continuing. + + + + Make sure all processes will reach this instruction otherwise one of your processes will hang forever. + + + """ + PartialState().wait_for_everyone() + + +def save(obj, f, safe_serialization=False): + """ + Save the data to disk. Use in place of `torch.save()`. + + Args: + obj: The data to save + f: The file (or file-like object) to use to save the data + safe_serialization (`bool`, *optional*, defaults to `False`): Whether to save `obj` using `safetensors` + """ + if PartialState().distributed_type == DistributedType.TPU: + xm.save(obj, f) + elif PartialState().local_process_index == 0: + if safe_serialization: + safe_save_file(obj, f, metadata={"format": "pt"}) + else: + torch.save(obj, f) + + +@contextmanager +def clear_environment(): + """ + A context manager that will cache origin `os.environ` and replace it with a empty dictionary in this context. + + When this context exits, the cached `os.environ` will be back. 
+ + Example: + + ```python + >>> import os + >>> from accelerate.utils import clear_environment + + >>> os.environ["FOO"] = "bar" + >>> with clear_environment(): + ... print(os.environ) + ... os.environ["FOO"] = "new_bar" + ... print(os.environ["FOO"]) + {} + new_bar + + >>> print(os.environ["FOO"]) + bar + ``` + """ + _old_os_environ = os.environ + os.environ = dict() + + yield + + os.environ = _old_os_environ + + +@contextmanager +def patch_environment(**kwargs): + """ + A context manager that will add each keyword argument passed to `os.environ` and remove them when exiting. + + Will convert the values in `kwargs` to strings and upper-case all the keys. + + Example: + + ```python + >>> import os + >>> from accelerate.utils import patch_environment + + >>> with patch_environment(FOO="bar"): + ... print(os.environ["FOO"]) # prints "bar" + >>> print(os.environ["FOO"]) # raises KeyError + ``` + """ + existing_vars = {} + for key, value in kwargs.items(): + key = key.upper() + if key in os.environ: + existing_vars[key] = os.environ[key] + os.environ[key] = str(value) + + yield + + for key in kwargs: + key = key.upper() + if key in existing_vars: + # restore previous value + os.environ[key] = existing_vars[key] + else: + os.environ.pop(key, None) + + +def get_pretty_name(obj): + """ + Gets a pretty name from `obj`. + """ + if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"): + obj = getattr(obj, "__class__", obj) + if hasattr(obj, "__qualname__"): + return obj.__qualname__ + if hasattr(obj, "__name__"): + return obj.__name__ + return str(obj) + + +def merge_dicts(source, destination): + """ + Recursively merges two dictionaries. + + Args: + source (`dict`): The dictionary to merge into `destination`. + destination (`dict`): The dictionary to merge `source` into. + """ + for key, value in source.items(): + if isinstance(value, dict): + node = destination.setdefault(key, {}) + merge_dicts(value, node) + else: + destination[key] = value + + return destination + + +def is_port_in_use(port: int = None) -> bool: + """ + Checks if a port is in use on `localhost`. Useful for checking if multiple `accelerate launch` commands have been + run and need to see if the port is already in use. + """ + if port is None: + port = 29500 + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + return s.connect_ex(("localhost", port)) == 0 + + +def convert_bytes(size): + "Converts `size` from bytes to the largest possible unit" + for x in ["bytes", "KB", "MB", "GB", "TB"]: + if size < 1024.0: + return f"{round(size, 2)} {x}" + size /= 1024.0 + + return f"{round(size, 2)} PB" diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/utils/random.py b/evalkit_tf437/lib/python3.10/site-packages/accelerate/utils/random.py new file mode 100644 index 0000000000000000000000000000000000000000..b61238d25225580f52b6e947f231fd9895115323 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/accelerate/utils/random.py @@ -0,0 +1,111 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
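`convert_bytes` above keeps dividing by 1024 until the value drops below 1024, then formats it with the matching unit. Two hand-checked data points, importing from the module path added in this diff:

```python
from accelerate.utils.other import convert_bytes

assert convert_bytes(1_048_576) == "1.0 MB"     # 1024 * 1024 bytes
print(convert_bytes(3_000_000_000))             # roughly "2.79 GB"
```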
+# See the License for the specific language governing permissions and +# limitations under the License. + +import random +from typing import List, Optional, Union + +import numpy as np +import torch + +from ..state import AcceleratorState +from .constants import CUDA_DISTRIBUTED_TYPES +from .dataclasses import DistributedType, RNGType +from .imports import is_npu_available, is_tpu_available, is_xpu_available + + +if is_tpu_available(check_device=False): + import torch_xla.core.xla_model as xm + + +def set_seed(seed: int, device_specific: bool = False): + """ + Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch`. + + Args: + seed (`int`): + The seed to set. + device_specific (`bool`, *optional*, defaults to `False`): + Whether to differ the seed on each device slightly with `self.process_index`. + """ + if device_specific: + seed += AcceleratorState().process_index + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if is_xpu_available(): + torch.xpu.manual_seed_all(seed) + elif is_npu_available(): + torch.npu.manual_seed_all(seed) + else: + torch.cuda.manual_seed_all(seed) + # ^^ safe to call this function even if cuda is not available + if is_tpu_available(): + xm.set_rng_state(seed) + + +def synchronize_rng_state(rng_type: Optional[RNGType] = None, generator: Optional[torch.Generator] = None): + # Get the proper rng state + if rng_type == RNGType.TORCH: + rng_state = torch.get_rng_state() + elif rng_type == RNGType.CUDA: + rng_state = torch.cuda.get_rng_state() + elif rng_type == RNGType.XLA: + assert is_tpu_available(), "Can't synchronize XLA seeds on an environment without TPUs." + rng_state = torch.tensor(xm.get_rng_state()) + elif rng_type == RNGType.NPU: + assert is_npu_available(), "Can't synchronize NPU seeds on an environment without NPUs." + rng_state = torch.npu.get_rng_state() + elif rng_type == RNGType.XPU: + assert is_xpu_available(), "Can't synchronize XPU seeds on an environment without XPUs." + rng_state = torch.xpu.get_rng_state() + elif rng_type == RNGType.GENERATOR: + assert generator is not None, "Need a generator to synchronize its seed." 
+ rng_state = generator.get_state() + + # Broadcast the rng state from device 0 to other devices + state = AcceleratorState() + if state.distributed_type == DistributedType.TPU: + rng_state = rng_state.to(xm.xla_device()) + xm.collective_broadcast([rng_state]) + xm.mark_step() + rng_state = rng_state.cpu() + elif ( + state.distributed_type in CUDA_DISTRIBUTED_TYPES + or state.distributed_type == DistributedType.MULTI_NPU + or state.distributed_type == DistributedType.MULTI_XPU + ): + rng_state = rng_state.to(state.device) + torch.distributed.broadcast(rng_state, 0) + rng_state = rng_state.cpu() + elif state.distributed_type == DistributedType.MULTI_CPU: + torch.distributed.broadcast(rng_state, 0) + + # Set the broadcast rng state + if rng_type == RNGType.TORCH: + torch.set_rng_state(rng_state) + elif rng_type == RNGType.CUDA: + torch.cuda.set_rng_state(rng_state) + elif rng_type == RNGType.NPU: + torch.npu.set_rng_state(rng_state) + elif rng_type == RNGType.XPU: + torch.xpu.set_rng_state(rng_state) + elif rng_type == RNGType.XLA: + xm.set_rng_state(rng_state.item()) + elif rng_type == RNGType.GENERATOR: + generator.set_state(rng_state) + + +def synchronize_rng_states(rng_types: List[Union[str, RNGType]], generator: Optional[torch.Generator] = None): + for rng_type in rng_types: + synchronize_rng_state(RNGType(rng_type), generator=generator) diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/utils/tqdm.py b/evalkit_tf437/lib/python3.10/site-packages/accelerate/utils/tqdm.py new file mode 100644 index 0000000000000000000000000000000000000000..dda99c990cb1c6f2b43e4302e526df4e6962072a --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/accelerate/utils/tqdm.py @@ -0,0 +1,37 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .imports import is_tqdm_available + + +if is_tqdm_available(): + from tqdm.auto import tqdm as _tqdm + +from ..state import PartialState + + +def tqdm(main_process_only: bool = True, *args, **kwargs): + """ + Wrapper around `tqdm.tqdm` that optionally displays only on the main process. + + Args: + main_process_only (`bool`, *optional*): + Whether to display the progress bar only on the main process + """ + if not is_tqdm_available(): + raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.") + disable = False + if main_process_only: + disable = PartialState().local_process_index == 0 + return _tqdm(*args, **kwargs, disable=disable) diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/utils/transformer_engine.py b/evalkit_tf437/lib/python3.10/site-packages/accelerate/utils/transformer_engine.py new file mode 100644 index 0000000000000000000000000000000000000000..a6342d7150f1abf083bed7a19c2a5e299e29e3e3 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/accelerate/utils/transformer_engine.py @@ -0,0 +1,84 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. 
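`set_seed` above seeds Python's `random`, NumPy, and every available torch backend in one call; with `device_specific=True` the seed is additionally offset by the process index so each worker gets a distinct but reproducible stream. A minimal usage sketch (distributed process setup is omitted and assumed to be in place for the second call):

```python
from accelerate.utils import set_seed

set_seed(42)                        # identical RNG streams on every process
set_seed(42, device_specific=True)  # effective seed becomes 42 + process_index per process
```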
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch.nn as nn + +from .imports import is_fp8_available + + +if is_fp8_available(): + import transformer_engine.pytorch as te + + +def convert_model(model, to_transformer_engine=True, _convert_linear=True, _convert_ln=True): + """ + Recursively converts the linear and layernorm layers of a model to their `transformers_engine` counterpart. + """ + if not is_fp8_available(): + raise ImportError("Using `convert_model` requires transformer_engine to be installed.") + for name, module in model.named_children(): + if isinstance(module, nn.Linear) and to_transformer_engine and _convert_linear: + # Return early if the linear layer weights are not multiples of 16 + if any(p % 16 != 0 for p in module.weight.shape): + return + has_bias = module.bias is not None + te_module = te.Linear( + module.in_features, module.out_features, bias=has_bias, params_dtype=module.weight.dtype + ) + te_module.weight.data = module.weight.data.clone() + if has_bias: + te_module.bias.data = module.bias.data.clone() + + setattr(model, name, te_module) + elif isinstance(module, nn.LayerNorm) and to_transformer_engine and _convert_ln: + te_module = te.LayerNorm(module.normalized_shape[0], eps=module.eps, params_dtype=module.weight.dtype) + te_module.weight.data = module.weight.data.clone() + te_module.bias.data = module.bias.data.clone() + + setattr(model, name, te_module) + elif isinstance(module, te.Linear) and not to_transformer_engine and _convert_linear: + has_bias = module.bias is not None + new_module = nn.Linear( + module.in_features, module.out_features, bias=has_bias, params_dtype=module.weight.dtype + ) + new_module.weight.data = module.weight.data.clone() + if has_bias: + new_module.bias.data = module.bias.data.clone() + + setattr(model, name, new_module) + elif isinstance(module, te.LayerNorm) and not to_transformer_engine and _convert_ln: + new_module = nn.LayerNorm(module.normalized_shape[0], eps=module.eps, params_dtype=module.weight.dtype) + new_module.weight.data = module.weight.data.clone() + new_module.bias.data = module.bias.data.clone() + + setattr(model, name, new_module) + else: + convert_model( + module, + to_transformer_engine=to_transformer_engine, + _convert_linear=_convert_linear, + _convert_ln=_convert_ln, + ) + + +def has_transformer_engine_layers(model): + """ + Returns whether a given model has some `transformer_engine` layer or not. 
+ """ + if not is_fp8_available(): + raise ImportError("Using `has_transformer_engine_layers` requires transformer_engine to be installed.") + for m in model.modules(): + if isinstance(m, (te.LayerNorm, te.Linear)): + return True + return False diff --git a/evalkit_tf437/lib/python3.10/site-packages/accelerate/utils/versions.py b/evalkit_tf437/lib/python3.10/site-packages/accelerate/utils/versions.py new file mode 100644 index 0000000000000000000000000000000000000000..985c918f0e057bacc70c372f6906071bb73db577 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/accelerate/utils/versions.py @@ -0,0 +1,56 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import importlib.metadata +from typing import Union + +from packaging.version import Version, parse + +from .constants import STR_OPERATION_TO_FUNC + + +torch_version = parse(importlib.metadata.version("torch")) + + +def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str): + """ + Compares a library version to some requirement using a given operation. + + Args: + library_or_version (`str` or `packaging.version.Version`): + A library name or a version to check. + operation (`str`): + A string representation of an operator, such as `">"` or `"<="`. + requirement_version (`str`): + The version to compare the library version against + """ + if operation not in STR_OPERATION_TO_FUNC.keys(): + raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}") + operation = STR_OPERATION_TO_FUNC[operation] + if isinstance(library_or_version, str): + library_or_version = parse(importlib.metadata.version(library_or_version)) + return operation(library_or_version, parse(requirement_version)) + + +def is_torch_version(operation: str, version: str): + """ + Compares the current PyTorch version to a given reference with an operation. + + Args: + operation (`str`): + A string representation of an operator, such as `">"` or `"<="` + version (`str`): + A string version of PyTorch + """ + return compare_versions(torch_version, operation, version) diff --git a/evalkit_tf437/lib/python3.10/site-packages/jedi-0.19.1.dist-info/LICENSE.txt b/evalkit_tf437/lib/python3.10/site-packages/jedi-0.19.1.dist-info/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..94f954567b1c4a33c601b6bf3688ec08b695cbc0 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/jedi-0.19.1.dist-info/LICENSE.txt @@ -0,0 +1,24 @@ +All contributions towards Jedi are MIT licensed. 
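A quick usage sketch of the version helpers defined above; both resolve the installed version via `importlib.metadata` and compare it with the requested operator:

```python
from accelerate.utils.versions import compare_versions, is_torch_version

# Compare the installed torch against a pinned requirement.
if is_torch_version(">=", "2.0.0"):
    print("running on torch 2.x or newer")

# compare_versions also accepts a library name (or an already-parsed Version).
print(compare_versions("numpy", ">=", "1.20.0"))
```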
+ +------------------------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) <2013> + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/evalkit_tf437/lib/python3.10/site-packages/jedi-0.19.1.dist-info/RECORD b/evalkit_tf437/lib/python3.10/site-packages/jedi-0.19.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..da1dad958db7cbe6c014d2988977d6fbff956eb0 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/jedi-0.19.1.dist-info/RECORD @@ -0,0 +1,1966 @@ +jedi-0.19.1.dist-info/AUTHORS.txt,sha256=XbObKGwco4wdauN5qfPxKbWNVNDUrZySK1f3Rxjve4s,2506 +jedi-0.19.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +jedi-0.19.1.dist-info/LICENSE.txt,sha256=eOYM0LjyhpTzAZVILDPXaQjYRrDRUnjetzMqoiuo5BI,1241 +jedi-0.19.1.dist-info/METADATA,sha256=0euKUirSiVw_UsREAmtaueGuPB36y9nfUut8QtRC5wg,22822 +jedi-0.19.1.dist-info/RECORD,, +jedi-0.19.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi-0.19.1.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110 +jedi-0.19.1.dist-info/top_level.txt,sha256=SqNj9U77yOn5Zf4Ji6CqL8-D2eNg_F71GHo8V8SMV04,5 +jedi/__init__.py,sha256=Yp8PYqhL0ZsIP5XY1R-i7YOk1o84JZPfjfzpB_3fECQ,1486 +jedi/__main__.py,sha256=SONRAB5KoCcU3DRfUZhWWFG8n_fs7Rls-bzzM4VWqD0,1950 +jedi/__pycache__/__init__.cpython-310.pyc,, +jedi/__pycache__/__main__.cpython-310.pyc,, +jedi/__pycache__/_compatibility.cpython-310.pyc,, +jedi/__pycache__/cache.cpython-310.pyc,, +jedi/__pycache__/common.cpython-310.pyc,, +jedi/__pycache__/debug.cpython-310.pyc,, +jedi/__pycache__/file_io.cpython-310.pyc,, +jedi/__pycache__/parser_utils.cpython-310.pyc,, +jedi/__pycache__/settings.cpython-310.pyc,, +jedi/__pycache__/utils.cpython-310.pyc,, +jedi/_compatibility.py,sha256=uSI5oS3axM4l5qnr64Qe5nxHA3zGe2QBaTC4rAdRMls,918 +jedi/api/__init__.py,sha256=Wyj5gev7jBpRNq0n2md7jq0rDGWdrTUFA1qGx1JXK-c,32428 +jedi/api/__pycache__/__init__.cpython-310.pyc,, +jedi/api/__pycache__/classes.cpython-310.pyc,, +jedi/api/__pycache__/completion.cpython-310.pyc,, +jedi/api/__pycache__/completion_cache.cpython-310.pyc,, +jedi/api/__pycache__/environment.cpython-310.pyc,, +jedi/api/__pycache__/errors.cpython-310.pyc,, +jedi/api/__pycache__/exceptions.cpython-310.pyc,, +jedi/api/__pycache__/file_name.cpython-310.pyc,, +jedi/api/__pycache__/helpers.cpython-310.pyc,, +jedi/api/__pycache__/interpreter.cpython-310.pyc,, +jedi/api/__pycache__/keywords.cpython-310.pyc,, 
+jedi/api/__pycache__/project.cpython-310.pyc,, +jedi/api/__pycache__/replstartup.cpython-310.pyc,, +jedi/api/__pycache__/strings.cpython-310.pyc,, +jedi/api/classes.py,sha256=H1HBJY6bkCHOKKfvJ2AoKwG44CB820rWV78ZRFEf23M,29600 +jedi/api/completion.py,sha256=rx9NFRmQNPSZETe3sC453v5VcfYZIX0BF23kwz4-R44,27191 +jedi/api/completion_cache.py,sha256=UOaBWoCsSLjyOiDEWHlUwgDiMCY2Rn2J2zMeI_s5lCM,954 +jedi/api/environment.py,sha256=CfFTXnCk-9vUq5cBshNVaMhGVmUYfrBMLP1uTvpvgew,16913 +jedi/api/errors.py,sha256=jvJvx0Sk8mWLQagxZlZuDszOrQ8DYenfViZrlgWUdeU,1253 +jedi/api/exceptions.py,sha256=RphZJj59VqGJnfw8XUVJ_VU-5N3nwgOk08GADNflxPY,990 +jedi/api/file_name.py,sha256=xlevCfp9B5hUsR94AH09tg4Qn1YXkGxM92uQPf8WpTw,5620 +jedi/api/helpers.py,sha256=F26nueg093a3fBYU7QxE4QXrdV3zBEOnSqLdS9AMbVg,18982 +jedi/api/interpreter.py,sha256=BQrbeTG2uKT3YtJxVria3raXqiCvYj-wl3nSxWvqOkU,2415 +jedi/api/keywords.py,sha256=kEJfvmenj2MPTs8rd5b6onI8YPjwrDo29bq5sjGmDo0,1192 +jedi/api/project.py,sha256=9s0st-ohQ6wZ-SxxstdJvF-yJZi2kXUbiFNrKvCzrpM,16543 +jedi/api/refactoring/__init__.py,sha256=8Lyv9F9Kzj5eOUF4P4xgnixIRyn7ihonqmkEgHksWLA,9579 +jedi/api/refactoring/__pycache__/__init__.cpython-310.pyc,, +jedi/api/refactoring/__pycache__/extract.cpython-310.pyc,, +jedi/api/refactoring/extract.py,sha256=u-IRD0BEVoMUDRTMRP9cD-R6DczHNsikaedOeThOBso,13933 +jedi/api/replstartup.py,sha256=o4FaVzt1PX05uHDITgu7NZkSiqMZbkdAI004b4BHy8g,950 +jedi/api/strings.py,sha256=ODTY8z9puVlQJug29pV3fufqr5N-3kixG6qxLC0Wyto,3711 +jedi/cache.py,sha256=fHEukjwaHSOiE1Z6UG5HMg-mH5fzfOTILZsWvxRaDV8,3674 +jedi/common.py,sha256=jL8TzWZ6Wl5ovayygpX5-bruk6ibu_kq5mh6FZcSGkI,668 +jedi/debug.py,sha256=WuLOoaRBKfuKEf-mZIIbMSSu2LMQJEVUS2opXQMY5aU,3504 +jedi/file_io.py,sha256=yYnkY_nUnB7pSQAjWXFyCS_8IN3BuDlXmPIuZbklPoU,2337 +jedi/inference/__init__.py,sha256=mEfzzffKarfcoRINe_Gcmsh2dB2SpbEFWEzq1wNqCxw,8505 +jedi/inference/__pycache__/__init__.cpython-310.pyc,, +jedi/inference/__pycache__/analysis.cpython-310.pyc,, +jedi/inference/__pycache__/arguments.cpython-310.pyc,, +jedi/inference/__pycache__/base_value.cpython-310.pyc,, +jedi/inference/__pycache__/cache.cpython-310.pyc,, +jedi/inference/__pycache__/context.cpython-310.pyc,, +jedi/inference/__pycache__/docstring_utils.cpython-310.pyc,, +jedi/inference/__pycache__/docstrings.cpython-310.pyc,, +jedi/inference/__pycache__/dynamic_params.cpython-310.pyc,, +jedi/inference/__pycache__/filters.cpython-310.pyc,, +jedi/inference/__pycache__/finder.cpython-310.pyc,, +jedi/inference/__pycache__/flow_analysis.cpython-310.pyc,, +jedi/inference/__pycache__/helpers.cpython-310.pyc,, +jedi/inference/__pycache__/imports.cpython-310.pyc,, +jedi/inference/__pycache__/lazy_value.cpython-310.pyc,, +jedi/inference/__pycache__/names.cpython-310.pyc,, +jedi/inference/__pycache__/param.cpython-310.pyc,, +jedi/inference/__pycache__/parser_cache.cpython-310.pyc,, +jedi/inference/__pycache__/recursion.cpython-310.pyc,, +jedi/inference/__pycache__/references.cpython-310.pyc,, +jedi/inference/__pycache__/signature.cpython-310.pyc,, +jedi/inference/__pycache__/star_args.cpython-310.pyc,, +jedi/inference/__pycache__/syntax_tree.cpython-310.pyc,, +jedi/inference/__pycache__/sys_path.cpython-310.pyc,, +jedi/inference/__pycache__/utils.cpython-310.pyc,, +jedi/inference/analysis.py,sha256=vbRF2gOLzm4lEQq9amaVEkQlWsshjl-tKEiOqUSAzYg,7763 +jedi/inference/arguments.py,sha256=zjQcIwW-m8J2Wy0MU_O_u1sTiNIXQk7du3Vcilm5FNM,12218 +jedi/inference/base_value.py,sha256=18r6-ZHXikOn2Tf6_xeC1KADz_25rTRANvAnDSvpDiA,18219 
+jedi/inference/cache.py,sha256=_eOYHP3jZbLVWC2tuoBpjcLQNIWDCsDH3sCzz81mgfw,4191 +jedi/inference/compiled/__init__.py,sha256=MlKPMiQqzDJ6tUMgevROr5n0yyNubjImuRYUqM8jF1Y,2651 +jedi/inference/compiled/__pycache__/__init__.cpython-310.pyc,, +jedi/inference/compiled/__pycache__/access.cpython-310.pyc,, +jedi/inference/compiled/__pycache__/getattr_static.cpython-310.pyc,, +jedi/inference/compiled/__pycache__/mixed.cpython-310.pyc,, +jedi/inference/compiled/__pycache__/value.cpython-310.pyc,, +jedi/inference/compiled/access.py,sha256=oyiv78JZOGtRAlkjIGEEoaen6JddTQHvAIOF1GrvrSE,18914 +jedi/inference/compiled/getattr_static.py,sha256=Wv5XDvWS7Q7YtmuGp83Ey74Zhsor5BZS9597U-qRquE,3862 +jedi/inference/compiled/mixed.py,sha256=kM2bIpZX1Fvv2JPxJE3wbT1xHlvUi_PCKPr7POvZEsE,11374 +jedi/inference/compiled/subprocess/__init__.py,sha256=bFN8a7SvRjX4BOZtA9B-15oqNJ6mxFFcAgshzvavC7I,13490 +jedi/inference/compiled/subprocess/__main__.py,sha256=aiYy62eWmK98om7_KvoC45eOn5fEjOHOXd0fFzVmTNQ,1187 +jedi/inference/compiled/subprocess/__pycache__/__init__.cpython-310.pyc,, +jedi/inference/compiled/subprocess/__pycache__/__main__.cpython-310.pyc,, +jedi/inference/compiled/subprocess/__pycache__/functions.cpython-310.pyc,, +jedi/inference/compiled/subprocess/functions.py,sha256=HVHbjAOwN0bgsNnVcWPsk_CLXdYy18773ueYfaCTRFM,8464 +jedi/inference/compiled/value.py,sha256=ey4FfHBfixqByy-8_ziYHKEwvEaKb60AVE5Ia4WiIlQ,21262 +jedi/inference/context.py,sha256=ioyAcWxvFLqI13VezWzCgQAAJ5NJgNn8sDcdWolxrFg,17164 +jedi/inference/docstring_utils.py,sha256=GTk3ZuWNIo8fFC9kt8NjOF_Wn0mYKjzKLrAswgvoJKI,759 +jedi/inference/docstrings.py,sha256=g5bqKJ8q7ZpLT32NVA2aCGISrG4X2aC0gDRE0OsB_IM,9824 +jedi/inference/dynamic_params.py,sha256=kjNruHLLXpJZkJ3PR0J_nRXBesxpUEzzyhqT8T8P_CE,8154 +jedi/inference/filters.py,sha256=43Dtz4_VeOF6k39YueBFvZaHj_RLC8Wtpfwt9NF28Hc,12493 +jedi/inference/finder.py,sha256=bx-TVatGVvPs696vDS-CWFbclUOdwWvXZPMJF8Vp9Gg,5326 +jedi/inference/flow_analysis.py,sha256=p-za41-mt90PvgwCZi8cCfhw1DiJCMZ2XjdCwh_rHBk,4583 +jedi/inference/gradual/__init__.py,sha256=Xzh7u-vHsvyHAzr_2kP8PY4NieFh35o-TmQY3KAttlI,143 +jedi/inference/gradual/__pycache__/__init__.cpython-310.pyc,, +jedi/inference/gradual/__pycache__/annotation.cpython-310.pyc,, +jedi/inference/gradual/__pycache__/base.cpython-310.pyc,, +jedi/inference/gradual/__pycache__/conversion.cpython-310.pyc,, +jedi/inference/gradual/__pycache__/generics.cpython-310.pyc,, +jedi/inference/gradual/__pycache__/stub_value.cpython-310.pyc,, +jedi/inference/gradual/__pycache__/type_var.cpython-310.pyc,, +jedi/inference/gradual/__pycache__/typeshed.cpython-310.pyc,, +jedi/inference/gradual/__pycache__/typing.cpython-310.pyc,, +jedi/inference/gradual/__pycache__/utils.cpython-310.pyc,, +jedi/inference/gradual/annotation.py,sha256=lkeF3gX-HWz7wQAANkCDg2gE0ccgL6VlhgDrNd4rMas,16076 +jedi/inference/gradual/base.py,sha256=g3XVBA4aX40yxpJKFI6TtOCtFrJW0HAoNECLv2xn68w,15554 +jedi/inference/gradual/conversion.py,sha256=vqVITY30kczXEuF59RV5Kp9FNx0P545ctEfCtwX9aSg,7601 +jedi/inference/gradual/generics.py,sha256=gdqzR1SlT6U-I4l6VWqRih_zSXmz69BgwcFNehXEWF4,3144 +jedi/inference/gradual/stub_value.py,sha256=PSrQqdZa-A7EtQ0EpSLrPzHWd9TQqhRwKXSVjyxCXlo,3385 +jedi/inference/gradual/type_var.py,sha256=twrYAoTRNRLHXGPs5CwOZtkvKXGGA8ynczEe9IM1jcU,4139 +jedi/inference/gradual/typeshed.py,sha256=AtR0EBoTlObIzM2c7VQI-Jn_AWsjt0YKp4NYTWOpirE,11467 +jedi/inference/gradual/typing.py,sha256=WJ9jsJN7TDz7E8jQPKVNCfzA9osIps6XyqncCnZfd3A,17255 
+jedi/inference/gradual/utils.py,sha256=PXn4xXlo09fQJVfqHrhA3MdLuAn4mqqujhCTuMutzkI,1147 +jedi/inference/helpers.py,sha256=MJp-HLdtz8_jh9U62CnHVXfDZnFoyg8DlXSY1r1WdKw,5943 +jedi/inference/imports.py,sha256=9tpF3lBsrxLi1R3zEdncPNvZv_mTqhFXO-BhKpHL7VI,23082 +jedi/inference/lazy_value.py,sha256=LtD4RiWs_rQ58ZEZLp-_1Bce1B15t4GS1WpixHW891g,1667 +jedi/inference/names.py,sha256=DODbtshdoNhLgfH4AF2Asjtdl5bWamMlctoKRyYfdV0,23214 +jedi/inference/param.py,sha256=EEQZwUHY6MUjX3x4A5gl-NzKHXfJ7Ehx7Iza_s6948Q,10450 +jedi/inference/parser_cache.py,sha256=GOOahC8GvDcGW-Ct653Uts5IZ0EaymB4OwyfKm5WClg,191 +jedi/inference/recursion.py,sha256=2rSUGzs1GqMH8274KrgjLTQMLD76ld_dnopbHckM8qo,4932 +jedi/inference/references.py,sha256=60KtWGun6KFSyv8UDUEzm500jWzGosXYlxSj9GTbJG8,11407 +jedi/inference/signature.py,sha256=uh22vizTq9VsQg-5hYVPDeNs6PYV_YOW9HbCZttWo5g,4859 +jedi/inference/star_args.py,sha256=785E1MC3vvE8zQJ9w0VGei84B2lrLmvxrqYtkyehlbA,7895 +jedi/inference/syntax_tree.py,sha256=hxNuGL5vTqI6RcweWtAZ4-CyZ1xrcfqbia2X8WOMpRs,35753 +jedi/inference/sys_path.py,sha256=coTcmTe997sITzKjzXqyppFYJKpRXNO8mVaFfOIWJ7w,10218 +jedi/inference/utils.py,sha256=63DG77XBZ0b8BTKXqNl8ki-4ySyAO75IoCdFIMvttFk,2706 +jedi/inference/value/__init__.py,sha256=Pyi9BuCJvvWQue2MJH2Lsb-ffK_9MKjRVGjY5NvvhkQ,416 +jedi/inference/value/__pycache__/__init__.cpython-310.pyc,, +jedi/inference/value/__pycache__/decorator.cpython-310.pyc,, +jedi/inference/value/__pycache__/dynamic_arrays.cpython-310.pyc,, +jedi/inference/value/__pycache__/function.cpython-310.pyc,, +jedi/inference/value/__pycache__/instance.cpython-310.pyc,, +jedi/inference/value/__pycache__/iterable.cpython-310.pyc,, +jedi/inference/value/__pycache__/klass.cpython-310.pyc,, +jedi/inference/value/__pycache__/module.cpython-310.pyc,, +jedi/inference/value/__pycache__/namespace.cpython-310.pyc,, +jedi/inference/value/decorator.py,sha256=b8jWqSZQTrQ1rzThPSYCToEAC0Hfx19XEvgJsMXftus,1207 +jedi/inference/value/dynamic_arrays.py,sha256=JKezgJo4LF6gwgJ4joNp1vADX5WVhlq-5O4mhhVh1MA,7527 +jedi/inference/value/function.py,sha256=dwUEHCOwVT-lnZXaqV0P0Xrf2rPuCksXSPRVzGmrqps,17424 +jedi/inference/value/instance.py,sha256=1UthMNhM2piZaUaNuWHjOnInVKuGKqCbjdmbgCMKjyY,22511 +jedi/inference/value/iterable.py,sha256=fxMp3UiqM8xx7pHT_q30Y4thoDrBUIq62unMdBwAnpI,23305 +jedi/inference/value/klass.py,sha256=_KuA6ySY5oZgIBahdRRPOmsR5IOW8ZCwTnHdU2SLKdg,15940 +jedi/inference/value/module.py,sha256=mgmTCAOaELoMcAbmnNVbvzLIdZYXTK-vPsxA9dIOpRY,8118 +jedi/inference/value/namespace.py,sha256=yfw8pjmT705joRJSKU-CfmuiyJ-XdcelbDCzKCxqxVk,2101 +jedi/parser_utils.py,sha256=201oI88ateim7dZDh29T7obso1xqbw5Hdxv66swGuf4,10900 +jedi/plugins/__init__.py,sha256=O3yXGTvWanhnZnKubQJhsnDlyXbl23hlt2aGmhy-UUU,1445 +jedi/plugins/__pycache__/__init__.cpython-310.pyc,, +jedi/plugins/__pycache__/django.cpython-310.pyc,, +jedi/plugins/__pycache__/flask.cpython-310.pyc,, +jedi/plugins/__pycache__/pytest.cpython-310.pyc,, +jedi/plugins/__pycache__/registry.cpython-310.pyc,, +jedi/plugins/__pycache__/stdlib.cpython-310.pyc,, +jedi/plugins/django.py,sha256=n-jg8n9bXNjeuqqu46PsiyypzGjFdUjtYJIGcbiBrtI,10895 +jedi/plugins/flask.py,sha256=SRdiNzObi4Xonq3BqjdNRyz08fHPk5ZvbERyNtYgsSs,916 +jedi/plugins/pytest.py,sha256=CYmb9W-jyJpcMTjRqiqZyzAOkWBXrRWEA_P96Z0TKDY,9502 +jedi/plugins/registry.py,sha256=Z3aaj2W2mpwO2Av4apD_vlkPvorJPljvy4xtJsOHrfc,307 +jedi/plugins/stdlib.py,sha256=bPNOJZ6FOrqgpVip6PpmzbJ8VlpQvxssFSCPEN_zW84,30262 +jedi/settings.py,sha256=F3EZFXS3kA6slIq2xal_qHAE8-9L3hZysKdFFLBZ9RM,3827 
+jedi/third_party/django-stubs/LICENSE.txt,sha256=I16ZOWXTmaJefUk9JchiL3hxhRCIS5wFHx8YZrbzTp0,1075 +jedi/third_party/django-stubs/django-stubs/__init__.pyi,sha256=t008ZLMgegSQayAawLGBCwygFeVfDKqQEAqTpXj_9yA,432 +jedi/third_party/django-stubs/django-stubs/apps/__init__.pyi,sha256=Y3Y6Hf-auytvp3gQT19bAYTGM1e3f9BBktHVMvr2KQU,79 +jedi/third_party/django-stubs/django-stubs/apps/config.pyi,sha256=5WdAiYznWIzxBXj7DUdYwg0ZIYp2pZHi2uUuD8oumQc,834 +jedi/third_party/django-stubs/django-stubs/apps/registry.pyi,sha256=gu9N7TVSYlhW--1o8FaRhD1S7W4DbF5_tAw4VkxBcsw,2050 +jedi/third_party/django-stubs/django-stubs/conf/__init__.pyi,sha256=iD_WNEjoMcZZwVw_OsvIr-qT1D-WAyVhwGHURaVG9hQ,899 +jedi/third_party/django-stubs/django-stubs/conf/global_settings.pyi,sha256=uUXM1AzZ6y9TcZd_wEBD-MksMBnnA0S1oncpezQApoQ,17462 +jedi/third_party/django-stubs/django-stubs/conf/locale/__init__.pyi,sha256=M1UVxxBgBcKY0r7pv8DvOE6_CJavw0Od3fi8iH-APEk,62 +jedi/third_party/django-stubs/django-stubs/conf/urls/__init__.pyi,sha256=SisGWP0M2DqTBeYjxgaoseKtVjQhpvEopgidp_u0BG8,1033 +jedi/third_party/django-stubs/django-stubs/conf/urls/i18n.pyi,sha256=x4QdXccNgttRURmMrKKE5BDhfk_sc9GRyzSJPbbQDKU,297 +jedi/third_party/django-stubs/django-stubs/conf/urls/static.pyi,sha256=BXdu_MPkrP5LIynxR3lUVc6AAK_y45T8YVn_lp-UVow,172 +jedi/third_party/django-stubs/django-stubs/contrib/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/contrib/admin/__init__.pyi,sha256=EqcwNegHa1kj1bgto5_xljmuP4Ym54wt-_A7nia4WEA,881 +jedi/third_party/django-stubs/django-stubs/contrib/admin/actions.pyi,sha256=O3SXOhCX1-BkDiPxDh9BDUppLQhopWyDj2p1l71XmMo,351 +jedi/third_party/django-stubs/django-stubs/contrib/admin/apps.pyi,sha256=1q33ihcO10A_m7HB5gHdU8S1YpVohz6ihgzo9QMPYnk,142 +jedi/third_party/django-stubs/django-stubs/contrib/admin/checks.pyi,sha256=fWhQq3debqlWvigQzUZNuORAVzHw61iWmi0emeczJC8,849 +jedi/third_party/django-stubs/django-stubs/contrib/admin/decorators.pyi,sha256=Ws4zXIAH5jDgEi60FmJ_Mry9eU5rCJmpJ5lNM2XQXbs,170 +jedi/third_party/django-stubs/django-stubs/contrib/admin/filters.pyi,sha256=fQk48FO4Aik5qG-yUHr7XmW1wgJtCHM4YeWc2PEIvkU,3487 +jedi/third_party/django-stubs/django-stubs/contrib/admin/forms.pyi,sha256=vAAUpUCC7gxxUnBDCwNmPrk1L-NOCHx8lHwlceqBNDY,249 +jedi/third_party/django-stubs/django-stubs/contrib/admin/helpers.pyi,sha256=KD26oTSFhlCVWOzH2jzoAx1Oqhncx5aSUd6ySd3qR0M,4790 +jedi/third_party/django-stubs/django-stubs/contrib/admin/models.pyi,sha256=B8NtoXvvjj8pW0fyQQXwsFJ7j7j3DlaxgxuXwY4Y514,1220 +jedi/third_party/django-stubs/django-stubs/contrib/admin/options.pyi,sha256=TpeeMIIkoNO4Ky8VYdxejUzxmV8R1S3t25ofOD5Nilc,13828 +jedi/third_party/django-stubs/django-stubs/contrib/admin/sites.pyi,sha256=Di-VbTnbIqoK8xDO-IOw64s7JrRQ3CoZKYIvGXKwvOs,3288 +jedi/third_party/django-stubs/django-stubs/contrib/admin/templatetags/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/contrib/admin/templatetags/admin_list.pyi,sha256=HSelmnczfnb-E2QvjhmWkHbwTMp7zx9AoxTqWW_HkBI,2028 +jedi/third_party/django-stubs/django-stubs/contrib/admin/templatetags/admin_modify.pyi,sha256=7cJfilSXB5nWosnNDsgAMMJbZXExtCJAjJmCkvKT6a8,690 +jedi/third_party/django-stubs/django-stubs/contrib/admin/templatetags/admin_static.pyi,sha256=FV9QQ0gLLyXEtmmJqGFWRFRo-eJcvO71aRcFqZJn1sI,73 +jedi/third_party/django-stubs/django-stubs/contrib/admin/templatetags/admin_urls.pyi,sha256=BP_vzmHQ3W7lwC1qCuy7_CSL52pWF5_kyx9okRQT100,547 
+jedi/third_party/django-stubs/django-stubs/contrib/admin/templatetags/base.pyi,sha256=_2N03P5IN0Y66vpFrz5l0aRyEQjJgCll8w-FJ1CsP8Y,592 +jedi/third_party/django-stubs/django-stubs/contrib/admin/templatetags/log.pyi,sha256=8m7bTnfmhLLycZoE3Aapyyz4JzO_G8I_e3L16FZATWM,454 +jedi/third_party/django-stubs/django-stubs/contrib/admin/tests.pyi,sha256=IGDA9zE9QPLNB5xgBFFr9j3AbMyufxTm2NUPqmP8mpA,1417 +jedi/third_party/django-stubs/django-stubs/contrib/admin/utils.pyi,sha256=RoMzCbcgb_dKDrY8ezXh93jNJQEEuJ3JAHDitnWZ4To,3107 +jedi/third_party/django-stubs/django-stubs/contrib/admin/views/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/contrib/admin/views/autocomplete.pyi,sha256=AmCorbocq3VfqVJnU6wtVyGESgls1a-WePP5Bl3Vi3E,349 +jedi/third_party/django-stubs/django-stubs/contrib/admin/views/decorators.pyi,sha256=xF9kl3mBijo2Ucnac0ituCn5nq2dyDYNXReYMgX-Rm8,335 +jedi/third_party/django-stubs/django-stubs/contrib/admin/views/main.pyi,sha256=MnJzYgIiYc6K7yPGAzzKokM2YwHbqYwWzVpEfQwdYf8,3390 +jedi/third_party/django-stubs/django-stubs/contrib/admin/widgets.pyi,sha256=i1bW9opYwddEL9f6hhekwMNYhg_2vbo3CPn9-DY173Y,3375 +jedi/third_party/django-stubs/django-stubs/contrib/admindocs/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/contrib/admindocs/middleware.pyi,sha256=DlifCzWN3f9RB-PoedyUCPz3MhpqODdJxEk1xRYyYu4,409 +jedi/third_party/django-stubs/django-stubs/contrib/admindocs/urls.pyi,sha256=oIadLJRRqUS4fwWe3F2TwdQViIuYuSR7iutUidncun0,59 +jedi/third_party/django-stubs/django-stubs/contrib/admindocs/utils.pyi,sha256=aKdHFqtElfEWLRx9LgJRBZby7VKeAvCFRfSh9J_yOVc,725 +jedi/third_party/django-stubs/django-stubs/contrib/admindocs/views.pyi,sha256=JvdGvkWE5fJULlrrbucwuvbOkwPSL9b2I5hsVmBZxAY,852 +jedi/third_party/django-stubs/django-stubs/contrib/auth/__init__.pyi,sha256=hDspBupLxVr58AAo69fVZa9d_pWAkORdzOcjUVxB49M,1316 +jedi/third_party/django-stubs/django-stubs/contrib/auth/admin.pyi,sha256=t3LSFWjHf93LRhNmB3Pn5bd9X4NE9kFzbYCsyDrg930,527 +jedi/third_party/django-stubs/django-stubs/contrib/auth/apps.pyi,sha256=783HiaS66kh94JjcTQDjxdhqTsecaKqZrwJ6rLSB6AQ,68 +jedi/third_party/django-stubs/django-stubs/contrib/auth/backends.pyi,sha256=OzBwXj0hB-Im6o9-2OHAOVCAkP07BG6DQzg0BIMT9d4,1605 +jedi/third_party/django-stubs/django-stubs/contrib/auth/base_user.pyi,sha256=p2mB5q8Pvv1BrE3CgnwXgfLITW3bFSNQVCp388wjxMs,1493 +jedi/third_party/django-stubs/django-stubs/contrib/auth/checks.pyi,sha256=GeMx3ueyYHAufqpprS_roDPC-MGC9jZZiiob_Yj2j1w,371 +jedi/third_party/django-stubs/django-stubs/contrib/auth/context_processors.pyi,sha256=ysQRV_JT1XA8buH6W5mkGbefSDy0nRVumXGjToqgoAg,617 +jedi/third_party/django-stubs/django-stubs/contrib/auth/decorators.pyi,sha256=D-szKzg_wbjHQsdAY7Y_SE4s7fOrE1PQRTqXDdo_acs,982 +jedi/third_party/django-stubs/django-stubs/contrib/auth/forms.pyi,sha256=jpqKdxAYIElU-TlfLzvZNefMnn1Uo3vXEJArzBainZE,3149 +jedi/third_party/django-stubs/django-stubs/contrib/auth/handlers/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/contrib/auth/handlers/modwsgi.pyi,sha256=bv6dWYpTbWYlCQjWyc76cB_lH3akN0WYDyuTmnLXiS8,204 +jedi/third_party/django-stubs/django-stubs/contrib/auth/hashers.pyi,sha256=xdR_649Y60eS-C4VcQUDn76n07U5Tzwl6DlLOcOr8Ek,1969 +jedi/third_party/django-stubs/django-stubs/contrib/auth/management/__init__.pyi,sha256=r_Nm0u9_QFypD1ZAkzWIrPzTRiKM0Vwj8N1BPgEjjUA,384 
+jedi/third_party/django-stubs/django-stubs/contrib/auth/management/commands/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/contrib/auth/management/commands/changepassword.pyi,sha256=sINAEC_pucjlldxJChrjgnhv_PzIBOAi3mPOqdHYZNE,125 +jedi/third_party/django-stubs/django-stubs/contrib/auth/management/commands/createsuperuser.pyi,sha256=LI2_hmrgjeizJ0L0Pe4lo2lqvgqvfd0XDovYSbrx9bk,208 +jedi/third_party/django-stubs/django-stubs/contrib/auth/middleware.pyi,sha256=WwvDTYBlZdi21Ackk6Aksuwgt0JIOOAPOc3niDsN0J0,774 +jedi/third_party/django-stubs/django-stubs/contrib/auth/mixins.pyi,sha256=7Ca0TCSsaszq71MDnCaO2-MqySWlZc2CQQqtQZBIaL0,1150 +jedi/third_party/django-stubs/django-stubs/contrib/auth/models.pyi,sha256=nDsbZgiHmYUmC8TSu1ZAmk5uEVGlEodFN6B2dXhwaec,4641 +jedi/third_party/django-stubs/django-stubs/contrib/auth/password_validation.pyi,sha256=9KOmo5wQfvW4U_qh6yIMczt5izwIRS2GMQQ5vGX2D8I,2052 +jedi/third_party/django-stubs/django-stubs/contrib/auth/signals.pyi,sha256=0qXrIYBrx2XYwRrx1N5V5h9HgFtdmK4pzQfEMPr-EPY,120 +jedi/third_party/django-stubs/django-stubs/contrib/auth/tokens.pyi,sha256=IcHQN4Pn8ikV2qI1e4_VWkLjVbfPR5tOEM2iQR3Dz-c,361 +jedi/third_party/django-stubs/django-stubs/contrib/auth/urls.pyi,sha256=oIadLJRRqUS4fwWe3F2TwdQViIuYuSR7iutUidncun0,59 +jedi/third_party/django-stubs/django-stubs/contrib/auth/validators.pyi,sha256=3P7MK_049xYExFIZVQCaic-iuFgFGaZouQBS4RYe_6I,160 +jedi/third_party/django-stubs/django-stubs/contrib/auth/views.pyi,sha256=BPnTwECkJfap0jz0Mi1RII3PLHeHI6eH3Z3mnBZoFvQ,2478 +jedi/third_party/django-stubs/django-stubs/contrib/contenttypes/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/contrib/contenttypes/admin.pyi,sha256=hQT_hpYtA2aHrknPNDLX9Fl6NSVJbdtKpWhiGOsk-VM,586 +jedi/third_party/django-stubs/django-stubs/contrib/contenttypes/apps.pyi,sha256=WUN_nGxl5mnIB9IctYlkCrswHF8BpQo2yRbRywZ69c0,76 +jedi/third_party/django-stubs/django-stubs/contrib/contenttypes/checks.pyi,sha256=3PeRWTpBcWB1u-66ueZ6RVWFIZReu5OOYpoN7r2JCBQ,318 +jedi/third_party/django-stubs/django-stubs/contrib/contenttypes/fields.pyi,sha256=hW1z3wwu_lRFpF6Zkz2pR3pb7yh1za9PgaZFJ6o5XPY,4097 +jedi/third_party/django-stubs/django-stubs/contrib/contenttypes/forms.pyi,sha256=-j9hvsPL-3syqd6O5gTcXzh3xeCQ-isFdtkhfuUwIX4,1159 +jedi/third_party/django-stubs/django-stubs/contrib/contenttypes/management/__init__.pyi,sha256=B0Gku6qIM0KlOwC2zQyZVrNE2Stj4JylnzhQ4-gswiA,1350 +jedi/third_party/django-stubs/django-stubs/contrib/contenttypes/management/commands/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/contrib/contenttypes/management/commands/remove_stale_contenttypes.pyi,sha256=-wUrCoB87Ueb8337psYunjcfoiKpxqyestpVHKzvaUE,411 +jedi/third_party/django-stubs/django-stubs/contrib/contenttypes/models.pyi,sha256=PcA_Yze4dDqmxsDJqCYKm_5ZnmfaxW4VNyfGjHqkavI,1085 +jedi/third_party/django-stubs/django-stubs/contrib/contenttypes/views.pyi,sha256=wK8to6ffyMuyyrlNJu9-7nzhOASZRr-x_MWdSgd_G_4,257 +jedi/third_party/django-stubs/django-stubs/contrib/flatpages/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/contrib/flatpages/forms.pyi,sha256=XNJrR-mHjiKpxf_r5sPxXC2De-Lyh37BL309dtR_f2I,142 +jedi/third_party/django-stubs/django-stubs/contrib/flatpages/middleware.pyi,sha256=6PzgiwFmjk4K4EBW7tfiUtdxI0x-f81N6RmbMOa5e1g,299 
+jedi/third_party/django-stubs/django-stubs/contrib/flatpages/models.pyi,sha256=sd9sLh0PBc86GpcJrEtE4vaPVvs8xE1gifekJbBuyDo,445 +jedi/third_party/django-stubs/django-stubs/contrib/flatpages/sitemaps.pyi,sha256=-JeEn0-5EwpyJHOtgYd8IK_C8fGTmLPtk_UpgiwsiX4,81 +jedi/third_party/django-stubs/django-stubs/contrib/flatpages/templatetags/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/contrib/flatpages/templatetags/flatpages.pyi,sha256=U1qIuNadeeNP9oodmb1pXDt9Mrc7urXsAvA0c0bLmC0,518 +jedi/third_party/django-stubs/django-stubs/contrib/flatpages/urls.pyi,sha256=oIadLJRRqUS4fwWe3F2TwdQViIuYuSR7iutUidncun0,59 +jedi/third_party/django-stubs/django-stubs/contrib/flatpages/views.pyi,sha256=aJzjM06Iu8BG4w5frT51cpHotQ6e4GKH18Cukc-yDbo,315 +jedi/third_party/django-stubs/django-stubs/contrib/gis/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/contrib/gis/db/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/contrib/gis/db/models/__init__.pyi,sha256=I8SirL0ClZODX-As42cycJ4VjKnCc97cK9hVKBY6hH0,418 +jedi/third_party/django-stubs/django-stubs/contrib/gis/db/models/fields.pyi,sha256=HgR0BvodeWJ2iGkP8pLuFvI9SQ5DTWbP0oMzsZwEbHU,3133 +jedi/third_party/django-stubs/django-stubs/contrib/humanize/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/contrib/humanize/templatetags/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/contrib/humanize/templatetags/humanize.pyi,sha256=4pEElOr5jWoM1AAc-wq4HSLz-0iSrGwmeqqlJqdq2fs,619 +jedi/third_party/django-stubs/django-stubs/contrib/messages/__init__.pyi,sha256=UHr4xfoH6faNXSh6Xq-6OaRhgsi57MkDktuW_MgfDDs,524 +jedi/third_party/django-stubs/django-stubs/contrib/messages/api.pyi,sha256=ue6Dc8C-dHtJuRlcoQE5zPPsmSGOkgUem_H35681jk0,1169 +jedi/third_party/django-stubs/django-stubs/contrib/messages/constants.pyi,sha256=gBM2s_ImbX0K4W0mcwvmesgrEboAd_f_Pzq-lbUQ-IU,187 +jedi/third_party/django-stubs/django-stubs/contrib/messages/context_processors.pyi,sha256=FSJ6CYdzc9OLaBZyMQe1UlTIYq7ROchz_wBsQn-tEGo,249 +jedi/third_party/django-stubs/django-stubs/contrib/messages/middleware.pyi,sha256=H237yhPKbsl-Civ4R6Et_LX5uBUBLrDqTDvBT1JMef4,349 +jedi/third_party/django-stubs/django-stubs/contrib/messages/storage/__init__.pyi,sha256=ySIlvAEkKK1c5eDrg4u2cs-Mu-xkBuIjVqMjlOMqNBo,202 +jedi/third_party/django-stubs/django-stubs/contrib/messages/storage/base.pyi,sha256=EWpGkx7m2iNNqXTYTYTgeQtTmdH_loN6ba3XHbZo8QU,907 +jedi/third_party/django-stubs/django-stubs/contrib/messages/storage/cookie.pyi,sha256=h99T_FYt4AvNx7MaYtCJ42yuXxw2-u4dDaWgX2aqOHQ,487 +jedi/third_party/django-stubs/django-stubs/contrib/messages/storage/fallback.pyi,sha256=9FSrcnvxbwiGz34cVG8v8jOPOBl7cXK8Nvd97wbUCVs,240 +jedi/third_party/django-stubs/django-stubs/contrib/messages/storage/session.pyi,sha256=MsQOXu0WRhdg3mjL51hB_VYTSYspwgCyUfwK7pyEgyA,482 +jedi/third_party/django-stubs/django-stubs/contrib/messages/utils.pyi,sha256=BMBEulwgQtm_k9l8rf5DNeZ-KpKvth7kqf7kzEL2908,69 +jedi/third_party/django-stubs/django-stubs/contrib/messages/views.pyi,sha256=kkAB26i4kilkRl9AJbWCbW5zoy_uuKr33MBLu8Fp4qA,308 +jedi/third_party/django-stubs/django-stubs/contrib/postgres/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+jedi/third_party/django-stubs/django-stubs/contrib/postgres/aggregates/__init__.pyi,sha256=dDW1jG5QW0rYRWbzlmBQ8PT2v9ZdWgFJ0IRHhvBJQyQ,540 +jedi/third_party/django-stubs/django-stubs/contrib/postgres/aggregates/general.pyi,sha256=s1jdffCAM8P8TUYWwwu5ZAjm38N8K7wU_UWR_2wR2SI,338 +jedi/third_party/django-stubs/django-stubs/contrib/postgres/aggregates/mixins.pyi,sha256=BE72JG-si9GbSvdXWBTN2asV9jYrZIh2GldTVScgC1I,29 +jedi/third_party/django-stubs/django-stubs/contrib/postgres/aggregates/statistics.pyi,sha256=PqsAq9E7Jf6IUi3xYpm9LMCl52R6XMfMQieQxe-d49w,470 +jedi/third_party/django-stubs/django-stubs/contrib/postgres/constraints.pyi,sha256=s2FgJy9Rl1JnijuF4VQKTz6aVahFoJAwnO1C36sJQ4I,576 +jedi/third_party/django-stubs/django-stubs/contrib/postgres/fields/__init__.pyi,sha256=nAMbC_GxM8b8-dR9AlAQ9W__vybgo-3-X6bSBi0un48,695 +jedi/third_party/django-stubs/django-stubs/contrib/postgres/fields/array.pyi,sha256=B19PlhiKI8jhgJQq6fK_2SMXtZ1-yq0RyGe6QWwPHik,1729 +jedi/third_party/django-stubs/django-stubs/contrib/postgres/fields/citext.pyi,sha256=bC3-GS1JBsGcBiF5ve1dN3O8vG2eKjJSH0UB7mO7kFA,216 +jedi/third_party/django-stubs/django-stubs/contrib/postgres/fields/hstore.pyi,sha256=f6qGgeaD_ypIuIsPMGlgm_D48zms5vJ5VCclCp-iSCc,519 +jedi/third_party/django-stubs/django-stubs/contrib/postgres/fields/jsonb.pyi,sha256=XQpN-W-SzUoLm8M84ebg9ccGTZ51oMvpNU2S7PGdYMo,1011 +jedi/third_party/django-stubs/django-stubs/contrib/postgres/fields/mixins.pyi,sha256=NAiNjruhDqjVpZdusWemB8_eZhe54CHtvAGKDjvHFKY,113 +jedi/third_party/django-stubs/django-stubs/contrib/postgres/fields/ranges.pyi,sha256=yq6_zMfmqvqbCO627VtFM96DiGfUgdp3tku9RBxv7Vo,1342 +jedi/third_party/django-stubs/django-stubs/contrib/postgres/functions.pyi,sha256=__zcyTUVl9N7_5NVTzZ_SAXvflnckIVZtz46hcSiesI,95 +jedi/third_party/django-stubs/django-stubs/contrib/postgres/indexes.pyi,sha256=WsAYW3VLoP4rQ4p8ra4gkNPYcBH4XU6R-5b5HfvRZ8I,2272 +jedi/third_party/django-stubs/django-stubs/contrib/postgres/lookups.pyi,sha256=6XUZ02CEjDiE_s6sGMCTHuLk1R_MtdkGGjAASJzGVEk,579 +jedi/third_party/django-stubs/django-stubs/contrib/postgres/operations.pyi,sha256=Z8SEkq9xL8vu8ZhaEUZr9DqaN-oBSeu855ng3KXSGXg,735 +jedi/third_party/django-stubs/django-stubs/contrib/postgres/search.pyi,sha256=m0sqtO97PEv8__bspxNgeLfBaMga2rnZUnkHl-6jckk,2195 +jedi/third_party/django-stubs/django-stubs/contrib/postgres/signals.pyi,sha256=yeuM0ob6OaYoACmfLBdZCNWgPlm7A9ZwWjoZi-7Kb0M,237 +jedi/third_party/django-stubs/django-stubs/contrib/postgres/validators.pyi,sha256=s2eEtAs0EGPZdPztDX4Q4ZLLf5fm23Z_ggZU4yWSo0k,653 +jedi/third_party/django-stubs/django-stubs/contrib/redirects/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/contrib/redirects/middleware.pyi,sha256=fKa3O8n_i5HyNVUHXOaTIar7d5SmNOPwUqmo5FRDqx0,391 +jedi/third_party/django-stubs/django-stubs/contrib/redirects/models.pyi,sha256=h-aCkCB9-5rhIRn1zd1DhgCRzFbwbi8n3ogO2THqDDI,168 +jedi/third_party/django-stubs/django-stubs/contrib/sessions/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/contrib/sessions/backends/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/contrib/sessions/backends/base.pyi,sha256=Ny7D5lAHrEndxAQ_1CPFZZM2DsHMoMUnBdJanHOSw0k,1527 +jedi/third_party/django-stubs/django-stubs/contrib/sessions/backends/cache.pyi,sha256=7wCasZDS2jUxauFRfc1hgCbbMhdu3BV0eyNaJDMTTnU,299 
+jedi/third_party/django-stubs/django-stubs/contrib/sessions/backends/cached_db.pyi,sha256=8jDkOaweqsMtw0xxE-TMfqKVF-EOhZYEN3kjasw_f9w,305 +jedi/third_party/django-stubs/django-stubs/contrib/sessions/backends/db.pyi,sha256=tVGmHGKuGZGDYi4l8aR1a6BlQKRYYDrNI1qorOiCOYI,577 +jedi/third_party/django-stubs/django-stubs/contrib/sessions/backends/file.pyi,sha256=cT7G4hn-3A_ZdlWH1TPH8K_Q9bAoKUUjjLlAwFvFPro,283 +jedi/third_party/django-stubs/django-stubs/contrib/sessions/backends/signed_cookies.pyi,sha256=qBbI8hMn_5IEPkzq2AQW54pFZN-d0lVUhSZ0W6DgC84,100 +jedi/third_party/django-stubs/django-stubs/contrib/sessions/base_session.pyi,sha256=1NPvIW3qfHt-WF1wOYh7FGLoPasYJ80KTw0pdrWOh3c,665 +jedi/third_party/django-stubs/django-stubs/contrib/sessions/exceptions.pyi,sha256=JjF0Gti8-WnILz7dVoTAv1yCrbhMLsvBe3e1z3Zzu50,156 +jedi/third_party/django-stubs/django-stubs/contrib/sessions/management/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/contrib/sessions/management/commands/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/contrib/sessions/management/commands/clearsessions.pyi,sha256=Gk-hDddr6HHr5PArycz3DqoeF476UpGqav9HGp_NsnI,85 +jedi/third_party/django-stubs/django-stubs/contrib/sessions/middleware.pyi,sha256=ZaLlZVRmZZgZaDMmC-j4YZVKDSMjdFYW7WOTru56gOg,478 +jedi/third_party/django-stubs/django-stubs/contrib/sessions/models.pyi,sha256=5Xexgn7rtazccT-DG_4gm4-Lc1NtQTZZBYZQn9LdWdk,176 +jedi/third_party/django-stubs/django-stubs/contrib/sessions/serializers.pyi,sha256=IDMgSeFmLRJYNp5_5jiYaTPKxPBLaEVbEWqPbSqRYaM,311 +jedi/third_party/django-stubs/django-stubs/contrib/sitemaps/__init__.pyi,sha256=jH3-BaWLaKwI5DATtZlXtMLNVETNStRqeviv6zqvv4M,1559 +jedi/third_party/django-stubs/django-stubs/contrib/sitemaps/management/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/contrib/sitemaps/management/commands/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/contrib/sitemaps/management/commands/ping_google.pyi,sha256=Gk-hDddr6HHr5PArycz3DqoeF476UpGqav9HGp_NsnI,85 +jedi/third_party/django-stubs/django-stubs/contrib/sitemaps/views.pyi,sha256=JueiGm9sG9TSRfC_6FP7SpdjeOxKko1he1IJJmvWl2Q,762 +jedi/third_party/django-stubs/django-stubs/contrib/sites/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/contrib/sites/apps.pyi,sha256=hA4WuIXyL9iDiIYzysxfk73L--Va48Ede2GGIvVCxPQ,69 +jedi/third_party/django-stubs/django-stubs/contrib/sites/management.pyi,sha256=tMS5XtfebpRS5cGVshTDsfF5tQmiBZqdVLH3Exey-vw,288 +jedi/third_party/django-stubs/django-stubs/contrib/sites/managers.pyi,sha256=9NB542AluxPiwc0ey-I-FDQKSlria3-A9ROMwdg6GXI,170 +jedi/third_party/django-stubs/django-stubs/contrib/sites/middleware.pyi,sha256=XdobZMJ16FQk-rt57wCtkkJ2NoJfD1LQrRz1lZfx8ZA,209 +jedi/third_party/django-stubs/django-stubs/contrib/sites/models.pyi,sha256=CAVo7Xo9pU0SQQ_-CBooqLbMaUjgqugtqvQUXAcmLrM,614 +jedi/third_party/django-stubs/django-stubs/contrib/sites/requests.pyi,sha256=9cA6iRYob33A_N7AHCWhB5HV0u7CBaa3YFxBwKtNjXc,298 +jedi/third_party/django-stubs/django-stubs/contrib/sites/shortcuts.pyi,sha256=dPbrudBzry6CawfRV1IbbF7SHGvSa6oDjGJayELnFzI,266 +jedi/third_party/django-stubs/django-stubs/contrib/staticfiles/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+jedi/third_party/django-stubs/django-stubs/contrib/staticfiles/apps.pyi,sha256=JiBc5TPKiv6yGihDg8hKxxONGJOnOn-rZmbU_P4rbxc,126 +jedi/third_party/django-stubs/django-stubs/contrib/staticfiles/checks.pyi,sha256=_LvpylIL3RjCRsZOzCn7MMuBD41DFLEZNWDolU61Ihs,242 +jedi/third_party/django-stubs/django-stubs/contrib/staticfiles/finders.pyi,sha256=uYtF_5sLYizgcESyiWVSsqwCHonIPf2CPF1Lh0Qg09w,1715 +jedi/third_party/django-stubs/django-stubs/contrib/staticfiles/handlers.pyi,sha256=H6xAYLSqzoYTmTD20s-nvsaEaWChXbQWcMsllwq5zJQ,417 +jedi/third_party/django-stubs/django-stubs/contrib/staticfiles/management/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/contrib/staticfiles/management/commands/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/contrib/staticfiles/management/commands/collectstatic.pyi,sha256=aauFD0KDUGYrHRVy-El3hUVhCkTXtWty213H0YRRjHc,1125 +jedi/third_party/django-stubs/django-stubs/contrib/staticfiles/management/commands/findstatic.pyi,sha256=AoPCqwNz4yYQQm1z6WCDMdxcWrBURx1I5iPsHJQvdOo,87 +jedi/third_party/django-stubs/django-stubs/contrib/staticfiles/management/commands/runserver.pyi,sha256=3uTSyH43HWzNj2ysIJXyT32MaF4q1_gz6q2hJJy2nRM,136 +jedi/third_party/django-stubs/django-stubs/contrib/staticfiles/storage.pyi,sha256=WRfGAaDXcvPjl7ICgAkaxm_lD8dNx9EqikVnBd-Bie0,2262 +jedi/third_party/django-stubs/django-stubs/contrib/staticfiles/templatetags/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/contrib/staticfiles/templatetags/staticfiles.pyi,sha256=3rNdwJS7hrGHEmWcCDQXI195iq8q2A_fB9K-mjIFLtg,234 +jedi/third_party/django-stubs/django-stubs/contrib/staticfiles/testing.pyi,sha256=sD4CDNL4bD-9nTfvLSMK8D7_Oc0UlxpMgraoUAGzr7s,100 +jedi/third_party/django-stubs/django-stubs/contrib/staticfiles/urls.pyi,sha256=e3hDRnM55Tey8gm6uIZkU9XaKL6K1hQCqbgwo2JEfD8,198 +jedi/third_party/django-stubs/django-stubs/contrib/staticfiles/utils.pyi,sha256=5kh8wIGL3tKBTFQ2tv-beeXeSlQvKS9_S8LVXflcybg,438 +jedi/third_party/django-stubs/django-stubs/contrib/staticfiles/views.pyi,sha256=Q2CX7w--nytsntQ6IkUxHuthpEHo8HQMeIsaC4Moq1c,222 +jedi/third_party/django-stubs/django-stubs/contrib/syndication/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/contrib/syndication/views.pyi,sha256=1HnKaVl09EenWI_V6xo-qcobr45hCSLC3uUtoimfGoU,1251 +jedi/third_party/django-stubs/django-stubs/core/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/core/cache/__init__.pyi,sha256=6zFjJ1q_xONqYBqBOcphnTqp49ZU8UrglfT-DCibJ-A,717 +jedi/third_party/django-stubs/django-stubs/core/cache/backends/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/core/cache/backends/base.pyi,sha256=YQGqJhft7pHV6PSlOwIi5WMK_Zhpqld19F1zp6axjnI,2327 +jedi/third_party/django-stubs/django-stubs/core/cache/backends/db.pyi,sha256=wGdUsoR-ow-u2LRIjaakjcwPByKQb8-U-KOXhTbW9wg,595 +jedi/third_party/django-stubs/django-stubs/core/cache/backends/dummy.pyi,sha256=PilEF_Ep1-fDe4bQFkVaFm_n4uTLtmzHv2ewWM8VBzo,182 +jedi/third_party/django-stubs/django-stubs/core/cache/backends/filebased.pyi,sha256=-ShvV4mAtuzKC617Q2qMr7FHZkwoBpG_KP-pfOBW02Y,216 +jedi/third_party/django-stubs/django-stubs/core/cache/backends/locmem.pyi,sha256=kz1dYtPcAvhw-OAgRG0Vd0n92M_1xxN8iaGoU-s4FZA,186 
+jedi/third_party/django-stubs/django-stubs/core/cache/backends/memcached.pyi,sha256=08qEPXnng3yPOK9ZYP4WtWrtENsavVyQbI0ebWLffjI,352 +jedi/third_party/django-stubs/django-stubs/core/cache/utils.pyi,sha256=2rG3g9PW3FWNflGxI84bpIfvpVQlzYYg7FaZre1vEA0,184 +jedi/third_party/django-stubs/django-stubs/core/checks/__init__.pyi,sha256=lNFmiy3qR0A9eHFRAQT7h5sP7a8jHqRUDc6RhhBnjRo,430 +jedi/third_party/django-stubs/django-stubs/core/checks/caches.pyi,sha256=ILPwDMFMEiAA7KYy_G2He5djgU5iZiA8-kq3t8jgeTM,267 +jedi/third_party/django-stubs/django-stubs/core/checks/database.pyi,sha256=a86fSgBJq1RN7ReWxynzDzEJeRf-_SNW2pP5RuavEL4,103 +jedi/third_party/django-stubs/django-stubs/core/checks/messages.pyi,sha256=crXlAhQmqdtkL65DgmJf7T1h7zPJ6LlqXq2wVH0Nx1w,925 +jedi/third_party/django-stubs/django-stubs/core/checks/model_checks.pyi,sha256=YUzLAMcJQ8wgRiN41JJ1kUlkArL9cNW-mzN4z4qIPQ0,358 +jedi/third_party/django-stubs/django-stubs/core/checks/registry.pyi,sha256=djmQNgx4H932hka9_kArUfRrH-MRxMfQS5SchBapFqM,1212 +jedi/third_party/django-stubs/django-stubs/core/checks/security/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/core/checks/security/base.pyi,sha256=iPHQ9r4dT1LPGKvNGYzbup-1So13CsG4CB88tcL0s5w,1592 +jedi/third_party/django-stubs/django-stubs/core/checks/security/csrf.pyi,sha256=ql6NePz-3tHvFrRs1-RiUL7hu-YTeqvOjJDJKWECx4o,379 +jedi/third_party/django-stubs/django-stubs/core/checks/security/sessions.pyi,sha256=GtTSJmziKaY-VxLKo9uZ2Iv2Bm_Q1GucnscQ9IcqI3k,527 +jedi/third_party/django-stubs/django-stubs/core/checks/templates.pyi,sha256=onbDwInTuNoww68Ei0NcAl3zBYRJ1YXTL7ihxAhfyN4,391 +jedi/third_party/django-stubs/django-stubs/core/checks/translation.pyi,sha256=KB0XkeUqlgS1O-HFeVcF0gP09nv9su--tX6fJmTNFKA,155 +jedi/third_party/django-stubs/django-stubs/core/checks/urls.pyi,sha256=cPGPCVBb1TtYLLIgid6su5NcNd9p90b7-40uT9kYhq4,780 +jedi/third_party/django-stubs/django-stubs/core/exceptions.pyi,sha256=VPgD4WzafSFza0Gv5-D0WmJrdtcBAkBKhLirWk9n4h0,1543 +jedi/third_party/django-stubs/django-stubs/core/files/__init__.pyi,sha256=hdnkFb2OQ7nFCvlNdWdVo4qsMPLXN_YaeyRdMAineH0,68 +jedi/third_party/django-stubs/django-stubs/core/files/base.pyi,sha256=EEh_-8b3WGLzwLAKVaKHe_-FNW4yipUv4cEWCThurls,1435 +jedi/third_party/django-stubs/django-stubs/core/files/images.pyi,sha256=SMT0wolMYso4QmCGlPtKmBirJOnvghPc8TkUgMFGe6M,309 +jedi/third_party/django-stubs/django-stubs/core/files/locks.pyi,sha256=EHZ7epp-o3EYCF3vCAPw3DLI5QAPNTg63LDLJL-BP50,308 +jedi/third_party/django-stubs/django-stubs/core/files/move.pyi,sha256=PISpZOttN4V_udjfb3adCtD-4oZ6Xaq8W79I6MddcrY,130 +jedi/third_party/django-stubs/django-stubs/core/files/storage.pyi,sha256=JeqnJwGpqWlD-FDfKMVYM0uTcoHcCaFehqwxyrFcOsQ,1769 +jedi/third_party/django-stubs/django-stubs/core/files/temp.pyi,sha256=GXys-R-G1z3HKO90VbMZqW5mpy8e468QbczNs9k9vQM,100 +jedi/third_party/django-stubs/django-stubs/core/files/uploadedfile.pyi,sha256=mmdboWz3QcaLVWHtiB4GWI5GowWOgQXPpSZoZGJuxqo,1486 +jedi/third_party/django-stubs/django-stubs/core/files/uploadhandler.pyi,sha256=yDWGn8nxgymZGsTNoPles2N1JgzZfvJnR8SGQivSRA8,3195 +jedi/third_party/django-stubs/django-stubs/core/files/utils.pyi,sha256=-YkCeClNFbCHxU418Pbn4t8p451gwLt5UfHnv-1twCc,547 +jedi/third_party/django-stubs/django-stubs/core/handlers/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/core/handlers/base.pyi,sha256=zr5dcNLmsLL8WK45_pof5Whiw8JyVqNe-nQNNTaRBUc,572 
+jedi/third_party/django-stubs/django-stubs/core/handlers/exception.pyi,sha256=9Z2sl79zEx6EkjxA7930oIYYsuaOZQNKf_zNUa21YV0,570 +jedi/third_party/django-stubs/django-stubs/core/handlers/wsgi.pyi,sha256=ZmdUpvNrkmvBNxddhxasurnqfL2cXAYWiDchJMTFfVw,1325 +jedi/third_party/django-stubs/django-stubs/core/mail/__init__.pyi,sha256=fS7DQB-KYK0_XQ5JJ_mfNooDw4aI8K3DQOGmUW2cjKQ,1504 +jedi/third_party/django-stubs/django-stubs/core/mail/backends/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/core/mail/backends/base.pyi,sha256=gRQflStnEnexigKiEhXW9ew01NFWHkgEErsO7zGkmq8,604 +jedi/third_party/django-stubs/django-stubs/core/mail/backends/console.pyi,sha256=rp3gJ_WRrP7cC6OHCZxDmMCEGpwSZTXTE_-9sYGE7qA,103 +jedi/third_party/django-stubs/django-stubs/core/mail/backends/dummy.pyi,sha256=rp3gJ_WRrP7cC6OHCZxDmMCEGpwSZTXTE_-9sYGE7qA,103 +jedi/third_party/django-stubs/django-stubs/core/mail/backends/filebased.pyi,sha256=rp3gJ_WRrP7cC6OHCZxDmMCEGpwSZTXTE_-9sYGE7qA,103 +jedi/third_party/django-stubs/django-stubs/core/mail/backends/locmem.pyi,sha256=rp3gJ_WRrP7cC6OHCZxDmMCEGpwSZTXTE_-9sYGE7qA,103 +jedi/third_party/django-stubs/django-stubs/core/mail/backends/smtp.pyi,sha256=9fsENrJbPIEXUtnYhW_1dADqpcj7I4Z3kR2-3DGwdHg,510 +jedi/third_party/django-stubs/django-stubs/core/mail/message.pyi,sha256=AJVmTc2Ih1J4hsxLyE5ESK8r611Ooqk1thh39cW28v0,4094 +jedi/third_party/django-stubs/django-stubs/core/mail/utils.pyi,sha256=HiKGzyDfshoDMKu0_OuSD262A10HLjhURdD4ZNJt1ec,95 +jedi/third_party/django-stubs/django-stubs/core/management/__init__.pyi,sha256=xqh8MGApVKxQeURNp9Rp8zkm_4VlnefNkcDNnLiF6Yw,841 +jedi/third_party/django-stubs/django-stubs/core/management/base.pyi,sha256=ek1zS9nL1nCODxJGB2tJ6UXz2KjHoJzXeYa54B_wJU4,2922 +jedi/third_party/django-stubs/django-stubs/core/management/color.pyi,sha256=vnwktv_gcMVYi1TUqZBJ1DQZzpUMkwEzyZxs4FE47OQ,1052 +jedi/third_party/django-stubs/django-stubs/core/management/commands/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/core/management/commands/dumpdata.pyi,sha256=BnOg4pY4rHhpAptNOtprYlnqdHCdnFygXlshZu_g62Y,123 +jedi/third_party/django-stubs/django-stubs/core/management/commands/loaddata.pyi,sha256=jbmimF-dEcZb-DftHd6JuveAc_o50YWVGpnajeiVKOo,625 +jedi/third_party/django-stubs/django-stubs/core/management/commands/makemessages.pyi,sha256=hXQnwklaQn5pUiJSvIq757ECX5yJd5IcE3X6-d78G84,1134 +jedi/third_party/django-stubs/django-stubs/core/management/commands/runserver.pyi,sha256=if7vEdyAi4nKamysFhqo6H_JDGRhSPSZhEGigVamK4g,194 +jedi/third_party/django-stubs/django-stubs/core/management/commands/testserver.pyi,sha256=Gk-hDddr6HHr5PArycz3DqoeF476UpGqav9HGp_NsnI,85 +jedi/third_party/django-stubs/django-stubs/core/management/sql.pyi,sha256=fi7T3XJ92IYa50gpmW8vwmA9QPag-oU5XLP2WBo6rpI,429 +jedi/third_party/django-stubs/django-stubs/core/management/templates.pyi,sha256=vHW_QxpqF7tt5xpREpcMA7pdHKkY5E53MkzQaTtkASQ,624 +jedi/third_party/django-stubs/django-stubs/core/management/utils.pyi,sha256=UfBxh4njLLSBSor3WdNJt1clcHylFcqwkiSivhLHK3s,527 +jedi/third_party/django-stubs/django-stubs/core/paginator.pyi,sha256=3ha_zAANkWUw_GUkgCYaQULZQRGcxXCS5gaBdkozVdg,1896 +jedi/third_party/django-stubs/django-stubs/core/serializers/__init__.pyi,sha256=fLovD32hc1-1o1kK0wjYy7LdtzF5VcQ0C-Pc8Ta1wGo,1377 +jedi/third_party/django-stubs/django-stubs/core/serializers/base.pyi,sha256=5Lcy5QIKxdt26oPFUeZS6ix1WGk7UlsFLnYLFCgM-GE,3388 
+jedi/third_party/django-stubs/django-stubs/core/serializers/json.pyi,sha256=mAtdKicHMrrrYDSb_3TUv5lhhg41Rbdukqrupkc8SqI,422 +jedi/third_party/django-stubs/django-stubs/core/serializers/python.pyi,sha256=QctI1p3qqQiaqLpGJdLwvIvECRDz5taXFR1ElFZB1x0,535 +jedi/third_party/django-stubs/django-stubs/core/servers/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/core/servers/basehttp.pyi,sha256=7PZLnV2hBprVabl_Ph9E6v0w4pjdXcbWKP49uijiD1A,1301 +jedi/third_party/django-stubs/django-stubs/core/signals.pyi,sha256=oSxvwX4-JFULZ6XVH3IH9fIoB7yQP7qWf1q2ifN1wNw,163 +jedi/third_party/django-stubs/django-stubs/core/signing.pyi,sha256=nMLdl0czCgg1pQPSO-8Y8DRyUlbk6t_czM4GzRe1pAA,1467 +jedi/third_party/django-stubs/django-stubs/core/validators.pyi,sha256=EeHJ3xcX9cpZ75vv8vVgZ7XdrRXHU0x8c40lJYMq9G0,3934 +jedi/third_party/django-stubs/django-stubs/core/wsgi.pyi,sha256=6VtHyJCF0P4EmH0PppqXcZMZ3kV75FuSjKglcvueZ9M,98 +jedi/third_party/django-stubs/django-stubs/db/__init__.pyi,sha256=RFbCnbvJwrVkhhkwk-OUM4Q7hCIx88ESSptH3qqQGhE,936 +jedi/third_party/django-stubs/django-stubs/db/backends/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/db/backends/base/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/db/backends/base/base.pyi,sha256=NvNs0Fpr2hOcUhZu0WswbNUaxheO1_haci7_WcMnFgs,4348 +jedi/third_party/django-stubs/django-stubs/db/backends/base/client.pyi,sha256=M2_0vbL99q0t6Y0mKHT-TJ0FHOnXrfz0UFVxQxHIqWQ,274 +jedi/third_party/django-stubs/django-stubs/db/backends/base/creation.pyi,sha256=x9cz11-3RbcDHEAmN2Gr6Efvg3wMqhMx1tweI_z8jbU,1103 +jedi/third_party/django-stubs/django-stubs/db/backends/base/features.pyi,sha256=cbWlLXRFXtvJTHSOm71kTMmvlYLvzVejJ6PIu4_R5PM,4244 +jedi/third_party/django-stubs/django-stubs/db/backends/base/introspection.pyi,sha256=CGlvGS9dFb32sBC_llaLmhVlBxwcO26JTsdA3mW8-9A,1490 +jedi/third_party/django-stubs/django-stubs/db/backends/base/operations.pyi,sha256=-k3Ca4u2vUCVTYS04vxgzD-XpX7PCE9o6RBP_--kJKs,6237 +jedi/third_party/django-stubs/django-stubs/db/backends/base/schema.pyi,sha256=PrP7x6JM2lfZFJcZYIXN7OjevwSeJXHX77uIkFFGcv8,3346 +jedi/third_party/django-stubs/django-stubs/db/backends/base/validation.pyi,sha256=c-X0DuxTKiXBUrZ1xrN2vxE9EC0hRfG2aS3tg3ikZyI,386 +jedi/third_party/django-stubs/django-stubs/db/backends/ddl_references.pyi,sha256=WsOtVIe0GMwG-3kuNxjJaAjg7NDl0FgPfKAIufsvIDY,2657 +jedi/third_party/django-stubs/django-stubs/db/backends/dummy/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/db/backends/dummy/base.pyi,sha256=Bybortoqc2aftoDUI9JOW6B3eA8RmtNnPI8IswCBw_w,1029 +jedi/third_party/django-stubs/django-stubs/db/backends/mysql/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/db/backends/mysql/client.pyi,sha256=LuRBeD30YTt7ZKkIy0QPI5cUoCdDkcFUphPF3P6VHmU,383 +jedi/third_party/django-stubs/django-stubs/db/backends/postgresql/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/db/backends/postgresql/base.pyi,sha256=m4ZIq3xup97EwKqz215v0DPzkl64DGJ8ckiJOzWkPHg,459 +jedi/third_party/django-stubs/django-stubs/db/backends/postgresql/client.pyi,sha256=KnnW4cVB55-O1rySn8y0wxf1HmInCvUppc4Ylzg1HVM,281 +jedi/third_party/django-stubs/django-stubs/db/backends/postgresql/creation.pyi,sha256=fnJLgXWLVw9MDbkPM_PrlgCNeEovyRFWahmCpmGtL9A,117 
+jedi/third_party/django-stubs/django-stubs/db/backends/postgresql/operations.pyi,sha256=8l4IrNRrQZJk-LLbH3zGWqgpTmBXA2-rUsP76GgpY_Y,125 +jedi/third_party/django-stubs/django-stubs/db/backends/signals.pyi,sha256=fLFzAfAJ6fuAuy0wVTa0zHBeFCgG7bMLOi_kT7diVBs,69 +jedi/third_party/django-stubs/django-stubs/db/backends/sqlite3/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/db/backends/sqlite3/base.pyi,sha256=bVWwqWyV2s6IXlN9-njHCzdAwLlhw05GFlNq9QT-xG0,349 +jedi/third_party/django-stubs/django-stubs/db/backends/sqlite3/creation.pyi,sha256=fnJLgXWLVw9MDbkPM_PrlgCNeEovyRFWahmCpmGtL9A,117 +jedi/third_party/django-stubs/django-stubs/db/backends/sqlite3/features.pyi,sha256=QBNQPU-dQwPzmhxGH3AcxhgcJ5rCwtLeWJbelW3Lw3E,117 +jedi/third_party/django-stubs/django-stubs/db/backends/sqlite3/introspection.pyi,sha256=7bAlv3kwuzQxii9IDzIOLFHKsr2_TjCkC9wBFmEB2CQ,363 +jedi/third_party/django-stubs/django-stubs/db/backends/sqlite3/operations.pyi,sha256=8l4IrNRrQZJk-LLbH3zGWqgpTmBXA2-rUsP76GgpY_Y,125 +jedi/third_party/django-stubs/django-stubs/db/backends/sqlite3/schema.pyi,sha256=LYEtIfhLNap5vTN1wwBi2NvjgvyRQPhbFpMAYOybr68,127 +jedi/third_party/django-stubs/django-stubs/db/backends/utils.pyi,sha256=yDvBVaKmX_paln-6-vFqr6EQt-vqucdEqkZm3UKvpnA,1835 +jedi/third_party/django-stubs/django-stubs/db/migrations/__init__.pyi,sha256=hhLMlg4osD2PgLKlq76pVHYWqPXZMnA4BpE9v8tDWyQ,243 +jedi/third_party/django-stubs/django-stubs/db/migrations/autodetector.pyi,sha256=xQmcpsLs8jTPIMGuwaASncuUExFQeutQDKJdzNwjAQE,3011 +jedi/third_party/django-stubs/django-stubs/db/migrations/exceptions.pyi,sha256=qoLgU_vrG4a68pzirMdWXBunpPEbYOxdV_m7hJrfKh4,709 +jedi/third_party/django-stubs/django-stubs/db/migrations/executor.pyi,sha256=0s4j-NtGdXyt6dN-GUzjQfPmvvuZAiO-qph4OSIk0_I,1679 +jedi/third_party/django-stubs/django-stubs/db/migrations/graph.pyi,sha256=gZ2DtdRW8WGaNagYSS450xy37jIEYHwXrp4NK6UEUfo,2531 +jedi/third_party/django-stubs/django-stubs/db/migrations/loader.pyi,sha256=FzXHEFMvG28529zVgYPqvIUVmgu4b6ItJHmyLDOcjsU,1547 +jedi/third_party/django-stubs/django-stubs/db/migrations/migration.pyi,sha256=EC3sKQH__KdAQHdtVlB9Mdng9PZRjXfRr5kq_C4QIsU,1064 +jedi/third_party/django-stubs/django-stubs/db/migrations/operations/__init__.pyi,sha256=7V9NE9LUjCDMmYRbbA_DpyByGdwqYJGP4PP4eb9irGM,796 +jedi/third_party/django-stubs/django-stubs/db/migrations/operations/base.pyi,sha256=Yf5Kls_4hSQsIXjlL9OK08vwdBoC-U9HZUEqFAgt3iU,902 +jedi/third_party/django-stubs/django-stubs/db/migrations/operations/fields.pyi,sha256=K3iNfmrU5ih3R9q5iWbWd9vzvwfdKEI5t1mTSOQp6PM,1166 +jedi/third_party/django-stubs/django-stubs/db/migrations/operations/models.pyi,sha256=-PMXj0mF4RpqFF5EfGbXLjb6OL6M3N-IiLppkp1zhNU,3154 +jedi/third_party/django-stubs/django-stubs/db/migrations/operations/special.pyi,sha256=IUylNYPs2amdxe0LXDpAcgG38E6MidF-HqfSXB-rqT4,1340 +jedi/third_party/django-stubs/django-stubs/db/migrations/operations/utils.pyi,sha256=c3JLPgnNvLkXPqd58STsQgONi7i1eUn081TgQaGOehY,217 +jedi/third_party/django-stubs/django-stubs/db/migrations/optimizer.pyi,sha256=4oDcfA4PXyGPtc1C6MmknB-6TaaNtYdeWSyE7JTmMzk,343 +jedi/third_party/django-stubs/django-stubs/db/migrations/questioner.pyi,sha256=734ewVOcc5I9wlAUW2DJ8BVBqACx6C1pm4cHY0ZlwUo,1156 +jedi/third_party/django-stubs/django-stubs/db/migrations/recorder.pyi,sha256=j2th14mIxchOJ3s1ar2nbloAI6Xy4wiVDitnw-i9snE,806 +jedi/third_party/django-stubs/django-stubs/db/migrations/serializer.pyi,sha256=GRls6awL8QgjOVzgNassbBe6H4bEhT1h1vgdjxD97T8,1828 
+jedi/third_party/django-stubs/django-stubs/db/migrations/state.pyi,sha256=3Hflqo8oiCVz-oSprFYyKetkhQsjU69QtTViZimP5Q0,2640 +jedi/third_party/django-stubs/django-stubs/db/migrations/topological_sort.pyi,sha256=NJzfRJYQwJeJS8fCVme_hYGsbrt14h7EGAcs2W0fmL8,348 +jedi/third_party/django-stubs/django-stubs/db/migrations/utils.pyi,sha256=yRlE8__8quhMrlAMU1F282hQftrxxiSQbja20o8hYmc,207 +jedi/third_party/django-stubs/django-stubs/db/migrations/writer.pyi,sha256=cNq1r8wj7KawULnICxXBTWDqO0QVGCoBKLls3O_o48M,1441 +jedi/third_party/django-stubs/django-stubs/db/models/__init__.pyi,sha256=7nR-mr7WV8G0f6Cs5Hcblfww2y2WzC1oQxd31mhnx0Q,3576 +jedi/third_party/django-stubs/django-stubs/db/models/aggregates.pyi,sha256=PrOHocknn7Wmh2aq93_WVq50dx9SOifIIBHvN8F6GVE,501 +jedi/third_party/django-stubs/django-stubs/db/models/base.pyi,sha256=77FrtJ8O86v8HFichcpzYvYxYetw0sS1ArGFdd10uNc,2400 +jedi/third_party/django-stubs/django-stubs/db/models/constraints.pyi,sha256=CXaVfiv0tWcDNPkxg8j7s1Be-teHkwMVuYZLZtMVasM,1089 +jedi/third_party/django-stubs/django-stubs/db/models/deletion.pyi,sha256=GEJcVeD0a_5KMj6u_kVxGQ1ezJj7rW5iccRZkQitN00,1149 +jedi/third_party/django-stubs/django-stubs/db/models/enums.pyi,sha256=Tu87vXSWCvRlSS_Sq3k8aptOXzmMF9JQj4Hk6pW3m3s,814 +jedi/third_party/django-stubs/django-stubs/db/models/expressions.pyi,sha256=QmaztG7k_UEThUZgtlDcwHxQASmil4xVM8ZfG6HQ-cg,8583 +jedi/third_party/django-stubs/django-stubs/db/models/fields/__init__.pyi,sha256=7cRcjZV7AVblUEXaAnQfzpnHX5yqCmenp8fyr9Ce_vA,13788 +jedi/third_party/django-stubs/django-stubs/db/models/fields/files.pyi,sha256=5EFmMvl68KKIC0O7BG7w4f-CEmLvtlxPHswHC8AtHTA,3640 +jedi/third_party/django-stubs/django-stubs/db/models/fields/mixins.pyi,sha256=myGDhO7COLmmouchsjRU57lQpXnJahzqaffXmya4r6c,453 +jedi/third_party/django-stubs/django-stubs/db/models/fields/proxy.pyi,sha256=HJvfUvqFTmnGwe3bZs0eHumpAvOdFYTJeX-KzOPvR80,161 +jedi/third_party/django-stubs/django-stubs/db/models/fields/related.pyi,sha256=6kgc4LA0vqc4JVg0Q9GRkHDuKr_6IfHwxVAoYFly2-I,9289 +jedi/third_party/django-stubs/django-stubs/db/models/fields/related_descriptors.pyi,sha256=foBVVlhgtaoOz6iRbAmh5Az18ZNbxND6gDncol863WA,3184 +jedi/third_party/django-stubs/django-stubs/db/models/fields/related_lookups.pyi,sha256=tPhBih5TNsVbpZ4_j-HM5Gt8tehukUcWxh-cnzARJw4,1500 +jedi/third_party/django-stubs/django-stubs/db/models/fields/reverse_related.pyi,sha256=EzSzqqZdtRcfUwzFDBZLZu8Nw1wbx5fyiyjzTfcSDEk,4045 +jedi/third_party/django-stubs/django-stubs/db/models/functions/__init__.pyi,sha256=zwujZ_mDvMGvDG7PZntnImLITlio1OMJlvuvH0hwmlk,2077 +jedi/third_party/django-stubs/django-stubs/db/models/functions/comparison.pyi,sha256=zMX1rKOTcfMhIyWmstp9DLfT06lDE7Yz-znCQVan-OE,312 +jedi/third_party/django-stubs/django-stubs/db/models/functions/datetime.pyi,sha256=YuYkZO0-rVO_6ChthbPlNHuB24w33RMSLAsipoUTn0s,972 +jedi/third_party/django-stubs/django-stubs/db/models/functions/math.pyi,sha256=4rGyuph9f9-2jlUnUDcCm2DKhTgkHvU4w2UOuGTzd_I,1222 +jedi/third_party/django-stubs/django-stubs/db/models/functions/mixins.pyi,sha256=HW4qO_JX_b1oScltR83xx0ekxCrXcvFMFZNCf6Hp5DY,100 +jedi/third_party/django-stubs/django-stubs/db/models/functions/text.pyi,sha256=8oIROhzzKZO1b8MvEmARBx4LgmfDNG-2ZRoNpYsK3yU,2228 +jedi/third_party/django-stubs/django-stubs/db/models/functions/window.pyi,sha256=poxMB0VKvi7XGx_8imATTi1aaOTGcEuaPLCv3zifFC0,702 +jedi/third_party/django-stubs/django-stubs/db/models/indexes.pyi,sha256=eKOgHZWF1_TUVZl3a8W3UpoDf3N73D07NU5Iw1qiaGE,1241 
+jedi/third_party/django-stubs/django-stubs/db/models/lookups.pyi,sha256=BI1Ly6Sa_U1zDza00zTlC3pPF-bI4eKKev4L2bTlmqE,4475 +jedi/third_party/django-stubs/django-stubs/db/models/manager.pyi,sha256=w5myME9kNAigRReNYiUSNGjRzLUGWcwoqSfd49zmWH8,1796 +jedi/third_party/django-stubs/django-stubs/db/models/options.pyi,sha256=YC_LwZgqFGmUtfNhVmfRRTjSpTKpMCWbh8MxSXdLuoM,4993 +jedi/third_party/django-stubs/django-stubs/db/models/query.pyi,sha256=I04vyx6gHnK0Wm2atkwItk31rQBzNt2WwUx0u9YiB2M,9202 +jedi/third_party/django-stubs/django-stubs/db/models/query_utils.pyi,sha256=_FWehOpztavNcVxGBrAg7VFLdUNJeRigE7dqPt3-PDQ,3011 +jedi/third_party/django-stubs/django-stubs/db/models/signals.pyi,sha256=UvyKPck5LFYqOCeet0Z9HCxbtF9HKOofVak2mnyfPqM,850 +jedi/third_party/django-stubs/django-stubs/db/models/sql/__init__.pyi,sha256=mcrelxpwAAcq3e68wtJ3Y8vbb0JwRwln3GFy3PXTBf0,219 +jedi/third_party/django-stubs/django-stubs/db/models/sql/compiler.pyi,sha256=01_nE7RT8qgJbDsvul5mI-vifLFTQAwk1ETcJxc3U60,4712 +jedi/third_party/django-stubs/django-stubs/db/models/sql/constants.pyi,sha256=wqrVp2CmNwQGTE6G48hxopiddIaPjheG3aF0bNrfrxI,262 +jedi/third_party/django-stubs/django-stubs/db/models/sql/datastructures.pyi,sha256=aP3H2772NScQipXPMjWN8hReyS1cMtP35Tj0FOV0tTo,1909 +jedi/third_party/django-stubs/django-stubs/db/models/sql/query.pyi,sha256=HT_Qr-1R-rZvhpcYgMxLfWT3Xu82PmfvP7JyXNKMKgM,9072 +jedi/third_party/django-stubs/django-stubs/db/models/sql/subqueries.pyi,sha256=AvzqhSc6nWUiriu4jd62PAQt77fJ004mIDDuuzWpJu0,1828 +jedi/third_party/django-stubs/django-stubs/db/models/sql/where.pyi,sha256=SF_adSLaf14MqiKd-bjuWeX9eZFMBEXstMHnZS9hlT4,1918 +jedi/third_party/django-stubs/django-stubs/db/models/utils.pyi,sha256=ooqtUE_Tbh7fBMCgqzG9BkkycU8Pj3fD-tx3OEkqMuM,157 +jedi/third_party/django-stubs/django-stubs/db/transaction.pyi,sha256=8jqZMjzJCdLIVsBvkw_2elm7gagC_80GoW1SnhbmOs0,2032 +jedi/third_party/django-stubs/django-stubs/db/utils.pyi,sha256=EalrPIBxRawmO26UrJuJmUTeXCB8x24ehargXA4Gm9s,1244 +jedi/third_party/django-stubs/django-stubs/dispatch/__init__.pyi,sha256=SQUYnc7LrrXNZN_Ch5JdeGL9HDU-gaTK6G4Yxrr-rvc,78 +jedi/third_party/django-stubs/django-stubs/dispatch/dispatcher.pyi,sha256=mU0d9m4337GccA9rnNHlGm4xaj7ik5QY39cIeKK8VKM,994 +jedi/third_party/django-stubs/django-stubs/forms/__init__.pyi,sha256=4coCEFdYif8MNg0AXsb9poFP6Ih5OiB9Nytd_goGZmk,2957 +jedi/third_party/django-stubs/django-stubs/forms/boundfield.pyi,sha256=jiT0VOrFHPAwTTJYoFiHrtgZbVa8kxv8OjCwA_JuUSo,2308 +jedi/third_party/django-stubs/django-stubs/forms/fields.pyi,sha256=0F8m3ToEyszYNG0eB0VX-UgaYBlplgt2B1QPpmVAyc0,13037 +jedi/third_party/django-stubs/django-stubs/forms/forms.pyi,sha256=UHB2YbGoqojA9mu6mH7vFcHyFTgSa_y7cTcv7h2pF2s,3096 +jedi/third_party/django-stubs/django-stubs/forms/formsets.pyi,sha256=PWlKGK54Iu7khVNdt8nkqFnAHvlF1LkZCPttxU5jRpc,2397 +jedi/third_party/django-stubs/django-stubs/forms/models.pyi,sha256=B3ET3PjsUBJQWUasxTXCEOmoMsL9EOfdNTDnpchkzvw,10173 +jedi/third_party/django-stubs/django-stubs/forms/renderers.pyi,sha256=GSv3bIQiNtMy2NxLMWqZMSpGNQTJQUygIDYCecN41uE,724 +jedi/third_party/django-stubs/django-stubs/forms/utils.pyi,sha256=MgzO6rPlH29FFb7nkg-7ZSQYUjvQ92m-gqiS-bq5Z2w,1257 +jedi/third_party/django-stubs/django-stubs/forms/widgets.pyi,sha256=xO_oOqc02RaK07qV-g17bKBYlXyFhmRie6Nqrb5eq0o,6094 +jedi/third_party/django-stubs/django-stubs/http/__init__.pyi,sha256=tpXEfW48QUEnzOLVM9glXI6MLX3VX6_a-DmbzjIhvF0,988 +jedi/third_party/django-stubs/django-stubs/http/cookie.pyi,sha256=alynIOZMj8djeNCol-x2_mSUeNUtoO-stYZrboEDHnQ,102 
+jedi/third_party/django-stubs/django-stubs/http/multipartparser.pyi,sha256=tq4nBvZN_mSeNC82urno1t2JhEKAqkXOPHpno5LIglQ,1986 +jedi/third_party/django-stubs/django-stubs/http/request.pyi,sha256=hxikJdIej6joG5W7mcj6h74Je5DvynLBpaoYT4vmxfg,3786 +jedi/third_party/django-stubs/django-stubs/http/response.pyi,sha256=JpWbTjhXgAGp7qotHfUTgHRLXoq49jL7YRDNrsjRi6Q,4994 +jedi/third_party/django-stubs/django-stubs/middleware/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/middleware/cache.pyi,sha256=2eNzy0MekvcFiR52WXQ1er12Gl3ZPNaYQBAesnRkaWM,1095 +jedi/third_party/django-stubs/django-stubs/middleware/clickjacking.pyi,sha256=KcQDinOFpxOnJt_bWJpFLk350ceGcODerjF3xcc4gEg,387 +jedi/third_party/django-stubs/django-stubs/middleware/common.pyi,sha256=KmN-G-bFLmSpqbZYXKF2rCWFbyT1thGKKQVscfCdLGQ,987 +jedi/third_party/django-stubs/django-stubs/middleware/csrf.pyi,sha256=-jr3w2lhfv_YPC74qNWZ_r8-CQ8jaJz9AU7c5te2FGA,1250 +jedi/third_party/django-stubs/django-stubs/middleware/gzip.pyi,sha256=If78A7dw_ILgnSCY2L8wkjNF2qqk7SkBVK17WAxVx7Q,339 +jedi/third_party/django-stubs/django-stubs/middleware/http.pyi,sha256=mdaMBPWaWuwjz0g400w1t5N3TkNGL9dG-TCmmkbAOd0,369 +jedi/third_party/django-stubs/django-stubs/middleware/locale.pyi,sha256=typ6K2fi1-vG64j0GVCSAXKCKzcpzmVE2bdDzoe8bes,423 +jedi/third_party/django-stubs/django-stubs/middleware/security.pyi,sha256=4ee7wQgRxiwpP_vwfG5IM68TYyCGpVPuTYEwp8uoM88,715 +jedi/third_party/django-stubs/django-stubs/shortcuts.pyi,sha256=WRzWWQatCnOL3yp2KLZ1H6b35J-eq3H6Ipk66YA2Y6s,1972 +jedi/third_party/django-stubs/django-stubs/template/__init__.pyi,sha256=87Omlyr7XLi5x8bbxTBby2htPXGn79eytzb2jguf2EU,648 +jedi/third_party/django-stubs/django-stubs/template/backends/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/template/backends/base.pyi,sha256=iHw-0xrm6rTptCjmp4N7VEZZRSHEGacxetcQotMVyBU,601 +jedi/third_party/django-stubs/django-stubs/template/backends/django.pyi,sha256=Ds2mLWa5DQirwlyvcy9SzMQVJDbiuvrV5jpYvTUbwbA,706 +jedi/third_party/django-stubs/django-stubs/template/backends/dummy.pyi,sha256=jhUAbJ7I1y6I56DI71u6geDjdndUJn1Fu4RkNIGh7AA,478 +jedi/third_party/django-stubs/django-stubs/template/backends/jinja2.pyi,sha256=ROpMhGxnaNlxWdYlvRWhvdEIFS7HQx1dB-x_FNK1WuQ,581 +jedi/third_party/django-stubs/django-stubs/template/backends/utils.pyi,sha256=Sbwk0bMuzGHJqvrhODl47g0H0n_LiFl7YYOwjHHWGVk,211 +jedi/third_party/django-stubs/django-stubs/template/base.pyi,sha256=eR590K7BCZtsab-Strdw8ju0Luft-YxFl7gme_g9pto,6030 +jedi/third_party/django-stubs/django-stubs/template/context.pyi,sha256=F1mgkncBKN_OeGuDO-dR3dZoYKVE2pxIoFu7ZnAcKUA,3208 +jedi/third_party/django-stubs/django-stubs/template/context_processors.pyi,sha256=bMVI13GgofaJnXyDHRgj09wVxMt0sA1DxawMrzwAtQc,640 +jedi/third_party/django-stubs/django-stubs/template/defaultfilters.pyi,sha256=fFNBc7tz39VsgxpHu5pOGyI1hyjQ44gwpSkObxGe6VA,3651 +jedi/third_party/django-stubs/django-stubs/template/defaulttags.pyi,sha256=hvxjkL-tRoFTg1JeScbEUeiMYF5HrnDw2XQpN4aY0QU,7237 +jedi/third_party/django-stubs/django-stubs/template/engine.pyi,sha256=fVxtt56BiOR-fLxGZtYFwvJD_ygYgu9Bhy5I39c7QbM,2158 +jedi/third_party/django-stubs/django-stubs/template/exceptions.pyi,sha256=XKMDInGTVL9Zr_exfpi6tS-zliwBuK5Dwg2hU9ZwAiE,596 +jedi/third_party/django-stubs/django-stubs/template/library.pyi,sha256=wqpD6-eFu6CHuraBEiO37sRG2Zhmkea1rv0I9eR2StA,3079 
+jedi/third_party/django-stubs/django-stubs/template/loader.pyi,sha256=8O6_g_3h_zkRVqC5OdBbHG0MvIcPNWDr2qszobREPsM,620 +jedi/third_party/django-stubs/django-stubs/template/loader_tags.pyi,sha256=uhPtslJdwFOhqdsWtTzgOPgTFle7rf_eeQDVmmwFCuI,2363 +jedi/third_party/django-stubs/django-stubs/template/loaders/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/template/loaders/app_directories.pyi,sha256=tIrBH9Efep-8HYF_maLVfKPj8eGF_xY4ZxB22AmZsNI,88 +jedi/third_party/django-stubs/django-stubs/template/loaders/base.pyi,sha256=XNXihk_WvxirQJ20BpEPQZqcOms77836aOJYjCLwMl4,476 +jedi/third_party/django-stubs/django-stubs/template/loaders/cached.pyi,sha256=WROvtCtkV0dc4-6h2KISA3zO_h_73eo2id2bxRzLiz0,564 +jedi/third_party/django-stubs/django-stubs/template/loaders/filesystem.pyi,sha256=n3c4eQtiR0RBWkrPXGY8fi3bRTjEPlOnpEwawsQlojQ,433 +jedi/third_party/django-stubs/django-stubs/template/loaders/locmem.pyi,sha256=7P_Vsr1xLb_8tZ4QxEFF-31souSiyLHH0cKqlEcwIPA,354 +jedi/third_party/django-stubs/django-stubs/template/response.pyi,sha256=j1Fl0YSJTvostAVTfkMYy8g2KB9YlP3DBTaAZBMdLbU,2344 +jedi/third_party/django-stubs/django-stubs/template/smartif.pyi,sha256=PAKYMxW4Q6JY0l8TLmwR7B90xy8IYnMXTf7e6Atog_w,1267 +jedi/third_party/django-stubs/django-stubs/template/utils.pyi,sha256=keI7BX8onKPNwTqz9O1Osr1Rk7ZiyPIntQOvsPBo8Os,558 +jedi/third_party/django-stubs/django-stubs/templatetags/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/templatetags/cache.pyi,sha256=u-SCt-L2RlRBD9xG-pVTkwhThwJ7eNazTInZFAXHPWg,682 +jedi/third_party/django-stubs/django-stubs/templatetags/i18n.pyi,sha256=gca9DVbprTHJzIGX-GCcxEUViYDYfH10HffLu3xK9Ik,3176 +jedi/third_party/django-stubs/django-stubs/templatetags/l10n.pyi,sha256=8OV4XSr_rPNMhLOzxVu00SzC9VEnmt4OTRZmMZRqDZc,429 +jedi/third_party/django-stubs/django-stubs/templatetags/static.pyi,sha256=cUQ9iIHKXWgJXY2Qh9M1VXFgD_ASyNQbkVk4Unwg7oo,1170 +jedi/third_party/django-stubs/django-stubs/templatetags/tz.pyi,sha256=da1r04-aB0s1v4PR4d_TKredXpVkZR-kluQw1q07V-U,1166 +jedi/third_party/django-stubs/django-stubs/test/__init__.pyi,sha256=FxU0Xt4wJj1Jf3kdcL0lt-dpEDUN1wFe7LsyMiJvjjc,671 +jedi/third_party/django-stubs/django-stubs/test/client.pyi,sha256=A8dwFxbKc1rfjesp2FlOT__QXWPU0aEBOVZHmkyIFsA,5593 +jedi/third_party/django-stubs/django-stubs/test/html.pyi,sha256=k-d7X-HIXjPYhPpAggZQ9GR6MvkBGh1XmY5PnxGnuOY,1203 +jedi/third_party/django-stubs/django-stubs/test/runner.pyi,sha256=0fO4ry5uDw6o-wvnoBhSf8snCK6gd5yTQqw6b1RVJjc,5210 +jedi/third_party/django-stubs/django-stubs/test/selenium.pyi,sha256=CKW-XlD6SUu9pLyxUt6p8l5uYzN-RjkrGqnji-Dup7Y,368 +jedi/third_party/django-stubs/django-stubs/test/signals.pyi,sha256=yBlj9hfd9baDs5gt0x6Ymdg9L4cOtqPe1TRg63VXxKk,986 +jedi/third_party/django-stubs/django-stubs/test/testcases.pyi,sha256=rUMMlG0haG8l9frb1RRgLMZZtRLKEV5a0JAsLJa7jOU,8273 +jedi/third_party/django-stubs/django-stubs/test/utils.pyi,sha256=GmKL_4fBsAa__tn0gXHPJeIq1MsiUN9utqm9t2fsJ-E,5436 +jedi/third_party/django-stubs/django-stubs/urls/__init__.pyi,sha256=nQLDuDGK7FnjTv35h5oWaHGxEj7LUxTpXHmM1bPV1n0,1197 +jedi/third_party/django-stubs/django-stubs/urls/base.pyi,sha256=RtQApc1gUSPpnAw1gm3HqvNai7lYb0k3r2kS6wcKMk0,887 +jedi/third_party/django-stubs/django-stubs/urls/conf.pyi,sha256=7wH1fulW8v9yZlqEy4yQFSGfVznjPJM1DC5V-ZiSoEY,224 +jedi/third_party/django-stubs/django-stubs/urls/converters.pyi,sha256=--viS0cndpAqzNeusl7RLKBVhsrPIoOikVgmQDNxGxc,827 
+jedi/third_party/django-stubs/django-stubs/urls/exceptions.pyi,sha256=4UVr_L5SexIY1ok1zhdDAq1v1iyt7l0NiDdOu7i07lg,102 +jedi/third_party/django-stubs/django-stubs/urls/resolvers.pyi,sha256=gvKPNzAwWo1kE-wvLFhWk83C7qm1hsuF7CPPzCv9ypQ,4029 +jedi/third_party/django-stubs/django-stubs/urls/utils.pyi,sha256=aHMnJJ_yLIRc1jFBsuwInS1i07sg_TNCRFfpBdVliWA,168 +jedi/third_party/django-stubs/django-stubs/utils/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/utils/_os.pyi,sha256=o_1Okw2HuOcaOrD-_SzHIgSVDNmBShknGwqd5uat8ps,307 +jedi/third_party/django-stubs/django-stubs/utils/archive.pyi,sha256=uYtbaGft9lKuO5eEfMBWnmOX3lD-ztopvgNhLZaP4QM,1043 +jedi/third_party/django-stubs/django-stubs/utils/autoreload.pyi,sha256=C8KHbfg9nGyhfbNa56RXXP0v1rlIkTUvYw1zkmv3bOk,2702 +jedi/third_party/django-stubs/django-stubs/utils/baseconv.pyi,sha256=Myy9JlfWTJ29J8Oyiq-upqag-VfsPHg3jsuq7HHU9iE,593 +jedi/third_party/django-stubs/django-stubs/utils/cache.pyi,sha256=9ss0oBvIs1-gBExjVldryr4hESowSSeQdShVgMZdugs,1338 +jedi/third_party/django-stubs/django-stubs/utils/crypto.pyi,sha256=uccSZ_WsmWkCQ_4V6SQXOJd8NUn3FdyjnUbcP5K1I0U,543 +jedi/third_party/django-stubs/django-stubs/utils/datastructures.pyi,sha256=CUquMJUCgGCjeVUk3QSx9vVcZjmHKJsgVI3Vjqti1Zk,2624 +jedi/third_party/django-stubs/django-stubs/utils/dateformat.pyi,sha256=KLHBQh9aWgLYWbeqw2kiyQauGNDiKmadeMmltidGlrk,1782 +jedi/third_party/django-stubs/django-stubs/utils/dateparse.pyi,sha256=PX8c8e3ma-IOaj-F0RrBF4XTTVngSfkGNKt24AMy4qo,425 +jedi/third_party/django-stubs/django-stubs/utils/dates.pyi,sha256=eDDUi7Z7G3VU8h_6q4Z63l_7L2zgcyYuTE3yf-GSt-I,181 +jedi/third_party/django-stubs/django-stubs/utils/datetime_safe.pyi,sha256=fXQJdx9zqee-Jac6dhydWUMKvz8JzQYnKhwPsxiDnfY,341 +jedi/third_party/django-stubs/django-stubs/utils/deconstruct.pyi,sha256=6IL3eW3s2avXEeGE_oJW3Mxg6Hk3387euuFekBeHY0A,105 +jedi/third_party/django-stubs/django-stubs/utils/decorators.pyi,sha256=AEPbjOpQtJGpOjwrri-6wV2gzUIvNZ5EIVJVsTEushk,935 +jedi/third_party/django-stubs/django-stubs/utils/deprecation.pyi,sha256=yGFYy_fyf37-YpyWdalYAdQXCX8YLVCOgu8lQVVBxpk,1345 +jedi/third_party/django-stubs/django-stubs/utils/duration.pyi,sha256=W8vapfROyCEor7wRgPdoqY7cpLg8kFEqiAKHqjEYfUk,198 +jedi/third_party/django-stubs/django-stubs/utils/encoding.pyi,sha256=e-_D5ZVUcDKL6yXDbcj1BpXsovmfiOogb9ioT1j0vl0,2416 +jedi/third_party/django-stubs/django-stubs/utils/feedgenerator.pyi,sha256=fxf5z74HoHuKF_W1XVOxHMs1VbbD233GHN1o2pkMl68,2677 +jedi/third_party/django-stubs/django-stubs/utils/formats.pyi,sha256=WLkueQDaB2Gg2MTb_SxnFkpQpR49O6ogaZEKhy2JGFQ,1271 +jedi/third_party/django-stubs/django-stubs/utils/functional.pyi,sha256=_k802K-xEtUcEFBrH09Sxsh_7ObZAARVF12UAMGqkug,1923 +jedi/third_party/django-stubs/django-stubs/utils/hashable.pyi,sha256=l25xFctJPg50l6Y129bvdAFJc8QqefkXx40amy6LkhU,66 +jedi/third_party/django-stubs/django-stubs/utils/html.pyi,sha256=uRHTbSOfG3lG2pwS5CdKwRmnfAC2DSgG7UoWkrXZRFM,1366 +jedi/third_party/django-stubs/django-stubs/utils/http.pyi,sha256=zUCz7anMbyPWISp458GPg3TfnE6iehGn1LPBsPE6ru0,1544 +jedi/third_party/django-stubs/django-stubs/utils/inspect.pyi,sha256=4ifnyH12SSqy2OzqUkuJxvFGTkiirblPt4eo1Xu8GyI,391 +jedi/third_party/django-stubs/django-stubs/utils/ipv6.pyi,sha256=_VLusX7GiWNffbeDTqd_jnJPOdxyVS179DFuB61CjhA,168 +jedi/third_party/django-stubs/django-stubs/utils/itercompat.pyi,sha256=UpxNkxuzzgXwrBfRqew_8T5D5tx5WBsCTP933uGVHwI,61 +jedi/third_party/django-stubs/django-stubs/utils/jslex.pyi,sha256=_R3st8851lEA_sUS5i_Kneche-ZPrCtpIsBtzFQZS_U,748 
+jedi/third_party/django-stubs/django-stubs/utils/log.pyi,sha256=yVMX_aEDnocTx6PpJgpvOLVqD-cCmBz9-DnFDpl5ckI,1501 +jedi/third_party/django-stubs/django-stubs/utils/lorem_ipsum.pyi,sha256=hM07Nj_QipTFShSP7PrhQxlwAlYcgcEpH3vXRF_gzcA,248 +jedi/third_party/django-stubs/django-stubs/utils/module_loading.pyi,sha256=sd6wFSrhzvHm648lgnvfdcmkR3PC8SPC925rP83DNkw,247 +jedi/third_party/django-stubs/django-stubs/utils/numberformat.pyi,sha256=tGq_aAmrf_lkWdZ4XUMlf4h0DUtc185bwfgJefaXYkw,344 +jedi/third_party/django-stubs/django-stubs/utils/regex_helper.pyi,sha256=REFgvf8HYnE-XsiepvPLjd2HZyMWlUJDC2ILtUDS0Ws,658 +jedi/third_party/django-stubs/django-stubs/utils/safestring.pyi,sha256=G4jCaiDDv7gb75b9pJFVMOVRZfnECaDk0JYn8jZmxag,629 +jedi/third_party/django-stubs/django-stubs/utils/six.pyi,sha256=fGwtJGbkPulV7pzqEeAdgzVv6z2mDiFtgtios6tWzS4,3408 +jedi/third_party/django-stubs/django-stubs/utils/termcolors.pyi,sha256=gqCN0etzSjG8bN21K_j1d1Jw0UaluPrlEznqx36bGek,517 +jedi/third_party/django-stubs/django-stubs/utils/text.pyi,sha256=ZbmkpySA5CNdPKorDNm5WHh0n0hcF7pp8vI_35z2Bw4,1583 +jedi/third_party/django-stubs/django-stubs/utils/timesince.pyi,sha256=7qwEhIWEQeKk71J8pQJzr7QCljggRtDHEs8-GUiiLOU,362 +jedi/third_party/django-stubs/django-stubs/utils/timezone.pyi,sha256=S6fV8Ub-gZuQzIPWx7RSlugjjcWQFDJVlPxFnq280tE,2714 +jedi/third_party/django-stubs/django-stubs/utils/topological_sort.pyi,sha256=vaxSleSm6rJ0axW_bodRRXA6a82056AxplxL7hbdwTY,297 +jedi/third_party/django-stubs/django-stubs/utils/translation/__init__.pyi,sha256=Xb8uG790oIzlgTor_Czpto7O1o3slls-Yb1gRKqzFvc,2232 +jedi/third_party/django-stubs/django-stubs/utils/translation/reloader.pyi,sha256=WysOUTcv4mLco3UizcmXdD5Im-HrD8nrBy71A_3Phd8,299 +jedi/third_party/django-stubs/django-stubs/utils/translation/template.pyi,sha256=H4yDCnEfEMFUE4OIMg1FJYn9cvj5CbQRv1zeokTe6QY,235 +jedi/third_party/django-stubs/django-stubs/utils/translation/trans_null.pyi,sha256=x7RjAzIctThGOsg6ZKE0NpO6ekKpl1TgsCxaop9G864,729 +jedi/third_party/django-stubs/django-stubs/utils/translation/trans_real.pyi,sha256=uXK53giQyz6hE5WsJPBCkBepZjw9Ljxhwhqk7MLYo_I,1815 +jedi/third_party/django-stubs/django-stubs/utils/tree.pyi,sha256=sR1J0tJzZIGobNu8-XiJ2qr94SlwiM164sF8lD5-8aE,799 +jedi/third_party/django-stubs/django-stubs/utils/version.pyi,sha256=kwDgHzvG-PW5Gs-2ch8lTduLItyo4asFa509YOQd15A,516 +jedi/third_party/django-stubs/django-stubs/utils/xmlutils.pyi,sha256=wGOSksvFX7wd6dl7Yoz836aEyZ-D6jxi4R9kBaaeEtY,433 +jedi/third_party/django-stubs/django-stubs/views/__init__.pyi,sha256=aq7HMnOVrisbvfSuGERsghE8UbQzyGkJq-QpU0Tcv9w,39 +jedi/third_party/django-stubs/django-stubs/views/csrf.pyi,sha256=IWUdDCiJjyVfGZHpN0fHH6nu7yYmVbXyDvWg-Bea23Y,274 +jedi/third_party/django-stubs/django-stubs/views/debug.pyi,sha256=4oGmtVDkDja_yZdqDmfpXGR7-UncwY28nsYv9posmrY,2781 +jedi/third_party/django-stubs/django-stubs/views/decorators/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/django-stubs/django-stubs/views/decorators/cache.pyi,sha256=HNIBcnkLIFOVKkhlD6NCF8hX3MWGBr6pclv27ty-1LY,303 +jedi/third_party/django-stubs/django-stubs/views/decorators/clickjacking.pyi,sha256=Ok9pqjwji2pR7WQPLtZ1CrRu0W9ahdyb-JaP6c4wq5c,247 +jedi/third_party/django-stubs/django-stubs/views/decorators/csrf.pyi,sha256=AWLjm6s5aCI5QnU96JbH6Y5UZxMrmpY227yLDq0jf78,477 +jedi/third_party/django-stubs/django-stubs/views/decorators/debug.pyi,sha256=wZ1YJ1dKztkQ6wd3ED0G6G4l4WuHSRcABy5qOlmFYCo,157 +jedi/third_party/django-stubs/django-stubs/views/decorators/gzip.pyi,sha256=Xjc5S0snvNoApW9JpvSQUhGypsjoCUE8bC0HwaVdL4k,129 
+jedi/third_party/django-stubs/django-stubs/views/decorators/http.pyi,sha256=ZTJ_lLWuXIdnX80y1OA230O8ep50pxlSa2acazv-1gw,576 +jedi/third_party/django-stubs/django-stubs/views/decorators/vary.pyi,sha256=yKYeW6Fa4M_bbOYU0evNeMS_0ly-9ZCJonlkD_VqhRQ,181 +jedi/third_party/django-stubs/django-stubs/views/defaults.pyi,sha256=qnKCAJ0gMNl7y0DbyQRDVDm0WnR7iWhIAFZgsILVQqY,814 +jedi/third_party/django-stubs/django-stubs/views/generic/__init__.pyi,sha256=RMzs5h07bv50ftLnRSYnrWOJXkth7PTyU5odn0GCw68,637 +jedi/third_party/django-stubs/django-stubs/views/generic/base.pyi,sha256=EcfZBLJd_E_yktw5j8YYeCKO4OKdOnY8Pns7x9XT2BI,2239 +jedi/third_party/django-stubs/django-stubs/views/generic/dates.pyi,sha256=sV3GQ35fJCDh5I19zVFQkvEHhkIUeVyHqQ9UBRRBVRM,3771 +jedi/third_party/django-stubs/django-stubs/views/generic/detail.pyi,sha256=PNnYL1e7LJxYr71wI93v0kuwfra8YGOHZvD6-1wIodA,1090 +jedi/third_party/django-stubs/django-stubs/views/generic/edit.pyi,sha256=-ngfSEOCkreuUeU0XcL9s1eNWnZHlPw6VlxdUYvix5E,2234 +jedi/third_party/django-stubs/django-stubs/views/generic/list.pyi,sha256=-Hj-OCJw6BgmXFsUgNHSJfeeECrpyqbBWShSLxJaveU,1625 +jedi/third_party/django-stubs/django-stubs/views/i18n.pyi,sha256=uL-yCYqHyEFkfL2He_UTt_OnBnom8TlqhAquvxMd1Sc,1257 +jedi/third_party/django-stubs/django-stubs/views/static.pyi,sha256=tX0ADulNf2Yh6fJhKh6PSS9a8AsAt1gijAMu2iehhP8,461 +jedi/third_party/typeshed/LICENSE,sha256=tBt49WKi5lt1yrRDVDNfb0NdTvcwZVCWAKyRDPTiL-A,12658 +jedi/third_party/typeshed/stdlib/2/BaseHTTPServer.pyi,sha256=0VJgE581N-I4xIycM3XdZA3Ag9e5du0W2tzWwWvuPjY,1747 +jedi/third_party/typeshed/stdlib/2/CGIHTTPServer.pyi,sha256=pv5Xb3udKa7NXOLe3MI__b_zP6a0wmYuFmOYP_N3kuc,187 +jedi/third_party/typeshed/stdlib/2/ConfigParser.pyi,sha256=lFmtKNTfILcHhIRgVu3-d0jKhaBtggGm9qSjqmIBH-Q,3869 +jedi/third_party/typeshed/stdlib/2/Cookie.pyi,sha256=Ke_iU9vRRHFMkJrben9gxXLss3nTR0bw5ZPvN_abYdc,1342 +jedi/third_party/typeshed/stdlib/2/HTMLParser.pyi,sha256=4mV-tgsweFHjVH6VFVR6pwxsUJt8Ttszq4Ka3gtSOPA,1064 +jedi/third_party/typeshed/stdlib/2/Queue.pyi,sha256=jVv-Ahqmn2SC748u_-61UeRcrK4cQXiQbm5lVgxCDeM,895 +jedi/third_party/typeshed/stdlib/2/SimpleHTTPServer.pyi,sha256=kgfdaGTvBrw515C2tKgPM6PBG1vx8Uym30edR9hVnoU,648 +jedi/third_party/typeshed/stdlib/2/SocketServer.pyi,sha256=4V7NHwSn6jAPavRtz1w8bBwPNNXvGL02UYQAlm6bA-c,4559 +jedi/third_party/typeshed/stdlib/2/StringIO.pyi,sha256=GBqd-c4z25y99oSUVcsEMh98LoMruu1ZOb0AE5UOtzk,1146 +jedi/third_party/typeshed/stdlib/2/UserDict.pyi,sha256=h0miLRBjzRA6LL-sfVhN5lIN19AjToRaByhueNrVxoQ,1663 +jedi/third_party/typeshed/stdlib/2/UserList.pyi,sha256=aDxMr5yhCHPZT-SacAYmju5xaCquHi1Nk_z_rZRvHN8,630 +jedi/third_party/typeshed/stdlib/2/UserString.pyi,sha256=3391hc_oGw3FFPX1zlPbil5GVZbqKqlLbGup-08BnLE,3844 +jedi/third_party/typeshed/stdlib/2/__builtin__.pyi,sha256=-CqbxZmdacwEbwoRuybLasluru9S8wSSaWEfq57ZfU8,48853 +jedi/third_party/typeshed/stdlib/2/_ast.pyi,sha256=36MwM7zIYwSWhw4G2sLtd-L8I9hAZs3lUEw4n_J88a4,5726 +jedi/third_party/typeshed/stdlib/2/_collections.pyi,sha256=cMqBXKAr1o6kdqDM8RkZNclFduh0s-QAv-jUmpUYHc0,1430 +jedi/third_party/typeshed/stdlib/2/_functools.pyi,sha256=rDxIud_hO61m7kUqrvpkJT4mwoiyOvTxqMejOjY5nhM,576 +jedi/third_party/typeshed/stdlib/2/_hotshot.pyi,sha256=ffVl3P1C-rfN5DN6smwzDGDOoUNEasmNkrMKo5mnTbc,635 +jedi/third_party/typeshed/stdlib/2/_io.pyi,sha256=JoQeS8uF6Nz0yz_qbFHinI5YarcMPl7fkWpWv5aHwtQ,7016 +jedi/third_party/typeshed/stdlib/2/_json.pyi,sha256=VGD_NDPa30SGzHZ17hKmgnCk91AhGeOP_klprIkApio,226 +jedi/third_party/typeshed/stdlib/2/_md5.pyi,sha256=pGqwb01a_RcSP1QRE8XtVPB0RKaoJOGuRdVk6pwvEag,300 
+jedi/third_party/typeshed/stdlib/2/_sha.pyi,sha256=32F3_E2nGplztFti0fx5GwfPqobLiyg2rtTLHopfCw4,348 +jedi/third_party/typeshed/stdlib/2/_sha256.pyi,sha256=1Z5g4wLOL9-z6gasal2kMoBb7yBHPJMFSCgPts_GTRM,632 +jedi/third_party/typeshed/stdlib/2/_sha512.pyi,sha256=6AyOELlW_oDueP9i8yvirht0BdJO0itNx9-deuMYCeA,632 +jedi/third_party/typeshed/stdlib/2/_socket.pyi,sha256=wVuvH1tMPqjPe89bG7pmVsuaXZJfSdvw4qWRD9geQBU,6286 +jedi/third_party/typeshed/stdlib/2/_sre.pyi,sha256=mcck_gioJ8R-V4cW7y7GuCuOihgfKTGcP8RSV_eO6l8,1934 +jedi/third_party/typeshed/stdlib/2/_struct.pyi,sha256=cAuIIq62g6PJsL_G5z16j4v5aTGqYDM6j69kyRcPIsM,767 +jedi/third_party/typeshed/stdlib/2/_symtable.pyi,sha256=LLeUYtJkj_EBmkpwSffnopwY_lsPpopzvM-kXQOwg_I,677 +jedi/third_party/typeshed/stdlib/2/_threading_local.pyi,sha256=1y8WTQOMyBV6cA0BZRbm0Q7bDtCDciIspnuKwYHTrd8,319 +jedi/third_party/typeshed/stdlib/2/_winreg.pyi,sha256=4Q60Z4nLxuUQi1LRbBAKKdAOy0Ir5b-2Bk-RDhYYsU8,3696 +jedi/third_party/typeshed/stdlib/2/abc.pyi,sha256=tzQd1K85KZQBh7b4lulFwkRc2GitR7uplj5toNIJsCA,1147 +jedi/third_party/typeshed/stdlib/2/ast.pyi,sha256=fkVIuq6FiMNU3VMVnZmttTt4-OLZ-CfNMoGexWCZLh0,1199 +jedi/third_party/typeshed/stdlib/2/atexit.pyi,sha256=Zx4nX1WyFC_XDP2Pp17lXrMMJiaCjCut0VWHgtXWBok,117 +jedi/third_party/typeshed/stdlib/2/builtins.pyi,sha256=-CqbxZmdacwEbwoRuybLasluru9S8wSSaWEfq57ZfU8,48853 +jedi/third_party/typeshed/stdlib/2/cPickle.pyi,sha256=FR7Mq3gugr2YQWV0TgHkrE0e-UgEB7OrklOlOg66vdc,795 +jedi/third_party/typeshed/stdlib/2/cStringIO.pyi,sha256=PQQ1dSSTlVi-NTl8Qv0IKwYATynJuLz14g6b27C0G-s,1870 +jedi/third_party/typeshed/stdlib/2/collections.pyi,sha256=5psNp9WgNtInnDK2-chmkny_9NkQcbG7TeFsQZCiX6c,4913 +jedi/third_party/typeshed/stdlib/2/commands.pyi,sha256=nGsS9_5mqYgZqIx-g-A0eQ4Vd9WQWvp53Fv8PEpAeic,329 +jedi/third_party/typeshed/stdlib/2/compileall.pyi,sha256=gx2rOct4wjjTrX8pbrd0ZR3CPNiQkjP1OQ1LARDIftw,628 +jedi/third_party/typeshed/stdlib/2/cookielib.pyi,sha256=f3yMHlsN0iK7oqGhGUD2_0lEHw3mljQ00itXN6T6Qwg,4716 +jedi/third_party/typeshed/stdlib/2/copy_reg.pyi,sha256=-GmSIudewg4xblZXLSvErhpunAMi1bsqdQUEXujpFlI,739 +jedi/third_party/typeshed/stdlib/2/dircache.pyi,sha256=cOqJhmlvq7Vl92FUWcfQGX7aedsgjCOYPTuyZEKD2uo,273 +jedi/third_party/typeshed/stdlib/2/distutils/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/2/distutils/archive_util.pyi,sha256=WaUu32XUCstlA4zE0WS-PvA9UF0mdyuDgDbZZd1LN0A,447 +jedi/third_party/typeshed/stdlib/2/distutils/bcppcompiler.pyi,sha256=fge2cMbG4jp--o0I2zNcwykh24tJWZtk6leQgAH2NJw,78 +jedi/third_party/typeshed/stdlib/2/distutils/ccompiler.pyi,sha256=N38wYG41RyiEuowx_ZvpZIqVVZYiz1NGcGHHDJ9MbWs,6449 +jedi/third_party/typeshed/stdlib/2/distutils/cmd.pyi,sha256=SywVh_oj7uAo1tmMjytIafcLCJMfAtMFFZe9Q3vA6Yg,2817 +jedi/third_party/typeshed/stdlib/2/distutils/command/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/2/distutils/command/bdist.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/2/distutils/command/bdist_dumb.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/2/distutils/command/bdist_msi.pyi,sha256=sDSqH7TRcOiXC5S4VXxJ_YHB-WFPpa1fo8F8g5XeV3Y,182 +jedi/third_party/typeshed/stdlib/2/distutils/command/bdist_packager.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/2/distutils/command/bdist_rpm.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+jedi/third_party/typeshed/stdlib/2/distutils/command/bdist_wininst.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/2/distutils/command/build.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/2/distutils/command/build_clib.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/2/distutils/command/build_ext.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/2/distutils/command/build_py.pyi,sha256=z4m9RU6PID-qalM7jzvc2mIWMwk0saczeEmqq9qleH0,181 +jedi/third_party/typeshed/stdlib/2/distutils/command/build_scripts.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/2/distutils/command/check.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/2/distutils/command/clean.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/2/distutils/command/config.pyi,sha256=DV1oMIztVDdk46W43HGioP_n6b3x9FgSqvFr2rwPVoY,3059 +jedi/third_party/typeshed/stdlib/2/distutils/command/install.pyi,sha256=BCv1Lbe-6AHxtaHuhIGd3nOtN-efRmZI-xvpxwKd4Fk,338 +jedi/third_party/typeshed/stdlib/2/distutils/command/install_data.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/2/distutils/command/install_egg_info.pyi,sha256=WcnLycNSSWSZ8Z_vHIohu0-3qKnCih2qoJaGBPtjQGY,380 +jedi/third_party/typeshed/stdlib/2/distutils/command/install_headers.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/2/distutils/command/install_lib.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/2/distutils/command/install_scripts.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/2/distutils/command/register.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/2/distutils/command/sdist.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/2/distutils/command/upload.pyi,sha256=Sv7tBpCnOzInTz9nVhZeo6Waz4brX1PtS51GKzQNwOU,296 +jedi/third_party/typeshed/stdlib/2/distutils/config.pyi,sha256=M4b9_7PKsO74DhsIBFoTAB5I99hXrlBJcjxghWhI8vc,523 +jedi/third_party/typeshed/stdlib/2/distutils/core.pyi,sha256=lq1S8lRqeGqK6U3z7_UsVg2TR7xbx7Z2YXJ0ekIo9Vk,1688 +jedi/third_party/typeshed/stdlib/2/distutils/cygwinccompiler.pyi,sha256=Y7qhVOqXrkPT0yyQnudNCtTzBYC2lzS38HB5Mh45zEI,138 +jedi/third_party/typeshed/stdlib/2/distutils/debug.pyi,sha256=7_zuriUBqHbc62x7tCONq7LQXLuK_hCaBK0laoR3HeY,12 +jedi/third_party/typeshed/stdlib/2/distutils/dep_util.pyi,sha256=QCheHEDF7waISF42_aaumqvVOIcTw-yh5e5-CbPvQ2o,252 +jedi/third_party/typeshed/stdlib/2/distutils/dir_util.pyi,sha256=0nHuLCqZ36gvDVaF6PoC76_JOC3v6P_310eFauW1ZDM,555 +jedi/third_party/typeshed/stdlib/2/distutils/dist.pyi,sha256=DtSDKCXM6UsXnoN7zpt_vxjOgLJu8qeFY2RBcwaB8RM,508 +jedi/third_party/typeshed/stdlib/2/distutils/emxccompiler.pyi,sha256=FxHjF75rMcBNR4odfAyfbbayTXE-2tfbAkHtKxGgYHw,90 +jedi/third_party/typeshed/stdlib/2/distutils/errors.pyi,sha256=l1W_FgoP9L-D-hEPFA2BzZuybjN0lV4WBXl0VJ-k7J8,852 +jedi/third_party/typeshed/stdlib/2/distutils/extension.pyi,sha256=tsIOARiVdGDcvsnxVFxBa80iydfox_Bt6EGqlJsL7kU,706 +jedi/third_party/typeshed/stdlib/2/distutils/fancy_getopt.pyi,sha256=ai-0E83HnTOcYCWO9zq4tDVvnef04SRRoa-F8baso_o,859 
+jedi/third_party/typeshed/stdlib/2/distutils/file_util.pyi,sha256=RFpiizXMdhhcji98pIscfHW4r6i9KLzM2D15gA6_eow,439 +jedi/third_party/typeshed/stdlib/2/distutils/filelist.pyi,sha256=-WeYFFKsEUUjPvbzeZbVCOKkPV-oqc3RoZvN2SB1VOE,20 +jedi/third_party/typeshed/stdlib/2/distutils/log.pyi,sha256=9vvQVRer-_-S5lcV7OHF1Ptr1N3npjKvzTVXReSpZKA,863 +jedi/third_party/typeshed/stdlib/2/distutils/msvccompiler.pyi,sha256=qQLr26msfhjz-omJutWcRHik3shLh1CIt7CDI3jBd3I,78 +jedi/third_party/typeshed/stdlib/2/distutils/spawn.pyi,sha256=iDdi2WvST9yeFDlKWy0Wlye-x67W-ah5nta7EuRW2W4,227 +jedi/third_party/typeshed/stdlib/2/distutils/sysconfig.pyi,sha256=FSdBoSTsVvKAF5D2lkWBwxH15ockfFZv6L06mrgeAb0,620 +jedi/third_party/typeshed/stdlib/2/distutils/text_file.pyi,sha256=vCQLwggDaocAqqR7v1WJjOeS_wgxqjI5xDkyxHJlzcw,716 +jedi/third_party/typeshed/stdlib/2/distutils/unixccompiler.pyi,sha256=R3VKldSfFPIPPIhygeq0KEphtTp0gxUzLoOHd0QoWW8,79 +jedi/third_party/typeshed/stdlib/2/distutils/util.pyi,sha256=KSpX8rQ6qJXqToJBKdwhlpe-jd1QQcacg7wsH_6dKXo,829 +jedi/third_party/typeshed/stdlib/2/distutils/version.pyi,sha256=PU7GKbMl1ivgTKVK54jB2fgIufr_hDCUgzjYyNXt-4E,1160 +jedi/third_party/typeshed/stdlib/2/dummy_thread.pyi,sha256=755Cy6AXyEo3RowYk0pQm5I5mkAIE3yQrkWImnrlHOA,794 +jedi/third_party/typeshed/stdlib/2/email/MIMEText.pyi,sha256=4Hjv1f-LZwoj-ihndmbQNHdwpjOy6wUOJoKS_axJmNo,159 +jedi/third_party/typeshed/stdlib/2/email/__init__.pyi,sha256=iUDv6ttU1qT359eOAubG1JtxNmrJGu8QxH_aXPvOz9w,270 +jedi/third_party/typeshed/stdlib/2/email/_parseaddr.pyi,sha256=oqGaUf13WZALSq7cyULZ0c_6iFKjH8rdnAfAkm6y3Hw,1072 +jedi/third_party/typeshed/stdlib/2/email/base64mime.pyi,sha256=Qb1Q4NHIbSJOcsZ8vUBqaPT-s6lWpj-YD1kI9DI6Xfo,303 +jedi/third_party/typeshed/stdlib/2/email/charset.pyi,sha256=VVEUOTe1XZ824-FhBuIBrSCB16hMAnQ1Ygseu3Noc_Q,902 +jedi/third_party/typeshed/stdlib/2/email/encoders.pyi,sha256=s8kQE5AG1wvh0h0qbNn3_As6ExYQccVdg6Bx2PKGu8E,143 +jedi/third_party/typeshed/stdlib/2/email/feedparser.pyi,sha256=cKLfhKboxZeJxceH5e_broSJZDa4teMu_ZJvZRhREQU,536 +jedi/third_party/typeshed/stdlib/2/email/generator.pyi,sha256=TOAFU4Cb0_a3EitMT62JWGtcoGuvgrfKlbWpNAmwEuA,377 +jedi/third_party/typeshed/stdlib/2/email/header.pyi,sha256=sCk_MfWl5P_bc5v9302SubX0hqgODtlpJsnPb6h-eC8,457 +jedi/third_party/typeshed/stdlib/2/email/iterators.pyi,sha256=vPq5eJF8HBwFQ1hS--niEmurSl4x42YOrU65TxKk0Jc,256 +jedi/third_party/typeshed/stdlib/2/email/message.pyi,sha256=M3XzQbdji1k8_hygt88priwEMJqWKRixQsN4qDLmfeU,1950 +jedi/third_party/typeshed/stdlib/2/email/mime/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/2/email/mime/application.pyi,sha256=zlzwumM16ipPeDnDgS11A0hzIJ1LEegxL7g0-I3dSWw,371 +jedi/third_party/typeshed/stdlib/2/email/mime/audio.pyi,sha256=GYtzGiC2dTHTPD3Cm3uIUlBAQ_25NK2BsbbcuDbLZiU,176 +jedi/third_party/typeshed/stdlib/2/email/mime/base.pyi,sha256=lG1Re_xRHsaw4WRUnLh1Jyneb4M6m8kxqa0NUfwuONg,128 +jedi/third_party/typeshed/stdlib/2/email/mime/image.pyi,sha256=-HVa8k6js_9-sGt5jFg2SF-UjZ6cLP53T9GvRIVI63s,176 +jedi/third_party/typeshed/stdlib/2/email/mime/message.pyi,sha256=OwJjEUiejk2bc9FGqgbvz8Q6ZgQyAg9gphhDXPyXsLU,147 +jedi/third_party/typeshed/stdlib/2/email/mime/multipart.pyi,sha256=1pTSK5lU6L5AJG5H35PTIQtHYIplMoipa7Kkd_m9HNQ,159 +jedi/third_party/typeshed/stdlib/2/email/mime/nonmultipart.pyi,sha256=C9WcyywCzQqkL9MPpSlWHgChmP04r0rrWVw3VlSVHQo,107 +jedi/third_party/typeshed/stdlib/2/email/mime/text.pyi,sha256=4Hjv1f-LZwoj-ihndmbQNHdwpjOy6wUOJoKS_axJmNo,159 
+jedi/third_party/typeshed/stdlib/2/email/parser.pyi,sha256=9QChl7gsm0KPwZHUYy5tR_kZkmQpdSnxCwuZTnp9ceo,415 +jedi/third_party/typeshed/stdlib/2/email/quoprimime.pyi,sha256=ZRJzHi-3Fszfa8nRpz6EpGYZdSpLyGc4K3pxr1uyMUA,490 +jedi/third_party/typeshed/stdlib/2/email/utils.pyi,sha256=Znm4b4ImavrJrwGc_6PcBqyhqo3zIbqDc7gSS_6NU0w,760 +jedi/third_party/typeshed/stdlib/2/encodings/__init__.pyi,sha256=q95Eqw-U2YICiPawMyarXy1iWCXsz1wV_793ZSg_4ek,184 +jedi/third_party/typeshed/stdlib/2/encodings/utf_8.pyi,sha256=tgCdNX8etJQWYWmOYAIZhK8lcYm_Kn67kylKJp0SgUo,573 +jedi/third_party/typeshed/stdlib/2/exceptions.pyi,sha256=i3AvRM6Osg9r_q5outQ4hn7COs0fyhsJizSH5M72G7k,1756 +jedi/third_party/typeshed/stdlib/2/fcntl.pyi,sha256=4A81Md4YTEt71UFrJWD8E4Pp7iQGMiXN5geQ1-AEuA4,1580 +jedi/third_party/typeshed/stdlib/2/fnmatch.pyi,sha256=8kgI-ZZR0lhAGSuQk0M0kt3cYrYRx29bwhIg9ESvLbs,348 +jedi/third_party/typeshed/stdlib/2/functools.pyi,sha256=t_xmkNhLllvRSRG-5X1uP9sW-IHgkAns8ts0uTLllrw,1180 +jedi/third_party/typeshed/stdlib/2/future_builtins.pyi,sha256=vkVYaUei63-XJqSnDDLn7KwkUyLpVwbP01ZGTRWlySc,194 +jedi/third_party/typeshed/stdlib/2/gc.pyi,sha256=5Lfpz5C3KDyocZ8qEKncV-mvf192A-3xlMModHwVFi4,752 +jedi/third_party/typeshed/stdlib/2/getopt.pyi,sha256=6hPbDzz4CuSglcyFspFGyWCNVW0AKygfMXPTD0LKI8Q,448 +jedi/third_party/typeshed/stdlib/2/getpass.pyi,sha256=5FZTPudjVwTRUD2BQJhOr0pyxEyTeXUxYmj_xt1VC0E,160 +jedi/third_party/typeshed/stdlib/2/gettext.pyi,sha256=FT_rx4GptZANYzTplDlzUgSQS7aNuiF0V-mYsJzK0eE,2285 +jedi/third_party/typeshed/stdlib/2/glob.pyi,sha256=q5eeP2SFk94tP3PwKZOVOdGhS-XoaGeyXiiKtWVdvwU,375 +jedi/third_party/typeshed/stdlib/2/gzip.pyi,sha256=YIsrQQFHmhyG8XrqIaWZV1glkmXHWa837LAk-f_ZDT0,997 +jedi/third_party/typeshed/stdlib/2/hashlib.pyi,sha256=46pv8pBLAtNJTc2Ndh_IadSJsmzUj_hH8xYzp_cgJFg,971 +jedi/third_party/typeshed/stdlib/2/heapq.pyi,sha256=oKTl_WDSRYheNCenxMjwR0rHAPfAltZ-xLrApwdB1N4,756 +jedi/third_party/typeshed/stdlib/2/htmlentitydefs.pyi,sha256=1dyH0i00daNQ_7gDuT-mxXzx-V_nDSRuF4q_vjkSUHg,114 +jedi/third_party/typeshed/stdlib/2/httplib.pyi,sha256=Pzw93K9nIBJ28c66a-GVmWSiZOaYwtOZB1e6P03adOI,5929 +jedi/third_party/typeshed/stdlib/2/imp.pyi,sha256=8qatIBDkbAIbC137jA5zRPVO9PRgIe8rDUwaceORVXY,1290 +jedi/third_party/typeshed/stdlib/2/importlib.pyi,sha256=N1OsqmcdpbeN7RTNU4s2zQgnoQhkwECT5z5bv035F-g,134 +jedi/third_party/typeshed/stdlib/2/inspect.pyi,sha256=WJqBL9pFUFyIEFEZGvIZOD6mW1AgywTV1UNtTMap-Dc,4692 +jedi/third_party/typeshed/stdlib/2/io.pyi,sha256=_9t3YnEw4vkpHSL39UdKGnOXiqy2L_ps-DZtqK1noPI,1136 +jedi/third_party/typeshed/stdlib/2/itertools.pyi,sha256=2kBOMB5H_2z8-Z-WGWYlSimAiKFNGH1-5inZJJPbe-c,5975 +jedi/third_party/typeshed/stdlib/2/json.pyi,sha256=gouCsXdr9Y4KBmiwsvl6oN8ULrxIAnL-zBiTRIiWGiQ,3206 +jedi/third_party/typeshed/stdlib/2/markupbase.pyi,sha256=GmpHjRP9pa1Ybup4LFHoYS0TKu9oh8EOqX5-CY2yNb4,264 +jedi/third_party/typeshed/stdlib/2/md5.pyi,sha256=RxbpEbnpbF491HuNmiN3legMGS-8W11hHBxE1tcy7b4,74 +jedi/third_party/typeshed/stdlib/2/mimetools.pyi,sha256=vZd4d0QRIrqrvjtei-P-uK-vhduilbUpT55hlBGzIFA,703 +jedi/third_party/typeshed/stdlib/2/multiprocessing/__init__.pyi,sha256=i3uPVztHhIhN7ZQjtUjwbR3qvjZnMAi4qKeE0x5JU5s,1921 +jedi/third_party/typeshed/stdlib/2/multiprocessing/dummy/__init__.pyi,sha256=YQzAQbXIOWxv1gTPvKeNeduNGN2r40OLyPYE7rCo2Vc,1392 +jedi/third_party/typeshed/stdlib/2/multiprocessing/dummy/connection.pyi,sha256=OkvPY8s58y6GC-8yF1y8QyqxZAoZHxeDPfHDOfJXnaQ,673 +jedi/third_party/typeshed/stdlib/2/multiprocessing/pool.pyi,sha256=H24VaPQgs8EFjcmWtI9dn4nvFsjCb7xIDv8xQzTR7Cw,2038 
+jedi/third_party/typeshed/stdlib/2/multiprocessing/process.pyi,sha256=2AyzstRaJgHXufF7RSpgBgtVD3BmsdXFgd6SSRwJqKU,906 +jedi/third_party/typeshed/stdlib/2/multiprocessing/util.pyi,sha256=xjwHWnEAqauPnOxaopyvJ6irZXQOv7qZ07PRv0YzCnM,758 +jedi/third_party/typeshed/stdlib/2/mutex.pyi,sha256=lQeSSkY5dwRUOdavqMyCEafd8j5XK6Y76vYMH43tyuE,363 +jedi/third_party/typeshed/stdlib/2/ntpath.pyi,sha256=upbuNccYoJZOEeauXVuegAuTUMr9LsDZ6E0c9PCsJwI,2937 +jedi/third_party/typeshed/stdlib/2/nturl2path.pyi,sha256=_u8yHiGMMnRRTjQAs37HCefvy5193SJDBUZTw1nZ0I4,115 +jedi/third_party/typeshed/stdlib/2/os/__init__.pyi,sha256=njHsqKbPSqEyAtvIOGFdEva0es8A5Siemx_9wu92JZE,13380 +jedi/third_party/typeshed/stdlib/2/os/path.pyi,sha256=upbuNccYoJZOEeauXVuegAuTUMr9LsDZ6E0c9PCsJwI,2937 +jedi/third_party/typeshed/stdlib/2/os2emxpath.pyi,sha256=upbuNccYoJZOEeauXVuegAuTUMr9LsDZ6E0c9PCsJwI,2937 +jedi/third_party/typeshed/stdlib/2/pipes.pyi,sha256=OTfpqql0CUZDbJx-Ka4gWuoDub7FsF7bH7N1PplvU6s,467 +jedi/third_party/typeshed/stdlib/2/platform.pyi,sha256=E8pD_04NuJTBrY5bFIdiAgncffdCfeKjgq_mV_uwIZs,1536 +jedi/third_party/typeshed/stdlib/2/popen2.pyi,sha256=dyNtDrnabvCpQVm7i8TVQpDMIvWKoK_2otJgd41hUYI,999 +jedi/third_party/typeshed/stdlib/2/posix.pyi,sha256=KdclicEcb0wBRDaJhr_avbFQJ3HOYr3U7ZZOyoZjpyo,6396 +jedi/third_party/typeshed/stdlib/2/posixpath.pyi,sha256=upbuNccYoJZOEeauXVuegAuTUMr9LsDZ6E0c9PCsJwI,2937 +jedi/third_party/typeshed/stdlib/2/random.pyi,sha256=MSUCctD3w_mmXrrjtfYKuQ2_DF-26GGNnvrhN8APGZ8,3156 +jedi/third_party/typeshed/stdlib/2/re.pyi,sha256=3pi-S5VeaGgnyZGfY9n0kaBqCR4n7bS5BFjPLt_8e_8,3641 +jedi/third_party/typeshed/stdlib/2/repr.pyi,sha256=Ic8zKDnkZLcpQmOGBLvScWrBx4sYHHmlA4gSoIrSQOM,1095 +jedi/third_party/typeshed/stdlib/2/resource.pyi,sha256=ZMxMAadyqjJ7Nmvu-GFPdy5XwG7RKunEkKu3LuRevPE,877 +jedi/third_party/typeshed/stdlib/2/rfc822.pyi,sha256=PYDCMDz6R87HI7iOVAVbqK4Sr7aTijXVK9kBsDw_KBE,2163 +jedi/third_party/typeshed/stdlib/2/robotparser.pyi,sha256=IpfpnvNmCtN84yyZR9TmNdCQA7F1M5MQcqbUdkwoPXQ,230 +jedi/third_party/typeshed/stdlib/2/runpy.pyi,sha256=D-ttE7Yt0BQGuEMaHf5GUyzWrdW_onB8qwW1Opwrn_E,541 +jedi/third_party/typeshed/stdlib/2/sets.pyi,sha256=hbfbabNHU03vRbZSQ2o1QyghdUm5J5w1eNXpFzHM8D4,2975 +jedi/third_party/typeshed/stdlib/2/sha.pyi,sha256=35pkvQygB0J3E1etUSO_ijjsk_H8Q8k6EYjuNMLFHQQ,236 +jedi/third_party/typeshed/stdlib/2/shelve.pyi,sha256=ac7Jr7saU4x3m9hQIQzPKRhuRJqAQO5_uLvwxcs6u9o,1612 +jedi/third_party/typeshed/stdlib/2/shlex.pyi,sha256=gs-tAXy6la_1_qoR0tQhxfMzXXGWDC3qoir1ofNnOd4,1025 +jedi/third_party/typeshed/stdlib/2/signal.pyi,sha256=qEcMv8og5esD8oDAycqwzU276cqYm6_61n-41UVOJ1c,1571 +jedi/third_party/typeshed/stdlib/2/smtplib.pyi,sha256=8wiSP1iFF9-l9IKgh8p6S0rGwLuguGQfFH3xyWPh4ec,2542 +jedi/third_party/typeshed/stdlib/2/spwd.pyi,sha256=BDoGUDub7DFTKhD_tzXW6DbD3uGX15Ujm2DzuFF_cvA,308 +jedi/third_party/typeshed/stdlib/2/sre_constants.pyi,sha256=T6kBTKeYYGkM83SbbgVx9L38eaZgqEY-AkgfCLr9GbU,1744 +jedi/third_party/typeshed/stdlib/2/sre_parse.pyi,sha256=meZYm_VskCn193qP3aV71kf61HJ8KWYKT55LISLYP9Q,2311 +jedi/third_party/typeshed/stdlib/2/stat.pyi,sha256=Tzy8jDY2wz2pZucTjKwCHly-4C9c3bhLBpQaZW8zk7o,992 +jedi/third_party/typeshed/stdlib/2/string.pyi,sha256=WRgPuXahi1tCgo4iM7HQI7fr4qcc1gOanuuA0Cjh1Qw,3567 +jedi/third_party/typeshed/stdlib/2/stringold.pyi,sha256=cefNX8V9XsaCmguO3e32tYNdfUnfMHZ48Gu8smUUCuw,2010 +jedi/third_party/typeshed/stdlib/2/strop.pyi,sha256=kF2oXemBZd_VaHlTzV19pp9fi8iwcVsq8avQS8YwdXc,1157 +jedi/third_party/typeshed/stdlib/2/subprocess.pyi,sha256=JV6VJpxGqVfwky5ZAmL54wLvYHwEucisA1xWCZunUKA,3282 
+jedi/third_party/typeshed/stdlib/2/symbol.pyi,sha256=gmMHvO88vurNaeIXHNnl7UgNPg0gdf8D6iuxX5aTJiM,1341 +jedi/third_party/typeshed/stdlib/2/sys.pyi,sha256=L0K5SKIRsKdL_WJIz735YgaSPvxBXtVPHWY0dvxumZ4,3616 +jedi/third_party/typeshed/stdlib/2/tempfile.pyi,sha256=BbSxin5u5F2m-KXj0ucDGPMIIxyv_lO-VJSpLQa14bY,3696 +jedi/third_party/typeshed/stdlib/2/textwrap.pyi,sha256=8VV3JRR4Lq73UZ4t9gtYOBeM-YcbOCaGbUDPmKHQeJM,1854 +jedi/third_party/typeshed/stdlib/2/thread.pyi,sha256=P3v99RXZZFRPUzpe9St8fswlzP4IEStuPFKdlwlUJvk,920 +jedi/third_party/typeshed/stdlib/2/toaiff.pyi,sha256=FA2QwiYCh-YuKQyMuSj4DhQQo9iTASevNspzkoWfRB4,243 +jedi/third_party/typeshed/stdlib/2/tokenize.pyi,sha256=NC9RJ7aJxIBijxom_sURphAu-rVlnF8j0cHhY_VbJkc,2686 +jedi/third_party/typeshed/stdlib/2/types.pyi,sha256=YKV40Mmm-geFO0fRjHduzNW-0RRYjhOgzoeKWkBVKJo,5465 +jedi/third_party/typeshed/stdlib/2/typing.pyi,sha256=eVRiPuinpIP89d6cVv8AK1x1DlXngv32iRpORi8yNc0,17772 +jedi/third_party/typeshed/stdlib/2/unittest.pyi,sha256=a7yf589r5b0mArYulqLfbvmW8NdJNTnHXZh9rclXTVQ,12726 +jedi/third_party/typeshed/stdlib/2/urllib.pyi,sha256=upPSXvIUWm-CLSW7TwMXageO9i3zNZMpOWzlWR9mmyQ,4765 +jedi/third_party/typeshed/stdlib/2/urllib2.pyi,sha256=xZez6mOO-ku7JSJAEnfZK-Epo0Atd5ZMTfooXukDA6c,8312 +jedi/third_party/typeshed/stdlib/2/urlparse.pyi,sha256=rQEJa_Oyy2KoO9WWgonvFDCfkDgfTgqReuGRU8ky1f8,1944 +jedi/third_party/typeshed/stdlib/2/user.pyi,sha256=Mz3QGfgG58h1uuCR-b9r_Pv8L-vA8oH4I4NN6L6hfC0,83 +jedi/third_party/typeshed/stdlib/2/whichdb.pyi,sha256=k7vfuOkgz6kZd_6mVSsP36DBR16mfRZNpO0kngswZEg,85 +jedi/third_party/typeshed/stdlib/2/xmlrpclib.pyi,sha256=6H5putpgG72kUD6x5OLeJKtMzXkvrMMOkNx-zShk27U,9769 +jedi/third_party/typeshed/stdlib/2and3/__future__.pyi,sha256=b7dmNhiJdcJM2cyLfX1i73MNfiVwTUfMmyOdZzBU2fw,587 +jedi/third_party/typeshed/stdlib/2and3/_bisect.pyi,sha256=MURhFGAf_n_yms64euXpOGBeEY3932tibeH9gkxDsQc,471 +jedi/third_party/typeshed/stdlib/2and3/_codecs.pyi,sha256=i_rEwDVsWLSSK7M1ShCr3jZ79V9MwQY4on1CF8PBQ74,5308 +jedi/third_party/typeshed/stdlib/2and3/_csv.pyi,sha256=OZpwhCxRKWUNfzD-TFlmjGgx1WnbSAY1f5CruGAieEc,1574 +jedi/third_party/typeshed/stdlib/2and3/_curses.pyi,sha256=nI5pvFuJzbmoa8ZOIZ4I94UPStHtWM_wQsO84a8J4pw,13596 +jedi/third_party/typeshed/stdlib/2and3/_dummy_threading.pyi,sha256=__eLfXz1Hygl5swKoIL4JZtQ-JBIKkr-yExKwKl6eD0,6309 +jedi/third_party/typeshed/stdlib/2and3/_heapq.pyi,sha256=heTQYQlAQ5f-78bm3rPkSeoQWoxIQ4_pX44TcFx0AGE,612 +jedi/third_party/typeshed/stdlib/2and3/_msi.pyi,sha256=tAOc34He8wJWmW1WsINZxT0ns-a8zYfV-rlhnDKzg-g,2168 +jedi/third_party/typeshed/stdlib/2and3/_random.pyi,sha256=Ot_yeMIsjSxwYFMkes_URP2OlZqzRePVeYaCbnQr-qI,478 +jedi/third_party/typeshed/stdlib/2and3/_typeshed/__init__.pyi,sha256=auqNqMA9bWUD9jKmsHaWidPPnFFcc0M4c4a9M3nHChU,4615 +jedi/third_party/typeshed/stdlib/2and3/_typeshed/wsgi.pyi,sha256=n0p7egUjhY7Hni-BwmOZd1OpOWy_GkpNDKDYHHHcrKE,1293 +jedi/third_party/typeshed/stdlib/2and3/_typeshed/xml.pyi,sha256=sWVM_2a0LZudB2irKi_UO4fJHhMiYz8OFLeTtvhByz8,528 +jedi/third_party/typeshed/stdlib/2and3/_warnings.pyi,sha256=5h2niNh47xb7G7p6MOvQSj-NVnFJf_hiwyO4aLN05BY,2233 +jedi/third_party/typeshed/stdlib/2and3/_weakref.pyi,sha256=-638OW7gOKVYT51Bp8QkXKc13qvnhpl_6NCQCvdaY6s,1200 +jedi/third_party/typeshed/stdlib/2and3/_weakrefset.pyi,sha256=FY1Fl7JZpGeVBFKZV3MhCGOR8WoD8jGnH6g7Gd_0OW4,2417 +jedi/third_party/typeshed/stdlib/2and3/aifc.pyi,sha256=R9iiBcPS2xH4RSRcwydKcpWuxhDP4K4HANvaYLsERKk,3393 +jedi/third_party/typeshed/stdlib/2and3/antigravity.pyi,sha256=hXe2_7W39SOiBPpQJb8e-9pre8_0DIGWXRZY1IFp-6c,123 
+jedi/third_party/typeshed/stdlib/2and3/argparse.pyi,sha256=7AG5GfwwiCJe3UqjaIRP5xtbyRNKCyqOunne245uml0,18335 +jedi/third_party/typeshed/stdlib/2and3/array.pyi,sha256=VADSEevIbSyboD2_W-2eF51q8nJp2QcgPF5d0Siqp_c,3489 +jedi/third_party/typeshed/stdlib/2and3/asynchat.pyi,sha256=v-upDoOfXdG1839UdJj6wkBosW0914umWN-BOjaGrwM,1555 +jedi/third_party/typeshed/stdlib/2and3/asyncore.pyi,sha256=OS5PpOKzhv4BM7wNNTDgky5rJtoFE1yuz-ZIZ7bPwdc,5534 +jedi/third_party/typeshed/stdlib/2and3/audioop.pyi,sha256=lBRWVdim8hPgzNwMIWQSs_nvv7b7uoBqjuEbXG7FiqM,2119 +jedi/third_party/typeshed/stdlib/2and3/base64.pyi,sha256=E8MqdBGhXIjIXkqSkLdkS6645St1pMYR59syRKr5oeQ,1614 +jedi/third_party/typeshed/stdlib/2and3/bdb.pyi,sha256=dZP-An_qNDJXxMzWsIX1fcTIoc3qI-WIm48BS3gAJLk,4644 +jedi/third_party/typeshed/stdlib/2and3/binascii.pyi,sha256=HgN_QgHsAdhRgpelHyZsSWnup-Tce23veb46498Mo6U,1552 +jedi/third_party/typeshed/stdlib/2and3/binhex.pyi,sha256=KPO4jyuFjgvRV1ryvanL0j0cIThoIoDisYOz3TBm_nw,1147 +jedi/third_party/typeshed/stdlib/2and3/bisect.pyi,sha256=sQn9UUS0Cw5XZMEGcEj8Ka5VKPVobL43Pex_SagjXg8,67 +jedi/third_party/typeshed/stdlib/2and3/bz2.pyi,sha256=RO-rYMH1TpprITXeCtqhU5TM6I7gQDRutpgh_bSvZAo,2267 +jedi/third_party/typeshed/stdlib/2and3/cProfile.pyi,sha256=IhoIaaoA9_bIN4ifc6d0qX3kO1so5Lss17OCrBW8HcE,1239 +jedi/third_party/typeshed/stdlib/2and3/calendar.pyi,sha256=iB08ixpoedGoLTBZovdy7JavZEqFYF0kxwXctgez0qI,5773 +jedi/third_party/typeshed/stdlib/2and3/cgi.pyi,sha256=HUlFXoovlydtwen1RnQdlLPQC4a26fXe5wrAR0f38bM,6016 +jedi/third_party/typeshed/stdlib/2and3/cgitb.pyi,sha256=-5_uqith9S9ZHOXQf-2Z76Q3I7UeBX2kxRoijoQbvnI,1447 +jedi/third_party/typeshed/stdlib/2and3/chunk.pyi,sha256=juLgoUjtZJj3tExr1grorsVNUnVZtTgoJ8N9gj8opXs,613 +jedi/third_party/typeshed/stdlib/2and3/cmath.pyi,sha256=jSud27iMIBSTw2I2NhIUJa38VEC8X1h7YlN7SbQqWhk,1217 +jedi/third_party/typeshed/stdlib/2and3/cmd.pyi,sha256=-8dq1Td5QhOzLoBkeK3EZjYTfNvxJUTZHrBj0H03cl8,1658 +jedi/third_party/typeshed/stdlib/2and3/code.pyi,sha256=A9cTESdR37TyuZeiiIjfDJhI-0Baizcu_YGHZU6hqTU,1522 +jedi/third_party/typeshed/stdlib/2and3/codecs.pyi,sha256=DXqiWmZp5ZhfIw6zib357lvj9OnTmMmKgh8uQ6SBLu8,12312 +jedi/third_party/typeshed/stdlib/2and3/codeop.pyi,sha256=Ypyu2k5z5r6B2tNkO9JKM8YM-CBXT3SZUL0Ak8P8qag,489 +jedi/third_party/typeshed/stdlib/2and3/colorsys.pyi,sha256=JwCA0W2O9-JS0KA9u7kKNVJKRiu1ACOPzxy-SEKzDRg,578 +jedi/third_party/typeshed/stdlib/2and3/contextlib.pyi,sha256=sV5PoppaY5WggvWnKN7ZpAnKWFITWtTPV6CEPLnmrKY,4419 +jedi/third_party/typeshed/stdlib/2and3/copy.pyi,sha256=EOWzpARbTnxmkq3u6Om9nYpYG62ts2WqfquBObFQgxM,328 +jedi/third_party/typeshed/stdlib/2and3/crypt.pyi,sha256=_D__DDlGumti-2YksSJiOjFJ0tJbckFBKrszPdbtWrg,648 +jedi/third_party/typeshed/stdlib/2and3/csv.pyi,sha256=26HmvwJBnGEWObBRHj0xS0p68q1192_ygtQd2rEgB9g,2751 +jedi/third_party/typeshed/stdlib/2and3/ctypes/__init__.pyi,sha256=Br0DywqyGaVwHaWbgbIWZLnQYQ7Spa04Z4eU8TrHz-8,11893 +jedi/third_party/typeshed/stdlib/2and3/ctypes/util.pyi,sha256=fO9_MTBB4lpXUvuQzrFT4yuIzt9x_FsEWtW6kbAWtkg,163 +jedi/third_party/typeshed/stdlib/2and3/ctypes/wintypes.pyi,sha256=4mLfzJ8kXytQo4DDsO5HX13sZWXUcs-XdwPygO6MOE0,4642 +jedi/third_party/typeshed/stdlib/2and3/curses/__init__.pyi,sha256=e6zyQJFe4QJgc6mUK3ZqOqRTKGXq_QxJLXLs3vyVQHU,370 +jedi/third_party/typeshed/stdlib/2and3/curses/ascii.pyi,sha256=9MeRhsGJwmIcMsL77Ct9QOrt5TKS_JRtgjyv_nbmQZQ,1212 +jedi/third_party/typeshed/stdlib/2and3/curses/panel.pyi,sha256=Wsl42xkXk8GQesNABDijIoBVX5Nx8dGm6prO1-gxlyU,801 
+jedi/third_party/typeshed/stdlib/2and3/curses/textpad.pyi,sha256=V-6r4xPbkITORxiUCAPV-QzWi69JZV0tZwO72HDbuU8,457 +jedi/third_party/typeshed/stdlib/2and3/datetime.pyi,sha256=vfY8l1ocVFixSnpqCIp6nGbp8NT1aU_hksSYpeAoTpw,12699 +jedi/third_party/typeshed/stdlib/2and3/decimal.pyi,sha256=94S0G8TK1ytezWJuoH_gHpnAeNswravx_BXo3nJO9Mc,17787 +jedi/third_party/typeshed/stdlib/2and3/difflib.pyi,sha256=gZTDgj3DFNFmIpbQ_Ju2zcsVT3xop8yB1JSbAi1XWz8,4936 +jedi/third_party/typeshed/stdlib/2and3/dis.pyi,sha256=3FFtM_e7jcMgzXw60pTLZZWMEO4nV3K6iZVuXwbSwXQ,3089 +jedi/third_party/typeshed/stdlib/2and3/doctest.pyi,sha256=kfWLoM--CjzB1TW3481RvTfVDOOgbQy1oL94-2joI2M,7083 +jedi/third_party/typeshed/stdlib/2and3/dummy_threading.pyi,sha256=ZI04ySfGgI8qdlogWtA8USUTFGfzm32t2ZxL5Ps53O8,79 +jedi/third_party/typeshed/stdlib/2and3/ensurepip/__init__.pyi,sha256=dTpEX_z-rFEqnQI3XvpxGCw-IE1aqUxZw6KEv0k86So,562 +jedi/third_party/typeshed/stdlib/2and3/errno.pyi,sha256=KDjlJTkt1sdcrwGgLkPMZlSwz1Dp0Xkt6PqEnqcLZWY,2011 +jedi/third_party/typeshed/stdlib/2and3/filecmp.pyi,sha256=z5T2uxVBjwdWIzrhR-zlRqIV9rg08EpUldQgr3-AfJ0,2566 +jedi/third_party/typeshed/stdlib/2and3/fileinput.pyi,sha256=01vjE75hR9lXLH-S_MN-RAeieTiBGcg4_of-zNyDmnU,2601 +jedi/third_party/typeshed/stdlib/2and3/formatter.pyi,sha256=lxpoZgWeuh3fyJu7ew4O00DBospTKRxateEK7f99zMU,4639 +jedi/third_party/typeshed/stdlib/2and3/fractions.pyi,sha256=VblZe5mtSJGL7fUSRronuksTeO24OLVAGghS_rEDpog,5936 +jedi/third_party/typeshed/stdlib/2and3/ftplib.pyi,sha256=4FqgC8unm_zR1CcpJalpHvRzODOmy0IuZk_Fggoercc,6278 +jedi/third_party/typeshed/stdlib/2and3/genericpath.pyi,sha256=Wob25JMIB38kmxD3fbyEmW_zOZ30A-0RR41OymvOg_E,806 +jedi/third_party/typeshed/stdlib/2and3/grp.pyi,sha256=fm0JOwvJ8CGCXSVWyqQuXFBPBfoVC8b5CyWTq-5fcvs,295 +jedi/third_party/typeshed/stdlib/2and3/hmac.pyi,sha256=DRSe8IPnvSQ6DCIhG8IFosACcpsOngQftNJRjEj4WkU,1607 +jedi/third_party/typeshed/stdlib/2and3/imaplib.pyi,sha256=d8rIKtUeqXt5O51dNweLcp_J6445KdC9aWJnGKR7zdI,8034 +jedi/third_party/typeshed/stdlib/2and3/imghdr.pyi,sha256=38fd4eD_xncwov0L7vvbaDGjKi9gGzLPbgsD3oe_I0A,604 +jedi/third_party/typeshed/stdlib/2and3/keyword.pyi,sha256=48hMnou7HbA580mGfRyd4uPgxmnbSt4KM_STSGMFzFA,210 +jedi/third_party/typeshed/stdlib/2and3/lib2to3/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/2and3/lib2to3/pgen2/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/2and3/lib2to3/pgen2/driver.pyi,sha256=FqgZDUSf1X9X55R7dm4YxYIIzu3zAXjB0x6T_zQl92w,956 +jedi/third_party/typeshed/stdlib/2and3/lib2to3/pgen2/grammar.pyi,sha256=zuKtRoL1GMizL8ij0akjhZVXH2mBsbb2LxJkFJPi0ME,733 +jedi/third_party/typeshed/stdlib/2and3/lib2to3/pgen2/literals.pyi,sha256=CSUI0I_W6I9fN-nb6rGgzLjGMIO0C6OwOmzYz-HEQZU,172 +jedi/third_party/typeshed/stdlib/2and3/lib2to3/pgen2/parse.pyi,sha256=Vd7LRmWw5Q9pWzYBbDsKAvL2NdyCZNEFCa0kysAtrsU,1107 +jedi/third_party/typeshed/stdlib/2and3/lib2to3/pgen2/pgen.pyi,sha256=ZhxquDht1cGiy0KiclTZSKGSctTRM21QTQDJEY38nC4,2140 +jedi/third_party/typeshed/stdlib/2and3/lib2to3/pgen2/token.pyi,sha256=YpBo_itTcpSXfWPpPFYEdTLcdEerPoAw8nuHpAGEo1Y,1065 +jedi/third_party/typeshed/stdlib/2and3/lib2to3/pgen2/tokenize.pyi,sha256=pA9uvZ22MHGVm37U1xhMEde2hUPvJQJBq6XDQ3Gzcts,883 +jedi/third_party/typeshed/stdlib/2and3/lib2to3/pygram.pyi,sha256=lyhgf6kpNVMPN6g6W-DSxt8E2_IyVguf7JjKzVhAmzc,2208 +jedi/third_party/typeshed/stdlib/2and3/lib2to3/pytree.pyi,sha256=Zz0sUjSMFvxHKpIPlvzNCRyJtzT2vEzYKPPGha8LRFk,3322 
+jedi/third_party/typeshed/stdlib/2and3/linecache.pyi,sha256=mfzNP8AUc3yd_VahaBEaSYt7-zDAKneFa7rjFQE8sIU,591 +jedi/third_party/typeshed/stdlib/2and3/locale.pyi,sha256=t-y1NnMHom3HdCewxj1zq7-83xAEdC2FR7KC7P3K67w,2555 +jedi/third_party/typeshed/stdlib/2and3/logging/__init__.pyi,sha256=zMbblZS9jbaC4hrktbUu0rzfZKP_AKhN2shNU0kgh_M,28358 +jedi/third_party/typeshed/stdlib/2and3/logging/config.pyi,sha256=oreN5ILtTK6x7s5m0n7-wqM4fG3e4WYp8uRhu9Qmtl0,967 +jedi/third_party/typeshed/stdlib/2and3/logging/handlers.pyi,sha256=J36V-7X3w-tFAH57rE6YkYU20tvEDzQF_omTvHIPDp4,8847 +jedi/third_party/typeshed/stdlib/2and3/macpath.pyi,sha256=B6y98iSZAXQPkStHypSps3hvpzq4eBmfYL6uMyBYj4E,5330 +jedi/third_party/typeshed/stdlib/2and3/mailbox.pyi,sha256=IrYfGXuaCVIJDADHUZ-1D5OOvHHeULJdTsWySzYCL5I,8071 +jedi/third_party/typeshed/stdlib/2and3/mailcap.pyi,sha256=5MWzikKoMQEkohWxGbzZUPwY6E_g1-i-nvPQw13Zxwk,330 +jedi/third_party/typeshed/stdlib/2and3/marshal.pyi,sha256=zXZaX_H3VD9clK-iiNZz7f5GKDdG4_TriqlTPCBR6oM,253 +jedi/third_party/typeshed/stdlib/2and3/math.pyi,sha256=RA890_35QPnZF7K2393I3qhr0My1U62LVK2UjXyv9Fw,3835 +jedi/third_party/typeshed/stdlib/2and3/mimetypes.pyi,sha256=wEC12QuS8DXgpM9ZMB-SZ1Zh2GA494p-cRBZQvnV6dI,1641 +jedi/third_party/typeshed/stdlib/2and3/mmap.pyi,sha256=BZHJGHPpkicsCQSIgZOUZA62JaXkafcpm4Vj8cYZjsU,4006 +jedi/third_party/typeshed/stdlib/2and3/modulefinder.pyi,sha256=Pwx90XH7YRRF4Dm1mUNBtIP8xb-5eDwTi6bwl1x4k_o,3674 +jedi/third_party/typeshed/stdlib/2and3/msilib/__init__.pyi,sha256=vruWL_rWAZiYVXQMq4jwiqVTzrIXKw2g5PSf59FOJu4,6306 +jedi/third_party/typeshed/stdlib/2and3/msilib/schema.pyi,sha256=LEZZXliX-2N73ryhJtTAuTvP_RZVsD4i7S1CRTLNtik,2214 +jedi/third_party/typeshed/stdlib/2and3/msilib/sequence.pyi,sha256=XQC-7D28bgEuJzAFC7WIX86AtUBrQ7pC5Sc1JTyPwEk,356 +jedi/third_party/typeshed/stdlib/2and3/msilib/text.pyi,sha256=drG3KVxFc8MYRrdkm3oJWaivBvZecq6pdKoFEemvF2Y,202 +jedi/third_party/typeshed/stdlib/2and3/msvcrt.pyi,sha256=DDiTa2YmqkChpLzKr80n-ZOfJAXWb6YYB0RER013dHw,795 +jedi/third_party/typeshed/stdlib/2and3/netrc.pyi,sha256=6ipkr1k-p5Pve7y-PTtCH0sZqeFV19Aw33SkQddMJm0,444 +jedi/third_party/typeshed/stdlib/2and3/nis.pyi,sha256=CUnTx-mKL-YinbUfrvw8WIWLwuqu4PtSVZ9M2mxSSvY,322 +jedi/third_party/typeshed/stdlib/2and3/numbers.pyi,sha256=73gLvE-jPZrLxYYhVHJfA--xYyIjJJG9_06iU3Vdauk,4335 +jedi/third_party/typeshed/stdlib/2and3/opcode.pyi,sha256=k_DFbtjvSdVKzq72SSztF8K4GKK-tk_WMSITs6EKUTI,609 +jedi/third_party/typeshed/stdlib/2and3/operator.pyi,sha256=JoI0OeQjkMbyEoD4cYXn9R1CVf2GXNOlFKKkgKfUmZ0,7146 +jedi/third_party/typeshed/stdlib/2and3/optparse.pyi,sha256=6YXlTrzWD4rA0HUYfQ8odZlsCza7ln4bT9srOGv7d-k,10096 +jedi/third_party/typeshed/stdlib/2and3/parser.pyi,sha256=AZjxkjNl47Rk3MMk38Muh7gpibhfs6EeTrHNubEsgTA,966 +jedi/third_party/typeshed/stdlib/2and3/pdb.pyi,sha256=mppJ9PSllcPnOcziqt3ua1Ne-N8BKDC7_kCa0xCNc7o,10325 +jedi/third_party/typeshed/stdlib/2and3/pickle.pyi,sha256=HgJusFb0xpfdwPMgh689OtA9zWULEUC_ob_3bSQNVgk,5322 +jedi/third_party/typeshed/stdlib/2and3/pickletools.pyi,sha256=LekbII856jROKj6nerzKVY3tr0J3rjFh7aHiiB9fMHQ,4510 +jedi/third_party/typeshed/stdlib/2and3/pkgutil.pyi,sha256=EXApPfIH6cq6nCJ3dgWY27Z2vO95y0_4sjUspTLJRSA,1558 +jedi/third_party/typeshed/stdlib/2and3/plistlib.pyi,sha256=SufCK1SjP-QOhGd1VhStpLcdRiuoczBGX3KdB4C4ICs,2742 +jedi/third_party/typeshed/stdlib/2and3/poplib.pyi,sha256=CezxOiUqIikxqAv4df5G5rs2jqmGMIWYVfbYt2hvUZg,2500 +jedi/third_party/typeshed/stdlib/2and3/pprint.pyi,sha256=Eox19Bw2R4Uyl87ilwJHr38pEFhBdHRWC2TgW9A2T6E,2893 
+jedi/third_party/typeshed/stdlib/2and3/profile.pyi,sha256=4mezNO66Y3vWX9SIftkcbeQcA3Swuc62yNxPkmmwk1w,1199 +jedi/third_party/typeshed/stdlib/2and3/pstats.pyi,sha256=trO8Oc7GQ4nRAdays5pWi5Viib1tKQwkwnQH0upmcJ4,2213 +jedi/third_party/typeshed/stdlib/2and3/pty.pyi,sha256=aCa6VBas1vNafTUiBCmFI-W9SR018bDjy1WtJmIqxOg,592 +jedi/third_party/typeshed/stdlib/2and3/pwd.pyi,sha256=s48Hp0pbMoMBpWg6glXl1ABEcLU4zLj_wV-PloenVI0,350 +jedi/third_party/typeshed/stdlib/2and3/py_compile.pyi,sha256=1A3_VdW5WwbifT-CTrSoXy0ZNFoN5zsqUUuUyPXJjfA,1642 +jedi/third_party/typeshed/stdlib/2and3/pyclbr.pyi,sha256=xirCZDC7Q7ffw0CZ3V6_62EbwCSSE4UXe0-DYatTLUs,1192 +jedi/third_party/typeshed/stdlib/2and3/pydoc.pyi,sha256=aHbKeX9juIvggS2xrKYVGkVBHvxJRBAJLMmtuYS5Xms,10660 +jedi/third_party/typeshed/stdlib/2and3/pydoc_data/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/2and3/pydoc_data/topics.pyi,sha256=PLI9r9ujTHeIKw7D9GxCeB_LwxV17eUTauVx9AKiU8k,48 +jedi/third_party/typeshed/stdlib/2and3/pyexpat/__init__.pyi,sha256=XJx933QTavu4mJNP63uHUFwncDx2wlbv1O1DarVJQ9E,3404 +jedi/third_party/typeshed/stdlib/2and3/pyexpat/errors.pyi,sha256=TVdXkdX10ZCjPuBYAy3yNQzUJMo5lCjsl8bv4VJ8PU0,1275 +jedi/third_party/typeshed/stdlib/2and3/pyexpat/model.pyi,sha256=LlBMaLvt3TBD1JQSAQTYUrHKAfKNEeQYaDw5GsJA--g,205 +jedi/third_party/typeshed/stdlib/2and3/quopri.pyi,sha256=0zH--hEadPU_f_GNqwlxU8vrs2tqmPaLCO94BeZXCyo,343 +jedi/third_party/typeshed/stdlib/2and3/readline.pyi,sha256=-Yx9NN7kGQwlGTwubEhIUIFPiMHQJY0z8QbuCE8u7xo,1585 +jedi/third_party/typeshed/stdlib/2and3/rlcompleter.pyi,sha256=1aZLE2z6iAikB08PpAEuc-UeL63VZhgHYQpBT4ssVGI,308 +jedi/third_party/typeshed/stdlib/2and3/sched.pyi,sha256=D7Y2BV6CNyRrJNn0UwLp1sYVTxu1TMw_CR4U3HUs03Q,1497 +jedi/third_party/typeshed/stdlib/2and3/select.pyi,sha256=zpT6GIDgll2LngtzJbW2ZV1qLyJAMpLwJwLMKynskxI,4828 +jedi/third_party/typeshed/stdlib/2and3/shutil.pyi,sha256=O5GZv_Ion6daXUfCfJLjhNPfefuLArzzCK0cYWVVxuA,5961 +jedi/third_party/typeshed/stdlib/2and3/site.pyi,sha256=dG90RROJAZP04P8KTzbJOEm5w-5lbHPfP1tU5IbErJo,450 +jedi/third_party/typeshed/stdlib/2and3/smtpd.pyi,sha256=mKs96L0YHWx4Mk3DXIq0ROLiettU1muAHz_MoksxOwk,2935 +jedi/third_party/typeshed/stdlib/2and3/sndhdr.pyi,sha256=Kbdxb_xqFGOZzce1apkDVbj2dpoMWKplNAP5KNV6tRc,501 +jedi/third_party/typeshed/stdlib/2and3/socket.pyi,sha256=y2Db0P_TYtBdVuWiG3xXxsr0AbSxa5sXbyLf0or_wuI,22711 +jedi/third_party/typeshed/stdlib/2and3/sqlite3/__init__.pyi,sha256=aJu9MCNl8y9HCykdUpo1Z-LSiP3mxRSxrWkCsMxemYI,43 +jedi/third_party/typeshed/stdlib/2and3/sqlite3/dbapi2.pyi,sha256=hdn9VtF3aSsI-oGdY4XObtCo8kSUVGOPuZP51f2Z9aM,11315 +jedi/third_party/typeshed/stdlib/2and3/sre_compile.pyi,sha256=a7wV8OxkRzj0F1L7B02gcZrvMi5Uz87g2wG0pHvWtNI,1073 +jedi/third_party/typeshed/stdlib/2and3/ssl.pyi,sha256=igBbyUhdonNcvRkjzUaRf2r8LIOx1J2lwYzHVC9AEvk,14626 +jedi/third_party/typeshed/stdlib/2and3/stringprep.pyi,sha256=fqKAxHgpLExMmFDO66rJ-kFZS5mVKeWvK_qWQ2yvsWc,817 +jedi/third_party/typeshed/stdlib/2and3/struct.pyi,sha256=FnlORamrhrWi59qMGSXUL_px20rH8QAgFyrptIv3Qxw,1568 +jedi/third_party/typeshed/stdlib/2and3/sunau.pyi,sha256=8nruVjfJtTIU816plhmHsGL5VRubbsWem5hvWiyiONc,3085 +jedi/third_party/typeshed/stdlib/2and3/symtable.pyi,sha256=Dff6o3EDAli4Mzo-Jx8aSN9iiCxlEN-hC82OFaJOHVg,1645 +jedi/third_party/typeshed/stdlib/2and3/sysconfig.pyi,sha256=ISugdNUmN9ycQF8mHoiBL8zoy9As_QcIBRvRPbUTamY,843 +jedi/third_party/typeshed/stdlib/2and3/syslog.pyi,sha256=WbINKcwKTg5WFsn4f06K3-jriyoYXzqnOU9HwbAFbZY,821 
+jedi/third_party/typeshed/stdlib/2and3/tabnanny.pyi,sha256=g_vgZl-PEUiu0JEx0LxnwPaDmgP65k9Hva0zVCIDUQU,447 +jedi/third_party/typeshed/stdlib/2and3/tarfile.pyi,sha256=1RRfYoQGC8KVZABw5FH0PGk5I3cgS90HWN_s-EoboRQ,8182 +jedi/third_party/typeshed/stdlib/2and3/telnetlib.pyi,sha256=TRsxrafitwtsxZ6m-KzsFEy-9uFpypw-r2bmW6Mz2lQ,2680 +jedi/third_party/typeshed/stdlib/2and3/termios.pyi,sha256=ZJKwv-a6e8w8frrD3Wgo_I66YNXNtQkhY59jymFZ-wI,3504 +jedi/third_party/typeshed/stdlib/2and3/this.pyi,sha256=5hi7CD2tChI9_genuMBxsS8GOFo0gAVvFGuv-_Uc9p0,50 +jedi/third_party/typeshed/stdlib/2and3/threading.pyi,sha256=__eLfXz1Hygl5swKoIL4JZtQ-JBIKkr-yExKwKl6eD0,6309 +jedi/third_party/typeshed/stdlib/2and3/time.pyi,sha256=jnR64tSQq-k6AOYxmFOxLD2FPFy71v0BPWsic4F1fVM,3900 +jedi/third_party/typeshed/stdlib/2and3/timeit.pyi,sha256=AXtKdk4M2pfi6_KCgmpkDujbaVuR1smNIgpz6_90fIQ,1623 +jedi/third_party/typeshed/stdlib/2and3/token.pyi,sha256=04p49poqkYTWUiSpb2syf7dX84i-eP_NBUXbJIkFcRY,1468 +jedi/third_party/typeshed/stdlib/2and3/trace.pyi,sha256=JzpexVtFU46sf7VlGb7mnpvyLMKxuco6OAk-pQRnhLM,2037 +jedi/third_party/typeshed/stdlib/2and3/traceback.pyi,sha256=O9stbkIrGRLbmSg-nBxrEFlEkk9Kxp-yJiQXnJzc5hs,5545 +jedi/third_party/typeshed/stdlib/2and3/tty.pyi,sha256=A25_a1yrTL55nQAsEpOWKsni215-75a4rAoFJ7g7Qr0,275 +jedi/third_party/typeshed/stdlib/2and3/turtle.pyi,sha256=sbaGlYhQbjQoqfJNX_mxNqMeJEqjNpuVQJe5L0Ci4Zs,19476 +jedi/third_party/typeshed/stdlib/2and3/unicodedata.pyi,sha256=AdW8_ww_4_Oh5undlAXiNUJF6elKYALIyc_cMMJsqpg,1902 +jedi/third_party/typeshed/stdlib/2and3/uu.pyi,sha256=j8lHTLVoMNPv2EJ1py5ozflgE5M1tHFrCOjydfSbPts,549 +jedi/third_party/typeshed/stdlib/2and3/uuid.pyi,sha256=NnP8AHc5cneMi37IHGdBBvmVqPSAlTUK_4qOfEYMMhA,3448 +jedi/third_party/typeshed/stdlib/2and3/warnings.pyi,sha256=byrds4TJ9vuk0d5ohkeFcm81yadHPfmudeFBE1JEQ8A,2583 +jedi/third_party/typeshed/stdlib/2and3/wave.pyi,sha256=EWn5CUj5JmmpZHDy6nfHuxkLSHhEZ8vPl-iH5rOeyrk,2651 +jedi/third_party/typeshed/stdlib/2and3/weakref.pyi,sha256=yK1h8AhDi0SMSMS3jih101FhTimqaZl_g-uIcc1hcNQ,4433 +jedi/third_party/typeshed/stdlib/2and3/webbrowser.pyi,sha256=Aic7NcY6wXkRP5SU66vAhrcWylwe8i78u2nzfenn7FY,3295 +jedi/third_party/typeshed/stdlib/2and3/winsound.pyi,sha256=i7oaOMrdRL5hMtBGZXQtB2jR_gIDcqltc0JMZPeTHMY,811 +jedi/third_party/typeshed/stdlib/2and3/wsgiref/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/2and3/wsgiref/handlers.pyi,sha256=HkQCK0geDJd1kFX6FWlydyu7QWG-rcDM2P2BHXKAsrI,3125 +jedi/third_party/typeshed/stdlib/2and3/wsgiref/headers.pyi,sha256=j5-Y2n64U9US_Xld0JtgrWjA8eYlo6BOzu89HKdDsaE,1250 +jedi/third_party/typeshed/stdlib/2and3/wsgiref/simple_server.pyi,sha256=4pAabsnp0x4uyrHdtxchezvI8cudyl4F6K4VlCX5VFk,1528 +jedi/third_party/typeshed/stdlib/2and3/wsgiref/types.pyi,sha256=1qhS0qVWoV0IVfe3b1y4Mzhou65EPtwb_QLmWfX5_I4,71 +jedi/third_party/typeshed/stdlib/2and3/wsgiref/util.pyi,sha256=Ly4ctt2LOpbP9t5YeKU9wwov7iN1uFqeAtBjJ-O1HtU,893 +jedi/third_party/typeshed/stdlib/2and3/wsgiref/validate.pyi,sha256=YB_yhIz9Dl3b8s_-ASpzsPpRSpCwCuZ1jMaLAB_7q4g,1861 +jedi/third_party/typeshed/stdlib/2and3/xdrlib.pyi,sha256=yHZnDsb7h-kuMZ04mQip7YRjBSX-g900SOE0HhGyMl4,2315 +jedi/third_party/typeshed/stdlib/2and3/xml/__init__.pyi,sha256=BqMXnsXiYPoalMzEakn6mYDxgyW5N2UPF0Ao7xPuGVY,30 +jedi/third_party/typeshed/stdlib/2and3/xml/dom/NodeFilter.pyi,sha256=bi0L5SEOxk4FyEhf18oU-I8Msf9S9o_tJt-mVc93f28,457 +jedi/third_party/typeshed/stdlib/2and3/xml/dom/__init__.pyi,sha256=gjfWhkwyNoY8SeH6cztWZ9W8w9E4CLgCpHeP8vnHM5c,1844 
+jedi/third_party/typeshed/stdlib/2and3/xml/dom/domreg.pyi,sha256=Sq02GZ6VRiXWCy2lUE1e47_EUDdr88rmWZoKCxH0fgQ,462 +jedi/third_party/typeshed/stdlib/2and3/xml/dom/expatbuilder.pyi,sha256=wI_eu1G8yaaquRHmZ9mYRgjy4zNNhJC385TjSMoamRg,77 +jedi/third_party/typeshed/stdlib/2and3/xml/dom/minicompat.pyi,sha256=wI_eu1G8yaaquRHmZ9mYRgjy4zNNhJC385TjSMoamRg,77 +jedi/third_party/typeshed/stdlib/2and3/xml/dom/minidom.pyi,sha256=DSNxhSfqdzlQQ5HjeRZHxB2mhshhLkDfeAmeOY5I9K4,287 +jedi/third_party/typeshed/stdlib/2and3/xml/dom/pulldom.pyi,sha256=wI_eu1G8yaaquRHmZ9mYRgjy4zNNhJC385TjSMoamRg,77 +jedi/third_party/typeshed/stdlib/2and3/xml/dom/xmlbuilder.pyi,sha256=wI_eu1G8yaaquRHmZ9mYRgjy4zNNhJC385TjSMoamRg,77 +jedi/third_party/typeshed/stdlib/2and3/xml/etree/ElementInclude.pyi,sha256=4e6cquTvRpyBS1UBTNomH5ghEtSzMHr3im5VwFAzvEI,873 +jedi/third_party/typeshed/stdlib/2and3/xml/etree/ElementPath.pyi,sha256=lJV3KlSWuf_anuM3hIRwdD-n-hpO4b7MzCKg0FxyFAE,1561 +jedi/third_party/typeshed/stdlib/2and3/xml/etree/ElementTree.pyi,sha256=YbNf8vOL8aKD-MjSJufTgeTcip6A270muX9zWiKQzDQ,14905 +jedi/third_party/typeshed/stdlib/2and3/xml/etree/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/2and3/xml/etree/cElementTree.pyi,sha256=D25RVU5Y1Sai47EQ49UTStrYaY39HMqT1HUOZrSioRg,50 +jedi/third_party/typeshed/stdlib/2and3/xml/parsers/__init__.pyi,sha256=FHZYB9bXDrj4RiKUgrctpkuf7_Rms9PqQrGyjkn0EE4,34 +jedi/third_party/typeshed/stdlib/2and3/xml/parsers/expat/__init__.pyi,sha256=qmz8tuPGbZ2rBfRrfYANxDZNxn9BTQXdd9AugF5wDW0,22 +jedi/third_party/typeshed/stdlib/2and3/xml/parsers/expat/errors.pyi,sha256=mH9YRZuV4quzksDMLEmxiisAFgNhMOhl8p07ZzlS2XE,29 +jedi/third_party/typeshed/stdlib/2and3/xml/parsers/expat/model.pyi,sha256=M7GVdd-AxOh6oGw6zfONEATLMsxAIYW2y9kROXnn-Zg,28 +jedi/third_party/typeshed/stdlib/2and3/xml/sax/__init__.pyi,sha256=PuxAYEbGDZ-GvZFBPhpKAnN5935WR5dRWaGbhdUN1Nc,1389 +jedi/third_party/typeshed/stdlib/2and3/xml/sax/handler.pyi,sha256=lzRvfmcsC4Px11th3C-OB53OJgrSxHHTkgWKkFnYYII,1391 +jedi/third_party/typeshed/stdlib/2and3/xml/sax/saxutils.pyi,sha256=GPHCPJVekBwXRuRViACPSwgJ6WvTn3bKK7Vfy4hgFwA,2825 +jedi/third_party/typeshed/stdlib/2and3/xml/sax/xmlreader.pyi,sha256=6xszLDpI9jWetj83aFhYiq2XKeTvp6A1vj0gT2grXqE,2477 +jedi/third_party/typeshed/stdlib/2and3/zipfile.pyi,sha256=WDrnBqQDKJpsKwqAATU8DrbT4_9fi1ZtMz_6j1antOQ,7249 +jedi/third_party/typeshed/stdlib/2and3/zipimport.pyi,sha256=zdgTOt05qDHBEdkU8TmZXXu_VeX4BXptVsGnYu39eiM,1244 +jedi/third_party/typeshed/stdlib/2and3/zlib.pyi,sha256=fn9NyYniW7bGyLOaipjK0mBXQnUqlmowBBc5BPAM_gk,1692 +jedi/third_party/typeshed/stdlib/3.7/_py_abc.pyi,sha256=vReNkdPnizulbWOKGaxCg-6CslS4zvzEtheQOazNlxM,376 +jedi/third_party/typeshed/stdlib/3.7/contextvars.pyi,sha256=buC4sVtTFV1eqWvJ3Xd53ymdER7bHRY7RXGcN21HF68,1514 +jedi/third_party/typeshed/stdlib/3.7/dataclasses.pyi,sha256=KYjW7iSlZ42rrD1Z34CvtIjUSzOTm2lcLozDVSAOkSo,2737 +jedi/third_party/typeshed/stdlib/3.9/graphlib.pyi,sha256=Spt7N2qI_jP87TY6Oqfn5q1JKonbPmQnGn7yMBPAq9s,592 +jedi/third_party/typeshed/stdlib/3.9/zoneinfo/__init__.pyi,sha256=QjdggL46x_feOdxU4aioHhVM0-obo-vi_N-SBsgR-S4,1183 +jedi/third_party/typeshed/stdlib/3/_ast.pyi,sha256=xgO_RWFNlpKCgJ-N8Oz0MLoNxsluUZK6-LgvM7GHk9s,7851 +jedi/third_party/typeshed/stdlib/3/_bootlocale.pyi,sha256=0g5u3OybUgqbQUaK32XpV2OY1vMr_tMyvv-xWePyn60,63 +jedi/third_party/typeshed/stdlib/3/_compat_pickle.pyi,sha256=bgMC5XUuSbLzpZNo1xi_Lmdhuk7YEkNehIU3VbxJF0A,388 +jedi/third_party/typeshed/stdlib/3/_compression.pyi,sha256=2oEmy37B7HOAAXqPvDtdlBgIuI4veE3Zn_ahmMZek9I,761 
+jedi/third_party/typeshed/stdlib/3/_decimal.pyi,sha256=gKb8JF9qlgs-k7lWkc6x13adH918J9IBSh2vJJO2-Es,22 +jedi/third_party/typeshed/stdlib/3/_dummy_thread.pyi,sha256=eurqRwuKXCugsXKBB_45dhYwMfGsEOI3JM36MU0_38c,800 +jedi/third_party/typeshed/stdlib/3/_imp.pyi,sha256=ncrUaJ1P-gDktpPYaAEDEd6O0vWZoQbZaEUYkJJEUi0,705 +jedi/third_party/typeshed/stdlib/3/_importlib_modulespec.pyi,sha256=2H1Bqwkmzeu7VdhBr_ZD4YGYLfFCcQPtfrs8Vc8-B0I,1586 +jedi/third_party/typeshed/stdlib/3/_json.pyi,sha256=TLqFcbDts6gPDQGO1UBQuu7Sgr707zI5mIsc0nE4468,1124 +jedi/third_party/typeshed/stdlib/3/_markupbase.pyi,sha256=bxmSN6aw-V3qwShSR82gk2l_ZrCF0KGQhP3baRo7TPE,256 +jedi/third_party/typeshed/stdlib/3/_operator.pyi,sha256=cW8gndiQv2Ge6SxDU1TYCmdDJ1jjCpRO4Blp3IzYBJ4,1310 +jedi/third_party/typeshed/stdlib/3/_osx_support.pyi,sha256=UzxlYXvWxj-409uwv1Qlrt2Dz7SljoBGbxpQn2Rgeok,1796 +jedi/third_party/typeshed/stdlib/3/_posixsubprocess.pyi,sha256=CSDkhdm87FCJzDPSZ7kRSuruLaUJCFO6-grnXJAohPg,557 +jedi/third_party/typeshed/stdlib/3/_pydecimal.pyi,sha256=droa2p0DPjlLpftTkyseRtRiWWwFEFuBmsKmAEBykHI,157 +jedi/third_party/typeshed/stdlib/3/_sitebuiltins.pyi,sha256=joHmJtcwuqTJzYC9Yz8ExsAOrT2ImObL7RzK1J_YKmA,534 +jedi/third_party/typeshed/stdlib/3/_stat.pyi,sha256=rOfqArOAol_zPagL6lyRodbmL93Qsb1vl1UMNz9x5QE,1179 +jedi/third_party/typeshed/stdlib/3/_thread.pyi,sha256=bwRRV0tuEwneVnc4LSBeYjCZp6rbUVAxzFU17xRxVzc,1451 +jedi/third_party/typeshed/stdlib/3/_threading_local.pyi,sha256=nxpI1n5Bmux1ymFk1lGgWgzSzqFHIDvH_y_FJzSGMqc,490 +jedi/third_party/typeshed/stdlib/3/_tkinter.pyi,sha256=yIaAQOXeQdYw5KzaBCpRx8VZJ67khvPs95zlZI9cM4k,2531 +jedi/third_party/typeshed/stdlib/3/_tracemalloc.pyi,sha256=0mp3PJAcjINGNLU8pmyA2_18SfGMgVN8p72QNz-TfPU,563 +jedi/third_party/typeshed/stdlib/3/_winapi.pyi,sha256=e80Wj1rKs41wv4T4tVVGedGngTWfXB8Yal7OS3iXRKs,4507 +jedi/third_party/typeshed/stdlib/3/abc.pyi,sha256=sFDzu6W3-WI8VhWu-3f7oy89xu-T2ad5AkZOr-oaKbk,597 +jedi/third_party/typeshed/stdlib/3/ast.pyi,sha256=rN2zXBxVShE5080Uq3D6J8cFVU0PMumhHOWLGBp6EyE,9090 +jedi/third_party/typeshed/stdlib/3/asyncio/__init__.pyi,sha256=vQqBti5lCesBtJosU6ISR_TaGXUnon5V2niuGmIAFgM,4242 +jedi/third_party/typeshed/stdlib/3/asyncio/base_events.pyi,sha256=z3HWCTbmefm0vunGqkQwL2fvzDwHWxeQGyI6FtzfHIA,14112 +jedi/third_party/typeshed/stdlib/3/asyncio/base_futures.pyi,sha256=S2IqXCOFikl3_yUkCuiONt0mvmvxW_OcK5ZhlWv38cQ,733 +jedi/third_party/typeshed/stdlib/3/asyncio/base_subprocess.pyi,sha256=BY-Dg-c_bEEe9OMq3LkTlsf3b9GfDTy9GpG6ff7os-Y,3239 +jedi/third_party/typeshed/stdlib/3/asyncio/base_tasks.pyi,sha256=WgkoJoXQj11Dp8EFMNpux0aZD5C62kXOyEVuPgTHUNU,412 +jedi/third_party/typeshed/stdlib/3/asyncio/compat.pyi,sha256=5RcYr5xnYP7yXeBN4CKGSeYeNXUNNU1OCkeliI5fmXM,180 +jedi/third_party/typeshed/stdlib/3/asyncio/constants.pyi,sha256=mi5hQw879HlxJe6jIB5ZGcFF7IwUSNwV39dUXP14JG8,327 +jedi/third_party/typeshed/stdlib/3/asyncio/coroutines.pyi,sha256=KSMF7NRW8a61FFkMXSbI-I957V8-cnI4vr-y5jPIcLE,226 +jedi/third_party/typeshed/stdlib/3/asyncio/events.pyi,sha256=GfQt1pVJScwwdoKJOILTBFFA4ew_qTSYteCM-W6laQI,18648 +jedi/third_party/typeshed/stdlib/3/asyncio/exceptions.pyi,sha256=dROhdLRjCGhqH7bI3_yUbPBr7aIKBM1Ks_iWi-7HOq0,562 +jedi/third_party/typeshed/stdlib/3/asyncio/format_helpers.pyi,sha256=3fVMBIqfwuM6eLWjnNv0OFdNZEs9uk2on5NbaeCTW5Q,921 +jedi/third_party/typeshed/stdlib/3/asyncio/futures.pyi,sha256=pKyo4aaJBbPLHDZepPclI8A-2flbzGx60pWiCn49dTw,2581 +jedi/third_party/typeshed/stdlib/3/asyncio/locks.pyi,sha256=UFo1H-sg4cOAyT0rj-Rbm_58FyR73NGvXT70-vbWMW4,2806 
+jedi/third_party/typeshed/stdlib/3/asyncio/log.pyi,sha256=Ql97njxNKmNn76c8-vomSAM7P-V14o-17SOIgG47V-U,39 +jedi/third_party/typeshed/stdlib/3/asyncio/proactor_events.pyi,sha256=VcgZHD_Vsu0VSBYYXa4pZTwntgr9okSrLZs-6xcR9o0,2783 +jedi/third_party/typeshed/stdlib/3/asyncio/protocols.pyi,sha256=SBG_ZeDS4RSwq6vj0uppbRezcJKyjRGpqnWOSWSqlqY,1072 +jedi/third_party/typeshed/stdlib/3/asyncio/queues.pyi,sha256=KWOi5Y1t8_v4Nlf0T50xiTi4okN1kxpU1kw74TnBLIM,1166 +jedi/third_party/typeshed/stdlib/3/asyncio/runners.pyi,sha256=8fwyzaEpkiQuUJBAJGdA4QLa6keFBm_ojm0N5KQn2pk,314 +jedi/third_party/typeshed/stdlib/3/asyncio/selector_events.pyi,sha256=ajnESF-_WSF-65nUWRQKJJrq95fJAkO6mYUIdpDWRL0,215 +jedi/third_party/typeshed/stdlib/3/asyncio/sslproto.pyi,sha256=bXJ1AhfKicw_i991U4uAnI6LIC5NCTotbzIX_o_tEAY,5415 +jedi/third_party/typeshed/stdlib/3/asyncio/staggered.pyi,sha256=5ysf7K4OZzJoslO6Jyutra3HVjrgdYXzOxBDc9SpnS8,396 +jedi/third_party/typeshed/stdlib/3/asyncio/streams.pyi,sha256=Vjlv0BjrBw-jAlUE7AYF-V3-GqMYJO8waNOpLBRKs3M,3941 +jedi/third_party/typeshed/stdlib/3/asyncio/subprocess.pyi,sha256=qeMUmUnuAWmms9LYfR5Ldgao3tWDPhxuwprVQ9rRE6I,2265 +jedi/third_party/typeshed/stdlib/3/asyncio/tasks.pyi,sha256=NR-30BS6Tgn5Xd5qj_HMiN1yvd6xT0D21HYLmSkOO8Q,7153 +jedi/third_party/typeshed/stdlib/3/asyncio/threads.pyi,sha256=xNqf8z1pbCIoeaO4w4YTzcuqOoGKbb1bJ7KjXpP7vWU,194 +jedi/third_party/typeshed/stdlib/3/asyncio/transports.pyi,sha256=81VjVySq54ht5UnyaxEaYhjBgt263z0nUGuwl2zNWvc,1886 +jedi/third_party/typeshed/stdlib/3/asyncio/trsock.pyi,sha256=Gywt7hJSyhtc9oIGAhTZUls1u_5C_AtmGKlOCkdqyIk,4582 +jedi/third_party/typeshed/stdlib/3/asyncio/unix_events.pyi,sha256=xnaVSEGzJn3AKT0eCG9g0xacrj5EG9K6fA8MNbD2EEw,2144 +jedi/third_party/typeshed/stdlib/3/asyncio/windows_events.pyi,sha256=SQQ4tw0uGxFkIaFjMhVwqHlpU2kyBt-7Zb1qUKbNmGM,3554 +jedi/third_party/typeshed/stdlib/3/asyncio/windows_utils.pyi,sha256=sPyeKsJrsOsSHFop6mMTdPdyiOasrfvAVWRCLVDDruY,983 +jedi/third_party/typeshed/stdlib/3/atexit.pyi,sha256=NLT6GZV7DmuNcHb-lTn2R5mT-V12bibRWe12ittWOeo,271 +jedi/third_party/typeshed/stdlib/3/builtins.pyi,sha256=OnHDGVqjdghwpPVhsTK7DHElTolNSs8MvfAHEwoLw8o,54524 +jedi/third_party/typeshed/stdlib/3/collections/__init__.pyi,sha256=CRVuUl5jM4mQlf1i1Xsn2zm1KljSS0XzEvNpAI2kPRk,14502 +jedi/third_party/typeshed/stdlib/3/collections/abc.pyi,sha256=dVx25fVnQBL30UcThhhbW3wcpH5Ru5EM_hW448QeFPg,744 +jedi/third_party/typeshed/stdlib/3/compileall.pyi,sha256=YYPeCHqW1sy_ecwW2gdNA4JErmOMqzizTus5hYh__IM,3367 +jedi/third_party/typeshed/stdlib/3/concurrent/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/3/concurrent/futures/__init__.pyi,sha256=ZyXXp9i057jbhjo8WeR9SYBT0dhFnwyZ-L1cJP4pQWg,629 +jedi/third_party/typeshed/stdlib/3/concurrent/futures/_base.pyi,sha256=4tKni6V8xudjGjgdqmUCyWa4aQFINZrLlzIzJegI4O4,4676 +jedi/third_party/typeshed/stdlib/3/concurrent/futures/process.pyi,sha256=ZJi1cVZxQPFoJnHUunWf_kUSlEVX_NBbj3asiI8_shM,804 +jedi/third_party/typeshed/stdlib/3/concurrent/futures/thread.pyi,sha256=7thIcnDTCR-0-lB5jJFN4xWzpt4MoQiGy2vKkymPA4w,1421 +jedi/third_party/typeshed/stdlib/3/configparser.pyi,sha256=v_JccqizVrtoccBdWqJSDGiZUbDQaXu2Eoh96AI3EHM,10095 +jedi/third_party/typeshed/stdlib/3/copyreg.pyi,sha256=-GmSIudewg4xblZXLSvErhpunAMi1bsqdQUEXujpFlI,739 +jedi/third_party/typeshed/stdlib/3/dbm/__init__.pyi,sha256=fxHKjeckXzF2s17gpHZ7mD2CKOw42qFaX8oTsa2wUGM,945 +jedi/third_party/typeshed/stdlib/3/dbm/dumb.pyi,sha256=f1YcbTYCzE8bcoC-J9jL8-tKlwTMMV0JBjGFzuj4EQk,1010 
+jedi/third_party/typeshed/stdlib/3/dbm/gnu.pyi,sha256=zMcocoAPlm0vkOt5kqWVu6eBe7HZ74A3xl0W7PgrpSc,1363 +jedi/third_party/typeshed/stdlib/3/dbm/ndbm.pyi,sha256=mCED-czlQBkm28csVVuyWFXNQ282xg1sUAnsCOvaMP4,1236 +jedi/third_party/typeshed/stdlib/3/distutils/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/3/distutils/archive_util.pyi,sha256=WaUu32XUCstlA4zE0WS-PvA9UF0mdyuDgDbZZd1LN0A,447 +jedi/third_party/typeshed/stdlib/3/distutils/bcppcompiler.pyi,sha256=fge2cMbG4jp--o0I2zNcwykh24tJWZtk6leQgAH2NJw,78 +jedi/third_party/typeshed/stdlib/3/distutils/ccompiler.pyi,sha256=N38wYG41RyiEuowx_ZvpZIqVVZYiz1NGcGHHDJ9MbWs,6449 +jedi/third_party/typeshed/stdlib/3/distutils/cmd.pyi,sha256=wquIAgFbqLNsoHDPGQm3WuGU13054o1u4FGHprpI_9Q,2803 +jedi/third_party/typeshed/stdlib/3/distutils/command/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/3/distutils/command/bdist.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/3/distutils/command/bdist_dumb.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/3/distutils/command/bdist_msi.pyi,sha256=sDSqH7TRcOiXC5S4VXxJ_YHB-WFPpa1fo8F8g5XeV3Y,182 +jedi/third_party/typeshed/stdlib/3/distutils/command/bdist_packager.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/3/distutils/command/bdist_rpm.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/3/distutils/command/bdist_wininst.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/3/distutils/command/build.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/3/distutils/command/build_clib.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/3/distutils/command/build_ext.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/3/distutils/command/build_py.pyi,sha256=lBXy2QTRaedFpVnXmdyJonKRrnEjAKYViarVHccdb1w,217 +jedi/third_party/typeshed/stdlib/3/distutils/command/build_scripts.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/3/distutils/command/check.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/3/distutils/command/clean.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/3/distutils/command/config.pyi,sha256=DV1oMIztVDdk46W43HGioP_n6b3x9FgSqvFr2rwPVoY,3059 +jedi/third_party/typeshed/stdlib/3/distutils/command/install.pyi,sha256=z_gxnwFlHhSicrx9YRtsJnJQ1YG7mu6zERZwZ-1RJi8,365 +jedi/third_party/typeshed/stdlib/3/distutils/command/install_data.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/3/distutils/command/install_egg_info.pyi,sha256=WcnLycNSSWSZ8Z_vHIohu0-3qKnCih2qoJaGBPtjQGY,380 +jedi/third_party/typeshed/stdlib/3/distutils/command/install_headers.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/3/distutils/command/install_lib.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/3/distutils/command/install_scripts.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/3/distutils/command/register.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+jedi/third_party/typeshed/stdlib/3/distutils/command/sdist.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/3/distutils/command/upload.pyi,sha256=NQ4QYCmhC98gBW2Fv2LW0xzFNZI-9gI6AjTW_UqeOJY,279 +jedi/third_party/typeshed/stdlib/3/distutils/config.pyi,sha256=M4b9_7PKsO74DhsIBFoTAB5I99hXrlBJcjxghWhI8vc,523 +jedi/third_party/typeshed/stdlib/3/distutils/core.pyi,sha256=lq1S8lRqeGqK6U3z7_UsVg2TR7xbx7Z2YXJ0ekIo9Vk,1688 +jedi/third_party/typeshed/stdlib/3/distutils/cygwinccompiler.pyi,sha256=Y7qhVOqXrkPT0yyQnudNCtTzBYC2lzS38HB5Mh45zEI,138 +jedi/third_party/typeshed/stdlib/3/distutils/debug.pyi,sha256=7_zuriUBqHbc62x7tCONq7LQXLuK_hCaBK0laoR3HeY,12 +jedi/third_party/typeshed/stdlib/3/distutils/dep_util.pyi,sha256=QCheHEDF7waISF42_aaumqvVOIcTw-yh5e5-CbPvQ2o,252 +jedi/third_party/typeshed/stdlib/3/distutils/dir_util.pyi,sha256=0nHuLCqZ36gvDVaF6PoC76_JOC3v6P_310eFauW1ZDM,555 +jedi/third_party/typeshed/stdlib/3/distutils/dist.pyi,sha256=a5bZv3WJK1cAA-rXLz2WRgvWRr_KrNShz1ho37Z73nA,2557 +jedi/third_party/typeshed/stdlib/3/distutils/errors.pyi,sha256=l1W_FgoP9L-D-hEPFA2BzZuybjN0lV4WBXl0VJ-k7J8,852 +jedi/third_party/typeshed/stdlib/3/distutils/extension.pyi,sha256=DSCTrWp6UVQGcvlVM-07HCJUNMT7ggxg8-gGBL1hKlg,736 +jedi/third_party/typeshed/stdlib/3/distutils/fancy_getopt.pyi,sha256=ai-0E83HnTOcYCWO9zq4tDVvnef04SRRoa-F8baso_o,859 +jedi/third_party/typeshed/stdlib/3/distutils/file_util.pyi,sha256=RFpiizXMdhhcji98pIscfHW4r6i9KLzM2D15gA6_eow,439 +jedi/third_party/typeshed/stdlib/3/distutils/filelist.pyi,sha256=-WeYFFKsEUUjPvbzeZbVCOKkPV-oqc3RoZvN2SB1VOE,20 +jedi/third_party/typeshed/stdlib/3/distutils/log.pyi,sha256=CXViHmBVIadN4DWrSddT9lN1O1WaNjpxvaz-KUCD8C0,845 +jedi/third_party/typeshed/stdlib/3/distutils/msvccompiler.pyi,sha256=qQLr26msfhjz-omJutWcRHik3shLh1CIt7CDI3jBd3I,78 +jedi/third_party/typeshed/stdlib/3/distutils/spawn.pyi,sha256=iDdi2WvST9yeFDlKWy0Wlye-x67W-ah5nta7EuRW2W4,227 +jedi/third_party/typeshed/stdlib/3/distutils/sysconfig.pyi,sha256=FSdBoSTsVvKAF5D2lkWBwxH15ockfFZv6L06mrgeAb0,620 +jedi/third_party/typeshed/stdlib/3/distutils/text_file.pyi,sha256=vCQLwggDaocAqqR7v1WJjOeS_wgxqjI5xDkyxHJlzcw,716 +jedi/third_party/typeshed/stdlib/3/distutils/unixccompiler.pyi,sha256=R3VKldSfFPIPPIhygeq0KEphtTp0gxUzLoOHd0QoWW8,79 +jedi/third_party/typeshed/stdlib/3/distutils/util.pyi,sha256=KSpX8rQ6qJXqToJBKdwhlpe-jd1QQcacg7wsH_6dKXo,829 +jedi/third_party/typeshed/stdlib/3/distutils/version.pyi,sha256=-MSresMMQbCxM9NDl_Hof4kDdGYmlJmqL8QWHCbliqM,1429 +jedi/third_party/typeshed/stdlib/3/email/__init__.pyi,sha256=5hwilWCw-1AiwWEMUfgVza8eTnH1vsqSL4Dkjx44w4c,757 +jedi/third_party/typeshed/stdlib/3/email/charset.pyi,sha256=tKEN5fdqEvA9KmtWRqGdv7MFSdI8IKcRMrkjzvdUpyQ,1062 +jedi/third_party/typeshed/stdlib/3/email/contentmanager.pyi,sha256=1onMqjP_pwFFABKkXAJMpLssp2eCajYb_DmfdGW88dg,489 +jedi/third_party/typeshed/stdlib/3/email/encoders.pyi,sha256=WIE0oEGqiDZZCnaaUganOj3RIHvNpdH1H6_uYsb9BCU,214 +jedi/third_party/typeshed/stdlib/3/email/errors.pyi,sha256=5uMjZNDOPcQBIOB08smMOo19sHIAc_Zcbsbz4OAjrlM,833 +jedi/third_party/typeshed/stdlib/3/email/feedparser.pyi,sha256=-El1uWq32_69H0WtIpGT6eiEsyCQRcHuGUxKSlE7OdI,823 +jedi/third_party/typeshed/stdlib/3/email/generator.pyi,sha256=aBWIi9nhJSWROoxD0pIk0TMLt0j2epr_ASen8iiUKPs,967 +jedi/third_party/typeshed/stdlib/3/email/header.pyi,sha256=N32hw4GGgE82e7R8dpDHQnQXLf6YABLJ5OGWFuvwQNw,1025 +jedi/third_party/typeshed/stdlib/3/email/headerregistry.pyi,sha256=M9ZBxQ_YJIe_lWFLyr0AkvgkIy0ch1rhnZm7JNIT2LE,2828 
+jedi/third_party/typeshed/stdlib/3/email/iterators.pyi,sha256=kVhrZU7h-D41NBZrgFU_h2xeBs02vSRL56lfxXiRy8g,266 +jedi/third_party/typeshed/stdlib/3/email/message.pyi,sha256=4OVN83CSaSRycbt4NmwrHwB643-65ibkluIiQqNa4Os,4681 +jedi/third_party/typeshed/stdlib/3/email/mime/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/3/email/mime/application.pyi,sha256=1nvquRMV3pfipzE_Db77xQA2YiVshkslbrR6KJZXN2k,499 +jedi/third_party/typeshed/stdlib/3/email/mime/audio.pyi,sha256=hsj22zxJwLlF6TPXDzRMksu4oUoxnypUshYenxXSkE8,502 +jedi/third_party/typeshed/stdlib/3/email/mime/base.pyi,sha256=PLTWaSsib758s-xoTPQE43teS-vStf7nOoPLiq_evHg,325 +jedi/third_party/typeshed/stdlib/3/email/mime/image.pyi,sha256=DpDOxmW7k3nF8WzPhaeLLyYWNeYIQgW1v5OU7v3U4_E,502 +jedi/third_party/typeshed/stdlib/3/email/mime/message.pyi,sha256=BbZ7ULwq3vT6-MPS2QFmkIuWqxgC9vDLWD9x0zv9oXk,292 +jedi/third_party/typeshed/stdlib/3/email/mime/multipart.pyi,sha256=CTHQJbuC-ZAgPKymxr62DjsslnF4P45RZQ_a17481UA,507 +jedi/third_party/typeshed/stdlib/3/email/mime/nonmultipart.pyi,sha256=gAN-hOmjxz3lcG8FK3lbvlEOoz_yhG9rKIslq3WLil4,76 +jedi/third_party/typeshed/stdlib/3/email/mime/text.pyi,sha256=uqvOSr2SoHrbTcLikt1P7Gm0L9sjyy6c5TeuX1cbvBY,297 +jedi/third_party/typeshed/stdlib/3/email/parser.pyi,sha256=p-CbSumd_5JYzgkS81GOZofpQuameh6ajoLtJifdpyQ,1328 +jedi/third_party/typeshed/stdlib/3/email/policy.pyi,sha256=sFF7jOYVcLw_gIS27K7CxMFbz62BbWXM_qVPQQakyx8,2159 +jedi/third_party/typeshed/stdlib/3/email/utils.pyi,sha256=V-8KVirBmcrTekDW6TW7gqkV3bPCh2l9RJKtGHCyg5M,1832 +jedi/third_party/typeshed/stdlib/3/encodings/__init__.pyi,sha256=q95Eqw-U2YICiPawMyarXy1iWCXsz1wV_793ZSg_4ek,184 +jedi/third_party/typeshed/stdlib/3/encodings/utf_8.pyi,sha256=QvCf0jkCM3g7fA1rzv7IB8onY_ciFLPaySvkBQ95aOM,561 +jedi/third_party/typeshed/stdlib/3/enum.pyi,sha256=iY_vEF2WHiMhmgZTtBGnE_XdzdPx90709cxW0BP9o4M,2643 +jedi/third_party/typeshed/stdlib/3/faulthandler.pyi,sha256=jn6gMdF0GEZljfFTc1splgef8zIo99X1H44qgWxU8sE,644 +jedi/third_party/typeshed/stdlib/3/fcntl.pyi,sha256=YNY4OFCzwUNVzqaocJgNzrS7kwsRrKCzFmXfv9ukHUc,2244 +jedi/third_party/typeshed/stdlib/3/fnmatch.pyi,sha256=_sH2M1wxuUFryRYDO_K2eXtxxJPSRHHRRuz2nzUqf6Y,257 +jedi/third_party/typeshed/stdlib/3/functools.pyi,sha256=twLVNCB5Heu0Y1yfJbaDdFbDCP0kX9PYYg4vumV3iKI,4613 +jedi/third_party/typeshed/stdlib/3/gc.pyi,sha256=mfh-9jtsLPOaFLuBwkRgt1rPjpuRf6GelyMb8PJjokc,1135 +jedi/third_party/typeshed/stdlib/3/getopt.pyi,sha256=QUWgEAjkZEgYEDX83HNFzlLqoM7tWZfu35croNXpYUk,352 +jedi/third_party/typeshed/stdlib/3/getpass.pyi,sha256=VZjU7IEeEVtL8nHUcWRbbOkNYAegfSRiTDc5QFY-BuM,178 +jedi/third_party/typeshed/stdlib/3/gettext.pyi,sha256=hI05wMFAUpQqs9xphkmJdM2UbLKcvQ9--PNPwOaPQiQ,3208 +jedi/third_party/typeshed/stdlib/3/glob.pyi,sha256=l-vPtsxqdYR6852YtsLh8m9dS4g2OvnU2hDUPHnwRw4,442 +jedi/third_party/typeshed/stdlib/3/gzip.pyi,sha256=hxBMzBPUEElN-Q0CyO7__tQ8cOH0kboT5Oqqah3HLms,2909 +jedi/third_party/typeshed/stdlib/3/hashlib.pyi,sha256=a1LtNJGGukXpq3P09xQvoAfU9aDsTR3nZ09T7EyFOL8,4195 +jedi/third_party/typeshed/stdlib/3/heapq.pyi,sha256=ZXIM49YLZnLiEMSQlWt9uUzX6fxgNXH5HIQwBVhcneI,798 +jedi/third_party/typeshed/stdlib/3/html/__init__.pyi,sha256=qKsbjN2OJn_AoUr9a4zdFC8juUG9ii6rqYp3EQTSG-Q,122 +jedi/third_party/typeshed/stdlib/3/html/entities.pyi,sha256=ap6jI6Fw355WC0vSd-WGc957p_KpAJDGONUeAlItYzI,136 +jedi/third_party/typeshed/stdlib/3/html/parser.pyi,sha256=odnTynZ-Cz1KlNCzxR_nl3NY_vEHlg1iha4DGTHCX0Q,984 
+jedi/third_party/typeshed/stdlib/3/http/__init__.pyi,sha256=bE3i99Lkeat-Seo0TTAm-dDauYTTNtih512Tt6ij9G8,1940 +jedi/third_party/typeshed/stdlib/3/http/client.pyi,sha256=N-CIvhqvj7MhnMDK-rhyuH5FgsBQCjsTcuP230LsH7M,6034 +jedi/third_party/typeshed/stdlib/3/http/cookiejar.pyi,sha256=vIs7US67GiRUDVmq_qdQ9a48s4Iv73h4_S6D8OYrNcA,4991 +jedi/third_party/typeshed/stdlib/3/http/cookies.pyi,sha256=pfXrzKusaD-sxZsv92P84Vw0sgoGrE-KKhiuXm15vz0,1364 +jedi/third_party/typeshed/stdlib/3/http/server.pyi,sha256=C9LgOZw5ZoAYzbXN-y9BXdS31iaEiwwo-QpynkLS_QM,3036 +jedi/third_party/typeshed/stdlib/3/imp.pyi,sha256=_zd93_282PmC8cqim3FPUsFeYrIl8-dp_WFab_v7nac,2343 +jedi/third_party/typeshed/stdlib/3/importlib/__init__.pyi,sha256=xuq-05qRDyIRBetquMX_JB1dMc_BLme0WAZ3MpggjPQ,571 +jedi/third_party/typeshed/stdlib/3/importlib/abc.pyi,sha256=polXx3wt7y2EQYqfND7eD1OA0hMt1ucG_vQoACtvq4U,3960 +jedi/third_party/typeshed/stdlib/3/importlib/machinery.pyi,sha256=FXq0GgiGYrJRtZKN76mlRVRuum88bdiq_k_ZuF08I18,4006 +jedi/third_party/typeshed/stdlib/3/importlib/metadata.pyi,sha256=e-uCY4Q7Su-dTeeUl-sGUF9F7-70Vd2hf_CQ2Y0IT9k,3786 +jedi/third_party/typeshed/stdlib/3/importlib/resources.pyi,sha256=zYjYz5gFHS1eT5roJ2glvvqhVwjJBkLdxkMYUj3FhJo,1026 +jedi/third_party/typeshed/stdlib/3/importlib/util.pyi,sha256=bqqXx43SyHCCERNe7jA-i6MjfUdyPpQtimtuqqTKF2M,1782 +jedi/third_party/typeshed/stdlib/3/inspect.pyi,sha256=yYThFqYLPUJToz4Fz9LYxI-ZJ89k94wqpvhUoK1K-QA,9929 +jedi/third_party/typeshed/stdlib/3/io.pyi,sha256=OQHQh77AvF1wqIqeqUPiHDO9nKgRX45O6ZgOrQap7xE,7499 +jedi/third_party/typeshed/stdlib/3/ipaddress.pyi,sha256=74Fj1yj7-tgOJeSSR8LKvfqIUyY1UzTQN6q_lrUcgpU,5252 +jedi/third_party/typeshed/stdlib/3/itertools.pyi,sha256=Up21RC9zUGPLkg3vHuuQ7iSWpphBCeWGBH0Txn1mUk4,4524 +jedi/third_party/typeshed/stdlib/3/json/__init__.pyi,sha256=dWEcInSJdvZapNjd2VvnksUHKyj4_AVSVJasp7uqFBw,1919 +jedi/third_party/typeshed/stdlib/3/json/decoder.pyi,sha256=85tOPFvXraUsmSgW1Sn7FPqKAxSWwPZwjAZ7Aa6CMFM,1090 +jedi/third_party/typeshed/stdlib/3/json/encoder.pyi,sha256=FIZVowFsNCGiAAKqnohbOQkmFKsvujvwkzk2qu0OCSg,779 +jedi/third_party/typeshed/stdlib/3/json/tool.pyi,sha256=d4f22QGwpb1ZtDk-1Sn72ftvo4incC5E2JAikmjzfJI,24 +jedi/third_party/typeshed/stdlib/3/lzma.pyi,sha256=t8jPcnq6o5L1t3nKdw_dmeFFovwCAkQZnKnMltEypTc,4580 +jedi/third_party/typeshed/stdlib/3/macurl2path.pyi,sha256=IjGXxi7XlrbJwIcpfitwuKQWR635yLOf6Sh3N1o25Do,225 +jedi/third_party/typeshed/stdlib/3/multiprocessing/__init__.pyi,sha256=J3pDzfv93b1AS4hf69jhyonvhWmrj1eYbCcnApCWlUI,3802 +jedi/third_party/typeshed/stdlib/3/multiprocessing/connection.pyi,sha256=ReCch6MGZ-m4TSo6bSFHUv9byxu79g2GP7chnYmhD8w,2602 +jedi/third_party/typeshed/stdlib/3/multiprocessing/context.pyi,sha256=LQireotdnvV1vrojP-BTgtSyAZpius2NGrbl1kbUpmc,6698 +jedi/third_party/typeshed/stdlib/3/multiprocessing/dummy/__init__.pyi,sha256=0n5irDRx6L6Q8NlRCxq9w-Dlk4DhJhiGxbFiFTPZSgM,1572 +jedi/third_party/typeshed/stdlib/3/multiprocessing/dummy/connection.pyi,sha256=1h-QyO57HTVQN_OiJjuiNp9QSEkuXKZDdR-jrhasw44,1431 +jedi/third_party/typeshed/stdlib/3/multiprocessing/managers.pyi,sha256=_owX_1Pum7mnYDXFKtk72T8KHfckdNl_hg8FT5AUsoQ,4584 +jedi/third_party/typeshed/stdlib/3/multiprocessing/pool.pyi,sha256=Zz1Lx-lpsQrJHgsAS6xqCUqsh2F-E6lP0Iu_Vc4RXA4,3224 +jedi/third_party/typeshed/stdlib/3/multiprocessing/process.pyi,sha256=Eupk7EcZSXHEe_LcXKt_uY_oYt5ZGpwFoIVVVAiCq78,1143 +jedi/third_party/typeshed/stdlib/3/multiprocessing/queues.pyi,sha256=fXQ9gQc8wqYmEgwBCK6bpaTLUR-tPGAZxtADAw6OdUw,1288 
+jedi/third_party/typeshed/stdlib/3/multiprocessing/shared_memory.pyi,sha256=oZEZSEATdR5PqJEtyd6E3ZWsSsM_mjvOW_CRhVjP1JM,1302 +jedi/third_party/typeshed/stdlib/3/multiprocessing/sharedctypes.pyi,sha256=qg970qF7Qy0nnIWPnqV3q0Sycxx0dZAV60Fa88oUyOM,1526 +jedi/third_party/typeshed/stdlib/3/multiprocessing/spawn.pyi,sha256=cMRcm2n3hnOszIByMFkG3H7PatT472zWi-g-s8ZdkPk,690 +jedi/third_party/typeshed/stdlib/3/multiprocessing/synchronize.pyi,sha256=ZUPVL36s3ANf4divriLWpgHYFBva09_09IK7hORTSns,1799 +jedi/third_party/typeshed/stdlib/3/nntplib.pyi,sha256=KSCji19ptgFnr26zKlzgLulK7e0LSfPLZHLBSMKYN54,4325 +jedi/third_party/typeshed/stdlib/3/ntpath.pyi,sha256=MZG5ucT7dEvAJYFsJHN7SHM2Frs4Nyf3eLm6VqhUAX0,4721 +jedi/third_party/typeshed/stdlib/3/nturl2path.pyi,sha256=E4_g6cF1KbaY3WxuH-K0-fdoY_Awea4D2Q0hQCFf3pQ,76 +jedi/third_party/typeshed/stdlib/3/os/__init__.pyi,sha256=sqb9_zZjeE44gdDnephPQIj8rzLZU8WIuVAj13qcdu4,28846 +jedi/third_party/typeshed/stdlib/3/os/path.pyi,sha256=MZG5ucT7dEvAJYFsJHN7SHM2Frs4Nyf3eLm6VqhUAX0,4721 +jedi/third_party/typeshed/stdlib/3/pathlib.pyi,sha256=WxnASz0QaegMLAzqln-6G529DGVub-Jg8D8lyWGjGSM,6899 +jedi/third_party/typeshed/stdlib/3/pipes.pyi,sha256=cgke6nDBXMhLkpcUUsV_y61rc2ujKCydFa6kl3oYVdU,518 +jedi/third_party/typeshed/stdlib/3/platform.pyi,sha256=_a4cugd9uoy8pzW0KE0U0feB2O5k4Co1X_VeKHbgbGQ,2272 +jedi/third_party/typeshed/stdlib/3/posix.pyi,sha256=YNT-Sw-8HFLKnXeCg2GRN97r0K6piolWHPuiD-qPK6k,2810 +jedi/third_party/typeshed/stdlib/3/posixpath.pyi,sha256=MZG5ucT7dEvAJYFsJHN7SHM2Frs4Nyf3eLm6VqhUAX0,4721 +jedi/third_party/typeshed/stdlib/3/queue.pyi,sha256=W1aF5vLpBMTnl4ObekHw4B67WbhjoUSukrz3g6opa0k,1884 +jedi/third_party/typeshed/stdlib/3/random.pyi,sha256=G3NB5JMkfENo8v1u6X00c1-V2HEX4W9fRqcNibNdGNc,3901 +jedi/third_party/typeshed/stdlib/3/re.pyi,sha256=E1wPk3W7Mp-0VSatzEl1WR058yXWhyau-vJfFsr4Ovo,4442 +jedi/third_party/typeshed/stdlib/3/reprlib.pyi,sha256=Hn5K7nvw9i4uwyrxH7BkSSbA8_na_24t6YJ8YleX0cQ,1228 +jedi/third_party/typeshed/stdlib/3/resource.pyi,sha256=Je96IzWSE5BZkL25TXCyg3wVbGO2zMHGrLjjqKgl-gY,1243 +jedi/third_party/typeshed/stdlib/3/runpy.pyi,sha256=kYMEMd1kHr9LmvQzt9-s4xXrAAth_upzRtxbkXtl_r8,746 +jedi/third_party/typeshed/stdlib/3/secrets.pyi,sha256=38xdvca6iVqHllAA8pfiRXMnSFEZ_AakTCHjVjksHIo,467 +jedi/third_party/typeshed/stdlib/3/selectors.pyi,sha256=5Mg36Zke6M8UL4t91QuDDY5LX6kHwgluUwr4zGSezBk,3644 +jedi/third_party/typeshed/stdlib/3/shelve.pyi,sha256=9NdL2QZYd9Wtqsz991GfroFbZ553lesYsKujKZCrPbI,1605 +jedi/third_party/typeshed/stdlib/3/shlex.pyi,sha256=pVtr3Hc4_Wx7qgcfQPf6DepYemtlrQaZ58l7jckycVI,1325 +jedi/third_party/typeshed/stdlib/3/signal.pyi,sha256=aFVl8Op3n4JAQhj_jEta45aGGmXQciIRswTbmx5VJ68,5209 +jedi/third_party/typeshed/stdlib/3/smtplib.pyi,sha256=DobAN5UFsK41cOSl_hPqRBQskhdlTxUPTbJBH3iH7-k,5606 +jedi/third_party/typeshed/stdlib/3/socketserver.pyi,sha256=7ASKFWF67wQOePi5HR3RqEuAAb2xqkf1YrRhlmNS4RQ,5367 +jedi/third_party/typeshed/stdlib/3/spwd.pyi,sha256=aAmkS56-90q7hfKMkpAmUifbEQy32e2c-gW_KVHTEn8,310 +jedi/third_party/typeshed/stdlib/3/sre_constants.pyi,sha256=j5xp3zUP6iox81-rrMQSi6OqC7s99PXncyL6YCjwMAc,3348 +jedi/third_party/typeshed/stdlib/3/sre_parse.pyi,sha256=JYDEHNQG-Tw_Bwj77fpG7I-7bIk2skV2eMjIQ7ggbcI,3820 +jedi/third_party/typeshed/stdlib/3/stat.pyi,sha256=WYG0DONENxta5efsANvcWTQiy_kkB5NjSYl7oJBgrv8,1805 +jedi/third_party/typeshed/stdlib/3/statistics.pyi,sha256=nVUX7Tcg6mZeuTzdOWCDLH-jULHlCCOf-s0RnPhK0K4,3149 +jedi/third_party/typeshed/stdlib/3/string.pyi,sha256=CNCZS22ux7JIYv0mm9KGJ1sPWk4-ziwN13SKAC0u36M,1423 
+jedi/third_party/typeshed/stdlib/3/subprocess.pyi,sha256=aVTeOMtFq4uJMnSXkZG9YA8jjXdhxxY_yjo4e9mLMqI,34320 +jedi/third_party/typeshed/stdlib/3/symbol.pyi,sha256=6iOqBcaia5f9UZcXxZaDy9qBA4Boe35Uy4WxQmhFWcg,1383 +jedi/third_party/typeshed/stdlib/3/sys.pyi,sha256=7WyG6k19z77CBdUUkNVQiAR_YxpKCamnsxjsFX_06MU,6337 +jedi/third_party/typeshed/stdlib/3/tempfile.pyi,sha256=FyzDCJDCZvkXsxr_0arm8Q3vMKgWO58vw4LPl8ZghT0,11371 +jedi/third_party/typeshed/stdlib/3/textwrap.pyi,sha256=SA66hWuD6KU0BfK8EUpEwnm8tKgyWbYcHJVaj_qXdzM,3234 +jedi/third_party/typeshed/stdlib/3/tkinter/__init__.pyi,sha256=zXaLTPom1qPFajA0dc2Ic0_43bCd0gQqvUsu1NhtsvE,110919 +jedi/third_party/typeshed/stdlib/3/tkinter/commondialog.pyi,sha256=GXUxL-Y2G2Y3DgfMNMNpCUzuRnZ746Iij-R56ZXCu6E,277 +jedi/third_party/typeshed/stdlib/3/tkinter/constants.pyi,sha256=WfBSeKTB52NfPVFdsdaRrnsmDOvT3FoP8j4QK1T3yxU,1886 +jedi/third_party/typeshed/stdlib/3/tkinter/dialog.pyi,sha256=SqRt2lN2RMOtj5B9Q1h73xcxbVJTfZ0bYzfgH0nnNpY,291 +jedi/third_party/typeshed/stdlib/3/tkinter/filedialog.pyi,sha256=xM4DePZxB81ctUBT5Wp6hxfQAQAXqunHfv5idcVx71g,2247 +jedi/third_party/typeshed/stdlib/3/tkinter/font.pyi,sha256=HVeakp_l7Vqh6sbakpZ6XlilT2oIQhQA5AajfLhANtY,3812 +jedi/third_party/typeshed/stdlib/3/tkinter/messagebox.pyi,sha256=Q0-rK5wXBbbzvx4Mpf92dITPgL7Lwjw8t4CLfpYV0hw,1150 +jedi/third_party/typeshed/stdlib/3/tkinter/ttk.pyi,sha256=f5W5gEAauDlNzBBJ7e43tuyu9_SzdA4rWxRhhOHlGvE,43748 +jedi/third_party/typeshed/stdlib/3/tokenize.pyi,sha256=MKMkZ5E-aZIObWJom5U1-4JcKMEUuykkKDNs1tcko2M,3110 +jedi/third_party/typeshed/stdlib/3/tracemalloc.pyi,sha256=VqPd3kiHw2LM5yb0YDtj_Rx-kSlOUon2W3cSu1d49KY,2795 +jedi/third_party/typeshed/stdlib/3/types.pyi,sha256=KsTXLa4cimtNz6HGMOn6whGh07tR0NJkc1_PabY6l3A,11027 +jedi/third_party/typeshed/stdlib/3/typing.pyi,sha256=lh5-KS3cFrYby3k1Etdksa6_IZBaTd0V-8o3aV_ztcc,25040 +jedi/third_party/typeshed/stdlib/3/unittest/__init__.pyi,sha256=mot-stW02w3PxX7Q8XcXM-wNPkyICkUEcthaTuzIMh4,387 +jedi/third_party/typeshed/stdlib/3/unittest/async_case.pyi,sha256=8FL7FHo9f5ZAO1veWlhC6r1WXpC4JBSZ9jAGr5VBrhs,372 +jedi/third_party/typeshed/stdlib/3/unittest/case.pyi,sha256=h5wnJUGhA_Jw3GstvFcaWX5jXfFnXnZcODW3nLzfP7o,12483 +jedi/third_party/typeshed/stdlib/3/unittest/loader.pyi,sha256=8wcy7fsfMDuHjVSNzD6bwmPmnUGiNG0PTLz0HpLjDWU,2067 +jedi/third_party/typeshed/stdlib/3/unittest/main.pyi,sha256=TM86Ac-1qF3P9JogJ2rYwE3E4dKi7ke1G8RwLeBEiaA,1691 +jedi/third_party/typeshed/stdlib/3/unittest/mock.pyi,sha256=dXwIB0Naj-0TjlamsKQcrlNRThPHW3nvBHtUt28nR6A,14809 +jedi/third_party/typeshed/stdlib/3/unittest/result.pyi,sha256=v11JXslVniIzkCYnUnalSJ2ndJc4I-Bxt8_Mie_6teI,1859 +jedi/third_party/typeshed/stdlib/3/unittest/runner.pyi,sha256=l-eVrz13ec-okAuc0MGdYbo192H2ep3adVycY6dnUYE,1339 +jedi/third_party/typeshed/stdlib/3/unittest/signals.pyi,sha256=uL7z9v81sng6PtI-FqweH7jef7o41fQD3AL8WoIxuFU,402 +jedi/third_party/typeshed/stdlib/3/unittest/suite.pyi,sha256=YPg_NeVuWOBeQCdI_NHPFyRDhsLy6E0rgmH96jNyj58,892 +jedi/third_party/typeshed/stdlib/3/unittest/util.pyi,sha256=rs8aUKwYgAQfqWYmsy-OEMdcNgeBOMktmf-wmFjd67c,906 +jedi/third_party/typeshed/stdlib/3/urllib/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/3/urllib/error.pyi,sha256=9VToSt5hDZo_aKbL2MRVwQ019iJ9LFmlaJg5hPOxTsA,391 +jedi/third_party/typeshed/stdlib/3/urllib/parse.pyi,sha256=yj_xX2qv1FoEtJJOk7naDIRVxuGKKwuTWolsztAekG4,5436 +jedi/third_party/typeshed/stdlib/3/urllib/request.pyi,sha256=-tFU7MccfufD8Iss9KyLS0uoWYmX_q3lKeqZJf_x6AI,15581 
+jedi/third_party/typeshed/stdlib/3/urllib/response.pyi,sha256=lFv0RWV2D9QrkSrXlCD_Mx9iKdCz6TkOuTy6BA4bdcE,2107 +jedi/third_party/typeshed/stdlib/3/urllib/robotparser.pyi,sha256=vTWHNtcRWcgDk3DwlPv2ULB913AI8l6JpbdMAaTnlJc,704 +jedi/third_party/typeshed/stdlib/3/venv/__init__.pyi,sha256=f_2SVkhePBtzrvKVcVVyS79VKHy6V37E_99ToJRmnXk,2967 +jedi/third_party/typeshed/stdlib/3/winreg.pyi,sha256=Yz2wG-34BwXWx4JaU2TqPjHlLm_eiPoALQm97KGcI2w,3779 +jedi/third_party/typeshed/stdlib/3/xmlrpc/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/stdlib/3/xmlrpc/client.pyi,sha256=iak5sIIqou0OLODL8DPFbpAAFO3_k8SmIHmgLvPYcbE,12285 +jedi/third_party/typeshed/stdlib/3/xmlrpc/server.pyi,sha256=OguOW818SOafYK9lArrfOt7Nw7NgfaGcx9ERt1rBxWo,6810 +jedi/third_party/typeshed/stdlib/3/xxlimited.pyi,sha256=IeL0poSgoE8vvdo93lscYL9hdc2xqfSgnzzb1iQ6hgc,211 +jedi/third_party/typeshed/stdlib/3/zipapp.pyi,sha256=gjXsMJc9OrQhFpRaUVZrrN7ACwo-oMr54g0KTKx0zIQ,678 +jedi/third_party/typeshed/third_party/2/OpenSSL/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/third_party/2/OpenSSL/crypto.pyi,sha256=N5TmTvFyT5KzQsoHdU7x2nU_G1Kf5TTQF1mFK-HBwMk,7588 +jedi/third_party/typeshed/third_party/2/concurrent/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/third_party/2/concurrent/futures/__init__.pyi,sha256=zAZn69ohscdtL7rotnHyxSu7jzhVMw005ANG7J09apw,436 +jedi/third_party/typeshed/third_party/2/concurrent/futures/_base.pyi,sha256=rPSXJhb-LKP8qcFX9YQou0wCs33t6a3_hgfdD1zO278,3701 +jedi/third_party/typeshed/third_party/2/concurrent/futures/process.pyi,sha256=rGmQHTSHwVsXVPxwu4R1efuU0b8D1gqAEG9N_XH3vr8,195 +jedi/third_party/typeshed/third_party/2/concurrent/futures/thread.pyi,sha256=jEiKlhYG3p-MOOEpWFeYHNOKzK9vL7eAYEe-bI2V4XY,574 +jedi/third_party/typeshed/third_party/2/enum.pyi,sha256=iY_vEF2WHiMhmgZTtBGnE_XdzdPx90709cxW0BP9o4M,2643 +jedi/third_party/typeshed/third_party/2/fb303/FacebookService.pyi,sha256=ypViThxa9-z-_FsAfoHiXj9175tTBfqK2Vf2-8H7EL0,8692 +jedi/third_party/typeshed/third_party/2/fb303/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/third_party/2/ipaddress.pyi,sha256=pkTNeeQfeOrdmRZd3aZanIWk4vgyW8bbDOfqrLrZvvM,5107 +jedi/third_party/typeshed/third_party/2/kazoo/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/third_party/2/kazoo/client.pyi,sha256=fUPUW9-5VQibevcTDbpkhTkorz9JNyBfw6e03lrKgxs,3400 +jedi/third_party/typeshed/third_party/2/kazoo/exceptions.pyi,sha256=7DhQ3xMt63lVinoWQv0vwI6VmG2IfNPlMpLRGu_gq-s,2054 +jedi/third_party/typeshed/third_party/2/kazoo/recipe/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/third_party/2/kazoo/recipe/watchers.pyi,sha256=Pxdwrx10R_orsLVINsoCsy-TL6a96EumRsENf2G11BE,551 +jedi/third_party/typeshed/third_party/2/pathlib2.pyi,sha256=Q9vqVVrQppWLvGeGATrn66__LyzByFxgWQ0maNUpYgk,4283 +jedi/third_party/typeshed/third_party/2/pymssql.pyi,sha256=vJAY2YPlF2arXuP2Yg05pFN8onxIqonFQD5uOqejAQY,1685 +jedi/third_party/typeshed/third_party/2/routes/__init__.pyi,sha256=JsiOugLeCiWHyhEHnCrs-i0H288m2ZaDpocEKY80qzE,364 +jedi/third_party/typeshed/third_party/2/routes/mapper.pyi,sha256=oBumS7A80xLzb_YA8ZEUnsqm5YjqE91r8m-9cw0lLlM,2362 +jedi/third_party/typeshed/third_party/2/routes/util.pyi,sha256=U9AvEa_p8Q1eq1zmIMKpGMjYqFbd9YSvTbv7W1Ez49Y,576 +jedi/third_party/typeshed/third_party/2/scribe/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+jedi/third_party/typeshed/third_party/2/scribe/scribe.pyi,sha256=2Yexj6Ymfe-Kj4_jigeicxrB2NEZryKUOtAqNN8Mm2s,1216 +jedi/third_party/typeshed/third_party/2/scribe/ttypes.pyi,sha256=Ub8_sHFFbDuMouo-wUU4bkr-WhUpSOVLBJ6zb9amqBs,383 +jedi/third_party/typeshed/third_party/2/six/__init__.pyi,sha256=L7AgaYaMdkiwH6Psg5nWG0MIvZ8gX4twwJ-Glf8zJqM,4390 +jedi/third_party/typeshed/third_party/2/six/moves/BaseHTTPServer.pyi,sha256=tGipj5ccgHt3EnzbxpPyQGUVnvu7RltrUOqgqWNGwpw,29 +jedi/third_party/typeshed/third_party/2/six/moves/CGIHTTPServer.pyi,sha256=omAocPqXteLJkGtdobdZjdzXynzCtn0jCFwBBlc3LUc,28 +jedi/third_party/typeshed/third_party/2/six/moves/SimpleHTTPServer.pyi,sha256=nC0fjp9wrhSnJMI8N2QD7ovWp1CW-dJWLyaV4K_Ql54,31 +jedi/third_party/typeshed/third_party/2/six/moves/__init__.pyi,sha256=6ipHwbeU3E61Sk4ZTAvfGuW7gr17r6zDZlINNDf2_4k,2105 +jedi/third_party/typeshed/third_party/2/six/moves/_dummy_thread.pyi,sha256=Va4LPdV5kK4luB-1NlkmycnVpXD4kKeYFD5QmmNqE8U,27 +jedi/third_party/typeshed/third_party/2/six/moves/_thread.pyi,sha256=9194lgQXs2W3sR00kP7Zn9gx9WWl0C49GEsbvzshDy0,21 +jedi/third_party/typeshed/third_party/2/six/moves/cPickle.pyi,sha256=hS7yIew9WYdHI_b6XwIlHAfRFRpUTKj7IokSZcpJ4PY,22 +jedi/third_party/typeshed/third_party/2/six/moves/collections_abc.pyi,sha256=Fcq1sut9OLQ824MQ69luYDJSzEB0LroJqTsXWgWGcDo,26 +jedi/third_party/typeshed/third_party/2/six/moves/configparser.pyi,sha256=r3G3JXE9Yo34AASn9AoNcielq9KuyQ3xrtElpnhRJYc,27 +jedi/third_party/typeshed/third_party/2/six/moves/email_mime_base.pyi,sha256=WcWEleCKHROrfdXpRuKABrT_Va1hx90NY_kxYeul3Sk,30 +jedi/third_party/typeshed/third_party/2/six/moves/email_mime_multipart.pyi,sha256=HRKWFU9qh95-mEE22_2NzEKL6lx7ynvhcfHjUcYWuZ8,35 +jedi/third_party/typeshed/third_party/2/six/moves/email_mime_nonmultipart.pyi,sha256=n5hD7R_rktJj3hiHYzEqr3CJCHSW4ikfObKHmUrXBw0,38 +jedi/third_party/typeshed/third_party/2/six/moves/email_mime_text.pyi,sha256=55VzBSQimrZf6UgotoXMiJDvqbKXly6-E_IXo6Ix22c,29 +jedi/third_party/typeshed/third_party/2/six/moves/html_entities.pyi,sha256=I0BI00vvC21L_BgnCbpjio-s1jqF4ARTt-qaol7mGig,29 +jedi/third_party/typeshed/third_party/2/six/moves/html_parser.pyi,sha256=hivJeBkqiAIZ6mvO1v4tOC9Mg6MzMR08P9tzsODdul4,25 +jedi/third_party/typeshed/third_party/2/six/moves/http_client.pyi,sha256=P8tgtt5Icp-ksHij6yPb_zuKk7ckcAHt_HM3aO0WrSM,22 +jedi/third_party/typeshed/third_party/2/six/moves/http_cookiejar.pyi,sha256=HUlF3MydQRX2Vv5G6KtN_Q6iCS48LBDggoDuPbEQUCc,24 +jedi/third_party/typeshed/third_party/2/six/moves/http_cookies.pyi,sha256=itzb5D5Mp66bx7hjyI3u-hri4h9jgqVzZyMfz4xNu2k,21 +jedi/third_party/typeshed/third_party/2/six/moves/queue.pyi,sha256=6Llng-UlZW_9HSWFgmIgW2q9YhaZ-Nzh2zJ8hkqoaZA,20 +jedi/third_party/typeshed/third_party/2/six/moves/reprlib.pyi,sha256=SWZYhGRU6moFAVBo5dUFUB9kyY6TO_kgrIqxzqDQ3C0,19 +jedi/third_party/typeshed/third_party/2/six/moves/socketserver.pyi,sha256=oeRnmecMYQfMmwRFVydatyCfs_HLrJYZvf5p7nm_ryE,27 +jedi/third_party/typeshed/third_party/2/six/moves/urllib/__init__.pyi,sha256=F_1V8NcR4jGkws85IUurYLi4JnGh7_HttdVHvj8cQZM,217 +jedi/third_party/typeshed/third_party/2/six/moves/urllib/error.pyi,sha256=73JoWrqvixKdnpT83GQ-6DsBc0gxqFC_xXN009qtCi4,129 +jedi/third_party/typeshed/third_party/2/six/moves/urllib/parse.pyi,sha256=Tv-Yji1tm-ikeaeLPXCcp1YVUa8y296SNV7YVupJtNg,744 +jedi/third_party/typeshed/third_party/2/six/moves/urllib/request.pyi,sha256=Z-EBXRE0CyHR8oOekYW6bl_w--YlvIVxkdbfcxtNKWQ,1453 +jedi/third_party/typeshed/third_party/2/six/moves/urllib/response.pyi,sha256=iSs21E6BAaB6P9CQngcuzBsGnGa9XN8P7P8GO2xYWjI,114 
+jedi/third_party/typeshed/third_party/2/six/moves/urllib/robotparser.pyi,sha256=C8_E9lApZyMQpHflnHpYeyAgvQ_vFSuKon9Gl5DM3Q0,59 +jedi/third_party/typeshed/third_party/2/six/moves/urllib_error.pyi,sha256=7RTGNFpeUX5KEap9vyjA1Xc3Twfkut431Nu5290po1U,28 +jedi/third_party/typeshed/third_party/2/six/moves/urllib_parse.pyi,sha256=Q3BVGITL1UwlTmBsFD9iLf2pggJgTE5bG32QANdkMvo,28 +jedi/third_party/typeshed/third_party/2/six/moves/urllib_request.pyi,sha256=8WFe7ycArSuM6wJfgcXWLDRKNsymd0UlxWlflszb2yk,30 +jedi/third_party/typeshed/third_party/2/six/moves/urllib_response.pyi,sha256=dokFMleMVEVFVxBgSkrcn4f4yM7RhR3zkk0iDQGOC_U,31 +jedi/third_party/typeshed/third_party/2/six/moves/urllib_robotparser.pyi,sha256=8c26GW8MTI6cxDTD65N_18NRQcqWY4P9v8mrQm8c-oI,26 +jedi/third_party/typeshed/third_party/2/six/moves/xmlrpc_client.pyi,sha256=hL_FNiBles8aoJq0XQLbEHvWX1AedYbQopgRVQlbCEI,24 +jedi/third_party/typeshed/third_party/2/tornado/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/third_party/2/tornado/concurrent.pyi,sha256=THaUUH9XW_opc-G528IhDDrf-cOBVx9ZJAQKAgoqr34,1016 +jedi/third_party/typeshed/third_party/2/tornado/gen.pyi,sha256=WPudybrtny-geS69g62Hkv-NApwTukIA8BrPCn5QzVg,2785 +jedi/third_party/typeshed/third_party/2/tornado/httpclient.pyi,sha256=FDo5peSsvOJFAfIxTgbvVqxk4zfsTxFYXcx7GEnDus4,3219 +jedi/third_party/typeshed/third_party/2/tornado/httpserver.pyi,sha256=YHoHZwbU43MoKk6EBgiWzu7OwD6jQTTdzjrIg4EbEac,1617 +jedi/third_party/typeshed/third_party/2/tornado/httputil.pyi,sha256=Ypkdy7WEucueAK8huNsEbp-SG5RoCiLuxzY2VFfYlbo,2853 +jedi/third_party/typeshed/third_party/2/tornado/ioloop.pyi,sha256=pO0QehC4_KN3eVtj7lnt4Z5PoCkzUT5CiTL6XxWsvyk,2798 +jedi/third_party/typeshed/third_party/2/tornado/locks.pyi,sha256=6LDrW8nuBZLsAe90h1UYs92k_41UjcxE8vpEhc_LBzM,1279 +jedi/third_party/typeshed/third_party/2/tornado/netutil.pyi,sha256=0u8D481OqDC_ZuSTQjFzWCmDmPp2H-xJuP2hsA2J8_Y,1350 +jedi/third_party/typeshed/third_party/2/tornado/process.pyi,sha256=NsnEzaGJtchXD3_1d5-cZ-C2q-h9ocucfLvT575cbrg,662 +jedi/third_party/typeshed/third_party/2/tornado/tcpserver.pyi,sha256=1N0cm3HUZlN3dfsgzrdYTNMEN3nvGu7r2UsE7WuJQ9Y,556 +jedi/third_party/typeshed/third_party/2/tornado/testing.pyi,sha256=L25CKrPr6bQA9NTQo5bDHfttn5WSAEwlky9y3vIINgo,1865 +jedi/third_party/typeshed/third_party/2/tornado/util.pyi,sha256=8eT1IqgZQAG2ffDTCdkAufpPxgtarCEPwbDua2Nuuqg,1072 +jedi/third_party/typeshed/third_party/2/tornado/web.pyi,sha256=3D7aMJ7GBsCFu8dho6qlXOnkklJxcPC37100DvJd1Wc,8848 +jedi/third_party/typeshed/third_party/2and3/atomicwrites/__init__.pyi,sha256=2rvz0VcbCzK5PZ_z1EkWACfHYm2-Lr68oowyr4P3pp0,850 +jedi/third_party/typeshed/third_party/2and3/attr/__init__.pyi,sha256=VDc3e_MSaIhknnBqIv-6XNjmDYR7pK2WCKydzh6D1Pg,8149 +jedi/third_party/typeshed/third_party/2and3/attr/_version_info.pyi,sha256=x_M3L3WuB7r_ULXAWjx959udKQ4HLB8l-hsc1FDGNvk,209 +jedi/third_party/typeshed/third_party/2and3/attr/converters.pyi,sha256=Bk1t2LtjazrzPzOBElUr6xTXwm1bSRT9yqX87xrEdeQ,346 +jedi/third_party/typeshed/third_party/2and3/attr/exceptions.pyi,sha256=4zuaJyl2axxWbqnZgxo_2oTpPNbyowEw3A4hqV5PmAc,458 +jedi/third_party/typeshed/third_party/2and3/attr/filters.pyi,sha256=_Sm80jGySETX_Clzdkon5NHVjQWRl3Y3liQKZX1czXc,215 +jedi/third_party/typeshed/third_party/2and3/attr/validators.pyi,sha256=ycA4ztBcRNQeniNpAENIu4BHB1RAyHgbZpS-8r8mwvI,1814 +jedi/third_party/typeshed/third_party/2and3/backports/__init__.pyi,sha256=CKeVKOomLpAIZH2ipts0a2F9ugydH1mCvfIGlfvtEbc,109 
+jedi/third_party/typeshed/third_party/2and3/backports/ssl_match_hostname.pyi,sha256=jAdLMMvdxdszZCIdVGxRZw7i5waIBqb4RqwketJqMr4,81 +jedi/third_party/typeshed/third_party/2and3/backports_abc.pyi,sha256=z8KwJUh51HVvBluyv29D8TM7-fSQNZf_WPXGvddzV3M,220 +jedi/third_party/typeshed/third_party/2and3/bleach/__init__.pyi,sha256=M2COlKhdPdtUHIMaOWtlGIVk5NGzag1OEEcXL0vtTo4,841 +jedi/third_party/typeshed/third_party/2and3/bleach/callbacks.pyi,sha256=Wpeg0LxBAend5GwNZVw0u6dKT4ZoicVDrUqsGqFJMFs,206 +jedi/third_party/typeshed/third_party/2and3/bleach/linkifier.pyi,sha256=vQleEwvJ8ucOi0oyBhRnOXj6dAGZe_2bHRHX8UqRFPA,978 +jedi/third_party/typeshed/third_party/2and3/bleach/sanitizer.pyi,sha256=1-c_WuvDFDEVINySuZEuaBq_RfYjf7UV74b9pq-15YQ,1148 +jedi/third_party/typeshed/third_party/2and3/bleach/utils.pyi,sha256=RSxKxwsKbaak2b50U5iFJsnkMGQDSEADBN8fqamTZGw,286 +jedi/third_party/typeshed/third_party/2and3/boto/__init__.pyi,sha256=daXgcCm5ezx16EpC83J04doV5fHQ1mZlLLCEgIltRZY,7183 +jedi/third_party/typeshed/third_party/2and3/boto/auth.pyi,sha256=H6ZtG0lKjp4WCcoUcNxecLo4CzDs1xCsUYmwI3yeSYg,4141 +jedi/third_party/typeshed/third_party/2and3/boto/auth_handler.pyi,sha256=7QChS7_s-Uit5ZNCr4FCtMRFRw7uOIJUUW3ISeAckXQ,251 +jedi/third_party/typeshed/third_party/2and3/boto/compat.pyi,sha256=v6PUjkCJ0tdXWWUz-0vx2vSFBbh0pmk3yfY3EFhLg_g,384 +jedi/third_party/typeshed/third_party/2and3/boto/connection.pyi,sha256=g7eTL8QplrrRBsQoGhIEEE4y4puNzPFQDuEDXq1o4bc,5679 +jedi/third_party/typeshed/third_party/2and3/boto/ec2/__init__.pyi,sha256=mJotwEhqx0uTOkp7ABAXmgruW432cmd_CLfyqwIdh7c,256 +jedi/third_party/typeshed/third_party/2and3/boto/elb/__init__.pyi,sha256=vIwGOPGo2wMeNQLxYnBTI5nq-35Q2bcZCyXmgK7WfGA,2629 +jedi/third_party/typeshed/third_party/2and3/boto/exception.pyi,sha256=pp1-GMr4VjeXHcmHJsiBvyiYC5I1MN6iHV_kclA1Mg8,4569 +jedi/third_party/typeshed/third_party/2and3/boto/kms/__init__.pyi,sha256=kblf2ubHjX319n44aDeqTcaZmNGcE1aCNVX1_eprqM8,157 +jedi/third_party/typeshed/third_party/2and3/boto/kms/exceptions.pyi,sha256=WKhoeQKeApIczWPJz3vC5A7K03AmbNAM4bnBuVuNVCw,829 +jedi/third_party/typeshed/third_party/2and3/boto/kms/layer1.pyi,sha256=OgMqR0jIffdQCX7bqbN8WybQrfrr6sXj9CGvNUC_cHA,3972 +jedi/third_party/typeshed/third_party/2and3/boto/plugin.pyi,sha256=rquBCp_gv_wSNNcWYijFPURhXO48ELzf5XItCuPv7nI,235 +jedi/third_party/typeshed/third_party/2and3/boto/regioninfo.pyi,sha256=QkiqbCcldisKPERnXU6fgKYlOHD4ojOWssGq87r2KMw,701 +jedi/third_party/typeshed/third_party/2and3/boto/s3/__init__.pyi,sha256=9lKx0pFB5gzA3515idyUeg6YF8zryDGfm1hWzOWlSbo,523 +jedi/third_party/typeshed/third_party/2and3/boto/s3/acl.pyi,sha256=01T9Ak5fcjt4TGf2vpCW-T0rS-miWRpcdKttcYBBZhk,1686 +jedi/third_party/typeshed/third_party/2and3/boto/s3/bucket.pyi,sha256=576y7ybFKFnWpxcFeYHsmCg1iXsy0fv3BST2eIbEDG0,8747 +jedi/third_party/typeshed/third_party/2and3/boto/s3/bucketlistresultset.pyi,sha256=3UER4BACEqM1ZpY6P3kefwwXuBP2TxD8UoqMhPbWtl0,2029 +jedi/third_party/typeshed/third_party/2and3/boto/s3/bucketlogging.pyi,sha256=3A-6a-73DgObYEzBbcUcknHJQyw4e3ihu0NDL4qRmGQ,400 +jedi/third_party/typeshed/third_party/2and3/boto/s3/connection.pyi,sha256=pkr9v7Bxd0pyu-IA_bkW-ueRCTb5uzed_UT8G_Frp-g,4829 +jedi/third_party/typeshed/third_party/2and3/boto/s3/cors.pyi,sha256=wrlgEU_VFds-ZtwzNG00aCwil2hZxRaSaBkMq6yGKo4,1088 +jedi/third_party/typeshed/third_party/2and3/boto/s3/deletemarker.pyi,sha256=T6AWOwNnW391UjVpzJ8tfbaOtM_hSaNa2wyJV02f5wQ,366 +jedi/third_party/typeshed/third_party/2and3/boto/s3/key.pyi,sha256=73WWtWTIbSJl_vHm8DqYCi2DCa7IuVsCq07I4TKtw-g,8317 
+jedi/third_party/typeshed/third_party/2and3/boto/s3/keyfile.pyi,sha256=EcfBooXpPExAghucYEM6sxPVsp32wueqK_-5Kk-66rI,684 +jedi/third_party/typeshed/third_party/2and3/boto/s3/lifecycle.pyi,sha256=eevvO0p0sW2lQYzyxVGUgEe7yLDsnia-Tnqq0ibgwU4,1984 +jedi/third_party/typeshed/third_party/2and3/boto/s3/multidelete.pyi,sha256=GpNwfrASD9rWAfPhd1HoZf-6rBT2Lf3bbJyj_8fX4Kg,1075 +jedi/third_party/typeshed/third_party/2and3/boto/s3/multipart.pyi,sha256=G1A4EP8qX15z724aoTc-13cAZnuSl2V-2Gbp4GjlPjA,1997 +jedi/third_party/typeshed/third_party/2and3/boto/s3/prefix.pyi,sha256=UDLPJn1XiSFLXxd0RdeOMzUfzEyYUnCAiGPACntnkkI,324 +jedi/third_party/typeshed/third_party/2and3/boto/s3/tagging.pyi,sha256=JgNFPHTLi7TR0HbeasYlSnbVA-GzNXzYJ9H6sBmLz0c,748 +jedi/third_party/typeshed/third_party/2and3/boto/s3/user.pyi,sha256=nFbH3dOf5SfNoaTyNwkiu2q3ZR82iPYVr-LfXzUGzYg,362 +jedi/third_party/typeshed/third_party/2and3/boto/s3/website.pyi,sha256=O-9DSWitjeUMEkGVnCUzOQjEOuX94HDt_9bPgLEnkBw,2649 +jedi/third_party/typeshed/third_party/2and3/boto/utils.pyi,sha256=9iLNLIEyh6Z5rbXpdGef7V5eQ0jih7eSA5sEwgtxAOI,5876 +jedi/third_party/typeshed/third_party/2and3/cachetools/__init__.pyi,sha256=i7v_tIZJRWRJCFiADCwVOOp_t9Vo_aZp_cEL0lcOBJ8,254 +jedi/third_party/typeshed/third_party/2and3/cachetools/abc.pyi,sha256=IqWLRykYAB0tVeDGtxAv4hEBeotGQaxIPkrSMYWdlAg,182 +jedi/third_party/typeshed/third_party/2and3/cachetools/cache.pyi,sha256=xnBVZ9h9R56nhR4vWfH_MAXev9KnLLtEqLN4405zQww,712 +jedi/third_party/typeshed/third_party/2and3/cachetools/decorators.pyi,sha256=jao0eXl3rjETbO5_TUtpRNiHg83q6MqK-YPtshaldk4,605 +jedi/third_party/typeshed/third_party/2and3/cachetools/func.pyi,sha256=p4rD-rrZVbHD3x0HVrmQf2Sv6gCqDxqo5Ln-AGCnfSc,510 +jedi/third_party/typeshed/third_party/2and3/cachetools/lfu.pyi,sha256=hEuvwO-6VmvZdl2bkFWuP7yq6jmCeB2Z9HVPC-vC9Rg,632 +jedi/third_party/typeshed/third_party/2and3/cachetools/lru.pyi,sha256=1PkODHl1pNoGmDhZEUh0qvQQLDZKUJTfKPhjhuT73WU,607 +jedi/third_party/typeshed/third_party/2and3/cachetools/rr.pyi,sha256=f76mZgCsS7a8K44HFchyia1hAAAAxDIg6pdlVhfOQ-g,683 +jedi/third_party/typeshed/third_party/2and3/cachetools/ttl.pyi,sha256=I-P_gSblYWVijxd6WIFFAYEuqf6qI2wApq4RuFxekPk,926 +jedi/third_party/typeshed/third_party/2and3/certifi.pyi,sha256=L-Idpv54Z5MALmou6rENZOu7F_jAFD5CPo1hb5amF2s,24 +jedi/third_party/typeshed/third_party/2and3/characteristic/__init__.pyi,sha256=3h0tr1yazsqIaoo23raa-kUVvvfFTzzsMHllLmJBgY8,1330 +jedi/third_party/typeshed/third_party/2and3/chardet/__init__.pyi,sha256=iTl0aDlS_uQVPCs4ZQSsAYwNveIOYkE3ADM0Bu09Q04,667 +jedi/third_party/typeshed/third_party/2and3/chardet/enums.pyi,sha256=inSdRKDA-1mWbD2S2Wk1NRmWRmefiYx5YQxJMGimkz4,710 +jedi/third_party/typeshed/third_party/2and3/chardet/langbulgarianmodel.pyi,sha256=OpF1fUvRwXdQXUXoJs33oTP4KRMWZF05xwZjkLu6iyc,263 +jedi/third_party/typeshed/third_party/2and3/chardet/langcyrillicmodel.pyi,sha256=QbAYnXFTbOS3n1OntaCWI1x3XIbWX96oRwyyA9A5vZw,536 +jedi/third_party/typeshed/third_party/2and3/chardet/langgreekmodel.pyi,sha256=T0VzNfG5ktCgMT8PY-JSmVToVUXYxY9T72iaYQ9xmmk,240 +jedi/third_party/typeshed/third_party/2and3/chardet/langhebrewmodel.pyi,sha256=tUeIdTVWsWmI-RJAIkk5ubiUqeI9R1vUElAk1cdCxP4,169 +jedi/third_party/typeshed/third_party/2and3/chardet/langhungarianmodel.pyi,sha256=6sMaBbtCipjI9zwb3pK2ojnJ1V50ueO7cLd3ODuBkVo,263 +jedi/third_party/typeshed/third_party/2and3/chardet/langthaimodel.pyi,sha256=X0Ot8G3kPmA4VrnAxy7wwAkLyZJ5QfLOQMMSZfDpNH4,157 +jedi/third_party/typeshed/third_party/2and3/chardet/langturkishmodel.pyi,sha256=0xxalkd4p4lrZFWdCy9OZhvAG_S1rPEH2HuguWFe_7o,171 
+jedi/third_party/typeshed/third_party/2and3/chardet/universaldetector.pyi,sha256=Cv9ikp-HF1JsbmUUnfrTGdj-ogw_iFVSf0yOQEE5Hxs,809 +jedi/third_party/typeshed/third_party/2and3/chardet/version.pyi,sha256=9L2O530uTJPPoHbmYkRjZwFkoIeIOhCWwGGBYouCKN4,61 +jedi/third_party/typeshed/third_party/2and3/click/__init__.pyi,sha256=3oe1-v8WfI-v6asPrtdIp1ochj2vT_cKOt4W6PBUx0E,2273 +jedi/third_party/typeshed/third_party/2and3/click/_termui_impl.pyi,sha256=gMqbhjSvNlAmi6GaFanVip40LRi4taeSN4sR_ck10II,442 +jedi/third_party/typeshed/third_party/2and3/click/core.pyi,sha256=lRB0bp9srrgM9TRZ8ZKRV71c2QRF1EOi2DBE7ePclQs,11012 +jedi/third_party/typeshed/third_party/2and3/click/decorators.pyi,sha256=UbSVA_xm2O6BZhfl-K0Xi4a_xzuMiby4_pz9gZx2X8I,9143 +jedi/third_party/typeshed/third_party/2and3/click/exceptions.pyi,sha256=o4rbUXi-6iBjHNiHUjnEo-Ad02Dy3xrZL_YYPVkWN78,1936 +jedi/third_party/typeshed/third_party/2and3/click/formatting.pyi,sha256=D6GMWwb6-9mM7-F7Om4bQP-pbtFKD2NZ_rDUKYNpjvA,1383 +jedi/third_party/typeshed/third_party/2and3/click/globals.pyi,sha256=Xi3fHtTEJHjVyQ-djJEaQkDxnZ9RE5xD-uh2B1xs9ho,274 +jedi/third_party/typeshed/third_party/2and3/click/parser.pyi,sha256=tSKZ0bKLpIObxaSZ86IBpk7PsR6idJGXD2bZzgya5ss,2006 +jedi/third_party/typeshed/third_party/2and3/click/termui.pyi,sha256=cG1FM90WpFvr8VQ9-XY_zstLebtXRK2MLZ0ncPqwMXg,3291 +jedi/third_party/typeshed/third_party/2and3/click/testing.pyi,sha256=s1GKTPK-blyfhaXqk3NvFBAXRBGuCR73RRH_weGbOcs,2136 +jedi/third_party/typeshed/third_party/2and3/click/types.pyi,sha256=VSx6kUdgT7LHTW2Ig8-pvnD9t8AxUYYSYUabbpsDdyM,5045 +jedi/third_party/typeshed/third_party/2and3/click/utils.pyi,sha256=Rgm3qjXceoORj5TgL7fHcBMs5bU6j2EU7qC68ZaVElU,1752 +jedi/third_party/typeshed/third_party/2and3/croniter.pyi,sha256=TJBwRUQdiVQbCYBiao1VL2Esal2U_I3n6ZR38NtOd7o,1934 +jedi/third_party/typeshed/third_party/2and3/cryptography/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/third_party/2and3/cryptography/exceptions.pyi,sha256=CpK5HtZvigk1U7yYLlqYcXGJaOzknyfhUUittXFuKiM,262 +jedi/third_party/typeshed/third_party/2and3/cryptography/fernet.pyi,sha256=pr8Po1ra3EdG9lMU8osc1fg0-ZOoASGAHtccbhcbr80,1282 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/__init__.pyi,sha256=3fBxcSppJr6EOEcUojvflG3Eegg7lv2Qp0dNQQILrP4,63 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/backends/__init__.pyi,sha256=dKFUFhsHmSbNrHdG2a_5CErfvQu9hdnIWMA1vys9LaI,124 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/backends/interfaces.pyi,sha256=B6ll_f3g9smOyhOZR8hRLJcc891y2yr57KPSVQYb1PI,8238 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/bindings/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/bindings/openssl/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/bindings/openssl/binding.pyi,sha256=QHIbjl0Q39fxas4DabuLytXjjBZrfqNkft0H_nWwXr4,148 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/primitives/__init__.pyi,sha256=3fBxcSppJr6EOEcUojvflG3Eegg7lv2Qp0dNQQILrP4,63 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/primitives/asymmetric/__init__.pyi,sha256=3fBxcSppJr6EOEcUojvflG3Eegg7lv2Qp0dNQQILrP4,63 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/primitives/asymmetric/dh.pyi,sha256=AHR65ed03NWB6oK8YoTQYt3AgCk0fMk179NRcE5WfmQ,2651 
+jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/primitives/asymmetric/dsa.pyi,sha256=AeKPk-Evk0aTJufLg3ohlne7qR2CXJqMse8gNoAD-y0,2965 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/primitives/asymmetric/ec.pyi,sha256=VoSyXBfG2NuAqHd_WARe7UH1eqq9jaO5vijRMNk3AFQ,6340 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/primitives/asymmetric/ed25519.pyi,sha256=OiE4CJcNDIaqd_VzdmQGn28WPdT5PBQ5jVKfq_ZRc3k,985 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/primitives/asymmetric/ed448.pyi,sha256=Mw-p07GDyttBXKaNB5Su5hw01DQXZgjEZe5M1e7PlhE,973 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/primitives/asymmetric/padding.pyi,sha256=g1sB5wpH6yoi9LrLUSHOejKdt8xiLmkPvDdUn6Cm1UY,787 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/primitives/asymmetric/rsa.pyi,sha256=rT_qcbpenlB8wuhg3jRcMm-5OErh2cCc3tkNFn-x1d0,3288 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/primitives/asymmetric/utils.pyi,sha256=nTFxj83DVi4OEi8B9wokES2Vh-HwUsxZHiEA4PcH068,406 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/primitives/asymmetric/x25519.pyi,sha256=JHX8lIJN0j94AsHIS4jQWCrlcOgLdb3Ud2QznMRXOC0,919 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/primitives/asymmetric/x448.pyi,sha256=efSTvyRkpVAcxtAwzYymDbnOQPA-dJW7YAliRVA5_cc,905 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/primitives/ciphers/__init__.pyi,sha256=eOEIBpUXLaH5IkOca5Ko3HQdUGfDIDFDwmlsz7RBGJA,1363 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/primitives/ciphers/aead.pyi,sha256=MhkfsUnaMVEi6oRrcfTgp6h1_1QCS8bvxtwoE6bqZS4,1065 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/primitives/ciphers/algorithms.pyi,sha256=Ig9mWCRz8rM6X5js1D4rlT06YbdALJ5RUpO4i6Qb7Io,2245 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/primitives/ciphers/modes.pyi,sha256=3rIwa-kj4k0wj8YfZeesqX6jEJhHtUZth6-ivi76etE,3089 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/primitives/cmac.pyi,sha256=HO4SJ5cJgPcP3cBjXg6n8nAKBWpyOlQzyXZfyQz_af8,461 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/primitives/constant_time.pyi,sha256=47ogru9j6XG5mLMDsl2iGKxhZjsH_bKhWYBt4c1jQk0,46 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/primitives/hashes.pyi,sha256=j3L6NIrR867bxvIoY6FVw10HEZz-0alVzDTUdPqf48w,1342 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/primitives/hmac.pyi,sha256=4iDj3W1Nk5MKJdkpNVRsY3Q_KP9NBOK_zS7pbo1Wj_0,457 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/primitives/kdf/__init__.pyi,sha256=fymJGVYF4JGsuO5IUacyuYb4Xunrps4mFrJ0XFEUdfU,261 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/primitives/kdf/concatkdf.pyi,sha256=Qmc9PP5j2fsiJfQltiWvgywgNLPQVTcyofH2mNqmWmI,951 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/primitives/kdf/hkdf.pyi,sha256=m_JHDA0r_aZGcOT5B0_3H9VpNfN9WnEAvb1zvgIssa8,902 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/primitives/kdf/kbkdf.pyi,sha256=MCUGyI5wZWLcPd9Sh_aHnDfumqS9ZoCYsQ2WseK-BM8,867 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/primitives/kdf/pbkdf2.pyi,sha256=-MWdJfLGEKiNC3qavnm2unJDtndsBFmZmQoj4Cbi_N8,561 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/primitives/kdf/scrypt.pyi,sha256=wKA2YyDAullLwwfR9Ng43xn2gKd7GyCH9NGVSRz1Ycs,452 
+jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/primitives/kdf/x963kdf.pyi,sha256=wvi3YJDqDJdqVT-Dgr84e4-cqosiq2f_wQQD6mgFXsk,545 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/primitives/keywrap.pyi,sha256=FGJg_GstrQcaavqriIfHdq_aOQPVeq_F9CpN8Nw0gvI,611 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/primitives/padding.pyi,sha256=RYc1U9k5Vw4NzpT-JXi_AwRiPXwzu7XLVyG6HtDy_gM,540 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/primitives/poly1305.pyi,sha256=ON5PxE5sYR6YLf8grWy-8FwUfGiQt28l1s2elvrKgYw,375 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/primitives/serialization/__init__.pyi,sha256=7RHfNmeDX9gwtceJXJ7kdp_YCpqHB1BZObkeDk2YSAI,2359 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/primitives/serialization/pkcs12.pyi,sha256=NwgQSXWYnaOm4717c2v92AP8jrw0davzciJDVzeDIjE,933 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/primitives/twofactor/__init__.pyi,sha256=MV00ZbBi-FKgHi9PYY2ZS7tneV2rghu2eVi1KgxRZzs,35 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/primitives/twofactor/hotp.pyi,sha256=ibl6C1ISmQ5H6nmmT_axuSuhPmPUn7YBx7A4i5PoFbY,540 +jedi/third_party/typeshed/third_party/2and3/cryptography/hazmat/primitives/twofactor/totp.pyi,sha256=p2rGTlM-DzrBmqLnYQKklWHjJJbDwQ0yf_J1BfJqf8g,585 +jedi/third_party/typeshed/third_party/2and3/cryptography/x509/__init__.pyi,sha256=3xR6GVwKT2nPtrlazcfVcs_r5DmEFkYk1OnstSBjiQ4,17734 +jedi/third_party/typeshed/third_party/2and3/cryptography/x509/extensions.pyi,sha256=zMbVNkp-hl_XW3RW71h0MlwUM-Rrz3X0OxEEZbZAo-Y,557 +jedi/third_party/typeshed/third_party/2and3/cryptography/x509/oid.pyi,sha256=eHglWkt4t9CRxOwXqdsocMPa8MpJFTHn1lhBf5EAfMU,4153 +jedi/third_party/typeshed/third_party/2and3/dateparser.pyi,sha256=xAhfYbQgduxqvMxoMSfuaBeQ5pzwLWVrZTdbOzHWMYo,522 +jedi/third_party/typeshed/third_party/2and3/datetimerange/__init__.pyi,sha256=_CLVxkVpVy20mXcnlMv8KzMcvSEbnEKgYfScf_swPuk,2062 +jedi/third_party/typeshed/third_party/2and3/dateutil/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/third_party/2and3/dateutil/_common.pyi,sha256=Xxs-IpVplkwjwEnqCLwzA-JIm6I9kzAFkcLQWyzZWvY,340 +jedi/third_party/typeshed/third_party/2and3/dateutil/easter.pyi,sha256=5mypiQN0QZHpFBk80RddMUkFuRHb4qWOX8wJCbx4a6s,214 +jedi/third_party/typeshed/third_party/2and3/dateutil/parser.pyi,sha256=Brz_8xe_yX14RB6B8pVk6BacM6HizqkUCIQ5vc9U-_I,1779 +jedi/third_party/typeshed/third_party/2and3/dateutil/relativedelta.pyi,sha256=SfCOq1b8otE6in4nsnH0m0Kbyl2jbWaxGwcBoeapTHk,3243 +jedi/third_party/typeshed/third_party/2and3/dateutil/rrule.pyi,sha256=EBup_m5smahe2tbtJG_-0kj6pyZjyv0YKi5oWKjwP1s,3219 +jedi/third_party/typeshed/third_party/2and3/dateutil/tz/__init__.pyi,sha256=MlXCZJGqduJh-Mk0pHLw06oNxeW1fvPP3ixfo2SM8ak,340 +jedi/third_party/typeshed/third_party/2and3/dateutil/tz/_common.pyi,sha256=0C_pqZ2TLRyKAsg_lS6WR-pwbGiezi4PY7uotzfUMjk,818 +jedi/third_party/typeshed/third_party/2and3/dateutil/tz/tz.pyi,sha256=mExYydbsNJYcED03KVMfEHthqixakuSa3Qq_9mDczUc,3970 +jedi/third_party/typeshed/third_party/2and3/dateutil/utils.pyi,sha256=c9oLGnocofsb4y9CPpCIZGPFF_tysaA6P9i_g3CpZVE,281 +jedi/third_party/typeshed/third_party/2and3/decorator.pyi,sha256=MXFxKax2ovqI9ZAJRmWIyNzOxL1sYQ2vGBM6JGkl_44,2760 +jedi/third_party/typeshed/third_party/2and3/deprecated/__init__.pyi,sha256=Q5NJtAoHP5mH31hhagi9d8-jI3qoRJDlRQNCMMBpOG8,46 
+jedi/third_party/typeshed/third_party/2and3/deprecated/classic.pyi,sha256=lWfx7eJTqKQPcZr3oKLHHrPB29S-vYm8yImwb3enRM0,783 +jedi/third_party/typeshed/third_party/2and3/deprecated/sphinx.pyi,sha256=NvfjRlHcPAeTbI_BkSg9ygv0klEMxTShqP9i5GjfaSQ,1129 +jedi/third_party/typeshed/third_party/2and3/emoji/__init__.pyi,sha256=Ix7SRTSbCqLgmR0L0MCBIW72Zs0t3-mmmbdMzg4Axbo,373 +jedi/third_party/typeshed/third_party/2and3/emoji/core.pyi,sha256=-4Hs3zO0KZtnou62-FII7AZbyEpL__-VRSziFn8acTo,419 +jedi/third_party/typeshed/third_party/2and3/emoji/unicode_codes.pyi,sha256=qwYCvETNLt20Ok76oiBBIuVfIzzEkdLt1jhxGzdqHUA,171 +jedi/third_party/typeshed/third_party/2and3/first.pyi,sha256=jk0nVWOV_kqCy1ZIvuTPGbggVajhW9SnGVYdDflfss4,481 +jedi/third_party/typeshed/third_party/2and3/flask/__init__.pyi,sha256=dKC5xsUiM9a0nh1zxuP991HYODlG4Th_-2_uifzieFI,1688 +jedi/third_party/typeshed/third_party/2and3/flask/app.pyi,sha256=E352cSxQISVJ894PKmp07vTwurIyadV2RRI1Cro7m-Y,8224 +jedi/third_party/typeshed/third_party/2and3/flask/blueprints.pyi,sha256=g_ZbSf5GoDmzEbTyJUnDydPwfK--8soIOApyjCgjg3w,3564 +jedi/third_party/typeshed/third_party/2and3/flask/cli.pyi,sha256=WwdlCr0pd8tNj8W8B2XHg6jIlh-ryn5vzaBvvWe2b3U,2195 +jedi/third_party/typeshed/third_party/2and3/flask/config.pyi,sha256=KjCQX-yxie9WA0K5-Zujen_kja_GyptfYqTw_L-QdhM,871 +jedi/third_party/typeshed/third_party/2and3/flask/ctx.pyi,sha256=JJ9QMsJoIWCudf9Gwi_9PmFhLHh3Qi-m50IklJOrnOg,1335 +jedi/third_party/typeshed/third_party/2and3/flask/debughelpers.pyi,sha256=xW9zO6fuhvTJ4rk-tQFWucJOzufOJjHaahF7_Hm4HYQ,507 +jedi/third_party/typeshed/third_party/2and3/flask/globals.pyi,sha256=C86I2edNhxlipXPH6Ghwf2gN2E0ALtd7uRe2f1A5NOM,322 +jedi/third_party/typeshed/third_party/2and3/flask/helpers.pyi,sha256=2sriNHIKIlknrelbfl2Kh9Fd-Gzqo1qX3G0pptqn5ig,2036 +jedi/third_party/typeshed/third_party/2and3/flask/json/__init__.pyi,sha256=1UCkfXT-tCvt4MW1loZM4fXMd9_8U91nYZyMCzddUSY,654 +jedi/third_party/typeshed/third_party/2and3/flask/json/tag.pyi,sha256=_cOpm75btUJ-zPPdvBeGxj12fENH_9XY0_I32Ounk3I,1918 +jedi/third_party/typeshed/third_party/2and3/flask/logging.pyi,sha256=AOGLuCAg9HnXXkXQsVGa2lQ6TAxqk18G0RpytsD0SBo,150 +jedi/third_party/typeshed/third_party/2and3/flask/sessions.pyi,sha256=rrIKmlljalXzcTNH_x5YVEVsmMKuIJnJ1r7XHI2RTfo,2017 +jedi/third_party/typeshed/third_party/2and3/flask/signals.pyi,sha256=HRUu9jh583VLFfxUmdNjJ17XnVdziRDu-_9NIgnf2yc,718 +jedi/third_party/typeshed/third_party/2and3/flask/templating.pyi,sha256=xBS-FLM_qPjNQwx5Z5a-c48hQ77vfC5LXLNCJCSmI4g,604 +jedi/third_party/typeshed/third_party/2and3/flask/testing.pyi,sha256=oiZz2eCxXMLPjChHuLps-2HwtApsmWgY0nU1fFmXc74,1827 +jedi/third_party/typeshed/third_party/2and3/flask/views.pyi,sha256=AXG5OxY9r03ZDSFtV48yXsFPXQlMFZtD0cHceIpwzIg,530 +jedi/third_party/typeshed/third_party/2and3/flask/wrappers.pyi,sha256=Qhblio57KSJw-stcPOYTRxCWtRZ7V7yCCMtz8q2JkBc,1145 +jedi/third_party/typeshed/third_party/2and3/geoip2/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/third_party/2and3/geoip2/database.pyi,sha256=KiGpw7XqZHu0bSQSFNDu9yW-RDbP8XGTQCD0LTO_soM,1133 +jedi/third_party/typeshed/third_party/2and3/geoip2/errors.pyi,sha256=pZ2Y9djJdVQVgKMnPXPI77UKCF_gs6v9ApuWSj4tgSY,494 +jedi/third_party/typeshed/third_party/2and3/geoip2/mixins.pyi,sha256=ZpggdVI7qLOYVHt91mL8uPwWq4E07MxpN_UfAgoPcB8,120 +jedi/third_party/typeshed/third_party/2and3/geoip2/models.pyi,sha256=h0flgP22m1KL7U55cg1PAhoQ73XeGPg5s2nLn6B_5aY,1867 
+jedi/third_party/typeshed/third_party/2and3/geoip2/records.pyi,sha256=3-gke4nBlwnk09iOiT8tRdusFPi6CewrJn9foXKvI_M,2071 +jedi/third_party/typeshed/third_party/2and3/gflags.pyi,sha256=c32SjcBY5EaEWC0ZzMWqfEM9BfMa8sMc4nX_WntL3fc,10776 +jedi/third_party/typeshed/third_party/2and3/google/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/third_party/2and3/google/protobuf/__init__.pyi,sha256=U54VfsG7-8DG_1f27w_Gmz8NunTjIR0iCWEXUCuKKVA,19 +jedi/third_party/typeshed/third_party/2and3/google/protobuf/any_pb2.pyi,sha256=r12boNnMlrqzwGMOVLK3YUILTg26UVP3uGOav2S3vgo,1353 +jedi/third_party/typeshed/third_party/2and3/google/protobuf/api_pb2.pyi,sha256=Vk1syiUbOtrQuhC5HKZX4RPYKtdAbypTsX-fZox8Gtc,4881 +jedi/third_party/typeshed/third_party/2and3/google/protobuf/compiler/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/third_party/2and3/google/protobuf/compiler/plugin_pb2.pyi,sha256=MhJF46Zpl7YfIhQyUBXmG5pkOYIww3CSrkhyIK_FL58,6548 +jedi/third_party/typeshed/third_party/2and3/google/protobuf/descriptor.pyi,sha256=fkv0RR_gPgZr9EJS7tisfkMQtiYDX8OOCGeY7DKBSvc,7843 +jedi/third_party/typeshed/third_party/2and3/google/protobuf/descriptor_pb2.pyi,sha256=A7YzWInNH0SSQrlE4wXAw2vbnCNw93zkyu196Gm8rto,43146 +jedi/third_party/typeshed/third_party/2and3/google/protobuf/descriptor_pool.pyi,sha256=dsqeewyqydUwYE9sNscI2qvYE1h5huXeZQ9igxjQD8I,744 +jedi/third_party/typeshed/third_party/2and3/google/protobuf/duration_pb2.pyi,sha256=jBEaeqcqCopxOCSWjbhP8zjXLf-VkCBFSZeZ1G8I1UU,1348 +jedi/third_party/typeshed/third_party/2and3/google/protobuf/empty_pb2.pyi,sha256=FWX0qLZZMDX34-I45EwCjvOWnNdvwGJ-yE3DcZXoZJc,603 +jedi/third_party/typeshed/third_party/2and3/google/protobuf/field_mask_pb2.pyi,sha256=6E26cc872vDtSCX0B1XQGAHUW_ajwYcCtKKg-jrejJg,1558 +jedi/third_party/typeshed/third_party/2and3/google/protobuf/internal/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/third_party/2and3/google/protobuf/internal/containers.pyi,sha256=FRcEQWw5vvdjZ5fD80mj9G_9tdZjOTgWiMGIoV6fhnc,3653 +jedi/third_party/typeshed/third_party/2and3/google/protobuf/internal/decoder.pyi,sha256=roJK89CkgjVKjRJbEnQ5ik1KdfXhwxymSyRIg7c5WW0,860 +jedi/third_party/typeshed/third_party/2and3/google/protobuf/internal/encoder.pyi,sha256=3rRgqC6ceJ7LtL7Qo5YiAK6CwE_HigHMsj9l-oa_5iE,1045 +jedi/third_party/typeshed/third_party/2and3/google/protobuf/internal/enum_type_wrapper.pyi,sha256=HPVeZHGr2DNnMI9bX8hRNii3OrD8DtWIr2fawf8BQ2Q,650 +jedi/third_party/typeshed/third_party/2and3/google/protobuf/internal/extension_dict.pyi,sha256=e54-8z3JwpIKs36-NI9VWfQ9LEncwDMCXJrJI0wint8,1795 +jedi/third_party/typeshed/third_party/2and3/google/protobuf/internal/message_listener.pyi,sha256=EiRlRR-kzJlKV3NXoYtYrGhMS3lZ2oWwQdq77h1cY8E,148 +jedi/third_party/typeshed/third_party/2and3/google/protobuf/internal/python_message.pyi,sha256=IESr1OjAZD9idg1OgWzMfeV2Sta08R0uHLAbweljTD4,46 +jedi/third_party/typeshed/third_party/2and3/google/protobuf/internal/well_known_types.pyi,sha256=DFxcLR_wHeTGdA49Vl-dr8k0zdm7LlksB6Dqj3ZD5Yg,3756 +jedi/third_party/typeshed/third_party/2and3/google/protobuf/internal/wire_format.pyi,sha256=ncrIlZeBjytfWeG9rQX8VdHO8YvS3EmqBJ4rIbaZVxw,1554 +jedi/third_party/typeshed/third_party/2and3/google/protobuf/json_format.pyi,sha256=ZCsnQy2nbUTYAkOHilhoGAu0gMyiTGx7rD1wTMEH75M,903 +jedi/third_party/typeshed/third_party/2and3/google/protobuf/message.pyi,sha256=wxYo7wptWBT2maYMBCMwEGIyuTrPzmjUNmGOly4wmhE,2522 
+jedi/third_party/typeshed/third_party/2and3/google/protobuf/message_factory.pyi,sha256=LOP5EeGqIi8j6R0V-4eoG9iZNX30c3dQb1J59td_4Ks,631 +jedi/third_party/typeshed/third_party/2and3/google/protobuf/reflection.pyi,sha256=VahfyfCz4LzllaCNcv6qXVMa6sWQZ9PEbM9yTKrC9d0,230 +jedi/third_party/typeshed/third_party/2and3/google/protobuf/service.pyi,sha256=khFSQ7jbdMgCZRnhTDDIXnaDvS_GsjhllVxiJpDu9yg,1371 +jedi/third_party/typeshed/third_party/2and3/google/protobuf/source_context_pb2.pyi,sha256=ojA6bEP8HWs-2i-UH8GOxc8z3fYZGq6diaj1vZLTEgU,1097 +jedi/third_party/typeshed/third_party/2and3/google/protobuf/struct_pb2.pyi,sha256=g1CuFe2Cd37bigkRiOudt2hoIA27LIH0YcxxM1iym1w,5358 +jedi/third_party/typeshed/third_party/2and3/google/protobuf/symbol_database.pyi,sha256=skHeJKelHAiApzBN2EKOd6VaWP3IsyjOxoe0HDS3X84,912 +jedi/third_party/typeshed/third_party/2and3/google/protobuf/timestamp_pb2.pyi,sha256=3WwiKOf2BEcFMLwe480tAQWPDG1xRtRebgwvj2xIN9o,1354 +jedi/third_party/typeshed/third_party/2and3/google/protobuf/type_pb2.pyi,sha256=TNdAmPxbIYjLzTc_4nMSy3tyBY3gPN7Vteotfh6aNxM,11139 +jedi/third_party/typeshed/third_party/2and3/google/protobuf/util/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/third_party/2and3/google/protobuf/wrappers_pb2.pyi,sha256=rew4YgJVRs7PuxNVZggdHPISr2QuNOYmDbj2gmZIv8k,4287 +jedi/third_party/typeshed/third_party/2and3/itsdangerous.pyi,sha256=ufy10z6xR-SpjLpa8c89G4uN-vu_LGaGGiSMzA-PtYw,8405 +jedi/third_party/typeshed/third_party/2and3/jinja2/__init__.pyi,sha256=FGOBNYKOz5GYd8TgZutpo9IKAZ32BiGnbkiz1dWTIpg,1529 +jedi/third_party/typeshed/third_party/2and3/jinja2/_compat.pyi,sha256=7I95GYBNx-7EWPd3BvePzanb1IWGiOJZwAW2AdCJLwQ,598 +jedi/third_party/typeshed/third_party/2and3/jinja2/_stringdefs.pyi,sha256=_gw2yhYfWIlqy06ve5t93z3SIVXX2j5Md3rz58LIKjE,360 +jedi/third_party/typeshed/third_party/2and3/jinja2/bccache.pyi,sha256=ypfXFrP5k-Phe6dJ3jLwn3yhRMzDSYPHTpsJT4Hx56M,1396 +jedi/third_party/typeshed/third_party/2and3/jinja2/compiler.pyi,sha256=PGyzLL8q-_nQVW45vTCeyclt-UuWjln6UqewcHsju9E,6363 +jedi/third_party/typeshed/third_party/2and3/jinja2/constants.pyi,sha256=seJNAMX1_sGZ8wi8SB5qbZtSUvYiQMelMpXMyxl-AR4,23 +jedi/third_party/typeshed/third_party/2and3/jinja2/debug.pyi,sha256=6XZeMpZjfFUP-Mh5tEof8P8UANNgJ7lsBq1RUbjL_M4,1018 +jedi/third_party/typeshed/third_party/2and3/jinja2/defaults.pyi,sha256=fe4KjTE3_Kh0bG7yXzSy6cqtlkHTsRgMWYyMzLnBsag,532 +jedi/third_party/typeshed/third_party/2and3/jinja2/environment.pyi,sha256=7cYxhgLusiW6bw8147dTFHfjesFH_ytARlKw5bmfLL4,8493 +jedi/third_party/typeshed/third_party/2and3/jinja2/exceptions.pyi,sha256=P52TnpizVoU2vAeayt4MZlTYMnhJYiMrWxiLMWP1LeQ,1050 +jedi/third_party/typeshed/third_party/2and3/jinja2/ext.pyi,sha256=w5pYOcJ4h9vgSon1YceCeXsGDWjvUQjGSdCOhoScYXQ,1684 +jedi/third_party/typeshed/third_party/2and3/jinja2/filters.pyi,sha256=OxR24skT1uz7n2J-h3voluptB-l21pQ9KTx0CQnGsVU,2425 +jedi/third_party/typeshed/third_party/2and3/jinja2/lexer.pyi,sha256=evBj10x_89jlfuVacpADTOynp2XF9n8d2vQf5lZg-pE,2764 +jedi/third_party/typeshed/third_party/2and3/jinja2/loaders.pyi,sha256=EbGBtL810XARseWeDa6srKOlNUiUesvlJllf8Li-dw8,2923 +jedi/third_party/typeshed/third_party/2and3/jinja2/meta.pyi,sha256=_R2ui6gjeNSl5x59dwZjhL1WxTG0oKqXSCrgLx-2L24,339 +jedi/third_party/typeshed/third_party/2and3/jinja2/nodes.pyi,sha256=-86WsO5Cbws9IqXBSZidnIVlKesGmV2VFdhIUMrK8Vs,5271 +jedi/third_party/typeshed/third_party/2and3/jinja2/optimizer.pyi,sha256=1bIneJHqfKubBDxSn7sWv2-tGGZ06G6c4zr9Lxbd4X4,661 
+jedi/third_party/typeshed/third_party/2and3/jinja2/parser.pyi,sha256=G6S7SzeUUTs_AGL916QYuaEKRhGyrpCekEVt0JOoRRk,2576 +jedi/third_party/typeshed/third_party/2and3/jinja2/runtime.pyi,sha256=8uryZzgqtGakyVDyA7OGzI9uqvSobgIoGrsj5MWmwb4,3463 +jedi/third_party/typeshed/third_party/2and3/jinja2/sandbox.pyi,sha256=zhmpDnPs50ugSYwrdg7V9r3OCbM839NHfBHILwfse-M,1146 +jedi/third_party/typeshed/third_party/2and3/jinja2/tests.pyi,sha256=dixzMe9BiHnvbL8KyR-KY9LLX-VD1-pgbWXC6Mwc4LA,561 +jedi/third_party/typeshed/third_party/2and3/jinja2/utils.pyi,sha256=oCtfBv7W9JqYQMU7nukSm8kVK1wy38U7db3oFsnXaUI,2957 +jedi/third_party/typeshed/third_party/2and3/jinja2/visitor.pyi,sha256=yeJ6g3RC5bitP3tG-btWLOZn954E_e9RxKPtgsiwcVc,306 +jedi/third_party/typeshed/third_party/2and3/markdown/__init__.pyi,sha256=31jve2aAqalclv4Hgat04sP28Oz06Nh8qwJVkC4dB84,146 +jedi/third_party/typeshed/third_party/2and3/markdown/__meta__.pyi,sha256=lugjndxerVWkOhrd9Mo0zDlN4utrCKIse4vNsqQFvxU,46 +jedi/third_party/typeshed/third_party/2and3/markdown/blockparser.pyi,sha256=OYDwhQquCvSSpAxoXCZ0z5kABsWuvCog4LqG3HEePE4,463 +jedi/third_party/typeshed/third_party/2and3/markdown/blockprocessors.pyi,sha256=Hd8EgV71NYEe-nx1m2WHIoQ4p7dXJbzYRiA5vXka1O4,1490 +jedi/third_party/typeshed/third_party/2and3/markdown/core.pyi,sha256=K4hpNVt2gTOP72HB8YSv-xf6uHu_qsUTuBWKnAb9D_0,2555 +jedi/third_party/typeshed/third_party/2and3/markdown/extensions/__init__.pyi,sha256=qaZiIY5BUSCBliWaMIWdX4v-ROFISLWzp6juNrXQi08,549 +jedi/third_party/typeshed/third_party/2and3/markdown/extensions/abbr.pyi,sha256=NWvh4SRoU2bI0X7gdc0Gc5FNu3y51gIkRyDNfU93ozI,428 +jedi/third_party/typeshed/third_party/2and3/markdown/extensions/admonition.pyi,sha256=tX58KZyPP03stonTmJnnvgR8J-QL8MT6i6ILHtFZ_8U,385 +jedi/third_party/typeshed/third_party/2and3/markdown/extensions/attr_list.pyi,sha256=cQaSO-jiaqUGZN1BZoISNSAvLM9m9Y8W2-DJ5Qi9ytA,498 +jedi/third_party/typeshed/third_party/2and3/markdown/extensions/codehilite.pyi,sha256=HRl9kJHv2nleEi84BdCR9cIZ1w4yiOauvFKdmsXgieg,979 +jedi/third_party/typeshed/third_party/2and3/markdown/extensions/def_list.pyi,sha256=-aeDlJbL4JICSTuizcoBkYIHmkEfb6gpkGQEWpMc0r4,360 +jedi/third_party/typeshed/third_party/2and3/markdown/extensions/extra.pyi,sha256=_cNunwRxZSkv3OT-m9oWf76bVVMcJiTkosNCFtH3cz0,197 +jedi/third_party/typeshed/third_party/2and3/markdown/extensions/fenced_code.pyi,sha256=wLqffeWN4EVg1IpvnMeHW9CTfWJ7WC3Vwz8fi-_XrBE,428 +jedi/third_party/typeshed/third_party/2and3/markdown/extensions/footnotes.pyi,sha256=B219DIvS8G0RbJu_ThwOD0qxXME5Ble7k64ak8dfwak,1751 +jedi/third_party/typeshed/third_party/2and3/markdown/extensions/legacy_attrs.pyi,sha256=UE5KQPkirTdRMr90C4WqPUpKKHOALOQpT4HkHPrPxNs,300 +jedi/third_party/typeshed/third_party/2and3/markdown/extensions/legacy_em.pyi,sha256=S5wb-FfHSfIq9o5Uc1ZeQm3BwfR7mjRX8g2km5EV90U,306 +jedi/third_party/typeshed/third_party/2and3/markdown/extensions/md_in_html.pyi,sha256=BFh_ogT03pnKfpqUb0DY5LLw9T330erK3NzOBMOkI1o,260 +jedi/third_party/typeshed/third_party/2and3/markdown/extensions/meta.pyi,sha256=3eRiqW2npoe0iuhFt5BgFVGjhlSSKc4vYOkDjYltaJ8,361 +jedi/third_party/typeshed/third_party/2and3/markdown/extensions/nl2br.pyi,sha256=mjR-zAlnbfZkttPcKAihZt-8vj9VcsOvADl9LzTgW2E,150 +jedi/third_party/typeshed/third_party/2and3/markdown/extensions/sane_lists.pyi,sha256=hqqMgizBkpFc4msXW6IQjsGDp3WFwfeTvRHxSMuKk5E,392 +jedi/third_party/typeshed/third_party/2and3/markdown/extensions/smarty.pyi,sha256=RsAPMqFAK3Qfz51st2J6SdY7s0yDJXQMnl2HbRtHDJc,1080 
+jedi/third_party/typeshed/third_party/2and3/markdown/extensions/tables.pyi,sha256=Eg96sG_ww2G6kt6N4jlSwVA-LE834KZGp4-EgDacB9k,414 +jedi/third_party/typeshed/third_party/2and3/markdown/extensions/toc.pyi,sha256=fcqE9YDibJzb8P1Ox7WWczXi_34dCvpcPwXCLkrjagE,1178 +jedi/third_party/typeshed/third_party/2and3/markdown/extensions/wikilinks.pyi,sha256=Q2FIa1iPu0XMRqJTb8wewdPoTXEnJXZiFuYL-Zm0T1o,404 +jedi/third_party/typeshed/third_party/2and3/markdown/inlinepatterns.pyi,sha256=W_AMUX0BcD5vv8OJRL9D2hJfAc0BzK1hs12kOgKKr08,3022 +jedi/third_party/typeshed/third_party/2and3/markdown/pep562.pyi,sha256=pLjxji-A4xeBVYjntboexEwwoSp-JO4CbTLziUGvYqc,276 +jedi/third_party/typeshed/third_party/2and3/markdown/postprocessors.pyi,sha256=BRcuzX2JjjkfSk-OxVyYiPl27LjR9ak-u9kp2xZRVe8,400 +jedi/third_party/typeshed/third_party/2and3/markdown/preprocessors.pyi,sha256=oo3D5AAExEaq9GWiXhVblF37c2cojo7hjTsfh3IiOVY,550 +jedi/third_party/typeshed/third_party/2and3/markdown/serializers.pyi,sha256=V3npjXzO5oR8Ov6K-4JZTa96ma5VQGrtKDRjdvRvCG4,91 +jedi/third_party/typeshed/third_party/2and3/markdown/treeprocessors.pyi,sha256=_DfzQcWXflM9vqyQYWmPb0DfH5b8PEFTBRhBrpUEsaQ,469 +jedi/third_party/typeshed/third_party/2and3/markdown/util.pyi,sha256=4tEwF_04Z1zG_yu314kiwQCJUfnVO9AFLFoDUAU04a4,1584 +jedi/third_party/typeshed/third_party/2and3/markupsafe/__init__.pyi,sha256=RTRaycAzCzRQGvRT-ejSLNdvwg5kQsXB5oHcBPHypjo,2861 +jedi/third_party/typeshed/third_party/2and3/markupsafe/_compat.pyi,sha256=xN1vhSTmy2fXTE5pUrL_NRkYoOLH3lBY6YDf7ZR9czM,435 +jedi/third_party/typeshed/third_party/2and3/markupsafe/_constants.pyi,sha256=Kk2R4mLBd9wpcFgKA0k4u87FmXD-v1U6ODEduwhFkVE,62 +jedi/third_party/typeshed/third_party/2and3/markupsafe/_native.pyi,sha256=reb7nc_c28JTwKoUYlgirotBiOiESx1M1bpLZtXt6DQ,242 +jedi/third_party/typeshed/third_party/2and3/markupsafe/_speedups.pyi,sha256=reb7nc_c28JTwKoUYlgirotBiOiESx1M1bpLZtXt6DQ,242 +jedi/third_party/typeshed/third_party/2and3/maxminddb/__init__.pyi,sha256=VeOvFZHkaU9NmEfD-ZxOq735CGwfJmnMkDRUmkYLDW8,177 +jedi/third_party/typeshed/third_party/2and3/maxminddb/compat.pyi,sha256=lE11t1KuE4nA0ph6Bw9dWfhNE4l45r-4fCxOVRy4AfQ,194 +jedi/third_party/typeshed/third_party/2and3/maxminddb/const.pyi,sha256=c1LWH0XYtbcs9T0mJz5lyj6ZAVBwU2zZJdRk7Dd4Fc0,130 +jedi/third_party/typeshed/third_party/2and3/maxminddb/decoder.pyi,sha256=lZIOBE8OKJGu1fMmiH_n0b46qbmDxnXNsCq2Ed9oxzA,215 +jedi/third_party/typeshed/third_party/2and3/maxminddb/errors.pyi,sha256=ifm1u-yPLyPC5KPPF7F5vswnJ2X-TeGc5EgYXvWvn30,46 +jedi/third_party/typeshed/third_party/2and3/maxminddb/extension.pyi,sha256=k7yLS2l266OjFQfh4zmzOwO-46LpvnT3KM9FnvrZybU,1122 +jedi/third_party/typeshed/third_party/2and3/maxminddb/reader.pyi,sha256=X414tjdlsmCmS-fFfxFTqHn5-E8ifpb9_ckjzM-g9iM,1269 +jedi/third_party/typeshed/third_party/2and3/mock.pyi,sha256=Al3tA7zKNLXxUAeGa1e1ndsoaQtSm8ouPYT7_zifejI,14819 +jedi/third_party/typeshed/third_party/2and3/mypy_extensions.pyi,sha256=w1Erg7Whcf03pE8mI0T5qLwLPoaHncPRRJMrcgAomQs,2198 +jedi/third_party/typeshed/third_party/2and3/nmap/__init__.pyi,sha256=x_aIpzoj5vmzZKXKDrwrKEN5CWrg7XCDZ1-xOdfGOj0,20 +jedi/third_party/typeshed/third_party/2and3/nmap/nmap.pyi,sha256=hUW8b9yuNGhk4-XFq89rFhnzIghRIVN4wmYUiFwGjYw,3953 +jedi/third_party/typeshed/third_party/2and3/paramiko/__init__.pyi,sha256=0oM8LtFIAnlG-laK6WjyklSWeCGB9wdSzK7gVUbRlsg,1919 +jedi/third_party/typeshed/third_party/2and3/paramiko/_version.pyi,sha256=EeKXNTXM6-EkF3h5RGFiff1iafaFyZYoMscJOfcYuHc,65 
+jedi/third_party/typeshed/third_party/2and3/paramiko/_winapi.pyi,sha256=yFTkwFHb32lUW4WD6Eq8onLhRGGCenZZMOxAdvUvYIo,2649 +jedi/third_party/typeshed/third_party/2and3/paramiko/agent.pyi,sha256=VmUg7we1YYno6H4zHgjCoPY7pTO3zzFHknFMC8oYwEc,2057 +jedi/third_party/typeshed/third_party/2and3/paramiko/auth_handler.pyi,sha256=qbisXIS9P2lmxsw2cSVxh7y6WIMLsD9YRbBMdPOtpjA,1934 +jedi/third_party/typeshed/third_party/2and3/paramiko/ber.pyi,sha256=4OoK5kdfYx8MVMfmbZLxMVuu5xLxaPJe8uTnu0n8_Bc,611 +jedi/third_party/typeshed/third_party/2and3/paramiko/buffered_pipe.pyi,sha256=mvCBsHuL3EIsyewdEIiwUZNYpn7AaSBxcqTH_md14lE,520 +jedi/third_party/typeshed/third_party/2and3/paramiko/channel.pyi,sha256=2smxhSShd9xXKGqHb5MqB6Bo23CpUJIi1dQwHBiOIag,3796 +jedi/third_party/typeshed/third_party/2and3/paramiko/client.pyi,sha256=-zCUwFWZIcgSSwVKiEnzC8jsuiPC_DGdJUNaw1kXQ-s,2932 +jedi/third_party/typeshed/third_party/2and3/paramiko/common.pyi,sha256=MfByis2G6kU5-y4FjhhRTQD5DQaP5OOFrc-ciC9JiKE,3167 +jedi/third_party/typeshed/third_party/2and3/paramiko/compress.pyi,sha256=_nA_oaAuza2Oxg0glDo-ov6uYbAIYlKcrkY5spOfqAk,296 +jedi/third_party/typeshed/third_party/2and3/paramiko/config.pyi,sha256=uxhOYKOIPM7jGe8ZsKKh9f_yNtKEdLdxfFC2mV9nwSc,1170 +jedi/third_party/typeshed/third_party/2and3/paramiko/dsskey.pyi,sha256=RdWJ9CuhBviY1c7FChwLDe_ZTOzLukxqaC1K5A6AmpM,1242 +jedi/third_party/typeshed/third_party/2and3/paramiko/ecdsakey.pyi,sha256=q8KVhmbEGp9_YW_9AYcEx_nKshajW_qzWl8QcKSTv5I,2338 +jedi/third_party/typeshed/third_party/2and3/paramiko/ed25519key.pyi,sha256=4aW1Q8tt2U2QIY5uVuSuAczVFBLkP8PR3_AgniR1tlU,703 +jedi/third_party/typeshed/third_party/2and3/paramiko/file.pyi,sha256=Lu1hmKapmlk3_jd1FcIPbrnz89WVlTNCwY46D4TFbfE,1342 +jedi/third_party/typeshed/third_party/2and3/paramiko/hostkeys.pyi,sha256=S1zc4QbgA7dGlEtcixKZ8WrT2Ok861yy-zU8DLtvZpc,1942 +jedi/third_party/typeshed/third_party/2and3/paramiko/kex_curve25519.pyi,sha256=naqQ5GRL0pVcTXD-_XQX1yXd2EpEk0Uu9vNRTfo1Xws,771 +jedi/third_party/typeshed/third_party/2and3/paramiko/kex_ecdh_nist.pyi,sha256=5n2l13AS1p4jz4LbYhSZiJ1Q5xYt4VYIUYLLvJ61ydk,1112 +jedi/third_party/typeshed/third_party/2and3/paramiko/kex_gex.pyi,sha256=NuQIADC7WgKxmDy2NCSzx4cTCDALv9x5Nlp_YxZ900Y,1023 +jedi/third_party/typeshed/third_party/2and3/paramiko/kex_group1.pyi,sha256=h9Vj1_2oQa6qTdB60kAF7MrpScKW9522kG3FL4SUmvk,679 +jedi/third_party/typeshed/third_party/2and3/paramiko/kex_group14.pyi,sha256=JyCccRukC_ojKvgpoVaTxH7dacQN5ICaJrRU6DX2_ZQ,453 +jedi/third_party/typeshed/third_party/2and3/paramiko/kex_group16.pyi,sha256=8unnknuU2cuPNUKhFO13YzhPsaOXewViTCsW8f9cw3Q,359 +jedi/third_party/typeshed/third_party/2and3/paramiko/kex_gss.pyi,sha256=7v4a1mbwTtuKVNmkU91yWlU7e-8VGVaq96xwsvB3iIw,1539 +jedi/third_party/typeshed/third_party/2and3/paramiko/message.pyi,sha256=6MGhnlQZ53mxrYlQ5JjloSks1tlqUJDAzc7RZeZ9E8M,1496 +jedi/third_party/typeshed/third_party/2and3/paramiko/packet.pyi,sha256=0X8hvx7r-I2siTS7L5IJVzf0GeAmX3BOHXcOxxJuOtc,2187 +jedi/third_party/typeshed/third_party/2and3/paramiko/pipe.pyi,sha256=qnF6412bon86OfW4FrlTJjey2atC9u3eDLQKqH8QDZ0,951 +jedi/third_party/typeshed/third_party/2and3/paramiko/pkey.pyi,sha256=ZtYonbeM4kI4Xu6_eM0wU7aLzHFsEX6M3sG96x7b_2M,1800 +jedi/third_party/typeshed/third_party/2and3/paramiko/primes.pyi,sha256=e2J5npj8PeYfnQq2pW1RLCgMofmSITjDVWH0Z2lmv0Y,308 +jedi/third_party/typeshed/third_party/2and3/paramiko/proxy.pyi,sha256=4VFZ2iL-P85RBWXliik--vmtlAUsngl4CT2giLlxuHg,504 +jedi/third_party/typeshed/third_party/2and3/paramiko/py3compat.pyi,sha256=KWCrIqjmlN5keG1E5Ns6jDa46z8yMym8hJpv-1Deuuc,1053 
+jedi/third_party/typeshed/third_party/2and3/paramiko/rsakey.pyi,sha256=uHpfuSfDNTe7MsIdb9H6x5fKd1vP0SAKIcdOerxOTFc,1347 +jedi/third_party/typeshed/third_party/2and3/paramiko/server.pyi,sha256=lyv5SoRjT2tkm3AzCFG8CA9D1OtJKSdCCiQ5fNxfNi0,3062 +jedi/third_party/typeshed/third_party/2and3/paramiko/sftp.pyi,sha256=I81h3XktYUmZyfhBMsJJwWFYOxTiA60IAohoy_ekfZc,1065 +jedi/third_party/typeshed/third_party/2and3/paramiko/sftp_attr.pyi,sha256=7kLm3Cjr6Pm40VuoEzfV_u6zA_Ua2Pknu7-D5FxTpaw,667 +jedi/third_party/typeshed/third_party/2and3/paramiko/sftp_client.pyi,sha256=lgZedQtTqoeS7zSEO2r2Byo6mTO-DJxXgDxmQtRJva8,3133 +jedi/third_party/typeshed/third_party/2and3/paramiko/sftp_file.pyi,sha256=41HlM6ayMJaExhuvTI599gyKLtJhWOgxNTpIcXgkYlo,1356 +jedi/third_party/typeshed/third_party/2and3/paramiko/sftp_handle.pyi,sha256=OnJmN4k7typ9GyVtWukw4CuBvDIDvpWCdBQ4PxGPqJQ,531 +jedi/third_party/typeshed/third_party/2and3/paramiko/sftp_server.pyi,sha256=4aNH4sKvRI-49RNeOzFdSJAnvBnrgy0slFWrQ0AOPsk,1094 +jedi/third_party/typeshed/third_party/2and3/paramiko/sftp_si.pyi,sha256=RICAJB149dHYRnGFNgSwzZapr2Opr9-Eif4UCP1w7OU,1193 +jedi/third_party/typeshed/third_party/2and3/paramiko/ssh_exception.pyi,sha256=Nf27JN0Bt6JPpmMNEBC_LejuvbCi_P-ul9PYDqWd1t8,1454 +jedi/third_party/typeshed/third_party/2and3/paramiko/ssh_gss.pyi,sha256=WmUhwBWyvaof_6oP0ROFfrTo9OWccXMQ8_Q6USJq3bI,2774 +jedi/third_party/typeshed/third_party/2and3/paramiko/transport.pyi,sha256=kfJrcgrIs3Qu4KC7rd4EOcWMtz7ikEMeVqpgbvdMrVA,7945 +jedi/third_party/typeshed/third_party/2and3/paramiko/util.pyi,sha256=dxK3QYafkuBXobGkL0wBrZ3mJPz7ZwU-AFy2-1QaViI,1880 +jedi/third_party/typeshed/third_party/2and3/paramiko/win_pageant.pyi,sha256=X8oLeKZslmregVnQLAwaLDz8q95pCqVJ2ahSz3wVOI4,349 +jedi/third_party/typeshed/third_party/2and3/polib.pyi,sha256=kPN8NED1QJMdtkOwNC1Jh03d_i3iM8iaL7_vLaXoYao,5764 +jedi/third_party/typeshed/third_party/2and3/pyVmomi/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/third_party/2and3/pyVmomi/vim/__init__.pyi,sha256=wddK6G7TpjbfWY1cCqoYTDgoLmqoUWTHC5cmy8he_LI,2197 +jedi/third_party/typeshed/third_party/2and3/pyVmomi/vim/event.pyi,sha256=GIxugpgYOODuc5rSewssVmYlRAZ8qmkC1C38rbD4yhA,395 +jedi/third_party/typeshed/third_party/2and3/pyVmomi/vim/fault.pyi,sha256=LYt9WpvRZNEwWlR55k6l7CxSFbS8k_FLJLjU-TGoP8U,195 +jedi/third_party/typeshed/third_party/2and3/pyVmomi/vim/option.pyi,sha256=YCaLxJmOtTldHIlo984EI6d9ykGsFbelNGuTHL70Ar8,204 +jedi/third_party/typeshed/third_party/2and3/pyVmomi/vim/view.pyi,sha256=hLDI0bDPEkPnxpLliSBlcmYgond-3RCxWc1_3NU-5Bc,586 +jedi/third_party/typeshed/third_party/2and3/pyVmomi/vmodl/__init__.pyi,sha256=DuLSUTy9Szmd-_pUmuOcSXoaWrnCPCH3HU9xUtW6AVo,74 +jedi/third_party/typeshed/third_party/2and3/pyVmomi/vmodl/fault.pyi,sha256=l8VLbRpKzACCz_wqwXLJkyxLwYRS0XiUl9JmKy2Vixw,116 +jedi/third_party/typeshed/third_party/2and3/pyVmomi/vmodl/query.pyi,sha256=h0HhTZVJIG0t0yvxp3bgwjVJEOJDyO1iDmPPShwj5C8,1430 +jedi/third_party/typeshed/third_party/2and3/pycurl.pyi,sha256=PkqfXT-xgGm4Sq1bKyxtX7Fk1kGI4Wx4mt2DFrqObRM,13755 +jedi/third_party/typeshed/third_party/2and3/pymysql/__init__.pyi,sha256=uughCRiT4wuIMgn5-CDOZwZWaFcJg2ZzWSqEVK6AU6k,1625 +jedi/third_party/typeshed/third_party/2and3/pymysql/charset.pyi,sha256=8pQ9uIfBVCKgQE42rgAV8Oc1RAUiR7c8k2z4w64zVso,327 +jedi/third_party/typeshed/third_party/2and3/pymysql/connections.pyi,sha256=Eno05u1fJ2WBME7vc-WAzP0ztvE4Khh9JbZ1VqI0Ci4,5627 +jedi/third_party/typeshed/third_party/2and3/pymysql/constants/CLIENT.pyi,sha256=vnIE4D-CCZ2-TwRYvficDcBx-luonNhHmsuxq8M0V6U,308 
+jedi/third_party/typeshed/third_party/2and3/pymysql/constants/COMMAND.pyi,sha256=SAol0UV6ttKwz1i9DUn_DydO1HByeSodgDC3Qyv1BTA,407 +jedi/third_party/typeshed/third_party/2and3/pymysql/constants/ER.pyi,sha256=0O0FmmEhzeeEUmlkmSNluHuZVhhlTOIEHq1Yc-hXLFY,11280 +jedi/third_party/typeshed/third_party/2and3/pymysql/constants/FIELD_TYPE.pyi,sha256=112f_ZZ3GXSiGv5q3fY04FLq-NMWWh8zDSsneoGKfzE,354 +jedi/third_party/typeshed/third_party/2and3/pymysql/constants/FLAG.pyi,sha256=LJ19iNTKdV_tqShn5GyJbTc3JtGr7IesoCQy058KPx0,226 +jedi/third_party/typeshed/third_party/2and3/pymysql/constants/SERVER_STATUS.pyi,sha256=uoVJx08QJpMZzBiyZXTG1Hr7zcSahGx2zR89eOtYxec,331 +jedi/third_party/typeshed/third_party/2and3/pymysql/constants/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/third_party/2and3/pymysql/converters.pyi,sha256=JNCfG3LgOn6QUtAQm7Dkf5j5MAKasP9tOLDWGzD30jc,1331 +jedi/third_party/typeshed/third_party/2and3/pymysql/cursors.pyi,sha256=V0DYB2ILjVUjOc79rveMPALidnQ3ZlfRRiv_ZS608Hw,2180 +jedi/third_party/typeshed/third_party/2and3/pymysql/err.pyi,sha256=vz8muipV5ryF9NuS_6aya2mWwgAJ9JzdhAN-NvyhyBk,606 +jedi/third_party/typeshed/third_party/2and3/pymysql/times.pyi,sha256=N3iYLkF3MEtwRa8muKj4ViRXS7ON8fcW5XANu2VMxXo,170 +jedi/third_party/typeshed/third_party/2and3/pymysql/util.pyi,sha256=fKG9sTGCjyLG5L-n5K8STqna8XVfKzQ2a-X46ozbk20,66 +jedi/third_party/typeshed/third_party/2and3/pynamodb/__init__.pyi,sha256=qp8cF6TlhBd5820jKiBRcrR-qumnONUUlUKCzf4rib0,17 +jedi/third_party/typeshed/third_party/2and3/pynamodb/attributes.pyi,sha256=-GsbmMFuySBUZVphL5s770L_BinXheI-_8jEZWFcaa0,4342 +jedi/third_party/typeshed/third_party/2and3/pynamodb/connection/__init__.pyi,sha256=tIcusRqjD5cbw9gURA6V6gJS3kotd6qBxua-WeuUSfg,135 +jedi/third_party/typeshed/third_party/2and3/pynamodb/connection/base.pyi,sha256=lRRsdiy4q2wxWEKFd9f70MNqsUvGiaz3HbEYW366KXA,6089 +jedi/third_party/typeshed/third_party/2and3/pynamodb/connection/table.pyi,sha256=ybdRMvW7ihut6ROMqnP8Di_1janFYH82iN2BAO4Rreg,3825 +jedi/third_party/typeshed/third_party/2and3/pynamodb/connection/util.pyi,sha256=VelfJ8xvQieSdjx4hkp5g0W73tyArG2qobRxY2xccWs,67 +jedi/third_party/typeshed/third_party/2and3/pynamodb/constants.pyi,sha256=2DxSY2CwYrKx_uT0QKHorzmekcf6bcKWxQ5lZp6PRco,3038 +jedi/third_party/typeshed/third_party/2and3/pynamodb/exceptions.pyi,sha256=oN2x-x1qyp5WVRK-jF9rVjzQIHiUjNO5RAwfRTQSRdA,887 +jedi/third_party/typeshed/third_party/2and3/pynamodb/indexes.pyi,sha256=a5MVXShQkb2UUh5hJiX1S_UaLJ3Rzqp2IKB9ZfuU2SQ,1052 +jedi/third_party/typeshed/third_party/2and3/pynamodb/models.pyi,sha256=zg1iyUvCjODMELDKSPXncRXAZfyr-TePSzI61qfjUeo,5702 +jedi/third_party/typeshed/third_party/2and3/pynamodb/settings.pyi,sha256=nfytWdsaKGyoTVPsIi0B-15yecRvNQ61Y3L_sT0qpvo,145 +jedi/third_party/typeshed/third_party/2and3/pynamodb/throttle.pyi,sha256=Xnxx2caEi3AgLinoHSCVZUkuEPV5XxwMw83tpUaUvRk,472 +jedi/third_party/typeshed/third_party/2and3/pynamodb/types.pyi,sha256=BCI-zF5K_mOWPzGVixMvVXqISqSxWokGjR0aVosyvK0,57 +jedi/third_party/typeshed/third_party/2and3/pyre_extensions.pyi,sha256=CJ0jUa1yruR4a_cVqcFRpORBpYGea9C5vQ5lwGfWUZI,267 +jedi/third_party/typeshed/third_party/2and3/pytz/__init__.pyi,sha256=-LDivLI7EkEywpVQfVyMFuVNEDZFbod-zNxGNlQO_A8,1892 +jedi/third_party/typeshed/third_party/2and3/redis/__init__.pyi,sha256=pNlwX9y10ONwvlK1yzT9MMA6hQx06nW8u1qHrVpMmEE,829 +jedi/third_party/typeshed/third_party/2and3/redis/client.pyi,sha256=hKMjp_LRcqsk8xf2tv9HRdFVqqqJ46QH1MHbRgAReIE,26034 
+jedi/third_party/typeshed/third_party/2and3/redis/connection.pyi,sha256=aMlTz2YmsHb6igv5yajn4gJswPs9frFa_grs2eEqCLc,5595 +jedi/third_party/typeshed/third_party/2and3/redis/exceptions.pyi,sha256=d2fmJa9u5GrrAaST8JgdfpFsBxKupvKLb2iH9VgP7Wg,569 +jedi/third_party/typeshed/third_party/2and3/redis/utils.pyi,sha256=a5cyDCASB0E1ZuBKK33dZI7ZBcr2rISkiWU5alhjJbA,136 +jedi/third_party/typeshed/third_party/2and3/requests/__init__.pyi,sha256=LIORHwdjl-Ogkvvd5Fs_GZS3k_LcEOd9qglRpTQnYDs,928 +jedi/third_party/typeshed/third_party/2and3/requests/adapters.pyi,sha256=OfzqJV64CBHmzwroW2Tm14e5kZyCu_L6NJWSUhBgKLI,2925 +jedi/third_party/typeshed/third_party/2and3/requests/api.pyi,sha256=Gcw_jo5GO0jbLDMvukc10WFuXCngBEjyTbTTNgvgeMY,1216 +jedi/third_party/typeshed/third_party/2and3/requests/auth.pyi,sha256=q63njtnTjj3AICWDacBKsIS7DjLikQsJwRd7CtWa26o,1148 +jedi/third_party/typeshed/third_party/2and3/requests/compat.pyi,sha256=d3SHDa-UnNy17Mv0aio7kQEf6qsRzEpgA31-MwWuBIo,58 +jedi/third_party/typeshed/third_party/2and3/requests/cookies.pyi,sha256=wn0Y-_dNxjgTjY9WfRuJplkBLY41YyhUS4cDAiFGGo8,2033 +jedi/third_party/typeshed/third_party/2and3/requests/exceptions.pyi,sha256=RxHFR2hELgvFoBEa0AKeYaWNmjDyrWJrvJ6RKAr24UI,1259 +jedi/third_party/typeshed/third_party/2and3/requests/hooks.pyi,sha256=daf3Tp5DknV7lyWtGe5n0jV7U0I_6HwoMpOnlgCdYv0,117 +jedi/third_party/typeshed/third_party/2and3/requests/models.pyi,sha256=ynI5o3SRKsMTkXyxba2NCAVdTtGgC1hSqQj-4QXMLSs,4578 +jedi/third_party/typeshed/third_party/2and3/requests/packages/__init__.pyi,sha256=cnNTJT8mNb7zJ8vT6jKdpiWxIYAZMw3Qz6frelGgzbs,158 +jedi/third_party/typeshed/third_party/2and3/requests/packages/urllib3/__init__.pyi,sha256=noXIj8HyCJNtp4CXGIfsy6-yAi7pT1P4uTn-_vYdGPo,825 +jedi/third_party/typeshed/third_party/2and3/requests/packages/urllib3/_collections.pyi,sha256=aHphg2k55d7e0tqFKmSyIvsa2vRgJNA3rr_GvV28BF8,1535 +jedi/third_party/typeshed/third_party/2and3/requests/packages/urllib3/connection.pyi,sha256=gW4ugIttzymKZSoRvGOSD65mT69TANr35_NBfNIn8mk,1907 +jedi/third_party/typeshed/third_party/2and3/requests/packages/urllib3/connectionpool.pyi,sha256=CQdrc51bqLxZG_Sd_u23SPcIxffQtggfp1sS8FJLiog,3170 +jedi/third_party/typeshed/third_party/2and3/requests/packages/urllib3/contrib/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/third_party/2and3/requests/packages/urllib3/exceptions.pyi,sha256=TxvMswYXJdgim9frBpmQbC5q_Cu8SM37vW2ZFrfvH20,1413 +jedi/third_party/typeshed/third_party/2and3/requests/packages/urllib3/fields.pyi,sha256=ol7EAXCmRCqnANBpCeua5OHNjWGz3aCVf_2j-eIZlgc,442 +jedi/third_party/typeshed/third_party/2and3/requests/packages/urllib3/filepost.pyi,sha256=VdQ66F4SjusF9qTkUP8U0yUbrRqIJv8DWfTISgJ7N3g,244 +jedi/third_party/typeshed/third_party/2and3/requests/packages/urllib3/packages/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/third_party/2and3/requests/packages/urllib3/packages/ssl_match_hostname/__init__.pyi,sha256=8-WNQ8HsF7JWBeN1n0qoPU84HsAouCrHj4R5qrGKrVQ,88 +jedi/third_party/typeshed/third_party/2and3/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.pyi,sha256=jAdLMMvdxdszZCIdVGxRZw7i5waIBqb4RqwketJqMr4,81 +jedi/third_party/typeshed/third_party/2and3/requests/packages/urllib3/poolmanager.pyi,sha256=66XnVPM1snW_kgKDX2_21PwyN-As7xtgAiCEHXf6U14,1309 +jedi/third_party/typeshed/third_party/2and3/requests/packages/urllib3/request.pyi,sha256=chC2ml6eAcw5E2ILdPySz_aZ9OQMeD5nx0hlmYVfkz8,534 
+jedi/third_party/typeshed/third_party/2and3/requests/packages/urllib3/response.pyi,sha256=Qpkf8CpEW5s2pKMxwAVfgz2zQ4miW7kyd-FiOeHyvG8,1788 +jedi/third_party/typeshed/third_party/2and3/requests/packages/urllib3/util/__init__.pyi,sha256=xacNymxEsCZp5wlKo15L8WSYXzhruHcRtGMqamjArZM,615 +jedi/third_party/typeshed/third_party/2and3/requests/packages/urllib3/util/connection.pyi,sha256=XaFYtI2YyZGnOVivtXsLW6jMSb6FKpfIh3aIQInJLR0,188 +jedi/third_party/typeshed/third_party/2and3/requests/packages/urllib3/util/request.pyi,sha256=hbniWmj2f5WG5m6qurjL54K3VzMhAffiSMa-rr1CO0s,227 +jedi/third_party/typeshed/third_party/2and3/requests/packages/urllib3/util/response.pyi,sha256=Q0CWgpQG5zMPxwc0LOr5eQOEfmOqglfpydPfBVxiStc,27 +jedi/third_party/typeshed/third_party/2and3/requests/packages/urllib3/util/retry.pyi,sha256=DWGmbAvdoZc9gL9-7N-wU4A-SmtfWYpKmSRoZFno-kc,1161 +jedi/third_party/typeshed/third_party/2and3/requests/packages/urllib3/util/ssl_.pyi,sha256=VY_E6OYlixXdjHOT_FYYFjaECs7n8BxzU1O9rAvp1hI,691 +jedi/third_party/typeshed/third_party/2and3/requests/packages/urllib3/util/timeout.pyi,sha256=EojIS91DNeDBfY9TONTAuJkoR6LmHGeX3IYNmLZdvWY,499 +jedi/third_party/typeshed/third_party/2and3/requests/packages/urllib3/util/url.pyi,sha256=iXePrO9HE5OJVXeWx0nu4PKX8ZX3ScJt1KgPrYpUtrA,492 +jedi/third_party/typeshed/third_party/2and3/requests/sessions.pyi,sha256=qpxhe7nKCvlYZbWU_HcgynucxH5vvj7D31tEIkNq6R8,4727 +jedi/third_party/typeshed/third_party/2and3/requests/status_codes.pyi,sha256=miO3NECO-3yQmK56zg67HzZSbx1eER1xmKABjoDMuXs,35 +jedi/third_party/typeshed/third_party/2and3/requests/structures.pyi,sha256=7Dr21XygxBfDVhOr83FfWEiJNt_W6uNzOmyLfermE_o,968 +jedi/third_party/typeshed/third_party/2and3/requests/utils.pyi,sha256=cYgq8lcqt9s25H2SfQ_TuTvZfGFMlpJEH6yW43yX_IE,1989 +jedi/third_party/typeshed/third_party/2and3/retry/__init__.pyi,sha256=oURuYy9CoiIEbeBziwmNwAexn1hXNQlhL1_lH15UZZw,32 +jedi/third_party/typeshed/third_party/2and3/retry/api.pyi,sha256=fnNyTJFt_8RiZ5QT87GTrUPQvH8Lqrwm7WXT74s51-g,935 +jedi/third_party/typeshed/third_party/2and3/simplejson/__init__.pyi,sha256=r6sZ4B_hGIz2y83FiFs5WvQweXydgb2GfykOzF0Gd3g,538 +jedi/third_party/typeshed/third_party/2and3/simplejson/decoder.pyi,sha256=ijcE8aSOnNvjIOQ0s3eiKjq7GYcIOGe9_lESZPAmweM,229 +jedi/third_party/typeshed/third_party/2and3/simplejson/encoder.pyi,sha256=Ju-aWi4B6WNQ3G-6n7VQw7idFNeKMhS9g_b7XcHSXLo,264 +jedi/third_party/typeshed/third_party/2and3/simplejson/scanner.pyi,sha256=_AUT1GJRNYopQ98J8rRXtig5nCunjXLoiCf5gN9dpXo,262 +jedi/third_party/typeshed/third_party/2and3/singledispatch.pyi,sha256=snB5T00WMlJzaeON3HT586VTC_rlHBZ5d4Q-Seaj1VM,624 +jedi/third_party/typeshed/third_party/2and3/slugify/__init__.pyi,sha256=oS6sazgR3xHmORcsqDRk6ZulbTzRF1UJaDw1V2i9428,46 +jedi/third_party/typeshed/third_party/2and3/slugify/slugify.pyi,sha256=dRDus-8qkVm5OBBP31a-RdSahYcrn9HGYJcgTa_LPBI,561 +jedi/third_party/typeshed/third_party/2and3/slugify/special.pyi,sha256=Nw6LzOLeZREXSk-gK6A_pqAcurWVlbnE-TAzw6pkMwk,279 +jedi/third_party/typeshed/third_party/2and3/tabulate.pyi,sha256=JJ7kYLQU-3I7s68wXecrchQU6zKeYPHH7mb6yGdcsHE,1413 +jedi/third_party/typeshed/third_party/2and3/termcolor.pyi,sha256=bvMVbmC_0sADtLLJ1gTgz0qy7u6z0nxMmPyumxJurUM,350 +jedi/third_party/typeshed/third_party/2and3/toml.pyi,sha256=wqxUoqPomQnEzd6_LnLALaHqfgqKJH2O4wbLkCBA61M,697 +jedi/third_party/typeshed/third_party/2and3/typing_extensions.pyi,sha256=Yv2B_WZlMZ9ZF5LzirIeruXF56kHMW22DLFGtnx_3l8,3358 
+jedi/third_party/typeshed/third_party/2and3/tzlocal/__init__.pyi,sha256=gZwxQC65kbnoMEOnhjkBKqvx2aJayhHsQ52R1s_Bl2E,104 +jedi/third_party/typeshed/third_party/2and3/ujson.pyi,sha256=YuEF8oRji3rvUknBhjJBa8bJqdZD2H07ZGDIUwyjSP0,938 +jedi/third_party/typeshed/third_party/2and3/werkzeug/__init__.pyi,sha256=Z4tIEffX6wFRXuakZjYqPUNOnfUbr7STWtzAAWlF11E,5307 +jedi/third_party/typeshed/third_party/2and3/werkzeug/_compat.pyi,sha256=XYwsJu2d8JZH41AXqSDlRaODFAG8hzsrIVrjlnHAaDE,1271 +jedi/third_party/typeshed/third_party/2and3/werkzeug/_internal.pyi,sha256=xff0Vbs8wNEN9bykzs7k9bgLdhDY-DiIUj1NbsY0ZOE,644 +jedi/third_party/typeshed/third_party/2and3/werkzeug/_reloader.pyi,sha256=QCQ4dGw3MM4rhwKmKntyLJ7JB-neMbnRuRrm2scpZgE,826 +jedi/third_party/typeshed/third_party/2and3/werkzeug/contrib/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/third_party/2and3/werkzeug/contrib/atom.pyi,sha256=LAPRofmeDhSpU418KTzoATp5cB2D9daWgcgP_A2YrDI,1136 +jedi/third_party/typeshed/third_party/2and3/werkzeug/contrib/cache.pyi,sha256=qzO8UcufWrYMdYREIG3H0rutk922G-3guC4zNJoWFlI,3375 +jedi/third_party/typeshed/third_party/2and3/werkzeug/contrib/fixers.pyi,sha256=mvPRIfbtHVRE98seLNNyHSonlGnl7uxSZC0s9TfSEa0,1650 +jedi/third_party/typeshed/third_party/2and3/werkzeug/contrib/iterio.pyi,sha256=2yRg8voINYQF23YfRkpGPXaaaX4jpupooKthKwfX4kU,1202 +jedi/third_party/typeshed/third_party/2and3/werkzeug/contrib/jsrouting.pyi,sha256=4s_tRK57Yu_n1O1Zk85Nty48UaBHu6AsgeHHxLTS0ko,325 +jedi/third_party/typeshed/third_party/2and3/werkzeug/contrib/limiter.pyi,sha256=qLbYIPMTBsgaYe74m3NerbQ1AkW_57LKwY0iW_zaIXU,192 +jedi/third_party/typeshed/third_party/2and3/werkzeug/contrib/lint.pyi,sha256=vnPiJs5jXekVZmugPlmkXITga2URy2gh6vngdxkn_Ls,32 +jedi/third_party/typeshed/third_party/2and3/werkzeug/contrib/profiler.pyi,sha256=HtjfNVB5ydRd_FfwMkAzJ05aHtFcN2Eecjf2nHQfJVM,315 +jedi/third_party/typeshed/third_party/2and3/werkzeug/contrib/securecookie.pyi,sha256=EUmDrvenhGHAJ9GUO20vYOlwNon9XoAVABBxREOqnBs,1212 +jedi/third_party/typeshed/third_party/2and3/werkzeug/contrib/sessions.pyi,sha256=7V-lJ3FrbkOIp0RK42KtlhntH3SinErVLH78sgu5Zes,2074 +jedi/third_party/typeshed/third_party/2and3/werkzeug/contrib/testtools.pyi,sha256=OOwXK52N2t35kMCMl0B4zBK-d85PcqGG_nOAbhsEAD8,188 +jedi/third_party/typeshed/third_party/2and3/werkzeug/contrib/wrappers.pyi,sha256=AdT0yvz1TaiY0mn6zlt_ET3DKsTywVVjkErC769c6L0,603 +jedi/third_party/typeshed/third_party/2and3/werkzeug/datastructures.pyi,sha256=nzcm32yD5-va5Toukm8sXENL4F6AUFxyAD3iAkF2OQg,15587 +jedi/third_party/typeshed/third_party/2and3/werkzeug/debug/__init__.pyi,sha256=Th_QGEjQE3niHm77xHcmtgRUFZxFd2IsIUhqNGymRdE,1384 +jedi/third_party/typeshed/third_party/2and3/werkzeug/debug/console.pyi,sha256=DBWA7QK0dluokniouOizZ1h5oAIcWi47N7i6UiSDRiU,1207 +jedi/third_party/typeshed/third_party/2and3/werkzeug/debug/repr.pyi,sha256=dqtVFNOfcjINAMzaJ-tHiW6SO37ZwVtVT1J7SS7yWh0,846 +jedi/third_party/typeshed/third_party/2and3/werkzeug/debug/tbtools.pyi,sha256=tbrAcENEbWQEOxxSrEKr_5GA3i-OaKn7K5QPA4Sk6TU,1687 +jedi/third_party/typeshed/third_party/2and3/werkzeug/exceptions.pyi,sha256=xvGHuWowxhPqNfh4VqLPjTsRubPoapE3-Jo_k7T3UMk,4942 +jedi/third_party/typeshed/third_party/2and3/werkzeug/filesystem.pyi,sha256=te3jyfyiDfsnFU0MEYw-wB5jBgWgw4VAbIpQzGAs9Vs,169 +jedi/third_party/typeshed/third_party/2and3/werkzeug/formparser.pyi,sha256=cr1vdd95MjSLCA0VyEutG3F4ezKQpuY54LSj3d03F7M,3702 +jedi/third_party/typeshed/third_party/2and3/werkzeug/http.pyi,sha256=gQCvsTijEmaeJXpTMvKOFt_o-XTAtNtfSggTeBqhSq4,5335 
+jedi/third_party/typeshed/third_party/2and3/werkzeug/local.pyi,sha256=ZWdRsm--5mtPjtmAH_7-0JBQmvQUfAba_VbReVyAVIg,2315 +jedi/third_party/typeshed/third_party/2and3/werkzeug/middleware/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/third_party/2and3/werkzeug/middleware/dispatcher.pyi,sha256=CYiiHkHrQ9Qlc0JmtZvUBWUPxeUJDjaxzwBbriStxo8,451 +jedi/third_party/typeshed/third_party/2and3/werkzeug/middleware/http_proxy.pyi,sha256=5S5vt4U681ugR9lV5G6ZFkopoSw9tHCocHenpeSJAA4,652 +jedi/third_party/typeshed/third_party/2and3/werkzeug/middleware/lint.pyi,sha256=fKmZa4yVYgObsnOwlVaYh7jo9bQrVQu4BA15g2uzeLY,2389 +jedi/third_party/typeshed/third_party/2and3/werkzeug/middleware/profiler.pyi,sha256=_DWwg99Hb3nYORrYOngd_L6LGGV5rWjIC1EXuucQ6LA,569 +jedi/third_party/typeshed/third_party/2and3/werkzeug/middleware/proxy_fix.pyi,sha256=oJCpfNfoF8oKte5Ikmu4XBdYv_4pJqgFGOf86Iq47u8,712 +jedi/third_party/typeshed/third_party/2and3/werkzeug/middleware/shared_data.pyi,sha256=JVOyPz2zNzW2AlLdc0uAQQ10SQ3Ved-rLzv3cVdwVwA,1295 +jedi/third_party/typeshed/third_party/2and3/werkzeug/posixemulation.pyi,sha256=L5e2d7yb54c171JGU8g_r6G4qIDx4Av28BY12iiwT6w,199 +jedi/third_party/typeshed/third_party/2and3/werkzeug/routing.pyi,sha256=dY-lLGVr0rQ-p_y-8xP5papvCEuSBuaFkNeC8ADEN_M,6802 +jedi/third_party/typeshed/third_party/2and3/werkzeug/script.pyi,sha256=ifTEJ5yx1gam9_q9RnsxmiN3W4GRvegb2kRTfZ-z_0g,768 +jedi/third_party/typeshed/third_party/2and3/werkzeug/security.pyi,sha256=utyGUghv_u_Rsb4p48VUyXuDJuLo621MJZuafdW4Gd4,524 +jedi/third_party/typeshed/third_party/2and3/werkzeug/serving.pyi,sha256=gNa8EOCapHswvpWYh4qYN1783afRO18_26L5jYmGRaU,4057 +jedi/third_party/typeshed/third_party/2and3/werkzeug/test.pyi,sha256=ICgRxquqP1H5-8Tlk7UAn2DXcl7IvF2ZV_7LV93SZng,6138 +jedi/third_party/typeshed/third_party/2and3/werkzeug/testapp.pyi,sha256=O0O2-rhExeAFHWIryQcjWpQhGPQfkkU8SuV0uLbbpco,226 +jedi/third_party/typeshed/third_party/2and3/werkzeug/urls.pyi,sha256=CKx_xMwXyR1EXEx7DFrXZIr8OdrpyZ3m2htWiCWGvgQ,2898 +jedi/third_party/typeshed/third_party/2and3/werkzeug/useragents.pyi,sha256=4LzBjciKyh8zZcufKX8SjjvDAu_rrCsEdxr4-LwZbKY,431 +jedi/third_party/typeshed/third_party/2and3/werkzeug/utils.pyi,sha256=kPk0JhxbcvYk-AAWyo3s-8nhsUxm32PpEa5hjVTilHw,1962 +jedi/third_party/typeshed/third_party/2and3/werkzeug/wrappers.pyi,sha256=uWHxu1UBSC9WlMmZ-HbIk71sgRb0-euFZ5lY4xUh5g0,9115 +jedi/third_party/typeshed/third_party/2and3/werkzeug/wsgi.pyi,sha256=1TOv_UGcc16Znog_D-iNwXHnnlIwSfFk97mA_zcL2cY,3030 +jedi/third_party/typeshed/third_party/2and3/yaml/__init__.pyi,sha256=bElbXZu32G81wGc_WxNeXTnPxSStYTaJFRzrPjj8kxU,6559 +jedi/third_party/typeshed/third_party/2and3/yaml/composer.pyi,sha256=Vl74-4GCncmKS-eb06xYTS5KZtmmOl_4DlXceFg_lbY,496 +jedi/third_party/typeshed/third_party/2and3/yaml/constructor.pyi,sha256=K8dllpXyNJoRWMHkbGLYZJvnoNO33n3nv3VCdRWOR8c,3603 +jedi/third_party/typeshed/third_party/2and3/yaml/cyaml.pyi,sha256=1mixUYME26X3XC9R1onrnbH91b5S94KPpnVZRc0Z0CA,2292 +jedi/third_party/typeshed/third_party/2and3/yaml/dumper.pyi,sha256=Q3Byffe7aMWtkkC0wdCgWognnXnQch-sHziRFxQASdo,1548 +jedi/third_party/typeshed/third_party/2and3/yaml/emitter.pyi,sha256=TPp-ubnhpVa6Vp89RdhjjfIMbMoI35XSj5t5_pL5OSE,3802 +jedi/third_party/typeshed/third_party/2and3/yaml/error.pyi,sha256=kYhKOUoSrnEQLvgIvG_qmPKgMQ53ADqFHOPPDTlRTs4,535 +jedi/third_party/typeshed/third_party/2and3/yaml/events.pyi,sha256=IXJuqeHbEQtHZ4mD36QQ1-CAQaLbKua8JEZ-qh7jPKg,1662 
+jedi/third_party/typeshed/third_party/2and3/yaml/loader.pyi,sha256=Q_smlF0bYLSXJ8O9TH93ErQmInWH0S8dqibxbXG6API,767 +jedi/third_party/typeshed/third_party/2and3/yaml/nodes.pyi,sha256=HXEU6EM5Z7O0WqO8hCQbRXW_BCGYAzK68xKTZC5yDJs,685 +jedi/third_party/typeshed/third_party/2and3/yaml/parser.pyi,sha256=0uGiWaCcBAoSQBsXUVRqYGAevPu8GLG5ANORAGS_snE,1664 +jedi/third_party/typeshed/third_party/2and3/yaml/reader.pyi,sha256=i91dNQcOijQHle6687O3xJHWAVy5Sq_k-ko79nolORo,832 +jedi/third_party/typeshed/third_party/2and3/yaml/representer.pyi,sha256=J7rrcGo3XvshSlUyHGbJXMprquSaD3oTHv51VwqWr3Y,2185 +jedi/third_party/typeshed/third_party/2and3/yaml/resolver.pyi,sha256=h32kNW_kLa4Ckxc8DxTHcsFvvgvYKcQQ7lFsV3Pt3s4,786 +jedi/third_party/typeshed/third_party/2and3/yaml/scanner.pyi,sha256=Ly12oPYyJVNrdODBM-F9dVO1EzK3hiiItMgay0hDQAE,3573 +jedi/third_party/typeshed/third_party/2and3/yaml/serializer.pyi,sha256=W27WLPu0g0Cm6vnExbuG3NGLdzUK8tuPM17SSEwY4HU,666 +jedi/third_party/typeshed/third_party/2and3/yaml/tokens.pyi,sha256=lRuHaCHbPMRTOm3WEhanBssRRnJHUiGh1mjNmE4CND8,1792 +jedi/third_party/typeshed/third_party/3/aiofiles/__init__.pyi,sha256=JV9ztGDgZgmwrcgLqJ_R6o8Fgf3cmJWE5MgYjP2BEE0,37 +jedi/third_party/typeshed/third_party/3/aiofiles/base.pyi,sha256=gL3zcEhpb3ZeAHKbouMayq0UIzTDrsE8oTwKU8Yhyq0,1490 +jedi/third_party/typeshed/third_party/3/aiofiles/os.pyi,sha256=G9ZSixzRK7_9tZ5sSjqhp-c7Uy5f8tfI_SEQUwYnScw,1040 +jedi/third_party/typeshed/third_party/3/aiofiles/threadpool/__init__.pyi,sha256=plwVo_uI0AiOap217i7Ef_Gh_TGyOOeJren5WnTIZds,2812 +jedi/third_party/typeshed/third_party/3/aiofiles/threadpool/binary.pyi,sha256=xmn9nxqyzM5XQCr5H8sx2xzW7YkqAOQ159cL1yljyIA,1619 +jedi/third_party/typeshed/third_party/3/aiofiles/threadpool/text.pyi,sha256=X4W8oOVHyTrz32gJ-W_-rYQIw9A92MUd1pp3okUG3xY,1416 +jedi/third_party/typeshed/third_party/3/contextvars.pyi,sha256=vR-ubUfhlQVgLJbJzuVOM5UTc-sFCVSEg2EzcqLMxzI,1405 +jedi/third_party/typeshed/third_party/3/dataclasses.pyi,sha256=KYjW7iSlZ42rrD1Z34CvtIjUSzOTm2lcLozDVSAOkSo,2737 +jedi/third_party/typeshed/third_party/3/docutils/__init__.pyi,sha256=3fBxcSppJr6EOEcUojvflG3Eegg7lv2Qp0dNQQILrP4,63 +jedi/third_party/typeshed/third_party/3/docutils/examples.pyi,sha256=3DheJVl7ojvJt4_YF7b1hRFNExi-vO9IWxtfojfi0uo,80 +jedi/third_party/typeshed/third_party/3/docutils/nodes.pyi,sha256=yxmAtZCJ_mCnQ6pH3u3qACugJ_Tpis4DrrkGGTyIcTk,203 +jedi/third_party/typeshed/third_party/3/docutils/parsers/__init__.pyi,sha256=3fBxcSppJr6EOEcUojvflG3Eegg7lv2Qp0dNQQILrP4,63 +jedi/third_party/typeshed/third_party/3/docutils/parsers/rst/__init__.pyi,sha256=3fBxcSppJr6EOEcUojvflG3Eegg7lv2Qp0dNQQILrP4,63 +jedi/third_party/typeshed/third_party/3/docutils/parsers/rst/nodes.pyi,sha256=3fBxcSppJr6EOEcUojvflG3Eegg7lv2Qp0dNQQILrP4,63 +jedi/third_party/typeshed/third_party/3/docutils/parsers/rst/roles.pyi,sha256=7P4PG1OuDMfSK1DnTQzcOtAeQVqUViHtNXrn3G8hjCo,418 +jedi/third_party/typeshed/third_party/3/docutils/parsers/rst/states.pyi,sha256=qb0d-B857-wAVpdJ7vT06HNQRkpaTDj1YXl3cOWzqVs,115 +jedi/third_party/typeshed/third_party/3/filelock/__init__.pyi,sha256=diWa8QPjqlZ7BhKzJxWLqfFHOgE3XYRWfe06UbvgWlU,1789 +jedi/third_party/typeshed/third_party/3/freezegun/__init__.pyi,sha256=xM4QVDAOY8pmP9Isl_PcX_TU5zDCY7rP0U5dY9pWQzE,44 +jedi/third_party/typeshed/third_party/3/freezegun/api.pyi,sha256=6Infm2WmoBzTquz0uTNPmAYJ9b8sE8cEHuWo2tMaPLY,2266 +jedi/third_party/typeshed/third_party/3/frozendict.pyi,sha256=E89ILrzNgYcRBsm_vnQLOvKuit1t2WhnQVdq60lLeuM,895 
+jedi/third_party/typeshed/third_party/3/jwt/__init__.pyi,sha256=975_sUWTv0BaJ04jZpVTmd7WNFmu25f82KqqMReqI3k,1724 +jedi/third_party/typeshed/third_party/3/jwt/algorithms.pyi,sha256=Vp1NVzBfM8jz0VJGhop3UOnKXa1mmJSNI3u0iv_-N70,4299 +jedi/third_party/typeshed/third_party/3/jwt/contrib/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/third_party/3/jwt/contrib/algorithms/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/third_party/3/jwt/contrib/algorithms/py_ecdsa.pyi,sha256=43zFcZpLRNwzuol7DXZi7jGaQ0M6jDzfyTNeWsuwtRk,251 +jedi/third_party/typeshed/third_party/3/jwt/contrib/algorithms/pycrypto.pyi,sha256=w-Prvq6u6oOOCt6KPuTT1p86AW7cABIOj8iDa00gvyM,252 +jedi/third_party/typeshed/third_party/3/orjson.pyi,sha256=BKaBOXNbLXb2MHYpT8BwCh78j8HagLQq51uZY0Dq5nU,656 +jedi/third_party/typeshed/third_party/3/pkg_resources/__init__.pyi,sha256=h-i-Ph-0RjoeDFw5lowMgeyOK73tbkz_iaP28vyYUsU,12105 +jedi/third_party/typeshed/third_party/3/pkg_resources/py31compat.pyi,sha256=ouFLzqmJqfMmT4orlwN1utMnk2R5vHeXxDSk2FI-7I4,56 +jedi/third_party/typeshed/third_party/3/pyrfc3339/__init__.pyi,sha256=GdEm76Fq93-yKV0jkd5FMP5HR8CCpDV6-1R888e5sI0,79 +jedi/third_party/typeshed/third_party/3/pyrfc3339/generator.pyi,sha256=Y8yuzXtNLGik4y6l4TgvvU9ehZGNUK0Pe-MZVvddL4A,139 +jedi/third_party/typeshed/third_party/3/pyrfc3339/parser.pyi,sha256=E4529DNPk1MqcLAd-DgDjRo9onMQzi6rquEEIDZgZ_A,118 +jedi/third_party/typeshed/third_party/3/pyrfc3339/utils.pyi,sha256=wxuOjaaxzHH_PlMf8YxP4oVh5Oq_QayX3Kdfz9CHD_8,447 +jedi/third_party/typeshed/third_party/3/six/__init__.pyi,sha256=43jcx_IPU45XeUJ44qydi-un8-jRZFH4kya6OUlkuvw,4169 +jedi/third_party/typeshed/third_party/3/six/moves/BaseHTTPServer.pyi,sha256=vtBUJUabTrIVK97Cn0MhLUhHSlbmHB8QgQlWwadH-6w,26 +jedi/third_party/typeshed/third_party/3/six/moves/CGIHTTPServer.pyi,sha256=vtBUJUabTrIVK97Cn0MhLUhHSlbmHB8QgQlWwadH-6w,26 +jedi/third_party/typeshed/third_party/3/six/moves/SimpleHTTPServer.pyi,sha256=vtBUJUabTrIVK97Cn0MhLUhHSlbmHB8QgQlWwadH-6w,26 +jedi/third_party/typeshed/third_party/3/six/moves/__init__.pyi,sha256=Vsp6DhxVU4xIcJ8eDyruQNQz2a_ERZmnHjA2_5MOyTk,2363 +jedi/third_party/typeshed/third_party/3/six/moves/_dummy_thread.pyi,sha256=QcsaN0JxBr9ArwQnzhmN06G8dMTpqIuIbapJvWWr8IQ,28 +jedi/third_party/typeshed/third_party/3/six/moves/_thread.pyi,sha256=An3Es1KPMtE47GK-HKV_WnuG7kfoT5bh-bn_SfOQ5Pc,22 +jedi/third_party/typeshed/third_party/3/six/moves/builtins.pyi,sha256=VVjpGGLJ2CYwC3lYePGY6TLTEhwcdru3YV-nV2ZAzL8,23 +jedi/third_party/typeshed/third_party/3/six/moves/cPickle.pyi,sha256=pezOsQZrW9XS1R09Ote5u1Wtw9FHC0k8Kjp4g44_PgI,21 +jedi/third_party/typeshed/third_party/3/six/moves/collections_abc.pyi,sha256=9kznK-Qq5Rqt-V--bq6awTNs5NISIlTFiOZLVI4BvBA,30 +jedi/third_party/typeshed/third_party/3/six/moves/configparser.pyi,sha256=Wp5Y7Z134PHLahfawLJDB7WyIBpdLooaGKLQmEr7veQ,27 +jedi/third_party/typeshed/third_party/3/six/moves/email_mime_base.pyi,sha256=WcWEleCKHROrfdXpRuKABrT_Va1hx90NY_kxYeul3Sk,30 +jedi/third_party/typeshed/third_party/3/six/moves/email_mime_multipart.pyi,sha256=HRKWFU9qh95-mEE22_2NzEKL6lx7ynvhcfHjUcYWuZ8,35 +jedi/third_party/typeshed/third_party/3/six/moves/email_mime_nonmultipart.pyi,sha256=n5hD7R_rktJj3hiHYzEqr3CJCHSW4ikfObKHmUrXBw0,38 +jedi/third_party/typeshed/third_party/3/six/moves/email_mime_text.pyi,sha256=M7mb9V3f5JUut8yf8UfL3rG4XPr-Lr692DGjk1OR9d4,30 +jedi/third_party/typeshed/third_party/3/six/moves/html_entities.pyi,sha256=YkFcpA-UjTm7ps8gp1Xs6Ye9eu-fRHUlSrZPc00LZuk,28 
+jedi/third_party/typeshed/third_party/3/six/moves/html_parser.pyi,sha256=EhnBFGx0nBd-ZHMy53ihoemWud0xnNYYYzQDrqWZ7SM,26 +jedi/third_party/typeshed/third_party/3/six/moves/http_client.pyi,sha256=a-UAXTgUTrJNFFiQWThbgVvOsqCJXXiFTxjOG4QgbiE,26 +jedi/third_party/typeshed/third_party/3/six/moves/http_cookiejar.pyi,sha256=_qfFwqs5DnvAOqLWCAdCzWjnwVFi2tkRjypRcow1Kgw,29 +jedi/third_party/typeshed/third_party/3/six/moves/http_cookies.pyi,sha256=dKSPvohzW_QPkOUb0gxj3rsshfRDYb9krTqjID3wN68,27 +jedi/third_party/typeshed/third_party/3/six/moves/queue.pyi,sha256=_rNUYjj1lkl5pRaQP4GWCuWEHBSetCgHhvSnWjgBuhk,20 +jedi/third_party/typeshed/third_party/3/six/moves/reprlib.pyi,sha256=gzyGHWv3b10R17IbpgllskSTyulpq6RWGb7I5KAbSh0,22 +jedi/third_party/typeshed/third_party/3/six/moves/socketserver.pyi,sha256=GWp7BzDMpq3JNfA3H3Pn0iyENzAcy5ufcvuvlkEzmFg,27 +jedi/third_party/typeshed/third_party/3/six/moves/tkinter.pyi,sha256=R-kj-ZjyE6cnPhkAhJLQIA2zyggMRHyf4azpH_WtXNo,22 +jedi/third_party/typeshed/third_party/3/six/moves/tkinter_commondialog.pyi,sha256=piW_7DIKFPiFl8awGTKEBkW-toBwMu7ySfSgxT39Qsc,35 +jedi/third_party/typeshed/third_party/3/six/moves/tkinter_constants.pyi,sha256=sB-tEEYJXZlnQEvgUxsHYFp3yyp3F7NtblS3_hRFVFM,32 +jedi/third_party/typeshed/third_party/3/six/moves/tkinter_dialog.pyi,sha256=Lk_TOa4m8kLSRZRs2-zLtgFnpbtkGcs2eu1YgCjNzmM,29 +jedi/third_party/typeshed/third_party/3/six/moves/tkinter_filedialog.pyi,sha256=znHuWqubMwXiONWP1bhNRmAXUVcHdXn9B8AqoJu4EgY,33 +jedi/third_party/typeshed/third_party/3/six/moves/tkinter_tkfiledialog.pyi,sha256=znHuWqubMwXiONWP1bhNRmAXUVcHdXn9B8AqoJu4EgY,33 +jedi/third_party/typeshed/third_party/3/six/moves/tkinter_ttk.pyi,sha256=4JCeiL-sndFy8xykanaUTbW3-ESBr4w8Dd1gOMAvrag,26 +jedi/third_party/typeshed/third_party/3/six/moves/urllib/__init__.pyi,sha256=F_1V8NcR4jGkws85IUurYLi4JnGh7_HttdVHvj8cQZM,217 +jedi/third_party/typeshed/third_party/3/six/moves/urllib/error.pyi,sha256=tOHyCWru4FB-CVZpdZ5tzT5jUW7R6e1NHrm_AOEx5Zo,116 +jedi/third_party/typeshed/third_party/3/six/moves/urllib/parse.pyi,sha256=LYJLXIl0_B1NV-7WuGMWjM9E12zpMbsB0SMqi4ubpR4,981 +jedi/third_party/typeshed/third_party/3/six/moves/urllib/request.pyi,sha256=Y9e1cIOCqpbA9u9wdHkuk7XkhKW4C_BR-th2joh4rKM,1639 +jedi/third_party/typeshed/third_party/3/six/moves/urllib/response.pyi,sha256=MLuhuwcVdryiGU6pB2rkOWjdFnFcm7NsXJxqFt9-YlI,389 +jedi/third_party/typeshed/third_party/3/six/moves/urllib/robotparser.pyi,sha256=WK-Nrt7QFCWmAxfbrK0Mecw9NZur54H8AoYbslX6vSg,66 +jedi/third_party/typeshed/third_party/3/six/moves/urllib_error.pyi,sha256=ZLiDEtiqtoYYbNDYF4LjnxKRd_uFft6Yi5QQyNEkZm8,27 +jedi/third_party/typeshed/third_party/3/six/moves/urllib_parse.pyi,sha256=PQR8avzMMvUSLV96WLv3J4leuJpKEUBoo7vDzP6M848,27 +jedi/third_party/typeshed/third_party/3/six/moves/urllib_request.pyi,sha256=8WFe7ycArSuM6wJfgcXWLDRKNsymd0UlxWlflszb2yk,30 +jedi/third_party/typeshed/third_party/3/six/moves/urllib_response.pyi,sha256=dokFMleMVEVFVxBgSkrcn4f4yM7RhR3zkk0iDQGOC_U,31 +jedi/third_party/typeshed/third_party/3/six/moves/urllib_robotparser.pyi,sha256=BiNO0kuoX9quQRDQsnPLr04VZLHOj57CmrJJN5OuBn4,33 +jedi/third_party/typeshed/third_party/3/typed_ast/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jedi/third_party/typeshed/third_party/3/typed_ast/ast27.pyi,sha256=8l8h1iTl2STMguvaHgJ6LObWVTN-vi7jkq7F7grliuY,6949 +jedi/third_party/typeshed/third_party/3/typed_ast/ast3.pyi,sha256=rBHvYHpYGPkPYcSCvtsS5PvfUScfOXa5NLzA0sLnjDg,7946 
+jedi/third_party/typeshed/third_party/3/typed_ast/conversions.pyi,sha256=fNBvV3U3qM1ehj0f9uO9xPT9XL43AiT7v51N2S39Qf0,71 +jedi/third_party/typeshed/third_party/3/waitress/__init__.pyi,sha256=AyhjsF-JhGwHdZo_BHPZnAqQg5p6I4yrQ-GAzbIgiCE,308 +jedi/third_party/typeshed/third_party/3/waitress/adjustments.pyi,sha256=Ci0sGEaI8sgN3oQ4m63cI7_fk6b9YcQCle1SEIpkMLo,2162 +jedi/third_party/typeshed/third_party/3/waitress/buffers.pyi,sha256=BAUmNMbRRvJmXspos4XTqxpGuPcG-m8_52-ZzP1RsBU,2179 +jedi/third_party/typeshed/third_party/3/waitress/channel.pyi,sha256=yEVhDA-LraBWtHs-Qh1btontCRgfNj1crB76Mj1UeVM,1843 +jedi/third_party/typeshed/third_party/3/waitress/compat.pyi,sha256=DEqpWPIWRpAINum_lKdh8-FvKsx-8J-TnGcubrxzA6k,719 +jedi/third_party/typeshed/third_party/3/waitress/parser.pyi,sha256=xxkLG3DSOAG0gsULfgKnIDWRqDPgznvkSPm4XtD6c4o,1442 +jedi/third_party/typeshed/third_party/3/waitress/proxy_headers.pyi,sha256=dg-1E5AEYcDAdcehvBlLT_3ATo94vfS2f8GZwBZ46do,1100 +jedi/third_party/typeshed/third_party/3/waitress/receiver.pyi,sha256=ZOcx0IC-LJC3W0BwO2cZfeP7xzlFEm9ESca3oxsIm_4,1044 +jedi/third_party/typeshed/third_party/3/waitress/rfc7230.pyi,sha256=Q31Np-HnKfD9_-8UHRr4KvzdviLI4HjaIacvMKQGBFc,226 +jedi/third_party/typeshed/third_party/3/waitress/runner.pyi,sha256=ReRs3WT9zH73aXEgBq9O7e_YHDwvTlhtsZ2G-m302Hk,469 +jedi/third_party/typeshed/third_party/3/waitress/server.pyi,sha256=BLVopN0ySFAhfTjaSg-9ctygNpoMxToxGNzog_2eF2E,3500 +jedi/third_party/typeshed/third_party/3/waitress/task.pyi,sha256=uZEpvbFNjJjGtzx9HMt2vgRG1VhgzUIT7aoEN3roOGk,2216 +jedi/third_party/typeshed/third_party/3/waitress/trigger.pyi,sha256=BJ6rU86VdsB281MqGmHwUkyvf661Za4H_qhSHi5nivU,1079 +jedi/third_party/typeshed/third_party/3/waitress/utilities.pyi,sha256=jUJUiahQzp3KRB3zgfX1Zx1ebKgK-81Nz7e32ez-s4E,1875 +jedi/third_party/typeshed/third_party/3/waitress/wasyncore.pyi,sha256=mOMVSFL7majEQSv_8fEMKMGqtAXrGEqipcZ_lkQHCOY,4059 +jedi/utils.py,sha256=UDLfGghM2nrkbs1GIQR_aslkMrctH1TzOgZoMFUxtc4,4680 diff --git a/evalkit_tf437/lib/python3.10/site-packages/jedi-0.19.1.dist-info/top_level.txt b/evalkit_tf437/lib/python3.10/site-packages/jedi-0.19.1.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..86c1cb19e2763e3c2e7bab63fc0971722ead7fdf --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/jedi-0.19.1.dist-info/top_level.txt @@ -0,0 +1 @@ +jedi diff --git a/evalkit_tf437/lib/python3.10/site-packages/svgwrite-1.4.3.dist-info/METADATA b/evalkit_tf437/lib/python3.10/site-packages/svgwrite-1.4.3.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..24241123e6f3102631bf0d589840b785d2ad3402 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/svgwrite-1.4.3.dist-info/METADATA @@ -0,0 +1,285 @@ +Metadata-Version: 2.1 +Name: svgwrite +Version: 1.4.3 +Summary: A Python library to create SVG drawings. 
+Home-page: http://github.com/mozman/svgwrite.git +Author: Manfred Moitzi +Author-email: me@mozman.at +License: MIT License +Download-URL: http://github.com/mozman/svgwrite/releases +Platform: OS Independent +Classifier: Development Status :: 7 - Inactive +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Intended Audience :: Developers +Classifier: Topic :: Multimedia :: Graphics +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Provides: svgwrite +Requires-Python: >=3.6 +License-File: LICENSE.TXT + +svgwrite +======== + +This package is inactive! No new features will be added, there will +be no change of behavior, just bugfixes will be merged. + +Abstract +-------- + +A Python library to create SVG drawings. + +a simple example:: + + import svgwrite + + dwg = svgwrite.Drawing('test.svg', profile='tiny') + dwg.add(dwg.line((0, 0), (10, 0), stroke=svgwrite.rgb(10, 10, 16, '%'))) + dwg.add(dwg.text('Test', insert=(0, 0.2), fill='red')) + dwg.save() + +for more examples see: examples.py + +As the name `svgwrite` implies, `svgwrite` creates new SVG drawings, it does not read existing drawings and also does +not import existing drawings, but you can always include other SVG drawings by the entity. + +`svgwrite` is a pure Python package and has no external dependencies. + +Installation +------------ + +with pip:: + + pip install svgwrite + +or from source:: + + python setup.py install + + +Documentation +------------- + +http://readthedocs.org/docs/svgwrite/ + +svgwrite can be found on GitHub.com at: + +http://github.com/mozman/svgwrite.git + +Contact +------- + +svgwrite@mozman.at + +NEWS +==== + +Version 1.4.3 - 2022-07-14 +-------------------------- + +* BUGFIX: `#114 `_ Polyline validation by @normanrz + +Version 1.4.2 - 2022-03-23 +-------------------------- + +* This package is INACTIVE! + + * No new features will be added. + * There will be no change of behavior. + * Only bugfixes will be merged. + +* Merged some contributions and bugfixes. + +Version 1.4.1 - 2021-01-15 +-------------------------- + +* This package is in maintenance mode, no new features will be added, there will + be no change of behavior, just bugfixes will be merged. +* Merged some contributions and bugfixes. 
+ +Version 1.4 - 2020-03-28 +------------------------ + +* Requires Python 3.6 +* Removed Python2 support +* Optimized/faster ``svgparser.py`` module by Florian Festi +* Removed batch files to run tests on Windows, use ``pytest`` or ``tox`` instead + +Version 1.3.1 - 2019-06-28 +-------------------------- + +* BUGFIX: changed license of module shapes.py to MIT license + +Version 1.3.0 - 2019-06-23 +-------------------------- + +* Dropping Python 2 support in v1.4 +* NEW: easy stylesheet embedding: `Drawing.embed_stylesheet(content)`, see example `using_fonts.py `_ +* NEW: embedded local fonts: `Drawing.embed_font(fontname, filename)`, see example using_fonts.py +* NEW: embedded web fonts: `Drawing.embed_google_web_font(fontname, uri)`, see example using_fonts.py +* NEW: shapes extension by `Christof Hanke `_ +* NEW: for write(), save() and saveas(), indentation for pretty print is configurable by `Mitar `_ +* BUGFIX: converted regex strings in tests to raw strings by `tirkarthi `_ + +Version 1.2.1 - 2018-11-16 +-------------------------- + +* BUGFIX: Python 2 pretty print encoding issue + +Version 1.2.0 - 2018-10-27 +-------------------------- + +* NEW: Inkscape extension - support for Inkscape layers +* BUGFIX: compatibility with Python 3.7 by fixing re.split() usage + +Version 1.1.12 - 2017-12-27 +--------------------------- + +* BUGFIX: values attribute for element animate, accepts any semicolon separated values + +Version 1.1.11 - 2017-05-31 +--------------------------- + +* CHANGED: e.set_markers((sm, mm, em)), accepts None/False values to set markers individually, like just set end + marker: s.set_markers((None, False, em)) +* BUGFIX: text-decoration attribute accepts multiple values like 'underline overline' + +Version 1.1.10 - 2017-01-28 +--------------------------- + +* NEW: added `pretty` argument to `Drawing.write`, `Drawing.save` and `Drawing.saveas` to produce easy to read XML + output, svgwrite reparses the output string with minidom, this reduces the performance because this process requires + memory and runtime, default value for `pretty` is False. +* Moved repository to GitHub.com: http://github.com/mozman/svgwrite.git + +Version 1.1.9 - 2016-10-26 +-------------------------- + +* NEW: added solidColor paint server (only valid in the Tiny 1.2 profile) + +Version 1.1.8 - 2016-05-31 +-------------------------- + +* BUGFIX: None checks: 'if value:' -> 'if value is not None:' + +Version 1.1.7 - 2016-05-22 +-------------------------- + +* BUGFIX: color accepts percentage values as floats like "rgb(10.2%, 3.78%, 20%)" + +Version 1.1.6 - 2014-05-30 +-------------------------- + +* BUGFIX: sign for offset-value wasn't optional + +Version 1.1.5 - 2014-03-26 +-------------------------- + +* BUGFIX: xml serialization for CPython 3.4.0 + +Version 1.1.4 - 2014-03-16 +-------------------------- + +* simplified path parser +* pyparsing as external dependency (by jenselme) + +Version 1.1.3 - 2013-10-01 +-------------------------- + +* updated pyparsing for Python 3 to version 2.0.1 (prior version caused memory leaks) +* BUGFIX: utf8 to unicode encoding error for Python 2.7 +* Tests for Python 3 require CPython3.3 or newer, using the 'u' prefix. 
+ +Version 1.1.2 - 2013-01-08 +-------------------------- + +* prevent setup.py from compiling all modules - error with 'pyparsing_py2.py' and Python3 +* BUGFIX: all tests run with CPython3.3 + +Version 1.1.1 - 2012-08-15 +-------------------------- + +* License changed to MIT License +* tested with CPython2.7, CPython3.2, CPython3.3 and pypy-1.9 on Win7 Pro 32-bit +* BUGFIX: dwg.animateTranform() -> dwg.animateTransform() +* BUGFIX: in examples, replaced width and height params by size parameter +* added examples +* edit docs + +Version 1.0.1 - 2012-06-08 +-------------------------- + +* added inline stylesheets +* added examples created by Lawrence Tattrie + +Version 1.0.0 - 2012-05-27 +-------------------------- + +* stable +* tested with CPython 2.7, CPython 3.2, pypy-1.8 +* added script tag - thx to jmahmood +* docs also available at: http://readthedocs.org/docs/svgwrite + +Version 0.2.4 - 2011-12-30 +-------------------------- + +* beta version +* Python 2.7: all strings will be converted by the unicode() function, for strings containing none-ascii-characters use + prefix ``u""`` or better use ``from __future__ import unicode_literals``, because this is Python 3 compatible. +* tested with CPython 2.7, CPython 3.2, and PyPy 1.7 +* BUGFIX: color parsing accepts white spaces in ``rgb()`` like ``rgb(0, 0, 0)`` + +Version 0.2.3 - 2010-11-13 +-------------------------- + +* beta version +* Python 3.1 support +* splitted examples.py into several files and moved them to the subdir 'examples' + +Version 0.2.2 - 2010-11-05 +-------------------------- + +* alpha version +* removed 'attribs' parameter from all constructors +* new elements: Set, Animate, AnimateMotion, AnimateColor, AnimateTransform, all filter elements +* added set_desc(title, desc), set_metadata(xmldata) to BaseElement class +* moved content of interfaces.py to mixins.py, (ITransform -> Transform and so on) + +Version 0.2.1 - 2010-10-31 +-------------------------- + +* alpha version +* new elements: Marker, ClipPath, Mask +* paint service: LinearGradient, RadialGradient, Pattern + +Version 0.2.0 - 2010-10-24 +-------------------------- + +* alpha version +* validator rewritten as validator2.py +* debug and profile options separated for each drawing object +* important change: create objects with factory functions of the *Drawing* class: drawing.(...) 
+* added mixins for setting stroke and fill properties +* new elements: Hyperlink, Image, TextArea, + +Version 0.1.0 - 2010-09-26 +-------------------------- + +* alpha version +* new elements: + + * basic shapes: Line, Rect, Circle, Ellipse, Polyline, Polygon, Path + * text elements: Text, TSpan, TRef, TextPath + * container elements: Group, Symbol, SVG, Use, Defs + +* for examples see: examples.py + + diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/_flash_attn/__pycache__/bert_padding.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/xformers/_flash_attn/__pycache__/bert_padding.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..769ccc2ca5e1c4f5578b1c2bf09c5773146f2ab4 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/xformers/_flash_attn/__pycache__/bert_padding.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/_flash_attn/__pycache__/flash_attn_triton.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/xformers/_flash_attn/__pycache__/flash_attn_triton.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e68e2382275b591c8019a7dde00be3f29df8009 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/xformers/_flash_attn/__pycache__/flash_attn_triton.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/_flash_attn/__pycache__/flash_attn_triton_og.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/xformers/_flash_attn/__pycache__/flash_attn_triton_og.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d325857953113c6ec0b4ee52eb3a6942cc62b9ac Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/xformers/_flash_attn/__pycache__/flash_attn_triton_og.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/_flash_attn/__pycache__/flash_blocksparse_attention.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/xformers/_flash_attn/__pycache__/flash_blocksparse_attention.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e176c40ea24e64beb44b355d5d76e1991c8babbd Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/xformers/_flash_attn/__pycache__/flash_blocksparse_attention.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/_flash_attn/__pycache__/flash_blocksparse_attn_interface.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/xformers/_flash_attn/__pycache__/flash_blocksparse_attn_interface.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6da6a36e38d70611d33a5095e166d3fd799c8953 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/xformers/_flash_attn/__pycache__/flash_blocksparse_attn_interface.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/_flash_attn/__pycache__/fused_softmax.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/xformers/_flash_attn/__pycache__/fused_softmax.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a17fadd9444e8ac33091cc25a26728a77742bd53 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/xformers/_flash_attn/__pycache__/fused_softmax.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/_flash_attn/losses/__pycache__/__init__.cpython-310.pyc 
b/evalkit_tf437/lib/python3.10/site-packages/xformers/_flash_attn/losses/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..036969e0de173a75b934bf33e642baa61ad2b29c Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/xformers/_flash_attn/losses/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/_flash_attn/losses/cross_entropy.py b/evalkit_tf437/lib/python3.10/site-packages/xformers/_flash_attn/losses/cross_entropy.py new file mode 100644 index 0000000000000000000000000000000000000000..93c8e961dc918298bccc361c5676f109be05a0cb --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/xformers/_flash_attn/losses/cross_entropy.py @@ -0,0 +1,63 @@ +# Copyright (c) 2023, Tri Dao. + +import torch +import torch.nn as nn + +from flash_attn.ops.triton.cross_entropy import cross_entropy_loss + + +class CrossEntropyLoss(nn.Module): + def __init__( + self, + ignore_index=-100, + reduction="mean", + label_smoothing=0.0, + lse_square_scale=0.0, + inplace_backward=False, + process_group=None, + ): + """ + Arguments: + ignored_index: int. If labels == ignored_index, the loss is set to 0.0. + label_smoothing: float + lse_square_scale: float. If > 0, we add lse_square_scale * lse(logits) ^ 2 to the loss. + This is also referred to as "z-loss". + inplace_backward: bool. If True, we do the backward pass in-place by modifying the logits. + This saves memory. + process_group: if not None, we're doing Tensor Parallel: each process is responsible for + one part of the vocab. The loss will be aggregated across processes. + """ + super().__init__() + if reduction not in ["mean", "none", "sum"]: + raise NotImplementedError("Only support reduction = 'mean' or 'none' or 'sum'") + self.ignore_index = ignore_index + self.reduction = reduction + self.label_smoothing = label_smoothing + self.lse_square_scale = lse_square_scale + self.inplace_backward = inplace_backward + self.process_group = process_group + + def forward(self, input, target): + """ + Arguments: + input: (batch, vocab_size) + target: (batch,) + Returns: + losses: (batch,) if reduction is 'none', else (1,), dtype float + """ + assert input.is_cuda and target.is_cuda, "Only support CUDA tensors" + loss = cross_entropy_loss( + input, + target, + label_smoothing=self.label_smoothing, + lse_square_scale=self.lse_square_scale, + ignored_index=self.ignore_index, + inplace_backward=self.inplace_backward, + process_group=self.process_group, + ) + if self.reduction == "mean": + return loss.sum() / (target != self.ignore_index).sum() + elif self.reduction == "sum": + return loss.sum() + else: + return loss diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/_flash_attn/utils/__pycache__/distributed.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/xformers/_flash_attn/utils/__pycache__/distributed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b73914a36441d0aaf6e17b4441b0fd784e5d13c2 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/xformers/_flash_attn/utils/__pycache__/distributed.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/_flash_attn/utils/distributed.py b/evalkit_tf437/lib/python3.10/site-packages/xformers/_flash_attn/utils/distributed.py new file mode 100644 index 0000000000000000000000000000000000000000..74c55279645cd0fd687584bc1b7374c8c3c73e56 --- /dev/null +++ 
b/evalkit_tf437/lib/python3.10/site-packages/xformers/_flash_attn/utils/distributed.py @@ -0,0 +1,144 @@ +from typing import Optional + +import torch +from torch import Tensor +from torch.distributed import ProcessGroup + +# `all_gather_into_tensor` and `reduce_scatter_tensor` are new placeholders for +# `_all_gather_base` and `_reduce_scatter_base`. They require the most recent +# version of PyTorch. The following 4 lines are for backward compatibility with +# older PyTorch. +if "all_gather_into_tensor" not in dir(torch.distributed): + torch.distributed.all_gather_into_tensor = torch.distributed._all_gather_base +if "reduce_scatter_tensor" not in dir(torch.distributed): + torch.distributed.reduce_scatter_tensor = torch.distributed._reduce_scatter_base + + +# Raw operation, does not support autograd, but does support async +def all_gather_raw(input_: Tensor, process_group: ProcessGroup, async_op: bool = False): + world_size = torch.distributed.get_world_size(process_group) + output = torch.empty( + world_size * input_.shape[0], *input_.shape[1:], dtype=input_.dtype, device=input_.device + ) + handle = torch.distributed.all_gather_into_tensor( + output, input_.contiguous(), group=process_group, async_op=async_op + ) + return output, handle + + +# Raw operation, does not support autograd, but does support async +def reduce_scatter_raw(input_: Tensor, process_group: ProcessGroup, async_op: bool = False): + world_size = torch.distributed.get_world_size(process_group) + assert input_.shape[0] % world_size == 0 + output = torch.empty( + input_.shape[0] // world_size, *input_.shape[1:], dtype=input_.dtype, device=input_.device + ) + handle = torch.distributed.reduce_scatter_tensor( + output, input_.contiguous(), group=process_group, async_op=async_op + ) + return output, handle + + +# Raw operation, does not support autograd, but does support async +def all_reduce_raw(input_: Tensor, process_group: ProcessGroup, async_op: bool = False): + input_ = input_.contiguous() + handle = torch.distributed.all_reduce(input_, group=process_group, async_op=async_op) + return input_, handle + + +class AllGatherFunc(torch.autograd.Function): + """Gather the input from sequence parallel region and concatenate.""" + + @staticmethod + def forward(ctx, input_: Tensor, process_group: ProcessGroup) -> Tensor: + ctx.process_group = process_group + output, _ = all_gather_raw(input_, process_group) + return output + + @staticmethod + def backward(ctx, grad_output: Tensor): + grad_input, _ = reduce_scatter_raw(grad_output, ctx.process_group) + return grad_input, None + + +# Supports autograd, but does not support async +all_gather = AllGatherFunc.apply + + +class ReduceScatterFunc(torch.autograd.Function): + """Reduce scatter the input from the sequence parallel region and concatenate.""" + + @staticmethod + def forward(ctx, input_: Tensor, process_group: ProcessGroup) -> Tensor: + ctx.process_group = process_group + output, _ = reduce_scatter_raw(input_, process_group) + return output + + @staticmethod + def backward(ctx, grad_output: Tensor): + grad_input, _ = all_gather_raw(grad_output, ctx.process_group) + return grad_input, None + + +# Supports autograd, but does not support async +reduce_scatter = ReduceScatterFunc.apply + + +class AllReduceFunc(torch.autograd.Function): + """Gather the input from sequence parallel region and concatenate.""" + + @staticmethod + def forward(ctx, input_: Tensor, process_group: ProcessGroup) -> Tensor: + ctx.process_group = process_group + output, _ = all_reduce_raw(input_, 
process_group) + return output + + @staticmethod + def backward(ctx, grad_output: Tensor): + return grad_output, None + + +# Supports autograd, but does not support async +all_reduce = AllReduceFunc.apply + + +def sync_shared_params(model: torch.nn.Module, process_group: ProcessGroup): + # We want to iterate over parameters with _shared_params=True in the same order, + # as different ranks might have different number of parameters (e.g., only rank 0 has bias). + pamams_shared = { + name: p for name, p in model.named_parameters() if getattr(p, "_shared_params", False) + } + for _, p in sorted(pamams_shared.items()): + with torch.no_grad(): + # Broadcast needs src to be global rank, not group rank + torch.distributed.broadcast( + p, src=torch.distributed.get_global_rank(process_group, 0), group=process_group + ) + + +# Ref: https://github.com/NVIDIA/Megatron-LM/blob/52e636888cccc41e931251c417a7181fc36de926/megatron/optimizer/optimizer.py#L256 +def allreduce_sequence_parallel_grad(model: torch.nn.Module, process_group: ProcessGroup): + # We want to iterate over parameters with _sequence_parallel=True in the same order, + # as different ranks might have different number of parameters (e.g., only rank 0 has bias). + params_seqparallel = { + name: p for name, p in model.named_parameters() if getattr(p, "_sequence_parallel", False) + } + grads = [p.grad for _, p in sorted(params_seqparallel.items())] + if grads: + with torch.no_grad(): + coalesced = torch._utils._flatten_dense_tensors(grads) + torch.distributed.all_reduce(coalesced, group=process_group) + for buf, synced in zip(grads, torch._utils._unflatten_dense_tensors(coalesced, grads)): + buf.copy_(synced) + + +def get_dim_for_local_rank(dim: int, world_size: int, local_rank: int, multiple_of: int = 1) -> int: + """Get the dim for the local rank derived from splitting dim on world_size processes. + + The split may not be even across the world_size processes. + """ + multiple = dim // multiple_of + div = multiple // world_size + mod = multiple % world_size + local_multiple = div + int(local_rank < mod) + return local_multiple * multiple_of diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/helpers/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/xformers/helpers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..11aa37df97bf303b3a3bbc78b7d962b74254d082 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/xformers/helpers/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. 
+ + +from .timm_sparse_attention import TimmSparseAttention # noqa diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/helpers/__pycache__/hierarchical_configs.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/xformers/helpers/__pycache__/hierarchical_configs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6352c8d503eb4e016f3219656ee696079886fb44 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/xformers/helpers/__pycache__/hierarchical_configs.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/helpers/__pycache__/timm_sparse_attention.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/xformers/helpers/__pycache__/timm_sparse_attention.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1057c5f9a0bc3c4d2a283c05d927faededebca7 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/xformers/helpers/__pycache__/timm_sparse_attention.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/helpers/test_utils.py b/evalkit_tf437/lib/python3.10/site-packages/xformers/helpers/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6438e3c4dba966abce01de5af230b800b116a353 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/xformers/helpers/test_utils.py @@ -0,0 +1,32 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. + + +import sys +import tempfile + +import torch + +is_windows = False +if sys.platform == "win32": # pytorch on windows uses gloo not nccl + is_windows = True + + +def init_torch_distributed_local(): + if torch.distributed.is_initialized(): + return + + init_url = "file://" + tempfile.mkstemp()[1] + backend = ( + torch.distributed.Backend.NCCL + if torch.cuda.is_available() and not is_windows + else torch.distributed.Backend.GLOO + ) + torch.distributed.init_process_group( + backend=backend, + rank=0, + world_size=1, + init_method=init_url, + )
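
The two vendored modules added above are designed to work together: the sequence-parallel collectives in xformers/_flash_attn/utils/distributed.py and the single-process init_torch_distributed_local() helper in xformers/helpers/test_utils.py. The sketch below is illustrative only and is not part of the diff; it assumes PyTorch is installed, that the xformers package and these modules import cleanly from this site-packages tree under the paths shown, and it exercises the all_reduce wrapper because that collective is supported by both the gloo and NCCL backends.

    # smoke_test_distributed_helpers.py -- illustrative sketch, not part of the vendored tree
    import torch

    from xformers._flash_attn.utils.distributed import all_reduce  # AllReduceFunc.apply
    from xformers.helpers.test_utils import init_torch_distributed_local


    def main() -> None:
        # Spin up a single-process process group (gloo on CPU/Windows, NCCL on CUDA).
        init_torch_distributed_local()
        group = torch.distributed.group.WORLD

        # With world_size == 1 the all-reduce leaves the values unchanged, so this
        # only exercises the plumbing: forward runs the collective, backward passes
        # the incoming gradient straight through.
        x = torch.randn(4, 8, requires_grad=True)
        y = all_reduce(x, group)
        y.sum().backward()

        print("output:", tuple(y.shape), "grad:", tuple(x.grad.shape))
        torch.distributed.destroy_process_group()


    if __name__ == "__main__":
        main()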