ZTWHHH commited on
Commit
ca89b1a
·
verified ·
1 Parent(s): 74b56ab

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/__pycache__/functions.cpython-310.pyc +0 -0
  2. evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/__init__.cpython-310.pyc +0 -0
  3. evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/array_expressions.cpython-310.pyc +0 -0
  4. evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/arrayexpr_derivatives.cpython-310.pyc +0 -0
  5. evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/conv_array_to_indexed.cpython-310.pyc +0 -0
  6. evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/conv_array_to_matrix.cpython-310.pyc +0 -0
  7. evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/conv_indexed_to_array.cpython-310.pyc +0 -0
  8. evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/conv_matrix_to_array.cpython-310.pyc +0 -0
  9. evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/from_array_to_indexed.cpython-310.pyc +0 -0
  10. evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/from_array_to_matrix.cpython-310.pyc +0 -0
  11. evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/from_matrix_to_array.cpython-310.pyc +0 -0
  12. evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/utils.cpython-310.pyc +0 -0
  13. evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  14. evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_array_expressions.cpython-310.pyc +0 -0
  15. evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_arrayexpr_derivatives.cpython-310.pyc +0 -0
  16. evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_as_explicit.cpython-310.pyc +0 -0
  17. evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_convert_array_to_indexed.cpython-310.pyc +0 -0
  18. evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_convert_array_to_matrix.cpython-310.pyc +0 -0
  19. evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_convert_indexed_to_array.cpython-310.pyc +0 -0
  20. evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_convert_matrix_to_array.cpython-310.pyc +0 -0
  21. evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_deprecated_conv_modules.cpython-310.pyc +0 -0
  22. evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_array_expressions.py +808 -0
  23. evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_arrayexpr_derivatives.py +78 -0
  24. evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_convert_array_to_indexed.py +61 -0
  25. evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_convert_array_to_matrix.py +689 -0
  26. evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_convert_indexed_to_array.py +205 -0
  27. evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_convert_matrix_to_array.py +128 -0
  28. evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_deprecated_conv_modules.py +22 -0
  29. evalkit_tf437/lib/python3.10/site-packages/accelerate/__pycache__/logging.cpython-310.pyc +0 -0
  30. evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/__init__.py +0 -0
  31. evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/__pycache__/tpu.cpython-310.pyc +0 -0
  32. evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/accelerate_cli.py +51 -0
  33. evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/config/__init__.py +52 -0
  34. evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/cluster.cpython-310.pyc +0 -0
  35. evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config.cpython-310.pyc +0 -0
  36. evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config_args.cpython-310.pyc +0 -0
  37. evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config_utils.cpython-310.pyc +0 -0
  38. evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/default.cpython-310.pyc +0 -0
  39. evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/sagemaker.cpython-310.pyc +0 -0
  40. evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/update.cpython-310.pyc +0 -0
  41. evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/config/cluster.py +645 -0
  42. evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/config/config.py +89 -0
  43. evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/config/config_args.py +234 -0
  44. evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/config/config_utils.py +99 -0
  45. evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/config/default.py +125 -0
  46. evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/config/sagemaker.py +267 -0
  47. evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/config/update.py +63 -0
  48. evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/estimate.py +270 -0
  49. evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/launch.py +996 -0
  50. evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/test.py +66 -0
evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/__pycache__/functions.cpython-310.pyc ADDED
Binary file (5.27 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (7.13 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/array_expressions.cpython-310.pyc ADDED
Binary file (80.8 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/arrayexpr_derivatives.cpython-310.pyc ADDED
Binary file (7.16 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/conv_array_to_indexed.cpython-310.pyc ADDED
Binary file (602 Bytes). View file
 
evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/conv_array_to_matrix.cpython-310.pyc ADDED
Binary file (477 Bytes). View file
 
evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/conv_indexed_to_array.cpython-310.pyc ADDED
Binary file (419 Bytes). View file
 
evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/conv_matrix_to_array.cpython-310.pyc ADDED
Binary file (416 Bytes). View file
 
evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/from_array_to_indexed.cpython-310.pyc ADDED
Binary file (3.95 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/from_array_to_matrix.cpython-310.pyc ADDED
Binary file (32 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/from_matrix_to_array.cpython-310.pyc ADDED
Binary file (4.05 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/__pycache__/utils.cpython-310.pyc ADDED
Binary file (5.43 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (198 Bytes). View file
 
evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_array_expressions.cpython-310.pyc ADDED
Binary file (24.8 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_arrayexpr_derivatives.cpython-310.pyc ADDED
Binary file (2.15 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_as_explicit.cpython-310.pyc ADDED
Binary file (2.78 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_convert_array_to_indexed.cpython-310.pyc ADDED
Binary file (2.36 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_convert_array_to_matrix.cpython-310.pyc ADDED
Binary file (17.8 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_convert_indexed_to_array.cpython-310.pyc ADDED
Binary file (7.61 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_convert_matrix_to_array.cpython-310.pyc ADDED
Binary file (3.44 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/__pycache__/test_deprecated_conv_modules.cpython-310.pyc ADDED
Binary file (1.44 kB). View file
 
evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_array_expressions.py ADDED
@@ -0,0 +1,808 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import random
2
+
3
+ from sympy import tensordiagonal, eye, KroneckerDelta, Array
4
+ from sympy.core.symbol import symbols
5
+ from sympy.functions.elementary.trigonometric import (cos, sin)
6
+ from sympy.matrices.expressions.diagonal import DiagMatrix
7
+ from sympy.matrices.expressions.matexpr import MatrixSymbol
8
+ from sympy.matrices.expressions.special import ZeroMatrix
9
+ from sympy.tensor.array.arrayop import (permutedims, tensorcontraction, tensorproduct)
10
+ from sympy.tensor.array.dense_ndim_array import ImmutableDenseNDimArray
11
+ from sympy.combinatorics import Permutation
12
+ from sympy.tensor.array.expressions.array_expressions import ZeroArray, OneArray, ArraySymbol, ArrayElement, \
13
+ PermuteDims, ArrayContraction, ArrayTensorProduct, ArrayDiagonal, \
14
+ ArrayAdd, nest_permutation, ArrayElementwiseApplyFunc, _EditArrayContraction, _ArgE, _array_tensor_product, \
15
+ _array_contraction, _array_diagonal, _array_add, _permute_dims, Reshape
16
+ from sympy.testing.pytest import raises
17
+
18
+ i, j, k, l, m, n = symbols("i j k l m n")
19
+
20
+
21
+ M = ArraySymbol("M", (k, k))
22
+ N = ArraySymbol("N", (k, k))
23
+ P = ArraySymbol("P", (k, k))
24
+ Q = ArraySymbol("Q", (k, k))
25
+
26
+ A = ArraySymbol("A", (k, k))
27
+ B = ArraySymbol("B", (k, k))
28
+ C = ArraySymbol("C", (k, k))
29
+ D = ArraySymbol("D", (k, k))
30
+
31
+ X = ArraySymbol("X", (k, k))
32
+ Y = ArraySymbol("Y", (k, k))
33
+
34
+ a = ArraySymbol("a", (k, 1))
35
+ b = ArraySymbol("b", (k, 1))
36
+ c = ArraySymbol("c", (k, 1))
37
+ d = ArraySymbol("d", (k, 1))
38
+
39
+
40
+ def test_array_symbol_and_element():
41
+ A = ArraySymbol("A", (2,))
42
+ A0 = ArrayElement(A, (0,))
43
+ A1 = ArrayElement(A, (1,))
44
+ assert A[0] == A0
45
+ assert A[1] != A0
46
+ assert A.as_explicit() == ImmutableDenseNDimArray([A0, A1])
47
+
48
+ A2 = tensorproduct(A, A)
49
+ assert A2.shape == (2, 2)
50
+ # TODO: not yet supported:
51
+ # assert A2.as_explicit() == Array([[A[0]*A[0], A[1]*A[0]], [A[0]*A[1], A[1]*A[1]]])
52
+ A3 = tensorcontraction(A2, (0, 1))
53
+ assert A3.shape == ()
54
+ # TODO: not yet supported:
55
+ # assert A3.as_explicit() == Array([])
56
+
57
+ A = ArraySymbol("A", (2, 3, 4))
58
+ Ae = A.as_explicit()
59
+ assert Ae == ImmutableDenseNDimArray(
60
+ [[[ArrayElement(A, (i, j, k)) for k in range(4)] for j in range(3)] for i in range(2)])
61
+
62
+ p = _permute_dims(A, Permutation(0, 2, 1))
63
+ assert isinstance(p, PermuteDims)
64
+
65
+ A = ArraySymbol("A", (2,))
66
+ raises(IndexError, lambda: A[()])
67
+ raises(IndexError, lambda: A[0, 1])
68
+ raises(ValueError, lambda: A[-1])
69
+ raises(ValueError, lambda: A[2])
70
+
71
+ O = OneArray(3, 4)
72
+ Z = ZeroArray(m, n)
73
+
74
+ raises(IndexError, lambda: O[()])
75
+ raises(IndexError, lambda: O[1, 2, 3])
76
+ raises(ValueError, lambda: O[3, 0])
77
+ raises(ValueError, lambda: O[0, 4])
78
+
79
+ assert O[1, 2] == 1
80
+ assert Z[1, 2] == 0
81
+
82
+
83
+ def test_zero_array():
84
+ assert ZeroArray() == 0
85
+ assert ZeroArray().is_Integer
86
+
87
+ za = ZeroArray(3, 2, 4)
88
+ assert za.shape == (3, 2, 4)
89
+ za_e = za.as_explicit()
90
+ assert za_e.shape == (3, 2, 4)
91
+
92
+ m, n, k = symbols("m n k")
93
+ za = ZeroArray(m, n, k, 2)
94
+ assert za.shape == (m, n, k, 2)
95
+ raises(ValueError, lambda: za.as_explicit())
96
+
97
+
98
+ def test_one_array():
99
+ assert OneArray() == 1
100
+ assert OneArray().is_Integer
101
+
102
+ oa = OneArray(3, 2, 4)
103
+ assert oa.shape == (3, 2, 4)
104
+ oa_e = oa.as_explicit()
105
+ assert oa_e.shape == (3, 2, 4)
106
+
107
+ m, n, k = symbols("m n k")
108
+ oa = OneArray(m, n, k, 2)
109
+ assert oa.shape == (m, n, k, 2)
110
+ raises(ValueError, lambda: oa.as_explicit())
111
+
112
+
113
+ def test_arrayexpr_contraction_construction():
114
+
115
+ cg = _array_contraction(A)
116
+ assert cg == A
117
+
118
+ cg = _array_contraction(_array_tensor_product(A, B), (1, 0))
119
+ assert cg == _array_contraction(_array_tensor_product(A, B), (0, 1))
120
+
121
+ cg = _array_contraction(_array_tensor_product(M, N), (0, 1))
122
+ indtup = cg._get_contraction_tuples()
123
+ assert indtup == [[(0, 0), (0, 1)]]
124
+ assert cg._contraction_tuples_to_contraction_indices(cg.expr, indtup) == [(0, 1)]
125
+
126
+ cg = _array_contraction(_array_tensor_product(M, N), (1, 2))
127
+ indtup = cg._get_contraction_tuples()
128
+ assert indtup == [[(0, 1), (1, 0)]]
129
+ assert cg._contraction_tuples_to_contraction_indices(cg.expr, indtup) == [(1, 2)]
130
+
131
+ cg = _array_contraction(_array_tensor_product(M, M, N), (1, 4), (2, 5))
132
+ indtup = cg._get_contraction_tuples()
133
+ assert indtup == [[(0, 0), (1, 1)], [(0, 1), (2, 0)]]
134
+ assert cg._contraction_tuples_to_contraction_indices(cg.expr, indtup) == [(0, 3), (1, 4)]
135
+
136
+ # Test removal of trivial contraction:
137
+ assert _array_contraction(a, (1,)) == a
138
+ assert _array_contraction(
139
+ _array_tensor_product(a, b), (0, 2), (1,), (3,)) == _array_contraction(
140
+ _array_tensor_product(a, b), (0, 2))
141
+
142
+
143
+ def test_arrayexpr_array_flatten():
144
+
145
+ # Flatten nested ArrayTensorProduct objects:
146
+ expr1 = _array_tensor_product(M, N)
147
+ expr2 = _array_tensor_product(P, Q)
148
+ expr = _array_tensor_product(expr1, expr2)
149
+ assert expr == _array_tensor_product(M, N, P, Q)
150
+ assert expr.args == (M, N, P, Q)
151
+
152
+ # Flatten mixed ArrayTensorProduct and ArrayContraction objects:
153
+ cg1 = _array_contraction(expr1, (1, 2))
154
+ cg2 = _array_contraction(expr2, (0, 3))
155
+
156
+ expr = _array_tensor_product(cg1, cg2)
157
+ assert expr == _array_contraction(_array_tensor_product(M, N, P, Q), (1, 2), (4, 7))
158
+
159
+ expr = _array_tensor_product(M, cg1)
160
+ assert expr == _array_contraction(_array_tensor_product(M, M, N), (3, 4))
161
+
162
+ # Flatten nested ArrayContraction objects:
163
+ cgnested = _array_contraction(cg1, (0, 1))
164
+ assert cgnested == _array_contraction(_array_tensor_product(M, N), (0, 3), (1, 2))
165
+
166
+ cgnested = _array_contraction(_array_tensor_product(cg1, cg2), (0, 3))
167
+ assert cgnested == _array_contraction(_array_tensor_product(M, N, P, Q), (0, 6), (1, 2), (4, 7))
168
+
169
+ cg3 = _array_contraction(_array_tensor_product(M, N, P, Q), (1, 3), (2, 4))
170
+ cgnested = _array_contraction(cg3, (0, 1))
171
+ assert cgnested == _array_contraction(_array_tensor_product(M, N, P, Q), (0, 5), (1, 3), (2, 4))
172
+
173
+ cgnested = _array_contraction(cg3, (0, 3), (1, 2))
174
+ assert cgnested == _array_contraction(_array_tensor_product(M, N, P, Q), (0, 7), (1, 3), (2, 4), (5, 6))
175
+
176
+ cg4 = _array_contraction(_array_tensor_product(M, N, P, Q), (1, 5), (3, 7))
177
+ cgnested = _array_contraction(cg4, (0, 1))
178
+ assert cgnested == _array_contraction(_array_tensor_product(M, N, P, Q), (0, 2), (1, 5), (3, 7))
179
+
180
+ cgnested = _array_contraction(cg4, (0, 1), (2, 3))
181
+ assert cgnested == _array_contraction(_array_tensor_product(M, N, P, Q), (0, 2), (1, 5), (3, 7), (4, 6))
182
+
183
+ cg = _array_diagonal(cg4)
184
+ assert cg == cg4
185
+ assert isinstance(cg, type(cg4))
186
+
187
+ # Flatten nested ArrayDiagonal objects:
188
+ cg1 = _array_diagonal(expr1, (1, 2))
189
+ cg2 = _array_diagonal(expr2, (0, 3))
190
+ cg3 = _array_diagonal(_array_tensor_product(M, N, P, Q), (1, 3), (2, 4))
191
+ cg4 = _array_diagonal(_array_tensor_product(M, N, P, Q), (1, 5), (3, 7))
192
+
193
+ cgnested = _array_diagonal(cg1, (0, 1))
194
+ assert cgnested == _array_diagonal(_array_tensor_product(M, N), (1, 2), (0, 3))
195
+
196
+ cgnested = _array_diagonal(cg3, (1, 2))
197
+ assert cgnested == _array_diagonal(_array_tensor_product(M, N, P, Q), (1, 3), (2, 4), (5, 6))
198
+
199
+ cgnested = _array_diagonal(cg4, (1, 2))
200
+ assert cgnested == _array_diagonal(_array_tensor_product(M, N, P, Q), (1, 5), (3, 7), (2, 4))
201
+
202
+ cg = _array_add(M, N)
203
+ cg2 = _array_add(cg, P)
204
+ assert isinstance(cg2, ArrayAdd)
205
+ assert cg2.args == (M, N, P)
206
+ assert cg2.shape == (k, k)
207
+
208
+ expr = _array_tensor_product(_array_diagonal(X, (0, 1)), _array_diagonal(A, (0, 1)))
209
+ assert expr == _array_diagonal(_array_tensor_product(X, A), (0, 1), (2, 3))
210
+
211
+ expr1 = _array_diagonal(_array_tensor_product(X, A), (1, 2))
212
+ expr2 = _array_tensor_product(expr1, a)
213
+ assert expr2 == _permute_dims(_array_diagonal(_array_tensor_product(X, A, a), (1, 2)), [0, 1, 4, 2, 3])
214
+
215
+ expr1 = _array_contraction(_array_tensor_product(X, A), (1, 2))
216
+ expr2 = _array_tensor_product(expr1, a)
217
+ assert isinstance(expr2, ArrayContraction)
218
+ assert isinstance(expr2.expr, ArrayTensorProduct)
219
+
220
+ cg = _array_tensor_product(_array_diagonal(_array_tensor_product(A, X, Y), (0, 3), (1, 5)), a, b)
221
+ assert cg == _permute_dims(_array_diagonal(_array_tensor_product(A, X, Y, a, b), (0, 3), (1, 5)), [0, 1, 6, 7, 2, 3, 4, 5])
222
+
223
+
224
+ def test_arrayexpr_array_diagonal():
225
+ cg = _array_diagonal(M, (1, 0))
226
+ assert cg == _array_diagonal(M, (0, 1))
227
+
228
+ cg = _array_diagonal(_array_tensor_product(M, N, P), (4, 1), (2, 0))
229
+ assert cg == _array_diagonal(_array_tensor_product(M, N, P), (1, 4), (0, 2))
230
+
231
+ cg = _array_diagonal(_array_tensor_product(M, N), (1, 2), (3,), allow_trivial_diags=True)
232
+ assert cg == _permute_dims(_array_diagonal(_array_tensor_product(M, N), (1, 2)), [0, 2, 1])
233
+
234
+ Ax = ArraySymbol("Ax", shape=(1, 2, 3, 4, 3, 5, 6, 2, 7))
235
+ cg = _array_diagonal(Ax, (1, 7), (3,), (2, 4), (6,), allow_trivial_diags=True)
236
+ assert cg == _permute_dims(_array_diagonal(Ax, (1, 7), (2, 4)), [0, 2, 4, 5, 1, 6, 3])
237
+
238
+ cg = _array_diagonal(M, (0,), allow_trivial_diags=True)
239
+ assert cg == _permute_dims(M, [1, 0])
240
+
241
+ raises(ValueError, lambda: _array_diagonal(M, (0, 0)))
242
+
243
+
244
+ def test_arrayexpr_array_shape():
245
+ expr = _array_tensor_product(M, N, P, Q)
246
+ assert expr.shape == (k, k, k, k, k, k, k, k)
247
+ Z = MatrixSymbol("Z", m, n)
248
+ expr = _array_tensor_product(M, Z)
249
+ assert expr.shape == (k, k, m, n)
250
+ expr2 = _array_contraction(expr, (0, 1))
251
+ assert expr2.shape == (m, n)
252
+ expr2 = _array_diagonal(expr, (0, 1))
253
+ assert expr2.shape == (m, n, k)
254
+ exprp = _permute_dims(expr, [2, 1, 3, 0])
255
+ assert exprp.shape == (m, k, n, k)
256
+ expr3 = _array_tensor_product(N, Z)
257
+ expr2 = _array_add(expr, expr3)
258
+ assert expr2.shape == (k, k, m, n)
259
+
260
+ # Contraction along axes with discordant dimensions:
261
+ raises(ValueError, lambda: _array_contraction(expr, (1, 2)))
262
+ # Also diagonal needs the same dimensions:
263
+ raises(ValueError, lambda: _array_diagonal(expr, (1, 2)))
264
+ # Diagonal requires at least to axes to compute the diagonal:
265
+ raises(ValueError, lambda: _array_diagonal(expr, (1,)))
266
+
267
+
268
+ def test_arrayexpr_permutedims_sink():
269
+
270
+ cg = _permute_dims(_array_tensor_product(M, N), [0, 1, 3, 2], nest_permutation=False)
271
+ sunk = nest_permutation(cg)
272
+ assert sunk == _array_tensor_product(M, _permute_dims(N, [1, 0]))
273
+
274
+ cg = _permute_dims(_array_tensor_product(M, N), [1, 0, 3, 2], nest_permutation=False)
275
+ sunk = nest_permutation(cg)
276
+ assert sunk == _array_tensor_product(_permute_dims(M, [1, 0]), _permute_dims(N, [1, 0]))
277
+
278
+ cg = _permute_dims(_array_tensor_product(M, N), [3, 2, 1, 0], nest_permutation=False)
279
+ sunk = nest_permutation(cg)
280
+ assert sunk == _array_tensor_product(_permute_dims(N, [1, 0]), _permute_dims(M, [1, 0]))
281
+
282
+ cg = _permute_dims(_array_contraction(_array_tensor_product(M, N), (1, 2)), [1, 0], nest_permutation=False)
283
+ sunk = nest_permutation(cg)
284
+ assert sunk == _array_contraction(_permute_dims(_array_tensor_product(M, N), [[0, 3]]), (1, 2))
285
+
286
+ cg = _permute_dims(_array_tensor_product(M, N), [1, 0, 3, 2], nest_permutation=False)
287
+ sunk = nest_permutation(cg)
288
+ assert sunk == _array_tensor_product(_permute_dims(M, [1, 0]), _permute_dims(N, [1, 0]))
289
+
290
+ cg = _permute_dims(_array_contraction(_array_tensor_product(M, N, P), (1, 2), (3, 4)), [1, 0], nest_permutation=False)
291
+ sunk = nest_permutation(cg)
292
+ assert sunk == _array_contraction(_permute_dims(_array_tensor_product(M, N, P), [[0, 5]]), (1, 2), (3, 4))
293
+
294
+
295
+ def test_arrayexpr_push_indices_up_and_down():
296
+
297
+ indices = list(range(12))
298
+
299
+ contr_diag_indices = [(0, 6), (2, 8)]
300
+ assert ArrayContraction._push_indices_down(contr_diag_indices, indices) == (1, 3, 4, 5, 7, 9, 10, 11, 12, 13, 14, 15)
301
+ assert ArrayContraction._push_indices_up(contr_diag_indices, indices) == (None, 0, None, 1, 2, 3, None, 4, None, 5, 6, 7)
302
+
303
+ assert ArrayDiagonal._push_indices_down(contr_diag_indices, indices, 10) == (1, 3, 4, 5, 7, 9, (0, 6), (2, 8), None, None, None, None)
304
+ assert ArrayDiagonal._push_indices_up(contr_diag_indices, indices, 10) == (6, 0, 7, 1, 2, 3, 6, 4, 7, 5, None, None)
305
+
306
+ contr_diag_indices = [(1, 2), (7, 8)]
307
+ assert ArrayContraction._push_indices_down(contr_diag_indices, indices) == (0, 3, 4, 5, 6, 9, 10, 11, 12, 13, 14, 15)
308
+ assert ArrayContraction._push_indices_up(contr_diag_indices, indices) == (0, None, None, 1, 2, 3, 4, None, None, 5, 6, 7)
309
+
310
+ assert ArrayDiagonal._push_indices_down(contr_diag_indices, indices, 10) == (0, 3, 4, 5, 6, 9, (1, 2), (7, 8), None, None, None, None)
311
+ assert ArrayDiagonal._push_indices_up(contr_diag_indices, indices, 10) == (0, 6, 6, 1, 2, 3, 4, 7, 7, 5, None, None)
312
+
313
+
314
+ def test_arrayexpr_split_multiple_contractions():
315
+ a = MatrixSymbol("a", k, 1)
316
+ b = MatrixSymbol("b", k, 1)
317
+ A = MatrixSymbol("A", k, k)
318
+ B = MatrixSymbol("B", k, k)
319
+ C = MatrixSymbol("C", k, k)
320
+ X = MatrixSymbol("X", k, k)
321
+
322
+ cg = _array_contraction(_array_tensor_product(A.T, a, b, b.T, (A*X*b).applyfunc(cos)), (1, 2, 8), (5, 6, 9))
323
+ expected = _array_contraction(_array_tensor_product(A.T, DiagMatrix(a), OneArray(1), b, b.T, (A*X*b).applyfunc(cos)), (1, 3), (2, 9), (6, 7, 10))
324
+ assert cg.split_multiple_contractions().dummy_eq(expected)
325
+
326
+ # Check no overlap of lines:
327
+
328
+ cg = _array_contraction(_array_tensor_product(A, a, C, a, B), (1, 2, 4), (5, 6, 8), (3, 7))
329
+ assert cg.split_multiple_contractions() == cg
330
+
331
+ cg = _array_contraction(_array_tensor_product(a, b, A), (0, 2, 4), (1, 3))
332
+ assert cg.split_multiple_contractions() == cg
333
+
334
+
335
+ def test_arrayexpr_nested_permutations():
336
+
337
+ cg = _permute_dims(_permute_dims(M, (1, 0)), (1, 0))
338
+ assert cg == M
339
+
340
+ times = 3
341
+ plist1 = [list(range(6)) for i in range(times)]
342
+ plist2 = [list(range(6)) for i in range(times)]
343
+
344
+ for i in range(times):
345
+ random.shuffle(plist1[i])
346
+ random.shuffle(plist2[i])
347
+
348
+ plist1.append([2, 5, 4, 1, 0, 3])
349
+ plist2.append([3, 5, 0, 4, 1, 2])
350
+
351
+ plist1.append([2, 5, 4, 0, 3, 1])
352
+ plist2.append([3, 0, 5, 1, 2, 4])
353
+
354
+ plist1.append([5, 4, 2, 0, 3, 1])
355
+ plist2.append([4, 5, 0, 2, 3, 1])
356
+
357
+ Me = M.subs(k, 3).as_explicit()
358
+ Ne = N.subs(k, 3).as_explicit()
359
+ Pe = P.subs(k, 3).as_explicit()
360
+ cge = tensorproduct(Me, Ne, Pe)
361
+
362
+ for permutation_array1, permutation_array2 in zip(plist1, plist2):
363
+ p1 = Permutation(permutation_array1)
364
+ p2 = Permutation(permutation_array2)
365
+
366
+ cg = _permute_dims(
367
+ _permute_dims(
368
+ _array_tensor_product(M, N, P),
369
+ p1),
370
+ p2
371
+ )
372
+ result = _permute_dims(
373
+ _array_tensor_product(M, N, P),
374
+ p2*p1
375
+ )
376
+ assert cg == result
377
+
378
+ # Check that `permutedims` behaves the same way with explicit-component arrays:
379
+ result1 = _permute_dims(_permute_dims(cge, p1), p2)
380
+ result2 = _permute_dims(cge, p2*p1)
381
+ assert result1 == result2
382
+
383
+
384
+ def test_arrayexpr_contraction_permutation_mix():
385
+
386
+ Me = M.subs(k, 3).as_explicit()
387
+ Ne = N.subs(k, 3).as_explicit()
388
+
389
+ cg1 = _array_contraction(PermuteDims(_array_tensor_product(M, N), Permutation([0, 2, 1, 3])), (2, 3))
390
+ cg2 = _array_contraction(_array_tensor_product(M, N), (1, 3))
391
+ assert cg1 == cg2
392
+ cge1 = tensorcontraction(permutedims(tensorproduct(Me, Ne), Permutation([0, 2, 1, 3])), (2, 3))
393
+ cge2 = tensorcontraction(tensorproduct(Me, Ne), (1, 3))
394
+ assert cge1 == cge2
395
+
396
+ cg1 = _permute_dims(_array_tensor_product(M, N), Permutation([0, 1, 3, 2]))
397
+ cg2 = _array_tensor_product(M, _permute_dims(N, Permutation([1, 0])))
398
+ assert cg1 == cg2
399
+
400
+ cg1 = _array_contraction(
401
+ _permute_dims(
402
+ _array_tensor_product(M, N, P, Q), Permutation([0, 2, 3, 1, 4, 5, 7, 6])),
403
+ (1, 2), (3, 5)
404
+ )
405
+ cg2 = _array_contraction(
406
+ _array_tensor_product(M, N, P, _permute_dims(Q, Permutation([1, 0]))),
407
+ (1, 5), (2, 3)
408
+ )
409
+ assert cg1 == cg2
410
+
411
+ cg1 = _array_contraction(
412
+ _permute_dims(
413
+ _array_tensor_product(M, N, P, Q), Permutation([1, 0, 4, 6, 2, 7, 5, 3])),
414
+ (0, 1), (2, 6), (3, 7)
415
+ )
416
+ cg2 = _permute_dims(
417
+ _array_contraction(
418
+ _array_tensor_product(M, P, Q, N),
419
+ (0, 1), (2, 3), (4, 7)),
420
+ [1, 0]
421
+ )
422
+ assert cg1 == cg2
423
+
424
+ cg1 = _array_contraction(
425
+ _permute_dims(
426
+ _array_tensor_product(M, N, P, Q), Permutation([1, 0, 4, 6, 7, 2, 5, 3])),
427
+ (0, 1), (2, 6), (3, 7)
428
+ )
429
+ cg2 = _permute_dims(
430
+ _array_contraction(
431
+ _array_tensor_product(_permute_dims(M, [1, 0]), N, P, Q),
432
+ (0, 1), (3, 6), (4, 5)
433
+ ),
434
+ Permutation([1, 0])
435
+ )
436
+ assert cg1 == cg2
437
+
438
+
439
+ def test_arrayexpr_permute_tensor_product():
440
+ cg1 = _permute_dims(_array_tensor_product(M, N, P, Q), Permutation([2, 3, 1, 0, 5, 4, 6, 7]))
441
+ cg2 = _array_tensor_product(N, _permute_dims(M, [1, 0]),
442
+ _permute_dims(P, [1, 0]), Q)
443
+ assert cg1 == cg2
444
+
445
+ # TODO: reverse operation starting with `PermuteDims` and getting down to `bb`...
446
+ cg1 = _permute_dims(_array_tensor_product(M, N, P, Q), Permutation([2, 3, 4, 5, 0, 1, 6, 7]))
447
+ cg2 = _array_tensor_product(N, P, M, Q)
448
+ assert cg1 == cg2
449
+
450
+ cg1 = _permute_dims(_array_tensor_product(M, N, P, Q), Permutation([2, 3, 4, 6, 5, 7, 0, 1]))
451
+ assert cg1.expr == _array_tensor_product(N, P, Q, M)
452
+ assert cg1.permutation == Permutation([0, 1, 2, 4, 3, 5, 6, 7])
453
+
454
+ cg1 = _array_contraction(
455
+ _permute_dims(
456
+ _array_tensor_product(N, Q, Q, M),
457
+ [2, 1, 5, 4, 0, 3, 6, 7]),
458
+ [1, 2, 6])
459
+ cg2 = _permute_dims(_array_contraction(_array_tensor_product(Q, Q, N, M), (3, 5, 6)), [0, 2, 3, 1, 4])
460
+ assert cg1 == cg2
461
+
462
+ cg1 = _array_contraction(
463
+ _array_contraction(
464
+ _array_contraction(
465
+ _array_contraction(
466
+ _permute_dims(
467
+ _array_tensor_product(N, Q, Q, M),
468
+ [2, 1, 5, 4, 0, 3, 6, 7]),
469
+ [1, 2, 6]),
470
+ [1, 3, 4]),
471
+ [1]),
472
+ [0])
473
+ cg2 = _array_contraction(_array_tensor_product(M, N, Q, Q), (0, 3, 5), (1, 4, 7), (2,), (6,))
474
+ assert cg1 == cg2
475
+
476
+
477
+ def test_arrayexpr_canonicalize_diagonal__permute_dims():
478
+ tp = _array_tensor_product(M, Q, N, P)
479
+ expr = _array_diagonal(
480
+ _permute_dims(tp, [0, 1, 2, 4, 7, 6, 3, 5]), (2, 4, 5), (6, 7),
481
+ (0, 3))
482
+ result = _array_diagonal(tp, (2, 6, 7), (3, 5), (0, 4))
483
+ assert expr == result
484
+
485
+ tp = _array_tensor_product(M, N, P, Q)
486
+ expr = _array_diagonal(_permute_dims(tp, [0, 5, 2, 4, 1, 6, 3, 7]), (1, 2, 6), (3, 4))
487
+ result = _array_diagonal(_array_tensor_product(M, P, N, Q), (3, 4, 5), (1, 2))
488
+ assert expr == result
489
+
490
+
491
+ def test_arrayexpr_canonicalize_diagonal_contraction():
492
+ tp = _array_tensor_product(M, N, P, Q)
493
+ expr = _array_contraction(_array_diagonal(tp, (1, 3, 4)), (0, 3))
494
+ result = _array_diagonal(_array_contraction(_array_tensor_product(M, N, P, Q), (0, 6)), (0, 2, 3))
495
+ assert expr == result
496
+
497
+ expr = _array_contraction(_array_diagonal(tp, (0, 1, 2, 3, 7)), (1, 2, 3))
498
+ result = _array_contraction(_array_tensor_product(M, N, P, Q), (0, 1, 2, 3, 5, 6, 7))
499
+ assert expr == result
500
+
501
+ expr = _array_contraction(_array_diagonal(tp, (0, 2, 6, 7)), (1, 2, 3))
502
+ result = _array_diagonal(_array_contraction(tp, (3, 4, 5)), (0, 2, 3, 4))
503
+ assert expr == result
504
+
505
+ td = _array_diagonal(_array_tensor_product(M, N, P, Q), (0, 3))
506
+ expr = _array_contraction(td, (2, 1), (0, 4, 6, 5, 3))
507
+ result = _array_contraction(_array_tensor_product(M, N, P, Q), (0, 1, 3, 5, 6, 7), (2, 4))
508
+ assert expr == result
509
+
510
+
511
+ def test_arrayexpr_array_wrong_permutation_size():
512
+ cg = _array_tensor_product(M, N)
513
+ raises(ValueError, lambda: _permute_dims(cg, [1, 0]))
514
+ raises(ValueError, lambda: _permute_dims(cg, [1, 0, 2, 3, 5, 4]))
515
+
516
+
517
+ def test_arrayexpr_nested_array_elementwise_add():
518
+ cg = _array_contraction(_array_add(
519
+ _array_tensor_product(M, N),
520
+ _array_tensor_product(N, M)
521
+ ), (1, 2))
522
+ result = _array_add(
523
+ _array_contraction(_array_tensor_product(M, N), (1, 2)),
524
+ _array_contraction(_array_tensor_product(N, M), (1, 2))
525
+ )
526
+ assert cg == result
527
+
528
+ cg = _array_diagonal(_array_add(
529
+ _array_tensor_product(M, N),
530
+ _array_tensor_product(N, M)
531
+ ), (1, 2))
532
+ result = _array_add(
533
+ _array_diagonal(_array_tensor_product(M, N), (1, 2)),
534
+ _array_diagonal(_array_tensor_product(N, M), (1, 2))
535
+ )
536
+ assert cg == result
537
+
538
+
539
+ def test_arrayexpr_array_expr_zero_array():
540
+ za1 = ZeroArray(k, l, m, n)
541
+ zm1 = ZeroMatrix(m, n)
542
+
543
+ za2 = ZeroArray(k, m, m, n)
544
+ zm2 = ZeroMatrix(m, m)
545
+ zm3 = ZeroMatrix(k, k)
546
+
547
+ assert _array_tensor_product(M, N, za1) == ZeroArray(k, k, k, k, k, l, m, n)
548
+ assert _array_tensor_product(M, N, zm1) == ZeroArray(k, k, k, k, m, n)
549
+
550
+ assert _array_contraction(za1, (3,)) == ZeroArray(k, l, m)
551
+ assert _array_contraction(zm1, (1,)) == ZeroArray(m)
552
+ assert _array_contraction(za2, (1, 2)) == ZeroArray(k, n)
553
+ assert _array_contraction(zm2, (0, 1)) == 0
554
+
555
+ assert _array_diagonal(za2, (1, 2)) == ZeroArray(k, n, m)
556
+ assert _array_diagonal(zm2, (0, 1)) == ZeroArray(m)
557
+
558
+ assert _permute_dims(za1, [2, 1, 3, 0]) == ZeroArray(m, l, n, k)
559
+ assert _permute_dims(zm1, [1, 0]) == ZeroArray(n, m)
560
+
561
+ assert _array_add(za1) == za1
562
+ assert _array_add(zm1) == ZeroArray(m, n)
563
+ tp1 = _array_tensor_product(MatrixSymbol("A", k, l), MatrixSymbol("B", m, n))
564
+ assert _array_add(tp1, za1) == tp1
565
+ tp2 = _array_tensor_product(MatrixSymbol("C", k, l), MatrixSymbol("D", m, n))
566
+ assert _array_add(tp1, za1, tp2) == _array_add(tp1, tp2)
567
+ assert _array_add(M, zm3) == M
568
+ assert _array_add(M, N, zm3) == _array_add(M, N)
569
+
570
+
571
+ def test_arrayexpr_array_expr_applyfunc():
572
+
573
+ A = ArraySymbol("A", (3, k, 2))
574
+ aaf = ArrayElementwiseApplyFunc(sin, A)
575
+ assert aaf.shape == (3, k, 2)
576
+
577
+
578
+ def test_edit_array_contraction():
579
+ cg = _array_contraction(_array_tensor_product(A, B, C, D), (1, 2, 5))
580
+ ecg = _EditArrayContraction(cg)
581
+ assert ecg.to_array_contraction() == cg
582
+
583
+ ecg.args_with_ind[1], ecg.args_with_ind[2] = ecg.args_with_ind[2], ecg.args_with_ind[1]
584
+ assert ecg.to_array_contraction() == _array_contraction(_array_tensor_product(A, C, B, D), (1, 3, 4))
585
+
586
+ ci = ecg.get_new_contraction_index()
587
+ new_arg = _ArgE(X)
588
+ new_arg.indices = [ci, ci]
589
+ ecg.args_with_ind.insert(2, new_arg)
590
+ assert ecg.to_array_contraction() == _array_contraction(_array_tensor_product(A, C, X, B, D), (1, 3, 6), (4, 5))
591
+
592
+ assert ecg.get_contraction_indices() == [[1, 3, 6], [4, 5]]
593
+ assert [[tuple(j) for j in i] for i in ecg.get_contraction_indices_to_ind_rel_pos()] == [[(0, 1), (1, 1), (3, 0)], [(2, 0), (2, 1)]]
594
+ assert [list(i) for i in ecg.get_mapping_for_index(0)] == [[0, 1], [1, 1], [3, 0]]
595
+ assert [list(i) for i in ecg.get_mapping_for_index(1)] == [[2, 0], [2, 1]]
596
+ raises(ValueError, lambda: ecg.get_mapping_for_index(2))
597
+
598
+ ecg.args_with_ind.pop(1)
599
+ assert ecg.to_array_contraction() == _array_contraction(_array_tensor_product(A, X, B, D), (1, 4), (2, 3))
600
+
601
+ ecg.args_with_ind[0].indices[1] = ecg.args_with_ind[1].indices[0]
602
+ ecg.args_with_ind[1].indices[1] = ecg.args_with_ind[2].indices[0]
603
+ assert ecg.to_array_contraction() == _array_contraction(_array_tensor_product(A, X, B, D), (1, 2), (3, 4))
604
+
605
+ ecg.insert_after(ecg.args_with_ind[1], _ArgE(C))
606
+ assert ecg.to_array_contraction() == _array_contraction(_array_tensor_product(A, X, C, B, D), (1, 2), (3, 6))
607
+
608
+
609
+ def test_array_expressions_no_canonicalization():
610
+
611
+ tp = _array_tensor_product(M, N, P)
612
+
613
+ # ArrayTensorProduct:
614
+
615
+ expr = ArrayTensorProduct(tp, N)
616
+ assert str(expr) == "ArrayTensorProduct(ArrayTensorProduct(M, N, P), N)"
617
+ assert expr.doit() == ArrayTensorProduct(M, N, P, N)
618
+
619
+ expr = ArrayTensorProduct(ArrayContraction(M, (0, 1)), N)
620
+ assert str(expr) == "ArrayTensorProduct(ArrayContraction(M, (0, 1)), N)"
621
+ assert expr.doit() == ArrayContraction(ArrayTensorProduct(M, N), (0, 1))
622
+
623
+ expr = ArrayTensorProduct(ArrayDiagonal(M, (0, 1)), N)
624
+ assert str(expr) == "ArrayTensorProduct(ArrayDiagonal(M, (0, 1)), N)"
625
+ assert expr.doit() == PermuteDims(ArrayDiagonal(ArrayTensorProduct(M, N), (0, 1)), [2, 0, 1])
626
+
627
+ expr = ArrayTensorProduct(PermuteDims(M, [1, 0]), N)
628
+ assert str(expr) == "ArrayTensorProduct(PermuteDims(M, (0 1)), N)"
629
+ assert expr.doit() == PermuteDims(ArrayTensorProduct(M, N), [1, 0, 2, 3])
630
+
631
+ # ArrayContraction:
632
+
633
+ expr = ArrayContraction(_array_contraction(tp, (0, 2)), (0, 1))
634
+ assert isinstance(expr, ArrayContraction)
635
+ assert isinstance(expr.expr, ArrayContraction)
636
+ assert str(expr) == "ArrayContraction(ArrayContraction(ArrayTensorProduct(M, N, P), (0, 2)), (0, 1))"
637
+ assert expr.doit() == ArrayContraction(tp, (0, 2), (1, 3))
638
+
639
+ expr = ArrayContraction(ArrayContraction(ArrayContraction(tp, (0, 1)), (0, 1)), (0, 1))
640
+ assert expr.doit() == ArrayContraction(tp, (0, 1), (2, 3), (4, 5))
641
+ # assert expr._canonicalize() == ArrayContraction(ArrayContraction(tp, (0, 1)), (0, 1), (2, 3))
642
+
643
+ expr = ArrayContraction(ArrayDiagonal(tp, (0, 1)), (0, 1))
644
+ assert str(expr) == "ArrayContraction(ArrayDiagonal(ArrayTensorProduct(M, N, P), (0, 1)), (0, 1))"
645
+ assert expr.doit() == ArrayDiagonal(ArrayContraction(ArrayTensorProduct(N, M, P), (0, 1)), (0, 1))
646
+
647
+ expr = ArrayContraction(PermuteDims(M, [1, 0]), (0, 1))
648
+ assert str(expr) == "ArrayContraction(PermuteDims(M, (0 1)), (0, 1))"
649
+ assert expr.doit() == ArrayContraction(M, (0, 1))
650
+
651
+ # ArrayDiagonal:
652
+
653
+ expr = ArrayDiagonal(ArrayDiagonal(tp, (0, 2)), (0, 1))
654
+ assert str(expr) == "ArrayDiagonal(ArrayDiagonal(ArrayTensorProduct(M, N, P), (0, 2)), (0, 1))"
655
+ assert expr.doit() == ArrayDiagonal(tp, (0, 2), (1, 3))
656
+
657
+ expr = ArrayDiagonal(ArrayDiagonal(ArrayDiagonal(tp, (0, 1)), (0, 1)), (0, 1))
658
+ assert expr.doit() == ArrayDiagonal(tp, (0, 1), (2, 3), (4, 5))
659
+ assert expr._canonicalize() == expr.doit()
660
+
661
+ expr = ArrayDiagonal(ArrayContraction(tp, (0, 1)), (0, 1))
662
+ assert str(expr) == "ArrayDiagonal(ArrayContraction(ArrayTensorProduct(M, N, P), (0, 1)), (0, 1))"
663
+ assert expr.doit() == expr
664
+
665
+ expr = ArrayDiagonal(PermuteDims(M, [1, 0]), (0, 1))
666
+ assert str(expr) == "ArrayDiagonal(PermuteDims(M, (0 1)), (0, 1))"
667
+ assert expr.doit() == ArrayDiagonal(M, (0, 1))
668
+
669
+ # ArrayAdd:
670
+
671
+ expr = ArrayAdd(M)
672
+ assert isinstance(expr, ArrayAdd)
673
+ assert expr.doit() == M
674
+
675
+ expr = ArrayAdd(ArrayAdd(M, N), P)
676
+ assert str(expr) == "ArrayAdd(ArrayAdd(M, N), P)"
677
+ assert expr.doit() == ArrayAdd(M, N, P)
678
+
679
+ expr = ArrayAdd(M, ArrayAdd(N, ArrayAdd(P, M)))
680
+ assert expr.doit() == ArrayAdd(M, N, P, M)
681
+ assert expr._canonicalize() == ArrayAdd(M, N, ArrayAdd(P, M))
682
+
683
+ expr = ArrayAdd(M, ZeroArray(k, k), N)
684
+ assert str(expr) == "ArrayAdd(M, ZeroArray(k, k), N)"
685
+ assert expr.doit() == ArrayAdd(M, N)
686
+
687
+ # PermuteDims:
688
+
689
+ expr = PermuteDims(PermuteDims(M, [1, 0]), [1, 0])
690
+ assert str(expr) == "PermuteDims(PermuteDims(M, (0 1)), (0 1))"
691
+ assert expr.doit() == M
692
+
693
+ expr = PermuteDims(PermuteDims(PermuteDims(M, [1, 0]), [1, 0]), [1, 0])
694
+ assert expr.doit() == PermuteDims(M, [1, 0])
695
+ assert expr._canonicalize() == expr.doit()
696
+
697
+ # Reshape
698
+
699
+ expr = Reshape(A, (k**2,))
700
+ assert expr.shape == (k**2,)
701
+ assert isinstance(expr, Reshape)
702
+
703
+
704
+ def test_array_expr_construction_with_functions():
705
+
706
+ tp = tensorproduct(M, N)
707
+ assert tp == ArrayTensorProduct(M, N)
708
+
709
+ expr = tensorproduct(A, eye(2))
710
+ assert expr == ArrayTensorProduct(A, eye(2))
711
+
712
+ # Contraction:
713
+
714
+ expr = tensorcontraction(M, (0, 1))
715
+ assert expr == ArrayContraction(M, (0, 1))
716
+
717
+ expr = tensorcontraction(tp, (1, 2))
718
+ assert expr == ArrayContraction(tp, (1, 2))
719
+
720
+ expr = tensorcontraction(tensorcontraction(tp, (1, 2)), (0, 1))
721
+ assert expr == ArrayContraction(tp, (0, 3), (1, 2))
722
+
723
+ # Diagonalization:
724
+
725
+ expr = tensordiagonal(M, (0, 1))
726
+ assert expr == ArrayDiagonal(M, (0, 1))
727
+
728
+ expr = tensordiagonal(tensordiagonal(tp, (0, 1)), (0, 1))
729
+ assert expr == ArrayDiagonal(tp, (0, 1), (2, 3))
730
+
731
+ # Permutation of dimensions:
732
+
733
+ expr = permutedims(M, [1, 0])
734
+ assert expr == PermuteDims(M, [1, 0])
735
+
736
+ expr = permutedims(PermuteDims(tp, [1, 0, 2, 3]), [0, 1, 3, 2])
737
+ assert expr == PermuteDims(tp, [1, 0, 3, 2])
738
+
739
+ expr = PermuteDims(tp, index_order_new=["a", "b", "c", "d"], index_order_old=["d", "c", "b", "a"])
740
+ assert expr == PermuteDims(tp, [3, 2, 1, 0])
741
+
742
+ arr = Array(range(32)).reshape(2, 2, 2, 2, 2)
743
+ expr = PermuteDims(arr, index_order_new=["a", "b", "c", "d", "e"], index_order_old=['b', 'e', 'a', 'd', 'c'])
744
+ assert expr == PermuteDims(arr, [2, 0, 4, 3, 1])
745
+ assert expr.as_explicit() == permutedims(arr, index_order_new=["a", "b", "c", "d", "e"], index_order_old=['b', 'e', 'a', 'd', 'c'])
746
+
747
+
748
+ def test_array_element_expressions():
749
+ # Check commutative property:
750
+ assert M[0, 0]*N[0, 0] == N[0, 0]*M[0, 0]
751
+
752
+ # Check derivatives:
753
+ assert M[0, 0].diff(M[0, 0]) == 1
754
+ assert M[0, 0].diff(M[1, 0]) == 0
755
+ assert M[0, 0].diff(N[0, 0]) == 0
756
+ assert M[0, 1].diff(M[i, j]) == KroneckerDelta(i, 0)*KroneckerDelta(j, 1)
757
+ assert M[0, 1].diff(N[i, j]) == 0
758
+
759
+ K4 = ArraySymbol("K4", shape=(k, k, k, k))
760
+
761
+ assert K4[i, j, k, l].diff(K4[1, 2, 3, 4]) == (
762
+ KroneckerDelta(i, 1)*KroneckerDelta(j, 2)*KroneckerDelta(k, 3)*KroneckerDelta(l, 4)
763
+ )
764
+
765
+
766
+ def test_array_expr_reshape():
767
+
768
+ A = MatrixSymbol("A", 2, 2)
769
+ B = ArraySymbol("B", (2, 2, 2))
770
+ C = Array([1, 2, 3, 4])
771
+
772
+ expr = Reshape(A, (4,))
773
+ assert expr.expr == A
774
+ assert expr.shape == (4,)
775
+ assert expr.as_explicit() == Array([A[0, 0], A[0, 1], A[1, 0], A[1, 1]])
776
+
777
+ expr = Reshape(B, (2, 4))
778
+ assert expr.expr == B
779
+ assert expr.shape == (2, 4)
780
+ ee = expr.as_explicit()
781
+ assert isinstance(ee, ImmutableDenseNDimArray)
782
+ assert ee.shape == (2, 4)
783
+ assert ee == Array([[B[0, 0, 0], B[0, 0, 1], B[0, 1, 0], B[0, 1, 1]], [B[1, 0, 0], B[1, 0, 1], B[1, 1, 0], B[1, 1, 1]]])
784
+
785
+ expr = Reshape(A, (k, 2))
786
+ assert expr.shape == (k, 2)
787
+
788
+ raises(ValueError, lambda: Reshape(A, (2, 3)))
789
+ raises(ValueError, lambda: Reshape(A, (3,)))
790
+
791
+ expr = Reshape(C, (2, 2))
792
+ assert expr.expr == C
793
+ assert expr.shape == (2, 2)
794
+ assert expr.doit() == Array([[1, 2], [3, 4]])
795
+
796
+
797
+ def test_array_expr_as_explicit_with_explicit_component_arrays():
798
+ # Test if .as_explicit() works with explicit-component arrays
799
+ # nested in array expressions:
800
+ from sympy.abc import x, y, z, t
801
+ A = Array([[x, y], [z, t]])
802
+ assert ArrayTensorProduct(A, A).as_explicit() == tensorproduct(A, A)
803
+ assert ArrayDiagonal(A, (0, 1)).as_explicit() == tensordiagonal(A, (0, 1))
804
+ assert ArrayContraction(A, (0, 1)).as_explicit() == tensorcontraction(A, (0, 1))
805
+ assert ArrayAdd(A, A).as_explicit() == A + A
806
+ assert ArrayElementwiseApplyFunc(sin, A).as_explicit() == A.applyfunc(sin)
807
+ assert PermuteDims(A, [1, 0]).as_explicit() == permutedims(A, [1, 0])
808
+ assert Reshape(A, [4]).as_explicit() == A.reshape(4)
evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_arrayexpr_derivatives.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from sympy.core.symbol import symbols
2
+ from sympy.functions.elementary.trigonometric import (cos, sin)
3
+ from sympy.matrices.expressions.matexpr import MatrixSymbol
4
+ from sympy.matrices.expressions.special import Identity
5
+ from sympy.matrices.expressions.applyfunc import ElementwiseApplyFunction
6
+ from sympy.tensor.array.expressions.array_expressions import ArraySymbol, ArrayTensorProduct, \
7
+ PermuteDims, ArrayDiagonal, ArrayElementwiseApplyFunc, ArrayContraction, _permute_dims, Reshape
8
+ from sympy.tensor.array.expressions.arrayexpr_derivatives import array_derive
9
+
10
+ k = symbols("k")
11
+
12
+ I = Identity(k)
13
+ X = MatrixSymbol("X", k, k)
14
+ x = MatrixSymbol("x", k, 1)
15
+
16
+ A = MatrixSymbol("A", k, k)
17
+ B = MatrixSymbol("B", k, k)
18
+ C = MatrixSymbol("C", k, k)
19
+ D = MatrixSymbol("D", k, k)
20
+
21
+ A1 = ArraySymbol("A", (3, 2, k))
22
+
23
+
24
+ def test_arrayexpr_derivatives1():
25
+
26
+ res = array_derive(X, X)
27
+ assert res == PermuteDims(ArrayTensorProduct(I, I), [0, 2, 1, 3])
28
+
29
+ cg = ArrayTensorProduct(A, X, B)
30
+ res = array_derive(cg, X)
31
+ assert res == _permute_dims(
32
+ ArrayTensorProduct(I, A, I, B),
33
+ [0, 4, 2, 3, 1, 5, 6, 7])
34
+
35
+ cg = ArrayContraction(X, (0, 1))
36
+ res = array_derive(cg, X)
37
+ assert res == ArrayContraction(ArrayTensorProduct(I, I), (1, 3))
38
+
39
+ cg = ArrayDiagonal(X, (0, 1))
40
+ res = array_derive(cg, X)
41
+ assert res == ArrayDiagonal(ArrayTensorProduct(I, I), (1, 3))
42
+
43
+ cg = ElementwiseApplyFunction(sin, X)
44
+ res = array_derive(cg, X)
45
+ assert res.dummy_eq(ArrayDiagonal(
46
+ ArrayTensorProduct(
47
+ ElementwiseApplyFunction(cos, X),
48
+ I,
49
+ I
50
+ ), (0, 3), (1, 5)))
51
+
52
+ cg = ArrayElementwiseApplyFunc(sin, X)
53
+ res = array_derive(cg, X)
54
+ assert res.dummy_eq(ArrayDiagonal(
55
+ ArrayTensorProduct(
56
+ I,
57
+ I,
58
+ ArrayElementwiseApplyFunc(cos, X)
59
+ ), (1, 4), (3, 5)))
60
+
61
+ res = array_derive(A1, A1)
62
+ assert res == PermuteDims(
63
+ ArrayTensorProduct(Identity(3), Identity(2), Identity(k)),
64
+ [0, 2, 4, 1, 3, 5]
65
+ )
66
+
67
+ cg = ArrayElementwiseApplyFunc(sin, A1)
68
+ res = array_derive(cg, A1)
69
+ assert res.dummy_eq(ArrayDiagonal(
70
+ ArrayTensorProduct(
71
+ Identity(3), Identity(2), Identity(k),
72
+ ArrayElementwiseApplyFunc(cos, A1)
73
+ ), (1, 6), (3, 7), (5, 8)
74
+ ))
75
+
76
+ cg = Reshape(A, (k**2,))
77
+ res = array_derive(cg, A)
78
+ assert res == Reshape(PermuteDims(ArrayTensorProduct(I, I), [0, 2, 1, 3]), (k, k, k**2))
evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_convert_array_to_indexed.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from sympy import Sum, Dummy, sin
2
+ from sympy.tensor.array.expressions import ArraySymbol, ArrayTensorProduct, ArrayContraction, PermuteDims, \
3
+ ArrayDiagonal, ArrayAdd, OneArray, ZeroArray, convert_indexed_to_array, ArrayElementwiseApplyFunc, Reshape
4
+ from sympy.tensor.array.expressions.from_array_to_indexed import convert_array_to_indexed
5
+
6
+ from sympy.abc import i, j, k, l, m, n, o
7
+
8
+
9
+ def test_convert_array_to_indexed_main():
10
+ A = ArraySymbol("A", (3, 3, 3))
11
+ B = ArraySymbol("B", (3, 3))
12
+ C = ArraySymbol("C", (3, 3))
13
+
14
+ d_ = Dummy("d_")
15
+
16
+ assert convert_array_to_indexed(A, [i, j, k]) == A[i, j, k]
17
+
18
+ expr = ArrayTensorProduct(A, B, C)
19
+ conv = convert_array_to_indexed(expr, [i,j,k,l,m,n,o])
20
+ assert conv == A[i,j,k]*B[l,m]*C[n,o]
21
+ assert convert_indexed_to_array(conv, [i,j,k,l,m,n,o]) == expr
22
+
23
+ expr = ArrayContraction(A, (0, 2))
24
+ assert convert_array_to_indexed(expr, [i]).dummy_eq(Sum(A[d_, i, d_], (d_, 0, 2)))
25
+
26
+ expr = ArrayDiagonal(A, (0, 2))
27
+ assert convert_array_to_indexed(expr, [i, j]) == A[j, i, j]
28
+
29
+ expr = PermuteDims(A, [1, 2, 0])
30
+ conv = convert_array_to_indexed(expr, [i, j, k])
31
+ assert conv == A[k, i, j]
32
+ assert convert_indexed_to_array(conv, [i, j, k]) == expr
33
+
34
+ expr = ArrayAdd(B, C, PermuteDims(C, [1, 0]))
35
+ conv = convert_array_to_indexed(expr, [i, j])
36
+ assert conv == B[i, j] + C[i, j] + C[j, i]
37
+ assert convert_indexed_to_array(conv, [i, j]) == expr
38
+
39
+ expr = ArrayElementwiseApplyFunc(sin, A)
40
+ conv = convert_array_to_indexed(expr, [i, j, k])
41
+ assert conv == sin(A[i, j, k])
42
+ assert convert_indexed_to_array(conv, [i, j, k]).dummy_eq(expr)
43
+
44
+ assert convert_array_to_indexed(OneArray(3, 3), [i, j]) == 1
45
+ assert convert_array_to_indexed(ZeroArray(3, 3), [i, j]) == 0
46
+
47
+ expr = Reshape(A, (27,))
48
+ assert convert_array_to_indexed(expr, [i]) == A[i // 9, i // 3 % 3, i % 3]
49
+
50
+ X = ArraySymbol("X", (2, 3, 4, 5, 6))
51
+ expr = Reshape(X, (2*3*4*5*6,))
52
+ assert convert_array_to_indexed(expr, [i]) == X[i // 360, i // 120 % 3, i // 30 % 4, i // 6 % 5, i % 6]
53
+
54
+ expr = Reshape(X, (4, 9, 2, 2, 5))
55
+ one_index = 180*i + 20*j + 10*k + 5*l + m
56
+ expected = X[one_index // (3*4*5*6), one_index // (4*5*6) % 3, one_index // (5*6) % 4, one_index // 6 % 5, one_index % 6]
57
+ assert convert_array_to_indexed(expr, [i, j, k, l, m]) == expected
58
+
59
+ X = ArraySymbol("X", (2*3*5,))
60
+ expr = Reshape(X, (2, 3, 5))
61
+ assert convert_array_to_indexed(expr, [i, j, k]) == X[15*i + 5*j + k]
evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_convert_array_to_matrix.py ADDED
@@ -0,0 +1,689 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from sympy import Lambda, S, Dummy, KroneckerProduct
2
+ from sympy.core.symbol import symbols
3
+ from sympy.functions.elementary.miscellaneous import sqrt
4
+ from sympy.functions.elementary.trigonometric import cos, sin
5
+ from sympy.matrices.expressions.hadamard import HadamardProduct, HadamardPower
6
+ from sympy.matrices.expressions.special import (Identity, OneMatrix, ZeroMatrix)
7
+ from sympy.matrices.expressions.matexpr import MatrixElement
8
+ from sympy.tensor.array.expressions.from_matrix_to_array import convert_matrix_to_array
9
+ from sympy.tensor.array.expressions.from_array_to_matrix import _support_function_tp1_recognize, \
10
+ _array_diag2contr_diagmatrix, convert_array_to_matrix, _remove_trivial_dims, _array2matrix, \
11
+ _combine_removed, identify_removable_identity_matrices, _array_contraction_to_diagonal_multiple_identity
12
+ from sympy.matrices.expressions.matexpr import MatrixSymbol
13
+ from sympy.combinatorics import Permutation
14
+ from sympy.matrices.expressions.diagonal import DiagMatrix, DiagonalMatrix
15
+ from sympy.matrices import Trace, MatMul, Transpose
16
+ from sympy.tensor.array.expressions.array_expressions import ZeroArray, OneArray, \
17
+ ArrayElement, ArraySymbol, ArrayElementwiseApplyFunc, _array_tensor_product, _array_contraction, \
18
+ _array_diagonal, _permute_dims, PermuteDims, ArrayAdd, ArrayDiagonal, ArrayContraction, ArrayTensorProduct
19
+ from sympy.testing.pytest import raises
20
+
21
+
22
+ i, j, k, l, m, n = symbols("i j k l m n")
23
+
24
+ I = Identity(k)
25
+ I1 = Identity(1)
26
+
27
+ M = MatrixSymbol("M", k, k)
28
+ N = MatrixSymbol("N", k, k)
29
+ P = MatrixSymbol("P", k, k)
30
+ Q = MatrixSymbol("Q", k, k)
31
+
32
+ A = MatrixSymbol("A", k, k)
33
+ B = MatrixSymbol("B", k, k)
34
+ C = MatrixSymbol("C", k, k)
35
+ D = MatrixSymbol("D", k, k)
36
+
37
+ X = MatrixSymbol("X", k, k)
38
+ Y = MatrixSymbol("Y", k, k)
39
+
40
+ a = MatrixSymbol("a", k, 1)
41
+ b = MatrixSymbol("b", k, 1)
42
+ c = MatrixSymbol("c", k, 1)
43
+ d = MatrixSymbol("d", k, 1)
44
+
45
+ x = MatrixSymbol("x", k, 1)
46
+ y = MatrixSymbol("y", k, 1)
47
+
48
+
49
+ def test_arrayexpr_convert_array_to_matrix():
50
+
51
+ cg = _array_contraction(_array_tensor_product(M), (0, 1))
52
+ assert convert_array_to_matrix(cg) == Trace(M)
53
+
54
+ cg = _array_contraction(_array_tensor_product(M, N), (0, 1), (2, 3))
55
+ assert convert_array_to_matrix(cg) == Trace(M) * Trace(N)
56
+
57
+ cg = _array_contraction(_array_tensor_product(M, N), (0, 3), (1, 2))
58
+ assert convert_array_to_matrix(cg) == Trace(M * N)
59
+
60
+ cg = _array_contraction(_array_tensor_product(M, N), (0, 2), (1, 3))
61
+ assert convert_array_to_matrix(cg) == Trace(M * N.T)
62
+
63
+ cg = convert_matrix_to_array(M * N * P)
64
+ assert convert_array_to_matrix(cg) == M * N * P
65
+
66
+ cg = convert_matrix_to_array(M * N.T * P)
67
+ assert convert_array_to_matrix(cg) == M * N.T * P
68
+
69
+ cg = _array_contraction(_array_tensor_product(M,N,P,Q), (1, 2), (5, 6))
70
+ assert convert_array_to_matrix(cg) == _array_tensor_product(M * N, P * Q)
71
+
72
+ cg = _array_contraction(_array_tensor_product(-2, M, N), (1, 2))
73
+ assert convert_array_to_matrix(cg) == -2 * M * N
74
+
75
+ a = MatrixSymbol("a", k, 1)
76
+ b = MatrixSymbol("b", k, 1)
77
+ c = MatrixSymbol("c", k, 1)
78
+ cg = PermuteDims(
79
+ _array_contraction(
80
+ _array_tensor_product(
81
+ a,
82
+ ArrayAdd(
83
+ _array_tensor_product(b, c),
84
+ _array_tensor_product(c, b),
85
+ )
86
+ ), (2, 4)), [0, 1, 3, 2])
87
+ assert convert_array_to_matrix(cg) == a * (b.T * c + c.T * b)
88
+
89
+ za = ZeroArray(m, n)
90
+ assert convert_array_to_matrix(za) == ZeroMatrix(m, n)
91
+
92
+ cg = _array_tensor_product(3, M)
93
+ assert convert_array_to_matrix(cg) == 3 * M
94
+
95
+ # Partial conversion to matrix multiplication:
96
+ expr = _array_contraction(_array_tensor_product(M, N, P, Q), (0, 2), (1, 4, 6))
97
+ assert convert_array_to_matrix(expr) == _array_contraction(_array_tensor_product(M.T*N, P, Q), (0, 2, 4))
98
+
99
+ x = MatrixSymbol("x", k, 1)
100
+ cg = PermuteDims(
101
+ _array_contraction(_array_tensor_product(OneArray(1), x, OneArray(1), DiagMatrix(Identity(1))),
102
+ (0, 5)), Permutation(1, 2, 3))
103
+ assert convert_array_to_matrix(cg) == x
104
+
105
+ expr = ArrayAdd(M, PermuteDims(M, [1, 0]))
106
+ assert convert_array_to_matrix(expr) == M + Transpose(M)
107
+
108
+
109
+ def test_arrayexpr_convert_array_to_matrix2():
110
+ cg = _array_contraction(_array_tensor_product(M, N), (1, 3))
111
+ assert convert_array_to_matrix(cg) == M * N.T
112
+
113
+ cg = PermuteDims(_array_tensor_product(M, N), Permutation([0, 1, 3, 2]))
114
+ assert convert_array_to_matrix(cg) == _array_tensor_product(M, N.T)
115
+
116
+ cg = _array_tensor_product(M, PermuteDims(N, Permutation([1, 0])))
117
+ assert convert_array_to_matrix(cg) == _array_tensor_product(M, N.T)
118
+
119
+ cg = _array_contraction(
120
+ PermuteDims(
121
+ _array_tensor_product(M, N, P, Q), Permutation([0, 2, 3, 1, 4, 5, 7, 6])),
122
+ (1, 2), (3, 5)
123
+ )
124
+ assert convert_array_to_matrix(cg) == _array_tensor_product(M * P.T * Trace(N), Q.T)
125
+
126
+ cg = _array_contraction(
127
+ _array_tensor_product(M, N, P, PermuteDims(Q, Permutation([1, 0]))),
128
+ (1, 5), (2, 3)
129
+ )
130
+ assert convert_array_to_matrix(cg) == _array_tensor_product(M * P.T * Trace(N), Q.T)
131
+
132
+ cg = _array_tensor_product(M, PermuteDims(N, [1, 0]))
133
+ assert convert_array_to_matrix(cg) == _array_tensor_product(M, N.T)
134
+
135
+ cg = _array_tensor_product(PermuteDims(M, [1, 0]), PermuteDims(N, [1, 0]))
136
+ assert convert_array_to_matrix(cg) == _array_tensor_product(M.T, N.T)
137
+
138
+ cg = _array_tensor_product(PermuteDims(N, [1, 0]), PermuteDims(M, [1, 0]))
139
+ assert convert_array_to_matrix(cg) == _array_tensor_product(N.T, M.T)
140
+
141
+ cg = _array_contraction(M, (0,), (1,))
142
+ assert convert_array_to_matrix(cg) == OneMatrix(1, k)*M*OneMatrix(k, 1)
143
+
144
+ cg = _array_contraction(x, (0,), (1,))
145
+ assert convert_array_to_matrix(cg) == OneMatrix(1, k)*x
146
+
147
+ Xm = MatrixSymbol("Xm", m, n)
148
+ cg = _array_contraction(Xm, (0,), (1,))
149
+ assert convert_array_to_matrix(cg) == OneMatrix(1, m)*Xm*OneMatrix(n, 1)
150
+
151
+
152
+ def test_arrayexpr_convert_array_to_diagonalized_vector():
153
+
154
+ # Check matrix recognition over trivial dimensions:
155
+
156
+ cg = _array_tensor_product(a, b)
157
+ assert convert_array_to_matrix(cg) == a * b.T
158
+
159
+ cg = _array_tensor_product(I1, a, b)
160
+ assert convert_array_to_matrix(cg) == a * b.T
161
+
162
+ # Recognize trace inside a tensor product:
163
+
164
+ cg = _array_contraction(_array_tensor_product(A, B, C), (0, 3), (1, 2))
165
+ assert convert_array_to_matrix(cg) == Trace(A * B) * C
166
+
167
+ # Transform diagonal operator to contraction:
168
+
169
+ cg = _array_diagonal(_array_tensor_product(A, a), (1, 2))
170
+ assert _array_diag2contr_diagmatrix(cg) == _array_contraction(_array_tensor_product(A, OneArray(1), DiagMatrix(a)), (1, 3))
171
+ assert convert_array_to_matrix(cg) == A * DiagMatrix(a)
172
+
173
+ cg = _array_diagonal(_array_tensor_product(a, b), (0, 2))
174
+ assert _array_diag2contr_diagmatrix(cg) == _permute_dims(
175
+ _array_contraction(_array_tensor_product(DiagMatrix(a), OneArray(1), b), (0, 3)), [1, 2, 0]
176
+ )
177
+ assert convert_array_to_matrix(cg) == b.T * DiagMatrix(a)
178
+
179
+ cg = _array_diagonal(_array_tensor_product(A, a), (0, 2))
180
+ assert _array_diag2contr_diagmatrix(cg) == _array_contraction(_array_tensor_product(A, OneArray(1), DiagMatrix(a)), (0, 3))
181
+ assert convert_array_to_matrix(cg) == A.T * DiagMatrix(a)
182
+
183
+ cg = _array_diagonal(_array_tensor_product(I, x, I1), (0, 2), (3, 5))
184
+ assert _array_diag2contr_diagmatrix(cg) == _array_contraction(_array_tensor_product(I, OneArray(1), I1, DiagMatrix(x)), (0, 5))
185
+ assert convert_array_to_matrix(cg) == DiagMatrix(x)
186
+
187
+ cg = _array_diagonal(_array_tensor_product(I, x, A, B), (1, 2), (5, 6))
188
+ assert _array_diag2contr_diagmatrix(cg) == _array_diagonal(_array_contraction(_array_tensor_product(I, OneArray(1), A, B, DiagMatrix(x)), (1, 7)), (5, 6))
189
+ # TODO: this is returning a wrong result:
190
+ # convert_array_to_matrix(cg)
191
+
192
+ cg = _array_diagonal(_array_tensor_product(I1, a, b), (1, 3, 5))
193
+ assert convert_array_to_matrix(cg) == a*b.T
194
+
195
+ cg = _array_diagonal(_array_tensor_product(I1, a, b), (1, 3))
196
+ assert _array_diag2contr_diagmatrix(cg) == _array_contraction(_array_tensor_product(OneArray(1), a, b, I1), (2, 6))
197
+ assert convert_array_to_matrix(cg) == a*b.T
198
+
199
+ cg = _array_diagonal(_array_tensor_product(x, I1), (1, 2))
200
+ assert isinstance(cg, ArrayDiagonal)
201
+ assert cg.diagonal_indices == ((1, 2),)
202
+ assert convert_array_to_matrix(cg) == x
203
+
204
+ cg = _array_diagonal(_array_tensor_product(x, I), (0, 2))
205
+ assert _array_diag2contr_diagmatrix(cg) == _array_contraction(_array_tensor_product(OneArray(1), I, DiagMatrix(x)), (1, 3))
206
+ assert convert_array_to_matrix(cg).doit() == DiagMatrix(x)
207
+
208
+ raises(ValueError, lambda: _array_diagonal(x, (1,)))
209
+
210
+ # Ignore identity matrices with contractions:
211
+
212
+ cg = _array_contraction(_array_tensor_product(I, A, I, I), (0, 2), (1, 3), (5, 7))
213
+ assert cg.split_multiple_contractions() == cg
214
+ assert convert_array_to_matrix(cg) == Trace(A) * I
215
+
216
+ cg = _array_contraction(_array_tensor_product(Trace(A) * I, I, I), (1, 5), (3, 4))
217
+ assert cg.split_multiple_contractions() == cg
218
+ assert convert_array_to_matrix(cg).doit() == Trace(A) * I
219
+
220
+ # Add DiagMatrix when required:
221
+
222
+ cg = _array_contraction(_array_tensor_product(A, a), (1, 2))
223
+ assert cg.split_multiple_contractions() == cg
224
+ assert convert_array_to_matrix(cg) == A * a
225
+
226
+ cg = _array_contraction(_array_tensor_product(A, a, B), (1, 2, 4))
227
+ assert cg.split_multiple_contractions() == _array_contraction(_array_tensor_product(A, DiagMatrix(a), OneArray(1), B), (1, 2), (3, 5))
228
+ assert convert_array_to_matrix(cg) == A * DiagMatrix(a) * B
229
+
230
+ cg = _array_contraction(_array_tensor_product(A, a, B), (0, 2, 4))
231
+ assert cg.split_multiple_contractions() == _array_contraction(_array_tensor_product(A, DiagMatrix(a), OneArray(1), B), (0, 2), (3, 5))
232
+ assert convert_array_to_matrix(cg) == A.T * DiagMatrix(a) * B
233
+
234
+ cg = _array_contraction(_array_tensor_product(A, a, b, a.T, B), (0, 2, 4, 7, 9))
235
+ assert cg.split_multiple_contractions() == _array_contraction(_array_tensor_product(A, DiagMatrix(a), OneArray(1),
236
+ DiagMatrix(b), OneArray(1), DiagMatrix(a), OneArray(1), B),
237
+ (0, 2), (3, 5), (6, 9), (8, 12))
238
+ assert convert_array_to_matrix(cg) == A.T * DiagMatrix(a) * DiagMatrix(b) * DiagMatrix(a) * B.T
239
+
240
+ cg = _array_contraction(_array_tensor_product(I1, I1, I1), (1, 2, 4))
241
+ assert cg.split_multiple_contractions() == _array_contraction(_array_tensor_product(I1, I1, OneArray(1), I1), (1, 2), (3, 5))
242
+ assert convert_array_to_matrix(cg) == 1
243
+
244
+ cg = _array_contraction(_array_tensor_product(I, I, I, I, A), (1, 2, 8), (5, 6, 9))
245
+ assert convert_array_to_matrix(cg.split_multiple_contractions()).doit() == A
246
+
247
+ cg = _array_contraction(_array_tensor_product(A, a, C, a, B), (1, 2, 4), (5, 6, 8))
248
+ expected = _array_contraction(_array_tensor_product(A, DiagMatrix(a), OneArray(1), C, DiagMatrix(a), OneArray(1), B), (1, 3), (2, 5), (6, 7), (8, 10))
249
+ assert cg.split_multiple_contractions() == expected
250
+ assert convert_array_to_matrix(cg) == A * DiagMatrix(a) * C * DiagMatrix(a) * B
251
+
252
+ cg = _array_contraction(_array_tensor_product(a, I1, b, I1, (a.T*b).applyfunc(cos)), (1, 2, 8), (5, 6, 9))
253
+ expected = _array_contraction(_array_tensor_product(a, I1, OneArray(1), b, I1, OneArray(1), (a.T*b).applyfunc(cos)),
254
+ (1, 3), (2, 10), (6, 8), (7, 11))
255
+ assert cg.split_multiple_contractions().dummy_eq(expected)
256
+ assert convert_array_to_matrix(cg).doit().dummy_eq(MatMul(a, (a.T * b).applyfunc(cos), b.T))
257
+
258
+
259
+ def test_arrayexpr_convert_array_contraction_tp_additions():
260
+ a = ArrayAdd(
261
+ _array_tensor_product(M, N),
262
+ _array_tensor_product(N, M)
263
+ )
264
+ tp = _array_tensor_product(P, a, Q)
265
+ expr = _array_contraction(tp, (3, 4))
266
+ expected = _array_tensor_product(
267
+ P,
268
+ ArrayAdd(
269
+ _array_contraction(_array_tensor_product(M, N), (1, 2)),
270
+ _array_contraction(_array_tensor_product(N, M), (1, 2)),
271
+ ),
272
+ Q
273
+ )
274
+ assert expr == expected
275
+ assert convert_array_to_matrix(expr) == _array_tensor_product(P, M * N + N * M, Q)
276
+
277
+ expr = _array_contraction(tp, (1, 2), (3, 4), (5, 6))
278
+ result = _array_contraction(
279
+ _array_tensor_product(
280
+ P,
281
+ ArrayAdd(
282
+ _array_contraction(_array_tensor_product(M, N), (1, 2)),
283
+ _array_contraction(_array_tensor_product(N, M), (1, 2)),
284
+ ),
285
+ Q
286
+ ), (1, 2), (3, 4))
287
+ assert expr == result
288
+ assert convert_array_to_matrix(expr) == P * (M * N + N * M) * Q
289
+
290
+
291
+ def test_arrayexpr_convert_array_to_implicit_matmul():
292
+ # Trivial dimensions are suppressed, so the result can be expressed in matrix form:
293
+
294
+ cg = _array_tensor_product(a, b)
295
+ assert convert_array_to_matrix(cg) == a * b.T
296
+
297
+ cg = _array_tensor_product(a, b, I)
298
+ assert convert_array_to_matrix(cg) == _array_tensor_product(a*b.T, I)
299
+
300
+ cg = _array_tensor_product(I, a, b)
301
+ assert convert_array_to_matrix(cg) == _array_tensor_product(I, a*b.T)
302
+
303
+ cg = _array_tensor_product(a, I, b)
304
+ assert convert_array_to_matrix(cg) == _array_tensor_product(a, I, b)
305
+
306
+ cg = _array_contraction(_array_tensor_product(I, I), (1, 2))
307
+ assert convert_array_to_matrix(cg) == I
308
+
309
+ cg = PermuteDims(_array_tensor_product(I, Identity(1)), [0, 2, 1, 3])
310
+ assert convert_array_to_matrix(cg) == I
311
+
312
+
313
+ def test_arrayexpr_convert_array_to_matrix_remove_trivial_dims():
314
+
315
+ # Tensor Product:
316
+ assert _remove_trivial_dims(_array_tensor_product(a, b)) == (a * b.T, [1, 3])
317
+ assert _remove_trivial_dims(_array_tensor_product(a.T, b)) == (a * b.T, [0, 3])
318
+ assert _remove_trivial_dims(_array_tensor_product(a, b.T)) == (a * b.T, [1, 2])
319
+ assert _remove_trivial_dims(_array_tensor_product(a.T, b.T)) == (a * b.T, [0, 2])
320
+
321
+ assert _remove_trivial_dims(_array_tensor_product(I, a.T, b.T)) == (_array_tensor_product(I, a * b.T), [2, 4])
322
+ assert _remove_trivial_dims(_array_tensor_product(a.T, I, b.T)) == (_array_tensor_product(a.T, I, b.T), [])
323
+
324
+ assert _remove_trivial_dims(_array_tensor_product(a, I)) == (_array_tensor_product(a, I), [])
325
+ assert _remove_trivial_dims(_array_tensor_product(I, a)) == (_array_tensor_product(I, a), [])
326
+
327
+ assert _remove_trivial_dims(_array_tensor_product(a.T, b.T, c, d)) == (
328
+ _array_tensor_product(a * b.T, c * d.T), [0, 2, 5, 7])
329
+ assert _remove_trivial_dims(_array_tensor_product(a.T, I, b.T, c, d, I)) == (
330
+ _array_tensor_product(a.T, I, b*c.T, d, I), [4, 7])
331
+
332
+ # Addition:
333
+
334
+ cg = ArrayAdd(_array_tensor_product(a, b), _array_tensor_product(c, d))
335
+ assert _remove_trivial_dims(cg) == (a * b.T + c * d.T, [1, 3])
336
+
337
+ # Permute Dims:
338
+
339
+ cg = PermuteDims(_array_tensor_product(a, b), Permutation(3)(1, 2))
340
+ assert _remove_trivial_dims(cg) == (a * b.T, [2, 3])
341
+
342
+ cg = PermuteDims(_array_tensor_product(a, I, b), Permutation(5)(1, 2, 3, 4))
343
+ assert _remove_trivial_dims(cg) == (cg, [])
344
+
345
+ cg = PermuteDims(_array_tensor_product(I, b, a), Permutation(5)(1, 2, 4, 5, 3))
346
+ assert _remove_trivial_dims(cg) == (PermuteDims(_array_tensor_product(I, b * a.T), [0, 2, 3, 1]), [4, 5])
347
+
348
+ # Diagonal:
349
+
350
+ cg = _array_diagonal(_array_tensor_product(M, a), (1, 2))
351
+ assert _remove_trivial_dims(cg) == (cg, [])
352
+
353
+ # Contraction:
354
+
355
+ cg = _array_contraction(_array_tensor_product(M, a), (1, 2))
356
+ assert _remove_trivial_dims(cg) == (cg, [])
357
+
358
+ # A few more cases to test the removal and shift of nested removed axes
359
+ # with array contractions and array diagonals:
360
+ tp = _array_tensor_product(
361
+ OneMatrix(1, 1),
362
+ M,
363
+ x,
364
+ OneMatrix(1, 1),
365
+ Identity(1),
366
+ )
367
+
368
+ expr = _array_contraction(tp, (1, 8))
369
+ rexpr, removed = _remove_trivial_dims(expr)
370
+ assert removed == [0, 5, 6, 7]
371
+
372
+ expr = _array_contraction(tp, (1, 8), (3, 4))
373
+ rexpr, removed = _remove_trivial_dims(expr)
374
+ assert removed == [0, 3, 4, 5]
375
+
376
+ expr = _array_diagonal(tp, (1, 8))
377
+ rexpr, removed = _remove_trivial_dims(expr)
378
+ assert removed == [0, 5, 6, 7, 8]
379
+
380
+ expr = _array_diagonal(tp, (1, 8), (3, 4))
381
+ rexpr, removed = _remove_trivial_dims(expr)
382
+ assert removed == [0, 3, 4, 5, 6]
383
+
384
+ expr = _array_diagonal(_array_contraction(_array_tensor_product(A, x, I, I1), (1, 2, 5)), (1, 4))
385
+ rexpr, removed = _remove_trivial_dims(expr)
386
+ assert removed == [2, 3]
387
+
388
+ cg = _array_diagonal(_array_tensor_product(PermuteDims(_array_tensor_product(x, I1), Permutation(1, 2, 3)), (x.T*x).applyfunc(sqrt)), (2, 4), (3, 5))
389
+ rexpr, removed = _remove_trivial_dims(cg)
390
+ assert removed == [1, 2]
391
+
392
+ # Contractions with identity matrices need to be followed by a permutation
393
+ # in order
394
+ cg = _array_contraction(_array_tensor_product(A, B, C, M, I), (1, 8))
395
+ ret, removed = _remove_trivial_dims(cg)
396
+ assert ret == PermuteDims(_array_tensor_product(A, B, C, M), [0, 2, 3, 4, 5, 6, 7, 1])
397
+ assert removed == []
398
+
399
+ cg = _array_contraction(_array_tensor_product(A, B, C, M, I), (1, 8), (3, 4))
400
+ ret, removed = _remove_trivial_dims(cg)
401
+ assert ret == PermuteDims(_array_contraction(_array_tensor_product(A, B, C, M), (3, 4)), [0, 2, 3, 4, 5, 1])
402
+ assert removed == []
403
+
404
+ # Trivial matrices are sometimes inserted into MatMul expressions:
405
+
406
+ cg = _array_tensor_product(b*b.T, a.T*a)
407
+ ret, removed = _remove_trivial_dims(cg)
408
+ assert ret == b*a.T*a*b.T
409
+ assert removed == [2, 3]
410
+
411
+ Xs = ArraySymbol("X", (3, 2, k))
412
+ cg = _array_tensor_product(M, Xs, b.T*c, a*a.T, b*b.T, c.T*d)
413
+ ret, removed = _remove_trivial_dims(cg)
414
+ assert ret == _array_tensor_product(M, Xs, a*b.T*c*c.T*d*a.T, b*b.T)
415
+ assert removed == [5, 6, 11, 12]
416
+
417
+ cg = _array_diagonal(_array_tensor_product(I, I1, x), (1, 4), (3, 5))
418
+ assert _remove_trivial_dims(cg) == (PermuteDims(_array_diagonal(_array_tensor_product(I, x), (1, 2)), Permutation(1, 2)), [1])
419
+
420
+ expr = _array_diagonal(_array_tensor_product(x, I, y), (0, 2))
421
+ assert _remove_trivial_dims(expr) == (PermuteDims(_array_tensor_product(DiagMatrix(x), y), [1, 2, 3, 0]), [0])
422
+
423
+ expr = _array_diagonal(_array_tensor_product(x, I, y), (0, 2), (3, 4))
424
+ assert _remove_trivial_dims(expr) == (expr, [])
425
+
426
+
427
+ def test_arrayexpr_convert_array_to_matrix_diag2contraction_diagmatrix():
428
+ cg = _array_diagonal(_array_tensor_product(M, a), (1, 2))
429
+ res = _array_diag2contr_diagmatrix(cg)
430
+ assert res.shape == cg.shape
431
+ assert res == _array_contraction(_array_tensor_product(M, OneArray(1), DiagMatrix(a)), (1, 3))
432
+
433
+ raises(ValueError, lambda: _array_diagonal(_array_tensor_product(a, M), (1, 2)))
434
+
435
+ cg = _array_diagonal(_array_tensor_product(a.T, M), (1, 2))
436
+ res = _array_diag2contr_diagmatrix(cg)
437
+ assert res.shape == cg.shape
438
+ assert res == _array_contraction(_array_tensor_product(OneArray(1), M, DiagMatrix(a.T)), (1, 4))
439
+
440
+ cg = _array_diagonal(_array_tensor_product(a.T, M, N, b.T), (1, 2), (4, 7))
441
+ res = _array_diag2contr_diagmatrix(cg)
442
+ assert res.shape == cg.shape
443
+ assert res == _array_contraction(
444
+ _array_tensor_product(OneArray(1), M, N, OneArray(1), DiagMatrix(a.T), DiagMatrix(b.T)), (1, 7), (3, 9))
445
+
446
+ cg = _array_diagonal(_array_tensor_product(a, M, N, b.T), (0, 2), (4, 7))
447
+ res = _array_diag2contr_diagmatrix(cg)
448
+ assert res.shape == cg.shape
449
+ assert res == _array_contraction(
450
+ _array_tensor_product(OneArray(1), M, N, OneArray(1), DiagMatrix(a), DiagMatrix(b.T)), (1, 6), (3, 9))
451
+
452
+ cg = _array_diagonal(_array_tensor_product(a, M, N, b.T), (0, 4), (3, 7))
453
+ res = _array_diag2contr_diagmatrix(cg)
454
+ assert res.shape == cg.shape
455
+ assert res == _array_contraction(
456
+ _array_tensor_product(OneArray(1), M, N, OneArray(1), DiagMatrix(a), DiagMatrix(b.T)), (3, 6), (2, 9))
457
+
458
+ I1 = Identity(1)
459
+ x = MatrixSymbol("x", k, 1)
460
+ A = MatrixSymbol("A", k, k)
461
+ cg = _array_diagonal(_array_tensor_product(x, A.T, I1), (0, 2))
462
+ assert _array_diag2contr_diagmatrix(cg).shape == cg.shape
463
+ assert _array2matrix(cg).shape == cg.shape
464
+
465
+
466
+ def test_arrayexpr_convert_array_to_matrix_support_function():
467
+
468
+ assert _support_function_tp1_recognize([], [2 * k]) == 2 * k
469
+
470
+ assert _support_function_tp1_recognize([(1, 2)], [A, 2 * k, B, 3]) == 6 * k * A * B
471
+
472
+ assert _support_function_tp1_recognize([(0, 3), (1, 2)], [A, B]) == Trace(A * B)
473
+
474
+ assert _support_function_tp1_recognize([(1, 2)], [A, B]) == A * B
475
+ assert _support_function_tp1_recognize([(0, 2)], [A, B]) == A.T * B
476
+ assert _support_function_tp1_recognize([(1, 3)], [A, B]) == A * B.T
477
+ assert _support_function_tp1_recognize([(0, 3)], [A, B]) == A.T * B.T
478
+
479
+ assert _support_function_tp1_recognize([(1, 2), (5, 6)], [A, B, C, D]) == _array_tensor_product(A * B, C * D)
480
+ assert _support_function_tp1_recognize([(1, 4), (3, 6)], [A, B, C, D]) == PermuteDims(
481
+ _array_tensor_product(A * C, B * D), [0, 2, 1, 3])
482
+
483
+ assert _support_function_tp1_recognize([(0, 3), (1, 4)], [A, B, C]) == B * A * C
484
+
485
+ assert _support_function_tp1_recognize([(9, 10), (1, 2), (5, 6), (3, 4), (7, 8)],
486
+ [X, Y, A, B, C, D]) == X * Y * A * B * C * D
487
+
488
+ assert _support_function_tp1_recognize([(9, 10), (1, 2), (5, 6), (3, 4)],
489
+ [X, Y, A, B, C, D]) == _array_tensor_product(X * Y * A * B, C * D)
490
+
491
+ assert _support_function_tp1_recognize([(1, 7), (3, 8), (4, 11)], [X, Y, A, B, C, D]) == PermuteDims(
492
+ _array_tensor_product(X * B.T, Y * C, A.T * D.T), [0, 2, 4, 1, 3, 5]
493
+ )
494
+
495
+ assert _support_function_tp1_recognize([(0, 1), (3, 6), (5, 8)], [X, A, B, C, D]) == PermuteDims(
496
+ _array_tensor_product(Trace(X) * A * C, B * D), [0, 2, 1, 3])
497
+
498
+ assert _support_function_tp1_recognize([(1, 2), (3, 4), (5, 6), (7, 8)], [A, A, B, C, D]) == A ** 2 * B * C * D
499
+ assert _support_function_tp1_recognize([(1, 2), (3, 4), (5, 6), (7, 8)], [X, A, B, C, D]) == X * A * B * C * D
500
+
501
+ assert _support_function_tp1_recognize([(1, 6), (3, 8), (5, 10)], [X, Y, A, B, C, D]) == PermuteDims(
502
+ _array_tensor_product(X * B, Y * C, A * D), [0, 2, 4, 1, 3, 5]
503
+ )
504
+
505
+ assert _support_function_tp1_recognize([(1, 4), (3, 6)], [A, B, C, D]) == PermuteDims(
506
+ _array_tensor_product(A * C, B * D), [0, 2, 1, 3])
507
+
508
+ assert _support_function_tp1_recognize([(0, 4), (1, 7), (2, 5), (3, 8)], [X, A, B, C, D]) == C*X.T*B*A*D
509
+
510
+ assert _support_function_tp1_recognize([(0, 4), (1, 7), (2, 5), (3, 8)], [X, A, B, C, D]) == C*X.T*B*A*D
511
+
512
+
513
+ def test_convert_array_to_hadamard_products():
514
+
515
+ expr = HadamardProduct(M, N)
516
+ cg = convert_matrix_to_array(expr)
517
+ ret = convert_array_to_matrix(cg)
518
+ assert ret == expr
519
+
520
+ expr = HadamardProduct(M, N)*P
521
+ cg = convert_matrix_to_array(expr)
522
+ ret = convert_array_to_matrix(cg)
523
+ assert ret == expr
524
+
525
+ expr = Q*HadamardProduct(M, N)*P
526
+ cg = convert_matrix_to_array(expr)
527
+ ret = convert_array_to_matrix(cg)
528
+ assert ret == expr
529
+
530
+ expr = Q*HadamardProduct(M, N.T)*P
531
+ cg = convert_matrix_to_array(expr)
532
+ ret = convert_array_to_matrix(cg)
533
+ assert ret == expr
534
+
535
+ expr = HadamardProduct(M, N)*HadamardProduct(Q, P)
536
+ cg = convert_matrix_to_array(expr)
537
+ ret = convert_array_to_matrix(cg)
538
+ assert expr == ret
539
+
540
+ expr = P.T*HadamardProduct(M, N)*HadamardProduct(Q, P)
541
+ cg = convert_matrix_to_array(expr)
542
+ ret = convert_array_to_matrix(cg)
543
+ assert expr == ret
544
+
545
+ # ArrayDiagonal should be converted
546
+ cg = _array_diagonal(_array_tensor_product(M, N, Q), (1, 3), (0, 2, 4))
547
+ ret = convert_array_to_matrix(cg)
548
+ expected = PermuteDims(_array_diagonal(_array_tensor_product(HadamardProduct(M.T, N.T), Q), (1, 2)), [1, 0, 2])
549
+ assert expected == ret
550
+
551
+ # Special case that should return the same expression:
552
+ cg = _array_diagonal(_array_tensor_product(HadamardProduct(M, N), Q), (0, 2))
553
+ ret = convert_array_to_matrix(cg)
554
+ assert ret == cg
555
+
556
+ # Hadamard products with traces:
557
+
558
+ expr = Trace(HadamardProduct(M, N))
559
+ cg = convert_matrix_to_array(expr)
560
+ ret = convert_array_to_matrix(cg)
561
+ assert ret == Trace(HadamardProduct(M.T, N.T))
562
+
563
+ expr = Trace(A*HadamardProduct(M, N))
564
+ cg = convert_matrix_to_array(expr)
565
+ ret = convert_array_to_matrix(cg)
566
+ assert ret == Trace(HadamardProduct(M, N)*A)
567
+
568
+ expr = Trace(HadamardProduct(A, M)*N)
569
+ cg = convert_matrix_to_array(expr)
570
+ ret = convert_array_to_matrix(cg)
571
+ assert ret == Trace(HadamardProduct(M.T, N)*A)
572
+
573
+ # These should not be converted into Hadamard products:
574
+
575
+ cg = _array_diagonal(_array_tensor_product(M, N), (0, 1, 2, 3))
576
+ ret = convert_array_to_matrix(cg)
577
+ assert ret == cg
578
+
579
+ cg = _array_diagonal(_array_tensor_product(A), (0, 1))
580
+ ret = convert_array_to_matrix(cg)
581
+ assert ret == cg
582
+
583
+ cg = _array_diagonal(_array_tensor_product(M, N, P), (0, 2, 4), (1, 3, 5))
584
+ assert convert_array_to_matrix(cg) == HadamardProduct(M, N, P)
585
+
586
+ cg = _array_diagonal(_array_tensor_product(M, N, P), (0, 3, 4), (1, 2, 5))
587
+ assert convert_array_to_matrix(cg) == HadamardProduct(M, P, N.T)
588
+
589
+ cg = _array_diagonal(_array_tensor_product(I, I1, x), (1, 4), (3, 5))
590
+ assert convert_array_to_matrix(cg) == DiagMatrix(x)
591
+
592
+
593
+ def test_identify_removable_identity_matrices():
594
+
595
+ D = DiagonalMatrix(MatrixSymbol("D", k, k))
596
+
597
+ cg = _array_contraction(_array_tensor_product(A, B, I), (1, 2, 4, 5))
598
+ expected = _array_contraction(_array_tensor_product(A, B), (1, 2))
599
+ assert identify_removable_identity_matrices(cg) == expected
600
+
601
+ cg = _array_contraction(_array_tensor_product(A, B, C, I), (1, 3, 5, 6, 7))
602
+ expected = _array_contraction(_array_tensor_product(A, B, C), (1, 3, 5))
603
+ assert identify_removable_identity_matrices(cg) == expected
604
+
605
+ # Tests with diagonal matrices:
606
+
607
+ cg = _array_contraction(_array_tensor_product(A, B, D), (1, 2, 4, 5))
608
+ ret = identify_removable_identity_matrices(cg)
609
+ expected = _array_contraction(_array_tensor_product(A, B, D), (1, 4), (2, 5))
610
+ assert ret == expected
611
+
612
+ cg = _array_contraction(_array_tensor_product(A, B, D, M, N), (1, 2, 4, 5, 6, 8))
613
+ ret = identify_removable_identity_matrices(cg)
614
+ assert ret == cg
615
+
616
+
617
+ def test_combine_removed():
618
+
619
+ assert _combine_removed(6, [0, 1, 2], [0, 1, 2]) == [0, 1, 2, 3, 4, 5]
620
+ assert _combine_removed(8, [2, 5], [1, 3, 4]) == [1, 2, 4, 5, 6]
621
+ assert _combine_removed(8, [7], []) == [7]
622
+
623
+
624
+ def test_array_contraction_to_diagonal_multiple_identities():
625
+
626
+ expr = _array_contraction(_array_tensor_product(A, B, I, C), (1, 2, 4), (5, 6))
627
+ assert _array_contraction_to_diagonal_multiple_identity(expr) == (expr, [])
628
+ assert convert_array_to_matrix(expr) == _array_contraction(_array_tensor_product(A, B, C), (1, 2, 4))
629
+
630
+ expr = _array_contraction(_array_tensor_product(A, I, I), (1, 2, 4))
631
+ assert _array_contraction_to_diagonal_multiple_identity(expr) == (A, [2])
632
+ assert convert_array_to_matrix(expr) == A
633
+
634
+ expr = _array_contraction(_array_tensor_product(A, I, I, B), (1, 2, 4), (3, 6))
635
+ assert _array_contraction_to_diagonal_multiple_identity(expr) == (expr, [])
636
+
637
+ expr = _array_contraction(_array_tensor_product(A, I, I, B), (1, 2, 3, 4, 6))
638
+ assert _array_contraction_to_diagonal_multiple_identity(expr) == (expr, [])
639
+
640
+
641
+ def test_convert_array_element_to_matrix():
642
+
643
+ expr = ArrayElement(M, (i, j))
644
+ assert convert_array_to_matrix(expr) == MatrixElement(M, i, j)
645
+
646
+ expr = ArrayElement(_array_contraction(_array_tensor_product(M, N), (1, 3)), (i, j))
647
+ assert convert_array_to_matrix(expr) == MatrixElement(M*N.T, i, j)
648
+
649
+ expr = ArrayElement(_array_tensor_product(M, N), (i, j, m, n))
650
+ assert convert_array_to_matrix(expr) == expr
651
+
652
+
653
+ def test_convert_array_elementwise_function_to_matrix():
654
+
655
+ d = Dummy("d")
656
+
657
+ expr = ArrayElementwiseApplyFunc(Lambda(d, sin(d)), x.T*y)
658
+ assert convert_array_to_matrix(expr) == sin(x.T*y)
659
+
660
+ expr = ArrayElementwiseApplyFunc(Lambda(d, d**2), x.T*y)
661
+ assert convert_array_to_matrix(expr) == (x.T*y)**2
662
+
663
+ expr = ArrayElementwiseApplyFunc(Lambda(d, sin(d)), x)
664
+ assert convert_array_to_matrix(expr).dummy_eq(x.applyfunc(sin))
665
+
666
+ expr = ArrayElementwiseApplyFunc(Lambda(d, 1 / (2 * sqrt(d))), x)
667
+ assert convert_array_to_matrix(expr) == S.Half * HadamardPower(x, -S.Half)
668
+
669
+
670
+ def test_array2matrix():
671
+ # See issue https://github.com/sympy/sympy/pull/22877
672
+ expr = PermuteDims(ArrayContraction(ArrayTensorProduct(x, I, I1, x), (0, 3), (1, 7)), Permutation(2, 3))
673
+ expected = PermuteDims(ArrayTensorProduct(x*x.T, I1), Permutation(3)(1, 2))
674
+ assert _array2matrix(expr) == expected
675
+
676
+
677
+ def test_recognize_broadcasting():
678
+ expr = ArrayTensorProduct(x.T*x, A)
679
+ assert _remove_trivial_dims(expr) == (KroneckerProduct(x.T*x, A), [0, 1])
680
+
681
+ expr = ArrayTensorProduct(A, x.T*x)
682
+ assert _remove_trivial_dims(expr) == (KroneckerProduct(A, x.T*x), [2, 3])
683
+
684
+ expr = ArrayTensorProduct(A, B, x.T*x, C)
685
+ assert _remove_trivial_dims(expr) == (ArrayTensorProduct(A, KroneckerProduct(B, x.T*x), C), [4, 5])
686
+
687
+ # Always prefer matrix multiplication to Kronecker product, if possible:
688
+ expr = ArrayTensorProduct(a, b, x.T*x)
689
+ assert _remove_trivial_dims(expr) == (a*x.T*x*b.T, [1, 3, 4, 5])
evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_convert_indexed_to_array.py ADDED
@@ -0,0 +1,205 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from sympy import tanh
2
+ from sympy.concrete.summations import Sum
3
+ from sympy.core.symbol import symbols
4
+ from sympy.functions.special.tensor_functions import KroneckerDelta
5
+ from sympy.matrices.expressions.matexpr import MatrixSymbol
6
+ from sympy.matrices.expressions.special import Identity
7
+ from sympy.tensor.array.expressions import ArrayElementwiseApplyFunc
8
+ from sympy.tensor.indexed import IndexedBase
9
+ from sympy.combinatorics import Permutation
10
+ from sympy.tensor.array.expressions.array_expressions import ArrayContraction, ArrayTensorProduct, \
11
+ ArrayDiagonal, ArrayAdd, PermuteDims, ArrayElement, _array_tensor_product, _array_contraction, _array_diagonal, \
12
+ _array_add, _permute_dims, ArraySymbol, OneArray
13
+ from sympy.tensor.array.expressions.from_array_to_matrix import convert_array_to_matrix
14
+ from sympy.tensor.array.expressions.from_indexed_to_array import convert_indexed_to_array, _convert_indexed_to_array
15
+ from sympy.testing.pytest import raises
16
+
17
+
18
+ A, B = symbols("A B", cls=IndexedBase)
19
+ i, j, k, l, m, n = symbols("i j k l m n")
20
+ d0, d1, d2, d3 = symbols("d0:4")
21
+
22
+ I = Identity(k)
23
+
24
+ M = MatrixSymbol("M", k, k)
25
+ N = MatrixSymbol("N", k, k)
26
+ P = MatrixSymbol("P", k, k)
27
+ Q = MatrixSymbol("Q", k, k)
28
+
29
+ a = MatrixSymbol("a", k, 1)
30
+ b = MatrixSymbol("b", k, 1)
31
+ c = MatrixSymbol("c", k, 1)
32
+ d = MatrixSymbol("d", k, 1)
33
+
34
+
35
+ def test_arrayexpr_convert_index_to_array_support_function():
36
+ expr = M[i, j]
37
+ assert _convert_indexed_to_array(expr) == (M, (i, j))
38
+ expr = M[i, j]*N[k, l]
39
+ assert _convert_indexed_to_array(expr) == (ArrayTensorProduct(M, N), (i, j, k, l))
40
+ expr = M[i, j]*N[j, k]
41
+ assert _convert_indexed_to_array(expr) == (ArrayDiagonal(ArrayTensorProduct(M, N), (1, 2)), (i, k, j))
42
+ expr = Sum(M[i, j]*N[j, k], (j, 0, k-1))
43
+ assert _convert_indexed_to_array(expr) == (ArrayContraction(ArrayTensorProduct(M, N), (1, 2)), (i, k))
44
+ expr = M[i, j] + N[i, j]
45
+ assert _convert_indexed_to_array(expr) == (ArrayAdd(M, N), (i, j))
46
+ expr = M[i, j] + N[j, i]
47
+ assert _convert_indexed_to_array(expr) == (ArrayAdd(M, PermuteDims(N, Permutation([1, 0]))), (i, j))
48
+ expr = M[i, j] + M[j, i]
49
+ assert _convert_indexed_to_array(expr) == (ArrayAdd(M, PermuteDims(M, Permutation([1, 0]))), (i, j))
50
+ expr = (M*N*P)[i, j]
51
+ assert _convert_indexed_to_array(expr) == (_array_contraction(ArrayTensorProduct(M, N, P), (1, 2), (3, 4)), (i, j))
52
+ expr = expr.function # Disregard summation in previous expression
53
+ ret1, ret2 = _convert_indexed_to_array(expr)
54
+ assert ret1 == ArrayDiagonal(ArrayTensorProduct(M, N, P), (1, 2), (3, 4))
55
+ assert str(ret2) == "(i, j, _i_1, _i_2)"
56
+ expr = KroneckerDelta(i, j)*M[i, k]
57
+ assert _convert_indexed_to_array(expr) == (M, ({i, j}, k))
58
+ expr = KroneckerDelta(i, j)*KroneckerDelta(j, k)*M[i, l]
59
+ assert _convert_indexed_to_array(expr) == (M, ({i, j, k}, l))
60
+ expr = KroneckerDelta(j, k)*(M[i, j]*N[k, l] + N[i, j]*M[k, l])
61
+ assert _convert_indexed_to_array(expr) == (_array_diagonal(_array_add(
62
+ ArrayTensorProduct(M, N),
63
+ _permute_dims(ArrayTensorProduct(M, N), Permutation(0, 2)(1, 3))
64
+ ), (1, 2)), (i, l, frozenset({j, k})))
65
+ expr = KroneckerDelta(j, m)*KroneckerDelta(m, k)*(M[i, j]*N[k, l] + N[i, j]*M[k, l])
66
+ assert _convert_indexed_to_array(expr) == (_array_diagonal(_array_add(
67
+ ArrayTensorProduct(M, N),
68
+ _permute_dims(ArrayTensorProduct(M, N), Permutation(0, 2)(1, 3))
69
+ ), (1, 2)), (i, l, frozenset({j, m, k})))
70
+ expr = KroneckerDelta(i, j)*KroneckerDelta(j, k)*KroneckerDelta(k,m)*M[i, 0]*KroneckerDelta(m, n)
71
+ assert _convert_indexed_to_array(expr) == (M, ({i, j, k, m, n}, 0))
72
+ expr = M[i, i]
73
+ assert _convert_indexed_to_array(expr) == (ArrayDiagonal(M, (0, 1)), (i,))
74
+
75
+
76
+ def test_arrayexpr_convert_indexed_to_array_expression():
77
+
78
+ s = Sum(A[i]*B[i], (i, 0, 3))
79
+ cg = convert_indexed_to_array(s)
80
+ assert cg == ArrayContraction(ArrayTensorProduct(A, B), (0, 1))
81
+
82
+ expr = M*N
83
+ result = ArrayContraction(ArrayTensorProduct(M, N), (1, 2))
84
+ elem = expr[i, j]
85
+ assert convert_indexed_to_array(elem) == result
86
+
87
+ expr = M*N*M
88
+ elem = expr[i, j]
89
+ result = _array_contraction(_array_tensor_product(M, M, N), (1, 4), (2, 5))
90
+ cg = convert_indexed_to_array(elem)
91
+ assert cg == result
92
+
93
+ cg = convert_indexed_to_array((M * N * P)[i, j])
94
+ assert cg == _array_contraction(ArrayTensorProduct(M, N, P), (1, 2), (3, 4))
95
+
96
+ cg = convert_indexed_to_array((M * N.T * P)[i, j])
97
+ assert cg == _array_contraction(ArrayTensorProduct(M, N, P), (1, 3), (2, 4))
98
+
99
+ expr = -2*M*N
100
+ elem = expr[i, j]
101
+ cg = convert_indexed_to_array(elem)
102
+ assert cg == ArrayContraction(ArrayTensorProduct(-2, M, N), (1, 2))
103
+
104
+
105
+ def test_arrayexpr_convert_array_element_to_array_expression():
106
+ A = ArraySymbol("A", (k,))
107
+ B = ArraySymbol("B", (k,))
108
+
109
+ s = Sum(A[i]*B[i], (i, 0, k-1))
110
+ cg = convert_indexed_to_array(s)
111
+ assert cg == ArrayContraction(ArrayTensorProduct(A, B), (0, 1))
112
+
113
+ s = A[i]*B[i]
114
+ cg = convert_indexed_to_array(s)
115
+ assert cg == ArrayDiagonal(ArrayTensorProduct(A, B), (0, 1))
116
+
117
+ s = A[i]*B[j]
118
+ cg = convert_indexed_to_array(s, [i, j])
119
+ assert cg == ArrayTensorProduct(A, B)
120
+ cg = convert_indexed_to_array(s, [j, i])
121
+ assert cg == ArrayTensorProduct(B, A)
122
+
123
+ s = tanh(A[i]*B[j])
124
+ cg = convert_indexed_to_array(s, [i, j])
125
+ assert cg.dummy_eq(ArrayElementwiseApplyFunc(tanh, ArrayTensorProduct(A, B)))
126
+
127
+
128
+ def test_arrayexpr_convert_indexed_to_array_and_back_to_matrix():
129
+
130
+ expr = a.T*b
131
+ elem = expr[0, 0]
132
+ cg = convert_indexed_to_array(elem)
133
+ assert cg == ArrayElement(ArrayContraction(ArrayTensorProduct(a, b), (0, 2)), [0, 0])
134
+
135
+ expr = M[i,j] + N[i,j]
136
+ p1, p2 = _convert_indexed_to_array(expr)
137
+ assert convert_array_to_matrix(p1) == M + N
138
+
139
+ expr = M[i,j] + N[j,i]
140
+ p1, p2 = _convert_indexed_to_array(expr)
141
+ assert convert_array_to_matrix(p1) == M + N.T
142
+
143
+ expr = M[i,j]*N[k,l] + N[i,j]*M[k,l]
144
+ p1, p2 = _convert_indexed_to_array(expr)
145
+ assert convert_array_to_matrix(p1) == ArrayAdd(
146
+ ArrayTensorProduct(M, N),
147
+ ArrayTensorProduct(N, M))
148
+
149
+ expr = (M*N*P)[i, j]
150
+ p1, p2 = _convert_indexed_to_array(expr)
151
+ assert convert_array_to_matrix(p1) == M * N * P
152
+
153
+ expr = Sum(M[i,j]*(N*P)[j,m], (j, 0, k-1))
154
+ p1, p2 = _convert_indexed_to_array(expr)
155
+ assert convert_array_to_matrix(p1) == M * N * P
156
+
157
+ expr = Sum((P[j, m] + P[m, j])*(M[i,j]*N[m,n] + N[i,j]*M[m,n]), (j, 0, k-1), (m, 0, k-1))
158
+ p1, p2 = _convert_indexed_to_array(expr)
159
+ assert convert_array_to_matrix(p1) == M * P * N + M * P.T * N + N * P * M + N * P.T * M
160
+
161
+
162
+ def test_arrayexpr_convert_indexed_to_array_out_of_bounds():
163
+
164
+ expr = Sum(M[i, i], (i, 0, 4))
165
+ raises(ValueError, lambda: convert_indexed_to_array(expr))
166
+ expr = Sum(M[i, i], (i, 0, k))
167
+ raises(ValueError, lambda: convert_indexed_to_array(expr))
168
+ expr = Sum(M[i, i], (i, 1, k-1))
169
+ raises(ValueError, lambda: convert_indexed_to_array(expr))
170
+
171
+ expr = Sum(M[i, j]*N[j,m], (j, 0, 4))
172
+ raises(ValueError, lambda: convert_indexed_to_array(expr))
173
+ expr = Sum(M[i, j]*N[j,m], (j, 0, k))
174
+ raises(ValueError, lambda: convert_indexed_to_array(expr))
175
+ expr = Sum(M[i, j]*N[j,m], (j, 1, k-1))
176
+ raises(ValueError, lambda: convert_indexed_to_array(expr))
177
+
178
+
179
+ def test_arrayexpr_convert_indexed_to_array_broadcast():
180
+ A = ArraySymbol("A", (3, 3))
181
+ B = ArraySymbol("B", (3, 3))
182
+
183
+ expr = A[i, j] + B[k, l]
184
+ O2 = OneArray(3, 3)
185
+ expected = ArrayAdd(ArrayTensorProduct(A, O2), ArrayTensorProduct(O2, B))
186
+ assert convert_indexed_to_array(expr) == expected
187
+ assert convert_indexed_to_array(expr, [i, j, k, l]) == expected
188
+ assert convert_indexed_to_array(expr, [l, k, i, j]) == ArrayAdd(PermuteDims(ArrayTensorProduct(O2, A), [1, 0, 2, 3]), PermuteDims(ArrayTensorProduct(B, O2), [1, 0, 2, 3]))
189
+
190
+ expr = A[i, j] + B[j, k]
191
+ O1 = OneArray(3)
192
+ assert convert_indexed_to_array(expr, [i, j, k]) == ArrayAdd(ArrayTensorProduct(A, O1), ArrayTensorProduct(O1, B))
193
+
194
+ C = ArraySymbol("C", (d0, d1))
195
+ D = ArraySymbol("D", (d3, d1))
196
+
197
+ expr = C[i, j] + D[k, j]
198
+ assert convert_indexed_to_array(expr, [i, j, k]) == ArrayAdd(ArrayTensorProduct(C, OneArray(d3)), PermuteDims(ArrayTensorProduct(OneArray(d0), D), [0, 2, 1]))
199
+
200
+ X = ArraySymbol("X", (5, 3))
201
+
202
+ expr = X[i, n] - X[j, n]
203
+ assert convert_indexed_to_array(expr, [i, j, n]) == ArrayAdd(ArrayTensorProduct(-1, OneArray(5), X), PermuteDims(ArrayTensorProduct(X, OneArray(5)), [0, 2, 1]))
204
+
205
+ raises(ValueError, lambda: convert_indexed_to_array(C[i, j] + D[i, j]))
evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_convert_matrix_to_array.py ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from sympy import Lambda, KroneckerProduct
2
+ from sympy.core.symbol import symbols, Dummy
3
+ from sympy.matrices.expressions.hadamard import (HadamardPower, HadamardProduct)
4
+ from sympy.matrices.expressions.inverse import Inverse
5
+ from sympy.matrices.expressions.matexpr import MatrixSymbol
6
+ from sympy.matrices.expressions.matpow import MatPow
7
+ from sympy.matrices.expressions.special import Identity
8
+ from sympy.matrices.expressions.trace import Trace
9
+ from sympy.matrices.expressions.transpose import Transpose
10
+ from sympy.tensor.array.expressions.array_expressions import ArrayTensorProduct, ArrayContraction, \
11
+ PermuteDims, ArrayDiagonal, ArrayElementwiseApplyFunc, _array_contraction, _array_tensor_product, Reshape
12
+ from sympy.tensor.array.expressions.from_array_to_matrix import convert_array_to_matrix
13
+ from sympy.tensor.array.expressions.from_matrix_to_array import convert_matrix_to_array
14
+
15
+ i, j, k, l, m, n = symbols("i j k l m n")
16
+
17
+ I = Identity(k)
18
+
19
+ M = MatrixSymbol("M", k, k)
20
+ N = MatrixSymbol("N", k, k)
21
+ P = MatrixSymbol("P", k, k)
22
+ Q = MatrixSymbol("Q", k, k)
23
+
24
+ A = MatrixSymbol("A", k, k)
25
+ B = MatrixSymbol("B", k, k)
26
+ C = MatrixSymbol("C", k, k)
27
+ D = MatrixSymbol("D", k, k)
28
+
29
+ X = MatrixSymbol("X", k, k)
30
+ Y = MatrixSymbol("Y", k, k)
31
+
32
+ a = MatrixSymbol("a", k, 1)
33
+ b = MatrixSymbol("b", k, 1)
34
+ c = MatrixSymbol("c", k, 1)
35
+ d = MatrixSymbol("d", k, 1)
36
+
37
+
38
+ def test_arrayexpr_convert_matrix_to_array():
39
+
40
+ expr = M*N
41
+ result = ArrayContraction(ArrayTensorProduct(M, N), (1, 2))
42
+ assert convert_matrix_to_array(expr) == result
43
+
44
+ expr = M*N*M
45
+ result = _array_contraction(ArrayTensorProduct(M, N, M), (1, 2), (3, 4))
46
+ assert convert_matrix_to_array(expr) == result
47
+
48
+ expr = Transpose(M)
49
+ assert convert_matrix_to_array(expr) == PermuteDims(M, [1, 0])
50
+
51
+ expr = M*Transpose(N)
52
+ assert convert_matrix_to_array(expr) == _array_contraction(_array_tensor_product(M, PermuteDims(N, [1, 0])), (1, 2))
53
+
54
+ expr = 3*M*N
55
+ res = convert_matrix_to_array(expr)
56
+ rexpr = convert_array_to_matrix(res)
57
+ assert expr == rexpr
58
+
59
+ expr = 3*M + N*M.T*M + 4*k*N
60
+ res = convert_matrix_to_array(expr)
61
+ rexpr = convert_array_to_matrix(res)
62
+ assert expr == rexpr
63
+
64
+ expr = Inverse(M)*N
65
+ rexpr = convert_array_to_matrix(convert_matrix_to_array(expr))
66
+ assert expr == rexpr
67
+
68
+ expr = M**2
69
+ rexpr = convert_array_to_matrix(convert_matrix_to_array(expr))
70
+ assert expr == rexpr
71
+
72
+ expr = M*(2*N + 3*M)
73
+ res = convert_matrix_to_array(expr)
74
+ rexpr = convert_array_to_matrix(res)
75
+ assert expr == rexpr
76
+
77
+ expr = Trace(M)
78
+ result = ArrayContraction(M, (0, 1))
79
+ assert convert_matrix_to_array(expr) == result
80
+
81
+ expr = 3*Trace(M)
82
+ result = ArrayContraction(ArrayTensorProduct(3, M), (0, 1))
83
+ assert convert_matrix_to_array(expr) == result
84
+
85
+ expr = 3*Trace(Trace(M) * M)
86
+ result = ArrayContraction(ArrayTensorProduct(3, M, M), (0, 1), (2, 3))
87
+ assert convert_matrix_to_array(expr) == result
88
+
89
+ expr = 3*Trace(M)**2
90
+ result = ArrayContraction(ArrayTensorProduct(3, M, M), (0, 1), (2, 3))
91
+ assert convert_matrix_to_array(expr) == result
92
+
93
+ expr = HadamardProduct(M, N)
94
+ result = ArrayDiagonal(ArrayTensorProduct(M, N), (0, 2), (1, 3))
95
+ assert convert_matrix_to_array(expr) == result
96
+
97
+ expr = HadamardProduct(M*N, N*M)
98
+ result = ArrayDiagonal(ArrayContraction(ArrayTensorProduct(M, N, N, M), (1, 2), (5, 6)), (0, 2), (1, 3))
99
+ assert convert_matrix_to_array(expr) == result
100
+
101
+ expr = HadamardPower(M, 2)
102
+ result = ArrayDiagonal(ArrayTensorProduct(M, M), (0, 2), (1, 3))
103
+ assert convert_matrix_to_array(expr) == result
104
+
105
+ expr = HadamardPower(M*N, 2)
106
+ result = ArrayDiagonal(ArrayContraction(ArrayTensorProduct(M, N, M, N), (1, 2), (5, 6)), (0, 2), (1, 3))
107
+ assert convert_matrix_to_array(expr) == result
108
+
109
+ expr = HadamardPower(M, n)
110
+ d0 = Dummy("d0")
111
+ result = ArrayElementwiseApplyFunc(Lambda(d0, d0**n), M)
112
+ assert convert_matrix_to_array(expr).dummy_eq(result)
113
+
114
+ expr = M**2
115
+ assert isinstance(expr, MatPow)
116
+ assert convert_matrix_to_array(expr) == ArrayContraction(ArrayTensorProduct(M, M), (1, 2))
117
+
118
+ expr = a.T*b
119
+ cg = convert_matrix_to_array(expr)
120
+ assert cg == ArrayContraction(ArrayTensorProduct(a, b), (0, 2))
121
+
122
+ expr = KroneckerProduct(A, B)
123
+ cg = convert_matrix_to_array(expr)
124
+ assert cg == Reshape(PermuteDims(ArrayTensorProduct(A, B), [0, 2, 1, 3]), (k**2, k**2))
125
+
126
+ expr = KroneckerProduct(A, B, C, D)
127
+ cg = convert_matrix_to_array(expr)
128
+ assert cg == Reshape(PermuteDims(ArrayTensorProduct(A, B, C, D), [0, 2, 4, 6, 1, 3, 5, 7]), (k**4, k**4))
evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/array/expressions/tests/test_deprecated_conv_modules.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from sympy import MatrixSymbol, symbols, Sum
2
+ from sympy.tensor.array.expressions import conv_array_to_indexed, from_array_to_indexed, ArrayTensorProduct, \
3
+ ArrayContraction, conv_array_to_matrix, from_array_to_matrix, conv_matrix_to_array, from_matrix_to_array, \
4
+ conv_indexed_to_array, from_indexed_to_array
5
+ from sympy.testing.pytest import warns
6
+ from sympy.utilities.exceptions import SymPyDeprecationWarning
7
+
8
+
9
def test_deprecated_conv_module_results():
    """Check the deprecated ``conv_*`` modules against their ``from_*`` replacements.

    Every deprecated wrapper must raise a ``SymPyDeprecationWarning`` while
    still returning the same result as the module that superseded it.
    """
    M = MatrixSymbol("M", 3, 3)
    N = MatrixSymbol("N", 3, 3)
    i, j, d = symbols("i j d")

    # One array-language expression and one indexed-summation expression to
    # round-trip through each conversion pair.
    array_expr = ArrayContraction(ArrayTensorProduct(M, N), (1, 2))
    indexed_expr = Sum(M[i, d]*N[d, j], (d, 0, 2))

    with warns(SymPyDeprecationWarning, test_stacklevel=False):
        deprecated_result = conv_array_to_indexed.convert_array_to_indexed(array_expr, [i, j])
        replacement_result = from_array_to_indexed.convert_array_to_indexed(array_expr, [i, j])
        assert deprecated_result.dummy_eq(replacement_result)
        assert conv_array_to_matrix.convert_array_to_matrix(array_expr) == from_array_to_matrix.convert_array_to_matrix(array_expr)
        assert conv_matrix_to_array.convert_matrix_to_array(M*N) == from_matrix_to_array.convert_matrix_to_array(M*N)
        assert conv_indexed_to_array.convert_indexed_to_array(indexed_expr) == from_indexed_to_array.convert_indexed_to_array(indexed_expr)
evalkit_tf437/lib/python3.10/site-packages/accelerate/__pycache__/logging.cpython-310.pyc ADDED
Binary file (3.73 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/__init__.py ADDED
File without changes
evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/__pycache__/tpu.cpython-310.pyc ADDED
Binary file (3.85 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/accelerate_cli.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ from argparse import ArgumentParser
18
+
19
+ from accelerate.commands.config import get_config_parser
20
+ from accelerate.commands.env import env_command_parser
21
+ from accelerate.commands.estimate import estimate_command_parser
22
+ from accelerate.commands.launch import launch_command_parser
23
+ from accelerate.commands.test import test_command_parser
24
+ from accelerate.commands.tpu import tpu_command_parser
25
+
26
+
27
+ def main():
28
+ parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
29
+ subparsers = parser.add_subparsers(help="accelerate command helpers")
30
+
31
+ # Register commands
32
+ get_config_parser(subparsers=subparsers)
33
+ estimate_command_parser(subparsers=subparsers)
34
+ env_command_parser(subparsers=subparsers)
35
+ launch_command_parser(subparsers=subparsers)
36
+ tpu_command_parser(subparsers=subparsers)
37
+ test_command_parser(subparsers=subparsers)
38
+
39
+ # Let's go
40
+ args = parser.parse_args()
41
+
42
+ if not hasattr(args, "func"):
43
+ parser.print_help()
44
+ exit(1)
45
+
46
+ # Run
47
+ args.func(args)
48
+
49
+
50
+ if __name__ == "__main__":
51
+ main()
evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/config/__init__.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import argparse
18
+
19
+ from .config import config_command_parser
20
+ from .config_args import default_config_file, load_config_from_file # noqa: F401
21
+ from .default import default_command_parser
22
+ from .update import update_command_parser
23
+
24
+
25
def get_config_parser(subparsers=None):
    """Assemble and return the parser for the ``accelerate config`` command.

    When *subparsers* is provided, the config command is registered on it;
    otherwise a standalone parser is created.  The ``default`` and ``update``
    subcommands are attached underneath the main config command.
    """
    # Root parser for the config command itself.
    config_parser = config_command_parser(subparsers)
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")

    # Shared parent so nested subcommands don't redefine ``--help``.
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser
37
+
38
+
39
def main():
    """Run ``accelerate config`` as a standalone program."""
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    # ``func`` is only present when a (sub)command registered a handler;
    # otherwise show usage and signal failure.
    if hasattr(args, "func"):
        args.func(args)
    else:
        config_parser.print_help()
        exit(1)


if __name__ == "__main__":
    main()
evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/cluster.cpython-310.pyc ADDED
Binary file (14 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config.cpython-310.pyc ADDED
Binary file (2.43 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config_args.cpython-310.pyc ADDED
Binary file (6.98 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/config_utils.cpython-310.pyc ADDED
Binary file (2.72 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/default.cpython-310.pyc ADDED
Binary file (3.78 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/sagemaker.cpython-310.pyc ADDED
Binary file (6.86 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/config/__pycache__/update.cpython-310.pyc ADDED
Binary file (1.86 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/config/cluster.py ADDED
@@ -0,0 +1,645 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import os
18
+
19
+ from ...utils import (
20
+ ComputeEnvironment,
21
+ DistributedType,
22
+ is_deepspeed_available,
23
+ is_mps_available,
24
+ is_npu_available,
25
+ is_transformers_available,
26
+ is_xpu_available,
27
+ )
28
+ from ...utils.constants import (
29
+ DEEPSPEED_MULTINODE_LAUNCHERS,
30
+ FSDP_AUTO_WRAP_POLICY,
31
+ FSDP_BACKWARD_PREFETCH,
32
+ FSDP_SHARDING_STRATEGY,
33
+ FSDP_STATE_DICT_TYPE,
34
+ TORCH_DYNAMO_MODES,
35
+ )
36
+ from .config_args import ClusterConfig
37
+ from .config_utils import (
38
+ DYNAMO_BACKENDS,
39
+ _ask_field,
40
+ _ask_options,
41
+ _convert_distributed_mode,
42
+ _convert_dynamo_backend,
43
+ _convert_mixed_precision,
44
+ _convert_yes_no_to_bool,
45
+ )
46
+
47
+
48
def get_cluster_input():
    """Interactively build a :class:`ClusterConfig` for local-machine training.

    Walks the user through prompts covering the distributed backend, machine
    topology, IPEX/dynamo options, DeepSpeed / FSDP / Megatron-LM settings,
    TPU pod setup and mixed precision, and returns the answers bundled into a
    ``ClusterConfig`` with ``compute_environment=LOCAL_MACHINE``.

    Fix: the ``tpu_use_sudo`` prompt previously omitted the
    ``_convert_yes_no_to_bool`` converter that every other yes/no prompt
    passes, so any typed answer (even ``"no"``) was stored as a truthy string.
    """
    distributed_type = _ask_options(
        "Which type of machine are you using?",
        ["No distributed training", "multi-CPU", "multi-XPU", "multi-GPU", "multi-NPU", "TPU"],
        _convert_distributed_mode,
    )

    # Single-machine defaults; the multi-node questions below override them.
    machine_rank = 0
    num_machines = 1
    num_processes = 1
    gpu_ids = None
    main_process_ip = None
    main_process_port = None
    rdzv_backend = "static"
    same_network = True
    debug = False

    if distributed_type in [
        DistributedType.MULTI_GPU,
        DistributedType.MULTI_NPU,
        DistributedType.MULTI_XPU,
        DistributedType.MULTI_CPU,
    ]:
        num_machines = _ask_field(
            "How many different machines will you use (use more than 1 for multi-node training)? [1]: ",
            int,
            default=1,
        )
        if num_machines > 1:
            machine_rank = _ask_options(
                "What is the rank of this machine?",
                list(range(num_machines)),
                int,
            )
            main_process_ip = _ask_field(
                "What is the IP address of the machine that will host the main process? ",
            )
            main_process_port = _ask_field(
                "What is the port you will use to communicate with the main process? ",
                int,
            )
            same_network = _ask_field(
                "Are all the machines on the same local network? Answer `no` if nodes are on the cloud and/or on different network hosts [YES/no]: ",
                _convert_yes_no_to_bool,
                default=True,
                error_message="Please enter yes or no.",
            )
            if not same_network:
                rdzv_backend = _ask_field(
                    "What rendezvous backend will you use? ('static', 'c10d', ...): ", default="static"
                )
        debug = _ask_field(
            "Should distributed operations be checked while running for errors? This can avoid timeout issues but will be slower. [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )

    if distributed_type == DistributedType.NO:
        use_cpu = _ask_field(
            "Do you want to run your training on CPU only (even if a GPU / Apple Silicon / Ascend NPU device is available)? [yes/NO]:",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )
    elif distributed_type == DistributedType.MULTI_CPU:
        use_cpu = True
    else:
        use_cpu = False

    # Intel extension (IPEX / XPU) acceleration options.
    ipex_config = {}
    if use_cpu:
        ipex_config["ipex"] = _ask_field(
            "Do you want to use Intel PyTorch Extension (IPEX) to speed up training on CPU? [yes/NO]:",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )
    if (
        not use_cpu
        and is_xpu_available()
        and distributed_type not in [DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.TPU]
    ):
        ipex_config["use_xpu"] = _ask_field(
            "Do you want to use XPU plugin to speed up training on XPU? [yes/NO]:",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )

    # torch.compile / dynamo options.
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )

        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default=0,
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )

    use_mps = not use_cpu and is_mps_available()

    # DeepSpeed (may promote ``distributed_type`` to DEEPSPEED).
    deepspeed_config = {}
    if distributed_type in [DistributedType.MULTI_GPU, DistributedType.NO] and not use_mps:
        use_deepspeed = _ask_field(
            "Do you want to use DeepSpeed? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )
        if use_deepspeed:
            distributed_type = DistributedType.DEEPSPEED
            assert (
                is_deepspeed_available()
            ), "DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source"

    if distributed_type == DistributedType.DEEPSPEED:
        use_deepspeed_config = _ask_field(
            "Do you want to specify a json file to a DeepSpeed config? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )
        if use_deepspeed_config:
            deepspeed_config["deepspeed_config_file"] = _ask_field(
                "Please enter the path to the json DeepSpeed config file: ",
                str,
                default="none",
            )
        else:
            deepspeed_config["zero_stage"] = _ask_options(
                "What should be your DeepSpeed's ZeRO optimization stage?",
                [0, 1, 2, 3],
                int,
                default=2,
            )

            deepspeed_devices = ["none", "cpu", "nvme"]
            if deepspeed_config["zero_stage"] >= 2:
                deepspeed_config["offload_optimizer_device"] = _ask_options(
                    "Where to offload optimizer states?", deepspeed_devices, lambda x: deepspeed_devices[int(x)]
                )
                deepspeed_config["offload_param_device"] = _ask_options(
                    "Where to offload parameters?", deepspeed_devices, lambda x: deepspeed_devices[int(x)]
                )
                if deepspeed_config["offload_param_device"] == "nvme":
                    deepspeed_config["offload_param_nvme_path"] = _ask_field(
                        "Nvme Path to offload parameters?",
                        str,
                        default="/nvme",
                    )
                if deepspeed_config["offload_optimizer_device"] == "nvme":
                    deepspeed_config["offload_optimizer_nvme_path"] = _ask_field(
                        "Nvme Path to offload optimizer states?",
                        str,
                        default="/nvme",
                    )
            deepspeed_config["gradient_accumulation_steps"] = _ask_field(
                "How many gradient accumulation steps you're passing in your script? [1]: ",
                int,
                default=1,
            )
            use_gradient_clipping = _ask_field(
                "Do you want to use gradient clipping? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            if use_gradient_clipping:
                deepspeed_config["gradient_clipping"] = _ask_field(
                    "What is the gradient clipping value? [1.0]: ",
                    float,
                    default=1.0,
                )
            if deepspeed_config["zero_stage"] == 3:
                deepspeed_config["zero3_save_16bit_model"] = _ask_field(
                    "Do you want to save 16-bit model weights when using ZeRO Stage-3? [yes/NO]: ",
                    _convert_yes_no_to_bool,
                    default=False,
                    error_message="Please enter yes or no.",
                )
            deepspeed_config["zero3_init_flag"] = _ask_field(
                "Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            if deepspeed_config["zero3_init_flag"]:
                if not is_transformers_available():
                    raise Exception(
                        "When `zero3_init_flag` is set, it requires Transformers to be installed. "
                        "Please run `pip3 install transformers`."
                    )

        if num_machines > 1:
            launcher_query = "Which Type of launcher do you want to use?"
            deepspeed_config["deepspeed_multinode_launcher"] = _ask_options(
                launcher_query,
                DEEPSPEED_MULTINODE_LAUNCHERS,
                lambda x: DEEPSPEED_MULTINODE_LAUNCHERS[int(x)],
            )

            if deepspeed_config["deepspeed_multinode_launcher"] != DEEPSPEED_MULTINODE_LAUNCHERS[1]:
                deepspeed_config["deepspeed_hostfile"] = _ask_field(
                    "DeepSpeed configures multi-node compute resources with hostfile. "
                    "Each row is of the format `hostname slots=[num_gpus]`, e.g., `localhost slots=2`; "
                    "for more information please refer official [documentation]"
                    "(https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node). "
                    "Please specify the location of hostfile: ",
                    str,
                )

                is_exclusion_filter = _ask_field(
                    "Do you want to specify exclusion filter string? [yes/NO]: ",
                    _convert_yes_no_to_bool,
                    default=False,
                    error_message="Please enter yes or no.",
                )
                if is_exclusion_filter:
                    deepspeed_config["deepspeed_exclusion_filter"] = _ask_field(
                        "DeepSpeed exclusion filter string: ",
                        str,
                    )

                is_inclusion_filter = _ask_field(
                    "Do you want to specify inclusion filter string? [yes/NO]: ",
                    _convert_yes_no_to_bool,
                    default=False,
                    error_message="Please enter yes or no.",
                )
                if is_inclusion_filter:
                    deepspeed_config["deepspeed_inclusion_filter"] = _ask_field(
                        "DeepSpeed inclusion filter string: ",
                        str,
                    )

    # FSDP (may promote ``distributed_type`` to FSDP).
    fsdp_config = {}
    if distributed_type in [DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU]:
        use_fsdp = _ask_field(
            "Do you want to use FullyShardedDataParallel? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )
        if use_fsdp:
            distributed_type = DistributedType.FSDP
    if distributed_type == DistributedType.FSDP:
        sharding_strategy_query = "What should be your sharding strategy?"
        # Sharding strategies are 1-indexed, hence the ``+ 1``.
        fsdp_config["fsdp_sharding_strategy"] = _ask_options(
            sharding_strategy_query,
            FSDP_SHARDING_STRATEGY,
            lambda x: int(x) + 1,
            default=1,
        )
        fsdp_config["fsdp_offload_params"] = _ask_field(
            "Do you want to offload parameters and gradients to CPU? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )
        fsdp_wrap_query = "What should be your auto wrap policy?"
        fsdp_config["fsdp_auto_wrap_policy"] = _ask_options(
            fsdp_wrap_query,
            FSDP_AUTO_WRAP_POLICY,
            lambda x: FSDP_AUTO_WRAP_POLICY[int(x)],
        )
        if fsdp_config["fsdp_auto_wrap_policy"] == FSDP_AUTO_WRAP_POLICY[0]:
            use_no_split_modules = _ask_field(
                "Do you want to use the model's `_no_split_modules` to wrap. Only applicable for 🤗 Transformers [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            if not use_no_split_modules:
                fsdp_config["fsdp_transformer_layer_cls_to_wrap"] = _ask_field(
                    "Specify the comma-separated list of transformer layer class names (case-sensitive) to wrap ,e.g, :"
                    "`BertLayer`, `GPTJBlock`, `T5Block`, `BertLayer,BertEmbeddings,BertSelfOutput` ...? : ",
                    str,
                )
        elif fsdp_config["fsdp_auto_wrap_policy"] == FSDP_AUTO_WRAP_POLICY[1]:
            fsdp_config["fsdp_min_num_params"] = _ask_field(
                "What should be your FSDP's minimum number of parameters for Default Auto Wrapping Policy? [1e8]: ",
                int,
                default=100000000,
            )
        fsdp_backward_prefetch_query = "What should be your FSDP's backward prefetch policy?"
        fsdp_config["fsdp_backward_prefetch_policy"] = _ask_options(
            fsdp_backward_prefetch_query,
            FSDP_BACKWARD_PREFETCH,
            lambda x: FSDP_BACKWARD_PREFETCH[int(x)],
        )
        fsdp_state_dict_type_query = "What should be your FSDP's state dict type?"
        fsdp_config["fsdp_state_dict_type"] = _ask_options(
            fsdp_state_dict_type_query,
            FSDP_STATE_DICT_TYPE,
            lambda x: FSDP_STATE_DICT_TYPE[int(x)],
            default=2,
        )
        fsdp_config["fsdp_forward_prefetch"] = _ask_field(
            "Do you want to enable FSDP's forward prefetch policy? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )
        fsdp_config["fsdp_use_orig_params"] = _ask_field(
            "Do you want to enable FSDP's `use_orig_params` feature? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )
        fsdp_config["fsdp_sync_module_states"] = _ask_field(
            "Do you want each individually wrapped FSDP unit to broadcast module parameters from rank 0 at the start? [YES/no]: ",
            _convert_yes_no_to_bool,
            default=True,
            error_message="Please enter yes or no.",
        )

    # Megatron-LM (may promote ``distributed_type`` to MEGATRON_LM).
    megatron_lm_config = {}
    if distributed_type in [DistributedType.MULTI_GPU]:
        use_megatron_lm = _ask_field(
            "Do you want to use Megatron-LM ? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )
        if use_megatron_lm:
            distributed_type = DistributedType.MEGATRON_LM
    if distributed_type == DistributedType.MEGATRON_LM:
        prefix = "megatron_lm_"
        megatron_lm_config[prefix + "tp_degree"] = _ask_field(
            "What is the Tensor Parallelism degree/size? [1]:",
            int,
            default=1,
            error_message="Please enter an integer.",
        )
        if megatron_lm_config[prefix + "tp_degree"] > 1:
            megatron_lm_config[prefix + "sequence_parallelism"] = _ask_field(
                "Do you want to enable Sequence Parallelism? [YES/no]: ",
                _convert_yes_no_to_bool,
                default=True,
                error_message="Please enter yes or no.",
            )

        megatron_lm_config[prefix + "pp_degree"] = _ask_field(
            "What is the Pipeline Parallelism degree/size? [1]:",
            int,
            default=1,
            error_message="Please enter an integer.",
        )
        if megatron_lm_config[prefix + "pp_degree"] > 1:
            megatron_lm_config[prefix + "num_micro_batches"] = _ask_field(
                "What is the number of micro-batches? [1]:",
                int,
                default=1,
                error_message="Please enter an integer.",
            )

        megatron_lm_config[prefix + "recompute_activations"] = _ask_field(
            "Do you want to enable selective activation recomputation? [YES/no]: ",
            _convert_yes_no_to_bool,
            default=True,
            error_message="Please enter yes or no.",
        )

        megatron_lm_config[prefix + "use_distributed_optimizer"] = _ask_field(
            "Do you want to use distributed optimizer "
            "which shards optimizer state and gradients across data pralellel ranks? [YES/no]: ",
            _convert_yes_no_to_bool,
            default=True,
            error_message="Please enter yes or no.",
        )

        megatron_lm_config[prefix + "gradient_clipping"] = _ask_field(
            "What is the gradient clipping value based on global L2 Norm (0 to disable)? [1.0]: ",
            float,
            default=1.0,
        )
    # TPU specific defaults
    tpu_commands = None
    tpu_command_file = None
    tpu_downcast_bf16 = "no"
    tpu_env = []
    tpu_name = None
    tpu_vm = None
    tpu_zone = None
    tpu_use_sudo = False
    tpu_use_cluster = False

    if distributed_type in [
        DistributedType.MULTI_CPU,
        DistributedType.MULTI_XPU,
        DistributedType.MULTI_GPU,
        DistributedType.MULTI_NPU,
        DistributedType.TPU,
    ]:
        machine_type = str(distributed_type).split(".")[1].replace("MULTI_", "")
        if machine_type == "TPU":
            machine_type += " cores"
        else:
            machine_type += "(s)"
        num_processes = _ask_field(
            f"How many {machine_type} should be used for distributed training? [1]:",
            int,
            default=1,
            error_message="Please enter an integer.",
        )
    elif distributed_type in [DistributedType.FSDP, DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]:
        num_processes = _ask_field(
            "How many GPU(s) should be used for distributed training? [1]:",
            int,
            default=1,
            error_message="Please enter an integer.",
        )
    else:
        num_processes = 1

    if (distributed_type == DistributedType.MULTI_GPU) and (num_machines == 1) and (num_processes == 1):
        raise ValueError(
            f"Specified distributed type {distributed_type} but only using 1 GPU on a single machine. Please select `No distributed training` for the type of machine you are using."
        )

    if (
        distributed_type
        in [
            DistributedType.MULTI_GPU,
            DistributedType.MULTI_NPU,
            DistributedType.MULTI_XPU,
            DistributedType.NO,
        ]
        and not use_cpu
        and not use_mps
    ):
        if is_npu_available():
            machine_type = "NPU(s)"
        else:
            machine_type = "GPU(s)"
        gpu_ids = _ask_field(
            f"What {machine_type} (by id) should be used for training on this machine as a comma-seperated list? [all]:",
            default="all",
        )

    if distributed_type == DistributedType.TPU:
        mixed_precision = "no"
        main_training_function = _ask_field(
            "What is the name of the function in your script that should be launched in all parallel scripts? [main]: ",
            default="main",
        )
        tpu_use_cluster = _ask_field(
            "Are you using a TPU cluster? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )
        if tpu_use_cluster:
            tpu_name = _ask_field(
                "What is the name of your TPU cluster? ",
                default=None,
                error_message="Please enter the name of your TPU cluster.",
            )
            tpu_zone = _ask_field(
                "What is the zone of your TPU cluster? ",
                default=None,
                error_message="Please enter the zone of your TPU cluster.",
            )
            tpu_use_sudo = _ask_field(
                "To run a python script in a TPU pod, should `sudo` be used? [yes/NO]: ",
                # FIX: converter was missing, so even the answer "no" produced a truthy string.
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            run_commands = _ask_field(
                "Do you have code you wish to run on startup in each pod? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            if run_commands:
                use_command_file = _ask_field(
                    "Is this code located in a bash script? [yes/NO]: ",
                    _convert_yes_no_to_bool,
                    default=False,
                    error_message="Please enter yes or no.",
                )
                if use_command_file:
                    tpu_command_file = _ask_field(
                        "What is the path to your bash script? ",
                        default=None,
                        error_message="Please enter the path to your bash script.",
                    )
                    tpu_command_file = os.path.abspath(tpu_command_file)
                else:
                    print("Please enter each command seperately you wish to run on startup in each pod.")
                    tpu_commands = []
                    another_command = True
                    while another_command:
                        tpu_commands.append(
                            _ask_field(
                                "Please enter a single command to be ran ",
                                default=None,
                                error_message="Please enter the commands you wish to run on startup in each pod as a single string.",
                            )
                        )
                        another_command = _ask_field(
                            "Do you wish to add another command? [yes/NO]: ",
                            _convert_yes_no_to_bool,
                            default=False,
                            error_message="Please enter yes or no.",
                        )
            tpu_vm = _ask_field(
                "If not using an instance group, what are the names of the Compute VM instances to be used, seperated by a comma: ",
                default="",
            ).split(",")
            tpu_env = _ask_field(
                "What environment variables do you wish to set in each pod, seperated by a comma: ",
                default="",
            ).split(",")

    else:
        main_training_function = "main"
        # A JSON DeepSpeed config already carries the precision choice.
        if distributed_type == DistributedType.DEEPSPEED and use_deepspeed_config:
            mixed_precision = None
        else:
            mixed_precision = _ask_options(
                "Do you wish to use FP16 or BF16 (mixed precision)?",
                ["no", "fp16", "bf16", "fp8"],
                _convert_mixed_precision,
            )

    if use_dynamo and mixed_precision == "no" and not use_cpu:
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    if distributed_type == DistributedType.TPU and mixed_precision == "bf16":
        tpu_downcast_bf16 = _ask_field(
            "Should `torch.float` be cast as `bfloat16` and `torch.double` remain `float32` on TPUs?", default="no"
        )

    return ClusterConfig(
        compute_environment=ComputeEnvironment.LOCAL_MACHINE,
        distributed_type=distributed_type,
        num_processes=num_processes,
        gpu_ids=gpu_ids,
        mixed_precision=mixed_precision,
        downcast_bf16=tpu_downcast_bf16,
        machine_rank=machine_rank,
        num_machines=num_machines,
        main_process_ip=main_process_ip,
        main_process_port=main_process_port,
        main_training_function=main_training_function,
        deepspeed_config=deepspeed_config,
        fsdp_config=fsdp_config,
        megatron_lm_config=megatron_lm_config,
        ipex_config=ipex_config,
        use_cpu=use_cpu,
        rdzv_backend=rdzv_backend,
        same_network=same_network,
        commands=tpu_commands,
        command_file=tpu_command_file,
        tpu_env=tpu_env,
        tpu_name=tpu_name,
        tpu_vm=tpu_vm,
        tpu_zone=tpu_zone,
        tpu_use_sudo=tpu_use_sudo,
        tpu_use_cluster=tpu_use_cluster,
        dynamo_config=dynamo_config,
        debug=debug,
    )
evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/config/config.py ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import argparse
18
+ import os
19
+
20
+ from accelerate.utils import ComputeEnvironment
21
+
22
+ from .cluster import get_cluster_input
23
+ from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
24
+ from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
25
+ from .sagemaker import get_sagemaker_input
26
+
27
+
28
+ description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
29
+
30
+
31
def get_user_input():
    """Prompt for the compute environment and gather the matching questionnaire.

    Returns a `SageMakerConfig` when AWS SageMaker is selected, otherwise a
    `ClusterConfig` built from the local-machine prompts.
    """
    environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        return get_sagemaker_input()
    return get_cluster_input()
42
+
43
+
44
def config_command_parser(subparsers=None):
    """Build the argument parser for `accelerate config`.

    When `subparsers` is given, the parser is registered as the "config"
    subcommand (with `config_command` as its handler); otherwise a standalone
    parser is created.
    """
    if subparsers is None:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)
    else:
        parser = subparsers.add_parser("config", description=description)

    config_file_help = (
        "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
        "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
        "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
        "with 'huggingface'."
    )
    parser.add_argument("--config_file", default=None, help=config_file_help)

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser
64
+
65
+
66
def config_command(args):
    """Run the interactive questionnaire and persist the resulting config.

    The config is written to ``args.config_file`` when provided, otherwise to
    the default YAML config file inside the accelerate cache directory. The
    serialization format (JSON vs. YAML) is picked from the file extension.
    """
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        # `exist_ok=True` removes the check-then-create race of the previous
        # `if not os.path.isdir(...): os.makedirs(...)` pattern.
        os.makedirs(cache_dir, exist_ok=True)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")
80
+
81
+
82
def main():
    """Entry point for running `accelerate config` as a standalone script."""
    args = config_command_parser().parse_args()
    config_command(args)
86
+
87
+
88
+ if __name__ == "__main__":
89
+ main()
evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/config/config_args.py ADDED
@@ -0,0 +1,234 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import json
18
+ import os
19
+ from dataclasses import dataclass
20
+ from enum import Enum
21
+ from typing import List, Optional, Union
22
+
23
+ import yaml
24
+
25
+ from ...utils import ComputeEnvironment, DistributedType, SageMakerDistributedType
26
+ from ...utils.constants import SAGEMAKER_PYTHON_VERSION, SAGEMAKER_PYTORCH_VERSION, SAGEMAKER_TRANSFORMERS_VERSION
27
+
28
+
29
# Accelerate stores its config under the Hugging Face cache home: `$HF_HOME`
# when set, otherwise `$XDG_CACHE_HOME/huggingface` (default `~/.cache/huggingface`).
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
cache_dir = os.path.join(hf_cache_home, "accelerate")
# NOTE(review): both the "json" and "yaml" defaults point at the same
# `default_config.yaml` path, so the backward-compatibility branch below always
# selects the YAML file — presumably intentional, confirm upstream.
default_json_config_file = os.path.join(cache_dir, "default_config.yaml")
default_yaml_config_file = os.path.join(cache_dir, "default_config.yaml")

# For backward compatibility: the default config is the json one if it's the only existing file.
if os.path.isfile(default_yaml_config_file) or not os.path.isfile(default_json_config_file):
    default_config_file = default_yaml_config_file
else:
    default_config_file = default_json_config_file
41
+
42
+
43
def load_config_from_file(config_file):
    """Load a saved accelerate config, dispatching on file format and environment.

    Peeks at the ``compute_environment`` entry to decide whether the file
    describes a local machine (`ClusterConfig`) or an Amazon SageMaker job
    (`SageMakerConfig`), then deserializes it with the matching class.

    Raises:
        FileNotFoundError: if an explicitly passed `config_file` does not exist.
    """
    if config_file is None:
        config_file = default_config_file
    elif not os.path.isfile(config_file):
        raise FileNotFoundError(
            f"The passed configuration file `{config_file}` does not exist. "
            "Please pass an existing file to `accelerate launch`, or use the the default one "
            "created through `accelerate config` and run `accelerate launch` "
            "without the `--config_file` argument."
        )
    use_json = config_file.endswith(".json")
    with open(config_file, "r", encoding="utf-8") as f:
        raw = json.load(f) if use_json else yaml.safe_load(f)
    environment = raw.get("compute_environment", ComputeEnvironment.LOCAL_MACHINE)
    config_class = ClusterConfig if environment == ComputeEnvironment.LOCAL_MACHINE else SageMakerConfig
    if use_json:
        return config_class.from_json_file(json_file=config_file)
    return config_class.from_yaml_file(yaml_file=config_file)
73
+
74
+
75
@dataclass
class BaseConfig:
    """Common fields plus JSON/YAML (de)serialization shared by all accelerate configs."""

    compute_environment: ComputeEnvironment
    distributed_type: Union[DistributedType, SageMakerDistributedType]
    mixed_precision: str
    use_cpu: bool
    debug: bool

    def to_dict(self):
        """Return a serialization-ready dict of this config.

        Enum members are flattened to their underlying values and empty-dict /
        `None` entries are dropped. Operates on a shallow copy so the instance
        itself is never mutated (the previous implementation converted the
        enums in place inside `self.__dict__`).
        """
        # For serialization, it's best to convert Enums to strings (or their underlying value type).
        result = self.__dict__.copy()
        for key, value in result.items():
            if isinstance(value, Enum):
                result[key] = value.value
            if isinstance(value, dict) and not bool(value):
                result[key] = None
        result = {k: v for k, v in result.items() if v is not None}
        return result

    @classmethod
    def from_json_file(cls, json_file=None):
        """Load a config from a JSON file, upgrading legacy keys (`fp16`, `dynamo_backend`).

        Raises:
            ValueError: if the file contains keys this config class does not declare.
        """
        json_file = default_json_config_file if json_file is None else json_file
        with open(json_file, "r", encoding="utf-8") as f:
            config_dict = json.load(f)
        if "compute_environment" not in config_dict:
            config_dict["compute_environment"] = ComputeEnvironment.LOCAL_MACHINE
        if "mixed_precision" not in config_dict:
            config_dict["mixed_precision"] = "fp16" if ("fp16" in config_dict and config_dict["fp16"]) else None
        if "fp16" in config_dict:  # Convert the config to the new format.
            del config_dict["fp16"]
        if "dynamo_backend" in config_dict:  # Convert the config to the new format.
            dynamo_backend = config_dict.pop("dynamo_backend")
            config_dict["dynamo_config"] = {} if dynamo_backend == "NO" else {"dynamo_backend": dynamo_backend}
        if "use_cpu" not in config_dict:
            config_dict["use_cpu"] = False
        if "debug" not in config_dict:
            config_dict["debug"] = False
        extra_keys = sorted(set(config_dict.keys()) - set(cls.__dataclass_fields__.keys()))
        if len(extra_keys) > 0:
            raise ValueError(
                f"The config file at {json_file} had unknown keys ({extra_keys}), please try upgrading your `accelerate`"
                " version or fix (and potentially remove) these keys from your config file."
            )

        return cls(**config_dict)

    def to_json_file(self, json_file):
        """Serialize this config to `json_file` as pretty-printed, key-sorted JSON."""
        with open(json_file, "w", encoding="utf-8") as f:
            content = json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
            f.write(content)

    @classmethod
    def from_yaml_file(cls, yaml_file=None):
        """Load a config from a YAML file, upgrading legacy keys (`fp16`, `dynamo_backend`).

        Raises:
            ValueError: if the file contains keys this config class does not declare.
        """
        yaml_file = default_yaml_config_file if yaml_file is None else yaml_file
        with open(yaml_file, "r", encoding="utf-8") as f:
            config_dict = yaml.safe_load(f)
        if "compute_environment" not in config_dict:
            config_dict["compute_environment"] = ComputeEnvironment.LOCAL_MACHINE
        if "mixed_precision" not in config_dict:
            config_dict["mixed_precision"] = "fp16" if ("fp16" in config_dict and config_dict["fp16"]) else None
        # YAML can yield a bare boolean here; normalize `false` to the string "no".
        if isinstance(config_dict["mixed_precision"], bool) and not config_dict["mixed_precision"]:
            config_dict["mixed_precision"] = "no"
        if "fp16" in config_dict:  # Convert the config to the new format.
            del config_dict["fp16"]
        if "dynamo_backend" in config_dict:  # Convert the config to the new format.
            dynamo_backend = config_dict.pop("dynamo_backend")
            config_dict["dynamo_config"] = {} if dynamo_backend == "NO" else {"dynamo_backend": dynamo_backend}
        if "use_cpu" not in config_dict:
            config_dict["use_cpu"] = False
        if "debug" not in config_dict:
            config_dict["debug"] = False
        extra_keys = sorted(set(config_dict.keys()) - set(cls.__dataclass_fields__.keys()))
        if len(extra_keys) > 0:
            raise ValueError(
                f"The config file at {yaml_file} had unknown keys ({extra_keys}), please try upgrading your `accelerate`"
                " version or fix (and potentially remove) these keys from your config file."
            )
        return cls(**config_dict)

    def to_yaml_file(self, yaml_file):
        """Serialize this config to `yaml_file` via `yaml.safe_dump`."""
        with open(yaml_file, "w", encoding="utf-8") as f:
            yaml.safe_dump(self.to_dict(), f)

    def __post_init__(self):
        # Coerce string values (as loaded from files) back into their enum types.
        if isinstance(self.compute_environment, str):
            self.compute_environment = ComputeEnvironment(self.compute_environment)
        if isinstance(self.distributed_type, str):
            if self.compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
                self.distributed_type = SageMakerDistributedType(self.distributed_type)
            else:
                self.distributed_type = DistributedType(self.distributed_type)
        # NOTE(review): `dynamo_config` is only declared on the subclasses; this
        # relies on every concrete config defining that field.
        if self.dynamo_config is None:
            self.dynamo_config = {}
168
+
169
+
170
@dataclass
class ClusterConfig(BaseConfig):
    """Config describing a local / multi-machine (non-SageMaker) training setup."""

    # Total number of processes to launch across all machines.
    num_processes: int
    # Rank of this machine within the cluster (0 is the main machine).
    machine_rank: int = 0
    num_machines: int = 1
    # Comma-separated GPU ids to restrict training to; `None` means all GPUs.
    gpu_ids: Optional[str] = None
    main_process_ip: Optional[str] = None
    main_process_port: Optional[int] = None
    # torchrun rendezvous backend name.
    rdzv_backend: Optional[str] = "static"
    same_network: Optional[bool] = False
    main_training_function: str = "main"

    # args for deepspeed_plugin
    deepspeed_config: Optional[dict] = None
    # args for fsdp
    fsdp_config: Optional[dict] = None
    # args for megatron_lm
    megatron_lm_config: Optional[dict] = None
    # args for ipex
    ipex_config: Optional[dict] = None
    # args for TPU
    downcast_bf16: bool = False

    # args for TPU pods
    tpu_name: Optional[str] = None
    tpu_zone: Optional[str] = None
    tpu_use_cluster: bool = False
    tpu_use_sudo: bool = False
    command_file: Optional[str] = None
    commands: Optional[List[str]] = None
    tpu_vm: Optional[List[str]] = None
    tpu_env: Optional[List[str]] = None

    # args for dynamo
    dynamo_config: Optional[dict] = None

    def __post_init__(self):
        # Normalize the optional sub-config dicts to `{}` before the base-class
        # enum coercion in `BaseConfig.__post_init__` runs.
        if self.deepspeed_config is None:
            self.deepspeed_config = {}
        if self.fsdp_config is None:
            self.fsdp_config = {}
        if self.megatron_lm_config is None:
            self.megatron_lm_config = {}
        if self.ipex_config is None:
            self.ipex_config = {}
        return super().__post_init__()
216
+
217
+
218
@dataclass
class SageMakerConfig(BaseConfig):
    """Config describing an Amazon SageMaker training job."""

    ec2_instance_type: str
    iam_role_name: str
    # Custom Docker image URI; `None` falls back to the default image.
    image_uri: Optional[str] = None
    # AWS CLI profile to authenticate with; `None` uses explicit credentials.
    profile: Optional[str] = None
    region: str = "us-east-1"
    num_machines: int = 1
    gpu_ids: str = "all"
    # NOTE(review): this default is evaluated once at class-definition time with
    # num_machines == 1, so it is always "accelerate-sagemaker-1" regardless of
    # the num_machines value actually passed — confirm whether that is intended.
    base_job_name: str = f"accelerate-sagemaker-{num_machines}"
    pytorch_version: str = SAGEMAKER_PYTORCH_VERSION
    transformers_version: str = SAGEMAKER_TRANSFORMERS_VERSION
    py_version: str = SAGEMAKER_PYTHON_VERSION
    # Optional TSV files listing input channels / metric definitions.
    sagemaker_inputs_file: Optional[str] = None
    sagemaker_metrics_file: Optional[str] = None
    additional_args: Optional[dict] = None
    dynamo_config: Optional[dict] = None
evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/config/config_utils.py ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import argparse
18
+
19
+ from ...utils.dataclasses import (
20
+ ComputeEnvironment,
21
+ DistributedType,
22
+ DynamoBackend,
23
+ PrecisionType,
24
+ SageMakerDistributedType,
25
+ )
26
+ from ..menu import BulletMenu
27
+
28
+
29
# Backend choices offered when configuring torch dynamo; menu indices into this
# list are what `_convert_dynamo_backend` receives from `_ask_options`.
DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]
41
+
42
+
43
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    """Prompt repeatedly until the answer converts cleanly.

    An empty answer yields `default` (when one is given); a failed conversion
    prints `error_message` (when given) and re-prompts.
    """
    while True:
        raw = input(input_text)
        try:
            if default is not None and len(raw) == 0:
                return default
            if convert_value is None:
                return raw
            return convert_value(raw)
        except Exception:
            if error_message is not None:
                print(error_message)
55
+
56
def _ask_options(input_text, options=None, convert_value=None, default=0):
    """Render a bullet menu of `options` and return the (converted) selection.

    `options` defaults to an empty list; a `None` sentinel replaces the
    previous mutable default argument (`options=[]`), which is shared across
    calls in Python.
    """
    if options is None:
        options = []
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default)
    return convert_value(result) if convert_value is not None else result
60
+
61
+
62
def _convert_compute_environment(value):
    """Map a menu index (0/1) to the matching `ComputeEnvironment` member."""
    choices = ["LOCAL_MACHINE", "AMAZON_SAGEMAKER"]
    return ComputeEnvironment(choices[int(value)])
65
+
66
+
67
def _convert_distributed_mode(value):
    """Map a menu index to the matching `DistributedType` member."""
    choices = ["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"]
    return DistributedType(choices[int(value)])
70
+
71
+
72
def _convert_dynamo_backend(value):
    """Map a menu index into `DYNAMO_BACKENDS` to the backend's value string."""
    backend_name = DYNAMO_BACKENDS[int(value)]
    return DynamoBackend(backend_name).value
75
+
76
+
77
def _convert_mixed_precision(value):
    """Map a menu index to the matching `PrecisionType` member."""
    choices = ["no", "fp16", "bf16", "fp8"]
    return PrecisionType(choices[int(value)])
80
+
81
+
82
def _convert_sagemaker_distributed_mode(value):
    """Map a menu index to the matching `SageMakerDistributedType` member."""
    choices = ["NO", "DATA_PARALLEL", "MODEL_PARALLEL"]
    return SageMakerDistributedType(choices[int(value)])
85
+
86
+
87
+ def _convert_yes_no_to_bool(value):
88
+ return {"yes": True, "no": False}[value.lower()]
89
+
90
+
91
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that will remove the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        rendered = super()._format_usage(usage, actions, groups, prefix)
        return rendered.replace("<command> [<args>] ", "")
evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/config/default.py ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ from pathlib import Path
18
+
19
+ import torch
20
+
21
+ from ...utils import is_npu_available, is_xpu_available
22
+ from .config_args import ClusterConfig, default_json_config_file
23
+ from .config_utils import SubcommandHelpFormatter
24
+
25
+
26
+ description = "Create a default config file for Accelerate with only a few flags set."
27
+
28
+
29
def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    """
    Creates and saves a basic cluster config to be used on a local machine with potentially multiple GPUs. Will also
    set CPU if it is a CPU-only machine.

    Args:
        mixed_precision (`str`, *optional*, defaults to "no"):
            Mixed Precision to use. Should be one of "no", "fp16", or "bf16"
        save_location (`str`, *optional*, defaults to `default_json_config_file`):
            Optional custom save location. Should be passed to `--config_file` when using `accelerate launch`. Default
            location is inside the huggingface cache folder (`~/.cache/huggingface`) but can be overriden by setting
            the `HF_HOME` environmental variable, followed by `accelerate/default_config.yaml`.
        use_xpu (`bool`, *optional*, defaults to `False`):
            Whether to use XPU if available.

    Returns:
        The `Path` the config was written to, or `False` when a config already
        existed at `save_location` and nothing was written.
    """
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    # Pick the accelerator in priority order: CUDA, then XPU (only when
    # requested), then NPU, else fall back to CPU.
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        config["distributed_type"] = "MULTI_GPU" if num_gpus > 1 else "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        config["distributed_type"] = "MULTI_XPU" if num_xpus > 1 else "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        config["distributed_type"] = "MULTI_NPU" if num_npus > 1 else "NO"
    else:
        # CPU-only fallback (the previous version also set an unused `num_xpus = 0` here).
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config["debug"] = False
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path
93
+
94
+
95
def default_command_parser(parser, parents):
    """Register the `default` subcommand, which writes a basic config file."""
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)

    config_file_help = (
        "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
        "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
        "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
        "with 'huggingface'."
    )
    # Stored under `args.save_location` (not `config_file`) for `write_basic_config`.
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=config_file_help,
        dest="save_location",
    )

    mixed_precision_help = (
        "Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later."
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help=mixed_precision_help,
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser
+
121
+
122
def default_config_command(args):
    """Write a basic config and report where it was saved (if newly created)."""
    saved_path = write_basic_config(args.mixed_precision, args.save_location)
    if saved_path:
        print(f"accelerate configuration saved at {saved_path}")
evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/config/sagemaker.py ADDED
@@ -0,0 +1,267 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ import json
17
+ import os
18
+
19
+ from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
20
+ from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
21
+ from ...utils.imports import is_boto3_available
22
+ from .config_args import SageMakerConfig
23
+ from .config_utils import (
24
+ DYNAMO_BACKENDS,
25
+ _ask_field,
26
+ _ask_options,
27
+ _convert_dynamo_backend,
28
+ _convert_mixed_precision,
29
+ _convert_sagemaker_distributed_mode,
30
+ _convert_yes_no_to_bool,
31
+ )
32
+
33
+
34
+ if is_boto3_available():
35
+ import boto3 # noqa: F401
36
+
37
+
38
def _create_iam_role_for_sagemaker(role_name):
    """Create (or reuse) an IAM role that SageMaker training jobs can assume.

    The role trusts the SageMaker service principal and is granted the
    SageMaker / ECR / CloudWatch / Logs / S3 permissions listed in the inline
    policy below. If a role with the same name already exists, it is left
    untouched and reused.
    """
    iam_client = boto3.client("iam")

    # Trust policy: allow the SageMaker service to assume this role.
    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        # Permissions the training job needs: SageMaker control, pulling the
        # training image from ECR, publishing metrics/logs, and S3 data access.
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "sagemaker:*",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                        "ecr:GetAuthorizationToken",
                        "cloudwatch:PutMetricData",
                        "cloudwatch:GetMetricData",
                        "cloudwatch:GetMetricStatistics",
                        "cloudwatch:ListMetrics",
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:DescribeLogStreams",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "s3:CreateBucket",
                        "s3:ListBucket",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:PutObject",
                    ],
                    "Resource": "*",
                }
            ],
        }
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name,
            PolicyName=f"{role_name}_policy_permission",
            PolicyDocument=json.dumps(policy_document, indent=2),
        )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")
90
+
91
+
92
def _get_iam_role_arn(role_name):
    """Look up the ARN of an existing IAM role by name."""
    response = boto3.client("iam").get_role(RoleName=role_name)
    return response["Role"]["Arn"]
95
+
96
+
97
def get_sagemaker_input():
    """Run the interactive SageMaker questionnaire and return a `SageMakerConfig`.

    Walks the user through: AWS credentials, region, IAM role, Docker image,
    SageMaker inputs/metrics files, distributed mode, torch dynamo options,
    EC2 instance type, machine count, and mixed precision. Exports the chosen
    credentials/region into `os.environ` as a side effect.
    """
    # --- Authentication: either a named AWS profile or explicit keys ---
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id

        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    # --- IAM role: reuse an existing one or create a new one via boto3 ---
    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    # --- Optional custom Docker image ---
    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    # --- Optional SageMaker input channels (TSV file) ---
    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    # --- Optional SageMaker metric definitions (TSV file) ---
    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    # --- Distributed mode and torch dynamo options ---
    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    if use_dynamo:
        # Keys are prefixed with "dynamo_" to match the launcher's expectations.
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )

        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
    # --- EC2 instance type: a fixed list for distributed jobs, free-form otherwise ---
    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    # Debug-mode (checked distributed operations) only makes sense when distributed.
    debug = False
    if distributed_type != SageMakerDistributedType.NO:
        debug = _ask_field(
            "Should distributed operations be checked while running for errors? This can avoid timeout issues but will be slower. [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ",
            int,
            default=1,
        )

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )

    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        ec2_instance_type=ec2_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
        debug=debug,
    )
evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/config/update.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ from pathlib import Path
18
+
19
+ from .config_args import default_config_file, load_config_from_file
20
+ from .config_utils import SubcommandHelpFormatter
21
+
22
+
23
# Summary line shown in `accelerate config --help` for the `update` subcommand
# (passed as `help=` when the subparser is registered below).
description = "Update an existing config file with the latest defaults while maintaining the old configuration."
24
+
25
+
26
def update_config(args):
    """
    Update an existing config file with the latest defaults while maintaining the old configuration.

    Resolves the target path (falling back to the default config file when none is
    passed), loads it through the current config classes so new fields pick up their
    defaults, and writes it back in its original format (JSON or YAML).

    Returns the path of the file that was updated.
    """
    config_file = args.config_file
    if config_file is None and Path(default_config_file).exists():
        config_file = default_config_file
    elif not Path(config_file).exists():
        raise ValueError(f"The passed config file located at {config_file} doesn't exist.")
    config = load_config_from_file(config_file)

    # Round-trip through the config class: serializing again adds any fields
    # introduced since the file was created, preserving existing values.
    writer = config.to_json_file if config_file.endswith(".json") else config.to_yaml_file
    writer(config_file)
    return config_file
42
+
43
+
44
def update_command_parser(parser, parents):
    """Register the `update` subcommand (and its arguments) on the given parser."""
    sub_parser = parser.add_parser("update", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    sub_parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to the config file to update. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    # Dispatch: `accelerate config update` invokes update_config_command(args).
    sub_parser.set_defaults(func=update_config_command)
    return sub_parser
59
+
60
+
61
def update_config_command(args):
    """
    CLI handler for `accelerate config update`: rewrite the config file with current
    defaults and report the path that was updated.
    """
    config_file = update_config(args)
    # Fix: user-facing message previously read "Sucessfully" (typo).
    print(f"Successfully updated the configuration file at {config_file}.")
evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/estimate.py ADDED
@@ -0,0 +1,270 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ import argparse
17
+
18
+ from huggingface_hub import model_info
19
+ from huggingface_hub.utils import GatedRepoError, RepositoryNotFoundError
20
+
21
+ from accelerate import init_empty_weights
22
+ from accelerate.utils import (
23
+ calculate_maximum_sizes,
24
+ convert_bytes,
25
+ is_timm_available,
26
+ is_transformers_available,
27
+ )
28
+
29
+
30
+ if is_transformers_available():
31
+ import transformers
32
+ from transformers import AutoConfig, AutoModel
33
+
34
+ if is_timm_available():
35
+ import timm
36
+
37
+
38
def verify_on_hub(repo: str, token: str = None):
    """
    Check that `repo` exists on the Hub and return its `ModelInfo`.

    Returns the sentinel string "gated" when the repo is gated (authentication
    required) and "repo" when it does not exist / is not visible.
    """
    try:
        info = model_info(repo, token=token)
    except GatedRepoError:
        return "gated"
    except RepositoryNotFoundError:
        return "repo"
    return info
46
+
47
+
48
def check_has_model(error):
    """
    Determine which library raised `error` while loading a model.

    Matches on the exception type plus a library-specific message fragment;
    returns "timm", "transformers", or "unknown" when neither pattern matches.
    """
    # timm raises RuntimeError("Unknown model ...") for unregistered model names.
    if is_timm_available() and isinstance(error, RuntimeError) and "Unknown model" in error.args[0]:
        return "timm"
    # transformers raises OSError("... does not appear to have a file named ...")
    # when a repo lacks the expected model files.
    if (
        is_transformers_available()
        and isinstance(error, OSError)
        and "does not appear to have a file named" in error.args[0]
    ):
        return "transformers"
    return "unknown"
62
+
63
+
64
def create_empty_model(model_name: str, library_name: str, trust_remote_code: bool = False, access_token: str = None):
    """
    Instantiates a model on the `meta` device (no weights allocated) so its overall
    memory consumption can be estimated.

    Args:
        model_name (`str`):
            The model name on the Hub
        library_name (`str`):
            The library the model has an integration with, such as `transformers`. Will be used if `model_name` has no
            metadata on the Hub to determine the library.
        trust_remote_code (`bool`, `optional`, defaults to `False`):
            Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
            should only be set to `True` for repositories you trust and in which you have read the code, as it will
            execute code present on the Hub on your local machine.
        access_token (`str`, `optional`, defaults to `None`):
            The access token to use to access private or gated models on the Hub. (for use on the Gradio app)

    Returns:
        `torch.nn.Module`: The torch model that has been initialized on the `meta` device.
    """
    # Named `repo_info` to avoid shadowing the imported `model_info` helper.
    repo_info = verify_on_hub(model_name, access_token)
    # Turn the sentinel return values into explicit, user-friendly errors.
    if repo_info == "gated":
        raise GatedRepoError(
            f"Repo for model `{model_name}` is gated. You must be authenticated to access it. Please run `huggingface-cli login`."
        )
    if repo_info == "repo":
        raise RepositoryNotFoundError(
            f"Repo for model `{model_name}` does not exist on the Hub. If you are trying to access a private repo,"
            " make sure you are authenticated via `huggingface-cli login` and have access."
        )
    if library_name is None:
        # Fall back to the library recorded in the Hub metadata, if any.
        library_name = getattr(repo_info, "library_name", False)
        if not library_name:
            raise ValueError(
                f"Model `{model_name}` does not have any library metadata on the Hub, please manually pass in a `--library_name` to use (such as `transformers`)"
            )
    if library_name == "transformers":
        if not is_transformers_available():
            raise ImportError(
                f"To check `{model_name}`, `transformers` must be installed. Please install it via `pip install transformers`"
            )
        print(f"Loading pretrained config for `{model_name}` from `transformers`...")

        auto_map = repo_info.config.get("auto_map", False)
        config = AutoConfig.from_pretrained(model_name, trust_remote_code=trust_remote_code)

        with init_empty_weights():
            # Remote code may register a task-specific `AutoModelFor...` class in
            # `auto_map`; prefer it over the generic `AutoModel` when present.
            constructor = AutoModel
            if isinstance(auto_map, dict):
                candidate = next((key for key in auto_map if key.startswith("AutoModelFor")), None)
                if candidate is not None:
                    constructor = getattr(transformers, candidate)
            model = constructor.from_config(config, trust_remote_code=trust_remote_code)
    elif library_name == "timm":
        if not is_timm_available():
            raise ImportError(
                f"To check `{model_name}`, `timm` must be installed. Please install it via `pip install timm`"
            )
        print(f"Loading pretrained config for `{model_name}` from `timm`...")
        with init_empty_weights():
            model = timm.create_model(model_name, pretrained=False)
    else:
        raise ValueError(
            f"Library `{library_name}` is not supported yet, please open an issue on GitHub for us to add support."
        )
    return model
137
+
138
+
139
def create_ascii_table(headers: list, rows: list, title: str):
    "Creates a pretty table from a list of rows, minimal version of `tabulate`."
    # Box-drawing characters: column separator and horizontal fill.
    sep_char, in_between = "│", "─"
    # Width of each column = widest cell in that column (header included).
    column_widths = []
    for i in range(len(headers)):
        column_values = [row[i] for row in rows] + [headers[i]]
        max_column_width = max(len(value) for value in column_values)
        column_widths.append(max_column_width)

    # %-style right-aligned format for each column; captured BEFORE any title-driven
    # widening below, so `pattern` keeps the original widths.
    formats = [f"%{column_widths[i]}s" for i in range(len(rows[0]))]

    pattern = f"{sep_char}{sep_char.join(formats)}{sep_char}"
    # Extra padding needed when the title is wider than the columns combined.
    diff = 0

    def make_row(left_char, middle_char, right_char):
        # Builds a horizontal border; reads `diff` and `column_widths` from the
        # enclosing scope, so its output changes after they are adjusted below.
        return f"{left_char}{middle_char.join([in_between * n for n in column_widths])}{in_between * diff}{right_char}"

    separator = make_row("├", "┼", "┤")
    if len(title) > sum(column_widths):
        diff = abs(len(title) - len(separator))
        column_widths[-1] += diff

    # Update with diff
    separator = make_row("├", "┼", "┤")
    initial_rows = [
        make_row("┌", in_between, "┐"),
        f"{sep_char}{title.center(len(separator) - 2)}{sep_char}",
        make_row("├", "┬", "┤"),
    ]
    table = "\n".join(initial_rows) + "\n"
    # NOTE(review): `column_widths[-1] += diff` was already applied inside the
    # `if` above, so a wide title widens the last column twice — confirm this
    # double adjustment is intentional before touching the layout.
    column_widths[-1] += diff
    centered_line = [text.center(column_widths[i]) for i, text in enumerate(headers)]
    table += f"{pattern % tuple(centered_line)}\n{separator}\n"
    for i, line in enumerate(rows):
        # NOTE(review): the inner comprehension's `i` shadows the outer loop
        # index; harmless here since the outer `i` is unused afterwards.
        centered_line = [t.center(column_widths[i]) for i, t in enumerate(line)]
        table += f"{pattern % tuple(centered_line)}\n"
    table += f'└{"┴".join([in_between * n for n in column_widths])}┘'

    return table
178
+
179
+
180
def estimate_command_parser(subparsers=None):
    """
    Build the argument parser for `accelerate estimate-memory`.

    When `subparsers` is given (invocation through the main `accelerate` CLI) the
    command is registered there; otherwise a standalone parser is created for
    direct script use.
    """
    if subparsers is None:
        parser = argparse.ArgumentParser(description="Model size estimator for fitting a model onto CUDA memory.")
    else:
        parser = subparsers.add_parser("estimate-memory")

    parser.add_argument("model_name", type=str, help="The model name on the Hugging Face Hub.")
    parser.add_argument(
        "--library_name",
        type=str,
        help="The library the model has an integration with, such as `transformers`, needed only if this information is not stored on the Hub.",
        choices=["timm", "transformers"],
    )
    # One list serves both as the default (estimate everything) and the valid choices.
    dtype_choices = ["float32", "float16", "int8", "int4"]
    parser.add_argument(
        "--dtypes",
        type=str,
        nargs="+",
        default=dtype_choices,
        help="The dtypes to use for the model, must be one (or many) of `float32`, `float16`, `int8`, and `int4`",
        choices=dtype_choices,
    )
    parser.add_argument(
        "--trust_remote_code",
        action="store_true",
        help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. This flag
        should only be used for repositories you trust and in which you have read the code, as it will execute
        code present on the Hub on your local machine.""",
    )

    # Only wire up the dispatch function when running as a subcommand.
    if subparsers is not None:
        parser.set_defaults(func=estimate_command)
    return parser
212
+
213
+
214
def gather_data(args):
    "Creates an empty model and gathers the data for the sizes"
    try:
        model = create_empty_model(
            args.model_name, library_name=args.library_name, trust_remote_code=args.trust_remote_code
        )
    except (RuntimeError, OSError) as e:
        # Identify which backend library failed so the error names it explicitly.
        library = check_has_model(e)
        if library != "unknown":
            raise RuntimeError(
                f"Tried to load `{args.model_name}` with `{library}` but a possible model to load was not found inside the repo."
            )
        raise e

    total_size, largest_layer = calculate_maximum_sizes(model)

    # Size divisors relative to float32 (4 bytes): fp16 is half, int8 a quarter,
    # int4 an eighth. float32 is left untouched.
    size_divisors = {"float16": 2, "int8": 4, "int4": 8}

    data = []
    for dtype in args.dtypes:
        dtype_total = total_size
        dtype_largest = largest_layer[0]
        divisor = size_divisors.get(dtype)
        if divisor is not None:
            dtype_total /= divisor
            dtype_largest /= divisor
        # Training with Adam is estimated at ~4x the raw model size
        # (weights + gradients + two optimizer moments).
        data.append([dtype, dtype_largest, dtype_total, dtype_total * 4])
    return data
247
+
248
+
249
def estimate_command(args):
    """Run the size estimate for `args.model_name` and print an ASCII summary table."""
    data = gather_data(args)
    # Convert the numeric byte counts into human-readable strings, in place.
    for row in data:
        row[:] = [convert_bytes(item) if isinstance(item, (int, float)) else item for item in row]

    headers = ["dtype", "Largest Layer", "Total Size", "Training using Adam"]
    title = f"Memory Usage for loading `{args.model_name}`"
    print(create_ascii_table(headers, data, title))
261
+
262
+
263
def main():
    """Standalone entry point: parse CLI arguments and run the estimator."""
    args = estimate_command_parser().parse_args()
    estimate_command(args)


if __name__ == "__main__":
    main()
evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/launch.py ADDED
@@ -0,0 +1,996 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import argparse
18
+ import importlib
19
+ import logging
20
+ import os
21
+ import subprocess
22
+ import sys
23
+ from pathlib import Path
24
+
25
+ import psutil
26
+ import torch
27
+
28
+ from accelerate.commands.config import default_config_file, load_config_from_file
29
+ from accelerate.commands.config.config_args import SageMakerConfig
30
+ from accelerate.commands.config.config_utils import DYNAMO_BACKENDS
31
+ from accelerate.state import get_int_from_env
32
+ from accelerate.utils import (
33
+ ComputeEnvironment,
34
+ DistributedType,
35
+ PrepareForLaunch,
36
+ _filter_args,
37
+ is_bf16_available,
38
+ is_deepspeed_available,
39
+ is_npu_available,
40
+ is_rich_available,
41
+ is_sagemaker_available,
42
+ is_torch_version,
43
+ is_tpu_available,
44
+ is_xpu_available,
45
+ patch_environment,
46
+ prepare_deepspeed_cmd_env,
47
+ prepare_multi_gpu_env,
48
+ prepare_sagemager_args_inputs,
49
+ prepare_simple_launcher_cmd_env,
50
+ prepare_tpu,
51
+ )
52
+ from accelerate.utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS, TORCH_DYNAMO_MODES
53
+
54
+
55
# When `rich` is installed, route logging through its handler for colorized,
# nicely formatted console output.
if is_rich_available():
    from rich import get_console
    from rich.logging import RichHandler

    FORMAT = "%(message)s"
    logging.basicConfig(format=FORMAT, datefmt="[%X]", handlers=[RichHandler()])


logger = logging.getLogger(__name__)

# Maps each platform/paradigm CLI flag to the title of the argparse argument
# group it unlocks; consumed by `_CustomHelpAction` to show only the help
# sections relevant to the flags the user actually passed.
options_to_group = {
    "--multi-gpu": "Distributed GPUs",
    "--tpu": "TPU",
    "--use_deepspeed": "DeepSpeed Arguments",
    "--use_fsdp": "FSDP Arguments",
    "--use_megatron_lm": "Megatron-LM Arguments",
}
72
+
73
+
74
def clean_option(option):
    """
    Normalize a long CLI flag: for tokens starting with `--`, replace every `-`
    after the leading `--` (and first character) with `_` so e.g. `--multi-gpu`
    matches the stored option string `--multi_gpu`.

    Any other token (positional values, single-dash flags) is returned unchanged.
    """
    if option.startswith("--"):
        return option[:3] + option[3:].replace("-", "_")
    # Fix: previously fell off the end and returned None for non-`--` tokens,
    # filling the mapped args list with Nones and losing short flags entirely.
    return option
78
+
79
+
80
class _CustomHelpAction(argparse._HelpAction):
    """
    This is a custom help action that will hide all arguments that are not used in the command line when the help is
    called. This is useful for the case where the user is using a specific platform and only wants to see the arguments
    for that platform.
    """

    # NOTE: relies on private argparse internals (`_actions`, `_action_groups`,
    # `arg.container`) — behavior may shift across Python versions.
    def __call__(self, parser, namespace, values, option_string=None):
        # Strip the program/subcommand tokens so `args` holds only user-passed options.
        if "accelerate" in sys.argv[0] and "launch" in sys.argv[1:]:
            args = sys.argv[2:]
        else:
            args = sys.argv[1:]
        opts = parser._actions
        # Argument-group titles that are always displayed, regardless of flags.
        titles = [
            "Hardware Selection Arguments",
            "Resource Selection Arguments",
            "Training Paradigm Arguments",
            "positional arguments",
            "optional arguments",
        ]
        # Only filter when the user passed something besides `--help` itself.
        if len(args) > 1:
            # Platform flags present on the command line (see `options_to_group`).
            used_platforms = [arg for arg in args if arg in options_to_group.keys()]
            # Normalize `-` to `_` so flags match the stored option strings.
            args = list(map(clean_option, args))
            used_titles = [options_to_group[o] for o in used_platforms]
            for i, arg in enumerate(opts):
                # If the argument's container is outside of the used titles, hide it
                if arg.container.title not in titles + used_titles:
                    setattr(opts[i], "help", argparse.SUPPRESS)
                # If the argument is hardware selection, but not being passed, hide it
                elif arg.container.title == "Hardware Selection Arguments":
                    if set(arg.option_strings).isdisjoint(set(args)):
                        setattr(opts[i], "help", argparse.SUPPRESS)
                    else:
                        setattr(opts[i], "help", arg.help + " (currently selected)")
                # If the argument is a training paradigm, but not being passed, hide it
                elif arg.container.title == "Training Paradigm Arguments":
                    if set(arg.option_strings).isdisjoint(set(used_platforms)):
                        setattr(opts[i], "help", argparse.SUPPRESS)
                    else:
                        setattr(opts[i], "help", arg.help + " (currently selected)")
            # Iterate over a copy since groups are removed while iterating.
            for i, group in enumerate(list(parser._action_groups)):
                # If all arguments in the group are hidden, hide the group
                if all([arg.help == argparse.SUPPRESS for arg in group._group_actions]):
                    parser._action_groups.remove(group)

        # Delegate to the stock help action to render the (now filtered) help.
        super().__call__(parser, namespace, values, option_string)
126
+
127
+
128
+ def launch_command_parser(subparsers=None):
129
+ if subparsers is not None:
130
+ parser = subparsers.add_parser("launch", add_help=False, allow_abbrev=False)
131
+ else:
132
+ parser = argparse.ArgumentParser("Accelerate launch command", add_help=False, allow_abbrev=False)
133
+
134
+ parser.register("action", "help", _CustomHelpAction)
135
+ parser.add_argument("-h", "--help", action="help", help="Show this help message and exit.")
136
+
137
+ parser.add_argument(
138
+ "--config_file", default=None, help="The config file to use for the default values in the launching script."
139
+ )
140
+ parser.add_argument(
141
+ "--quiet",
142
+ "-q",
143
+ action="store_true",
144
+ help="Silence subprocess errors from the launch stack trace and only show the relevant tracebacks. (Only applicable to DeepSpeed and single-process configurations)",
145
+ )
146
+ # Hardware selection arguments
147
+ hardware_args = parser.add_argument_group(
148
+ "Hardware Selection Arguments", "Arguments for selecting the hardware to be used."
149
+ )
150
+ hardware_args.add_argument(
151
+ "--cpu", default=False, action="store_true", help="Whether or not to force the training on the CPU."
152
+ )
153
+ hardware_args.add_argument(
154
+ "--multi_gpu",
155
+ default=False,
156
+ action="store_true",
157
+ help="Whether or not this should launch a distributed GPU training.",
158
+ )
159
+ hardware_args.add_argument(
160
+ "--tpu", default=False, action="store_true", help="Whether or not this should launch a TPU training."
161
+ )
162
+ hardware_args.add_argument(
163
+ "--ipex",
164
+ default=False,
165
+ action="store_true",
166
+ help="Whether or not this should launch a Intel PyTorch Extension (IPEX) training.",
167
+ )
168
+
169
+ # Resource selection arguments
170
+ resource_args = parser.add_argument_group(
171
+ "Resource Selection Arguments", "Arguments for fine-tuning how available hardware should be used."
172
+ )
173
+ resource_args.add_argument(
174
+ "--mixed_precision",
175
+ type=str,
176
+ choices=["no", "fp16", "bf16", "fp8"],
177
+ help="Whether or not to use mixed precision training. "
178
+ "Choose between FP16 and BF16 (bfloat16) training. "
179
+ "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
180
+ )
181
+ resource_args.add_argument(
182
+ "--num_processes", type=int, default=None, help="The total number of processes to be launched in parallel."
183
+ )
184
+ resource_args.add_argument(
185
+ "--num_machines", type=int, default=None, help="The total number of machines used in this training."
186
+ )
187
+ resource_args.add_argument(
188
+ "--num_cpu_threads_per_process",
189
+ type=int,
190
+ default=None,
191
+ help="The number of CPU threads per process. Can be tuned for optimal performance.",
192
+ )
193
+
194
+ # Dynamo arguments
195
+ resource_args.add_argument(
196
+ "--dynamo_backend",
197
+ type=str,
198
+ choices=["no"] + [b.lower() for b in DYNAMO_BACKENDS],
199
+ help="Choose a backend to optimize your training with dynamo, see more at "
200
+ "https://github.com/pytorch/torchdynamo.",
201
+ )
202
+ resource_args.add_argument(
203
+ "--dynamo_mode",
204
+ type=str,
205
+ default="default",
206
+ choices=TORCH_DYNAMO_MODES,
207
+ help="Choose a mode to optimize your training with dynamo.",
208
+ )
209
+ resource_args.add_argument(
210
+ "--dynamo_use_fullgraph",
211
+ default=False,
212
+ action="store_true",
213
+ help="Whether to use full graph mode for dynamo or it is ok to break model into several subgraphs",
214
+ )
215
+ resource_args.add_argument(
216
+ "--dynamo_use_dynamic",
217
+ default=False,
218
+ action="store_true",
219
+ help="Whether to enable dynamic shape tracing.",
220
+ )
221
+
222
+ # Training Paradigm arguments
223
+ paradigm_args = parser.add_argument_group(
224
+ "Training Paradigm Arguments", "Arguments for selecting which training paradigm to be used."
225
+ )
226
+ paradigm_args.add_argument(
227
+ "--use_deepspeed",
228
+ default=False,
229
+ action="store_true",
230
+ help="Whether to use deepspeed.",
231
+ )
232
+ paradigm_args.add_argument(
233
+ "--use_fsdp",
234
+ default=False,
235
+ action="store_true",
236
+ help="Whether to use fsdp.",
237
+ )
238
+ paradigm_args.add_argument(
239
+ "--use_megatron_lm",
240
+ default=False,
241
+ action="store_true",
242
+ help="Whether to use Megatron-LM.",
243
+ )
244
+ paradigm_args.add_argument(
245
+ "--use_xpu",
246
+ default=False,
247
+ action="store_true",
248
+ help="Whether to use IPEX plugin to speed up training on XPU specifically.",
249
+ )
250
+
251
+ # distributed GPU training arguments
252
+ distributed_args = parser.add_argument_group("Distributed GPUs", "Arguments related to distributed GPU training.")
253
+ distributed_args.add_argument(
254
+ "--gpu_ids",
255
+ default=None,
256
+ help="What GPUs (by id) should be used for training on this machine as a comma-seperated list",
257
+ )
258
+ distributed_args.add_argument(
259
+ "--same_network",
260
+ default=False,
261
+ action="store_true",
262
+ help="Whether all machines used for multinode training exist on the same local network.",
263
+ )
264
+ distributed_args.add_argument(
265
+ "--machine_rank", type=int, default=None, help="The rank of the machine on which this script is launched."
266
+ )
267
+ distributed_args.add_argument(
268
+ "--main_process_ip", type=str, default=None, help="The IP address of the machine of rank 0."
269
+ )
270
+ distributed_args.add_argument(
271
+ "--main_process_port",
272
+ type=int,
273
+ default=None,
274
+ help="The port to use to communicate with the machine of rank 0.",
275
+ )
276
+ distributed_args.add_argument(
277
+ "-t",
278
+ "--tee",
279
+ default="0",
280
+ type=str,
281
+ help="Tee std streams into a log file and also to console.",
282
+ )
283
+ distributed_args.add_argument(
284
+ "--role",
285
+ type=str,
286
+ default="default",
287
+ help="User-defined role for the workers.",
288
+ )
289
+ # Rendezvous related arguments
290
+ distributed_args.add_argument(
291
+ "--rdzv_backend",
292
+ type=str,
293
+ default="static",
294
+ help="The rendezvous method to use, such as 'static' (the default) or 'c10d'",
295
+ )
296
+ distributed_args.add_argument(
297
+ "--rdzv_conf",
298
+ type=str,
299
+ default="",
300
+ help="Additional rendezvous configuration (<key1>=<value1>,<key2>=<value2>,...).",
301
+ )
302
+ distributed_args.add_argument(
303
+ "--max_restarts",
304
+ type=int,
305
+ default=0,
306
+ help="Maximum number of worker group restarts before failing.",
307
+ )
308
+ distributed_args.add_argument(
309
+ "--monitor_interval",
310
+ type=float,
311
+ default=5,
312
+ help="Interval, in seconds, to monitor the state of workers.",
313
+ )
314
+ parser.add_argument(
315
+ "-m",
316
+ "--module",
317
+ action="store_true",
318
+ help="Change each process to interpret the launch script as a Python module, executing with the same behavior as 'python -m'.",
319
+ )
320
+ parser.add_argument(
321
+ "--no_python",
322
+ action="store_true",
323
+ help="Skip prepending the training script with 'python' - just execute it directly. Useful when the script is not a Python script.",
324
+ )
325
+
326
+ # TPU arguments
327
+ tpu_args = parser.add_argument_group("TPU", "Arguments related to TPU.")
328
+ tpu_args.add_argument(
329
+ "--tpu_cluster",
330
+ action="store_true",
331
+ dest="tpu_use_cluster",
332
+ help="Whether to use a GCP TPU pod for training.",
333
+ )
334
+ tpu_args.add_argument(
335
+ "--no_tpu_cluster",
336
+ action="store_false",
337
+ dest="tpu_use_cluster",
338
+ help="Should not be passed explicitly, this is for internal use only.",
339
+ )
340
+ tpu_args.add_argument(
341
+ "--tpu_use_sudo",
342
+ action="store_true",
343
+ help="Whether to use `sudo` when running the TPU training script in each pod.",
344
+ )
345
+ tpu_args.add_argument(
346
+ "--vm",
347
+ type=str,
348
+ action="append",
349
+ help=(
350
+ "List of single Compute VM instance names. "
351
+ "If not provided we assume usage of instance groups. For TPU pods."
352
+ ),
353
+ )
354
+ tpu_args.add_argument(
355
+ "--env",
356
+ type=str,
357
+ action="append",
358
+ help="List of environment variables to set on the Compute VM instances. For TPU pods.",
359
+ )
360
+ tpu_args.add_argument(
361
+ "--main_training_function",
362
+ type=str,
363
+ default=None,
364
+ help="The name of the main function to be executed in your script (only for TPU training).",
365
+ )
366
+ tpu_args.add_argument(
367
+ "--downcast_bf16",
368
+ action="store_true",
369
+ help="Whether when using bf16 precision on TPUs if both float and double tensors are cast to bfloat16 or if double tensors remain as float32.",
370
+ )
371
+
372
+ # DeepSpeed arguments
373
+ deepspeed_args = parser.add_argument_group("DeepSpeed Arguments", "Arguments related to DeepSpeed.")
374
+ deepspeed_args.add_argument(
375
+ "--deepspeed_config_file",
376
+ default=None,
377
+ type=str,
378
+ help="DeepSpeed config file.",
379
+ )
380
+ deepspeed_args.add_argument(
381
+ "--zero_stage",
382
+ default=None,
383
+ type=int,
384
+ help="DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed). "
385
+ "If unspecified, will default to `2`.",
386
+ )
387
+ deepspeed_args.add_argument(
388
+ "--offload_optimizer_device",
389
+ default=None,
390
+ type=str,
391
+ help="Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed). "
392
+ "If unspecified, will default to 'none'.",
393
+ )
394
+ deepspeed_args.add_argument(
395
+ "--offload_param_device",
396
+ default=None,
397
+ type=str,
398
+ help="Decides where (none|cpu|nvme) to offload parameters (useful only when `use_deepspeed` flag is passed). "
399
+ "If unspecified, will default to 'none'.",
400
+ )
401
+ deepspeed_args.add_argument(
402
+ "--offload_optimizer_nvme_path",
403
+ default=None,
404
+ type=str,
405
+ help="Decides Nvme Path to offload optimizer states (useful only when `use_deepspeed` flag is passed). "
406
+ "If unspecified, will default to 'none'.",
407
+ )
408
+ deepspeed_args.add_argument(
409
+ "--offload_param_nvme_path",
410
+ default=None,
411
+ type=str,
412
+ help="Decides Nvme Path to offload parameters (useful only when `use_deepspeed` flag is passed). "
413
+ "If unspecified, will default to 'none'.",
414
+ )
415
+ deepspeed_args.add_argument(
416
+ "--gradient_accumulation_steps",
417
+ default=None,
418
+ type=int,
419
+ help="No of gradient_accumulation_steps used in your training script (useful only when `use_deepspeed` flag is passed). "
420
+ "If unspecified, will default to `1`.",
421
+ )
422
+ deepspeed_args.add_argument(
423
+ "--gradient_clipping",
424
+ default=None,
425
+ type=float,
426
+ help="gradient clipping value used in your training script (useful only when `use_deepspeed` flag is passed). "
427
+ "If unspecified, will default to `1.0`.",
428
+ )
429
+ deepspeed_args.add_argument(
430
+ "--zero3_init_flag",
431
+ default=None,
432
+ type=str,
433
+ help="Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. "
434
+ "Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `true`.",
435
+ )
436
+ deepspeed_args.add_argument(
437
+ "--zero3_save_16bit_model",
438
+ default=None,
439
+ type=str,
440
+ help="Decides Whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. "
441
+ "Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `false`.",
442
+ )
443
+ deepspeed_args.add_argument(
444
+ "--deepspeed_hostfile",
445
+ default=None,
446
+ type=str,
447
+ help="DeepSpeed hostfile for configuring multi-node compute resources.",
448
+ )
449
+ deepspeed_args.add_argument(
450
+ "--deepspeed_exclusion_filter",
451
+ default=None,
452
+ type=str,
453
+ help="DeepSpeed exclusion filter string when using mutli-node setup.",
454
+ )
455
+ deepspeed_args.add_argument(
456
+ "--deepspeed_inclusion_filter",
457
+ default=None,
458
+ type=str,
459
+ help="DeepSpeed inclusion filter string when using mutli-node setup.",
460
+ )
461
+ deepspeed_args.add_argument(
462
+ "--deepspeed_multinode_launcher",
463
+ default=None,
464
+ type=str,
465
+ help="DeepSpeed multi-node launcher to use. If unspecified, will default to `pdsh`.",
466
+ )
467
+
468
+ # fsdp arguments
469
+ fsdp_args = parser.add_argument_group("FSDP Arguments", "Arguments related to Fully Shared Data Parallelism.")
470
+ fsdp_args.add_argument(
471
+ "--fsdp_offload_params",
472
+ default="false",
473
+ type=str,
474
+ help="Decides Whether (true|false) to offload parameters and gradients to CPU. (useful only when `use_fsdp` flag is passed).",
475
+ )
476
+ fsdp_args.add_argument(
477
+ "--fsdp_min_num_params",
478
+ type=int,
479
+ default=1e8,
480
+ help="FSDP's minimum number of parameters for Default Auto Wrapping. (useful only when `use_fsdp` flag is passed).",
481
+ )
482
+ fsdp_args.add_argument(
483
+ "--fsdp_sharding_strategy",
484
+ type=int,
485
+ default=1,
486
+ help="FSDP's Sharding Strategy. (useful only when `use_fsdp` flag is passed).",
487
+ )
488
+ fsdp_args.add_argument(
489
+ "--fsdp_auto_wrap_policy",
490
+ type=str,
491
+ default=None,
492
+ help="FSDP's auto wrap policy. (useful only when `use_fsdp` flag is passed).",
493
+ )
494
+ fsdp_args.add_argument(
495
+ "--fsdp_transformer_layer_cls_to_wrap",
496
+ default=None,
497
+ type=str,
498
+ help="Transformer layer class name (case-sensitive) to wrap ,e.g, `BertLayer`, `GPTJBlock`, `T5Block` .... "
499
+ "(useful only when `use_fsdp` flag is passed).",
500
+ )
501
+ fsdp_args.add_argument(
502
+ "--fsdp_backward_prefetch_policy",
503
+ default=None,
504
+ type=str,
505
+ help="FSDP's backward prefetch policy. (useful only when `use_fsdp` flag is passed).",
506
+ )
507
+ fsdp_args.add_argument(
508
+ "--fsdp_state_dict_type",
509
+ default=None,
510
+ type=str,
511
+ help="FSDP's state dict type. (useful only when `use_fsdp` flag is passed).",
512
+ )
513
+ fsdp_args.add_argument(
514
+ "--fsdp_forward_prefetch",
515
+ default="false",
516
+ type=str,
517
+ help="If True, then FSDP explicitly prefetches the next upcoming "
518
+ "all-gather while executing in the forward pass (useful only when `use_fsdp` flag is passed).",
519
+ )
520
+ fsdp_args.add_argument(
521
+ "--fsdp_use_orig_params",
522
+ default="false",
523
+ type=str,
524
+ help="If True, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable paramteres."
525
+ " (useful only when `use_fsdp` flag is passed).",
526
+ )
527
+ fsdp_args.add_argument(
528
+ "--fsdp_sync_module_states",
529
+ default="true",
530
+ type=str,
531
+ help="If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0."
532
+ " (useful only when `use_fsdp` flag is passed).",
533
+ )
534
+
535
+ # megatron_lm args
536
+ megatron_lm_args = parser.add_argument_group("Megatron-LM Arguments", "Arguments related to Megatron-LM.")
537
+ megatron_lm_args.add_argument(
538
+ "--megatron_lm_tp_degree",
539
+ type=int,
540
+ default=1,
541
+ help="Megatron-LM's Tensor Parallelism (TP) degree. (useful only when `use_megatron_lm` flag is passed).",
542
+ )
543
+ megatron_lm_args.add_argument(
544
+ "--megatron_lm_pp_degree",
545
+ type=int,
546
+ default=1,
547
+ help="Megatron-LM's Pipeline Parallelism (PP) degree. (useful only when `use_megatron_lm` flag is passed).",
548
+ )
549
+ megatron_lm_args.add_argument(
550
+ "--megatron_lm_num_micro_batches",
551
+ type=int,
552
+ default=None,
553
+ help="Megatron-LM's number of micro batches when PP degree > 1. (useful only when `use_megatron_lm` flag is passed).",
554
+ )
555
+ megatron_lm_args.add_argument(
556
+ "--megatron_lm_sequence_parallelism",
557
+ default=None,
558
+ type=str,
559
+ help="Decides Whether (true|false) to enable Sequence Parallelism when TP degree > 1. "
560
+ "(useful only when `use_megatron_lm` flag is passed).",
561
+ )
562
+ megatron_lm_args.add_argument(
563
+ "--megatron_lm_recompute_activations",
564
+ default=None,
565
+ type=str,
566
+ help="Decides Whether (true|false) to enable Selective Activation Recomputation. "
567
+ "(useful only when `use_megatron_lm` flag is passed).",
568
+ )
569
+ megatron_lm_args.add_argument(
570
+ "--megatron_lm_use_distributed_optimizer",
571
+ default=None,
572
+ type=str,
573
+ help="Decides Whether (true|false) to use distributed optimizer "
574
+ "which shards optimizer state and gradients across Data Pralellel (DP) ranks. "
575
+ "(useful only when `use_megatron_lm` flag is passed).",
576
+ )
577
+ megatron_lm_args.add_argument(
578
+ "--megatron_lm_gradient_clipping",
579
+ default=1.0,
580
+ type=float,
581
+ help="Megatron-LM's gradient clipping value based on global L2 Norm (0 to disable). "
582
+ "(useful only when `use_megatron_lm` flag is passed).",
583
+ )
584
+
585
+ # AWS arguments
586
+ aws_args = parser.add_argument_group("AWS Arguments", "Arguments related to AWS.")
587
+ aws_args.add_argument(
588
+ "--aws_access_key_id",
589
+ type=str,
590
+ default=None,
591
+ help="The AWS_ACCESS_KEY_ID used to launch the Amazon SageMaker training job",
592
+ )
593
+ aws_args.add_argument(
594
+ "--aws_secret_access_key",
595
+ type=str,
596
+ default=None,
597
+ help="The AWS_SECRET_ACCESS_KEY used to launch the Amazon SageMaker training job.",
598
+ )
599
+ parser.add_argument(
600
+ "--debug",
601
+ action="store_true",
602
+ help="Whether to print out the torch.distributed stack trace when something fails.",
603
+ )
604
+ parser.add_argument(
605
+ "training_script",
606
+ type=str,
607
+ help=(
608
+ "The full path to the script to be launched in parallel, followed by all the arguments for the training "
609
+ "script."
610
+ ),
611
+ )
612
+
613
+ # Other arguments of the training scripts
614
+ parser.add_argument("training_script_args", nargs=argparse.REMAINDER, help="Arguments of the training script.")
615
+
616
+ if subparsers is not None:
617
+ parser.set_defaults(func=launch_command)
618
+ return parser
619
+
620
+
621
def simple_launcher(args):
    """Run the training script in a single local subprocess (no distributed launcher)."""
    cmd, current_env = prepare_simple_launcher_cmd_env(args)

    proc = subprocess.Popen(cmd, env=current_env)
    proc.wait()
    if proc.returncode == 0:
        return
    # Non-zero exit: either surface the failure loudly or exit quietly.
    if args.quiet:
        sys.exit(1)
    raise subprocess.CalledProcessError(returncode=proc.returncode, cmd=cmd)
632
+
633
def multi_gpu_launcher(args):
    """Launch the training script through ``torch.distributed.run`` across GPUs."""
    import torch.distributed.run as distrib_run

    current_env = prepare_multi_gpu_env(args)

    debug = getattr(args, "debug", False)
    # Keep only the arguments that torch.distributed.run understands, forwarding
    # the training script and its own arguments as extras.
    args = _filter_args(
        args,
        distrib_run.get_args_parser(),
        ["--training_script", args.training_script, "--training_script_args", args.training_script_args],
    )
    with patch_environment(**current_env):
        try:
            distrib_run.run(args)
        except Exception:
            if debug and is_rich_available():
                # Pretty-print the stack trace with rich when --debug was passed.
                console = get_console()
                console.print("\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]")
                console.print_exception(suppress=[__file__], show_locals=False)
            else:
                raise
654
+
655
+
656
def deepspeed_launcher(args):
    """Launch the training script with DeepSpeed.

    Multi-node runs (for every multinode launcher except the one at index 1 of
    `DEEPSPEED_MULTINODE_LAUNCHERS`) are delegated to the external command built by
    `prepare_deepspeed_cmd_env`; everything else goes through `torch.distributed.run`.

    Raises:
        ImportError: if the `deepspeed` package is not installed.
        subprocess.CalledProcessError: if the multi-node subprocess fails and
            `--quiet` was not passed (otherwise exits with status 1).
    """
    import torch.distributed.run as distrib_run

    if not is_deepspeed_available():
        raise ImportError("DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source.")

    cmd, current_env = prepare_deepspeed_cmd_env(args)

    # NOTE(review): DEEPSPEED_MULTINODE_LAUNCHERS[1] appears to be the launcher that
    # does not need the `.deepspeed_env` file — confirm against the constant's definition.
    if args.num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]:
        # Propagate the environment to remote nodes through a `.deepspeed_env` file
        # (opened in append mode); values containing ';' or ' ' are skipped because
        # they would break the file's simple KEY=VALUE parsing.
        with open(".deepspeed_env", "a") as f:
            for key, value in current_env.items():
                if ";" in value or " " in value:
                    continue
                f.write(f"{key}={value}\n")

        process = subprocess.Popen(cmd, env=current_env)
        process.wait()
        if process.returncode != 0:
            if not args.quiet:
                raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
            else:
                sys.exit(1)
    else:
        debug = getattr(args, "debug", False)
        # Reduce `args` to the subset understood by torch.distributed.run, keeping
        # the training script and its arguments as forwarded extras.
        args = _filter_args(
            args,
            distrib_run.get_args_parser(),
            ["--training_script", args.training_script, "--training_script_args", args.training_script_args],
        )
        with patch_environment(**current_env):
            try:
                distrib_run.run(args)
            except Exception:
                # With --debug and rich installed, show a prettified stack trace.
                if is_rich_available() and debug:
                    console = get_console()
                    console.print("\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]")
                    console.print_exception(suppress=[__file__], show_locals=False)
                else:
                    raise
695
+
696
+
697
def tpu_launcher(args):
    """Spawn the training function on TPU cores via ``torch_xla`` multiprocessing."""
    import torch_xla.distributed.xla_multiprocessing as xmp

    if args.no_python:
        raise ValueError("--no_python cannot be used with TPU launcher")

    args, current_env = prepare_tpu(args, {})

    if not args.module:
        # Import training_script as a module: make its directory importable first.
        script_path = Path(args.training_script)
        sys.path.append(str(script_path.parent.resolve()))
        mod_name = script_path.stem
    else:
        mod_name = args.training_script

    mod = importlib.import_module(mod_name)
    if not hasattr(mod, args.main_training_function):
        raise ValueError(
            f"Your training script should have a function named {args.main_training_function}, or you should pass a "
            "different value to `--main_training_function`."
        )
    main_function = getattr(mod, args.main_training_function)

    # Patch sys.argv so the training module only sees its own arguments.
    sys.argv = [mod.__file__] + args.training_script_args

    with patch_environment(**current_env):
        xmp.spawn(PrepareForLaunch(main_function), args=(), nprocs=args.num_processes)
726
+
727
+
728
def tpu_pod_launcher(args):
    """Launch training on a TPU pod by re-invoking ``accelerate-launch`` on every
    TPU VM worker through ``torch_xla``'s ``xla_dist`` distributed runner.

    Raises:
        ValueError: if any ``docker_*`` flag is set (containers are unsupported here).
    """
    from torch_xla.distributed import xla_dist

    current_env = {}
    args, current_env = prepare_tpu(args, current_env, True)
    debug = getattr(args, "debug", False)

    # Keep the script/args aside: `_filter_args` below replaces `args` with only
    # the options that xla_dist's own parser understands.
    training_script = args.training_script
    training_script_args = args.training_script_args
    new_args = _filter_args(
        args, xla_dist.get_args_parser(), ["--tpu", args.tpu_name, "--positional", "", "--restart-tpuvm-pod-server"]
    )

    if args.tpu_use_sudo:
        new_cmd = ["sudo"]
    else:
        new_cmd = []

    # Per-worker command: run accelerate-launch in single-machine TPU mode
    # (--no_tpu_cluster) so each VM handles only its local processes.
    new_cmd += [
        "accelerate-launch",
        "--tpu",
        "--no_tpu_cluster",
        "--num_machines",
        str(1),
        "--mixed_precision",
        "no",
        "--dynamo_backend",
        "no",
        "--num_processes",
        str(args.num_processes),
        "--main_training_function",
        str(args.main_training_function),
        training_script,
    ] + training_script_args

    new_args.positional = new_cmd
    # Docker flags are not supported by this launcher; collect any that are set
    # so they can all be reported in one error message.
    bad_flags = ""
    for arg in vars(new_args):
        if arg.startswith("docker_"):
            value = getattr(new_args, arg)
            if value != "" and value is not None:
                bad_flags += f'{arg}="{value}"\n'
    if bad_flags != "":
        raise ValueError(
            f"Docker containers are not supported for TPU pod launcher currently, please remove the following flags:\n{bad_flags}"
        )
    # Forward the prepared environment as KEY=VALUE pairs plus a marker that
    # tells the inner accelerate-launch it runs inside a TPU pod.
    new_args.env = [f"{k}={v}" for k, v in current_env.items()]
    new_args.env.append("ACCELERATE_IN_TPU_POD=1")
    try:
        xla_dist.resolve_and_execute(new_args)
    except Exception:
        # With --debug and rich installed, show a prettified stack trace.
        if is_rich_available() and debug:
            console = get_console()
            console.print("\n[bold red]Using --debug, `torch_xla.xla_dist` Stack Trace:[/bold red]")
            console.print_exception(suppress=[__file__], show_locals=False)
        else:
            raise
785
+
786
+
787
def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):
    """Submit the training script as an Amazon SageMaker training job."""
    if not is_sagemaker_available():
        raise ImportError(
            "Please install sagemaker to be able to launch training on Amazon SageMaker with `pip install accelerate[sagemaker]`"
        )
    if args.module or args.no_python:
        raise ValueError(
            "SageMaker requires a python training script file and cannot be used with --module or --no_python"
        )

    from sagemaker.huggingface import HuggingFace

    # (sic: upstream helper name) builds the estimator kwargs and input channels.
    args, sagemaker_inputs = prepare_sagemager_args_inputs(sagemaker_config, args)

    estimator = HuggingFace(**args)
    estimator.fit(inputs=sagemaker_inputs)
    print(f"You can find your model data at: {estimator.model_data}")
805
+
806
+
807
def _validate_launch_command(args):
    """Fill in unset launch arguments from the accelerate config file (when present)
    or from auto-detected hardware defaults, and validate mutually exclusive flags.

    Returns:
        tuple: ``(args, defaults, mp_from_config_flag)`` — the mutated namespace,
        the loaded config object (or ``None``), and whether ``mixed_precision``
        came from the config file rather than the CLI.

    Raises:
        ValueError: on conflicting mode flags, too few processes for
            ``--multi_gpu``, too few GPU ids, or an unsupported bf16 setup.
    """
    # Sanity checks
    if sum([args.multi_gpu, args.cpu, args.tpu, args.use_deepspeed, args.use_fsdp]) > 1:
        raise ValueError(
            "You can only use one of `--cpu`, `--multi_gpu`, `--tpu`, `--use_deepspeed`, `--use_fsdp` at a time."
        )
    if args.multi_gpu and (args.num_processes is not None) and (args.num_processes < 2):
        raise ValueError("You need to use at least 2 processes to use `--multi_gpu`.")

    defaults = None
    warned = []  # human-readable notes about defaults that were silently applied
    mp_from_config_flag = False
    # Get the default from the config file.
    # NOTE(review): due to operator precedence this reads
    # `config_file is not None OR (isfile(default) AND not cpu)` — an explicit
    # --config_file enters this branch even with --cpu; confirm that is intended.
    if args.config_file is not None or os.path.isfile(default_config_file) and not args.cpu:
        defaults = load_config_from_file(args.config_file)
        # Only derive the distributed mode from the config when no mode flag
        # was passed explicitly on the command line.
        if (
            not args.multi_gpu
            and not args.tpu
            and not args.tpu_use_cluster
            and not args.use_deepspeed
            and not args.use_fsdp
            and not args.use_megatron_lm
        ):
            args.use_deepspeed = defaults.distributed_type == DistributedType.DEEPSPEED
            args.multi_gpu = (
                True
                if defaults.distributed_type
                in (DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU)
                else False
            )
            args.tpu = defaults.distributed_type == DistributedType.TPU
            args.use_fsdp = defaults.distributed_type == DistributedType.FSDP
            args.use_megatron_lm = defaults.distributed_type == DistributedType.MEGATRON_LM
            args.tpu_use_cluster = defaults.tpu_use_cluster if args.tpu else False
        if args.gpu_ids is None:
            if defaults.gpu_ids is not None:
                args.gpu_ids = defaults.gpu_ids
            else:
                args.gpu_ids = "all"

        if args.multi_gpu and args.num_machines is None:
            args.num_machines = defaults.num_machines

        if len(args.gpu_ids.split(",")) < 2 and (args.gpu_ids != "all") and args.multi_gpu and args.num_machines <= 1:
            raise ValueError(
                "Less than two GPU ids were configured and tried to run on on multiple GPUs. "
                "Please ensure at least two are specified for `--gpu_ids`, or use `--gpu_ids='all'`."
            )
        if defaults.compute_environment == ComputeEnvironment.LOCAL_MACHINE:
            # Update args with the defaults
            for name, attr in defaults.__dict__.items():
                if isinstance(attr, dict):
                    # Dict-valued config sections are flattened onto `args`
                    # attribute-by-attribute rather than copied wholesale.
                    for k in defaults.deepspeed_config:
                        setattr(args, k, defaults.deepspeed_config[k])
                    for k in defaults.fsdp_config:
                        arg_to_set = k
                        # fsdp config keys may lack the `fsdp_` CLI prefix.
                        if "fsdp" not in arg_to_set:
                            arg_to_set = "fsdp_" + arg_to_set
                        setattr(args, arg_to_set, defaults.fsdp_config[k])
                    for k in defaults.megatron_lm_config:
                        setattr(args, k, defaults.megatron_lm_config[k])
                    for k in defaults.dynamo_config:
                        setattr(args, k, defaults.dynamo_config[k])
                    for k in defaults.ipex_config:
                        setattr(args, k, defaults.ipex_config[k])
                    continue

                # Those args are handled separately
                if (
                    name not in ["compute_environment", "mixed_precision", "distributed_type"]
                    and getattr(args, name, None) is None
                ):
                    setattr(args, name, attr)
        if not args.debug:
            args.debug = defaults.debug

        if not args.mixed_precision:
            if defaults.mixed_precision is None:
                args.mixed_precision = "no"
            else:
                args.mixed_precision = defaults.mixed_precision
                mp_from_config_flag = True
        else:
            # mixed_precision was passed on the CLI: validate bf16 support.
            native_amp = False
            err = "{mode} mixed precision requires {requirement}"
            if args.use_cpu or (args.use_xpu and torch.xpu.is_available()):
                native_amp = is_torch_version(">=", "1.10")
            else:
                native_amp = is_bf16_available(True)
            if args.mixed_precision == "bf16" and not native_amp and not (args.tpu and is_tpu_available()):
                raise ValueError(err.format(mode="bf16", requirement="PyTorch >= 1.10 and a supported device."))

        # Silently set the default here
        if args.dynamo_backend is None:
            args.dynamo_backend = "no"
    else:
        # No config file: auto-detect sensible defaults from the hardware and
        # collect a warning line for each value that was guessed.
        if args.num_processes is None:
            if args.use_xpu and is_xpu_available():
                args.num_processes = torch.xpu.device_count()
            elif is_npu_available():
                args.num_processes = torch.npu.device_count()
            else:
                args.num_processes = torch.cuda.device_count()
            warned.append(f"\t`--num_processes` was set to a value of `{args.num_processes}`")
        if args.debug is None:
            args.debug = False
        if not args.multi_gpu and (
            (args.use_xpu and is_xpu_available() and torch.xpu.device_count() > 1)
            or (is_npu_available() and torch.npu.device_count() > 1)
            or (torch.cuda.device_count() > 1)
        ):
            warned.append(
                "\t\tMore than one GPU was found, enabling multi-GPU training.\n"
                "\t\tIf this was unintended please pass in `--num_processes=1`."
            )
            args.multi_gpu = True
        if args.num_machines is None:
            warned.append("\t`--num_machines` was set to a value of `1`")
            args.num_machines = 1
        if args.mixed_precision is None:
            warned.append("\t`--mixed_precision` was set to a value of `'no'`")
            args.mixed_precision = "no"
        if not hasattr(args, "use_cpu"):
            args.use_cpu = args.cpu
        if args.dynamo_backend is None:
            warned.append("\t`--dynamo_backend` was set to a value of `'no'`")
            args.dynamo_backend = "no"
    if args.debug:
        logger.debug("Running script in debug mode, expect distributed operations to be slightly slower.")

    is_aws_env_disabled = defaults is None or (
        defaults is not None and defaults.compute_environment != ComputeEnvironment.AMAZON_SAGEMAKER
    )
    if is_aws_env_disabled and args.num_cpu_threads_per_process is None:
        args.num_cpu_threads_per_process = 1
        if args.use_cpu and args.num_processes >= 1:
            # Spread the physical cores across the local MPI ranks (default 1).
            local_size = get_int_from_env(
                ["MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"], 1
            )
            threads_per_process = int(psutil.cpu_count(logical=False) / local_size)
            if threads_per_process > 1:
                args.num_cpu_threads_per_process = threads_per_process
                warned.append(
                    f"\t`--num_cpu_threads_per_process` was set to `{args.num_cpu_threads_per_process}` to improve out-of-box performance when training on CPUs"
                )

    if any(warned):
        message = "The following values were not passed to `accelerate launch` and had defaults used instead:\n"
        message += "\n".join(warned)
        message += (
            "\nTo avoid this warning pass in values for each of the problematic parameters or run `accelerate config`."
        )
        logger.warning(message)
    return args, defaults, mp_from_config_flag
961
+
962
+
963
def launch_command(args):
    """Validate the CLI arguments, then dispatch to the matching launcher."""
    args, defaults, mp_from_config_flag = _validate_launch_command(args)
    # Use the proper launcher
    if args.use_deepspeed and not args.cpu:
        # Record which DeepSpeed fields came from the accelerate config so the
        # launcher can distinguish config-file values from CLI overrides.
        fields = list(defaults.deepspeed_config.keys()) if defaults else []
        if mp_from_config_flag:
            fields.append("mixed_precision")
        args.deepspeed_fields_from_accelerate_config = ",".join(fields)
        deepspeed_launcher(args)
    elif (args.use_fsdp or args.use_megatron_lm or args.multi_gpu) and not args.cpu:
        # FSDP, Megatron-LM and plain multi-GPU all go through torch.distributed.run.
        multi_gpu_launcher(args)
    elif args.tpu and not args.cpu:
        if args.tpu_use_cluster:
            tpu_pod_launcher(args)
        else:
            tpu_launcher(args)
    elif defaults is not None and defaults.compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        sagemaker_launcher(defaults, args)
    else:
        simple_launcher(args)
987
+
988
+
989
def main():
    """CLI entry point: parse ``accelerate launch`` arguments and run the launcher."""
    launch_command(launch_command_parser().parse_args())


if __name__ == "__main__":
    main()
evalkit_tf437/lib/python3.10/site-packages/accelerate/commands/test.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import argparse
18
+ import os
19
+
20
+ from accelerate.test_utils import execute_subprocess_async
21
+
22
+
23
def test_command_parser(subparsers=None):
    """Build the argument parser for ``accelerate test``.

    When ``subparsers`` is given, the parser is registered as the ``test``
    subcommand of the main accelerate CLI; otherwise a standalone parser is
    returned.
    """
    if subparsers is None:
        parser = argparse.ArgumentParser("Accelerate test command")
    else:
        parser = subparsers.add_parser("test")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        # Hook the handler in only when running as a subcommand.
        parser.set_defaults(func=test_command)
    return parser
+
44
+
45
def test_command(args):
    """Run accelerate's bundled sanity-check script through ``accelerate-launch``."""
    # The test script lives two directories up from this file, under test_utils/scripts/.
    parts = __file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"]
    script_name = os.path.sep.join(parts)

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch", *test_args.split()]
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")
+
58
+
59
def main():
    """CLI entry point: parse ``accelerate test`` arguments and run the test."""
    test_command(test_command_parser().parse_args())


if __name__ == "__main__":
    main()