diff --git a/.gitattributes b/.gitattributes index 48516c5901116d69a55a07942d2e61d5b2b7b1d9..87846a883144f5a500caec60c284433dd0a56054 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1617,3 +1617,4 @@ evalkit_internvl/lib/python3.10/site-packages/sympy/matrices/__pycache__/matrixb evalkit_tf437/lib/python3.10/site-packages/pip/_vendor/distlib/w64-arm.exe filter=lfs diff=lfs merge=lfs -text evalkit_internvl/lib/python3.10/site-packages/sympy/core/__pycache__/function.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text evalkit_internvl/lib/python3.10/site-packages/sympy/core/__pycache__/numbers.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +evalkit_internvl/lib/python3.10/site-packages/sympy/stats/__pycache__/crv_types.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_appellseqs.py b/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_appellseqs.py new file mode 100644 index 0000000000000000000000000000000000000000..f4718a2da272ac6f36a968572dc246ebc699e5c4 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_appellseqs.py @@ -0,0 +1,91 @@ +"""Tests for efficient functions for generating Appell sequences.""" +from sympy.core.numbers import Rational as Q +from sympy.polys.polytools import Poly +from sympy.testing.pytest import raises +from sympy.polys.appellseqs import (bernoulli_poly, bernoulli_c_poly, + euler_poly, genocchi_poly, andre_poly) +from sympy.abc import x + +def test_bernoulli_poly(): + raises(ValueError, lambda: bernoulli_poly(-1, x)) + assert bernoulli_poly(1, x, polys=True) == Poly(x - Q(1,2)) + + assert bernoulli_poly(0, x) == 1 + assert bernoulli_poly(1, x) == x - Q(1,2) + assert bernoulli_poly(2, x) == x**2 - x + Q(1,6) + assert bernoulli_poly(3, x) == x**3 - Q(3,2)*x**2 + Q(1,2)*x + assert bernoulli_poly(4, x) == x**4 - 2*x**3 + x**2 - Q(1,30) + assert bernoulli_poly(5, x) == x**5 - Q(5,2)*x**4 + 
Q(5,3)*x**3 - Q(1,6)*x + assert bernoulli_poly(6, x) == x**6 - 3*x**5 + Q(5,2)*x**4 - Q(1,2)*x**2 + Q(1,42) + + assert bernoulli_poly(1).dummy_eq(x - Q(1,2)) + assert bernoulli_poly(1, polys=True) == Poly(x - Q(1,2)) + +def test_bernoulli_c_poly(): + raises(ValueError, lambda: bernoulli_c_poly(-1, x)) + assert bernoulli_c_poly(1, x, polys=True) == Poly(x, domain='QQ') + + assert bernoulli_c_poly(0, x) == 1 + assert bernoulli_c_poly(1, x) == x + assert bernoulli_c_poly(2, x) == x**2 - Q(1,3) + assert bernoulli_c_poly(3, x) == x**3 - x + assert bernoulli_c_poly(4, x) == x**4 - 2*x**2 + Q(7,15) + assert bernoulli_c_poly(5, x) == x**5 - Q(10,3)*x**3 + Q(7,3)*x + assert bernoulli_c_poly(6, x) == x**6 - 5*x**4 + 7*x**2 - Q(31,21) + + assert bernoulli_c_poly(1).dummy_eq(x) + assert bernoulli_c_poly(1, polys=True) == Poly(x, domain='QQ') + + assert 2**8 * bernoulli_poly(8, (x+1)/2).expand() == bernoulli_c_poly(8, x) + assert 2**9 * bernoulli_poly(9, (x+1)/2).expand() == bernoulli_c_poly(9, x) + +def test_genocchi_poly(): + raises(ValueError, lambda: genocchi_poly(-1, x)) + assert genocchi_poly(2, x, polys=True) == Poly(-2*x + 1) + + assert genocchi_poly(0, x) == 0 + assert genocchi_poly(1, x) == -1 + assert genocchi_poly(2, x) == 1 - 2*x + assert genocchi_poly(3, x) == 3*x - 3*x**2 + assert genocchi_poly(4, x) == -1 + 6*x**2 - 4*x**3 + assert genocchi_poly(5, x) == -5*x + 10*x**3 - 5*x**4 + assert genocchi_poly(6, x) == 3 - 15*x**2 + 15*x**4 - 6*x**5 + + assert genocchi_poly(2).dummy_eq(-2*x + 1) + assert genocchi_poly(2, polys=True) == Poly(-2*x + 1) + + assert 2 * (bernoulli_poly(8, x) - bernoulli_c_poly(8, x)) == genocchi_poly(8, x) + assert 2 * (bernoulli_poly(9, x) - bernoulli_c_poly(9, x)) == genocchi_poly(9, x) + +def test_euler_poly(): + raises(ValueError, lambda: euler_poly(-1, x)) + assert euler_poly(1, x, polys=True) == Poly(x - Q(1,2)) + + assert euler_poly(0, x) == 1 + assert euler_poly(1, x) == x - Q(1,2) + assert euler_poly(2, x) == x**2 - x + assert 
euler_poly(3, x) == x**3 - Q(3,2)*x**2 + Q(1,4) + assert euler_poly(4, x) == x**4 - 2*x**3 + x + assert euler_poly(5, x) == x**5 - Q(5,2)*x**4 + Q(5,2)*x**2 - Q(1,2) + assert euler_poly(6, x) == x**6 - 3*x**5 + 5*x**3 - 3*x + + assert euler_poly(1).dummy_eq(x - Q(1,2)) + assert euler_poly(1, polys=True) == Poly(x - Q(1,2)) + + assert genocchi_poly(9, x) == euler_poly(8, x) * -9 + assert genocchi_poly(10, x) == euler_poly(9, x) * -10 + +def test_andre_poly(): + raises(ValueError, lambda: andre_poly(-1, x)) + assert andre_poly(1, x, polys=True) == Poly(x) + + assert andre_poly(0, x) == 1 + assert andre_poly(1, x) == x + assert andre_poly(2, x) == x**2 - 1 + assert andre_poly(3, x) == x**3 - 3*x + assert andre_poly(4, x) == x**4 - 6*x**2 + 5 + assert andre_poly(5, x) == x**5 - 10*x**3 + 25*x + assert andre_poly(6, x) == x**6 - 15*x**4 + 75*x**2 - 61 + + assert andre_poly(1).dummy_eq(x) + assert andre_poly(1, polys=True) == Poly(x) diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_constructor.py b/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_constructor.py new file mode 100644 index 0000000000000000000000000000000000000000..14dacb9bb1c12e983a83590fd9af8c8d8f3ff036 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_constructor.py @@ -0,0 +1,208 @@ +"""Tests for tools for constructing domains for expressions. 
""" + +from sympy.polys.constructor import construct_domain +from sympy.polys.domains import ZZ, QQ, ZZ_I, QQ_I, RR, CC, EX +from sympy.polys.domains.realfield import RealField +from sympy.polys.domains.complexfield import ComplexField + +from sympy.core import (Catalan, GoldenRatio) +from sympy.core.numbers import (E, Float, I, Rational, pi) +from sympy.core.singleton import S +from sympy.functions.elementary.exponential import exp +from sympy.functions.elementary.miscellaneous import sqrt +from sympy.functions.elementary.trigonometric import sin +from sympy.abc import x, y + + +def test_construct_domain(): + + assert construct_domain([1, 2, 3]) == (ZZ, [ZZ(1), ZZ(2), ZZ(3)]) + assert construct_domain([1, 2, 3], field=True) == (QQ, [QQ(1), QQ(2), QQ(3)]) + + assert construct_domain([S.One, S(2), S(3)]) == (ZZ, [ZZ(1), ZZ(2), ZZ(3)]) + assert construct_domain([S.One, S(2), S(3)], field=True) == (QQ, [QQ(1), QQ(2), QQ(3)]) + + assert construct_domain([S.Half, S(2)]) == (QQ, [QQ(1, 2), QQ(2)]) + result = construct_domain([3.14, 1, S.Half]) + assert isinstance(result[0], RealField) + assert result[1] == [RR(3.14), RR(1.0), RR(0.5)] + + result = construct_domain([3.14, I, S.Half]) + assert isinstance(result[0], ComplexField) + assert result[1] == [CC(3.14), CC(1.0j), CC(0.5)] + + assert construct_domain([1.0+I]) == (CC, [CC(1.0, 1.0)]) + assert construct_domain([2.0+3.0*I]) == (CC, [CC(2.0, 3.0)]) + + assert construct_domain([1, I]) == (ZZ_I, [ZZ_I(1, 0), ZZ_I(0, 1)]) + assert construct_domain([1, I/2]) == (QQ_I, [QQ_I(1, 0), QQ_I(0, S.Half)]) + + assert construct_domain([3.14, sqrt(2)], extension=None) == (EX, [EX(3.14), EX(sqrt(2))]) + assert construct_domain([3.14, sqrt(2)], extension=True) == (EX, [EX(3.14), EX(sqrt(2))]) + + assert construct_domain([1, sqrt(2)], extension=None) == (EX, [EX(1), EX(sqrt(2))]) + + assert construct_domain([x, sqrt(x)]) == (EX, [EX(x), EX(sqrt(x))]) + assert construct_domain([x, sqrt(x), sqrt(y)]) == (EX, [EX(x), EX(sqrt(x)), 
EX(sqrt(y))]) + + alg = QQ.algebraic_field(sqrt(2)) + + assert construct_domain([7, S.Half, sqrt(2)], extension=True) == \ + (alg, [alg.convert(7), alg.convert(S.Half), alg.convert(sqrt(2))]) + + alg = QQ.algebraic_field(sqrt(2) + sqrt(3)) + + assert construct_domain([7, sqrt(2), sqrt(3)], extension=True) == \ + (alg, [alg.convert(7), alg.convert(sqrt(2)), alg.convert(sqrt(3))]) + + dom = ZZ[x] + + assert construct_domain([2*x, 3]) == \ + (dom, [dom.convert(2*x), dom.convert(3)]) + + dom = ZZ[x, y] + + assert construct_domain([2*x, 3*y]) == \ + (dom, [dom.convert(2*x), dom.convert(3*y)]) + + dom = QQ[x] + + assert construct_domain([x/2, 3]) == \ + (dom, [dom.convert(x/2), dom.convert(3)]) + + dom = QQ[x, y] + + assert construct_domain([x/2, 3*y]) == \ + (dom, [dom.convert(x/2), dom.convert(3*y)]) + + dom = ZZ_I[x] + + assert construct_domain([2*x, I]) == \ + (dom, [dom.convert(2*x), dom.convert(I)]) + + dom = ZZ_I[x, y] + + assert construct_domain([2*x, I*y]) == \ + (dom, [dom.convert(2*x), dom.convert(I*y)]) + + dom = QQ_I[x] + + assert construct_domain([x/2, I]) == \ + (dom, [dom.convert(x/2), dom.convert(I)]) + + dom = QQ_I[x, y] + + assert construct_domain([x/2, I*y]) == \ + (dom, [dom.convert(x/2), dom.convert(I*y)]) + + dom = RR[x] + + assert construct_domain([x/2, 3.5]) == \ + (dom, [dom.convert(x/2), dom.convert(3.5)]) + + dom = RR[x, y] + + assert construct_domain([x/2, 3.5*y]) == \ + (dom, [dom.convert(x/2), dom.convert(3.5*y)]) + + dom = CC[x] + + assert construct_domain([I*x/2, 3.5]) == \ + (dom, [dom.convert(I*x/2), dom.convert(3.5)]) + + dom = CC[x, y] + + assert construct_domain([I*x/2, 3.5*y]) == \ + (dom, [dom.convert(I*x/2), dom.convert(3.5*y)]) + + dom = CC[x] + + assert construct_domain([x/2, I*3.5]) == \ + (dom, [dom.convert(x/2), dom.convert(I*3.5)]) + + dom = CC[x, y] + + assert construct_domain([x/2, I*3.5*y]) == \ + (dom, [dom.convert(x/2), dom.convert(I*3.5*y)]) + + dom = ZZ.frac_field(x) + + assert construct_domain([2/x, 3]) == \ + (dom, 
[dom.convert(2/x), dom.convert(3)]) + + dom = ZZ.frac_field(x, y) + + assert construct_domain([2/x, 3*y]) == \ + (dom, [dom.convert(2/x), dom.convert(3*y)]) + + dom = RR.frac_field(x) + + assert construct_domain([2/x, 3.5]) == \ + (dom, [dom.convert(2/x), dom.convert(3.5)]) + + dom = RR.frac_field(x, y) + + assert construct_domain([2/x, 3.5*y]) == \ + (dom, [dom.convert(2/x), dom.convert(3.5*y)]) + + dom = RealField(prec=336)[x] + + assert construct_domain([pi.evalf(100)*x]) == \ + (dom, [dom.convert(pi.evalf(100)*x)]) + + assert construct_domain(2) == (ZZ, ZZ(2)) + assert construct_domain(S(2)/3) == (QQ, QQ(2, 3)) + assert construct_domain(Rational(2, 3)) == (QQ, QQ(2, 3)) + + assert construct_domain({}) == (ZZ, {}) + + +def test_complex_exponential(): + w = exp(-I*2*pi/3, evaluate=False) + alg = QQ.algebraic_field(w) + assert construct_domain([w**2, w, 1], extension=True) == ( + alg, + [alg.convert(w**2), + alg.convert(w), + alg.convert(1)] + ) + + +def test_composite_option(): + assert construct_domain({(1,): sin(y)}, composite=False) == \ + (EX, {(1,): EX(sin(y))}) + + assert construct_domain({(1,): y}, composite=False) == \ + (EX, {(1,): EX(y)}) + + assert construct_domain({(1, 1): 1}, composite=False) == \ + (ZZ, {(1, 1): 1}) + + assert construct_domain({(1, 0): y}, composite=False) == \ + (EX, {(1, 0): EX(y)}) + + +def test_precision(): + f1 = Float("1.01") + f2 = Float("1.0000000000000000000001") + for u in [1, 1e-2, 1e-6, 1e-13, 1e-14, 1e-16, 1e-20, 1e-100, 1e-300, + f1, f2]: + result = construct_domain([u]) + v = float(result[1][0]) + assert abs(u - v) / u < 1e-14 # Test relative accuracy + + result = construct_domain([f1]) + y = result[1][0] + assert y-1 > 1e-50 + + result = construct_domain([f2]) + y = result[1][0] + assert y-1 > 1e-50 + + +def test_issue_11538(): + for n in [E, pi, Catalan]: + assert construct_domain(n)[0] == ZZ[n] + assert construct_domain(x + n)[0] == ZZ[x, n] + assert construct_domain(GoldenRatio)[0] == EX + assert 
construct_domain(x + GoldenRatio)[0] == EX diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_densearith.py b/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_densearith.py new file mode 100644 index 0000000000000000000000000000000000000000..ea626f1feac246198d46986d0c193804e9c78891 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_densearith.py @@ -0,0 +1,997 @@ +"""Tests for dense recursive polynomials' arithmetics. """ + +from sympy.external.gmpy import GROUND_TYPES + +from sympy.polys.densebasic import ( + dup_normal, dmp_normal, +) + +from sympy.polys.densearith import ( + dup_add_term, dmp_add_term, + dup_sub_term, dmp_sub_term, + dup_mul_term, dmp_mul_term, + dup_add_ground, dmp_add_ground, + dup_sub_ground, dmp_sub_ground, + dup_mul_ground, dmp_mul_ground, + dup_quo_ground, dmp_quo_ground, + dup_exquo_ground, dmp_exquo_ground, + dup_lshift, dup_rshift, + dup_abs, dmp_abs, + dup_neg, dmp_neg, + dup_add, dmp_add, + dup_sub, dmp_sub, + dup_mul, dmp_mul, + dup_sqr, dmp_sqr, + dup_pow, dmp_pow, + dup_add_mul, dmp_add_mul, + dup_sub_mul, dmp_sub_mul, + dup_pdiv, dup_prem, dup_pquo, dup_pexquo, + dmp_pdiv, dmp_prem, dmp_pquo, dmp_pexquo, + dup_rr_div, dmp_rr_div, + dup_ff_div, dmp_ff_div, + dup_div, dup_rem, dup_quo, dup_exquo, + dmp_div, dmp_rem, dmp_quo, dmp_exquo, + dup_max_norm, dmp_max_norm, + dup_l1_norm, dmp_l1_norm, + dup_l2_norm_squared, dmp_l2_norm_squared, + dup_expand, dmp_expand, +) + +from sympy.polys.polyerrors import ( + ExactQuotientFailed, +) + +from sympy.polys.specialpolys import f_polys +from sympy.polys.domains import FF, ZZ, QQ + +from sympy.testing.pytest import raises + +f_0, f_1, f_2, f_3, f_4, f_5, f_6 = [ f.to_dense() for f in f_polys() ] +F_0 = dmp_mul_ground(dmp_normal(f_0, 2, QQ), QQ(1, 7), 2, QQ) + +def test_dup_add_term(): + f = dup_normal([], ZZ) + + assert dup_add_term(f, ZZ(0), 0, ZZ) == dup_normal([], ZZ) + + assert dup_add_term(f, ZZ(1), 
0, ZZ) == dup_normal([1], ZZ) + assert dup_add_term(f, ZZ(1), 1, ZZ) == dup_normal([1, 0], ZZ) + assert dup_add_term(f, ZZ(1), 2, ZZ) == dup_normal([1, 0, 0], ZZ) + + f = dup_normal([1, 1, 1], ZZ) + + assert dup_add_term(f, ZZ(1), 0, ZZ) == dup_normal([1, 1, 2], ZZ) + assert dup_add_term(f, ZZ(1), 1, ZZ) == dup_normal([1, 2, 1], ZZ) + assert dup_add_term(f, ZZ(1), 2, ZZ) == dup_normal([2, 1, 1], ZZ) + + assert dup_add_term(f, ZZ(1), 3, ZZ) == dup_normal([1, 1, 1, 1], ZZ) + assert dup_add_term(f, ZZ(1), 4, ZZ) == dup_normal([1, 0, 1, 1, 1], ZZ) + assert dup_add_term(f, ZZ(1), 5, ZZ) == dup_normal([1, 0, 0, 1, 1, 1], ZZ) + assert dup_add_term( + f, ZZ(1), 6, ZZ) == dup_normal([1, 0, 0, 0, 1, 1, 1], ZZ) + + assert dup_add_term(f, ZZ(-1), 2, ZZ) == dup_normal([1, 1], ZZ) + + +def test_dmp_add_term(): + assert dmp_add_term([ZZ(1), ZZ(1), ZZ(1)], ZZ(1), 2, 0, ZZ) == \ + dup_add_term([ZZ(1), ZZ(1), ZZ(1)], ZZ(1), 2, ZZ) + assert dmp_add_term(f_0, [[]], 3, 2, ZZ) == f_0 + assert dmp_add_term(F_0, [[]], 3, 2, QQ) == F_0 + + +def test_dup_sub_term(): + f = dup_normal([], ZZ) + + assert dup_sub_term(f, ZZ(0), 0, ZZ) == dup_normal([], ZZ) + + assert dup_sub_term(f, ZZ(1), 0, ZZ) == dup_normal([-1], ZZ) + assert dup_sub_term(f, ZZ(1), 1, ZZ) == dup_normal([-1, 0], ZZ) + assert dup_sub_term(f, ZZ(1), 2, ZZ) == dup_normal([-1, 0, 0], ZZ) + + f = dup_normal([1, 1, 1], ZZ) + + assert dup_sub_term(f, ZZ(2), 0, ZZ) == dup_normal([ 1, 1, -1], ZZ) + assert dup_sub_term(f, ZZ(2), 1, ZZ) == dup_normal([ 1, -1, 1], ZZ) + assert dup_sub_term(f, ZZ(2), 2, ZZ) == dup_normal([-1, 1, 1], ZZ) + + assert dup_sub_term(f, ZZ(1), 3, ZZ) == dup_normal([-1, 1, 1, 1], ZZ) + assert dup_sub_term(f, ZZ(1), 4, ZZ) == dup_normal([-1, 0, 1, 1, 1], ZZ) + assert dup_sub_term(f, ZZ(1), 5, ZZ) == dup_normal([-1, 0, 0, 1, 1, 1], ZZ) + assert dup_sub_term( + f, ZZ(1), 6, ZZ) == dup_normal([-1, 0, 0, 0, 1, 1, 1], ZZ) + + assert dup_sub_term(f, ZZ(1), 2, ZZ) == dup_normal([1, 1], ZZ) + + +def test_dmp_sub_term(): + 
assert dmp_sub_term([ZZ(1), ZZ(1), ZZ(1)], ZZ(1), 2, 0, ZZ) == \ + dup_sub_term([ZZ(1), ZZ(1), ZZ(1)], ZZ(1), 2, ZZ) + assert dmp_sub_term(f_0, [[]], 3, 2, ZZ) == f_0 + assert dmp_sub_term(F_0, [[]], 3, 2, QQ) == F_0 + + +def test_dup_mul_term(): + f = dup_normal([], ZZ) + + assert dup_mul_term(f, ZZ(2), 3, ZZ) == dup_normal([], ZZ) + + f = dup_normal([1, 1], ZZ) + + assert dup_mul_term(f, ZZ(0), 3, ZZ) == dup_normal([], ZZ) + + f = dup_normal([1, 2, 3], ZZ) + + assert dup_mul_term(f, ZZ(2), 0, ZZ) == dup_normal([2, 4, 6], ZZ) + assert dup_mul_term(f, ZZ(2), 1, ZZ) == dup_normal([2, 4, 6, 0], ZZ) + assert dup_mul_term(f, ZZ(2), 2, ZZ) == dup_normal([2, 4, 6, 0, 0], ZZ) + assert dup_mul_term(f, ZZ(2), 3, ZZ) == dup_normal([2, 4, 6, 0, 0, 0], ZZ) + + +def test_dmp_mul_term(): + assert dmp_mul_term([ZZ(1), ZZ(2), ZZ(3)], ZZ(2), 1, 0, ZZ) == \ + dup_mul_term([ZZ(1), ZZ(2), ZZ(3)], ZZ(2), 1, ZZ) + + assert dmp_mul_term([[]], [ZZ(2)], 3, 1, ZZ) == [[]] + assert dmp_mul_term([[ZZ(1)]], [], 3, 1, ZZ) == [[]] + + assert dmp_mul_term([[ZZ(1), ZZ(2)], [ZZ(3)]], [ZZ(2)], 2, 1, ZZ) == \ + [[ZZ(2), ZZ(4)], [ZZ(6)], [], []] + + assert dmp_mul_term([[]], [QQ(2, 3)], 3, 1, QQ) == [[]] + assert dmp_mul_term([[QQ(1, 2)]], [], 3, 1, QQ) == [[]] + + assert dmp_mul_term([[QQ(1, 5), QQ(2, 5)], [QQ(3, 5)]], [QQ(2, 3)], 2, 1, QQ) == \ + [[QQ(2, 15), QQ(4, 15)], [QQ(6, 15)], [], []] + + +def test_dup_add_ground(): + f = ZZ.map([1, 2, 3, 4]) + g = ZZ.map([1, 2, 3, 8]) + + assert dup_add_ground(f, ZZ(4), ZZ) == g + + +def test_dmp_add_ground(): + f = ZZ.map([[1], [2], [3], [4]]) + g = ZZ.map([[1], [2], [3], [8]]) + + assert dmp_add_ground(f, ZZ(4), 1, ZZ) == g + + +def test_dup_sub_ground(): + f = ZZ.map([1, 2, 3, 4]) + g = ZZ.map([1, 2, 3, 0]) + + assert dup_sub_ground(f, ZZ(4), ZZ) == g + + +def test_dmp_sub_ground(): + f = ZZ.map([[1], [2], [3], [4]]) + g = ZZ.map([[1], [2], [3], []]) + + assert dmp_sub_ground(f, ZZ(4), 1, ZZ) == g + + +def test_dup_mul_ground(): + f = dup_normal([], ZZ) + 
+ assert dup_mul_ground(f, ZZ(2), ZZ) == dup_normal([], ZZ) + + f = dup_normal([1, 2, 3], ZZ) + + assert dup_mul_ground(f, ZZ(0), ZZ) == dup_normal([], ZZ) + assert dup_mul_ground(f, ZZ(2), ZZ) == dup_normal([2, 4, 6], ZZ) + + +def test_dmp_mul_ground(): + assert dmp_mul_ground(f_0, ZZ(2), 2, ZZ) == [ + [[ZZ(2), ZZ(4), ZZ(6)], [ZZ(4)]], + [[ZZ(6)]], + [[ZZ(8), ZZ(10), ZZ(12)], [ZZ(2), ZZ(4), ZZ(2)], [ZZ(2)]] + ] + + assert dmp_mul_ground(F_0, QQ(1, 2), 2, QQ) == [ + [[QQ(1, 14), QQ(2, 14), QQ(3, 14)], [QQ(2, 14)]], + [[QQ(3, 14)]], + [[QQ(4, 14), QQ(5, 14), QQ(6, 14)], [QQ(1, 14), QQ(2, 14), + QQ(1, 14)], [QQ(1, 14)]] + ] + + +def test_dup_quo_ground(): + raises(ZeroDivisionError, lambda: dup_quo_ground(dup_normal([1, 2, + 3], ZZ), ZZ(0), ZZ)) + + f = dup_normal([], ZZ) + + assert dup_quo_ground(f, ZZ(3), ZZ) == dup_normal([], ZZ) + + f = dup_normal([6, 2, 8], ZZ) + + assert dup_quo_ground(f, ZZ(1), ZZ) == f + assert dup_quo_ground(f, ZZ(2), ZZ) == dup_normal([3, 1, 4], ZZ) + + assert dup_quo_ground(f, ZZ(3), ZZ) == dup_normal([2, 0, 2], ZZ) + + f = dup_normal([6, 2, 8], QQ) + + assert dup_quo_ground(f, QQ(1), QQ) == f + assert dup_quo_ground(f, QQ(2), QQ) == [QQ(3), QQ(1), QQ(4)] + assert dup_quo_ground(f, QQ(7), QQ) == [QQ(6, 7), QQ(2, 7), QQ(8, 7)] + + +def test_dup_exquo_ground(): + raises(ZeroDivisionError, lambda: dup_exquo_ground(dup_normal([1, + 2, 3], ZZ), ZZ(0), ZZ)) + raises(ExactQuotientFailed, lambda: dup_exquo_ground(dup_normal([1, + 2, 3], ZZ), ZZ(3), ZZ)) + + f = dup_normal([], ZZ) + + assert dup_exquo_ground(f, ZZ(3), ZZ) == dup_normal([], ZZ) + + f = dup_normal([6, 2, 8], ZZ) + + assert dup_exquo_ground(f, ZZ(1), ZZ) == f + assert dup_exquo_ground(f, ZZ(2), ZZ) == dup_normal([3, 1, 4], ZZ) + + f = dup_normal([6, 2, 8], QQ) + + assert dup_exquo_ground(f, QQ(1), QQ) == f + assert dup_exquo_ground(f, QQ(2), QQ) == [QQ(3), QQ(1), QQ(4)] + assert dup_exquo_ground(f, QQ(7), QQ) == [QQ(6, 7), QQ(2, 7), QQ(8, 7)] + + +def test_dmp_quo_ground(): + f = 
dmp_normal([[6], [2], [8]], 1, ZZ) + + assert dmp_quo_ground(f, ZZ(1), 1, ZZ) == f + assert dmp_quo_ground( + f, ZZ(2), 1, ZZ) == dmp_normal([[3], [1], [4]], 1, ZZ) + + assert dmp_normal(dmp_quo_ground( + f, ZZ(3), 1, ZZ), 1, ZZ) == dmp_normal([[2], [], [2]], 1, ZZ) + + +def test_dmp_exquo_ground(): + f = dmp_normal([[6], [2], [8]], 1, ZZ) + + assert dmp_exquo_ground(f, ZZ(1), 1, ZZ) == f + assert dmp_exquo_ground( + f, ZZ(2), 1, ZZ) == dmp_normal([[3], [1], [4]], 1, ZZ) + + +def test_dup_lshift(): + assert dup_lshift([], 3, ZZ) == [] + assert dup_lshift([1], 3, ZZ) == [1, 0, 0, 0] + + +def test_dup_rshift(): + assert dup_rshift([], 3, ZZ) == [] + assert dup_rshift([1, 0, 0, 0], 3, ZZ) == [1] + + +def test_dup_abs(): + assert dup_abs([], ZZ) == [] + assert dup_abs([ZZ( 1)], ZZ) == [ZZ(1)] + assert dup_abs([ZZ(-7)], ZZ) == [ZZ(7)] + assert dup_abs([ZZ(-1), ZZ(2), ZZ(3)], ZZ) == [ZZ(1), ZZ(2), ZZ(3)] + + assert dup_abs([], QQ) == [] + assert dup_abs([QQ( 1, 2)], QQ) == [QQ(1, 2)] + assert dup_abs([QQ(-7, 3)], QQ) == [QQ(7, 3)] + assert dup_abs( + [QQ(-1, 7), QQ(2, 7), QQ(3, 7)], QQ) == [QQ(1, 7), QQ(2, 7), QQ(3, 7)] + + +def test_dmp_abs(): + assert dmp_abs([ZZ(-1)], 0, ZZ) == [ZZ(1)] + assert dmp_abs([QQ(-1, 2)], 0, QQ) == [QQ(1, 2)] + + assert dmp_abs([[[]]], 2, ZZ) == [[[]]] + assert dmp_abs([[[ZZ(1)]]], 2, ZZ) == [[[ZZ(1)]]] + assert dmp_abs([[[ZZ(-7)]]], 2, ZZ) == [[[ZZ(7)]]] + + assert dmp_abs([[[]]], 2, QQ) == [[[]]] + assert dmp_abs([[[QQ(1, 2)]]], 2, QQ) == [[[QQ(1, 2)]]] + assert dmp_abs([[[QQ(-7, 9)]]], 2, QQ) == [[[QQ(7, 9)]]] + + +def test_dup_neg(): + assert dup_neg([], ZZ) == [] + assert dup_neg([ZZ(1)], ZZ) == [ZZ(-1)] + assert dup_neg([ZZ(-7)], ZZ) == [ZZ(7)] + assert dup_neg([ZZ(-1), ZZ(2), ZZ(3)], ZZ) == [ZZ(1), ZZ(-2), ZZ(-3)] + + assert dup_neg([], QQ) == [] + assert dup_neg([QQ(1, 2)], QQ) == [QQ(-1, 2)] + assert dup_neg([QQ(-7, 9)], QQ) == [QQ(7, 9)] + assert dup_neg([QQ( + -1, 7), QQ(2, 7), QQ(3, 7)], QQ) == [QQ(1, 7), QQ(-2, 7), QQ(-3, 7)] + 
+ +def test_dmp_neg(): + assert dmp_neg([ZZ(-1)], 0, ZZ) == [ZZ(1)] + assert dmp_neg([QQ(-1, 2)], 0, QQ) == [QQ(1, 2)] + + assert dmp_neg([[[]]], 2, ZZ) == [[[]]] + assert dmp_neg([[[ZZ(1)]]], 2, ZZ) == [[[ZZ(-1)]]] + assert dmp_neg([[[ZZ(-7)]]], 2, ZZ) == [[[ZZ(7)]]] + + assert dmp_neg([[[]]], 2, QQ) == [[[]]] + assert dmp_neg([[[QQ(1, 9)]]], 2, QQ) == [[[QQ(-1, 9)]]] + assert dmp_neg([[[QQ(-7, 9)]]], 2, QQ) == [[[QQ(7, 9)]]] + + +def test_dup_add(): + assert dup_add([], [], ZZ) == [] + assert dup_add([ZZ(1)], [], ZZ) == [ZZ(1)] + assert dup_add([], [ZZ(1)], ZZ) == [ZZ(1)] + assert dup_add([ZZ(1)], [ZZ(1)], ZZ) == [ZZ(2)] + assert dup_add([ZZ(1)], [ZZ(2)], ZZ) == [ZZ(3)] + + assert dup_add([ZZ(1), ZZ(2)], [ZZ(1)], ZZ) == [ZZ(1), ZZ(3)] + assert dup_add([ZZ(1)], [ZZ(1), ZZ(2)], ZZ) == [ZZ(1), ZZ(3)] + + assert dup_add([ZZ(1), ZZ( + 2), ZZ(3)], [ZZ(8), ZZ(9), ZZ(10)], ZZ) == [ZZ(9), ZZ(11), ZZ(13)] + + assert dup_add([], [], QQ) == [] + assert dup_add([QQ(1, 2)], [], QQ) == [QQ(1, 2)] + assert dup_add([], [QQ(1, 2)], QQ) == [QQ(1, 2)] + assert dup_add([QQ(1, 4)], [QQ(1, 4)], QQ) == [QQ(1, 2)] + assert dup_add([QQ(1, 4)], [QQ(1, 2)], QQ) == [QQ(3, 4)] + + assert dup_add([QQ(1, 2), QQ(2, 3)], [QQ(1)], QQ) == [QQ(1, 2), QQ(5, 3)] + assert dup_add([QQ(1)], [QQ(1, 2), QQ(2, 3)], QQ) == [QQ(1, 2), QQ(5, 3)] + + assert dup_add([QQ(1, 7), QQ(2, 7), QQ(3, 7)], [QQ( + 8, 7), QQ(9, 7), QQ(10, 7)], QQ) == [QQ(9, 7), QQ(11, 7), QQ(13, 7)] + + +def test_dmp_add(): + assert dmp_add([ZZ(1), ZZ(2)], [ZZ(1)], 0, ZZ) == \ + dup_add([ZZ(1), ZZ(2)], [ZZ(1)], ZZ) + assert dmp_add([QQ(1, 2), QQ(2, 3)], [QQ(1)], 0, QQ) == \ + dup_add([QQ(1, 2), QQ(2, 3)], [QQ(1)], QQ) + + assert dmp_add([[[]]], [[[]]], 2, ZZ) == [[[]]] + assert dmp_add([[[ZZ(1)]]], [[[]]], 2, ZZ) == [[[ZZ(1)]]] + assert dmp_add([[[]]], [[[ZZ(1)]]], 2, ZZ) == [[[ZZ(1)]]] + assert dmp_add([[[ZZ(2)]]], [[[ZZ(1)]]], 2, ZZ) == [[[ZZ(3)]]] + assert dmp_add([[[ZZ(1)]]], [[[ZZ(2)]]], 2, ZZ) == [[[ZZ(3)]]] + + assert 
dmp_add([[[]]], [[[]]], 2, QQ) == [[[]]] + assert dmp_add([[[QQ(1, 2)]]], [[[]]], 2, QQ) == [[[QQ(1, 2)]]] + assert dmp_add([[[]]], [[[QQ(1, 2)]]], 2, QQ) == [[[QQ(1, 2)]]] + assert dmp_add([[[QQ(2, 7)]]], [[[QQ(1, 7)]]], 2, QQ) == [[[QQ(3, 7)]]] + assert dmp_add([[[QQ(1, 7)]]], [[[QQ(2, 7)]]], 2, QQ) == [[[QQ(3, 7)]]] + + +def test_dup_sub(): + assert dup_sub([], [], ZZ) == [] + assert dup_sub([ZZ(1)], [], ZZ) == [ZZ(1)] + assert dup_sub([], [ZZ(1)], ZZ) == [ZZ(-1)] + assert dup_sub([ZZ(1)], [ZZ(1)], ZZ) == [] + assert dup_sub([ZZ(1)], [ZZ(2)], ZZ) == [ZZ(-1)] + + assert dup_sub([ZZ(1), ZZ(2)], [ZZ(1)], ZZ) == [ZZ(1), ZZ(1)] + assert dup_sub([ZZ(1)], [ZZ(1), ZZ(2)], ZZ) == [ZZ(-1), ZZ(-1)] + + assert dup_sub([ZZ(3), ZZ( + 2), ZZ(1)], [ZZ(8), ZZ(9), ZZ(10)], ZZ) == [ZZ(-5), ZZ(-7), ZZ(-9)] + + assert dup_sub([], [], QQ) == [] + assert dup_sub([QQ(1, 2)], [], QQ) == [QQ(1, 2)] + assert dup_sub([], [QQ(1, 2)], QQ) == [QQ(-1, 2)] + assert dup_sub([QQ(1, 3)], [QQ(1, 3)], QQ) == [] + assert dup_sub([QQ(1, 3)], [QQ(2, 3)], QQ) == [QQ(-1, 3)] + + assert dup_sub([QQ(1, 7), QQ(2, 7)], [QQ(1)], QQ) == [QQ(1, 7), QQ(-5, 7)] + assert dup_sub([QQ(1)], [QQ(1, 7), QQ(2, 7)], QQ) == [QQ(-1, 7), QQ(5, 7)] + + assert dup_sub([QQ(3, 7), QQ(2, 7), QQ(1, 7)], [QQ( + 8, 7), QQ(9, 7), QQ(10, 7)], QQ) == [QQ(-5, 7), QQ(-7, 7), QQ(-9, 7)] + + +def test_dmp_sub(): + assert dmp_sub([ZZ(1), ZZ(2)], [ZZ(1)], 0, ZZ) == \ + dup_sub([ZZ(1), ZZ(2)], [ZZ(1)], ZZ) + assert dmp_sub([QQ(1, 2), QQ(2, 3)], [QQ(1)], 0, QQ) == \ + dup_sub([QQ(1, 2), QQ(2, 3)], [QQ(1)], QQ) + + assert dmp_sub([[[]]], [[[]]], 2, ZZ) == [[[]]] + assert dmp_sub([[[ZZ(1)]]], [[[]]], 2, ZZ) == [[[ZZ(1)]]] + assert dmp_sub([[[]]], [[[ZZ(1)]]], 2, ZZ) == [[[ZZ(-1)]]] + assert dmp_sub([[[ZZ(2)]]], [[[ZZ(1)]]], 2, ZZ) == [[[ZZ(1)]]] + assert dmp_sub([[[ZZ(1)]]], [[[ZZ(2)]]], 2, ZZ) == [[[ZZ(-1)]]] + + assert dmp_sub([[[]]], [[[]]], 2, QQ) == [[[]]] + assert dmp_sub([[[QQ(1, 2)]]], [[[]]], 2, QQ) == [[[QQ(1, 2)]]] + assert 
dmp_sub([[[]]], [[[QQ(1, 2)]]], 2, QQ) == [[[QQ(-1, 2)]]] + assert dmp_sub([[[QQ(2, 7)]]], [[[QQ(1, 7)]]], 2, QQ) == [[[QQ(1, 7)]]] + assert dmp_sub([[[QQ(1, 7)]]], [[[QQ(2, 7)]]], 2, QQ) == [[[QQ(-1, 7)]]] + + +def test_dup_add_mul(): + assert dup_add_mul([ZZ(1), ZZ(2), ZZ(3)], [ZZ(3), ZZ(2), ZZ(1)], + [ZZ(1), ZZ(2)], ZZ) == [ZZ(3), ZZ(9), ZZ(7), ZZ(5)] + assert dmp_add_mul([[ZZ(1), ZZ(2)], [ZZ(3)]], [[ZZ(3)], [ZZ(2), ZZ(1)]], + [[ZZ(1)], [ZZ(2)]], 1, ZZ) == [[ZZ(3)], [ZZ(3), ZZ(9)], [ZZ(4), ZZ(5)]] + + +def test_dup_sub_mul(): + assert dup_sub_mul([ZZ(1), ZZ(2), ZZ(3)], [ZZ(3), ZZ(2), ZZ(1)], + [ZZ(1), ZZ(2)], ZZ) == [ZZ(-3), ZZ(-7), ZZ(-3), ZZ(1)] + assert dmp_sub_mul([[ZZ(1), ZZ(2)], [ZZ(3)]], [[ZZ(3)], [ZZ(2), ZZ(1)]], + [[ZZ(1)], [ZZ(2)]], 1, ZZ) == [[ZZ(-3)], [ZZ(-1), ZZ(-5)], [ZZ(-4), ZZ(1)]] + + +def test_dup_mul(): + assert dup_mul([], [], ZZ) == [] + assert dup_mul([], [ZZ(1)], ZZ) == [] + assert dup_mul([ZZ(1)], [], ZZ) == [] + assert dup_mul([ZZ(1)], [ZZ(1)], ZZ) == [ZZ(1)] + assert dup_mul([ZZ(5)], [ZZ(7)], ZZ) == [ZZ(35)] + + assert dup_mul([], [], QQ) == [] + assert dup_mul([], [QQ(1, 2)], QQ) == [] + assert dup_mul([QQ(1, 2)], [], QQ) == [] + assert dup_mul([QQ(1, 2)], [QQ(4, 7)], QQ) == [QQ(2, 7)] + assert dup_mul([QQ(5, 7)], [QQ(3, 7)], QQ) == [QQ(15, 49)] + + f = dup_normal([3, 0, 0, 6, 1, 2], ZZ) + g = dup_normal([4, 0, 1, 0], ZZ) + h = dup_normal([12, 0, 3, 24, 4, 14, 1, 2, 0], ZZ) + + assert dup_mul(f, g, ZZ) == h + assert dup_mul(g, f, ZZ) == h + + f = dup_normal([2, 0, 0, 1, 7], ZZ) + h = dup_normal([4, 0, 0, 4, 28, 0, 1, 14, 49], ZZ) + + assert dup_mul(f, f, ZZ) == h + + K = FF(6) + + assert dup_mul([K(2), K(1)], [K(3), K(4)], K) == [K(5), K(4)] + + p1 = dup_normal([79, -1, 78, -94, -10, 11, 32, -19, 78, 2, -89, 30, 73, 42, + 85, 77, 83, -30, -34, -2, 95, -81, 37, -49, -46, -58, -16, 37, 35, -11, + -57, -15, -31, 67, -20, 27, 76, 2, 70, 67, -65, 65, -26, -93, -44, -12, + -92, 57, -90, -57, -11, -67, -98, -69, 97, -41, 89, 33, 89, -50, 81, 
+ -31, 60, -27, 43, 29, -77, 44, 21, -91, 32, -57, 33, 3, 53, -51, -38, + -99, -84, 23, -50, 66, -100, 1, -75, -25, 27, -60, 98, -51, -87, 6, 8, + 78, -28, -95, -88, 12, -35, 26, -9, 16, -92, 55, -7, -86, 68, -39, -46, + 84, 94, 45, 60, 92, 68, -75, -74, -19, 8, 75, 78, 91, 57, 34, 14, -3, + -49, 65, 78, -18, 6, -29, -80, -98, 17, 13, 58, 21, 20, 9, 37, 7, -30, + -53, -20, 34, 67, -42, 89, -22, 73, 43, -6, 5, 51, -8, -15, -52, -22, + -58, -72, -3, 43, -92, 82, 83, -2, -13, -23, -60, 16, -94, -8, -28, + -95, -72, 63, -90, 76, 6, -43, -100, -59, 76, 3, 3, 46, -85, 75, 62, + -71, -76, 88, 97, -72, -1, 30, -64, 72, -48, 14, -78, 58, 63, -91, 24, + -87, -27, -80, -100, -44, 98, 70, 100, -29, -38, 11, 77, 100, 52, 86, + 65, -5, -42, -81, -38, -42, 43, -2, -70, -63, -52], ZZ) + p2 = dup_normal([65, -19, -47, 1, 90, 81, -15, -34, 25, -75, 9, -83, 50, -5, + -44, 31, 1, 70, -7, 78, 74, 80, 85, 65, 21, 41, 66, 19, -40, 63, -21, + -27, 32, 69, 83, 34, -35, 14, 81, 57, -75, 32, -67, -89, -100, -61, 46, + 84, -78, -29, -50, -94, -24, -32, -68, -16, 100, -7, -72, -89, 35, 82, + 58, 81, -92, 62, 5, -47, -39, -58, -72, -13, 84, 44, 55, -25, 48, -54, + -31, -56, -11, -50, -84, 10, 67, 17, 13, -14, 61, 76, -64, -44, -40, + -96, 11, -11, -94, 2, 6, 27, -6, 68, -54, 66, -74, -14, -1, -24, -73, + 96, 89, -11, -89, 56, -53, 72, -43, 96, 25, 63, -31, 29, 68, 83, 91, + -93, -19, -38, -40, 40, -12, -19, -79, 44, 100, -66, -29, -77, 62, 39, + -8, 11, -97, 14, 87, 64, 21, -18, 13, 15, -59, -75, -99, -88, 57, 54, + 56, -67, 6, -63, -59, -14, 28, 87, -20, -39, 84, -91, -2, 49, -75, 11, + -24, -95, 36, 66, 5, 25, -72, -40, 86, 90, 37, -33, 57, -35, 29, -18, + 4, -79, 64, -17, -27, 21, 29, -5, -44, -87, -24, 52, 78, 11, -23, -53, + 36, 42, 21, -68, 94, -91, -51, -21, 51, -76, 72, 31, 24, -48, -80, -9, + 37, -47, -6, -8, -63, -91, 79, -79, -100, 38, -20, 38, 100, 83, -90, + 87, 63, -36, 82, -19, 18, -98, -38, 26, 98, -70, 79, 92, 12, 12, 70, + 74, 36, 48, -13, 31, 31, -47, -71, -12, -64, 36, -42, 
32, -86, 60, 83, + 70, 55, 0, 1, 29, -35, 8, -82, 8, -73, -46, -50, 43, 48, -5, -86, -72, + 44, -90, 19, 19, 5, -20, 97, -13, -66, -5, 5, -69, 64, -30, 41, 51, 36, + 13, -99, -61, 94, -12, 74, 98, 68, 24, 46, -97, -87, -6, -27, 82, 62, + -11, -77, 86, 66, -47, -49, -50, 13, 18, 89, -89, 46, -80, 13, 98, -35, + -36, -25, 12, 20, 26, -52, 79, 27, 79, 100, 8, 62, -58, -28, 37], ZZ) + res = dup_normal([5135, -1566, 1376, -7466, 4579, 11710, 8001, -7183, + -3737, -7439, 345, -10084, 24522, -1201, 1070, -10245, 9582, 9264, + 1903, 23312, 18953, 10037, -15268, -5450, 6442, -6243, -3777, 5110, + 10936, -16649, -6022, 16255, 31300, 24818, 31922, 32760, 7854, 27080, + 15766, 29596, 7139, 31945, -19810, 465, -38026, -3971, 9641, 465, + -19375, 5524, -30112, -11960, -12813, 13535, 30670, 5925, -43725, + -14089, 11503, -22782, 6371, 43881, 37465, -33529, -33590, -39798, + -37854, -18466, -7908, -35825, -26020, -36923, -11332, -5699, 25166, + -3147, 19885, 12962, -20659, -1642, 27723, -56331, -24580, -11010, + -20206, 20087, -23772, -16038, 38580, 20901, -50731, 32037, -4299, + 26508, 18038, -28357, 31846, -7405, -20172, -15894, 2096, 25110, + -45786, 45918, -55333, -31928, -49428, -29824, -58796, -24609, -15408, + 69, -35415, -18439, 10123, -20360, -65949, 33356, -20333, 26476, + -32073, 33621, 930, 28803, -42791, 44716, 38164, 12302, -1739, 11421, + 73385, -7613, 14297, 38155, -414, 77587, 24338, -21415, 29367, 42639, + 13901, -288, 51027, -11827, 91260, 43407, 88521, -15186, 70572, -12049, + 5090, -12208, -56374, 15520, -623, -7742, 50825, 11199, -14894, 40892, + 59591, -31356, -28696, -57842, -87751, -33744, -28436, -28945, -40287, + 37957, -35638, 33401, -61534, 14870, 40292, 70366, -10803, 102290, + -71719, -85251, 7902, -22409, 75009, 99927, 35298, -1175, -762, -34744, + -10587, -47574, -62629, -19581, -43659, -54369, -32250, -39545, 15225, + -24454, 11241, -67308, -30148, 39929, 37639, 14383, -73475, -77636, + -81048, -35992, 41601, -90143, 76937, -8112, 56588, 9124, 
-40094, + -32340, 13253, 10898, -51639, 36390, 12086, -1885, 100714, -28561, + -23784, -18735, 18916, 16286, 10742, -87360, -13697, 10689, -19477, + -29770, 5060, 20189, -8297, 112407, 47071, 47743, 45519, -4109, 17468, + -68831, 78325, -6481, -21641, -19459, 30919, 96115, 8607, 53341, 32105, + -16211, 23538, 57259, -76272, -40583, 62093, 38511, -34255, -40665, + -40604, -37606, -15274, 33156, -13885, 103636, 118678, -14101, -92682, + -100791, 2634, 63791, 98266, 19286, -34590, -21067, -71130, 25380, + -40839, -27614, -26060, 52358, -15537, 27138, -6749, 36269, -33306, + 13207, -91084, -5540, -57116, 69548, 44169, -57742, -41234, -103327, + -62904, -8566, 41149, -12866, 71188, 23980, 1838, 58230, 73950, 5594, + 43113, -8159, -15925, 6911, 85598, -75016, -16214, -62726, -39016, + 8618, -63882, -4299, 23182, 49959, 49342, -3238, -24913, -37138, 78361, + 32451, 6337, -11438, -36241, -37737, 8169, -3077, -24829, 57953, 53016, + -31511, -91168, 12599, -41849, 41576, 55275, -62539, 47814, -62319, + 12300, -32076, -55137, -84881, -27546, 4312, -3433, -54382, 113288, + -30157, 74469, 18219, 79880, -2124, 98911, 17655, -33499, -32861, + 47242, -37393, 99765, 14831, -44483, 10800, -31617, -52710, 37406, + 22105, 29704, -20050, 13778, 43683, 36628, 8494, 60964, -22644, 31550, + -17693, 33805, -124879, -12302, 19343, 20400, -30937, -21574, -34037, + -33380, 56539, -24993, -75513, -1527, 53563, 65407, -101, 53577, 37991, + 18717, -23795, -8090, -47987, -94717, 41967, 5170, -14815, -94311, + 17896, -17734, -57718, -774, -38410, 24830, 29682, 76480, 58802, + -46416, -20348, -61353, -68225, -68306, 23822, -31598, 42972, 36327, + 28968, -65638, -21638, 24354, -8356, 26777, 52982, -11783, -44051, + -26467, -44721, -28435, -53265, -25574, -2669, 44155, 22946, -18454, + -30718, -11252, 58420, 8711, 67447, 4425, 41749, 67543, 43162, 11793, + -41907, 20477, -13080, 6559, -6104, -13244, 42853, 42935, 29793, 36730, + -28087, 28657, 17946, 7503, 7204, 21491, -27450, -24241, -98156, + 
-18082, -42613, -24928, 10775, -14842, -44127, 55910, 14777, 31151, -2194, + 39206, -2100, -4211, 11827, -8918, -19471, 72567, 36447, -65590, -34861, + -17147, -45303, 9025, -7333, -35473, 11101, 11638, 3441, 6626, -41800, + 9416, 13679, 33508, 40502, -60542, 16358, 8392, -43242, -35864, -34127, + -48721, 35878, 30598, 28630, 20279, -19983, -14638, -24455, -1851, -11344, + 45150, 42051, 26034, -28889, -32382, -3527, -14532, 22564, -22346, 477, + 11706, 28338, -25972, -9185, -22867, -12522, 32120, -4424, 11339, -33913, + -7184, 5101, -23552, -17115, -31401, -6104, 21906, 25708, 8406, 6317, + -7525, 5014, 20750, 20179, 22724, 11692, 13297, 2493, -253, -16841, -17339, + -6753, -4808, 2976, -10881, -10228, -13816, -12686, 1385, 2316, 2190, -875, + -1924], ZZ) + + assert dup_mul(p1, p2, ZZ) == res + + p1 = dup_normal([83, -61, -86, -24, 12, 43, -88, -9, 42, 55, -66, 74, 95, + -25, -12, 68, -99, 4, 45, 6, -15, -19, 78, 65, -55, 47, -13, 17, 86, + 81, -58, -27, 50, -40, -24, 39, -41, -92, 75, 90, -1, 40, -15, -27, + -35, 68, 70, -64, -40, 78, -88, -58, -39, 69, 46, 12, 28, -94, -37, + -50, -80, -96, -61, 25, 1, 71, 4, 12, 48, 4, 34, -47, -75, 5, 48, 82, + 88, 23, 98, 35, 17, -10, 48, -61, -95, 47, 65, -19, -66, -57, -6, -51, + -42, -89, 66, -13, 18, 37, 90, -23, 72, 96, -53, 0, 40, -73, -52, -68, + 32, -25, -53, 79, -52, 18, 44, 73, -81, 31, -90, 70, 3, 36, 48, 76, + -24, -44, 23, 98, -4, 73, 69, 88, -70, 14, -68, 94, -78, -15, -64, -97, + -70, -35, 65, 88, 49, -53, -7, 12, -45, -7, 59, -94, 99, -2, 67, -60, + -71, 29, -62, -77, 1, 51, 17, 80, -20, -47, -19, 24, -9, 39, -23, 21, + -84, 10, 84, 56, -17, -21, -66, 85, 70, 46, -51, -22, -95, 78, -60, + -96, -97, -45, 72, 35, 30, -61, -92, -93, -60, -61, 4, -4, -81, -73, + 46, 53, -11, 26, 94, 45, 14, -78, 55, 84, -68, 98, 60, 23, 100, -63, + 68, 96, -16, 3, 56, 21, -58, 62, -67, 66, 85, 41, -79, -22, 97, -67, + 82, 82, -96, -20, -7, 48, -67, 48, -9, -39, 78], ZZ) + p2 = dup_normal([52, 88, 76, 66, 9, -64, 46, -20, -28, 69, 
60, 96, -36, + -92, -30, -11, -35, 35, 55, 63, -92, -7, 25, -58, 74, 55, -6, 4, 47, + -92, -65, 67, -45, 74, -76, 59, -6, 69, 39, 24, -71, -7, 39, -45, 60, + -68, 98, 97, -79, 17, 4, 94, -64, 68, -100, -96, -2, 3, 22, 96, 54, + -77, -86, 67, 6, 57, 37, 40, 89, -78, 64, -94, -45, -92, 57, 87, -26, + 36, 19, 97, 25, 77, -87, 24, 43, -5, 35, 57, 83, 71, 35, 63, 61, 96, + -22, 8, -1, 96, 43, 45, 94, -93, 36, 71, -41, -99, 85, -48, 59, 52, + -17, 5, 87, -16, -68, -54, 76, -18, 100, 91, -42, -70, -66, -88, -12, + 1, 95, -82, 52, 43, -29, 3, 12, 72, -99, -43, -32, -93, -51, 16, -20, + -12, -11, 5, 33, -38, 93, -5, -74, 25, 74, -58, 93, 59, -63, -86, 63, + -20, -4, -74, -73, -95, 29, -28, 93, -91, -2, -38, -62, 77, -58, -85, + -28, 95, 38, 19, -69, 86, 94, 25, -2, -4, 47, 34, -59, 35, -48, 29, + -63, -53, 34, 29, 66, 73, 6, 92, -84, 89, 15, 81, 93, 97, 51, -72, -78, + 25, 60, 90, -45, 39, 67, -84, -62, 57, 26, -32, -56, -14, -83, 76, 5, + -2, 99, -100, 28, 46, 94, -7, 53, -25, 16, -23, -36, 89, -78, -63, 31, + 1, 84, -99, -52, 76, 48, 90, -76, 44, -19, 54, -36, -9, -73, -100, -69, + 31, 42, 25, -39, 76, -26, -8, -14, 51, 3, 37, 45, 2, -54, 13, -34, -92, + 17, -25, -65, 53, -63, 30, 4, -70, -67, 90, 52, 51, 18, -3, 31, -45, + -9, 59, 63, -87, 22, -32, 29, -38, 21, 36, -82, 27, -11], ZZ) + res = dup_normal([4316, 4132, -3532, -7974, -11303, -10069, 5484, -3330, + -5874, 7734, 4673, 11327, -9884, -8031, 17343, 21035, -10570, -9285, + 15893, 3780, -14083, 8819, 17592, 10159, 7174, -11587, 8598, -16479, + 3602, 25596, 9781, 12163, 150, 18749, -21782, -12307, 27578, -2757, + -12573, 12565, 6345, -18956, 19503, -15617, 1443, -16778, 36851, 23588, + -28474, 5749, 40695, -7521, -53669, -2497, -18530, 6770, 57038, 3926, + -6927, -15399, 1848, -64649, -27728, 3644, 49608, 15187, -8902, -9480, + -7398, -40425, 4824, 23767, -7594, -6905, 33089, 18786, 12192, 24670, + 31114, 35334, -4501, -14676, 7107, -59018, -21352, 20777, 19661, 20653, + 33754, -885, -43758, 6269, 51897, -28719, 
-97488, -9527, 13746, 11644, + 17644, -21720, 23782, -10481, 47867, 20752, 33810, -1875, 39918, -7710, + -40840, 19808, -47075, 23066, 46616, 25201, 9287, 35436, -1602, 9645, + -11978, 13273, 15544, 33465, 20063, 44539, 11687, 27314, -6538, -37467, + 14031, 32970, -27086, 41323, 29551, 65910, -39027, -37800, -22232, + 8212, 46316, -28981, -55282, 50417, -44929, -44062, 73879, 37573, + -2596, -10877, -21893, -133218, -33707, -25753, -9531, 17530, 61126, + 2748, -56235, 43874, -10872, -90459, -30387, 115267, -7264, -44452, + 122626, 14839, -599, 10337, 57166, -67467, -54957, 63669, 1202, 18488, + 52594, 7205, -97822, 612, 78069, -5403, -63562, 47236, 36873, -154827, + -26188, 82427, -39521, 5628, 7416, 5276, -53095, 47050, 26121, -42207, + 79021, -13035, 2499, -66943, 29040, -72355, -23480, 23416, -12885, + -44225, -42688, -4224, 19858, 55299, 15735, 11465, 101876, -39169, + 51786, 14723, 43280, -68697, 16410, 92295, 56767, 7183, 111850, 4550, + 115451, -38443, -19642, -35058, 10230, 93829, 8925, 63047, 3146, 29250, + 8530, 5255, -98117, -115517, -76817, -8724, 41044, 1312, -35974, 79333, + -28567, 7547, -10580, -24559, -16238, 10794, -3867, 24848, 57770, + -51536, -35040, 71033, 29853, 62029, -7125, -125585, -32169, -47907, + 156811, -65176, -58006, -15757, -57861, 11963, 30225, -41901, -41681, + 31310, 27982, 18613, 61760, 60746, -59096, 33499, 30097, -17997, 24032, + 56442, -83042, 23747, -20931, -21978, -158752, -9883, -73598, -7987, + -7333, -125403, -116329, 30585, 53281, 51018, -29193, 88575, 8264, + -40147, -16289, 113088, 12810, -6508, 101552, -13037, 34440, -41840, + 101643, 24263, 80532, 61748, 65574, 6423, -20672, 6591, -10834, -71716, + 86919, -92626, 39161, 28490, 81319, 46676, 106720, 43530, 26998, 57456, + -8862, 60989, 13982, 3119, -2224, 14743, 55415, -49093, -29303, 28999, + 1789, 55953, -84043, -7780, -65013, 57129, -47251, 61484, 61994, + -78361, -82778, 22487, -26894, 9756, -74637, -15519, -4360, 30115, + 42433, 35475, 15286, 69768, 21509, 
-20214, 78675, -21163, 13596, 11443, + -10698, -53621, -53867, -24155, 64500, -42784, -33077, -16500, 873, + -52788, 14546, -38011, 36974, -39849, -34029, -94311, 83068, -50437, + -26169, -46746, 59185, 42259, -101379, -12943, 30089, -59086, 36271, + 22723, -30253, -52472, -70826, -23289, 3331, -31687, 14183, -857, + -28627, 35246, -51284, 5636, -6933, 66539, 36654, 50927, 24783, 3457, + 33276, 45281, 45650, -4938, -9968, -22590, 47995, 69229, 5214, -58365, + -17907, -14651, 18668, 18009, 12649, -11851, -13387, 20339, 52472, + -1087, -21458, -68647, 52295, 15849, 40608, 15323, 25164, -29368, + 10352, -7055, 7159, 21695, -5373, -54849, 101103, -24963, -10511, + 33227, 7659, 41042, -69588, 26718, -20515, 6441, 38135, -63, 24088, + -35364, -12785, -18709, 47843, 48533, -48575, 17251, -19394, 32878, + -9010, -9050, 504, -12407, 28076, -3429, 25324, -4210, -26119, 752, + -29203, 28251, -11324, -32140, -3366, -25135, 18702, -31588, -7047, + -24267, 49987, -14975, -33169, 37744, -7720, -9035, 16964, -2807, -421, + 14114, -17097, -13662, 40628, -12139, -9427, 5369, 17551, -13232, -16211, + 9804, -7422, 2677, 28635, -8280, -4906, 2908, -22558, 5604, 12459, 8756, + -3980, -4745, -18525, 7913, 5970, -16457, 20230, -6247, -13812, 2505, + 11899, 1409, -15094, 22540, -18863, 137, 11123, -4516, 2290, -8594, 12150, + -10380, 3005, 5235, -7350, 2535, -858], ZZ) + + assert dup_mul(p1, p2, ZZ) == res + + +def test_dmp_mul(): + assert dmp_mul([ZZ(5)], [ZZ(7)], 0, ZZ) == \ + dup_mul([ZZ(5)], [ZZ(7)], ZZ) + assert dmp_mul([QQ(5, 7)], [QQ(3, 7)], 0, QQ) == \ + dup_mul([QQ(5, 7)], [QQ(3, 7)], QQ) + + assert dmp_mul([[[]]], [[[]]], 2, ZZ) == [[[]]] + assert dmp_mul([[[ZZ(1)]]], [[[]]], 2, ZZ) == [[[]]] + assert dmp_mul([[[]]], [[[ZZ(1)]]], 2, ZZ) == [[[]]] + assert dmp_mul([[[ZZ(2)]]], [[[ZZ(1)]]], 2, ZZ) == [[[ZZ(2)]]] + assert dmp_mul([[[ZZ(1)]]], [[[ZZ(2)]]], 2, ZZ) == [[[ZZ(2)]]] + + assert dmp_mul([[[]]], [[[]]], 2, QQ) == [[[]]] + assert dmp_mul([[[QQ(1, 2)]]], [[[]]], 2, QQ) == 
[[[]]] + assert dmp_mul([[[]]], [[[QQ(1, 2)]]], 2, QQ) == [[[]]] + assert dmp_mul([[[QQ(2, 7)]]], [[[QQ(1, 3)]]], 2, QQ) == [[[QQ(2, 21)]]] + assert dmp_mul([[[QQ(1, 7)]]], [[[QQ(2, 3)]]], 2, QQ) == [[[QQ(2, 21)]]] + + K = FF(6) + + assert dmp_mul( + [[K(2)], [K(1)]], [[K(3)], [K(4)]], 1, K) == [[K(5)], [K(4)]] + + +def test_dup_sqr(): + assert dup_sqr([], ZZ) == [] + assert dup_sqr([ZZ(2)], ZZ) == [ZZ(4)] + assert dup_sqr([ZZ(1), ZZ(2)], ZZ) == [ZZ(1), ZZ(4), ZZ(4)] + + assert dup_sqr([], QQ) == [] + assert dup_sqr([QQ(2, 3)], QQ) == [QQ(4, 9)] + assert dup_sqr([QQ(1, 3), QQ(2, 3)], QQ) == [QQ(1, 9), QQ(4, 9), QQ(4, 9)] + + f = dup_normal([2, 0, 0, 1, 7], ZZ) + + assert dup_sqr(f, ZZ) == dup_normal([4, 0, 0, 4, 28, 0, 1, 14, 49], ZZ) + + K = FF(9) + + assert dup_sqr([K(3), K(4)], K) == [K(6), K(7)] + + +def test_dmp_sqr(): + assert dmp_sqr([ZZ(1), ZZ(2)], 0, ZZ) == \ + dup_sqr([ZZ(1), ZZ(2)], ZZ) + + assert dmp_sqr([[[]]], 2, ZZ) == [[[]]] + assert dmp_sqr([[[ZZ(2)]]], 2, ZZ) == [[[ZZ(4)]]] + + assert dmp_sqr([[[]]], 2, QQ) == [[[]]] + assert dmp_sqr([[[QQ(2, 3)]]], 2, QQ) == [[[QQ(4, 9)]]] + + K = FF(9) + + assert dmp_sqr([[K(3)], [K(4)]], 1, K) == [[K(6)], [K(7)]] + + +def test_dup_pow(): + assert dup_pow([], 0, ZZ) == [ZZ(1)] + assert dup_pow([], 0, QQ) == [QQ(1)] + + assert dup_pow([], 1, ZZ) == [] + assert dup_pow([], 7, ZZ) == [] + + assert dup_pow([ZZ(1)], 0, ZZ) == [ZZ(1)] + assert dup_pow([ZZ(1)], 1, ZZ) == [ZZ(1)] + assert dup_pow([ZZ(1)], 7, ZZ) == [ZZ(1)] + + assert dup_pow([ZZ(3)], 0, ZZ) == [ZZ(1)] + assert dup_pow([ZZ(3)], 1, ZZ) == [ZZ(3)] + assert dup_pow([ZZ(3)], 7, ZZ) == [ZZ(2187)] + + assert dup_pow([QQ(1, 1)], 0, QQ) == [QQ(1, 1)] + assert dup_pow([QQ(1, 1)], 1, QQ) == [QQ(1, 1)] + assert dup_pow([QQ(1, 1)], 7, QQ) == [QQ(1, 1)] + + assert dup_pow([QQ(3, 7)], 0, QQ) == [QQ(1, 1)] + assert dup_pow([QQ(3, 7)], 1, QQ) == [QQ(3, 7)] + assert dup_pow([QQ(3, 7)], 7, QQ) == [QQ(2187, 823543)] + + f = dup_normal([2, 0, 0, 1, 7], ZZ) + + assert 
dup_pow(f, 0, ZZ) == dup_normal([1], ZZ) + assert dup_pow(f, 1, ZZ) == dup_normal([2, 0, 0, 1, 7], ZZ) + assert dup_pow(f, 2, ZZ) == dup_normal([4, 0, 0, 4, 28, 0, 1, 14, 49], ZZ) + assert dup_pow(f, 3, ZZ) == dup_normal( + [8, 0, 0, 12, 84, 0, 6, 84, 294, 1, 21, 147, 343], ZZ) + + +def test_dmp_pow(): + assert dmp_pow([[]], 0, 1, ZZ) == [[ZZ(1)]] + assert dmp_pow([[]], 0, 1, QQ) == [[QQ(1)]] + + assert dmp_pow([[]], 1, 1, ZZ) == [[]] + assert dmp_pow([[]], 7, 1, ZZ) == [[]] + + assert dmp_pow([[ZZ(1)]], 0, 1, ZZ) == [[ZZ(1)]] + assert dmp_pow([[ZZ(1)]], 1, 1, ZZ) == [[ZZ(1)]] + assert dmp_pow([[ZZ(1)]], 7, 1, ZZ) == [[ZZ(1)]] + + assert dmp_pow([[QQ(3, 7)]], 0, 1, QQ) == [[QQ(1, 1)]] + assert dmp_pow([[QQ(3, 7)]], 1, 1, QQ) == [[QQ(3, 7)]] + assert dmp_pow([[QQ(3, 7)]], 7, 1, QQ) == [[QQ(2187, 823543)]] + + f = dup_normal([2, 0, 0, 1, 7], ZZ) + + assert dmp_pow(f, 2, 0, ZZ) == dup_pow(f, 2, ZZ) + + +def test_dup_pdiv(): + f = dup_normal([3, 1, 1, 5], ZZ) + g = dup_normal([5, -3, 1], ZZ) + + q = dup_normal([15, 14], ZZ) + r = dup_normal([52, 111], ZZ) + + assert dup_pdiv(f, g, ZZ) == (q, r) + assert dup_pquo(f, g, ZZ) == q + assert dup_prem(f, g, ZZ) == r + + raises(ExactQuotientFailed, lambda: dup_pexquo(f, g, ZZ)) + + f = dup_normal([3, 1, 1, 5], QQ) + g = dup_normal([5, -3, 1], QQ) + + q = dup_normal([15, 14], QQ) + r = dup_normal([52, 111], QQ) + + assert dup_pdiv(f, g, QQ) == (q, r) + assert dup_pquo(f, g, QQ) == q + assert dup_prem(f, g, QQ) == r + + raises(ExactQuotientFailed, lambda: dup_pexquo(f, g, QQ)) + + +def test_dmp_pdiv(): + f = dmp_normal([[1], [], [1, 0, 0]], 1, ZZ) + g = dmp_normal([[1], [-1, 0]], 1, ZZ) + + q = dmp_normal([[1], [1, 0]], 1, ZZ) + r = dmp_normal([[2, 0, 0]], 1, ZZ) + + assert dmp_pdiv(f, g, 1, ZZ) == (q, r) + assert dmp_pquo(f, g, 1, ZZ) == q + assert dmp_prem(f, g, 1, ZZ) == r + + raises(ExactQuotientFailed, lambda: dmp_pexquo(f, g, 1, ZZ)) + + f = dmp_normal([[1], [], [1, 0, 0]], 1, ZZ) + g = dmp_normal([[2], [-2, 0]], 1, ZZ) + 
+ q = dmp_normal([[2], [2, 0]], 1, ZZ) + r = dmp_normal([[8, 0, 0]], 1, ZZ) + + assert dmp_pdiv(f, g, 1, ZZ) == (q, r) + assert dmp_pquo(f, g, 1, ZZ) == q + assert dmp_prem(f, g, 1, ZZ) == r + + raises(ExactQuotientFailed, lambda: dmp_pexquo(f, g, 1, ZZ)) + + +def test_dup_rr_div(): + raises(ZeroDivisionError, lambda: dup_rr_div([1, 2, 3], [], ZZ)) + + f = dup_normal([3, 1, 1, 5], ZZ) + g = dup_normal([5, -3, 1], ZZ) + + q, r = [], f + + assert dup_rr_div(f, g, ZZ) == (q, r) + + +def test_dmp_rr_div(): + raises(ZeroDivisionError, lambda: dmp_rr_div([[1, 2], [3]], [[]], 1, ZZ)) + + f = dmp_normal([[1], [], [1, 0, 0]], 1, ZZ) + g = dmp_normal([[1], [-1, 0]], 1, ZZ) + + q = dmp_normal([[1], [1, 0]], 1, ZZ) + r = dmp_normal([[2, 0, 0]], 1, ZZ) + + assert dmp_rr_div(f, g, 1, ZZ) == (q, r) + + f = dmp_normal([[1], [], [1, 0, 0]], 1, ZZ) + g = dmp_normal([[-1], [1, 0]], 1, ZZ) + + q = dmp_normal([[-1], [-1, 0]], 1, ZZ) + r = dmp_normal([[2, 0, 0]], 1, ZZ) + + assert dmp_rr_div(f, g, 1, ZZ) == (q, r) + + f = dmp_normal([[1], [], [1, 0, 0]], 1, ZZ) + g = dmp_normal([[2], [-2, 0]], 1, ZZ) + + q, r = [[]], f + + assert dmp_rr_div(f, g, 1, ZZ) == (q, r) + + +def test_dup_ff_div(): + raises(ZeroDivisionError, lambda: dup_ff_div([1, 2, 3], [], QQ)) + + f = dup_normal([3, 1, 1, 5], QQ) + g = dup_normal([5, -3, 1], QQ) + + q = [QQ(3, 5), QQ(14, 25)] + r = [QQ(52, 25), QQ(111, 25)] + + assert dup_ff_div(f, g, QQ) == (q, r) + +def test_dup_ff_div_gmpy2(): + if GROUND_TYPES != 'gmpy2': + return + + from gmpy2 import mpq + from sympy.polys.domains import GMPYRationalField + K = GMPYRationalField() + + f = [mpq(1,3), mpq(3,2)] + g = [mpq(2,1)] + assert dmp_ff_div(f, g, 0, K) == ([mpq(1,6), mpq(3,4)], []) + + f = [mpq(1,2), mpq(1,3), mpq(1,4), mpq(1,5)] + g = [mpq(-1,1), mpq(1,1), mpq(-1,1)] + assert dmp_ff_div(f, g, 0, K) == ([mpq(-1,2), mpq(-5,6)], [mpq(7,12), mpq(-19,30)]) + +def test_dmp_ff_div(): + raises(ZeroDivisionError, lambda: dmp_ff_div([[1, 2], [3]], [[]], 1, QQ)) + + f = 
dmp_normal([[1], [], [1, 0, 0]], 1, QQ) + g = dmp_normal([[1], [-1, 0]], 1, QQ) + + q = [[QQ(1, 1)], [QQ(1, 1), QQ(0, 1)]] + r = [[QQ(2, 1), QQ(0, 1), QQ(0, 1)]] + + assert dmp_ff_div(f, g, 1, QQ) == (q, r) + + f = dmp_normal([[1], [], [1, 0, 0]], 1, QQ) + g = dmp_normal([[-1], [1, 0]], 1, QQ) + + q = [[QQ(-1, 1)], [QQ(-1, 1), QQ(0, 1)]] + r = [[QQ(2, 1), QQ(0, 1), QQ(0, 1)]] + + assert dmp_ff_div(f, g, 1, QQ) == (q, r) + + f = dmp_normal([[1], [], [1, 0, 0]], 1, QQ) + g = dmp_normal([[2], [-2, 0]], 1, QQ) + + q = [[QQ(1, 2)], [QQ(1, 2), QQ(0, 1)]] + r = [[QQ(2, 1), QQ(0, 1), QQ(0, 1)]] + + assert dmp_ff_div(f, g, 1, QQ) == (q, r) + + +def test_dup_div(): + f, g, q, r = [5, 4, 3, 2, 1], [1, 2, 3], [5, -6, 0], [20, 1] + + assert dup_div(f, g, ZZ) == (q, r) + assert dup_quo(f, g, ZZ) == q + assert dup_rem(f, g, ZZ) == r + + raises(ExactQuotientFailed, lambda: dup_exquo(f, g, ZZ)) + + f, g, q, r = [5, 4, 3, 2, 1, 0], [1, 2, 0, 0, 9], [5, -6], [15, 2, -44, 54] + + assert dup_div(f, g, ZZ) == (q, r) + assert dup_quo(f, g, ZZ) == q + assert dup_rem(f, g, ZZ) == r + + raises(ExactQuotientFailed, lambda: dup_exquo(f, g, ZZ)) + + +def test_dmp_div(): + f, g, q, r = [5, 4, 3, 2, 1], [1, 2, 3], [5, -6, 0], [20, 1] + + assert dmp_div(f, g, 0, ZZ) == (q, r) + assert dmp_quo(f, g, 0, ZZ) == q + assert dmp_rem(f, g, 0, ZZ) == r + + raises(ExactQuotientFailed, lambda: dmp_exquo(f, g, 0, ZZ)) + + f, g, q, r = [[[1]]], [[[2]], [1]], [[[]]], [[[1]]] + + assert dmp_div(f, g, 2, ZZ) == (q, r) + assert dmp_quo(f, g, 2, ZZ) == q + assert dmp_rem(f, g, 2, ZZ) == r + + raises(ExactQuotientFailed, lambda: dmp_exquo(f, g, 2, ZZ)) + + +def test_dup_max_norm(): + assert dup_max_norm([], ZZ) == 0 + assert dup_max_norm([1], ZZ) == 1 + + assert dup_max_norm([1, 4, 2, 3], ZZ) == 4 + + +def test_dmp_max_norm(): + assert dmp_max_norm([[[]]], 2, ZZ) == 0 + assert dmp_max_norm([[[1]]], 2, ZZ) == 1 + + assert dmp_max_norm(f_0, 2, ZZ) == 6 + + +def test_dup_l1_norm(): + assert dup_l1_norm([], ZZ) == 0 + 
assert dup_l1_norm([1], ZZ) == 1 + assert dup_l1_norm([1, 4, 2, 3], ZZ) == 10 + + +def test_dmp_l1_norm(): + assert dmp_l1_norm([[[]]], 2, ZZ) == 0 + assert dmp_l1_norm([[[1]]], 2, ZZ) == 1 + + assert dmp_l1_norm(f_0, 2, ZZ) == 31 + + +def test_dup_l2_norm_squared(): + assert dup_l2_norm_squared([], ZZ) == 0 + assert dup_l2_norm_squared([1], ZZ) == 1 + assert dup_l2_norm_squared([1, 4, 2, 3], ZZ) == 30 + + +def test_dmp_l2_norm_squared(): + assert dmp_l2_norm_squared([[[]]], 2, ZZ) == 0 + assert dmp_l2_norm_squared([[[1]]], 2, ZZ) == 1 + assert dmp_l2_norm_squared(f_0, 2, ZZ) == 111 + + +def test_dup_expand(): + assert dup_expand((), ZZ) == [1] + assert dup_expand(([1, 2, 3], [1, 2], [7, 5, 4, 3]), ZZ) == \ + dup_mul([1, 2, 3], dup_mul([1, 2], [7, 5, 4, 3], ZZ), ZZ) + + +def test_dmp_expand(): + assert dmp_expand((), 1, ZZ) == [[1]] + assert dmp_expand(([[1], [2], [3]], [[1], [2]], [[7], [5], [4], [3]]), 1, ZZ) == \ + dmp_mul([[1], [2], [3]], dmp_mul([[1], [2]], [[7], [5], [ + 4], [3]], 1, ZZ), 1, ZZ) diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_dispersion.py b/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_dispersion.py new file mode 100644 index 0000000000000000000000000000000000000000..5fc4c078bd4b0e1d89add93979787ec7b40899b1 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_dispersion.py @@ -0,0 +1,95 @@ +from sympy.core import Symbol, S, oo +from sympy.functions.elementary.miscellaneous import sqrt +from sympy.polys import poly +from sympy.polys.dispersion import dispersion, dispersionset + + +def test_dispersion(): + x = Symbol("x") + a = Symbol("a") + + fp = poly(S.Zero, x) + assert sorted(dispersionset(fp)) == [0] + + fp = poly(S(2), x) + assert sorted(dispersionset(fp)) == [0] + + fp = poly(x + 1, x) + assert sorted(dispersionset(fp)) == [0] + assert dispersion(fp) == 0 + + fp = poly((x + 1)*(x + 2), x) + assert sorted(dispersionset(fp)) == [0, 1] + assert 
dispersion(fp) == 1 + + fp = poly(x*(x + 3), x) + assert sorted(dispersionset(fp)) == [0, 3] + assert dispersion(fp) == 3 + + fp = poly((x - 3)*(x + 3), x) + assert sorted(dispersionset(fp)) == [0, 6] + assert dispersion(fp) == 6 + + fp = poly(x**4 - 3*x**2 + 1, x) + gp = fp.shift(-3) + assert sorted(dispersionset(fp, gp)) == [2, 3, 4] + assert dispersion(fp, gp) == 4 + assert sorted(dispersionset(gp, fp)) == [] + assert dispersion(gp, fp) is -oo + + fp = poly(x*(3*x**2+a)*(x-2536)*(x**3+a), x) + gp = fp.as_expr().subs(x, x-345).as_poly(x) + assert sorted(dispersionset(fp, gp)) == [345, 2881] + assert sorted(dispersionset(gp, fp)) == [2191] + + gp = poly((x-2)**2*(x-3)**3*(x-5)**3, x) + assert sorted(dispersionset(gp)) == [0, 1, 2, 3] + assert sorted(dispersionset(gp, (gp+4)**2)) == [1, 2] + + fp = poly(x*(x+2)*(x-1), x) + assert sorted(dispersionset(fp)) == [0, 1, 2, 3] + + fp = poly(x**2 + sqrt(5)*x - 1, x, domain='QQ') + gp = poly(x**2 + (2 + sqrt(5))*x + sqrt(5), x, domain='QQ') + assert sorted(dispersionset(fp, gp)) == [2] + assert sorted(dispersionset(gp, fp)) == [1, 4] + + # There are some difficulties if we compute over Z[a] + # and alpha happenes to lie in Z[a] instead of simply Z. + # Hence we can not decide if alpha is indeed integral + # in general. + + fp = poly(4*x**4 + (4*a + 8)*x**3 + (a**2 + 6*a + 4)*x**2 + (a**2 + 2*a)*x, x) + assert sorted(dispersionset(fp)) == [0, 1] + + # For any specific value of a, the dispersion is 3*a + # but the algorithm can not find this in general. + # This is the point where the resultant based Ansatz + # is superior to the current one. 
+ fp = poly(a**2*x**3 + (a**3 + a**2 + a + 1)*x, x) + gp = fp.as_expr().subs(x, x - 3*a).as_poly(x) + assert sorted(dispersionset(fp, gp)) == [] + + fpa = fp.as_expr().subs(a, 2).as_poly(x) + gpa = gp.as_expr().subs(a, 2).as_poly(x) + assert sorted(dispersionset(fpa, gpa)) == [6] + + # Work with Expr instead of Poly + f = (x + 1)*(x + 2) + assert sorted(dispersionset(f)) == [0, 1] + assert dispersion(f) == 1 + + f = x**4 - 3*x**2 + 1 + g = x**4 - 12*x**3 + 51*x**2 - 90*x + 55 + assert sorted(dispersionset(f, g)) == [2, 3, 4] + assert dispersion(f, g) == 4 + + # Work with Expr and specify a generator + f = (x + 1)*(x + 2) + assert sorted(dispersionset(f, None, x)) == [0, 1] + assert dispersion(f, None, x) == 1 + + f = x**4 - 3*x**2 + 1 + g = x**4 - 12*x**3 + 51*x**2 - 90*x + 55 + assert sorted(dispersionset(f, g, x)) == [2, 3, 4] + assert dispersion(f, g, x) == 4 diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_distributedmodules.py b/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_distributedmodules.py new file mode 100644 index 0000000000000000000000000000000000000000..c95672f99f878f3def660aadec901afbde9adf8b --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_distributedmodules.py @@ -0,0 +1,208 @@ +"""Tests for sparse distributed modules. 
""" + +from sympy.polys.distributedmodules import ( + sdm_monomial_mul, sdm_monomial_deg, sdm_monomial_divides, + sdm_add, sdm_LM, sdm_LT, sdm_mul_term, sdm_zero, sdm_deg, + sdm_LC, sdm_from_dict, + sdm_spoly, sdm_ecart, sdm_nf_mora, sdm_groebner, + sdm_from_vector, sdm_to_vector, sdm_monomial_lcm +) + +from sympy.polys.orderings import lex, grlex, InverseOrder +from sympy.polys.domains import QQ + +from sympy.abc import x, y, z + + +def test_sdm_monomial_mul(): + assert sdm_monomial_mul((1, 1, 0), (1, 3)) == (1, 2, 3) + + +def test_sdm_monomial_deg(): + assert sdm_monomial_deg((5, 2, 1)) == 3 + + +def test_sdm_monomial_lcm(): + assert sdm_monomial_lcm((1, 2, 3), (1, 5, 0)) == (1, 5, 3) + + +def test_sdm_monomial_divides(): + assert sdm_monomial_divides((1, 0, 0), (1, 0, 0)) is True + assert sdm_monomial_divides((1, 0, 0), (1, 2, 1)) is True + assert sdm_monomial_divides((5, 1, 1), (5, 2, 1)) is True + + assert sdm_monomial_divides((1, 0, 0), (2, 0, 0)) is False + assert sdm_monomial_divides((1, 1, 0), (1, 0, 0)) is False + assert sdm_monomial_divides((5, 1, 2), (5, 0, 1)) is False + + +def test_sdm_LC(): + assert sdm_LC([((1, 2, 3), QQ(5))], QQ) == QQ(5) + + +def test_sdm_from_dict(): + dic = {(1, 2, 1, 1): QQ(1), (1, 1, 2, 1): QQ(1), (1, 0, 2, 1): QQ(1), + (1, 0, 0, 3): QQ(1), (1, 1, 1, 0): QQ(1)} + assert sdm_from_dict(dic, grlex) == \ + [((1, 2, 1, 1), QQ(1)), ((1, 1, 2, 1), QQ(1)), + ((1, 0, 2, 1), QQ(1)), ((1, 0, 0, 3), QQ(1)), ((1, 1, 1, 0), QQ(1))] + +# TODO test to_dict? 
+ + +def test_sdm_add(): + assert sdm_add([((1, 1, 1), QQ(1))], [((2, 0, 0), QQ(1))], lex, QQ) == \ + [((2, 0, 0), QQ(1)), ((1, 1, 1), QQ(1))] + assert sdm_add([((1, 1, 1), QQ(1))], [((1, 1, 1), QQ(-1))], lex, QQ) == [] + assert sdm_add([((1, 0, 0), QQ(1))], [((1, 0, 0), QQ(2))], lex, QQ) == \ + [((1, 0, 0), QQ(3))] + assert sdm_add([((1, 0, 1), QQ(1))], [((1, 1, 0), QQ(1))], lex, QQ) == \ + [((1, 1, 0), QQ(1)), ((1, 0, 1), QQ(1))] + + +def test_sdm_LM(): + dic = {(1, 2, 3): QQ(1), (4, 0, 0): QQ(1), (4, 0, 1): QQ(1)} + assert sdm_LM(sdm_from_dict(dic, lex)) == (4, 0, 1) + + +def test_sdm_LT(): + dic = {(1, 2, 3): QQ(1), (4, 0, 0): QQ(2), (4, 0, 1): QQ(3)} + assert sdm_LT(sdm_from_dict(dic, lex)) == ((4, 0, 1), QQ(3)) + + +def test_sdm_mul_term(): + assert sdm_mul_term([((1, 0, 0), QQ(1))], ((0, 0), QQ(0)), lex, QQ) == [] + assert sdm_mul_term([], ((1, 0), QQ(1)), lex, QQ) == [] + assert sdm_mul_term([((1, 0, 0), QQ(1))], ((1, 0), QQ(1)), lex, QQ) == \ + [((1, 1, 0), QQ(1))] + f = [((2, 0, 1), QQ(4)), ((1, 1, 0), QQ(3))] + assert sdm_mul_term(f, ((1, 1), QQ(2)), lex, QQ) == \ + [((2, 1, 2), QQ(8)), ((1, 2, 1), QQ(6))] + + +def test_sdm_zero(): + assert sdm_zero() == [] + + +def test_sdm_deg(): + assert sdm_deg([((1, 2, 3), 1), ((10, 0, 1), 1), ((2, 3, 4), 4)]) == 7 + + +def test_sdm_spoly(): + f = [((2, 1, 1), QQ(1)), ((1, 0, 1), QQ(1))] + g = [((2, 3, 0), QQ(1))] + h = [((1, 2, 3), QQ(1))] + assert sdm_spoly(f, h, lex, QQ) == [] + assert sdm_spoly(f, g, lex, QQ) == [((1, 2, 1), QQ(1))] + + +def test_sdm_ecart(): + assert sdm_ecart([((1, 2, 3), 1), ((1, 0, 1), 1)]) == 0 + assert sdm_ecart([((2, 2, 1), 1), ((1, 5, 1), 1)]) == 3 + + +def test_sdm_nf_mora(): + f = sdm_from_dict({(1, 2, 1, 1): QQ(1), (1, 1, 2, 1): QQ(1), + (1, 0, 2, 1): QQ(1), (1, 0, 0, 3): QQ(1), (1, 1, 1, 0): QQ(1)}, + grlex) + f1 = sdm_from_dict({(1, 1, 1, 0): QQ(1), (1, 0, 2, 0): QQ(1), + (1, 0, 0, 0): QQ(-1)}, grlex) + f2 = sdm_from_dict({(1, 1, 1, 0): QQ(1)}, grlex) + (id0, id1, id2) = 
[sdm_from_dict({(i, 0, 0, 0): QQ(1)}, grlex) + for i in range(3)] + + assert sdm_nf_mora(f, [f1, f2], grlex, QQ, phantom=(id0, [id1, id2])) == \ + ([((1, 0, 2, 1), QQ(1)), ((1, 0, 0, 3), QQ(1)), ((1, 1, 1, 0), QQ(1)), + ((1, 1, 0, 1), QQ(1))], + [((1, 1, 0, 1), QQ(-1)), ((0, 0, 0, 0), QQ(1))]) + assert sdm_nf_mora(f, [f2, f1], grlex, QQ, phantom=(id0, [id2, id1])) == \ + ([((1, 0, 2, 1), QQ(1)), ((1, 0, 0, 3), QQ(1)), ((1, 1, 1, 0), QQ(1))], + [((2, 1, 0, 1), QQ(-1)), ((2, 0, 1, 1), QQ(-1)), ((0, 0, 0, 0), QQ(1))]) + + f = sdm_from_vector([x*z, y**2 + y*z - z, y], lex, QQ, gens=[x, y, z]) + f1 = sdm_from_vector([x, y, 1], lex, QQ, gens=[x, y, z]) + f2 = sdm_from_vector([x*y, z, z**2], lex, QQ, gens=[x, y, z]) + assert sdm_nf_mora(f, [f1, f2], lex, QQ) == \ + sdm_nf_mora(f, [f2, f1], lex, QQ) == \ + [((1, 0, 1, 1), QQ(1)), ((1, 0, 0, 1), QQ(-1)), ((0, 1, 1, 0), QQ(-1)), + ((0, 1, 0, 1), QQ(1))] + + +def test_conversion(): + f = [x**2 + y**2, 2*z] + g = [((1, 0, 0, 1), QQ(2)), ((0, 2, 0, 0), QQ(1)), ((0, 0, 2, 0), QQ(1))] + assert sdm_to_vector(g, [x, y, z], QQ) == f + assert sdm_from_vector(f, lex, QQ) == g + assert sdm_from_vector( + [x, 1], lex, QQ) == [((1, 0), QQ(1)), ((0, 1), QQ(1))] + assert sdm_to_vector([((1, 1, 0, 0), 1)], [x, y, z], QQ, n=3) == [0, x, 0] + assert sdm_from_vector([0, 0], lex, QQ, gens=[x, y]) == sdm_zero() + + +def test_nontrivial(): + gens = [x, y, z] + + def contains(I, f): + S = [sdm_from_vector([g], lex, QQ, gens=gens) for g in I] + G = sdm_groebner(S, sdm_nf_mora, lex, QQ) + return sdm_nf_mora(sdm_from_vector([f], lex, QQ, gens=gens), + G, lex, QQ) == sdm_zero() + + assert contains([x, y], x) + assert contains([x, y], x + y) + assert not contains([x, y], 1) + assert not contains([x, y], z) + assert contains([x**2 + y, x**2 + x], x - y) + assert not contains([x + y + z, x*y + x*z + y*z, x*y*z], x**2) + assert contains([x + y + z, x*y + x*z + y*z, x*y*z], x**3) + assert contains([x + y + z, x*y + x*z + y*z, x*y*z], x**4) + assert not 
contains([x + y + z, x*y + x*z + y*z, x*y*z], x*y**2) + assert contains([x + y + z, x*y + x*z + y*z, x*y*z], x**4 + y**3 + 2*z*y*x) + assert contains([x + y + z, x*y + x*z + y*z, x*y*z], x*y*z) + assert contains([x, 1 + x + y, 5 - 7*y], 1) + assert contains( + [x**3 + y**3, y**3 + z**3, z**3 + x**3, x**2*y + x**2*z + y**2*z], + x**3) + assert not contains( + [x**3 + y**3, y**3 + z**3, z**3 + x**3, x**2*y + x**2*z + y**2*z], + x**2 + y**2) + + # compare local order + assert not contains([x*(1 + x + y), y*(1 + z)], x) + assert not contains([x*(1 + x + y), y*(1 + z)], x + y) + + +def test_local(): + igrlex = InverseOrder(grlex) + gens = [x, y, z] + + def contains(I, f): + S = [sdm_from_vector([g], igrlex, QQ, gens=gens) for g in I] + G = sdm_groebner(S, sdm_nf_mora, igrlex, QQ) + return sdm_nf_mora(sdm_from_vector([f], lex, QQ, gens=gens), + G, lex, QQ) == sdm_zero() + assert contains([x, y], x) + assert contains([x, y], x + y) + assert not contains([x, y], 1) + assert not contains([x, y], z) + assert contains([x**2 + y, x**2 + x], x - y) + assert not contains([x + y + z, x*y + x*z + y*z, x*y*z], x**2) + assert contains([x*(1 + x + y), y*(1 + z)], x) + assert contains([x*(1 + x + y), y*(1 + z)], x + y) + + +def test_uncovered_line(): + gens = [x, y] + f1 = sdm_zero() + f2 = sdm_from_vector([x, 0], lex, QQ, gens=gens) + f3 = sdm_from_vector([0, y], lex, QQ, gens=gens) + + assert sdm_spoly(f1, f2, lex, QQ) == sdm_zero() + assert sdm_spoly(f3, f2, lex, QQ) == sdm_zero() + + +def test_chain_criterion(): + gens = [x] + f1 = sdm_from_vector([1, x], grlex, QQ, gens=gens) + f2 = sdm_from_vector([0, x - 2], grlex, QQ, gens=gens) + assert len(sdm_groebner([f1, f2], sdm_nf_mora, grlex, QQ)) == 2 diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_euclidtools.py b/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_euclidtools.py new file mode 100644 index 
0000000000000000000000000000000000000000..3061be73f987163951a5836ff50125d29abc60c7 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_euclidtools.py @@ -0,0 +1,712 @@ +"""Tests for Euclidean algorithms, GCDs, LCMs and polynomial remainder sequences. """ + +from sympy.polys.rings import ring +from sympy.polys.domains import ZZ, QQ, RR + +from sympy.polys.specialpolys import ( + f_polys, + dmp_fateman_poly_F_1, + dmp_fateman_poly_F_2, + dmp_fateman_poly_F_3) + +f_0, f_1, f_2, f_3, f_4, f_5, f_6 = f_polys() + +def test_dup_gcdex(): + R, x = ring("x", QQ) + + f = x**4 - 2*x**3 - 6*x**2 + 12*x + 15 + g = x**3 + x**2 - 4*x - 4 + + s = -QQ(1,5)*x + QQ(3,5) + t = QQ(1,5)*x**2 - QQ(6,5)*x + 2 + h = x + 1 + + assert R.dup_half_gcdex(f, g) == (s, h) + assert R.dup_gcdex(f, g) == (s, t, h) + + f = x**4 + 4*x**3 - x + 1 + g = x**3 - x + 1 + + s, t, h = R.dup_gcdex(f, g) + S, T, H = R.dup_gcdex(g, f) + + assert R.dup_add(R.dup_mul(s, f), + R.dup_mul(t, g)) == h + assert R.dup_add(R.dup_mul(S, g), + R.dup_mul(T, f)) == H + + f = 2*x + g = x**2 - 16 + + s = QQ(1,32)*x + t = -QQ(1,16) + h = 1 + + assert R.dup_half_gcdex(f, g) == (s, h) + assert R.dup_gcdex(f, g) == (s, t, h) + + +def test_dup_invert(): + R, x = ring("x", QQ) + assert R.dup_invert(2*x, x**2 - 16) == QQ(1,32)*x + + +def test_dup_euclidean_prs(): + R, x = ring("x", QQ) + + f = x**8 + x**6 - 3*x**4 - 3*x**3 + 8*x**2 + 2*x - 5 + g = 3*x**6 + 5*x**4 - 4*x**2 - 9*x + 21 + + assert R.dup_euclidean_prs(f, g) == [ + f, + g, + -QQ(5,9)*x**4 + QQ(1,9)*x**2 - QQ(1,3), + -QQ(117,25)*x**2 - 9*x + QQ(441,25), + QQ(233150,19773)*x - QQ(102500,6591), + -QQ(1288744821,543589225)] + + +def test_dup_primitive_prs(): + R, x = ring("x", ZZ) + + f = x**8 + x**6 - 3*x**4 - 3*x**3 + 8*x**2 + 2*x - 5 + g = 3*x**6 + 5*x**4 - 4*x**2 - 9*x + 21 + + assert R.dup_primitive_prs(f, g) == [ + f, + g, + -5*x**4 + x**2 - 3, + 13*x**2 + 25*x - 49, + 4663*x - 6150, + 1] + + +def test_dup_subresultants(): + R, x = 
ring("x", ZZ) + + assert R.dup_resultant(0, 0) == 0 + + assert R.dup_resultant(1, 0) == 0 + assert R.dup_resultant(0, 1) == 0 + + f = x**8 + x**6 - 3*x**4 - 3*x**3 + 8*x**2 + 2*x - 5 + g = 3*x**6 + 5*x**4 - 4*x**2 - 9*x + 21 + + a = 15*x**4 - 3*x**2 + 9 + b = 65*x**2 + 125*x - 245 + c = 9326*x - 12300 + d = 260708 + + assert R.dup_subresultants(f, g) == [f, g, a, b, c, d] + assert R.dup_resultant(f, g) == R.dup_LC(d) + + f = x**2 - 2*x + 1 + g = x**2 - 1 + + a = 2*x - 2 + + assert R.dup_subresultants(f, g) == [f, g, a] + assert R.dup_resultant(f, g) == 0 + + f = x**2 + 1 + g = x**2 - 1 + + a = -2 + + assert R.dup_subresultants(f, g) == [f, g, a] + assert R.dup_resultant(f, g) == 4 + + f = x**2 - 1 + g = x**3 - x**2 + 2 + + assert R.dup_resultant(f, g) == 0 + + f = 3*x**3 - x + g = 5*x**2 + 1 + + assert R.dup_resultant(f, g) == 64 + + f = x**2 - 2*x + 7 + g = x**3 - x + 5 + + assert R.dup_resultant(f, g) == 265 + + f = x**3 - 6*x**2 + 11*x - 6 + g = x**3 - 15*x**2 + 74*x - 120 + + assert R.dup_resultant(f, g) == -8640 + + f = x**3 - 6*x**2 + 11*x - 6 + g = x**3 - 10*x**2 + 29*x - 20 + + assert R.dup_resultant(f, g) == 0 + + f = x**3 - 1 + g = x**3 + 2*x**2 + 2*x - 1 + + assert R.dup_resultant(f, g) == 16 + + f = x**8 - 2 + g = x - 1 + + assert R.dup_resultant(f, g) == -1 + + +def test_dmp_subresultants(): + R, x, y = ring("x,y", ZZ) + + assert R.dmp_resultant(0, 0) == 0 + assert R.dmp_prs_resultant(0, 0)[0] == 0 + assert R.dmp_zz_collins_resultant(0, 0) == 0 + assert R.dmp_qq_collins_resultant(0, 0) == 0 + + assert R.dmp_resultant(1, 0) == 0 + assert R.dmp_resultant(1, 0) == 0 + assert R.dmp_resultant(1, 0) == 0 + + assert R.dmp_resultant(0, 1) == 0 + assert R.dmp_prs_resultant(0, 1)[0] == 0 + assert R.dmp_zz_collins_resultant(0, 1) == 0 + assert R.dmp_qq_collins_resultant(0, 1) == 0 + + f = 3*x**2*y - y**3 - 4 + g = x**2 + x*y**3 - 9 + + a = 3*x*y**4 + y**3 - 27*y + 4 + b = -3*y**10 - 12*y**7 + y**6 - 54*y**4 + 8*y**3 + 729*y**2 - 216*y + 16 + + r = R.dmp_LC(b) + + 
assert R.dmp_subresultants(f, g) == [f, g, a, b] + + assert R.dmp_resultant(f, g) == r + assert R.dmp_prs_resultant(f, g)[0] == r + assert R.dmp_zz_collins_resultant(f, g) == r + assert R.dmp_qq_collins_resultant(f, g) == r + + f = -x**3 + 5 + g = 3*x**2*y + x**2 + + a = 45*y**2 + 30*y + 5 + b = 675*y**3 + 675*y**2 + 225*y + 25 + + r = R.dmp_LC(b) + + assert R.dmp_subresultants(f, g) == [f, g, a] + assert R.dmp_resultant(f, g) == r + assert R.dmp_prs_resultant(f, g)[0] == r + assert R.dmp_zz_collins_resultant(f, g) == r + assert R.dmp_qq_collins_resultant(f, g) == r + + R, x, y, z, u, v = ring("x,y,z,u,v", ZZ) + + f = 6*x**2 - 3*x*y - 2*x*z + y*z + g = x**2 - x*u - x*v + u*v + + r = y**2*z**2 - 3*y**2*z*u - 3*y**2*z*v + 9*y**2*u*v - 2*y*z**2*u \ + - 2*y*z**2*v + 6*y*z*u**2 + 12*y*z*u*v + 6*y*z*v**2 - 18*y*u**2*v \ + - 18*y*u*v**2 + 4*z**2*u*v - 12*z*u**2*v - 12*z*u*v**2 + 36*u**2*v**2 + + assert R.dmp_zz_collins_resultant(f, g) == r.drop(x) + + R, x, y, z, u, v = ring("x,y,z,u,v", QQ) + + f = x**2 - QQ(1,2)*x*y - QQ(1,3)*x*z + QQ(1,6)*y*z + g = x**2 - x*u - x*v + u*v + + r = QQ(1,36)*y**2*z**2 - QQ(1,12)*y**2*z*u - QQ(1,12)*y**2*z*v + QQ(1,4)*y**2*u*v \ + - QQ(1,18)*y*z**2*u - QQ(1,18)*y*z**2*v + QQ(1,6)*y*z*u**2 + QQ(1,3)*y*z*u*v \ + + QQ(1,6)*y*z*v**2 - QQ(1,2)*y*u**2*v - QQ(1,2)*y*u*v**2 + QQ(1,9)*z**2*u*v \ + - QQ(1,3)*z*u**2*v - QQ(1,3)*z*u*v**2 + u**2*v**2 + + assert R.dmp_qq_collins_resultant(f, g) == r.drop(x) + + Rt, t = ring("t", ZZ) + Rx, x = ring("x", Rt) + + f = x**6 - 5*x**4 + 5*x**2 + 4 + g = -6*t*x**5 + x**4 + 20*t*x**3 - 3*x**2 - 10*t*x + 6 + + assert Rx.dup_resultant(f, g) == 2930944*t**6 + 2198208*t**4 + 549552*t**2 + 45796 + + +def test_dup_discriminant(): + R, x = ring("x", ZZ) + + assert R.dup_discriminant(0) == 0 + assert R.dup_discriminant(x) == 1 + + assert R.dup_discriminant(x**3 + 3*x**2 + 9*x - 13) == -11664 + assert R.dup_discriminant(5*x**5 + x**3 + 2) == 31252160 + assert R.dup_discriminant(x**4 + 2*x**3 + 6*x**2 - 22*x + 13) == 0 + 
assert R.dup_discriminant(12*x**7 + 15*x**4 + 30*x**3 + x**2 + 1) == -220289699947514112 + + +def test_dmp_discriminant(): + R, x = ring("x", ZZ) + + assert R.dmp_discriminant(0) == 0 + + R, x, y = ring("x,y", ZZ) + + assert R.dmp_discriminant(0) == 0 + assert R.dmp_discriminant(y) == 0 + + assert R.dmp_discriminant(x**3 + 3*x**2 + 9*x - 13) == -11664 + assert R.dmp_discriminant(5*x**5 + x**3 + 2) == 31252160 + assert R.dmp_discriminant(x**4 + 2*x**3 + 6*x**2 - 22*x + 13) == 0 + assert R.dmp_discriminant(12*x**7 + 15*x**4 + 30*x**3 + x**2 + 1) == -220289699947514112 + + assert R.dmp_discriminant(x**2*y + 2*y) == (-8*y**2).drop(x) + assert R.dmp_discriminant(x*y**2 + 2*x) == 1 + + R, x, y, z = ring("x,y,z", ZZ) + assert R.dmp_discriminant(x*y + z) == 1 + + R, x, y, z, u = ring("x,y,z,u", ZZ) + assert R.dmp_discriminant(x**2*y + x*z + u) == (-4*y*u + z**2).drop(x) + + R, x, y, z, u, v = ring("x,y,z,u,v", ZZ) + assert R.dmp_discriminant(x**3*y + x**2*z + x*u + v) == \ + (-27*y**2*v**2 + 18*y*z*u*v - 4*y*u**3 - 4*z**3*v + z**2*u**2).drop(x) + + +def test_dup_gcd(): + R, x = ring("x", ZZ) + + f, g = 0, 0 + assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (0, 0, 0) + + f, g = 2, 0 + assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (2, 1, 0) + + f, g = -2, 0 + assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (2, -1, 0) + + f, g = 0, -2 + assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (2, 0, -1) + + f, g = 0, 2*x + 4 + assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (2*x + 4, 0, 1) + + f, g = 2*x + 4, 0 + assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (2*x + 4, 1, 0) + + f, g = 2, 2 + assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (2, 1, 1) + + f, g = -2, 2 + assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (2, -1, 1) + + f, g = 2, -2 + assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (2, 1, -1) + + f, g = -2, -2 + assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (2, 
-1, -1) + + f, g = x**2 + 2*x + 1, 1 + assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (1, x**2 + 2*x + 1, 1) + + f, g = x**2 + 2*x + 1, 2 + assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (1, x**2 + 2*x + 1, 2) + + f, g = 2*x**2 + 4*x + 2, 2 + assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (2, x**2 + 2*x + 1, 1) + + f, g = 2, 2*x**2 + 4*x + 2 + assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (2, 1, x**2 + 2*x + 1) + + f, g = 2*x**2 + 4*x + 2, x + 1 + assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (x + 1, 2*x + 2, 1) + + f, g = x + 1, 2*x**2 + 4*x + 2 + assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (x + 1, 1, 2*x + 2) + + f, g = x - 31, x + assert R.dup_zz_heu_gcd(f, g) == R.dup_rr_prs_gcd(f, g) == (1, f, g) + + f = x**4 + 8*x**3 + 21*x**2 + 22*x + 8 + g = x**3 + 6*x**2 + 11*x + 6 + + h = x**2 + 3*x + 2 + + cff = x**2 + 5*x + 4 + cfg = x + 3 + + assert R.dup_zz_heu_gcd(f, g) == (h, cff, cfg) + assert R.dup_rr_prs_gcd(f, g) == (h, cff, cfg) + + f = x**4 - 4 + g = x**4 + 4*x**2 + 4 + + h = x**2 + 2 + + cff = x**2 - 2 + cfg = x**2 + 2 + + assert R.dup_zz_heu_gcd(f, g) == (h, cff, cfg) + assert R.dup_rr_prs_gcd(f, g) == (h, cff, cfg) + + f = x**8 + x**6 - 3*x**4 - 3*x**3 + 8*x**2 + 2*x - 5 + g = 3*x**6 + 5*x**4 - 4*x**2 - 9*x + 21 + + h = 1 + + cff = f + cfg = g + + assert R.dup_zz_heu_gcd(f, g) == (h, cff, cfg) + assert R.dup_rr_prs_gcd(f, g) == (h, cff, cfg) + + R, x = ring("x", QQ) + + f = x**8 + x**6 - 3*x**4 - 3*x**3 + 8*x**2 + 2*x - 5 + g = 3*x**6 + 5*x**4 - 4*x**2 - 9*x + 21 + + h = 1 + + cff = f + cfg = g + + assert R.dup_qq_heu_gcd(f, g) == (h, cff, cfg) + assert R.dup_ff_prs_gcd(f, g) == (h, cff, cfg) + + R, x = ring("x", ZZ) + + f = - 352518131239247345597970242177235495263669787845475025293906825864749649589178600387510272*x**49 \ + + 46818041807522713962450042363465092040687472354933295397472942006618953623327997952*x**42 \ + + 
378182690892293941192071663536490788434899030680411695933646320291525827756032*x**35 \ + + 112806468807371824947796775491032386836656074179286744191026149539708928*x**28 \ + - 12278371209708240950316872681744825481125965781519138077173235712*x**21 \ + + 289127344604779611146960547954288113529690984687482920704*x**14 \ + + 19007977035740498977629742919480623972236450681*x**7 \ + + 311973482284542371301330321821976049 + + g = 365431878023781158602430064717380211405897160759702125019136*x**21 \ + + 197599133478719444145775798221171663643171734081650688*x**14 \ + - 9504116979659010018253915765478924103928886144*x**7 \ + - 311973482284542371301330321821976049 + + assert R.dup_zz_heu_gcd(f, R.dup_diff(f, 1))[0] == g + assert R.dup_rr_prs_gcd(f, R.dup_diff(f, 1))[0] == g + + R, x = ring("x", QQ) + + f = QQ(1,2)*x**2 + x + QQ(1,2) + g = QQ(1,2)*x + QQ(1,2) + + h = x + 1 + + assert R.dup_qq_heu_gcd(f, g) == (h, g, QQ(1,2)) + assert R.dup_ff_prs_gcd(f, g) == (h, g, QQ(1,2)) + + R, x = ring("x", ZZ) + + f = 1317378933230047068160*x + 2945748836994210856960 + g = 120352542776360960*x + 269116466014453760 + + h = 120352542776360960*x + 269116466014453760 + cff = 10946 + cfg = 1 + + assert R.dup_zz_heu_gcd(f, g) == (h, cff, cfg) + + +def test_dmp_gcd(): + R, x, y = ring("x,y", ZZ) + + f, g = 0, 0 + assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (0, 0, 0) + + f, g = 2, 0 + assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (2, 1, 0) + + f, g = -2, 0 + assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (2, -1, 0) + + f, g = 0, -2 + assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (2, 0, -1) + + f, g = 0, 2*x + 4 + assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (2*x + 4, 0, 1) + + f, g = 2*x + 4, 0 + assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (2*x + 4, 1, 0) + + f, g = 2, 2 + assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (2, 1, 1) + + f, g = -2, 2 + assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) 
== (2, -1, 1) + + f, g = 2, -2 + assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (2, 1, -1) + + f, g = -2, -2 + assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (2, -1, -1) + + f, g = x**2 + 2*x + 1, 1 + assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (1, x**2 + 2*x + 1, 1) + + f, g = x**2 + 2*x + 1, 2 + assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (1, x**2 + 2*x + 1, 2) + + f, g = 2*x**2 + 4*x + 2, 2 + assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (2, x**2 + 2*x + 1, 1) + + f, g = 2, 2*x**2 + 4*x + 2 + assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (2, 1, x**2 + 2*x + 1) + + f, g = 2*x**2 + 4*x + 2, x + 1 + assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (x + 1, 2*x + 2, 1) + + f, g = x + 1, 2*x**2 + 4*x + 2 + assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (x + 1, 1, 2*x + 2) + + R, x, y, z, u = ring("x,y,z,u", ZZ) + + f, g = u**2 + 2*u + 1, 2*u + 2 + assert R.dmp_zz_heu_gcd(f, g) == R.dmp_rr_prs_gcd(f, g) == (u + 1, u + 1, 2) + + f, g = z**2*u**2 + 2*z**2*u + z**2 + z*u + z, u**2 + 2*u + 1 + h, cff, cfg = u + 1, z**2*u + z**2 + z, u + 1 + + assert R.dmp_zz_heu_gcd(f, g) == (h, cff, cfg) + assert R.dmp_rr_prs_gcd(f, g) == (h, cff, cfg) + + assert R.dmp_zz_heu_gcd(g, f) == (h, cfg, cff) + assert R.dmp_rr_prs_gcd(g, f) == (h, cfg, cff) + + R, x, y, z = ring("x,y,z", ZZ) + + f, g, h = map(R.from_dense, dmp_fateman_poly_F_1(2, ZZ)) + H, cff, cfg = R.dmp_zz_heu_gcd(f, g) + + assert H == h and R.dmp_mul(H, cff) == f \ + and R.dmp_mul(H, cfg) == g + + H, cff, cfg = R.dmp_rr_prs_gcd(f, g) + + assert H == h and R.dmp_mul(H, cff) == f \ + and R.dmp_mul(H, cfg) == g + + R, x, y, z, u, v = ring("x,y,z,u,v", ZZ) + + f, g, h = map(R.from_dense, dmp_fateman_poly_F_1(4, ZZ)) + H, cff, cfg = R.dmp_zz_heu_gcd(f, g) + + assert H == h and R.dmp_mul(H, cff) == f \ + and R.dmp_mul(H, cfg) == g + + R, x, y, z, u, v, a, b = ring("x,y,z,u,v,a,b", ZZ) + + f, g, h = map(R.from_dense, 
dmp_fateman_poly_F_1(6, ZZ)) + H, cff, cfg = R.dmp_zz_heu_gcd(f, g) + + assert H == h and R.dmp_mul(H, cff) == f \ + and R.dmp_mul(H, cfg) == g + + R, x, y, z, u, v, a, b, c, d = ring("x,y,z,u,v,a,b,c,d", ZZ) + + f, g, h = map(R.from_dense, dmp_fateman_poly_F_1(8, ZZ)) + H, cff, cfg = R.dmp_zz_heu_gcd(f, g) + + assert H == h and R.dmp_mul(H, cff) == f \ + and R.dmp_mul(H, cfg) == g + + R, x, y, z = ring("x,y,z", ZZ) + + f, g, h = map(R.from_dense, dmp_fateman_poly_F_2(2, ZZ)) + H, cff, cfg = R.dmp_zz_heu_gcd(f, g) + + assert H == h and R.dmp_mul(H, cff) == f \ + and R.dmp_mul(H, cfg) == g + + H, cff, cfg = R.dmp_rr_prs_gcd(f, g) + + assert H == h and R.dmp_mul(H, cff) == f \ + and R.dmp_mul(H, cfg) == g + + f, g, h = map(R.from_dense, dmp_fateman_poly_F_3(2, ZZ)) + H, cff, cfg = R.dmp_zz_heu_gcd(f, g) + + assert H == h and R.dmp_mul(H, cff) == f \ + and R.dmp_mul(H, cfg) == g + + H, cff, cfg = R.dmp_rr_prs_gcd(f, g) + + assert H == h and R.dmp_mul(H, cff) == f \ + and R.dmp_mul(H, cfg) == g + + R, x, y, z, u, v = ring("x,y,z,u,v", ZZ) + + f, g, h = map(R.from_dense, dmp_fateman_poly_F_3(4, ZZ)) + H, cff, cfg = R.dmp_inner_gcd(f, g) + + assert H == h and R.dmp_mul(H, cff) == f \ + and R.dmp_mul(H, cfg) == g + + R, x, y = ring("x,y", QQ) + + f = QQ(1,2)*x**2 + x + QQ(1,2) + g = QQ(1,2)*x + QQ(1,2) + + h = x + 1 + + assert R.dmp_qq_heu_gcd(f, g) == (h, g, QQ(1,2)) + assert R.dmp_ff_prs_gcd(f, g) == (h, g, QQ(1,2)) + + R, x, y = ring("x,y", RR) + + f = 2.1*x*y**2 - 2.2*x*y + 2.1*x + g = 1.0*x**3 + + assert R.dmp_ff_prs_gcd(f, g) == \ + (1.0*x, 2.1*y**2 - 2.2*y + 2.1, 1.0*x**2) + + +def test_dup_lcm(): + R, x = ring("x", ZZ) + + assert R.dup_lcm(2, 6) == 6 + + assert R.dup_lcm(2*x**3, 6*x) == 6*x**3 + assert R.dup_lcm(2*x**3, 3*x) == 6*x**3 + + assert R.dup_lcm(x**2 + x, x) == x**2 + x + assert R.dup_lcm(x**2 + x, 2*x) == 2*x**2 + 2*x + assert R.dup_lcm(x**2 + 2*x, x) == x**2 + 2*x + assert R.dup_lcm(2*x**2 + x, x) == 2*x**2 + x + assert R.dup_lcm(2*x**2 + x, 2*x) == 
4*x**2 + 2*x + + +def test_dmp_lcm(): + R, x, y = ring("x,y", ZZ) + + assert R.dmp_lcm(2, 6) == 6 + assert R.dmp_lcm(x, y) == x*y + + assert R.dmp_lcm(2*x**3, 6*x*y**2) == 6*x**3*y**2 + assert R.dmp_lcm(2*x**3, 3*x*y**2) == 6*x**3*y**2 + + assert R.dmp_lcm(x**2*y, x*y**2) == x**2*y**2 + + f = 2*x*y**5 - 3*x*y**4 - 2*x*y**3 + 3*x*y**2 + g = y**5 - 2*y**3 + y + h = 2*x*y**7 - 3*x*y**6 - 4*x*y**5 + 6*x*y**4 + 2*x*y**3 - 3*x*y**2 + + assert R.dmp_lcm(f, g) == h + + f = x**3 - 3*x**2*y - 9*x*y**2 - 5*y**3 + g = x**4 + 6*x**3*y + 12*x**2*y**2 + 10*x*y**3 + 3*y**4 + h = x**5 + x**4*y - 18*x**3*y**2 - 50*x**2*y**3 - 47*x*y**4 - 15*y**5 + + assert R.dmp_lcm(f, g) == h + + +def test_dmp_content(): + R, x,y = ring("x,y", ZZ) + + assert R.dmp_content(-2) == 2 + + f, g, F = 3*y**2 + 2*y + 1, 1, 0 + + for i in range(0, 5): + g *= f + F += x**i*g + + assert R.dmp_content(F) == f.drop(x) + + R, x,y,z = ring("x,y,z", ZZ) + + assert R.dmp_content(f_4) == 1 + assert R.dmp_content(f_5) == 1 + + R, x,y,z,t = ring("x,y,z,t", ZZ) + assert R.dmp_content(f_6) == 1 + + +def test_dmp_primitive(): + R, x,y = ring("x,y", ZZ) + + assert R.dmp_primitive(0) == (0, 0) + assert R.dmp_primitive(1) == (1, 1) + + f, g, F = 3*y**2 + 2*y + 1, 1, 0 + + for i in range(0, 5): + g *= f + F += x**i*g + + assert R.dmp_primitive(F) == (f.drop(x), F / f) + + R, x,y,z = ring("x,y,z", ZZ) + + cont, f = R.dmp_primitive(f_4) + assert cont == 1 and f == f_4 + cont, f = R.dmp_primitive(f_5) + assert cont == 1 and f == f_5 + + R, x,y,z,t = ring("x,y,z,t", ZZ) + + cont, f = R.dmp_primitive(f_6) + assert cont == 1 and f == f_6 + + +def test_dup_cancel(): + R, x = ring("x", ZZ) + + f = 2*x**2 - 2 + g = x**2 - 2*x + 1 + + p = 2*x + 2 + q = x - 1 + + assert R.dup_cancel(f, g) == (p, q) + assert R.dup_cancel(f, g, include=False) == (1, 1, p, q) + + f = -x - 2 + g = 3*x - 4 + + F = x + 2 + G = -3*x + 4 + + assert R.dup_cancel(f, g) == (f, g) + assert R.dup_cancel(F, G) == (f, g) + + assert R.dup_cancel(0, 0) == (0, 0) + 
assert R.dup_cancel(0, 0, include=False) == (1, 1, 0, 0) + + assert R.dup_cancel(x, 0) == (1, 0) + assert R.dup_cancel(x, 0, include=False) == (1, 1, 1, 0) + + assert R.dup_cancel(0, x) == (0, 1) + assert R.dup_cancel(0, x, include=False) == (1, 1, 0, 1) + + f = 0 + g = x + one = 1 + + assert R.dup_cancel(f, g, include=True) == (f, one) + + +def test_dmp_cancel(): + R, x, y = ring("x,y", ZZ) + + f = 2*x**2 - 2 + g = x**2 - 2*x + 1 + + p = 2*x + 2 + q = x - 1 + + assert R.dmp_cancel(f, g) == (p, q) + assert R.dmp_cancel(f, g, include=False) == (1, 1, p, q) + + assert R.dmp_cancel(0, 0) == (0, 0) + assert R.dmp_cancel(0, 0, include=False) == (1, 1, 0, 0) + + assert R.dmp_cancel(y, 0) == (1, 0) + assert R.dmp_cancel(y, 0, include=False) == (1, 1, 1, 0) + + assert R.dmp_cancel(0, y) == (0, 1) + assert R.dmp_cancel(0, y, include=False) == (1, 1, 0, 1) diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_fields.py b/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_fields.py new file mode 100644 index 0000000000000000000000000000000000000000..da9f3910159929cb0b7bb44dd08d879bdc3b61d6 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_fields.py @@ -0,0 +1,362 @@ +"""Test sparse rational functions. 
""" + +from sympy.polys.fields import field, sfield, FracField, FracElement +from sympy.polys.rings import ring +from sympy.polys.domains import ZZ, QQ +from sympy.polys.orderings import lex + +from sympy.testing.pytest import raises, XFAIL +from sympy.core import symbols, E +from sympy.core.numbers import Rational +from sympy.functions.elementary.exponential import (exp, log) +from sympy.functions.elementary.miscellaneous import sqrt + +def test_FracField___init__(): + F1 = FracField("x,y", ZZ, lex) + F2 = FracField("x,y", ZZ, lex) + F3 = FracField("x,y,z", ZZ, lex) + + assert F1.x == F1.gens[0] + assert F1.y == F1.gens[1] + assert F1.x == F2.x + assert F1.y == F2.y + assert F1.x != F3.x + assert F1.y != F3.y + +def test_FracField___hash__(): + F, x, y, z = field("x,y,z", QQ) + assert hash(F) + +def test_FracField___eq__(): + assert field("x,y,z", QQ)[0] == field("x,y,z", QQ)[0] + assert field("x,y,z", QQ)[0] is field("x,y,z", QQ)[0] + + assert field("x,y,z", QQ)[0] != field("x,y,z", ZZ)[0] + assert field("x,y,z", QQ)[0] is not field("x,y,z", ZZ)[0] + + assert field("x,y,z", ZZ)[0] != field("x,y,z", QQ)[0] + assert field("x,y,z", ZZ)[0] is not field("x,y,z", QQ)[0] + + assert field("x,y,z", QQ)[0] != field("x,y", QQ)[0] + assert field("x,y,z", QQ)[0] is not field("x,y", QQ)[0] + + assert field("x,y", QQ)[0] != field("x,y,z", QQ)[0] + assert field("x,y", QQ)[0] is not field("x,y,z", QQ)[0] + +def test_sfield(): + x = symbols("x") + + F = FracField((E, exp(exp(x)), exp(x)), ZZ, lex) + e, exex, ex = F.gens + assert sfield(exp(x)*exp(exp(x) + 1 + log(exp(x) + 3)/2)**2/(exp(x) + 3)) \ + == (F, e**2*exex**2*ex) + + F = FracField((x, exp(1/x), log(x), x**QQ(1, 3)), ZZ, lex) + _, ex, lg, x3 = F.gens + assert sfield(((x-3)*log(x)+4*x**2)*exp(1/x+log(x)/3)/x**2) == \ + (F, (4*F.x**2*ex + F.x*ex*lg - 3*ex*lg)/x3**5) + + F = FracField((x, log(x), sqrt(x + log(x))), ZZ, lex) + _, lg, srt = F.gens + assert sfield((x + 1) / (x * (x + log(x))**QQ(3, 2)) - 1/(x * log(x)**2)) \ + 
== (F, (F.x*lg**2 - F.x*srt + lg**2 - lg*srt)/ + (F.x**2*lg**2*srt + F.x*lg**3*srt)) + +def test_FracElement___hash__(): + F, x, y, z = field("x,y,z", QQ) + assert hash(x*y/z) + +def test_FracElement_copy(): + F, x, y, z = field("x,y,z", ZZ) + + f = x*y/3*z + g = f.copy() + + assert f == g + g.numer[(1, 1, 1)] = 7 + assert f != g + +def test_FracElement_as_expr(): + F, x, y, z = field("x,y,z", ZZ) + f = (3*x**2*y - x*y*z)/(7*z**3 + 1) + + X, Y, Z = F.symbols + g = (3*X**2*Y - X*Y*Z)/(7*Z**3 + 1) + + assert f != g + assert f.as_expr() == g + + X, Y, Z = symbols("x,y,z") + g = (3*X**2*Y - X*Y*Z)/(7*Z**3 + 1) + + assert f != g + assert f.as_expr(X, Y, Z) == g + + raises(ValueError, lambda: f.as_expr(X)) + +def test_FracElement_from_expr(): + x, y, z = symbols("x,y,z") + F, X, Y, Z = field((x, y, z), ZZ) + + f = F.from_expr(1) + assert f == 1 and isinstance(f, F.dtype) + + f = F.from_expr(Rational(3, 7)) + assert f == F(3)/7 and isinstance(f, F.dtype) + + f = F.from_expr(x) + assert f == X and isinstance(f, F.dtype) + + f = F.from_expr(Rational(3,7)*x) + assert f == X*Rational(3, 7) and isinstance(f, F.dtype) + + f = F.from_expr(1/x) + assert f == 1/X and isinstance(f, F.dtype) + + f = F.from_expr(x*y*z) + assert f == X*Y*Z and isinstance(f, F.dtype) + + f = F.from_expr(x*y/z) + assert f == X*Y/Z and isinstance(f, F.dtype) + + f = F.from_expr(x*y*z + x*y + x) + assert f == X*Y*Z + X*Y + X and isinstance(f, F.dtype) + + f = F.from_expr((x*y*z + x*y + x)/(x*y + 7)) + assert f == (X*Y*Z + X*Y + X)/(X*Y + 7) and isinstance(f, F.dtype) + + f = F.from_expr(x**3*y*z + x**2*y**7 + 1) + assert f == X**3*Y*Z + X**2*Y**7 + 1 and isinstance(f, F.dtype) + + raises(ValueError, lambda: F.from_expr(2**x)) + raises(ValueError, lambda: F.from_expr(7*x + sqrt(2))) + + assert isinstance(ZZ[2**x].get_field().convert(2**(-x)), + FracElement) + assert isinstance(ZZ[x**2].get_field().convert(x**(-6)), + FracElement) + assert isinstance(ZZ[exp(Rational(1, 3))].get_field().convert(E), + 
FracElement) + + +def test_FracField_nested(): + a, b, x = symbols('a b x') + F1 = ZZ.frac_field(a, b) + F2 = F1.frac_field(x) + frac = F2(a + b) + assert frac.numer == F1.poly_ring(x)(a + b) + assert frac.numer.coeffs() == [F1(a + b)] + assert frac.denom == F1.poly_ring(x)(1) + + F3 = ZZ.poly_ring(a, b) + F4 = F3.frac_field(x) + frac = F4(a + b) + assert frac.numer == F3.poly_ring(x)(a + b) + assert frac.numer.coeffs() == [F3(a + b)] + assert frac.denom == F3.poly_ring(x)(1) + + frac = F2(F3(a + b)) + assert frac.numer == F1.poly_ring(x)(a + b) + assert frac.numer.coeffs() == [F1(a + b)] + assert frac.denom == F1.poly_ring(x)(1) + + frac = F4(F1(a + b)) + assert frac.numer == F3.poly_ring(x)(a + b) + assert frac.numer.coeffs() == [F3(a + b)] + assert frac.denom == F3.poly_ring(x)(1) + + +def test_FracElement__lt_le_gt_ge__(): + F, x, y = field("x,y", ZZ) + + assert F(1) < 1/x < 1/x**2 < 1/x**3 + assert F(1) <= 1/x <= 1/x**2 <= 1/x**3 + + assert -7/x < 1/x < 3/x < y/x < 1/x**2 + assert -7/x <= 1/x <= 3/x <= y/x <= 1/x**2 + + assert 1/x**3 > 1/x**2 > 1/x > F(1) + assert 1/x**3 >= 1/x**2 >= 1/x >= F(1) + + assert 1/x**2 > y/x > 3/x > 1/x > -7/x + assert 1/x**2 >= y/x >= 3/x >= 1/x >= -7/x + +def test_FracElement___neg__(): + F, x,y = field("x,y", QQ) + + f = (7*x - 9)/y + g = (-7*x + 9)/y + + assert -f == g + assert -g == f + +def test_FracElement___add__(): + F, x,y = field("x,y", QQ) + + f, g = 1/x, 1/y + assert f + g == g + f == (x + y)/(x*y) + + assert x + F.ring.gens[0] == F.ring.gens[0] + x == 2*x + + F, x,y = field("x,y", ZZ) + assert x + 3 == 3 + x + assert x + QQ(3,7) == QQ(3,7) + x == (7*x + 3)/7 + + Fuv, u,v = field("u,v", ZZ) + Fxyzt, x,y,z,t = field("x,y,z,t", Fuv) + + f = (u*v + x)/(y + u*v) + assert dict(f.numer) == {(1, 0, 0, 0): 1, (0, 0, 0, 0): u*v} + assert dict(f.denom) == {(0, 1, 0, 0): 1, (0, 0, 0, 0): u*v} + + Ruv, u,v = ring("u,v", ZZ) + Fxyzt, x,y,z,t = field("x,y,z,t", Ruv) + + f = (u*v + x)/(y + u*v) + assert dict(f.numer) == {(1, 0, 0, 0): 
1, (0, 0, 0, 0): u*v} + assert dict(f.denom) == {(0, 1, 0, 0): 1, (0, 0, 0, 0): u*v} + +def test_FracElement___sub__(): + F, x,y = field("x,y", QQ) + + f, g = 1/x, 1/y + assert f - g == (-x + y)/(x*y) + + assert x - F.ring.gens[0] == F.ring.gens[0] - x == 0 + + F, x,y = field("x,y", ZZ) + assert x - 3 == -(3 - x) + assert x - QQ(3,7) == -(QQ(3,7) - x) == (7*x - 3)/7 + + Fuv, u,v = field("u,v", ZZ) + Fxyzt, x,y,z,t = field("x,y,z,t", Fuv) + + f = (u*v - x)/(y - u*v) + assert dict(f.numer) == {(1, 0, 0, 0):-1, (0, 0, 0, 0): u*v} + assert dict(f.denom) == {(0, 1, 0, 0): 1, (0, 0, 0, 0):-u*v} + + Ruv, u,v = ring("u,v", ZZ) + Fxyzt, x,y,z,t = field("x,y,z,t", Ruv) + + f = (u*v - x)/(y - u*v) + assert dict(f.numer) == {(1, 0, 0, 0):-1, (0, 0, 0, 0): u*v} + assert dict(f.denom) == {(0, 1, 0, 0): 1, (0, 0, 0, 0):-u*v} + +def test_FracElement___mul__(): + F, x,y = field("x,y", QQ) + + f, g = 1/x, 1/y + assert f*g == g*f == 1/(x*y) + + assert x*F.ring.gens[0] == F.ring.gens[0]*x == x**2 + + F, x,y = field("x,y", ZZ) + assert x*3 == 3*x + assert x*QQ(3,7) == QQ(3,7)*x == x*Rational(3, 7) + + Fuv, u,v = field("u,v", ZZ) + Fxyzt, x,y,z,t = field("x,y,z,t", Fuv) + + f = ((u + 1)*x*y + 1)/((v - 1)*z - t*u*v - 1) + assert dict(f.numer) == {(1, 1, 0, 0): u + 1, (0, 0, 0, 0): 1} + assert dict(f.denom) == {(0, 0, 1, 0): v - 1, (0, 0, 0, 1): -u*v, (0, 0, 0, 0): -1} + + Ruv, u,v = ring("u,v", ZZ) + Fxyzt, x,y,z,t = field("x,y,z,t", Ruv) + + f = ((u + 1)*x*y + 1)/((v - 1)*z - t*u*v - 1) + assert dict(f.numer) == {(1, 1, 0, 0): u + 1, (0, 0, 0, 0): 1} + assert dict(f.denom) == {(0, 0, 1, 0): v - 1, (0, 0, 0, 1): -u*v, (0, 0, 0, 0): -1} + +def test_FracElement___truediv__(): + F, x,y = field("x,y", QQ) + + f, g = 1/x, 1/y + assert f/g == y/x + + assert x/F.ring.gens[0] == F.ring.gens[0]/x == 1 + + F, x,y = field("x,y", ZZ) + assert x*3 == 3*x + assert x/QQ(3,7) == (QQ(3,7)/x)**-1 == x*Rational(7, 3) + + raises(ZeroDivisionError, lambda: x/0) + raises(ZeroDivisionError, lambda: 1/(x - x)) 
+ raises(ZeroDivisionError, lambda: x/(x - x)) + + Fuv, u,v = field("u,v", ZZ) + Fxyzt, x,y,z,t = field("x,y,z,t", Fuv) + + f = (u*v)/(x*y) + assert dict(f.numer) == {(0, 0, 0, 0): u*v} + assert dict(f.denom) == {(1, 1, 0, 0): 1} + + g = (x*y)/(u*v) + assert dict(g.numer) == {(1, 1, 0, 0): 1} + assert dict(g.denom) == {(0, 0, 0, 0): u*v} + + Ruv, u,v = ring("u,v", ZZ) + Fxyzt, x,y,z,t = field("x,y,z,t", Ruv) + + f = (u*v)/(x*y) + assert dict(f.numer) == {(0, 0, 0, 0): u*v} + assert dict(f.denom) == {(1, 1, 0, 0): 1} + + g = (x*y)/(u*v) + assert dict(g.numer) == {(1, 1, 0, 0): 1} + assert dict(g.denom) == {(0, 0, 0, 0): u*v} + +def test_FracElement___pow__(): + F, x,y = field("x,y", QQ) + + f, g = 1/x, 1/y + + assert f**3 == 1/x**3 + assert g**3 == 1/y**3 + + assert (f*g)**3 == 1/(x**3*y**3) + assert (f*g)**-3 == (x*y)**3 + + raises(ZeroDivisionError, lambda: (x - x)**-3) + +def test_FracElement_diff(): + F, x,y,z = field("x,y,z", ZZ) + + assert ((x**2 + y)/(z + 1)).diff(x) == 2*x/(z + 1) + +@XFAIL +def test_FracElement___call__(): + F, x,y,z = field("x,y,z", ZZ) + f = (x**2 + 3*y)/z + + r = f(1, 1, 1) + assert r == 4 and not isinstance(r, FracElement) + raises(ZeroDivisionError, lambda: f(1, 1, 0)) + +def test_FracElement_evaluate(): + F, x,y,z = field("x,y,z", ZZ) + Fyz = field("y,z", ZZ)[0] + f = (x**2 + 3*y)/z + + assert f.evaluate(x, 0) == 3*Fyz.y/Fyz.z + raises(ZeroDivisionError, lambda: f.evaluate(z, 0)) + +def test_FracElement_subs(): + F, x,y,z = field("x,y,z", ZZ) + f = (x**2 + 3*y)/z + + assert f.subs(x, 0) == 3*y/z + raises(ZeroDivisionError, lambda: f.subs(z, 0)) + +def test_FracElement_compose(): + pass + +def test_FracField_index(): + a = symbols("a") + F, x, y, z = field('x y z', QQ) + assert F.index(x) == 0 + assert F.index(y) == 1 + + raises(ValueError, lambda: F.index(1)) + raises(ValueError, lambda: F.index(a)) + pass diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_heuristicgcd.py 
b/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_heuristicgcd.py new file mode 100644 index 0000000000000000000000000000000000000000..7ff6bd6ea4effbd49c5e942ea8925cfcca4ba162 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_heuristicgcd.py @@ -0,0 +1,152 @@ +from sympy.polys.rings import ring +from sympy.polys.domains import ZZ +from sympy.polys.heuristicgcd import heugcd + + +def test_heugcd_univariate_integers(): + R, x = ring("x", ZZ) + + f = x**4 + 8*x**3 + 21*x**2 + 22*x + 8 + g = x**3 + 6*x**2 + 11*x + 6 + + h = x**2 + 3*x + 2 + + cff = x**2 + 5*x + 4 + cfg = x + 3 + + assert heugcd(f, g) == (h, cff, cfg) + + f = x**4 - 4 + g = x**4 + 4*x**2 + 4 + + h = x**2 + 2 + + cff = x**2 - 2 + cfg = x**2 + 2 + + assert heugcd(f, g) == (h, cff, cfg) + + f = x**8 + x**6 - 3*x**4 - 3*x**3 + 8*x**2 + 2*x - 5 + g = 3*x**6 + 5*x**4 - 4*x**2 - 9*x + 21 + + h = 1 + + cff = f + cfg = g + + assert heugcd(f, g) == (h, cff, cfg) + + f = - 352518131239247345597970242177235495263669787845475025293906825864749649589178600387510272*x**49 \ + + 46818041807522713962450042363465092040687472354933295397472942006618953623327997952*x**42 \ + + 378182690892293941192071663536490788434899030680411695933646320291525827756032*x**35 \ + + 112806468807371824947796775491032386836656074179286744191026149539708928*x**28 \ + - 12278371209708240950316872681744825481125965781519138077173235712*x**21 \ + + 289127344604779611146960547954288113529690984687482920704*x**14 \ + + 19007977035740498977629742919480623972236450681*x**7 \ + + 311973482284542371301330321821976049 + + g = 365431878023781158602430064717380211405897160759702125019136*x**21 \ + + 197599133478719444145775798221171663643171734081650688*x**14 \ + - 9504116979659010018253915765478924103928886144*x**7 \ + - 311973482284542371301330321821976049 + + # TODO: assert heugcd(f, f.diff(x))[0] == g + + f = 1317378933230047068160*x + 2945748836994210856960 + g = 120352542776360960*x + 
269116466014453760 + + h = 120352542776360960*x + 269116466014453760 + cff = 10946 + cfg = 1 + + assert heugcd(f, g) == (h, cff, cfg) + +def test_heugcd_multivariate_integers(): + R, x, y = ring("x,y", ZZ) + + f, g = 2*x**2 + 4*x + 2, x + 1 + assert heugcd(f, g) == (x + 1, 2*x + 2, 1) + + f, g = x + 1, 2*x**2 + 4*x + 2 + assert heugcd(f, g) == (x + 1, 1, 2*x + 2) + + R, x, y, z, u = ring("x,y,z,u", ZZ) + + f, g = u**2 + 2*u + 1, 2*u + 2 + assert heugcd(f, g) == (u + 1, u + 1, 2) + + f, g = z**2*u**2 + 2*z**2*u + z**2 + z*u + z, u**2 + 2*u + 1 + h, cff, cfg = u + 1, z**2*u + z**2 + z, u + 1 + + assert heugcd(f, g) == (h, cff, cfg) + assert heugcd(g, f) == (h, cfg, cff) + + R, x, y, z = ring("x,y,z", ZZ) + + f, g, h = R.fateman_poly_F_1() + H, cff, cfg = heugcd(f, g) + + assert H == h and H*cff == f and H*cfg == g + + R, x, y, z, u, v = ring("x,y,z,u,v", ZZ) + + f, g, h = R.fateman_poly_F_1() + H, cff, cfg = heugcd(f, g) + + assert H == h and H*cff == f and H*cfg == g + + R, x, y, z, u, v, a, b = ring("x,y,z,u,v,a,b", ZZ) + + f, g, h = R.fateman_poly_F_1() + H, cff, cfg = heugcd(f, g) + + assert H == h and H*cff == f and H*cfg == g + + R, x, y, z, u, v, a, b, c, d = ring("x,y,z,u,v,a,b,c,d", ZZ) + + f, g, h = R.fateman_poly_F_1() + H, cff, cfg = heugcd(f, g) + + assert H == h and H*cff == f and H*cfg == g + + R, x, y, z = ring("x,y,z", ZZ) + + f, g, h = R.fateman_poly_F_2() + H, cff, cfg = heugcd(f, g) + + assert H == h and H*cff == f and H*cfg == g + + f, g, h = R.fateman_poly_F_3() + H, cff, cfg = heugcd(f, g) + + assert H == h and H*cff == f and H*cfg == g + + R, x, y, z, t = ring("x,y,z,t", ZZ) + + f, g, h = R.fateman_poly_F_3() + H, cff, cfg = heugcd(f, g) + + assert H == h and H*cff == f and H*cfg == g + + +def test_issue_10996(): + R, x, y, z = ring("x,y,z", ZZ) + + f = 12*x**6*y**7*z**3 - 3*x**4*y**9*z**3 + 12*x**3*y**5*z**4 + g = -48*x**7*y**8*z**3 + 12*x**5*y**10*z**3 - 48*x**5*y**7*z**2 + \ + 36*x**4*y**7*z - 48*x**4*y**6*z**4 + 12*x**3*y**9*z**2 - 
48*x**3*y**4 \ + - 9*x**2*y**9*z - 48*x**2*y**5*z**3 + 12*x*y**6 + 36*x*y**5*z**2 - 48*y**2*z + + H, cff, cfg = heugcd(f, g) + + assert H == 12*x**3*y**4 - 3*x*y**6 + 12*y**2*z + assert H*cff == f and H*cfg == g + + +def test_issue_25793(): + R, x = ring("x", ZZ) + f = x - 4851 # failure starts for values more than 4850 + g = f*(2*x + 1) + H, cff, cfg = R.dup_zz_heu_gcd(f, g) + assert H == f + # needs a test for dmp, too, that fails in master before this change diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_orderings.py b/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_orderings.py new file mode 100644 index 0000000000000000000000000000000000000000..d61d4887754c9d9f49905c2e131d253a45cf2ffd --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_orderings.py @@ -0,0 +1,124 @@ +"""Tests of monomial orderings. """ + +from sympy.polys.orderings import ( + monomial_key, lex, grlex, grevlex, ilex, igrlex, + LexOrder, InverseOrder, ProductOrder, build_product_order, +) + +from sympy.abc import x, y, z, t +from sympy.core import S +from sympy.testing.pytest import raises + +def test_lex_order(): + assert lex((1, 2, 3)) == (1, 2, 3) + assert str(lex) == 'lex' + + assert lex((1, 2, 3)) == lex((1, 2, 3)) + + assert lex((2, 2, 3)) > lex((1, 2, 3)) + assert lex((1, 3, 3)) > lex((1, 2, 3)) + assert lex((1, 2, 4)) > lex((1, 2, 3)) + + assert lex((0, 2, 3)) < lex((1, 2, 3)) + assert lex((1, 1, 3)) < lex((1, 2, 3)) + assert lex((1, 2, 2)) < lex((1, 2, 3)) + + assert lex.is_global is True + assert lex == LexOrder() + assert lex != grlex + +def test_grlex_order(): + assert grlex((1, 2, 3)) == (6, (1, 2, 3)) + assert str(grlex) == 'grlex' + + assert grlex((1, 2, 3)) == grlex((1, 2, 3)) + + assert grlex((2, 2, 3)) > grlex((1, 2, 3)) + assert grlex((1, 3, 3)) > grlex((1, 2, 3)) + assert grlex((1, 2, 4)) > grlex((1, 2, 3)) + + assert grlex((0, 2, 3)) < grlex((1, 2, 3)) + assert grlex((1, 1, 3)) < 
grlex((1, 2, 3)) + assert grlex((1, 2, 2)) < grlex((1, 2, 3)) + + assert grlex((2, 2, 3)) > grlex((1, 2, 4)) + assert grlex((1, 3, 3)) > grlex((1, 2, 4)) + + assert grlex((0, 2, 3)) < grlex((1, 2, 2)) + assert grlex((1, 1, 3)) < grlex((1, 2, 2)) + + assert grlex((0, 1, 1)) > grlex((0, 0, 2)) + assert grlex((0, 3, 1)) < grlex((2, 2, 1)) + + assert grlex.is_global is True + +def test_grevlex_order(): + assert grevlex((1, 2, 3)) == (6, (-3, -2, -1)) + assert str(grevlex) == 'grevlex' + + assert grevlex((1, 2, 3)) == grevlex((1, 2, 3)) + + assert grevlex((2, 2, 3)) > grevlex((1, 2, 3)) + assert grevlex((1, 3, 3)) > grevlex((1, 2, 3)) + assert grevlex((1, 2, 4)) > grevlex((1, 2, 3)) + + assert grevlex((0, 2, 3)) < grevlex((1, 2, 3)) + assert grevlex((1, 1, 3)) < grevlex((1, 2, 3)) + assert grevlex((1, 2, 2)) < grevlex((1, 2, 3)) + + assert grevlex((2, 2, 3)) > grevlex((1, 2, 4)) + assert grevlex((1, 3, 3)) > grevlex((1, 2, 4)) + + assert grevlex((0, 2, 3)) < grevlex((1, 2, 2)) + assert grevlex((1, 1, 3)) < grevlex((1, 2, 2)) + + assert grevlex((0, 1, 1)) > grevlex((0, 0, 2)) + assert grevlex((0, 3, 1)) < grevlex((2, 2, 1)) + + assert grevlex.is_global is True + +def test_InverseOrder(): + ilex = InverseOrder(lex) + igrlex = InverseOrder(grlex) + + assert ilex((1, 2, 3)) > ilex((2, 0, 3)) + assert igrlex((1, 2, 3)) < igrlex((0, 2, 3)) + assert str(ilex) == "ilex" + assert str(igrlex) == "igrlex" + assert ilex.is_global is False + assert igrlex.is_global is False + assert ilex != igrlex + assert ilex == InverseOrder(LexOrder()) + +def test_ProductOrder(): + P = ProductOrder((grlex, lambda m: m[:2]), (grlex, lambda m: m[2:])) + assert P((1, 3, 3, 4, 5)) > P((2, 1, 5, 5, 5)) + assert str(P) == "ProductOrder(grlex, grlex)" + assert P.is_global is True + assert ProductOrder((grlex, None), (ilex, None)).is_global is None + assert ProductOrder((igrlex, None), (ilex, None)).is_global is False + +def test_monomial_key(): + assert monomial_key() == lex + + assert 
monomial_key('lex') == lex + assert monomial_key('grlex') == grlex + assert monomial_key('grevlex') == grevlex + + raises(ValueError, lambda: monomial_key('foo')) + raises(ValueError, lambda: monomial_key(1)) + + M = [x, x**2*z**2, x*y, x**2, S.One, y**2, x**3, y, z, x*y**2*z, x**2*y**2] + assert sorted(M, key=monomial_key('lex', [z, y, x])) == \ + [S.One, x, x**2, x**3, y, x*y, y**2, x**2*y**2, z, x*y**2*z, x**2*z**2] + assert sorted(M, key=monomial_key('grlex', [z, y, x])) == \ + [S.One, x, y, z, x**2, x*y, y**2, x**3, x**2*y**2, x*y**2*z, x**2*z**2] + assert sorted(M, key=monomial_key('grevlex', [z, y, x])) == \ + [S.One, x, y, z, x**2, x*y, y**2, x**3, x**2*y**2, x**2*z**2, x*y**2*z] + +def test_build_product_order(): + assert build_product_order((("grlex", x, y), ("grlex", z, t)), [x, y, z, t])((4, 5, 6, 7)) == \ + ((9, (4, 5)), (13, (6, 7))) + + assert build_product_order((("grlex", x, y), ("grlex", z, t)), [x, y, z, t]) == \ + build_product_order((("grlex", x, y), ("grlex", z, t)), [x, y, z, t]) diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_partfrac.py b/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_partfrac.py new file mode 100644 index 0000000000000000000000000000000000000000..83c5d48383d20e67dbb53c081093ad35e654c9a0 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_partfrac.py @@ -0,0 +1,249 @@ +"""Tests for algorithms for partial fraction decomposition of rational +functions. 
""" + +from sympy.polys.partfrac import ( + apart_undetermined_coeffs, + apart, + apart_list, assemble_partfrac_list +) + +from sympy.core.expr import Expr +from sympy.core.function import Lambda +from sympy.core.numbers import (E, I, Rational, pi, all_close) +from sympy.core.relational import Eq +from sympy.core.singleton import S +from sympy.core.symbol import (Dummy, Symbol) +from sympy.functions.elementary.miscellaneous import sqrt +from sympy.matrices.dense import Matrix +from sympy.polys.polytools import (Poly, factor) +from sympy.polys.rationaltools import together +from sympy.polys.rootoftools import RootSum +from sympy.testing.pytest import raises, XFAIL +from sympy.abc import x, y, a, b, c + + +def test_apart(): + assert apart(1) == 1 + assert apart(1, x) == 1 + + f, g = (x**2 + 1)/(x + 1), 2/(x + 1) + x - 1 + + assert apart(f, full=False) == g + assert apart(f, full=True) == g + + f, g = 1/(x + 2)/(x + 1), 1/(1 + x) - 1/(2 + x) + + assert apart(f, full=False) == g + assert apart(f, full=True) == g + + f, g = 1/(x + 1)/(x + 5), -1/(5 + x)/4 + 1/(1 + x)/4 + + assert apart(f, full=False) == g + assert apart(f, full=True) == g + + assert apart((E*x + 2)/(x - pi)*(x - 1), x) == \ + 2 - E + E*pi + E*x + (E*pi + 2)*(pi - 1)/(x - pi) + + assert apart(Eq((x**2 + 1)/(x + 1), x), x) == Eq(x - 1 + 2/(x + 1), x) + + assert apart(x/2, y) == x/2 + + f, g = (x+y)/(2*x - y), Rational(3, 2)*y/(2*x - y) + S.Half + + assert apart(f, x, full=False) == g + assert apart(f, x, full=True) == g + + f, g = (x+y)/(2*x - y), 3*x/(2*x - y) - 1 + + assert apart(f, y, full=False) == g + assert apart(f, y, full=True) == g + + raises(NotImplementedError, lambda: apart(1/(x + 1)/(y + 2))) + + +def test_apart_matrix(): + M = Matrix(2, 2, lambda i, j: 1/(x + i + 1)/(x + j)) + + assert apart(M) == Matrix([ + [1/x - 1/(x + 1), (x + 1)**(-2)], + [1/(2*x) - (S.Half)/(x + 2), 1/(x + 1) - 1/(x + 2)], + ]) + + +def test_apart_symbolic(): + f = a*x**4 + (2*b + 2*a*c)*x**3 + (4*b*c - a**2 + 
a*c**2)*x**2 + \ + (-2*a*b + 2*b*c**2)*x - b**2 + g = a**2*x**4 + (2*a*b + 2*c*a**2)*x**3 + (4*a*b*c + b**2 + + a**2*c**2)*x**2 + (2*c*b**2 + 2*a*b*c**2)*x + b**2*c**2 + + assert apart(f/g, x) == 1/a - 1/(x + c)**2 - b**2/(a*(a*x + b)**2) + + assert apart(1/((x + a)*(x + b)*(x + c)), x) == \ + 1/((a - c)*(b - c)*(c + x)) - 1/((a - b)*(b - c)*(b + x)) + \ + 1/((a - b)*(a - c)*(a + x)) + + +def _make_extension_example(): + # https://github.com/sympy/sympy/issues/18531 + from sympy.core import Mul + def mul2(expr): + # 2-arg mul hack... + return Mul(2, expr, evaluate=False) + + f = ((x**2 + 1)**3/((x - 1)**2*(x + 1)**2*(-x**2 + 2*x + 1)*(x**2 + 2*x - 1))) + g = (1/mul2(x - sqrt(2) + 1) + - 1/mul2(x - sqrt(2) - 1) + + 1/mul2(x + 1 + sqrt(2)) + - 1/mul2(x - 1 + sqrt(2)) + + 1/mul2((x + 1)**2) + + 1/mul2((x - 1)**2)) + return f, g + + +def test_apart_extension(): + f = 2/(x**2 + 1) + g = I/(x + I) - I/(x - I) + + assert apart(f, extension=I) == g + assert apart(f, gaussian=True) == g + + f = x/((x - 2)*(x + I)) + + assert factor(together(apart(f)).expand()) == f + + f, g = _make_extension_example() + + # XXX: Only works with dotprodsimp. 
See test_apart_extension_xfail below + from sympy.matrices import dotprodsimp + with dotprodsimp(True): + assert apart(f, x, extension={sqrt(2)}) == g + + +def test_apart_extension_xfail(): + f, g = _make_extension_example() + assert apart(f, x, extension={sqrt(2)}) == g + + +def test_apart_full(): + f = 1/(x**2 + 1) + + assert apart(f, full=False) == f + assert apart(f, full=True).dummy_eq( + -RootSum(x**2 + 1, Lambda(a, a/(x - a)), auto=False)/2) + + f = 1/(x**3 + x + 1) + + assert apart(f, full=False) == f + assert apart(f, full=True).dummy_eq( + RootSum(x**3 + x + 1, + Lambda(a, (a**2*Rational(6, 31) - a*Rational(9, 31) + Rational(4, 31))/(x - a)), auto=False)) + + f = 1/(x**5 + 1) + + assert apart(f, full=False) == \ + (Rational(-1, 5))*((x**3 - 2*x**2 + 3*x - 4)/(x**4 - x**3 + x**2 - + x + 1)) + (Rational(1, 5))/(x + 1) + assert apart(f, full=True).dummy_eq( + -RootSum(x**4 - x**3 + x**2 - x + 1, + Lambda(a, a/(x - a)), auto=False)/5 + (Rational(1, 5))/(x + 1)) + + +def test_apart_full_floats(): + # https://github.com/sympy/sympy/issues/26648 + f = ( + 6.43369157032015e-9*x**3 + 1.35203404799555e-5*x**2 + + 0.00357538393743079*x + 0.085 + )/( + 4.74334912634438e-11*x**4 + 4.09576274286244e-6*x**3 + + 0.00334241812250921*x**2 + 0.15406018058983*x + 1.0 + ) + + expected = ( + 133.599202650992/(x + 85524.0054884464) + + 1.07757928431867/(x + 774.88576677949) + + 0.395006955518971/(x + 40.7977016133126) + + 0.564264854137341/(x + 7.79746609204661) + ) + + f_apart = apart(f, full=True).evalf() + + # There is a significant floating point error in this operation. 
+ assert all_close(f_apart, expected, rtol=1e-3, atol=1e-5) + + +def test_apart_undetermined_coeffs(): + p = Poly(2*x - 3) + q = Poly(x**9 - x**8 - x**6 + x**5 - 2*x**2 + 3*x - 1) + r = (-x**7 - x**6 - x**5 + 4)/(x**8 - x**5 - 2*x + 1) + 1/(x - 1) + + assert apart_undetermined_coeffs(p, q) == r + + p = Poly(1, x, domain='ZZ[a,b]') + q = Poly((x + a)*(x + b), x, domain='ZZ[a,b]') + r = 1/((a - b)*(b + x)) - 1/((a - b)*(a + x)) + + assert apart_undetermined_coeffs(p, q) == r + + +def test_apart_list(): + from sympy.utilities.iterables import numbered_symbols + def dummy_eq(i, j): + if type(i) in (list, tuple): + return all(dummy_eq(i, j) for i, j in zip(i, j)) + return i == j or i.dummy_eq(j) + + w0, w1, w2 = Symbol("w0"), Symbol("w1"), Symbol("w2") + _a = Dummy("a") + + f = (-2*x - 2*x**2) / (3*x**2 - 6*x) + got = apart_list(f, x, dummies=numbered_symbols("w")) + ans = (-1, Poly(Rational(2, 3), x, domain='QQ'), + [(Poly(w0 - 2, w0, domain='ZZ'), Lambda(_a, 2), Lambda(_a, -_a + x), 1)]) + assert dummy_eq(got, ans) + + got = apart_list(2/(x**2-2), x, dummies=numbered_symbols("w")) + ans = (1, Poly(0, x, domain='ZZ'), [(Poly(w0**2 - 2, w0, domain='ZZ'), + Lambda(_a, _a/2), + Lambda(_a, -_a + x), 1)]) + assert dummy_eq(got, ans) + + f = 36 / (x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2) + got = apart_list(f, x, dummies=numbered_symbols("w")) + ans = (1, Poly(0, x, domain='ZZ'), + [(Poly(w0 - 2, w0, domain='ZZ'), Lambda(_a, 4), Lambda(_a, -_a + x), 1), + (Poly(w1**2 - 1, w1, domain='ZZ'), Lambda(_a, -3*_a - 6), Lambda(_a, -_a + x), 2), + (Poly(w2 + 1, w2, domain='ZZ'), Lambda(_a, -4), Lambda(_a, -_a + x), 1)]) + assert dummy_eq(got, ans) + + +def test_assemble_partfrac_list(): + f = 36 / (x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2) + pfd = apart_list(f) + assert assemble_partfrac_list(pfd) == -4/(x + 1) - 3/(x + 1)**2 - 9/(x - 1)**2 + 4/(x - 2) + + a = Dummy("a") + pfd = (1, Poly(0, x, domain='ZZ'), [([sqrt(2),-sqrt(2)], Lambda(a, a/2), Lambda(a, -a + x), 1)]) + assert 
assemble_partfrac_list(pfd) == -1/(sqrt(2)*(x + sqrt(2))) + 1/(sqrt(2)*(x - sqrt(2))) + + +@XFAIL +def test_noncommutative_pseudomultivariate(): + # apart doesn't go inside noncommutative expressions + class foo(Expr): + is_commutative=False + e = x/(x + x*y) + c = 1/(1 + y) + assert apart(e + foo(e)) == c + foo(c) + assert apart(e*foo(e)) == c*foo(c) + +def test_noncommutative(): + class foo(Expr): + is_commutative=False + e = x/(x + x*y) + c = 1/(1 + y) + assert apart(e + foo()) == c + foo() + +def test_issue_5798(): + assert apart( + 2*x/(x**2 + 1) - (x - 1)/(2*(x**2 + 1)) + 1/(2*(x + 1)) - 2/x) == \ + (3*x + 1)/(x**2 + 1)/2 + 1/(x + 1)/2 - 2/x diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_polyclasses.py b/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_polyclasses.py new file mode 100644 index 0000000000000000000000000000000000000000..da7a924528702bfb2e6527bd68a566be41583221 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_polyclasses.py @@ -0,0 +1,588 @@ +"""Tests for OO layer of several polynomial representations. 
""" + +from sympy.functions.elementary.miscellaneous import sqrt +from sympy.polys.domains import ZZ, QQ +from sympy.polys.polyclasses import DMP, DMF, ANP +from sympy.polys.polyerrors import (CoercionFailed, ExactQuotientFailed, + NotInvertible) +from sympy.polys.specialpolys import f_polys +from sympy.testing.pytest import raises, warns_deprecated_sympy + +f_0, f_1, f_2, f_3, f_4, f_5, f_6 = [ f.to_dense() for f in f_polys() ] + +def test_DMP___init__(): + f = DMP([[ZZ(0)], [], [ZZ(0), ZZ(1), ZZ(2)], [ZZ(3)]], ZZ) + + assert f._rep == [[1, 2], [3]] + assert f.dom == ZZ + assert f.lev == 1 + + f = DMP([[ZZ(1), ZZ(2)], [ZZ(3)]], ZZ, 1) + + assert f._rep == [[1, 2], [3]] + assert f.dom == ZZ + assert f.lev == 1 + + f = DMP.from_dict({(1, 1): ZZ(1), (0, 0): ZZ(2)}, 1, ZZ) + + assert f._rep == [[1, 0], [2]] + assert f.dom == ZZ + assert f.lev == 1 + + +def test_DMP_rep_deprecation(): + f = DMP([1, 2, 3], ZZ) + + with warns_deprecated_sympy(): + assert f.rep == [1, 2, 3] + + +def test_DMP___eq__(): + assert DMP([[ZZ(1), ZZ(2)], [ZZ(3)]], ZZ) == \ + DMP([[ZZ(1), ZZ(2)], [ZZ(3)]], ZZ) + + assert DMP([[ZZ(1), ZZ(2)], [ZZ(3)]], ZZ) == \ + DMP([[QQ(1), QQ(2)], [QQ(3)]], QQ) + assert DMP([[QQ(1), QQ(2)], [QQ(3)]], QQ) == \ + DMP([[ZZ(1), ZZ(2)], [ZZ(3)]], ZZ) + + assert DMP([[[ZZ(1)]]], ZZ) != DMP([[ZZ(1)]], ZZ) + assert DMP([[ZZ(1)]], ZZ) != DMP([[[ZZ(1)]]], ZZ) + + +def test_DMP___bool__(): + assert bool(DMP([[]], ZZ)) is False + assert bool(DMP([[ZZ(1)]], ZZ)) is True + + +def test_DMP_to_dict(): + f = DMP([[ZZ(3)], [], [ZZ(2)], [], [ZZ(8)]], ZZ) + + assert f.to_dict() == \ + {(4, 0): 3, (2, 0): 2, (0, 0): 8} + assert f.to_sympy_dict() == \ + {(4, 0): ZZ.to_sympy(3), (2, 0): ZZ.to_sympy(2), (0, 0): + ZZ.to_sympy(8)} + + +def test_DMP_properties(): + assert DMP([[]], ZZ).is_zero is True + assert DMP([[ZZ(1)]], ZZ).is_zero is False + + assert DMP([[ZZ(1)]], ZZ).is_one is True + assert DMP([[ZZ(2)]], ZZ).is_one is False + + assert DMP([[ZZ(1)]], ZZ).is_ground is True + 
assert DMP([[ZZ(1)], [ZZ(2)], [ZZ(1)]], ZZ).is_ground is False + + assert DMP([[ZZ(1)], [ZZ(2), ZZ(0)], [ZZ(1), ZZ(0)]], ZZ).is_sqf is True + assert DMP([[ZZ(1)], [ZZ(2), ZZ(0)], [ZZ(1), ZZ(0), ZZ(0)]], ZZ).is_sqf is False + + assert DMP([[ZZ(1), ZZ(2)], [ZZ(3)]], ZZ).is_monic is True + assert DMP([[ZZ(2), ZZ(2)], [ZZ(3)]], ZZ).is_monic is False + + assert DMP([[ZZ(1), ZZ(2)], [ZZ(3)]], ZZ).is_primitive is True + assert DMP([[ZZ(2), ZZ(4)], [ZZ(6)]], ZZ).is_primitive is False + + +def test_DMP_arithmetics(): + f = DMP([[ZZ(2)], [ZZ(2), ZZ(0)]], ZZ) + + assert f.mul_ground(2) == DMP([[ZZ(4)], [ZZ(4), ZZ(0)]], ZZ) + assert f.quo_ground(2) == DMP([[ZZ(1)], [ZZ(1), ZZ(0)]], ZZ) + + raises(ExactQuotientFailed, lambda: f.exquo_ground(3)) + + f = DMP([[ZZ(-5)]], ZZ) + g = DMP([[ZZ(5)]], ZZ) + + assert f.abs() == g + assert abs(f) == g + + assert g.neg() == f + assert -g == f + + h = DMP([[]], ZZ) + + assert f.add(g) == h + assert f + g == h + assert g + f == h + assert f + 5 == h + assert 5 + f == h + + h = DMP([[ZZ(-10)]], ZZ) + + assert f.sub(g) == h + assert f - g == h + assert g - f == -h + assert f - 5 == h + assert 5 - f == -h + + h = DMP([[ZZ(-25)]], ZZ) + + assert f.mul(g) == h + assert f * g == h + assert g * f == h + assert f * 5 == h + assert 5 * f == h + + h = DMP([[ZZ(25)]], ZZ) + + assert f.sqr() == h + assert f.pow(2) == h + assert f**2 == h + + raises(TypeError, lambda: f.pow('x')) + + f = DMP([[ZZ(1)], [], [ZZ(1), ZZ(0), ZZ(0)]], ZZ) + g = DMP([[ZZ(2)], [ZZ(-2), ZZ(0)]], ZZ) + + q = DMP([[ZZ(2)], [ZZ(2), ZZ(0)]], ZZ) + r = DMP([[ZZ(8), ZZ(0), ZZ(0)]], ZZ) + + assert f.pdiv(g) == (q, r) + assert f.pquo(g) == q + assert f.prem(g) == r + + raises(ExactQuotientFailed, lambda: f.pexquo(g)) + + f = DMP([[ZZ(1)], [], [ZZ(1), ZZ(0), ZZ(0)]], ZZ) + g = DMP([[ZZ(1)], [ZZ(-1), ZZ(0)]], ZZ) + + q = DMP([[ZZ(1)], [ZZ(1), ZZ(0)]], ZZ) + r = DMP([[ZZ(2), ZZ(0), ZZ(0)]], ZZ) + + assert f.div(g) == (q, r) + assert f.quo(g) == q + assert f.rem(g) == r + + assert divmod(f, 
g) == (q, r) + assert f // g == q + assert f % g == r + + raises(ExactQuotientFailed, lambda: f.exquo(g)) + + f = DMP([ZZ(1), ZZ(0), ZZ(-1)], ZZ) + g = DMP([ZZ(2), ZZ(-2)], ZZ) + + q = DMP([], ZZ) + r = f + + pq = DMP([ZZ(2), ZZ(2)], ZZ) + pr = DMP([], ZZ) + + assert f.div(g) == (q, r) + assert f.quo(g) == q + assert f.rem(g) == r + + assert divmod(f, g) == (q, r) + assert f // g == q + assert f % g == r + + raises(ExactQuotientFailed, lambda: f.exquo(g)) + + assert f.pdiv(g) == (pq, pr) + assert f.pquo(g) == pq + assert f.prem(g) == pr + assert f.pexquo(g) == pq + + +def test_DMP_functionality(): + f = DMP([[ZZ(1)], [ZZ(2), ZZ(0)], [ZZ(1), ZZ(0), ZZ(0)]], ZZ) + g = DMP([[ZZ(1)], [ZZ(1), ZZ(0)]], ZZ) + h = DMP([[ZZ(1)]], ZZ) + + assert f.degree() == 2 + assert f.degree_list() == (2, 2) + assert f.total_degree() == 2 + + assert f.LC() == ZZ(1) + assert f.TC() == ZZ(0) + assert f.nth(1, 1) == ZZ(2) + + raises(TypeError, lambda: f.nth(0, 'x')) + + assert f.max_norm() == 2 + assert f.l1_norm() == 4 + + u = DMP([[ZZ(2)], [ZZ(2), ZZ(0)]], ZZ) + + assert f.diff(m=1, j=0) == u + assert f.diff(m=1, j=1) == u + + raises(TypeError, lambda: f.diff(m='x', j=0)) + + u = DMP([ZZ(1), ZZ(2), ZZ(1)], ZZ) + v = DMP([ZZ(1), ZZ(2), ZZ(1)], ZZ) + + assert f.eval(a=1, j=0) == u + assert f.eval(a=1, j=1) == v + + assert f.eval(1).eval(1) == ZZ(4) + + assert f.cofactors(g) == (g, g, h) + assert f.gcd(g) == g + assert f.lcm(g) == f + + u = DMP([[QQ(45), QQ(30), QQ(5)]], QQ) + v = DMP([[QQ(1), QQ(2, 3), QQ(1, 9)]], QQ) + + assert u.monic() == v + + assert (4*f).content() == ZZ(4) + assert (4*f).primitive() == (ZZ(4), f) + + f = DMP([QQ(1,3), QQ(1)], QQ) + g = DMP([QQ(1,7), QQ(1)], QQ) + + assert f.cancel(g) == f.cancel(g, include=True) == ( + DMP([QQ(7), QQ(21)], QQ), + DMP([QQ(3), QQ(21)], QQ) + ) + assert f.cancel(g, include=False) == ( + QQ(7), + QQ(3), + DMP([QQ(1), QQ(3)], QQ), + DMP([QQ(1), QQ(7)], QQ) + ) + + f = DMP([[ZZ(1)], [ZZ(2)], [ZZ(3)], [ZZ(4)], [ZZ(5)], [ZZ(6)]], ZZ) + + 
assert f.trunc(3) == DMP([[ZZ(1)], [ZZ(-1)], [], [ZZ(1)], [ZZ(-1)], []], ZZ) + + f = DMP(f_4, ZZ) + + assert f.sqf_part() == -f + assert f.sqf_list() == (ZZ(-1), [(-f, 1)]) + + f = DMP([[ZZ(-1)], [], [], [ZZ(5)]], ZZ) + g = DMP([[ZZ(3), ZZ(1)], [], []], ZZ) + h = DMP([[ZZ(45), ZZ(30), ZZ(5)]], ZZ) + + r = DMP([ZZ(675), ZZ(675), ZZ(225), ZZ(25)], ZZ) + + assert f.subresultants(g) == [f, g, h] + assert f.resultant(g) == r + + f = DMP([ZZ(1), ZZ(3), ZZ(9), ZZ(-13)], ZZ) + + assert f.discriminant() == -11664 + + f = DMP([QQ(2), QQ(0)], QQ) + g = DMP([QQ(1), QQ(0), QQ(-16)], QQ) + + s = DMP([QQ(1, 32), QQ(0)], QQ) + t = DMP([QQ(-1, 16)], QQ) + h = DMP([QQ(1)], QQ) + + assert f.half_gcdex(g) == (s, h) + assert f.gcdex(g) == (s, t, h) + + assert f.invert(g) == s + + f = DMP([[QQ(1)], [QQ(2)], [QQ(3)]], QQ) + + raises(ValueError, lambda: f.half_gcdex(f)) + raises(ValueError, lambda: f.gcdex(f)) + + raises(ValueError, lambda: f.invert(f)) + + f = DMP(ZZ.map([1, 0, 20, 0, 150, 0, 500, 0, 625, -2, 0, -10, 9]), ZZ) + g = DMP([ZZ(1), ZZ(0), ZZ(0), ZZ(-2), ZZ(9)], ZZ) + h = DMP([ZZ(1), ZZ(0), ZZ(5), ZZ(0)], ZZ) + + assert g.compose(h) == f + assert f.decompose() == [g, h] + + f = DMP([[QQ(1)], [QQ(2)], [QQ(3)]], QQ) + + raises(ValueError, lambda: f.decompose()) + raises(ValueError, lambda: f.sturm()) + + +def test_DMP_exclude(): + f = [[[[[[[[[[[[[[[[[[[[[[[[[[ZZ(1)]], [[]]]]]]]]]]]]]]]]]]]]]]]]]] + J = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, + 18, 19, 20, 21, 22, 24, 25] + + assert DMP(f, ZZ).exclude() == (J, DMP([ZZ(1), ZZ(0)], ZZ)) + assert DMP([[ZZ(1)], [ZZ(1), ZZ(0)]], ZZ).exclude() ==\ + ([], DMP([[ZZ(1)], [ZZ(1), ZZ(0)]], ZZ)) + + +def test_DMF__init__(): + f = DMF(([[0], [], [0, 1, 2], [3]], [[1, 2, 3]]), ZZ) + + assert f.num == [[1, 2], [3]] + assert f.den == [[1, 2, 3]] + assert f.lev == 1 + assert f.dom == ZZ + + f = DMF(([[1, 2], [3]], [[1, 2, 3]]), ZZ, 1) + + assert f.num == [[1, 2], [3]] + assert f.den == [[1, 2, 3]] + assert f.lev == 1 + 
assert f.dom == ZZ + + f = DMF(([[-1], [-2]], [[3], [-4]]), ZZ) + + assert f.num == [[-1], [-2]] + assert f.den == [[3], [-4]] + assert f.lev == 1 + assert f.dom == ZZ + + f = DMF(([[1], [2]], [[-3], [4]]), ZZ) + + assert f.num == [[-1], [-2]] + assert f.den == [[3], [-4]] + assert f.lev == 1 + assert f.dom == ZZ + + f = DMF(([[1], [2]], [[-3], [4]]), ZZ) + + assert f.num == [[-1], [-2]] + assert f.den == [[3], [-4]] + assert f.lev == 1 + assert f.dom == ZZ + + f = DMF(([[]], [[-3], [4]]), ZZ) + + assert f.num == [[]] + assert f.den == [[1]] + assert f.lev == 1 + assert f.dom == ZZ + + f = DMF(17, ZZ, 1) + + assert f.num == [[17]] + assert f.den == [[1]] + assert f.lev == 1 + assert f.dom == ZZ + + f = DMF(([[1], [2]]), ZZ) + + assert f.num == [[1], [2]] + assert f.den == [[1]] + assert f.lev == 1 + assert f.dom == ZZ + + f = DMF([[0], [], [0, 1, 2], [3]], ZZ) + + assert f.num == [[1, 2], [3]] + assert f.den == [[1]] + assert f.lev == 1 + assert f.dom == ZZ + + f = DMF({(1, 1): 1, (0, 0): 2}, ZZ, 1) + + assert f.num == [[1, 0], [2]] + assert f.den == [[1]] + assert f.lev == 1 + assert f.dom == ZZ + + f = DMF(([[QQ(1)], [QQ(2)]], [[-QQ(3)], [QQ(4)]]), QQ) + + assert f.num == [[-QQ(1)], [-QQ(2)]] + assert f.den == [[QQ(3)], [-QQ(4)]] + assert f.lev == 1 + assert f.dom == QQ + + f = DMF(([[QQ(1, 5)], [QQ(2, 5)]], [[-QQ(3, 7)], [QQ(4, 7)]]), QQ) + + assert f.num == [[-QQ(7)], [-QQ(14)]] + assert f.den == [[QQ(15)], [-QQ(20)]] + assert f.lev == 1 + assert f.dom == QQ + + raises(ValueError, lambda: DMF(([1], [[1]]), ZZ)) + raises(ZeroDivisionError, lambda: DMF(([1], []), ZZ)) + + +def test_DMF__bool__(): + assert bool(DMF([[]], ZZ)) is False + assert bool(DMF([[1]], ZZ)) is True + + +def test_DMF_properties(): + assert DMF([[]], ZZ).is_zero is True + assert DMF([[]], ZZ).is_one is False + + assert DMF([[1]], ZZ).is_zero is False + assert DMF([[1]], ZZ).is_one is True + + assert DMF(([[1]], [[2]]), ZZ).is_one is False + + +def test_DMF_arithmetics(): + f = DMF([[7], 
[-9]], ZZ) + g = DMF([[-7], [9]], ZZ) + + assert f.neg() == -f == g + + f = DMF(([[1]], [[1], []]), ZZ) + g = DMF(([[1]], [[1, 0]]), ZZ) + + h = DMF(([[1], [1, 0]], [[1, 0], []]), ZZ) + + assert f.add(g) == f + g == h + assert g.add(f) == g + f == h + + h = DMF(([[-1], [1, 0]], [[1, 0], []]), ZZ) + + assert f.sub(g) == f - g == h + + h = DMF(([[1]], [[1, 0], []]), ZZ) + + assert f.mul(g) == f*g == h + assert g.mul(f) == g*f == h + + h = DMF(([[1, 0]], [[1], []]), ZZ) + + assert f.quo(g) == f/g == h + + h = DMF(([[1]], [[1], [], [], []]), ZZ) + + assert f.pow(3) == f**3 == h + + h = DMF(([[1]], [[1, 0, 0, 0]]), ZZ) + + assert g.pow(3) == g**3 == h + + h = DMF(([[1, 0]], [[1]]), ZZ) + + assert g.pow(-1) == g**-1 == h + + +def test_ANP___init__(): + rep = [QQ(1), QQ(1)] + mod = [QQ(1), QQ(0), QQ(1)] + + f = ANP(rep, mod, QQ) + + assert f.to_list() == [QQ(1), QQ(1)] + assert f.mod_to_list() == [QQ(1), QQ(0), QQ(1)] + assert f.dom == QQ + + rep = {1: QQ(1), 0: QQ(1)} + mod = {2: QQ(1), 0: QQ(1)} + + f = ANP(rep, mod, QQ) + + assert f.to_list() == [QQ(1), QQ(1)] + assert f.mod_to_list() == [QQ(1), QQ(0), QQ(1)] + assert f.dom == QQ + + f = ANP(1, mod, QQ) + + assert f.to_list() == [QQ(1)] + assert f.mod_to_list() == [QQ(1), QQ(0), QQ(1)] + assert f.dom == QQ + + f = ANP([1, 0.5], mod, QQ) + + assert all(QQ.of_type(a) for a in f.to_list()) + + raises(CoercionFailed, lambda: ANP([sqrt(2)], mod, QQ)) + + +def test_ANP___eq__(): + a = ANP([QQ(1), QQ(1)], [QQ(1), QQ(0), QQ(1)], QQ) + b = ANP([QQ(1), QQ(1)], [QQ(1), QQ(0), QQ(2)], QQ) + + assert (a == a) is True + assert (a != a) is False + + assert (a == b) is False + assert (a != b) is True + + b = ANP([QQ(1), QQ(2)], [QQ(1), QQ(0), QQ(1)], QQ) + + assert (a == b) is False + assert (a != b) is True + + +def test_ANP___bool__(): + assert bool(ANP([], [QQ(1), QQ(0), QQ(1)], QQ)) is False + assert bool(ANP([QQ(1)], [QQ(1), QQ(0), QQ(1)], QQ)) is True + + +def test_ANP_properties(): + mod = [QQ(1), QQ(0), QQ(1)] + + assert 
ANP([QQ(0)], mod, QQ).is_zero is True + assert ANP([QQ(1)], mod, QQ).is_zero is False + + assert ANP([QQ(1)], mod, QQ).is_one is True + assert ANP([QQ(2)], mod, QQ).is_one is False + + +def test_ANP_arithmetics(): + mod = [QQ(1), QQ(0), QQ(0), QQ(-2)] + + a = ANP([QQ(2), QQ(-1), QQ(1)], mod, QQ) + b = ANP([QQ(1), QQ(2)], mod, QQ) + + c = ANP([QQ(-2), QQ(1), QQ(-1)], mod, QQ) + + assert a.neg() == -a == c + + c = ANP([QQ(2), QQ(0), QQ(3)], mod, QQ) + + assert a.add(b) == a + b == c + assert b.add(a) == b + a == c + + c = ANP([QQ(2), QQ(-2), QQ(-1)], mod, QQ) + + assert a.sub(b) == a - b == c + + c = ANP([QQ(-2), QQ(2), QQ(1)], mod, QQ) + + assert b.sub(a) == b - a == c + + c = ANP([QQ(3), QQ(-1), QQ(6)], mod, QQ) + + assert a.mul(b) == a*b == c + assert b.mul(a) == b*a == c + + c = ANP([QQ(-1, 43), QQ(9, 43), QQ(5, 43)], mod, QQ) + + assert a.pow(0) == a**(0) == ANP(1, mod, QQ) + assert a.pow(1) == a**(1) == a + + assert a.pow(-1) == a**(-1) == c + + assert a.quo(a) == a.mul(a.pow(-1)) == a*a**(-1) == ANP(1, mod, QQ) + + c = ANP([], [1, 0, 0, -2], QQ) + r1 = a.rem(b) + + (q, r2) = a.div(b) + + assert r1 == r2 == c == a % b + + raises(NotInvertible, lambda: a.div(c)) + raises(NotInvertible, lambda: a.rem(c)) + + # Comparison with "hard-coded" value fails despite looking identical + # from sympy import Rational + # c = ANP([Rational(11, 10), Rational(-1, 5), Rational(-3, 5)], [1, 0, 0, -2], QQ) + + assert q == a/b # == c + +def test_ANP_unify(): + mod_z = [ZZ(1), ZZ(0), ZZ(-2)] + mod_q = [QQ(1), QQ(0), QQ(-2)] + + a = ANP([QQ(1)], mod_q, QQ) + b = ANP([ZZ(1)], mod_z, ZZ) + + assert a.unify(b)[0] == QQ + assert b.unify(a)[0] == QQ + assert a.unify(a)[0] == QQ + assert b.unify(b)[0] == ZZ + + assert a.unify_ANP(b)[-1] == QQ + assert b.unify_ANP(a)[-1] == QQ + assert a.unify_ANP(a)[-1] == QQ + assert b.unify_ANP(b)[-1] == ZZ diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_polyoptions.py 
b/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_polyoptions.py new file mode 100644 index 0000000000000000000000000000000000000000..fa2e6054bad43aef5470949180ea5c2ffdc11f30 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_polyoptions.py @@ -0,0 +1,485 @@ +"""Tests for options manager for :class:`Poly` and public API functions. """ + +from sympy.polys.polyoptions import ( + Options, Expand, Gens, Wrt, Sort, Order, Field, Greedy, Domain, + Split, Gaussian, Extension, Modulus, Symmetric, Strict, Auto, + Frac, Formal, Polys, Include, All, Gen, Symbols, Method) + +from sympy.polys.orderings import lex +from sympy.polys.domains import FF, GF, ZZ, QQ, QQ_I, RR, CC, EX + +from sympy.polys.polyerrors import OptionError, GeneratorsError + +from sympy.core.numbers import (I, Integer) +from sympy.core.symbol import Symbol +from sympy.functions.elementary.miscellaneous import sqrt +from sympy.testing.pytest import raises +from sympy.abc import x, y, z + + +def test_Options_clone(): + opt = Options((x, y, z), {'domain': 'ZZ'}) + + assert opt.gens == (x, y, z) + assert opt.domain == ZZ + assert ('order' in opt) is False + + new_opt = opt.clone({'gens': (x, y), 'order': 'lex'}) + + assert opt.gens == (x, y, z) + assert opt.domain == ZZ + assert ('order' in opt) is False + + assert new_opt.gens == (x, y) + assert new_opt.domain == ZZ + assert ('order' in new_opt) is True + + +def test_Expand_preprocess(): + assert Expand.preprocess(False) is False + assert Expand.preprocess(True) is True + + assert Expand.preprocess(0) is False + assert Expand.preprocess(1) is True + + raises(OptionError, lambda: Expand.preprocess(x)) + + +def test_Expand_postprocess(): + opt = {'expand': True} + Expand.postprocess(opt) + + assert opt == {'expand': True} + + +def test_Gens_preprocess(): + assert Gens.preprocess((None,)) == () + assert Gens.preprocess((x, y, z)) == (x, y, z) + assert Gens.preprocess(((x, y, z),)) == (x, y, z) + + a = 
Symbol('a', commutative=False) + + raises(GeneratorsError, lambda: Gens.preprocess((x, x, y))) + raises(GeneratorsError, lambda: Gens.preprocess((x, y, a))) + + +def test_Gens_postprocess(): + opt = {'gens': (x, y)} + Gens.postprocess(opt) + + assert opt == {'gens': (x, y)} + + +def test_Wrt_preprocess(): + assert Wrt.preprocess(x) == ['x'] + assert Wrt.preprocess('') == [] + assert Wrt.preprocess(' ') == [] + assert Wrt.preprocess('x,y') == ['x', 'y'] + assert Wrt.preprocess('x y') == ['x', 'y'] + assert Wrt.preprocess('x, y') == ['x', 'y'] + assert Wrt.preprocess('x , y') == ['x', 'y'] + assert Wrt.preprocess(' x, y') == ['x', 'y'] + assert Wrt.preprocess(' x, y') == ['x', 'y'] + assert Wrt.preprocess([x, y]) == ['x', 'y'] + + raises(OptionError, lambda: Wrt.preprocess(',')) + raises(OptionError, lambda: Wrt.preprocess(0)) + + +def test_Wrt_postprocess(): + opt = {'wrt': ['x']} + Wrt.postprocess(opt) + + assert opt == {'wrt': ['x']} + + +def test_Sort_preprocess(): + assert Sort.preprocess([x, y, z]) == ['x', 'y', 'z'] + assert Sort.preprocess((x, y, z)) == ['x', 'y', 'z'] + + assert Sort.preprocess('x > y > z') == ['x', 'y', 'z'] + assert Sort.preprocess('x>y>z') == ['x', 'y', 'z'] + + raises(OptionError, lambda: Sort.preprocess(0)) + raises(OptionError, lambda: Sort.preprocess({x, y, z})) + + +def test_Sort_postprocess(): + opt = {'sort': 'x > y'} + Sort.postprocess(opt) + + assert opt == {'sort': 'x > y'} + + +def test_Order_preprocess(): + assert Order.preprocess('lex') == lex + + +def test_Order_postprocess(): + opt = {'order': True} + Order.postprocess(opt) + + assert opt == {'order': True} + + +def test_Field_preprocess(): + assert Field.preprocess(False) is False + assert Field.preprocess(True) is True + + assert Field.preprocess(0) is False + assert Field.preprocess(1) is True + + raises(OptionError, lambda: Field.preprocess(x)) + + +def test_Field_postprocess(): + opt = {'field': True} + Field.postprocess(opt) + + assert opt == {'field': True} + + +def 
test_Greedy_preprocess(): + assert Greedy.preprocess(False) is False + assert Greedy.preprocess(True) is True + + assert Greedy.preprocess(0) is False + assert Greedy.preprocess(1) is True + + raises(OptionError, lambda: Greedy.preprocess(x)) + + +def test_Greedy_postprocess(): + opt = {'greedy': True} + Greedy.postprocess(opt) + + assert opt == {'greedy': True} + + +def test_Domain_preprocess(): + assert Domain.preprocess(ZZ) == ZZ + assert Domain.preprocess(QQ) == QQ + assert Domain.preprocess(EX) == EX + assert Domain.preprocess(FF(2)) == FF(2) + assert Domain.preprocess(ZZ[x, y]) == ZZ[x, y] + + assert Domain.preprocess('Z') == ZZ + assert Domain.preprocess('Q') == QQ + + assert Domain.preprocess('ZZ') == ZZ + assert Domain.preprocess('QQ') == QQ + + assert Domain.preprocess('EX') == EX + + assert Domain.preprocess('FF(23)') == FF(23) + assert Domain.preprocess('GF(23)') == GF(23) + + raises(OptionError, lambda: Domain.preprocess('Z[]')) + + assert Domain.preprocess('Z[x]') == ZZ[x] + assert Domain.preprocess('Q[x]') == QQ[x] + assert Domain.preprocess('R[x]') == RR[x] + assert Domain.preprocess('C[x]') == CC[x] + + assert Domain.preprocess('ZZ[x]') == ZZ[x] + assert Domain.preprocess('QQ[x]') == QQ[x] + assert Domain.preprocess('RR[x]') == RR[x] + assert Domain.preprocess('CC[x]') == CC[x] + + assert Domain.preprocess('Z[x,y]') == ZZ[x, y] + assert Domain.preprocess('Q[x,y]') == QQ[x, y] + assert Domain.preprocess('R[x,y]') == RR[x, y] + assert Domain.preprocess('C[x,y]') == CC[x, y] + + assert Domain.preprocess('ZZ[x,y]') == ZZ[x, y] + assert Domain.preprocess('QQ[x,y]') == QQ[x, y] + assert Domain.preprocess('RR[x,y]') == RR[x, y] + assert Domain.preprocess('CC[x,y]') == CC[x, y] + + raises(OptionError, lambda: Domain.preprocess('Z()')) + + assert Domain.preprocess('Z(x)') == ZZ.frac_field(x) + assert Domain.preprocess('Q(x)') == QQ.frac_field(x) + + assert Domain.preprocess('ZZ(x)') == ZZ.frac_field(x) + assert Domain.preprocess('QQ(x)') == QQ.frac_field(x) 
+ + assert Domain.preprocess('Z(x,y)') == ZZ.frac_field(x, y) + assert Domain.preprocess('Q(x,y)') == QQ.frac_field(x, y) + + assert Domain.preprocess('ZZ(x,y)') == ZZ.frac_field(x, y) + assert Domain.preprocess('QQ(x,y)') == QQ.frac_field(x, y) + + assert Domain.preprocess('Q') == QQ.algebraic_field(I) + assert Domain.preprocess('QQ') == QQ.algebraic_field(I) + + assert Domain.preprocess('Q') == QQ.algebraic_field(sqrt(2), I) + assert Domain.preprocess( + 'QQ') == QQ.algebraic_field(sqrt(2), I) + + raises(OptionError, lambda: Domain.preprocess('abc')) + + +def test_Domain_postprocess(): + raises(GeneratorsError, lambda: Domain.postprocess({'gens': (x, y), + 'domain': ZZ[y, z]})) + + raises(GeneratorsError, lambda: Domain.postprocess({'gens': (), + 'domain': EX})) + raises(GeneratorsError, lambda: Domain.postprocess({'domain': EX})) + + +def test_Split_preprocess(): + assert Split.preprocess(False) is False + assert Split.preprocess(True) is True + + assert Split.preprocess(0) is False + assert Split.preprocess(1) is True + + raises(OptionError, lambda: Split.preprocess(x)) + + +def test_Split_postprocess(): + raises(NotImplementedError, lambda: Split.postprocess({'split': True})) + + +def test_Gaussian_preprocess(): + assert Gaussian.preprocess(False) is False + assert Gaussian.preprocess(True) is True + + assert Gaussian.preprocess(0) is False + assert Gaussian.preprocess(1) is True + + raises(OptionError, lambda: Gaussian.preprocess(x)) + + +def test_Gaussian_postprocess(): + opt = {'gaussian': True} + Gaussian.postprocess(opt) + + assert opt == { + 'gaussian': True, + 'domain': QQ_I, + } + + +def test_Extension_preprocess(): + assert Extension.preprocess(True) is True + assert Extension.preprocess(1) is True + + assert Extension.preprocess([]) is None + + assert Extension.preprocess(sqrt(2)) == {sqrt(2)} + assert Extension.preprocess([sqrt(2)]) == {sqrt(2)} + + assert Extension.preprocess([sqrt(2), I]) == {sqrt(2), I} + + raises(OptionError, lambda: 
Extension.preprocess(False)) + raises(OptionError, lambda: Extension.preprocess(0)) + + +def test_Extension_postprocess(): + opt = {'extension': {sqrt(2)}} + Extension.postprocess(opt) + + assert opt == { + 'extension': {sqrt(2)}, + 'domain': QQ.algebraic_field(sqrt(2)), + } + + opt = {'extension': True} + Extension.postprocess(opt) + + assert opt == {'extension': True} + + +def test_Modulus_preprocess(): + assert Modulus.preprocess(23) == 23 + assert Modulus.preprocess(Integer(23)) == 23 + + raises(OptionError, lambda: Modulus.preprocess(0)) + raises(OptionError, lambda: Modulus.preprocess(x)) + + +def test_Modulus_postprocess(): + opt = {'modulus': 5} + Modulus.postprocess(opt) + + assert opt == { + 'modulus': 5, + 'domain': FF(5), + } + + opt = {'modulus': 5, 'symmetric': False} + Modulus.postprocess(opt) + + assert opt == { + 'modulus': 5, + 'domain': FF(5, False), + 'symmetric': False, + } + + +def test_Symmetric_preprocess(): + assert Symmetric.preprocess(False) is False + assert Symmetric.preprocess(True) is True + + assert Symmetric.preprocess(0) is False + assert Symmetric.preprocess(1) is True + + raises(OptionError, lambda: Symmetric.preprocess(x)) + + +def test_Symmetric_postprocess(): + opt = {'symmetric': True} + Symmetric.postprocess(opt) + + assert opt == {'symmetric': True} + + +def test_Strict_preprocess(): + assert Strict.preprocess(False) is False + assert Strict.preprocess(True) is True + + assert Strict.preprocess(0) is False + assert Strict.preprocess(1) is True + + raises(OptionError, lambda: Strict.preprocess(x)) + + +def test_Strict_postprocess(): + opt = {'strict': True} + Strict.postprocess(opt) + + assert opt == {'strict': True} + + +def test_Auto_preprocess(): + assert Auto.preprocess(False) is False + assert Auto.preprocess(True) is True + + assert Auto.preprocess(0) is False + assert Auto.preprocess(1) is True + + raises(OptionError, lambda: Auto.preprocess(x)) + + +def test_Auto_postprocess(): + opt = {'auto': True} + 
Auto.postprocess(opt) + + assert opt == {'auto': True} + + +def test_Frac_preprocess(): + assert Frac.preprocess(False) is False + assert Frac.preprocess(True) is True + + assert Frac.preprocess(0) is False + assert Frac.preprocess(1) is True + + raises(OptionError, lambda: Frac.preprocess(x)) + + +def test_Frac_postprocess(): + opt = {'frac': True} + Frac.postprocess(opt) + + assert opt == {'frac': True} + + +def test_Formal_preprocess(): + assert Formal.preprocess(False) is False + assert Formal.preprocess(True) is True + + assert Formal.preprocess(0) is False + assert Formal.preprocess(1) is True + + raises(OptionError, lambda: Formal.preprocess(x)) + + +def test_Formal_postprocess(): + opt = {'formal': True} + Formal.postprocess(opt) + + assert opt == {'formal': True} + + +def test_Polys_preprocess(): + assert Polys.preprocess(False) is False + assert Polys.preprocess(True) is True + + assert Polys.preprocess(0) is False + assert Polys.preprocess(1) is True + + raises(OptionError, lambda: Polys.preprocess(x)) + + +def test_Polys_postprocess(): + opt = {'polys': True} + Polys.postprocess(opt) + + assert opt == {'polys': True} + + +def test_Include_preprocess(): + assert Include.preprocess(False) is False + assert Include.preprocess(True) is True + + assert Include.preprocess(0) is False + assert Include.preprocess(1) is True + + raises(OptionError, lambda: Include.preprocess(x)) + + +def test_Include_postprocess(): + opt = {'include': True} + Include.postprocess(opt) + + assert opt == {'include': True} + + +def test_All_preprocess(): + assert All.preprocess(False) is False + assert All.preprocess(True) is True + + assert All.preprocess(0) is False + assert All.preprocess(1) is True + + raises(OptionError, lambda: All.preprocess(x)) + + +def test_All_postprocess(): + opt = {'all': True} + All.postprocess(opt) + + assert opt == {'all': True} + + +def test_Gen_postprocess(): + opt = {'gen': x} + Gen.postprocess(opt) + + assert opt == {'gen': x} + + +def 
test_Symbols_preprocess(): + raises(OptionError, lambda: Symbols.preprocess(x)) + + +def test_Symbols_postprocess(): + opt = {'symbols': [x, y, z]} + Symbols.postprocess(opt) + + assert opt == {'symbols': [x, y, z]} + + +def test_Method_preprocess(): + raises(OptionError, lambda: Method.preprocess(10)) + + +def test_Method_postprocess(): + opt = {'method': 'f5b'} + Method.postprocess(opt) + + assert opt == {'method': 'f5b'} diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_pythonrational.py b/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_pythonrational.py new file mode 100644 index 0000000000000000000000000000000000000000..547a5679626fd3a6165b151364bb506a574bb1db --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_pythonrational.py @@ -0,0 +1,139 @@ +"""Tests for PythonRational type. """ + +from sympy.polys.domains import PythonRational as QQ +from sympy.testing.pytest import raises + +def test_PythonRational__init__(): + assert QQ(0).numerator == 0 + assert QQ(0).denominator == 1 + assert QQ(0, 1).numerator == 0 + assert QQ(0, 1).denominator == 1 + assert QQ(0, -1).numerator == 0 + assert QQ(0, -1).denominator == 1 + + assert QQ(1).numerator == 1 + assert QQ(1).denominator == 1 + assert QQ(1, 1).numerator == 1 + assert QQ(1, 1).denominator == 1 + assert QQ(-1, -1).numerator == 1 + assert QQ(-1, -1).denominator == 1 + + assert QQ(-1).numerator == -1 + assert QQ(-1).denominator == 1 + assert QQ(-1, 1).numerator == -1 + assert QQ(-1, 1).denominator == 1 + assert QQ( 1, -1).numerator == -1 + assert QQ( 1, -1).denominator == 1 + + assert QQ(1, 2).numerator == 1 + assert QQ(1, 2).denominator == 2 + assert QQ(3, 4).numerator == 3 + assert QQ(3, 4).denominator == 4 + + assert QQ(2, 2).numerator == 1 + assert QQ(2, 2).denominator == 1 + assert QQ(2, 4).numerator == 1 + assert QQ(2, 4).denominator == 2 + +def test_PythonRational__hash__(): + assert hash(QQ(0)) == hash(0) + assert 
hash(QQ(1)) == hash(1) + assert hash(QQ(117)) == hash(117) + +def test_PythonRational__int__(): + assert int(QQ(-1, 4)) == 0 + assert int(QQ( 1, 4)) == 0 + assert int(QQ(-5, 4)) == -1 + assert int(QQ( 5, 4)) == 1 + +def test_PythonRational__float__(): + assert float(QQ(-1, 2)) == -0.5 + assert float(QQ( 1, 2)) == 0.5 + +def test_PythonRational__abs__(): + assert abs(QQ(-1, 2)) == QQ(1, 2) + assert abs(QQ( 1, 2)) == QQ(1, 2) + +def test_PythonRational__pos__(): + assert +QQ(-1, 2) == QQ(-1, 2) + assert +QQ( 1, 2) == QQ( 1, 2) + +def test_PythonRational__neg__(): + assert -QQ(-1, 2) == QQ( 1, 2) + assert -QQ( 1, 2) == QQ(-1, 2) + +def test_PythonRational__add__(): + assert QQ(-1, 2) + QQ( 1, 2) == QQ(0) + assert QQ( 1, 2) + QQ(-1, 2) == QQ(0) + + assert QQ(1, 2) + QQ(1, 2) == QQ(1) + assert QQ(1, 2) + QQ(3, 2) == QQ(2) + assert QQ(3, 2) + QQ(1, 2) == QQ(2) + assert QQ(3, 2) + QQ(3, 2) == QQ(3) + + assert 1 + QQ(1, 2) == QQ(3, 2) + assert QQ(1, 2) + 1 == QQ(3, 2) + +def test_PythonRational__sub__(): + assert QQ(-1, 2) - QQ( 1, 2) == QQ(-1) + assert QQ( 1, 2) - QQ(-1, 2) == QQ( 1) + + assert QQ(1, 2) - QQ(1, 2) == QQ( 0) + assert QQ(1, 2) - QQ(3, 2) == QQ(-1) + assert QQ(3, 2) - QQ(1, 2) == QQ( 1) + assert QQ(3, 2) - QQ(3, 2) == QQ( 0) + + assert 1 - QQ(1, 2) == QQ( 1, 2) + assert QQ(1, 2) - 1 == QQ(-1, 2) + +def test_PythonRational__mul__(): + assert QQ(-1, 2) * QQ( 1, 2) == QQ(-1, 4) + assert QQ( 1, 2) * QQ(-1, 2) == QQ(-1, 4) + + assert QQ(1, 2) * QQ(1, 2) == QQ(1, 4) + assert QQ(1, 2) * QQ(3, 2) == QQ(3, 4) + assert QQ(3, 2) * QQ(1, 2) == QQ(3, 4) + assert QQ(3, 2) * QQ(3, 2) == QQ(9, 4) + + assert 2 * QQ(1, 2) == QQ(1) + assert QQ(1, 2) * 2 == QQ(1) + +def test_PythonRational__truediv__(): + assert QQ(-1, 2) / QQ( 1, 2) == QQ(-1) + assert QQ( 1, 2) / QQ(-1, 2) == QQ(-1) + + assert QQ(1, 2) / QQ(1, 2) == QQ(1) + assert QQ(1, 2) / QQ(3, 2) == QQ(1, 3) + assert QQ(3, 2) / QQ(1, 2) == QQ(3) + assert QQ(3, 2) / QQ(3, 2) == QQ(1) + + assert 2 / QQ(1, 2) == QQ(4) + 
assert QQ(1, 2) / 2 == QQ(1, 4) + + raises(ZeroDivisionError, lambda: QQ(1, 2) / QQ(0)) + raises(ZeroDivisionError, lambda: QQ(1, 2) / 0) + +def test_PythonRational__pow__(): + assert QQ(1)**10 == QQ(1) + assert QQ(2)**10 == QQ(1024) + + assert QQ(1)**(-10) == QQ(1) + assert QQ(2)**(-10) == QQ(1, 1024) + +def test_PythonRational__eq__(): + assert (QQ(1, 2) == QQ(1, 2)) is True + assert (QQ(1, 2) != QQ(1, 2)) is False + + assert (QQ(1, 2) == QQ(1, 3)) is False + assert (QQ(1, 2) != QQ(1, 3)) is True + +def test_PythonRational__lt_le_gt_ge__(): + assert (QQ(1, 2) < QQ(1, 4)) is False + assert (QQ(1, 2) <= QQ(1, 4)) is False + assert (QQ(1, 2) > QQ(1, 4)) is True + assert (QQ(1, 2) >= QQ(1, 4)) is True + + assert (QQ(1, 4) < QQ(1, 2)) is True + assert (QQ(1, 4) <= QQ(1, 2)) is True + assert (QQ(1, 4) > QQ(1, 2)) is False + assert (QQ(1, 4) >= QQ(1, 2)) is False diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_rationaltools.py b/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_rationaltools.py new file mode 100644 index 0000000000000000000000000000000000000000..3ee0192a3fbc8997347df081663015afd91dd8ad --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_rationaltools.py @@ -0,0 +1,63 @@ +"""Tests for tools for manipulation of rational expressions. 
""" + +from sympy.polys.rationaltools import together + +from sympy.core.mul import Mul +from sympy.core.numbers import Rational +from sympy.core.relational import Eq +from sympy.core.singleton import S +from sympy.core.symbol import symbols +from sympy.functions.elementary.exponential import exp +from sympy.functions.elementary.trigonometric import sin +from sympy.integrals.integrals import Integral +from sympy.abc import x, y, z + +A, B = symbols('A,B', commutative=False) + + +def test_together(): + assert together(0) == 0 + assert together(1) == 1 + + assert together(x*y*z) == x*y*z + assert together(x + y) == x + y + + assert together(1/x) == 1/x + + assert together(1/x + 1) == (x + 1)/x + assert together(1/x + 3) == (3*x + 1)/x + assert together(1/x + x) == (x**2 + 1)/x + + assert together(1/x + S.Half) == (x + 2)/(2*x) + assert together(S.Half + x/2) == Mul(S.Half, x + 1, evaluate=False) + + assert together(1/x + 2/y) == (2*x + y)/(y*x) + assert together(1/(1 + 1/x)) == x/(1 + x) + assert together(x/(1 + 1/x)) == x**2/(1 + x) + + assert together(1/x + 1/y + 1/z) == (x*y + x*z + y*z)/(x*y*z) + assert together(1/(1 + x + 1/y + 1/z)) == y*z/(y + z + y*z + x*y*z) + + assert together(1/(x*y) + 1/(x*y)**2) == y**(-2)*x**(-2)*(1 + x*y) + assert together(1/(x*y) + 1/(x*y)**4) == y**(-4)*x**(-4)*(1 + x**3*y**3) + assert together(1/(x**7*y) + 1/(x*y)**4) == y**(-4)*x**(-7)*(x**3 + y**3) + + assert together(5/(2 + 6/(3 + 7/(4 + 8/(5 + 9/x))))) == \ + Rational(5, 2)*((171 + 119*x)/(279 + 203*x)) + + assert together(1 + 1/(x + 1)**2) == (1 + (x + 1)**2)/(x + 1)**2 + assert together(1 + 1/(x*(1 + x))) == (1 + x*(1 + x))/(x*(1 + x)) + assert together( + 1/(x*(x + 1)) + 1/(x*(x + 2))) == (3 + 2*x)/(x*(1 + x)*(2 + x)) + assert together(1 + 1/(2*x + 2)**2) == (4*(x + 1)**2 + 1)/(4*(x + 1)**2) + + assert together(sin(1/x + 1/y)) == sin(1/x + 1/y) + assert together(sin(1/x + 1/y), deep=True) == sin((x + y)/(x*y)) + + assert together(1/exp(x) + 1/(x*exp(x))) == (1 + x)/(x*exp(x)) 
+ assert together(1/exp(2*x) + 1/(x*exp(3*x))) == (1 + exp(x)*x)/(x*exp(3*x)) + + assert together(Integral(1/x + 1/y, x)) == Integral((x + y)/(x*y), x) + assert together(Eq(1/x + 1/y, 1 + 1/z)) == Eq((x + y)/(x*y), (z + 1)/z) + + assert together((A*B)**-1 + (B*A)**-1) == (A*B)**-1 + (B*A)**-1 diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_rootoftools.py b/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_rootoftools.py new file mode 100644 index 0000000000000000000000000000000000000000..e7a1ae0cb2b034fbeb013560669189a03eacf0f3 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_rootoftools.py @@ -0,0 +1,653 @@ +"""Tests for the implementation of RootOf class and related tools. """ + +from sympy.polys.polytools import Poly +import sympy.polys.rootoftools as rootoftools +from sympy.polys.rootoftools import (rootof, RootOf, CRootOf, RootSum, + _pure_key_dict as D) + +from sympy.polys.polyerrors import ( + MultivariatePolynomialError, + GeneratorsNeeded, + PolynomialError, +) + +from sympy.core.function import (Function, Lambda) +from sympy.core.numbers import (Float, I, Rational) +from sympy.core.relational import Eq +from sympy.core.singleton import S +from sympy.functions.elementary.exponential import (exp, log) +from sympy.functions.elementary.miscellaneous import sqrt +from sympy.functions.elementary.trigonometric import tan +from sympy.integrals.integrals import Integral +from sympy.polys.orthopolys import legendre_poly +from sympy.solvers.solvers import solve + + +from sympy.testing.pytest import raises, slow +from sympy.core.expr import unchanged + +from sympy.abc import a, b, x, y, z, r + + +def test_CRootOf___new__(): + assert rootof(x, 0) == 0 + assert rootof(x, -1) == 0 + + assert rootof(x, S.Zero) == 0 + + assert rootof(x - 1, 0) == 1 + assert rootof(x - 1, -1) == 1 + + assert rootof(x + 1, 0) == -1 + assert rootof(x + 1, -1) == -1 + + assert rootof(x**2 + 2*x + 3, 
0) == -1 - I*sqrt(2) + assert rootof(x**2 + 2*x + 3, 1) == -1 + I*sqrt(2) + assert rootof(x**2 + 2*x + 3, -1) == -1 + I*sqrt(2) + assert rootof(x**2 + 2*x + 3, -2) == -1 - I*sqrt(2) + + r = rootof(x**2 + 2*x + 3, 0, radicals=False) + assert isinstance(r, RootOf) is True + + r = rootof(x**2 + 2*x + 3, 1, radicals=False) + assert isinstance(r, RootOf) is True + + r = rootof(x**2 + 2*x + 3, -1, radicals=False) + assert isinstance(r, RootOf) is True + + r = rootof(x**2 + 2*x + 3, -2, radicals=False) + assert isinstance(r, RootOf) is True + + assert rootof((x - 1)*(x + 1), 0, radicals=False) == -1 + assert rootof((x - 1)*(x + 1), 1, radicals=False) == 1 + assert rootof((x - 1)*(x + 1), -1, radicals=False) == 1 + assert rootof((x - 1)*(x + 1), -2, radicals=False) == -1 + + assert rootof((x - 1)*(x + 1), 0, radicals=True) == -1 + assert rootof((x - 1)*(x + 1), 1, radicals=True) == 1 + assert rootof((x - 1)*(x + 1), -1, radicals=True) == 1 + assert rootof((x - 1)*(x + 1), -2, radicals=True) == -1 + + assert rootof((x - 1)*(x**3 + x + 3), 0) == rootof(x**3 + x + 3, 0) + assert rootof((x - 1)*(x**3 + x + 3), 1) == 1 + assert rootof((x - 1)*(x**3 + x + 3), 2) == rootof(x**3 + x + 3, 1) + assert rootof((x - 1)*(x**3 + x + 3), 3) == rootof(x**3 + x + 3, 2) + assert rootof((x - 1)*(x**3 + x + 3), -1) == rootof(x**3 + x + 3, 2) + assert rootof((x - 1)*(x**3 + x + 3), -2) == rootof(x**3 + x + 3, 1) + assert rootof((x - 1)*(x**3 + x + 3), -3) == 1 + assert rootof((x - 1)*(x**3 + x + 3), -4) == rootof(x**3 + x + 3, 0) + + assert rootof(x**4 + 3*x**3, 0) == -3 + assert rootof(x**4 + 3*x**3, 1) == 0 + assert rootof(x**4 + 3*x**3, 2) == 0 + assert rootof(x**4 + 3*x**3, 3) == 0 + + raises(GeneratorsNeeded, lambda: rootof(0, 0)) + raises(GeneratorsNeeded, lambda: rootof(1, 0)) + + raises(PolynomialError, lambda: rootof(Poly(0, x), 0)) + raises(PolynomialError, lambda: rootof(Poly(1, x), 0)) + raises(PolynomialError, lambda: rootof(x - y, 0)) + # issue 8617 + raises(PolynomialError, 
lambda: rootof(exp(x), 0)) + + raises(NotImplementedError, lambda: rootof(x**3 - x + sqrt(2), 0)) + raises(NotImplementedError, lambda: rootof(x**3 - x + I, 0)) + + raises(IndexError, lambda: rootof(x**2 - 1, -4)) + raises(IndexError, lambda: rootof(x**2 - 1, -3)) + raises(IndexError, lambda: rootof(x**2 - 1, 2)) + raises(IndexError, lambda: rootof(x**2 - 1, 3)) + raises(ValueError, lambda: rootof(x**2 - 1, x)) + + assert rootof(Poly(x - y, x), 0) == y + + assert rootof(Poly(x**2 - y, x), 0) == -sqrt(y) + assert rootof(Poly(x**2 - y, x), 1) == sqrt(y) + + assert rootof(Poly(x**3 - y, x), 0) == y**Rational(1, 3) + + assert rootof(y*x**3 + y*x + 2*y, x, 0) == -1 + raises(NotImplementedError, lambda: rootof(x**3 + x + 2*y, x, 0)) + + assert rootof(x**3 + x + 1, 0).is_commutative is True + + +def test_CRootOf_attributes(): + r = rootof(x**3 + x + 3, 0) + assert r.is_number + assert r.free_symbols == set() + # if the following assertion fails then multivariate polynomials + # are apparently supported and the RootOf.free_symbols routine + # should be changed to return whatever symbols would not be + # the PurePoly dummy symbol + raises(NotImplementedError, lambda: rootof(Poly(x**3 + y*x + 1, x), 0)) + + +def test_CRootOf___eq__(): + assert (rootof(x**3 + x + 3, 0) == rootof(x**3 + x + 3, 0)) is True + assert (rootof(x**3 + x + 3, 0) == rootof(x**3 + x + 3, 1)) is False + assert (rootof(x**3 + x + 3, 1) == rootof(x**3 + x + 3, 1)) is True + assert (rootof(x**3 + x + 3, 1) == rootof(x**3 + x + 3, 2)) is False + assert (rootof(x**3 + x + 3, 2) == rootof(x**3 + x + 3, 2)) is True + + assert (rootof(x**3 + x + 3, 0) == rootof(y**3 + y + 3, 0)) is True + assert (rootof(x**3 + x + 3, 0) == rootof(y**3 + y + 3, 1)) is False + assert (rootof(x**3 + x + 3, 1) == rootof(y**3 + y + 3, 1)) is True + assert (rootof(x**3 + x + 3, 1) == rootof(y**3 + y + 3, 2)) is False + assert (rootof(x**3 + x + 3, 2) == rootof(y**3 + y + 3, 2)) is True + + +def test_CRootOf___eval_Eq__(): + f = 
Function('f') + eq = x**3 + x + 3 + r = rootof(eq, 2) + r1 = rootof(eq, 1) + assert Eq(r, r1) is S.false + assert Eq(r, r) is S.true + assert unchanged(Eq, r, x) + assert Eq(r, 0) is S.false + assert Eq(r, S.Infinity) is S.false + assert Eq(r, I) is S.false + assert unchanged(Eq, r, f(0)) + sol = solve(eq) + for s in sol: + if s.is_real: + assert Eq(r, s) is S.false + r = rootof(eq, 0) + for s in sol: + if s.is_real: + assert Eq(r, s) is S.true + eq = x**3 + x + 1 + sol = solve(eq) + assert [Eq(rootof(eq, i), j) for i in range(3) for j in sol + ].count(True) == 3 + assert Eq(rootof(eq, 0), 1 + S.ImaginaryUnit) == False + + +def test_CRootOf_is_real(): + assert rootof(x**3 + x + 3, 0).is_real is True + assert rootof(x**3 + x + 3, 1).is_real is False + assert rootof(x**3 + x + 3, 2).is_real is False + + +def test_CRootOf_is_complex(): + assert rootof(x**3 + x + 3, 0).is_complex is True + + +def test_CRootOf_subs(): + assert rootof(x**3 + x + 1, 0).subs(x, y) == rootof(y**3 + y + 1, 0) + + +def test_CRootOf_diff(): + assert rootof(x**3 + x + 1, 0).diff(x) == 0 + assert rootof(x**3 + x + 1, 0).diff(y) == 0 + + +@slow +def test_CRootOf_evalf(): + real = rootof(x**3 + x + 3, 0).evalf(n=20) + + assert real.epsilon_eq(Float("-1.2134116627622296341")) + + re, im = rootof(x**3 + x + 3, 1).evalf(n=20).as_real_imag() + + assert re.epsilon_eq( Float("0.60670583138111481707")) + assert im.epsilon_eq(-Float("1.45061224918844152650")) + + re, im = rootof(x**3 + x + 3, 2).evalf(n=20).as_real_imag() + + assert re.epsilon_eq(Float("0.60670583138111481707")) + assert im.epsilon_eq(Float("1.45061224918844152650")) + + p = legendre_poly(4, x, polys=True) + roots = [str(r.n(17)) for r in p.real_roots()] + # magnitudes are given by + # sqrt(3/S(7) - 2*sqrt(6/S(5))/7) + # and + # sqrt(3/S(7) + 2*sqrt(6/S(5))/7) + assert roots == [ + "-0.86113631159405258", + "-0.33998104358485626", + "0.33998104358485626", + "0.86113631159405258", + ] + + re = rootof(x**5 - 5*x + 12, 0).evalf(n=20) + 
assert re.epsilon_eq(Float("-1.84208596619025438271")) + + re, im = rootof(x**5 - 5*x + 12, 1).evalf(n=20).as_real_imag() + assert re.epsilon_eq(Float("-0.351854240827371999559")) + assert im.epsilon_eq(Float("-1.709561043370328882010")) + + re, im = rootof(x**5 - 5*x + 12, 2).evalf(n=20).as_real_imag() + assert re.epsilon_eq(Float("-0.351854240827371999559")) + assert im.epsilon_eq(Float("+1.709561043370328882010")) + + re, im = rootof(x**5 - 5*x + 12, 3).evalf(n=20).as_real_imag() + assert re.epsilon_eq(Float("+1.272897223922499190910")) + assert im.epsilon_eq(Float("-0.719798681483861386681")) + + re, im = rootof(x**5 - 5*x + 12, 4).evalf(n=20).as_real_imag() + assert re.epsilon_eq(Float("+1.272897223922499190910")) + assert im.epsilon_eq(Float("+0.719798681483861386681")) + + # issue 6393 + assert str(rootof(x**5 + 2*x**4 + x**3 - 68719476736, 0).n(3)) == '147.' + eq = (531441*x**11 + 3857868*x**10 + 13730229*x**9 + 32597882*x**8 + + 55077472*x**7 + 60452000*x**6 + 32172064*x**5 - 4383808*x**4 - + 11942912*x**3 - 1506304*x**2 + 1453312*x + 512) + a, b = rootof(eq, 1).n(2).as_real_imag() + c, d = rootof(eq, 2).n(2).as_real_imag() + assert a == c + assert b < d + assert b == -d + # issue 6451 + r = rootof(legendre_poly(64, x), 7) + assert r.n(2) == r.n(100).n(2) + # issue 9019 + r0 = rootof(x**2 + 1, 0, radicals=False) + r1 = rootof(x**2 + 1, 1, radicals=False) + assert r0.n(4) == Float(-1.0, 4) * I + assert r1.n(4) == Float(1.0, 4) * I + + # make sure verification is used in case a max/min traps the "root" + assert str(rootof(4*x**5 + 16*x**3 + 12*x**2 + 7, 0).n(3)) == '-0.976' + + # watch out for UnboundLocalError + c = CRootOf(90720*x**6 - 4032*x**4 + 84*x**2 - 1, 0) + assert c._eval_evalf(2) # doesn't fail + + # watch out for imaginary parts that don't want to evaluate + assert str(RootOf(x**16 + 32*x**14 + 508*x**12 + 5440*x**10 + + 39510*x**8 + 204320*x**6 + 755548*x**4 + 1434496*x**2 + + 877969, 10).n(2)) == '-3.4*I' + assert abs(RootOf(x**4 + 10*x**2 + 1, 
0).n(2)) < 0.4 + + # check reset and args + r = [RootOf(x**3 + x + 3, i) for i in range(3)] + r[0]._reset() + for ri in r: + i = ri._get_interval() + ri.n(2) + assert i != ri._get_interval() + ri._reset() + assert i == ri._get_interval() + assert i == i.func(*i.args) + + +def test_issue_24978(): + # Irreducible poly with negative leading coeff is normalized + # (factor of -1 is extracted), before being stored as CRootOf.poly. + f = -x**2 + 2 + r = CRootOf(f, 0) + assert r.poly.as_expr() == x**2 - 2 + # An action that prompts calculation of an interval puts r.poly in + # the cache. + r.n() + assert r.poly in rootoftools._reals_cache + + +def test_CRootOf_evalf_caching_bug(): + r = rootof(x**5 - 5*x + 12, 1) + r.n() + a = r._get_interval() + r = rootof(x**5 - 5*x + 12, 1) + r.n() + b = r._get_interval() + assert a == b + + +def test_CRootOf_real_roots(): + assert Poly(x**5 + x + 1).real_roots() == [rootof(x**3 - x**2 + 1, 0)] + assert Poly(x**5 + x + 1).real_roots(radicals=False) == [rootof( + x**3 - x**2 + 1, 0)] + + # https://github.com/sympy/sympy/issues/20902 + p = Poly(-3*x**4 - 10*x**3 - 12*x**2 - 6*x - 1, x, domain='ZZ') + assert CRootOf.real_roots(p) == [S(-1), S(-1), S(-1), S(-1)/3] + + +def test_CRootOf_all_roots(): + assert Poly(x**5 + x + 1).all_roots() == [ + rootof(x**3 - x**2 + 1, 0), + Rational(-1, 2) - sqrt(3)*I/2, + Rational(-1, 2) + sqrt(3)*I/2, + rootof(x**3 - x**2 + 1, 1), + rootof(x**3 - x**2 + 1, 2), + ] + + assert Poly(x**5 + x + 1).all_roots(radicals=False) == [ + rootof(x**3 - x**2 + 1, 0), + rootof(x**2 + x + 1, 0, radicals=False), + rootof(x**2 + x + 1, 1, radicals=False), + rootof(x**3 - x**2 + 1, 1), + rootof(x**3 - x**2 + 1, 2), + ] + + +def test_CRootOf_eval_rational(): + p = legendre_poly(4, x, polys=True) + roots = [r.eval_rational(n=18) for r in p.real_roots()] + for root in roots: + assert isinstance(root, Rational) + roots = [str(root.n(17)) for root in roots] + assert roots == [ + "-0.86113631159405258", + "-0.33998104358485626", 
+ "0.33998104358485626", + "0.86113631159405258", + ] + + +def test_CRootOf_lazy(): + # irreducible poly with both real and complex roots: + f = Poly(x**3 + 2*x + 2) + + # real root: + CRootOf.clear_cache() + r = CRootOf(f, 0) + # Not yet in cache, after construction: + assert r.poly not in rootoftools._reals_cache + assert r.poly not in rootoftools._complexes_cache + r.evalf() + # In cache after evaluation: + assert r.poly in rootoftools._reals_cache + assert r.poly not in rootoftools._complexes_cache + + # complex root: + CRootOf.clear_cache() + r = CRootOf(f, 1) + # Not yet in cache, after construction: + assert r.poly not in rootoftools._reals_cache + assert r.poly not in rootoftools._complexes_cache + r.evalf() + # In cache after evaluation: + assert r.poly in rootoftools._reals_cache + assert r.poly in rootoftools._complexes_cache + + # composite poly with both real and complex roots: + f = Poly((x**2 - 2)*(x**2 + 1)) + + # real root: + CRootOf.clear_cache() + r = CRootOf(f, 0) + # In cache immediately after construction: + assert r.poly in rootoftools._reals_cache + assert r.poly not in rootoftools._complexes_cache + + # complex root: + CRootOf.clear_cache() + r = CRootOf(f, 2) + # In cache immediately after construction: + assert r.poly in rootoftools._reals_cache + assert r.poly in rootoftools._complexes_cache + + +def test_RootSum___new__(): + f = x**3 + x + 3 + + g = Lambda(r, log(r*x)) + s = RootSum(f, g) + + assert isinstance(s, RootSum) is True + + assert RootSum(f**2, g) == 2*RootSum(f, g) + assert RootSum((x - 7)*f**3, g) == log(7*x) + 3*RootSum(f, g) + + # issue 5571 + assert hash(RootSum((x - 7)*f**3, g)) == hash(log(7*x) + 3*RootSum(f, g)) + + raises(MultivariatePolynomialError, lambda: RootSum(x**3 + x + y)) + raises(ValueError, lambda: RootSum(x**2 + 3, lambda x: x)) + + assert RootSum(f, exp) == RootSum(f, Lambda(x, exp(x))) + assert RootSum(f, log) == RootSum(f, Lambda(x, log(x))) + + assert isinstance(RootSum(f, auto=False), RootSum) is True 
+ + assert RootSum(f) == 0 + assert RootSum(f, Lambda(x, x)) == 0 + assert RootSum(f, Lambda(x, x**2)) == -2 + + assert RootSum(f, Lambda(x, 1)) == 3 + assert RootSum(f, Lambda(x, 2)) == 6 + + assert RootSum(f, auto=False).is_commutative is True + + assert RootSum(f, Lambda(x, 1/(x + x**2))) == Rational(11, 3) + assert RootSum(f, Lambda(x, y/(x + x**2))) == Rational(11, 3)*y + + assert RootSum(x**2 - 1, Lambda(x, 3*x**2), x) == 6 + assert RootSum(x**2 - y, Lambda(x, 3*x**2), x) == 6*y + + assert RootSum(x**2 - 1, Lambda(x, z*x**2), x) == 2*z + assert RootSum(x**2 - y, Lambda(x, z*x**2), x) == 2*z*y + + assert RootSum( + x**2 - 1, Lambda(x, exp(x)), quadratic=True) == exp(-1) + exp(1) + + assert RootSum(x**3 + a*x + a**3, tan, x) == \ + RootSum(x**3 + x + 1, Lambda(x, tan(a*x))) + assert RootSum(a**3*x**3 + a*x + 1, tan, x) == \ + RootSum(x**3 + x + 1, Lambda(x, tan(x/a))) + + +def test_RootSum_free_symbols(): + assert RootSum(x**3 + x + 3, Lambda(r, exp(r))).free_symbols == set() + assert RootSum(x**3 + x + 3, Lambda(r, exp(a*r))).free_symbols == {a} + assert RootSum( + x**3 + x + y, Lambda(r, exp(a*r)), x).free_symbols == {a, y} + + +def test_RootSum___eq__(): + f = Lambda(x, exp(x)) + + assert (RootSum(x**3 + x + 1, f) == RootSum(x**3 + x + 1, f)) is True + assert (RootSum(x**3 + x + 1, f) == RootSum(y**3 + y + 1, f)) is True + + assert (RootSum(x**3 + x + 1, f) == RootSum(x**3 + x + 2, f)) is False + assert (RootSum(x**3 + x + 1, f) == RootSum(y**3 + y + 2, f)) is False + + +def test_RootSum_doit(): + rs = RootSum(x**2 + 1, exp) + + assert isinstance(rs, RootSum) is True + assert rs.doit() == exp(-I) + exp(I) + + rs = RootSum(x**2 + a, exp, x) + + assert isinstance(rs, RootSum) is True + assert rs.doit() == exp(-sqrt(-a)) + exp(sqrt(-a)) + + +def test_RootSum_evalf(): + rs = RootSum(x**2 + 1, exp) + + assert rs.evalf(n=20, chop=True).epsilon_eq(Float("1.0806046117362794348")) + assert rs.evalf(n=15, chop=True).epsilon_eq(Float("1.08060461173628")) + + rs = 
RootSum(x**2 + a, exp, x) + + assert rs.evalf() == rs + + +def test_RootSum_diff(): + f = x**3 + x + 3 + + g = Lambda(r, exp(r*x)) + h = Lambda(r, r*exp(r*x)) + + assert RootSum(f, g).diff(x) == RootSum(f, h) + + +def test_RootSum_subs(): + f = x**3 + x + 3 + g = Lambda(r, exp(r*x)) + + F = y**3 + y + 3 + G = Lambda(r, exp(r*y)) + + assert RootSum(f, g).subs(y, 1) == RootSum(f, g) + assert RootSum(f, g).subs(x, y) == RootSum(F, G) + + +def test_RootSum_rational(): + assert RootSum( + z**5 - z + 1, Lambda(z, z/(x - z))) == (4*x - 5)/(x**5 - x + 1) + + f = 161*z**3 + 115*z**2 + 19*z + 1 + g = Lambda(z, z*log( + -3381*z**4/4 - 3381*z**3/4 - 625*z**2/2 - z*Rational(125, 2) - 5 + exp(x))) + + assert RootSum(f, g).diff(x) == -( + (5*exp(2*x) - 6*exp(x) + 4)*exp(x)/(exp(3*x) - exp(2*x) + 1))/7 + + +def test_RootSum_independent(): + f = (x**3 - a)**2*(x**4 - b)**3 + + g = Lambda(x, 5*tan(x) + 7) + h = Lambda(x, tan(x)) + + r0 = RootSum(x**3 - a, h, x) + r1 = RootSum(x**4 - b, h, x) + + assert RootSum(f, g, x).as_ordered_terms() == [10*r0, 15*r1, 126] + + +def test_issue_7876(): + l1 = Poly(x**6 - x + 1, x).all_roots() + l2 = [rootof(x**6 - x + 1, i) for i in range(6)] + assert frozenset(l1) == frozenset(l2) + + +def test_issue_8316(): + f = Poly(7*x**8 - 9) + assert len(f.all_roots()) == 8 + f = Poly(7*x**8 - 10) + assert len(f.all_roots()) == 8 + + +def test__imag_count(): + from sympy.polys.rootoftools import _imag_count_of_factor + def imag_count(p): + return sum(_imag_count_of_factor(f)*m for f, m in + p.factor_list()[1]) + assert imag_count(Poly(x**6 + 10*x**2 + 1)) == 2 + assert imag_count(Poly(x**2)) == 0 + assert imag_count(Poly([1]*3 + [-1], x)) == 0 + assert imag_count(Poly(x**3 + 1)) == 0 + assert imag_count(Poly(x**2 + 1)) == 2 + assert imag_count(Poly(x**2 - 1)) == 0 + assert imag_count(Poly(x**4 - 1)) == 2 + assert imag_count(Poly(x**4 + 1)) == 0 + assert imag_count(Poly([1, 2, 3], x)) == 0 + assert imag_count(Poly(x**3 + x + 1)) == 0 + assert 
imag_count(Poly(x**4 + x + 1)) == 0 + def q(r1, r2, p): + return Poly(((x - r1)*(x - r2)).subs(x, x**p), x) + assert imag_count(q(-1, -2, 2)) == 4 + assert imag_count(q(-1, 2, 2)) == 2 + assert imag_count(q(1, 2, 2)) == 0 + assert imag_count(q(1, 2, 4)) == 4 + assert imag_count(q(-1, 2, 4)) == 2 + assert imag_count(q(-1, -2, 4)) == 0 + + +def test_RootOf_is_imaginary(): + r = RootOf(x**4 + 4*x**2 + 1, 1) + i = r._get_interval() + assert r.is_imaginary and i.ax*i.bx <= 0 + + +def test_is_disjoint(): + eq = x**3 + 5*x + 1 + ir = rootof(eq, 0)._get_interval() + ii = rootof(eq, 1)._get_interval() + assert ir.is_disjoint(ii) + assert ii.is_disjoint(ir) + + +def test_pure_key_dict(): + p = D() + assert (x in p) is False + assert (1 in p) is False + p[x] = 1 + assert x in p + assert y in p + assert p[y] == 1 + raises(KeyError, lambda: p[1]) + def dont(k): + p[k] = 2 + raises(ValueError, lambda: dont(1)) + + +@slow +def test_eval_approx_relative(): + CRootOf.clear_cache() + t = [CRootOf(x**3 + 10*x + 1, i) for i in range(3)] + assert [i.eval_rational(1e-1) for i in t] == [ + Rational(-21, 220), Rational(15, 256) - I*805/256, + Rational(15, 256) + I*805/256] + t[0]._reset() + assert [i.eval_rational(1e-1, 1e-4) for i in t] == [ + Rational(-21, 220), Rational(3275, 65536) - I*414645/131072, + Rational(3275, 65536) + I*414645/131072] + assert S(t[0]._get_interval().dx) < 1e-1 + assert S(t[1]._get_interval().dx) < 1e-1 + assert S(t[1]._get_interval().dy) < 1e-4 + assert S(t[2]._get_interval().dx) < 1e-1 + assert S(t[2]._get_interval().dy) < 1e-4 + t[0]._reset() + assert [i.eval_rational(1e-4, 1e-4) for i in t] == [ + Rational(-2001, 20020), Rational(6545, 131072) - I*414645/131072, + Rational(6545, 131072) + I*414645/131072] + assert S(t[0]._get_interval().dx) < 1e-4 + assert S(t[1]._get_interval().dx) < 1e-4 + assert S(t[1]._get_interval().dy) < 1e-4 + assert S(t[2]._get_interval().dx) < 1e-4 + assert S(t[2]._get_interval().dy) < 1e-4 + # in the following, the actual relative 
precision is + # less than tested, but it should never be greater + t[0]._reset() + assert [i.eval_rational(n=2) for i in t] == [ + Rational(-202201, 2024022), Rational(104755, 2097152) - I*6634255/2097152, + Rational(104755, 2097152) + I*6634255/2097152] + assert abs(S(t[0]._get_interval().dx)/t[0]) < 1e-2 + assert abs(S(t[1]._get_interval().dx)/t[1]).n() < 1e-2 + assert abs(S(t[1]._get_interval().dy)/t[1]).n() < 1e-2 + assert abs(S(t[2]._get_interval().dx)/t[2]).n() < 1e-2 + assert abs(S(t[2]._get_interval().dy)/t[2]).n() < 1e-2 + t[0]._reset() + assert [i.eval_rational(n=3) for i in t] == [ + Rational(-202201, 2024022), Rational(1676045, 33554432) - I*106148135/33554432, + Rational(1676045, 33554432) + I*106148135/33554432] + assert abs(S(t[0]._get_interval().dx)/t[0]) < 1e-3 + assert abs(S(t[1]._get_interval().dx)/t[1]).n() < 1e-3 + assert abs(S(t[1]._get_interval().dy)/t[1]).n() < 1e-3 + assert abs(S(t[2]._get_interval().dx)/t[2]).n() < 1e-3 + assert abs(S(t[2]._get_interval().dy)/t[2]).n() < 1e-3 + + t[0]._reset() + a = [i.eval_approx(2) for i in t] + assert [str(i) for i in a] == [ + '-0.10', '0.05 - 3.2*I', '0.05 + 3.2*I'] + assert all(abs(((a[i] - t[i])/t[i]).n()) < 1e-2 for i in range(len(a))) + + +def test_issue_15920(): + r = rootof(x**5 - x + 1, 0) + p = Integral(x, (x, 1, y)) + assert unchanged(Eq, r, p) + + +def test_issue_19113(): + eq = y**3 - y + 1 + # generator is a canonical x in RootOf + assert str(Poly(eq).real_roots()) == '[CRootOf(x**3 - x + 1, 0)]' + assert str(Poly(eq.subs(y, tan(y))).real_roots() + ) == '[CRootOf(x**3 - x + 1, 0)]' + assert str(Poly(eq.subs(y, tan(x))).real_roots() + ) == '[CRootOf(x**3 - x + 1, 0)]' diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_specialpolys.py b/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_specialpolys.py new file mode 100644 index 0000000000000000000000000000000000000000..39f551c9e70b5c2bae748ea681b9c8a8cb349fe1 --- /dev/null +++ 
b/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_specialpolys.py @@ -0,0 +1,152 @@ +"""Tests for functions for generating interesting polynomials. """ + +from sympy.core.add import Add +from sympy.core.symbol import symbols +from sympy.functions.elementary.miscellaneous import sqrt +from sympy.ntheory.generate import prime +from sympy.polys.domains.integerring import ZZ +from sympy.polys.polytools import Poly +from sympy.utilities.iterables import permute_signs +from sympy.testing.pytest import raises + +from sympy.polys.specialpolys import ( + swinnerton_dyer_poly, + cyclotomic_poly, + symmetric_poly, + random_poly, + interpolating_poly, + fateman_poly_F_1, + dmp_fateman_poly_F_1, + fateman_poly_F_2, + dmp_fateman_poly_F_2, + fateman_poly_F_3, + dmp_fateman_poly_F_3, +) + +from sympy.abc import x, y, z + + +def test_swinnerton_dyer_poly(): + raises(ValueError, lambda: swinnerton_dyer_poly(0, x)) + + assert swinnerton_dyer_poly(1, x, polys=True) == Poly(x**2 - 2) + + assert swinnerton_dyer_poly(1, x) == x**2 - 2 + assert swinnerton_dyer_poly(2, x) == x**4 - 10*x**2 + 1 + assert swinnerton_dyer_poly( + 3, x) == x**8 - 40*x**6 + 352*x**4 - 960*x**2 + 576 + # we only need to check that the polys arg works but + # we may as well test that the roots are correct + p = [sqrt(prime(i)) for i in range(1, 5)] + assert str([i.n(3) for i in + swinnerton_dyer_poly(4, polys=True).all_roots()] + ) == str(sorted([Add(*i).n(3) for i in permute_signs(p)])) + + +def test_cyclotomic_poly(): + raises(ValueError, lambda: cyclotomic_poly(0, x)) + + assert cyclotomic_poly(1, x, polys=True) == Poly(x - 1) + + assert cyclotomic_poly(1, x) == x - 1 + assert cyclotomic_poly(2, x) == x + 1 + assert cyclotomic_poly(3, x) == x**2 + x + 1 + assert cyclotomic_poly(4, x) == x**2 + 1 + assert cyclotomic_poly(5, x) == x**4 + x**3 + x**2 + x + 1 + assert cyclotomic_poly(6, x) == x**2 - x + 1 + + +def test_symmetric_poly(): + raises(ValueError, lambda: symmetric_poly(-1, x, y, z)) 
+ raises(ValueError, lambda: symmetric_poly(5, x, y, z)) + + assert symmetric_poly(1, x, y, z, polys=True) == Poly(x + y + z) + assert symmetric_poly(1, (x, y, z), polys=True) == Poly(x + y + z) + + assert symmetric_poly(0, x, y, z) == 1 + assert symmetric_poly(1, x, y, z) == x + y + z + assert symmetric_poly(2, x, y, z) == x*y + x*z + y*z + assert symmetric_poly(3, x, y, z) == x*y*z + + +def test_random_poly(): + poly = random_poly(x, 10, -100, 100, polys=False) + + assert Poly(poly).degree() == 10 + assert all(-100 <= coeff <= 100 for coeff in Poly(poly).coeffs()) is True + + poly = random_poly(x, 10, -100, 100, polys=True) + + assert poly.degree() == 10 + assert all(-100 <= coeff <= 100 for coeff in poly.coeffs()) is True + + +def test_interpolating_poly(): + x0, x1, x2, x3, y0, y1, y2, y3 = symbols('x:4, y:4') + + assert interpolating_poly(0, x) == 0 + assert interpolating_poly(1, x) == y0 + + assert interpolating_poly(2, x) == \ + y0*(x - x1)/(x0 - x1) + y1*(x - x0)/(x1 - x0) + + assert interpolating_poly(3, x) == \ + y0*(x - x1)*(x - x2)/((x0 - x1)*(x0 - x2)) + \ + y1*(x - x0)*(x - x2)/((x1 - x0)*(x1 - x2)) + \ + y2*(x - x0)*(x - x1)/((x2 - x0)*(x2 - x1)) + + assert interpolating_poly(4, x) == \ + y0*(x - x1)*(x - x2)*(x - x3)/((x0 - x1)*(x0 - x2)*(x0 - x3)) + \ + y1*(x - x0)*(x - x2)*(x - x3)/((x1 - x0)*(x1 - x2)*(x1 - x3)) + \ + y2*(x - x0)*(x - x1)*(x - x3)/((x2 - x0)*(x2 - x1)*(x2 - x3)) + \ + y3*(x - x0)*(x - x1)*(x - x2)/((x3 - x0)*(x3 - x1)*(x3 - x2)) + + raises(ValueError, lambda: + interpolating_poly(2, x, (x, 2), (1, 3))) + raises(ValueError, lambda: + interpolating_poly(2, x, (x + y, 2), (1, 3))) + raises(ValueError, lambda: + interpolating_poly(2, x + y, (x, 2), (1, 3))) + raises(ValueError, lambda: + interpolating_poly(2, 3, (4, 5), (6, 7))) + raises(ValueError, lambda: + interpolating_poly(2, 3, (4, 5), (6, 7, 8))) + assert interpolating_poly(0, x, (1, 2), (3, 4)) == 0 + assert interpolating_poly(1, x, (1, 2), (3, 4)) == 3 + assert 
interpolating_poly(2, x, (1, 2), (3, 4)) == x + 2 + + +def test_fateman_poly_F_1(): + f, g, h = fateman_poly_F_1(1) + F, G, H = dmp_fateman_poly_F_1(1, ZZ) + + assert [ t.rep.to_list() for t in [f, g, h] ] == [F, G, H] + + f, g, h = fateman_poly_F_1(3) + F, G, H = dmp_fateman_poly_F_1(3, ZZ) + + assert [ t.rep.to_list() for t in [f, g, h] ] == [F, G, H] + + +def test_fateman_poly_F_2(): + f, g, h = fateman_poly_F_2(1) + F, G, H = dmp_fateman_poly_F_2(1, ZZ) + + assert [ t.rep.to_list() for t in [f, g, h] ] == [F, G, H] + + f, g, h = fateman_poly_F_2(3) + F, G, H = dmp_fateman_poly_F_2(3, ZZ) + + assert [ t.rep.to_list() for t in [f, g, h] ] == [F, G, H] + + +def test_fateman_poly_F_3(): + f, g, h = fateman_poly_F_3(1) + F, G, H = dmp_fateman_poly_F_3(1, ZZ) + + assert [ t.rep.to_list() for t in [f, g, h] ] == [F, G, H] + + f, g, h = fateman_poly_F_3(3) + F, G, H = dmp_fateman_poly_F_3(3, ZZ) + + assert [ t.rep.to_list() for t in [f, g, h] ] == [F, G, H] diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_sqfreetools.py b/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_sqfreetools.py new file mode 100644 index 0000000000000000000000000000000000000000..b772a05a50e2eacd5a7c80352b1eadd52c69c3fa --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/polys/tests/test_sqfreetools.py @@ -0,0 +1,160 @@ +"""Tests for square-free decomposition algorithms and related tools. 
""" + +from sympy.polys.rings import ring +from sympy.polys.domains import FF, ZZ, QQ +from sympy.polys.specialpolys import f_polys + +from sympy.testing.pytest import raises +from sympy.external.gmpy import MPQ + +f_0, f_1, f_2, f_3, f_4, f_5, f_6 = f_polys() + +def test_dup_sqf(): + R, x = ring("x", ZZ) + + assert R.dup_sqf_part(0) == 0 + assert R.dup_sqf_p(0) is True + + assert R.dup_sqf_part(7) == 1 + assert R.dup_sqf_p(7) is True + + assert R.dup_sqf_part(2*x + 2) == x + 1 + assert R.dup_sqf_p(2*x + 2) is True + + assert R.dup_sqf_part(x**3 + x + 1) == x**3 + x + 1 + assert R.dup_sqf_p(x**3 + x + 1) is True + + assert R.dup_sqf_part(-x**3 + x + 1) == x**3 - x - 1 + assert R.dup_sqf_p(-x**3 + x + 1) is True + + assert R.dup_sqf_part(2*x**3 + 3*x**2) == 2*x**2 + 3*x + assert R.dup_sqf_p(2*x**3 + 3*x**2) is False + + assert R.dup_sqf_part(-2*x**3 + 3*x**2) == 2*x**2 - 3*x + assert R.dup_sqf_p(-2*x**3 + 3*x**2) is False + + assert R.dup_sqf_list(0) == (0, []) + assert R.dup_sqf_list(1) == (1, []) + + assert R.dup_sqf_list(x) == (1, [(x, 1)]) + assert R.dup_sqf_list(2*x**2) == (2, [(x, 2)]) + assert R.dup_sqf_list(3*x**3) == (3, [(x, 3)]) + + assert R.dup_sqf_list(-x**5 + x**4 + x - 1) == \ + (-1, [(x**3 + x**2 + x + 1, 1), (x - 1, 2)]) + assert R.dup_sqf_list(x**8 + 6*x**6 + 12*x**4 + 8*x**2) == \ + ( 1, [(x, 2), (x**2 + 2, 3)]) + + assert R.dup_sqf_list(2*x**2 + 4*x + 2) == (2, [(x + 1, 2)]) + + R, x = ring("x", QQ) + assert R.dup_sqf_list(2*x**2 + 4*x + 2) == (2, [(x + 1, 2)]) + + R, x = ring("x", FF(2)) + assert R.dup_sqf_list(x**2 + 1) == (1, [(x + 1, 2)]) + + R, x = ring("x", FF(3)) + assert R.dup_sqf_list(x**10 + 2*x**7 + 2*x**4 + x) == \ + (1, [(x, 1), + (x + 1, 3), + (x + 2, 6)]) + + R1, x = ring("x", ZZ) + R2, y = ring("y", FF(3)) + + f = x**3 + 1 + g = y**3 + 1 + + assert R1.dup_sqf_part(f) == f + assert R2.dup_sqf_part(g) == y + 1 + + assert R1.dup_sqf_p(f) is True + assert R2.dup_sqf_p(g) is False + + R, x, y = ring("x,y", ZZ) + + A = x**4 - 3*x**2 + 6 
+ D = x**6 - 5*x**4 + 5*x**2 + 4 + + f, g = D, R.dmp_sub(A, R.dmp_mul(R.dmp_diff(D, 1), y)) + res = R.dmp_resultant(f, g) + h = (4*y**2 + 1).drop(x) + + assert R.drop(x).dup_sqf_list(res) == (45796, [(h, 3)]) + + Rt, t = ring("t", ZZ) + R, x = ring("x", Rt) + assert R.dup_sqf_list_include(t**3*x**2) == [(t**3, 1), (x, 2)] + + +def test_dmp_sqf(): + R, x, y = ring("x,y", ZZ) + assert R.dmp_sqf_part(0) == 0 + assert R.dmp_sqf_p(0) is True + + assert R.dmp_sqf_part(7) == 1 + assert R.dmp_sqf_p(7) is True + + assert R.dmp_sqf_list(3) == (3, []) + assert R.dmp_sqf_list_include(3) == [(3, 1)] + + R, x, y, z = ring("x,y,z", ZZ) + assert R.dmp_sqf_p(f_0) is True + assert R.dmp_sqf_p(f_0**2) is False + assert R.dmp_sqf_p(f_1) is True + assert R.dmp_sqf_p(f_1**2) is False + assert R.dmp_sqf_p(f_2) is True + assert R.dmp_sqf_p(f_2**2) is False + assert R.dmp_sqf_p(f_3) is True + assert R.dmp_sqf_p(f_3**2) is False + assert R.dmp_sqf_p(f_5) is False + assert R.dmp_sqf_p(f_5**2) is False + + assert R.dmp_sqf_p(f_4) is True + assert R.dmp_sqf_part(f_4) == -f_4 + + assert R.dmp_sqf_part(f_5) == x + y - z + + R, x, y, z, t = ring("x,y,z,t", ZZ) + assert R.dmp_sqf_p(f_6) is True + assert R.dmp_sqf_part(f_6) == f_6 + + R, x = ring("x", ZZ) + f = -x**5 + x**4 + x - 1 + + assert R.dmp_sqf_list(f) == (-1, [(x**3 + x**2 + x + 1, 1), (x - 1, 2)]) + assert R.dmp_sqf_list_include(f) == [(-x**3 - x**2 - x - 1, 1), (x - 1, 2)] + + R, x, y = ring("x,y", ZZ) + f = -x**5 + x**4 + x - 1 + + assert R.dmp_sqf_list(f) == (-1, [(x**3 + x**2 + x + 1, 1), (x - 1, 2)]) + assert R.dmp_sqf_list_include(f) == [(-x**3 - x**2 - x - 1, 1), (x - 1, 2)] + + f = -x**2 + 2*x - 1 + assert R.dmp_sqf_list_include(f) == [(-1, 1), (x - 1, 2)] + + f = (y**2 + 1)**2*(x**2 + 2*x + 2) + assert R.dmp_sqf_p(f) is False + assert R.dmp_sqf_list(f) == (1, [(x**2 + 2*x + 2, 1), (y**2 + 1, 2)]) + + R, x, y = ring("x,y", FF(2)) + raises(NotImplementedError, lambda: R.dmp_sqf_list(y**2 + 1)) + + +def test_dup_gff_list(): + R, x = 
ring("x", ZZ) + + f = x**5 + 2*x**4 - x**3 - 2*x**2 + assert R.dup_gff_list(f) == [(x, 1), (x + 2, 4)] + + g = x**9 - 20*x**8 + 166*x**7 - 744*x**6 + 1965*x**5 - 3132*x**4 + 2948*x**3 - 1504*x**2 + 320*x + assert R.dup_gff_list(g) == [(x**2 - 5*x + 4, 1), (x**2 - 5*x + 4, 2), (x, 3)] + + raises(ValueError, lambda: R.dup_gff_list(0)) + +def test_issue_26178(): + R, x, y, z = ring(['x', 'y', 'z'], QQ) + assert (x**2 - 2*y**2 + 1).sqf_list() == (MPQ(1,1), [(x**2 - 2*y**2 + 1, 1)]) + assert (x**2 - 2*z**2 + 1).sqf_list() == (MPQ(1,1), [(x**2 - 2*z**2 + 1, 1)]) + assert (y**2 - 2*z**2 + 1).sqf_list() == (MPQ(1,1), [(y**2 - 2*z**2 + 1, 1)]) diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/stats/__pycache__/crv_types.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/sympy/stats/__pycache__/crv_types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14cc9779789671a74e17f4f29247bb810c6e5cc0 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/stats/__pycache__/crv_types.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:21065f3ec96be31a86edd9d79fe5d1446785eb2778d8e763c82c51dc673e5d78 +size 129032 diff --git a/evalkit_tf437/lib/python3.10/site-packages/google_auth_oauthlib-1.2.1.dist-info/METADATA b/evalkit_tf437/lib/python3.10/site-packages/google_auth_oauthlib-1.2.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..42e66d0700f10f41902d13238bbf334d6b4ac883 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/google_auth_oauthlib-1.2.1.dist-info/METADATA @@ -0,0 +1,82 @@ +Metadata-Version: 2.1 +Name: google-auth-oauthlib +Version: 1.2.1 +Summary: Google Authentication Library +Home-page: https://github.com/GoogleCloudPlatform/google-auth-library-python-oauthlib +Author: Google Cloud Platform +Author-email: googleapis-packages@google.com +License: Apache 2.0 +Keywords: google auth oauth client oauthlib 
+Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: POSIX +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Operating System :: OS Independent +Classifier: Topic :: Internet :: WWW/HTTP +Requires-Python: >=3.6 +License-File: LICENSE +Requires-Dist: google-auth >=2.15.0 +Requires-Dist: requests-oauthlib >=0.7.0 +Provides-Extra: tool +Requires-Dist: click >=6.0.0 ; extra == 'tool' + +oauthlib integration for Google Auth +==================================== + +|pypi| + +This library provides `oauthlib`_ integration with `google-auth`_. + +.. |build| image:: https://travis-ci.org/googleapis/google-auth-library-python-oauthlib.svg?branch=main + :target: https://googleapis.dev/python/google-auth-oauthlib/latest/index.html +.. |pypi| image:: https://img.shields.io/pypi/v/google-auth-oauthlib.svg + :target: https://pypi.python.org/pypi/google-auth-oauthlib + +.. _oauthlib: https://github.com/idan/oauthlib +.. _google-auth: https://github.com/googleapis/google-auth-library-python + +Installing +---------- + +You can install using `pip`_:: + + $ pip install google-auth-oauthlib + +.. _pip: https://pip.pypa.io/en/stable/ + +Documentation +------------- + +The latest documentation is available at `google-auth-oauthlib.googleapis.dev`_. + +.. 
_google-auth-oauthlib.googleapis.dev: https://googleapis.dev/python/google-auth-oauthlib/latest/index.html + +Supported Python Versions +------------------------- +Python >= 3.6 + + +Unsupported Python Versions +--------------------------- + +Python == 2.7, Python == 3.5. + +The last version of this library compatible with Python 2.7 and 3.5 is +`google-auth-oauthlib==0.4.1`. + +License +------- + +Apache 2.0 - See `the LICENSE`_ for more information. + +.. _the LICENSE: https://github.com/googleapis/google-auth-library-python-oauthlib/blob/main/LICENSE diff --git a/evalkit_tf437/lib/python3.10/site-packages/google_auth_oauthlib-1.2.1.dist-info/top_level.txt b/evalkit_tf437/lib/python3.10/site-packages/google_auth_oauthlib-1.2.1.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..14627fc3bdf89b15dc6be3b2fb5ceaa533bd216a --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/google_auth_oauthlib-1.2.1.dist-info/top_level.txt @@ -0,0 +1,4 @@ +docs +google_auth_oauthlib +scripts +testing diff --git a/evalkit_tf437/lib/python3.10/site-packages/prompt_toolkit/formatted_text/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/prompt_toolkit/formatted_text/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b5ff06a2ea72d915bc2399a5415319a2dd46f175 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/prompt_toolkit/formatted_text/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/prompt_toolkit/formatted_text/__pycache__/ansi.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/prompt_toolkit/formatted_text/__pycache__/ansi.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a3e977da1a991cb865f93ae22d808a25f4ec1ab Binary files /dev/null and 
b/evalkit_tf437/lib/python3.10/site-packages/prompt_toolkit/formatted_text/__pycache__/ansi.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/prompt_toolkit/formatted_text/__pycache__/html.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/prompt_toolkit/formatted_text/__pycache__/html.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f67ae34138bd6b28a11faf5a1839384f4ffa4ac8 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/prompt_toolkit/formatted_text/__pycache__/html.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/prompt_toolkit/formatted_text/__pycache__/pygments.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/prompt_toolkit/formatted_text/__pycache__/pygments.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..508556175df2c2edfe56f0c3c2b8d1e55b033c2a Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/prompt_toolkit/formatted_text/__pycache__/pygments.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/prompt_toolkit/formatted_text/__pycache__/utils.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/prompt_toolkit/formatted_text/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de15eee9f17cbc09f2241c99004ee6fbf458797d Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/prompt_toolkit/formatted_text/__pycache__/utils.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/prompt_toolkit/formatted_text/ansi.py b/evalkit_tf437/lib/python3.10/site-packages/prompt_toolkit/formatted_text/ansi.py new file mode 100644 index 0000000000000000000000000000000000000000..4761982ab38ade4de833bd69998e07aaff1abdf6 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/prompt_toolkit/formatted_text/ansi.py @@ -0,0 +1,297 @@ +from __future__ import 
annotations + +from string import Formatter +from typing import Generator + +from prompt_toolkit.output.vt100 import BG_ANSI_COLORS, FG_ANSI_COLORS +from prompt_toolkit.output.vt100 import _256_colors as _256_colors_table + +from .base import StyleAndTextTuples + +__all__ = [ + "ANSI", + "ansi_escape", +] + + +class ANSI: + """ + ANSI formatted text. + Take something ANSI escaped text, for use as a formatted string. E.g. + + :: + + ANSI('\\x1b[31mhello \\x1b[32mworld') + + Characters between ``\\001`` and ``\\002`` are supposed to have a zero width + when printed, but these are literally sent to the terminal output. This can + be used for instance, for inserting Final Term prompt commands. They will + be translated into a prompt_toolkit '[ZeroWidthEscape]' fragment. + """ + + def __init__(self, value: str) -> None: + self.value = value + self._formatted_text: StyleAndTextTuples = [] + + # Default style attributes. + self._color: str | None = None + self._bgcolor: str | None = None + self._bold = False + self._underline = False + self._strike = False + self._italic = False + self._blink = False + self._reverse = False + self._hidden = False + + # Process received text. + parser = self._parse_corot() + parser.send(None) # type: ignore + for c in value: + parser.send(c) + + def _parse_corot(self) -> Generator[None, str, None]: + """ + Coroutine that parses the ANSI escape sequences. + """ + style = "" + formatted_text = self._formatted_text + + while True: + # NOTE: CSI is a special token within a stream of characters that + # introduces an ANSI control sequence used to set the + # style attributes of the following characters. + csi = False + + c = yield + + # Everything between \001 and \002 should become a ZeroWidthEscape. 
+ if c == "\001": + escaped_text = "" + while c != "\002": + c = yield + if c == "\002": + formatted_text.append(("[ZeroWidthEscape]", escaped_text)) + c = yield + break + else: + escaped_text += c + + # Check for CSI + if c == "\x1b": + # Start of color escape sequence. + square_bracket = yield + if square_bracket == "[": + csi = True + else: + continue + elif c == "\x9b": + csi = True + + if csi: + # Got a CSI sequence. Color codes are following. + current = "" + params = [] + + while True: + char = yield + + # Construct number + if char.isdigit(): + current += char + + # Eval number + else: + # Limit and save number value + params.append(min(int(current or 0), 9999)) + + # Get delimiter token if present + if char == ";": + current = "" + + # Check and evaluate color codes + elif char == "m": + # Set attributes and token. + self._select_graphic_rendition(params) + style = self._create_style_string() + break + + # Check and evaluate cursor forward + elif char == "C": + for i in range(params[0]): + # add using current style + formatted_text.append((style, " ")) + break + + else: + # Ignore unsupported sequence. + break + else: + # Add current character. + # NOTE: At this point, we could merge the current character + # into the previous tuple if the style did not change, + # however, it's not worth the effort given that it will + # be "Exploded" once again when it's rendered to the + # output. + formatted_text.append((style, c)) + + def _select_graphic_rendition(self, attrs: list[int]) -> None: + """ + Taken a list of graphics attributes and apply changes. 
+ """ + if not attrs: + attrs = [0] + else: + attrs = list(attrs[::-1]) + + while attrs: + attr = attrs.pop() + + if attr in _fg_colors: + self._color = _fg_colors[attr] + elif attr in _bg_colors: + self._bgcolor = _bg_colors[attr] + elif attr == 1: + self._bold = True + # elif attr == 2: + # self._faint = True + elif attr == 3: + self._italic = True + elif attr == 4: + self._underline = True + elif attr == 5: + self._blink = True # Slow blink + elif attr == 6: + self._blink = True # Fast blink + elif attr == 7: + self._reverse = True + elif attr == 8: + self._hidden = True + elif attr == 9: + self._strike = True + elif attr == 22: + self._bold = False # Normal intensity + elif attr == 23: + self._italic = False + elif attr == 24: + self._underline = False + elif attr == 25: + self._blink = False + elif attr == 27: + self._reverse = False + elif attr == 28: + self._hidden = False + elif attr == 29: + self._strike = False + elif not attr: + # Reset all style attributes + self._color = None + self._bgcolor = None + self._bold = False + self._underline = False + self._strike = False + self._italic = False + self._blink = False + self._reverse = False + self._hidden = False + + elif attr in (38, 48) and len(attrs) > 1: + n = attrs.pop() + + # 256 colors. + if n == 5 and len(attrs) >= 1: + if attr == 38: + m = attrs.pop() + self._color = _256_colors.get(m) + elif attr == 48: + m = attrs.pop() + self._bgcolor = _256_colors.get(m) + + # True colors. + if n == 2 and len(attrs) >= 3: + try: + color_str = ( + f"#{attrs.pop():02x}{attrs.pop():02x}{attrs.pop():02x}" + ) + except IndexError: + pass + else: + if attr == 38: + self._color = color_str + elif attr == 48: + self._bgcolor = color_str + + def _create_style_string(self) -> str: + """ + Turn current style flags into a string for usage in a formatted text. 
+ """ + result = [] + if self._color: + result.append(self._color) + if self._bgcolor: + result.append("bg:" + self._bgcolor) + if self._bold: + result.append("bold") + if self._underline: + result.append("underline") + if self._strike: + result.append("strike") + if self._italic: + result.append("italic") + if self._blink: + result.append("blink") + if self._reverse: + result.append("reverse") + if self._hidden: + result.append("hidden") + + return " ".join(result) + + def __repr__(self) -> str: + return f"ANSI({self.value!r})" + + def __pt_formatted_text__(self) -> StyleAndTextTuples: + return self._formatted_text + + def format(self, *args: str, **kwargs: str) -> ANSI: + """ + Like `str.format`, but make sure that the arguments are properly + escaped. (No ANSI escapes can be injected.) + """ + return ANSI(FORMATTER.vformat(self.value, args, kwargs)) + + def __mod__(self, value: object) -> ANSI: + """ + ANSI('%s') % value + """ + if not isinstance(value, tuple): + value = (value,) + + value = tuple(ansi_escape(i) for i in value) + return ANSI(self.value % value) + + +# Mapping of the ANSI color codes to their names. +_fg_colors = {v: k for k, v in FG_ANSI_COLORS.items()} +_bg_colors = {v: k for k, v in BG_ANSI_COLORS.items()} + +# Mapping of the escape codes for 256colors to their 'ffffff' value. +_256_colors = {} + +for i, (r, g, b) in enumerate(_256_colors_table.colors): + _256_colors[i] = f"#{r:02x}{g:02x}{b:02x}" + + +def ansi_escape(text: object) -> str: + """ + Replace characters with a special meaning. 
+ """ + return str(text).replace("\x1b", "?").replace("\b", "?") + + +class ANSIFormatter(Formatter): + def format_field(self, value: object, format_spec: str) -> str: + return ansi_escape(format(value, format_spec)) + + +FORMATTER = ANSIFormatter() diff --git a/evalkit_tf437/lib/python3.10/site-packages/prompt_toolkit/shortcuts/__pycache__/utils.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/prompt_toolkit/shortcuts/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e3d88273e309720278300969c87eb630c09b5a4d Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/prompt_toolkit/shortcuts/__pycache__/utils.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/_deprecation_warning.py b/evalkit_tf437/lib/python3.10/site-packages/xformers/_deprecation_warning.py new file mode 100644 index 0000000000000000000000000000000000000000..505ef15e65b7638dc6332f4204793c708e7c5ae3 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/xformers/_deprecation_warning.py @@ -0,0 +1,12 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. + +import warnings + + +def deprecated_function(self): + name = repr(self) # self.__name__ + msg = f"{name} is deprecated and is not maintained anymore. It might be removed in a future version of xFormers" + warnings.warn(msg, FutureWarning, stacklevel=2) diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..098367cd930d66f0f3ef17aae8236150dacab6aa --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/__init__.py @@ -0,0 +1,131 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. + +import torch + +from .fmha import ( + AttentionBias, + AttentionOp, + AttentionOpBase, + AttentionOpDispatch, + LowerTriangularMask, + MemoryEfficientAttentionCutlassFwdFlashBwOp, + MemoryEfficientAttentionCutlassOp, + MemoryEfficientAttentionFlashAttentionOp, + MemoryEfficientAttentionOp, + MemoryEfficientAttentionTritonFwdFlashBwOp, + TritonFlashAttentionOp, + memory_efficient_attention, + memory_efficient_attention_backward, + memory_efficient_attention_forward, + memory_efficient_attention_forward_requires_grad, +) +from .indexing import index_select_cat, scaled_index_add +from .modpar_layers import ColumnParallelLinear, RowParallelLinear +from .rmsnorm import RMSNorm +from .rope_padded import rope_padded +from .seqpar import sequence_parallel_leading_matmul, sequence_parallel_trailing_matmul +from .sequence_parallel_fused_ops import ( + fused_allgather_and_anything, + fused_allgather_and_linear, + fused_anything_and_reducescatter, + fused_linear_and_reducescatter, +) +from .sp24 import Sparse24Tensor, sparsify24, sparsify24_like +from .swiglu_op import ( + SwiGLU, + SwiGLUEagerOp, + SwiGLUFusedOp, + SwiGLUOp, + SwiGLUOpDispatch, + SwiGLUPackedFusedOp, + swiglu, +) +from .tiled_matmul import tiled_matmul +from .unbind import get_stack_strides, stack_or_none, unbind + +# BW compatibility +AttentionMask = AttentionBias + + +def masked_matmul(a, b, mask=None): + if torch.overrides.has_torch_function((a, b, mask)): + return torch.overrides.handle_torch_function( + masked_matmul, (a, b, mask), a, b, mask + ) + + att = a @ b + + if mask is None: + return att + + if mask.dtype == torch.bool: + if mask.ndim == 2: + mask = mask.unsqueeze(0).expand(att.shape[0], -1, -1) + # mask is presumed false == ignore + att[~mask] = float("-inf") + else: + # mask is presumed additive + att += mask + return att + + +__all__ = [ + # 
fmha + "AttentionBias", + "AttentionMask", + "AttentionOp", + "AttentionOpBase", + "AttentionOpDispatch", + "LowerTriangularMask", + "MemoryEfficientAttentionCutlassFwdFlashBwOp", + "MemoryEfficientAttentionCutlassOp", + "MemoryEfficientAttentionFlashAttentionOp", + "MemoryEfficientAttentionOp", + "MemoryEfficientAttentionTritonFwdFlashBwOp", + "TritonFlashAttentionOp", + "memory_efficient_attention", + "memory_efficient_attention_backward", + "memory_efficient_attention_forward", + "memory_efficient_attention_forward_requires_grad", + # indexing + "index_select_cat", + "scaled_index_add", + # modpar_layers + "ColumnParallelLinear", + "RowParallelLinear", + # rmsnorm + "RMSNorm", + # rope_padded + "rope_padded", + # seqpar + "sequence_parallel_leading_matmul", + "sequence_parallel_trailing_matmul", + # sequence_parallel_fused_ops + "fused_allgather_and_anything", + "fused_allgather_and_linear", + "fused_anything_and_reducescatter", + "fused_linear_and_reducescatter", + # swiglu_op + "SwiGLU", + "SwiGLUEagerOp", + "SwiGLUFusedOp", + "SwiGLUOp", + "SwiGLUOpDispatch", + "SwiGLUPackedFusedOp", + "swiglu", + # tiled_matmul + "tiled_matmul", + # unbind + "get_stack_strides", + "stack_or_none", + "unbind", + # sp24 + "sparsify24", + "sparsify24_like", + "Sparse24Tensor", + # . 
+ "masked_matmul", +] diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/__pycache__/differentiable_collectives.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/__pycache__/differentiable_collectives.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd0efc83b549ec4901c67dabbd10d99409023849 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/__pycache__/differentiable_collectives.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/__pycache__/seqpar.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/__pycache__/seqpar.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f7d0e85eb138589dcea0a92aab79e1f9818c612 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/__pycache__/seqpar.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/__pycache__/sequence_parallel_fused_ops.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/__pycache__/sequence_parallel_fused_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41bfb0b6e9444eb29f27936930bd426d282df37b Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/__pycache__/sequence_parallel_fused_ops.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/__pycache__/swiglu_op.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/__pycache__/swiglu_op.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2368f14180589547419ff9de4ef8a8ba338a7629 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/__pycache__/swiglu_op.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/_triton/__init__.py 
b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/_triton/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0a8ab8e0f6ebcc23b8aa5eaa5dfad7dd70c8acee --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/_triton/__init__.py @@ -0,0 +1,19 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. + + +# One reason this module is called `_triton` instead of just `triton` is this: +# https://github.com/openai/triton/commit/c6040bcbd8a046785462481b2830b3fff5fc4aab + +from typing import TYPE_CHECKING + +import xformers + +if TYPE_CHECKING or xformers._is_triton_available(): + from .k_index_select_cat import index_select_cat_bwd, index_select_cat_fwd + from .k_scaled_index_add import scaled_index_add_bwd, scaled_index_add_fwd +else: + index_select_cat_fwd = index_select_cat_bwd = None + scaled_index_add_fwd = scaled_index_add_bwd = None diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/_triton/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/_triton/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3b0309df4ee8bea497412a3b5264e1572d9ca8c Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/_triton/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/_triton/__pycache__/k_index_select_cat.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/_triton/__pycache__/k_index_select_cat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a59d306d3142fa065903cc5e0da92cc5f0b77b7 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/_triton/__pycache__/k_index_select_cat.cpython-310.pyc 
differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/_triton/__pycache__/k_scaled_index_add.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/_triton/__pycache__/k_scaled_index_add.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b8241f38a41256865d85495edc2f565850d7acf Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/_triton/__pycache__/k_scaled_index_add.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/_triton/__pycache__/rmsnorm_kernels.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/_triton/__pycache__/rmsnorm_kernels.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36996705e89248b8127dfe0890e03f91ea8c444b Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/_triton/__pycache__/rmsnorm_kernels.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/_triton/__pycache__/rope_padded_kernels.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/_triton/__pycache__/rope_padded_kernels.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb2ec87a29f186a9f27b6610cf7f5079ca966806 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/_triton/__pycache__/rope_padded_kernels.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/_triton/__pycache__/sequence_parallel_fused_kernels.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/_triton/__pycache__/sequence_parallel_fused_kernels.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bd353cd6ade51d55a9c939920cf91e1e5a06538e Binary files /dev/null and 
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.

import torch
import triton
import triton.language as tl


@triton.jit
def index_select_cat_fwd_kernel(
    output_ptr,  # *Pointer* to output tensor.
    source_ptr,  # *Pointer* to source tensor.
    index_ptr,  # *Pointer* to index tensor.
    num_indices,
    num_cols,
    stride0,  # Stride information of source tensor.
    stride1,
    BLOCK_SIZE_INDEX: tl.constexpr,  # Number of indices each program should process.
    BLOCK_SIZE_COL: tl.constexpr,  # Number of cols each program should process.
):
    # 2D launch grid: axis 0 tiles the selected indices, axis 1 tiles columns.
    pid0 = tl.program_id(axis=0)
    pid1 = tl.program_id(axis=1)

    # Gather the row numbers to read from `source` for this tile of indices.
    indices = pid0 * BLOCK_SIZE_INDEX + tl.arange(0, BLOCK_SIZE_INDEX)
    rows = tl.load(index_ptr + indices, mask=(indices < num_indices))
    cols = pid1 * BLOCK_SIZE_COL + tl.arange(0, BLOCK_SIZE_COL)

    # Read source[rows, cols]; the mask guards both the index and column tails.
    source_offsets = source_ptr + rows[:, None] * stride0 + cols[None, :] * stride1
    mask = (indices[:, None] < num_indices) & (cols[None, :] < num_cols)
    output = tl.load(source_offsets, mask=mask)

    # Write to output[i, cols] — note the output row is the *position* in the
    # index tensor, not the gathered row number.
    output_offsets = output_ptr + indices[:, None] * stride0 + cols[None, :] * stride1
    tl.store(output_offsets, output, mask=mask)


def index_select_cat_fwd(
    output: torch.Tensor,
    source: torch.Tensor,
    index: torch.Tensor,
):
    """Copy ``source[index]`` into ``output`` using a Triton kernel.

    Args:
        output: 2-D CUDA tensor receiving the selected rows; assumed to share
            ``source``'s strides (the kernel reuses them).
        source: 2-D CUDA tensor the rows are gathered from.
        index: 1-D CUDA tensor of row indices into ``source``.

    Returns:
        ``output`` (written in place).

    Raises:
        ValueError: if tensors are on the wrong device, have the wrong rank,
            or ``index`` has more entries than ``source`` has rows.
    """
    if not (source.is_cuda and index.is_cuda):
        raise ValueError("The index tensor and the source tensor must be of type CUDA!")

    if not source.ndim == 2:
        raise ValueError(f"Expected 2-dimensional tensor, got {source.ndim}.")
    if not index.ndim == 1:
        raise ValueError(f"Expected 1-dimensional tensor, got {index.ndim}.")

    num_rows, num_cols = source.shape
    num_indices = index.shape[0]

    # BUGFIX: the original test was `not num_indices < num_rows`, which also
    # rejected the legitimate case num_indices == num_rows (selecting every
    # row) while the message only promises to reject "exceeds".
    if num_indices > num_rows:
        raise ValueError(
            "The number of indices cannot exceed the number of rows in the source matrix."
        )

    stride0, stride1 = source.stride(0), source.stride(1)

    def grid(meta):
        return (
            triton.cdiv(num_indices, meta["BLOCK_SIZE_INDEX"]),
            triton.cdiv(num_cols, meta["BLOCK_SIZE_COL"]),
        )

    index_select_cat_fwd_kernel[grid](
        output,
        source,
        index,
        num_indices,
        num_cols,
        stride0,
        stride1,
        BLOCK_SIZE_INDEX=1,
        BLOCK_SIZE_COL=512,
    )

    return output
@triton.jit
def index_select_cat_bwd_kernel(
    grad_source_ptr,  # *Pointer* to grad_source tensor.
    index_ptr,  # *Pointer* to index tensor.
    grad_output_ptr,  # *Pointer* to grad_output tensor.
    num_rows,
    num_indices,
    num_cols,
    stride0,  # Stride information of input and source tensor.
    stride1,
    BLOCK_SIZE_INDEX: tl.constexpr,  # Number of indices each program should process.
    BLOCK_SIZE_COL: tl.constexpr,  # Number of cols each program should process.
):
    # 2D launch grid: axis 0 tiles grad_output rows, axis 1 tiles columns.
    pid0 = tl.program_id(axis=0)
    pid1 = tl.program_id(axis=1)

    cols = pid1 * BLOCK_SIZE_COL + tl.arange(0, BLOCK_SIZE_COL)

    # Load a tile of grad_output (promoted to fp32 for the store below).
    grad_output_indices = pid0 * BLOCK_SIZE_INDEX + tl.arange(0, BLOCK_SIZE_INDEX)
    grad_output_offsets = (
        grad_output_ptr
        + grad_output_indices[:, None] * stride0
        + cols[None, :] * stride1
    )
    grad_output_mask = (grad_output_indices[:, None] < num_indices) & (
        cols[None, :] < num_cols
    )
    grad_output = tl.load(grad_output_offsets, mask=grad_output_mask).to(tl.float32)

    # Scatter destination rows in grad_source come from the index tensor.
    grad_source_indices = tl.load(
        index_ptr + grad_output_indices, mask=(grad_output_indices < num_indices)
    )
    grad_source_offsets = (
        grad_source_ptr
        + grad_source_indices[:, None] * stride0
        + cols[None, :] * stride1
    )

    # Plain store (not atomic): assumes indices are unique — duplicate indices
    # would race. TODO(review): confirm callers guarantee uniqueness.
    tl.store(grad_source_offsets, grad_output, mask=grad_output_mask)


def index_select_cat_bwd(
    grad_source: torch.Tensor,
    index: torch.Tensor,
    grad_output: torch.Tensor,
):
    """Scatter ``grad_output`` rows into ``grad_source[index]`` (backward of
    :func:`index_select_cat_fwd`).

    Args:
        grad_source: 2-D CUDA tensor written in place.
        index: 1-D tensor of destination row indices, one per grad_output row.
        grad_output: 2-D CUDA tensor of incoming gradients.

    Raises:
        ValueError: on wrong device, wrong rank, mismatched shapes or strides.
    """
    if not (grad_source.is_cuda and grad_output.is_cuda):
        raise ValueError("The grad_source and grad_output tensor must be of type CUDA!")

    if not (grad_source.ndim == 2 and grad_output.ndim == 2):
        # BUGFIX: message said "three-dimensional" but the check is for 2-D.
        raise ValueError(
            f"The grad_source and grad_output must be two-dimensional "
            f"(got {grad_source.ndim} and {grad_output.ndim})!"
        )
    if not grad_source.shape[1] == grad_output.shape[1]:
        raise ValueError(
            f"The number of elements along dimension 1 of grad_source and grad_output must be the same "
            f"(got {grad_source.shape[1]} and {grad_output.shape[1]})"
        )

    num_rows, num_cols = grad_source.shape
    num_indices, num_cols = grad_output.shape
    if not num_rows >= num_indices:
        raise ValueError(
            f"The number of elements along dimension 0 of grad_source must be larger than that of grad_output "
            f"(got {num_rows} and {num_indices})!"
        )
    if not index.shape[0] == num_indices:
        raise ValueError(
            f"The number of indices and the number of elements along dimension 0 of grad_output must match "
            f"(got {index.shape[0]} and {num_indices})!"
        )

    stride0, stride1 = grad_source.stride(0), grad_source.stride(1)
    # The kernel uses a single stride pair for both tensors, hence this check.
    if not (grad_output.stride(0) == stride0 and grad_output.stride(1) == stride1):
        raise ValueError(
            f"The strides of the grad_source and grad_output tensors must match "
            f"(got {stride0} vs. {grad_output.stride(0)}, {stride1} vs. {grad_output.stride(1)})!"
        )

    def grid(meta):
        return (
            triton.cdiv(num_indices, meta["BLOCK_SIZE_INDEX"]),
            triton.cdiv(num_cols, meta["BLOCK_SIZE_COL"]),
        )

    index_select_cat_bwd_kernel[grid](
        grad_source,
        index,
        grad_output,
        num_rows,
        num_indices,
        num_cols,
        grad_source.stride(0),
        grad_source.stride(1),
        BLOCK_SIZE_INDEX=1,
        BLOCK_SIZE_COL=512,
    )
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import torch
import triton
import triton.language as tl

# Triton moved libdevice math to `tl.math` in newer releases; pick whichever
# namespace exists so `rsqrt` resolves on both old and new Triton.
if hasattr(tl, "libdevice"):
    tl_math = tl.libdevice
else:
    tl_math = tl.math


@triton.jit
def _rms_norm_kernel(
    x_ptr,
    h1_ptr,
    w_ptr,
    eps,
    stride,
    N_COLS: tl.constexpr,
    BLOCK_SIZE: tl.constexpr,
    INCLUDE_WEIGHT: tl.constexpr,
):
    # One program per row: h1[row] = x[row] * rsqrt(mean(x[row]**2) + eps) [* w]
    row = tl.program_id(0)
    x_ptr += row * stride
    h1_ptr += row * stride

    # Pass 1: accumulate the sum of squares over the row in BLOCK_SIZE chunks.
    _mean = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
    for offset in range(0, N_COLS, BLOCK_SIZE):
        cols = offset + tl.arange(0, BLOCK_SIZE)
        a = tl.load(
            x_ptr + cols, mask=cols < N_COLS, other=0.0, eviction_policy="evict_last"
        ).to(tl.float32)
        _mean += a * a
    rstd = tl_math.rsqrt((tl.sum(_mean, axis=0) / N_COLS) + eps)
    # Pass 2: re-read the row, scale, optionally multiply by the weight vector.
    for offset in range(0, N_COLS, BLOCK_SIZE):
        cols = offset + tl.arange(0, BLOCK_SIZE)
        mask = cols < N_COLS
        a = tl.load(
            x_ptr + cols, mask=mask, other=0.0, eviction_policy="evict_first"
        ).to(tl.float32)
        if INCLUDE_WEIGHT:
            w = tl.load(w_ptr + cols, mask=mask)
            tl.store(h1_ptr + cols, a * rstd * w, mask=mask)
        else:
            tl.store(h1_ptr + cols, a * rstd, mask=mask)


@triton.jit
def _rms_norm_add_kernel(
    x_ptr,
    y_ptr,
    h1_ptr,
    w_ptr,
    eps,
    stride,
    N_COLS: tl.constexpr,
    BLOCK_SIZE: tl.constexpr,
    INCLUDE_WEIGHT: tl.constexpr,
):
    # Fused residual-add + RMS norm. NOTE: x is updated IN PLACE to x + y
    # during the first pass; h1 receives the normalized result.
    row = tl.program_id(0)
    x_ptr += row * stride
    y_ptr += row * stride
    h1_ptr += row * stride

    _mean = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
    for offset in range(0, N_COLS, BLOCK_SIZE):
        cols = offset + tl.arange(0, BLOCK_SIZE)
        mask = cols < N_COLS
        ax = tl.load(
            x_ptr + cols, mask=mask, other=0.0, eviction_policy="evict_last"
        ).to(tl.float32)
        ay = tl.load(
            y_ptr + cols, mask=mask, other=0.0, eviction_policy="evict_first"
        ).to(tl.float32)
        a = ax + ay
        # Persist the sum back into x so pass 2 (and the caller) sees x + y.
        tl.store(x_ptr + cols, a, mask=mask)
        _mean += a * a
    rstd = tl_math.rsqrt((tl.sum(_mean, axis=0) / N_COLS) + eps)
    for offset in range(0, N_COLS, BLOCK_SIZE):
        cols = offset + tl.arange(0, BLOCK_SIZE)
        mask = cols < N_COLS
        a = tl.load(
            x_ptr + cols, mask=mask, other=0.0, eviction_policy="evict_first"
        ).to(tl.float32)
        if INCLUDE_WEIGHT:
            w = tl.load(w_ptr + cols, mask=mask)
            tl.store(h1_ptr + cols, a * rstd * w, mask=mask)
        else:
            tl.store(h1_ptr + cols, a * rstd, mask=mask)


def _rms_norm_forward(x, attn_norm_weights, eps):
    """RMS-normalize `x` over its last dimension; returns a new tensor.

    `attn_norm_weights` is an optional per-feature scale (may be None).
    """
    if not x.is_contiguous():
        raise ValueError("data must be contiguous")
    if attn_norm_weights is not None:
        if not attn_norm_weights.is_contiguous():
            raise ValueError("weights must be contiguous")
    out = torch.empty_like(x)
    # Flatten leading dims: the kernel is launched with one program per row.
    x_arg = x.reshape(-1, x.shape[-1])
    M, N = x_arg.shape
    # Less than 64KB per feature: enqueue fused kernel
    MAX_FUSED_SIZE = 65536 // x.element_size()
    BLOCK_SIZE = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
    BLOCK_SIZE = max(BLOCK_SIZE, 128)
    BLOCK_SIZE = min(BLOCK_SIZE, 8192)
    # heuristics for number of warps
    num_warps = min(max(BLOCK_SIZE // 256, 1), 8)
    _rms_norm_kernel[(M,)](
        x_arg,
        out,
        attn_norm_weights,
        eps,
        x_arg.stride(0),
        N,
        BLOCK_SIZE=BLOCK_SIZE,
        num_warps=num_warps,
        INCLUDE_WEIGHT=attn_norm_weights is not None,
    )
    return out


def _rms_norm_add_forward(x, y, attn_norm_weights, eps):
    # x, y contiguous of same shape [..., n]
    # output of same shape, normed over the last dim.
    # NOTE(review): x is also mutated in place to x + y by the kernel.
    if not x.is_contiguous():
        raise ValueError("x must be contiguous")
    if not y.is_contiguous():
        raise ValueError("y must be contiguous")
    if attn_norm_weights is not None:
        if not attn_norm_weights.is_contiguous():
            raise ValueError("weights must be contiguous")
    out = torch.empty_like(x)
    x_arg = x.reshape(-1, x.shape[-1])
    y_arg = y.reshape(-1, x.shape[-1])
    M, N = x_arg.shape
    # Less than 64KB per feature: enqueue fused kernel
    MAX_FUSED_SIZE = 65536 // x.element_size()
    BLOCK_SIZE = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
    BLOCK_SIZE = max(BLOCK_SIZE, 128)
    BLOCK_SIZE = min(BLOCK_SIZE, 8192)
    # heuristics for number of warps
    num_warps = min(max(BLOCK_SIZE // 256, 1), 8)
    _rms_norm_add_kernel[(M,)](
        x_arg,
        y_arg,
        out,
        attn_norm_weights,
        eps,
        x_arg.stride(0),
        N,
        BLOCK_SIZE=BLOCK_SIZE,
        num_warps=num_warps,
        INCLUDE_WEIGHT=attn_norm_weights is not None,
    )
    return out
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import triton  # type: ignore
import triton.language as tl  # type: ignore

# Triton moved libdevice math to `tl.math` in newer releases; use whichever
# namespace exists so `pow` resolves on both old and new Triton.
if hasattr(tl, "libdevice"):
    tl_math = tl.libdevice
else:
    tl_math = tl.math


@triton.jit
def _rope_padded_kernel(
    xq,
    xk,
    xv,
    out_q,
    cache_k,
    cache_v,
    seqstartq,
    seqstartk,
    seqlenk,
    theta,
    k_start: tl.constexpr,
    v_start: tl.constexpr,
    n_groups,
    dim: tl.constexpr,  # dimension of each head
    stride_xqM,
    stride_xqG,
    stride_xqH,
    stride_xkM,
    stride_xkG,
    stride_xkH,
    stride_xvM,
    stride_xvG,
    stride_xvH,
    stride_cachekM,
    stride_cachekG,
    stride_cachekH,
    stride_cachevM,
    stride_cachevG,
    stride_cachevH,
    stride_seqstartq,
    stride_seqstartk,
    stride_seqlenk,
    stride_outqM,
    stride_outqG,
    stride_outqH,
    internal_dtype: tl.constexpr,
    # If True, seqstartq and seqstartk are not used but rather we
    # assume that every batch element has the same number of
    # queries (i.e. num_queries := tl.num_programs(1) )
    # and the same cache space cache_padding_length.
    # Always False when called below.
    const_batch_strides: tl.constexpr,
    # If const_batch_strides==True, the common cache length for each batch element.
    # (Only the first seqlenk[i] elements are actually in use, and only the last
    # num_queries of those are actually written to.)
    cache_padding_length,
    # offset added to all values in seqlenk before using them.
    # Always 0 when called below.
    seqlenk_shift: tl.constexpr,
    BLOCK_SIZE: tl.constexpr,
    adjacents: tl.constexpr,
):
    """Apply rotary position embedding (RoPE) to q/k and copy k/v into caches.

    Each letter in this diagram is a whole row of length dim.

    INPUT         xq        xk      xv

    head_dim ->

    batch      qqqqqq      kk      vv
      |        qqqqqq      kk      vv
      v        qqqqqq      kk      vv

    head_idx: (goes across all heads of all 3 inputs)
      0 .. k_start .. v_start .. n_total_heads

    Output is to out_q (same shape as xq), an xk-shaped part
    of cache_k and an xv-shaped part of cache_v
    """
    # 3D grid: (batch element, query position within batch element, group*head).
    batch_elt = tl.program_id(0)
    query_pos_in_batch_elt = tl.program_id(1)
    group_head_idx = tl.program_id(2)
    group_idx = group_head_idx % n_groups
    head_idx = group_head_idx // n_groups

    if internal_dtype == "f32":
        theta = theta.to(tl.float32)
    elif internal_dtype == "f64":
        theta = theta.to(tl.float64)

    if const_batch_strides:
        query_pos = query_pos_in_batch_elt + tl.num_programs(1) * batch_elt
        end_query_pos = tl.num_programs(1) * (batch_elt + 1)
    else:
        query_pos = query_pos_in_batch_elt + tl.load(
            seqstartq + batch_elt * stride_seqstartq
        )
        end_query_pos = tl.load(seqstartq + (batch_elt + 1) * stride_seqstartq)
        # Grid axis 1 is sized for the longest batch element; shorter ones
        # bail out here.
        if query_pos >= end_query_pos:
            return

    # This program handles exactly one of q / k / v, decided by head_idx.
    is_q = head_idx < k_start
    is_v = head_idx >= v_start

    xq += query_pos * stride_xqM + head_idx * stride_xqH + group_idx * stride_xqG
    out_q += (
        query_pos * stride_outqM + head_idx * stride_outqH + group_idx * stride_outqG
    )

    if const_batch_strides:
        cache_start = cache_padding_length * batch_elt
    else:
        cache_start = tl.load(seqstartk + batch_elt * stride_seqstartk)
    end_of_batch_elt_cache = (
        cache_start + tl.load(seqlenk + batch_elt * stride_seqlenk) + seqlenk_shift
    )

    # New keys/values land at the *end* of this batch element's cache slice.
    cache_pos = end_of_batch_elt_cache - (end_query_pos - query_pos)
    seq_pos = cache_pos - cache_start
    cache_k += (
        (head_idx - k_start) * stride_cachekH
        + cache_pos * stride_cachekM
        + group_idx * stride_cachekG
    )
    xk += (
        query_pos * stride_xkM
        + (head_idx - k_start) * stride_xkH
        + group_idx * stride_xkG
    )
    in_qk = tl.where(is_q, xq, xk)
    out_qk = tl.where(is_q, out_q, cache_k)

    cache_v += (
        (head_idx - v_start) * stride_cachevH
        + cache_pos * stride_cachevM
        + group_idx * stride_cachevG
    )
    xv += (
        query_pos * stride_xvM
        + (head_idx - v_start) * stride_xvH
        + group_idx * stride_xvG
    )

    # Select this program's actual input/output row pointers.
    out = tl.where(is_v, cache_v, out_qk)
    x_in = tl.where(is_v, xv, in_qk)

    # Rotate pairs of features; `adjacents` picks (2i, 2i+1) pairing vs
    # (i, i + dim/2) pairing.
    for offset in range(0, dim // 2, BLOCK_SIZE // 2):
        c = tl.arange(0, BLOCK_SIZE // 2)
        powers = (offset + c) * 2.0
        if adjacents:
            cols_re = (offset + c) * 2
            cols_im = cols_re + 1
        else:
            cols_re = offset + c
            cols_im = cols_re + dim // 2

        mask = cols_im < dim

        re_x = tl.load(x_in + cols_re, mask=mask)
        im_x = tl.load(x_in + cols_im, mask=mask)
        # freqs = seq_pos / (theta ** (powers / dim))
        freqs = seq_pos * tl_math.pow(theta, powers / (-dim))
        sines = tl.sin(freqs)
        cosines = tl.cos(freqs)
        re_out = re_x * cosines - im_x * sines
        im_out = im_x * cosines + re_x * sines

        # Values are copied through unrotated.
        re_out_ = tl.where(is_v, re_x, re_out)
        im_out_ = tl.where(is_v, im_x, im_out)
        if internal_dtype == "f64":
            if re_x.dtype == tl.bfloat16:
                # triton 2.0.0 crashes if you try to convert
                # float64 directly to bfloat16, so make an intermediate step.
                re_out_ = re_out_.to(tl.float32)
                im_out_ = im_out_.to(tl.float32)
        tl.store(out + cols_re, re_out_, mask=mask)
        tl.store(out + cols_im, im_out_, mask=mask)
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.


import itertools
from typing import List, Tuple

import torch
import triton
import triton.language as tl
from triton.ops.matmul_perf_model import early_config_prune, estimate_matmul_time


def init_to_zero(*names):
    # Returns an autotuner pre_hook that zeroes the named kernel arguments
    # (needed before SPLIT_K atomic accumulation). With no names it's a no-op.
    def result(nargs):
        for name in names:
            nargs[name].zero_()

    return result


def gen_config(
    block_m: int,
    block_n: int,
    block_k: int,
    stages: int,
    warps: int,
    split_k: int = 1,
    group_m: int = 8,
) -> triton.Config:
    """A more compact way to define a triton.Config, so it fits on one line"""

    return triton.Config(
        {
            "BLOCK_M": block_m,
            "BLOCK_N": block_n,
            "BLOCK_K": block_k,
            "SPLIT_K": split_k,
            "GROUP_M": group_m,
        },
        num_stages=stages,
        num_warps=warps,
        # With SPLIT_K > 1 partial results are atomically added into all nine
        # C tiles, so they must start at zero.
        pre_hook=init_to_zero(*[f"C{i+1}{j+1}" for i in range(3) for j in range(3)])
        if split_k > 1
        else init_to_zero(),
    )


BASIC_MATMUL_CONFIGS = [
    gen_config(block_m=128, block_n=256, block_k=32, stages=3, warps=8),
    gen_config(block_m=256, block_n=128, block_k=32, stages=3, warps=8),
    gen_config(block_m=256, block_n=64, block_k=32, stages=4, warps=4),
    gen_config(block_m=64, block_n=256, block_k=32, stages=4, warps=4),
    gen_config(block_m=128, block_n=128, block_k=32, stages=4, warps=4),
    gen_config(block_m=128, block_n=64, block_k=32, stages=4, warps=4),
    gen_config(block_m=64, block_n=128, block_k=32, stages=4, warps=4),
    gen_config(block_m=128, block_n=32, block_k=32, stages=4, warps=4),
    gen_config(block_m=64, block_n=32, block_k=32, stages=5, warps=2),
]


# Larger BLOCK_K for int8: the tensor cores consume int8 at twice the rate.
INT8_MATMUL_CONFIGS = [
    gen_config(block_m=128, block_n=256, block_k=128, stages=3, warps=8),
    gen_config(block_m=256, block_n=128, block_k=128, stages=3, warps=8),
    gen_config(block_m=256, block_n=64, block_k=128, stages=4, warps=4),
    gen_config(block_m=64, block_n=256, block_k=128, stages=4, warps=4),
    gen_config(block_m=128, block_n=128, block_k=128, stages=4, warps=4),
    gen_config(block_m=128, block_n=64, block_k=64, stages=4, warps=4),
    gen_config(block_m=64, block_n=128, block_k=64, stages=4, warps=4),
    gen_config(block_m=128, block_n=32, block_k=64, stages=4, warps=4),
    gen_config(block_m=64, block_n=32, block_k=64, stages=5, warps=2),
]


IO_BOUND_MATMUL_CONFIGS_STAGES = [2, 3, 4, 5, 6]
IO_BOUND_MATMUL_CONFIGS_BLOCK_M = [16, 32]
IO_BOUND_MATMUL_CONFIGS_BLOCK_K = [32, 64]
IO_BOUND_MATMUL_CONFIGS_BLOCK_N = [32, 64, 128, 256]
IO_BOUND_MATMUL_CONFIGS_SPLIT_K = [1, 2, 4, 8, 16]


# Cartesian product of small-tile configs for memory-bound shapes.
IO_BOUND_MATMUL_CONFIGS = [
    gen_config(
        block_m=block_m,
        block_n=block_n,
        block_k=block_k,
        stages=stages,
        warps=2 if block_n <= 64 else 4,
        split_k=split_k,
    )
    for stages, block_m, block_k, block_n, split_k in itertools.product(
        IO_BOUND_MATMUL_CONFIGS_STAGES,
        IO_BOUND_MATMUL_CONFIGS_BLOCK_M,
        IO_BOUND_MATMUL_CONFIGS_BLOCK_K,
        IO_BOUND_MATMUL_CONFIGS_BLOCK_N,
        IO_BOUND_MATMUL_CONFIGS_SPLIT_K,
    )
]


TRITON_CONFIGS = BASIC_MATMUL_CONFIGS + INT8_MATMUL_CONFIGS + IO_BOUND_MATMUL_CONFIGS


def our_estimate_matmul_time(
    A11, B11, C11, M1, M2, M3, N1, N2, N3, K1, K2, K3, **kwargs
):
    """Call into Triton's upstream cost model, with the right args

    The upstream function expects arguments to have certain names. Since we
    renamed a few of them in our implementation, we rename them back.

    At the time of writing (July 2023) the arguments that Triton expects are:
    M, N, K, A, B, C, BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps, num_stages.

    """
    # The cost model sees the tiled problem as one big (sum of tiles) matmul.
    return estimate_matmul_time(
        M=M1 + M2 + M3, N=N1 + N2 + N3, K=K1 + K2 + K3, A=A11, B=B11, C=C11, **kwargs
    )


def our_early_config_prune(config, named_args):
    # Same renaming shim as above, for the config-pruning hook.
    new_named_args = named_args.copy()
    new_named_args["M"] = named_args["M1"] + named_args["M2"] + named_args["M3"]
    new_named_args["N"] = named_args["N1"] + named_args["N2"] + named_args["N3"]
    new_named_args["K"] = named_args["K1"] + named_args["K2"] + named_args["K3"]
    new_named_args["A"] = named_args["A11"]
    new_named_args["B"] = named_args["B11"]
    new_named_args["C"] = named_args["C11"]
    return early_config_prune(config, new_named_args)


@triton.autotune(
    configs=TRITON_CONFIGS,
    key=["M1", "M2", "M3", "N1", "N2", "N3", "K1", "K2", "K3"],
    prune_configs_by={
        "early_config_prune": our_early_config_prune,
        "perf_model": our_estimate_matmul_time,
        "top_k": 10,
    },
)
@triton.heuristics(
    {
        "EVEN_K": lambda args: all(
            k % (args["BLOCK_K"] * args["SPLIT_K"]) == 0
            for k in [args["K1"], args["K2"], args["K3"]]
        ),
    }
)
@triton.jit()
def _xformers_tiled_matmul_kernel(
    # The operands and output are each given as a (up to) 3x3 grid of tiles;
    # unused tiles are duplicated pointers with zero-sized dims.
    A11,
    A12,
    A13,
    A21,
    A22,
    A23,
    A31,
    A32,
    A33,
    B11,
    B12,
    B13,
    B21,
    B22,
    B23,
    B31,
    B32,
    B33,
    C11,
    C12,
    C13,
    C21,
    C22,
    C23,
    C31,
    C32,
    C33,
    M1,
    M2,
    M3,
    N1,
    N2,
    N3,
    K1,
    K2,
    K3,
    stride_am1,
    stride_am2,
    stride_am3,
    stride_ak1,
    stride_ak2,
    stride_ak3,
    stride_bk1,
    stride_bk2,
    stride_bk3,
    stride_bn1,
    stride_bn2,
    stride_bn3,
    stride_cm1,
    stride_cm2,
    stride_cm3,
    stride_cn1,
    stride_cn2,
    stride_cn3,
    BLOCK_M: tl.constexpr,  # DO NOT CHANGE NAME: MUST MATCH PERF MODEL
    BLOCK_N: tl.constexpr,  # DO NOT CHANGE NAME: MUST MATCH PERF MODEL
    BLOCK_K: tl.constexpr,  # DO NOT CHANGE NAME: MUST MATCH PERF MODEL
    GROUP_M: tl.constexpr,
    SPLIT_K: tl.constexpr,  # DO NOT CHANGE NAME: MUST MATCH PERF MODEL
    EVEN_K: tl.constexpr,
    ACC_TYPE: tl.constexpr,
):
    # matrix multiplication
    pid = tl.program_id(0)
    pid_k = tl.program_id(1)
    grid_m1 = tl.cdiv(M1, BLOCK_M)
    grid_m2 = tl.cdiv(M2, BLOCK_M)
    grid_m3 = tl.cdiv(M3, BLOCK_M)
    grid_n1 = tl.cdiv(N1, BLOCK_N)
    grid_n2 = tl.cdiv(N2, BLOCK_N)
    grid_n3 = tl.cdiv(N3, BLOCK_N)
    grid_m = grid_m1 + grid_m2 + grid_m3
    grid_n = grid_n1 + grid_n2 + grid_n3

    # re-order program ID for better L2 performance
    width = GROUP_M * grid_n
    group_id = pid // width
    group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
    pid_m = group_id * GROUP_M + (pid % group_size)
    pid_n = (pid % width) // (group_size)

    # We use tl.where to circumvent a regression in alignment auto-detection:
    # https://github.com/openai/triton/issues/1784

    # Pick the row/column of tiles this program works on.
    A1 = tl.where(pid_m < grid_m1, A11, tl.where(pid_m < grid_m1 + grid_m2, A21, A31))
    A2 = tl.where(pid_m < grid_m1, A12, tl.where(pid_m < grid_m1 + grid_m2, A22, A32))
    A3 = tl.where(pid_m < grid_m1, A13, tl.where(pid_m < grid_m1 + grid_m2, A23, A33))
    B1 = tl.where(pid_n < grid_n1, B11, tl.where(pid_n < grid_n1 + grid_n2, B12, B13))
    B2 = tl.where(pid_n < grid_n1, B21, tl.where(pid_n < grid_n1 + grid_n2, B22, B23))
    B3 = tl.where(pid_n < grid_n1, B31, tl.where(pid_n < grid_n1 + grid_n2, B32, B33))
    C = tl.where(
        pid_m < grid_m1,
        tl.where(pid_n < grid_n1, C11, tl.where(pid_n < grid_n1 + grid_n2, C12, C13)),
        tl.where(
            pid_m < grid_m1 + grid_m2,
            tl.where(
                pid_n < grid_n1, C21, tl.where(pid_n < grid_n1 + grid_n2, C22, C23)
            ),
            tl.where(
                pid_n < grid_n1, C31, tl.where(pid_n < grid_n1 + grid_n2, C32, C33)
            ),
        ),
    )
    M = tl.where(pid_m < grid_m1, M1, tl.where(pid_m < grid_m1 + grid_m2, M2, M3))
    N = tl.where(pid_n < grid_n1, N1, tl.where(pid_n < grid_n1 + grid_n2, N2, N3))
    stride_ak = tl.where(
        pid_m < grid_m1,
        stride_ak1,
        tl.where(pid_m < grid_m1 + grid_m2, stride_ak2, stride_ak3),
    )
    stride_bk = tl.where(
        pid_n < grid_n1,
        stride_bk1,
        tl.where(pid_n < grid_n1 + grid_n2, stride_bk2, stride_bk3),
    )
    stride_cn = tl.where(
        pid_m < grid_m1,
        stride_cn1,
        tl.where(pid_m < grid_m1 + grid_m2, stride_cn2, stride_cn3),
    )
    stride_cm = tl.where(
        pid_n < grid_n1,
        stride_cm1,
        tl.where(pid_n < grid_n1 + grid_n2, stride_cm2, stride_cm3),
    )
    # Rebase pid_m/pid_n to be relative to the selected tile.
    pid_m = tl.where(
        pid_m < grid_m1,
        pid_m,
        tl.where(pid_m < grid_m1 + grid_m2, pid_m - grid_m1, pid_m - grid_m1 - grid_m2),
    )
    pid_n = tl.where(
        pid_n < grid_n1,
        pid_n,
        tl.where(pid_n < grid_n1 + grid_n2, pid_n - grid_n1, pid_n - grid_n1 - grid_n2),
    )

    # do matrix multiplication
    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
    rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
    # pointers
    acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)
    grid_k1 = tl.cdiv(K1, BLOCK_K)
    grid_k2 = tl.cdiv(K2, BLOCK_K)
    grid_k3 = tl.cdiv(K3, BLOCK_K)
    # Walk the K tiles (strided by SPLIT_K across programs on grid axis 1).
    for tile in range(pid_k, grid_k1 + grid_k2 + grid_k3, SPLIT_K):
        A = tl.where(tile < grid_k1, A1, tl.where(tile < grid_k1 + grid_k2, A2, A3))
        B = tl.where(tile < grid_k1, B1, tl.where(tile < grid_k1 + grid_k2, B2, B3))
        K = tl.where(tile < grid_k1, K1, tl.where(tile < grid_k1 + grid_k2, K2, K3))
        stride_am = tl.where(
            tile < grid_k1,
            stride_am1,
            tl.where(tile < grid_k1 + grid_k2, stride_am2, stride_am3),
        )
        stride_bn = tl.where(
            tile < grid_k1,
            stride_bn1,
            tl.where(tile < grid_k1 + grid_k2, stride_bn2, stride_bn3),
        )
        my_tile = tl.where(
            tile < grid_k1,
            tile,
            tl.where(
                tile < grid_k1 + grid_k2, tile - grid_k1, tile - grid_k1 - grid_k2
            ),
        )
        rk = my_tile * BLOCK_K + tl.arange(0, BLOCK_K)
        Ain = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
        Bin = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)
        if EVEN_K:
            a = tl.load(Ain)
            b = tl.load(Bin)
        else:
            a = tl.load(Ain, mask=rk[None, :] < K, other=0.0)
            b = tl.load(Bin, mask=rk[:, None] < K, other=0.0)
        acc += tl.dot(a, b, allow_tf32=False)
    acc = acc.to(C.dtype.element_ty)
    # rematerialize rm and rn to save registers
    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    C = C + (rm[:, None] * stride_cm + rn[None, :] * stride_cn)
    mask = (rm < M)[:, None] & (rn < N)[None, :]
    # handles write-back with reduction-splitting
    if SPLIT_K == 1:
        tl.store(C, acc, mask=mask)
    else:
        tl.atomic_add(C, acc, mask=mask)


def _check_row_or_column(row_or_col_type, row_or_col_idx, tensor_name, dim_name, vals):
    # All tensors in one row (or column) of the tile grid must share the same
    # stride along the given dim; returns that common stride.
    # NOTE(review): validation via assert is stripped under `python -O`.
    assert len(vals) > 0
    for pos, val in enumerate(vals[1:]):
        assert val == vals[0], (
            f"the tensors on {row_or_col_type} {row_or_col_idx} of the {tensor_name} "
            f"must all have the same stride along the {dim_name} dimension, got "
            f"{vals[0]} at position 0 and {val} at position {pos + 1}"
        )
    return vals[0]


def _get_strides(
    ts: List[List[torch.Tensor]], tensor_name, dim_0_name, dim_1_name
) -> Tuple[List[int], List[int]]:
    """Collect per-row and per-column strides of a tile grid, padded to 3."""
    strides_0 = [
        _check_row_or_column(
            "column", idx, tensor_name, dim_0_name, [y.stride(0) for y in x]
        )
        for idx, x in enumerate(zip(*ts))
    ]
    strides_1 = [
        _check_row_or_column(
            "row", idx, tensor_name, dim_1_name, [y.stride(1) for y in x]
        )
        for idx, x in enumerate(ts)
    ]
    # At least one of the two dims must be contiguous (stride 1).
    assert all(s == 1 for s in strides_0) or all(s == 1 for s in strides_1)
    # Pad missing grid rows/cols; keep 1 for the contiguous dim so dummy tiles
    # stay well-formed, 0 otherwise.
    while len(strides_0) < 3:
        strides_0.append(1 if strides_0[0] == 1 else 0)
    while len(strides_1) < 3:
        strides_1.append(1 if strides_1[0] == 1 else 0)
    return strides_0, strides_1


def _launch_triton_matmul(
    a: List[List[torch.Tensor]],
    b: List[List[torch.Tensor]],
    c: List[List[torch.Tensor]],
    ms: List[int],
    ns: List[int],
    ks: List[int],
) -> None:
    """Launch the tiled matmul kernel on (up to) 3x3 grids of operand tiles.

    Missing tiles are filled by repeating the last row/column pointer with a
    zero size, so the kernel always receives 9 tensors per operand.
    """
    strides_am, strides_ak = _get_strides(a, "first operand", "m", "k")
    strides_bk, strides_bn = _get_strides(b, "second operand", "k", "n")
    strides_cm, strides_cn = _get_strides(c, "output", "m", "n")

    # accumulator types
    ACC_TYPE = (
        tl.float32
        if c[0][0].dtype in [torch.float16, torch.bfloat16, torch.float32]
        else tl.int32
    )

    # launch kernel
    def grid(META):
        return (
            sum(triton.cdiv(m, META["BLOCK_M"]) for m in ms)
            * sum(triton.cdiv(n, META["BLOCK_N"]) for n in ns),
            META["SPLIT_K"],
        )

    _xformers_tiled_matmul_kernel[grid](
        *[
            a[min(i, len(a) - 1)][min(j, len(a[0]) - 1)]
            for i in range(3)
            for j in range(3)
        ],
        *[
            b[min(i, len(b) - 1)][min(j, len(b[0]) - 1)]
            for i in range(3)
            for j in range(3)
        ],
        *[
            c[min(i, len(c) - 1)][min(j, len(c[0]) - 1)]
            for i in range(3)
            for j in range(3)
        ],
        *[ms[i] if len(ms) > i else 0 for i in range(3)],
        *[ns[i] if len(ns) > i else 0 for i in range(3)],
        *[ks[i] if len(ks) > i else 0 for i in range(3)],
        *strides_am,
        *strides_ak,
        *strides_bk,
        *strides_bn,
        *strides_cm,
        *strides_cn,
        ACC_TYPE=ACC_TYPE,
    )
# NOTE(review): this span of the pasted diff opens mid-way through
# `get_operator` (its `def` line sits just above this span), so the
# function is reconstructed here in full to keep the unit readable.
def get_operator(library: str, name: str):
    """Fetch ``torch.ops.<library>.<name>`` from the PyTorch dispatcher.

    If the operator was never registered (e.g. xformers native code was
    not built), a stub is returned instead; the stub raises a
    ``RuntimeError`` only when it is actually invoked.
    """

    def no_such_operator(*args, **kwargs):
        raise RuntimeError(
            f"No such operator {library}::{name} - did you forget to build xformers with `python setup.py develop`?"
        )

    try:
        return getattr(getattr(torch.ops, library), name)
    except (RuntimeError, AttributeError):
        return no_such_operator


def get_xformers_operator(name: str):
    """Shorthand: look *name* up in the ``xformers`` operator namespace."""
    return get_operator("xformers", name)


class BaseOperator:
    # Subclasses provide: the dispatcher callable, a display name, and a
    # category used for grouping.
    OPERATOR: Any
    NAME: str
    OPERATOR_CATEGORY: str

    @classmethod
    def is_available(cls) -> bool:
        """``False`` when the native operator is missing from this build."""
        missing = (
            cls.OPERATOR is None or cls.OPERATOR.__name__ == "no_such_operator"
        )
        return not missing

    @classmethod
    def operator_flop(cls, *inputs) -> int:
        """Calculate number of FLOP given inputs to `OPERATOR` (-1 = unknown)."""
        return -1


OPERATORS_REGISTRY: List[Type[BaseOperator]] = []
FUNC_TO_XFORMERS_OPERATOR: Dict[Any, Type[BaseOperator]] = {}

ClsT = TypeVar("ClsT")


def register_operator(cls: ClsT) -> ClsT:
    """Class decorator: record *cls* in the module-level registries."""
    # `append`/`__setitem__` mutate in place, so no `global` is needed.
    OPERATORS_REGISTRY.append(cls)  # type: ignore
    FUNC_TO_XFORMERS_OPERATOR[cls.OPERATOR] = cls  # type: ignore
    return cls


# post-2.0, avoids a warning
# (`torch.Tensor.storage` will also be deleted in the future)
_GET_TENSOR_STORAGE = getattr(torch.Tensor, "untyped_storage", None)
if _GET_TENSOR_STORAGE is None:  # pre-2.0, `untyped_storage` didn't exist
    _GET_TENSOR_STORAGE = torch.Tensor.storage


def _get_storage_base(x: torch.Tensor) -> int:
    """Address of the tensor's underlying storage (identifies the buffer)."""
    return _GET_TENSOR_STORAGE(x).data_ptr()  # type: ignore


@dataclass(frozen=True)
class Alias:
    # Annotation payload used in op schemas: marks a Tensor argument as
    # aliasing an input; ``write=True`` means the input is mutated.
    name: str
    write: bool


def make_pytorch_cuda_operator(fn: ClsT) -> ClsT:
    """Register *fn* as a PyTorch custom op on the CUDA dispatch key."""
    return turn_into_pytorch_op(fn, "CUDA")


def make_pytorch_operator_for_dispatch_key(dispatch_key: str) -> Callable[[ClsT], ClsT]:
    """Decorator factory over :func:`turn_into_pytorch_op` for *dispatch_key*."""

    def decorator(fn: ClsT) -> ClsT:
        return turn_into_pytorch_op(fn, dispatch_key)

    return decorator


def turn_into_pytorch_op(fn: ClsT, dispatch_key: str) -> ClsT:
    """Expose *fn* through the PyTorch dispatcher.

    The annotated Python signature is rendered into a TorchScript schema
    string, the schema is registered on the xformers library for
    *dispatch_key*, and a thin wrapper around the resulting dispatcher op
    is returned. ``ProcessGroup`` arguments are boxed/unboxed when they
    cross the dispatcher boundary.
    """
    from .. import get_python_lib

    def render_arg_type(annotation) -> str:
        # Translate one Python annotation into schema-type syntax.
        # Optional[T] is an alias for Union[T, None]
        if get_origin(annotation) is Union:
            inner_types = [
                t for t in get_args(annotation) if t is not type(None)  # noqa: E721
            ]
            if len(inner_types) == 1:
                return f"{render_arg_type(inner_types[0])}?"
        if get_origin(annotation) is list:
            (inner_type,) = get_args(annotation)
            return f"{render_arg_type(inner_type)}[]"
        if get_origin(annotation) is tuple:
            return (
                "("
                + ", ".join([render_arg_type(t) for t in get_args(annotation)])
                + ")"
            )
        if get_origin(annotation) is Annotated:
            inner_type, annotation = get_args(annotation)
            if isinstance(annotation, Alias):
                alias = annotation.name + ("!" if annotation.write else "")
                return f"{render_arg_type(inner_type)}({alias})"
        if annotation is torch.Tensor:
            return "Tensor"
        if annotation is bool:
            return "bool"
        if annotation is int:
            return "int"
        if annotation is float:
            return "float"
        if annotation is torch.dtype:
            return "ScalarType"
        if annotation is torch.distributed.ProcessGroup:
            return "__torch__.torch.classes.c10d.ProcessGroup"
        assert False, f"Unable to parse annotation: `{annotation}`"

    def render_default_value(default):
        # No default -> nothing to render in the schema.
        if default is inspect.Parameter.empty:
            return ""
        return f" = {default!r}"

    sign = inspect.signature(fn)  # type: ignore
    arguments = [
        f"{render_arg_type(arg.annotation)} {arg.name}{render_default_value(arg.default)}"
        for arg in sign.parameters.values()
    ]
    op_name = fn.__name__  # type: ignore
    definition = f"{op_name}({', '.join(arguments)}) -> {render_arg_type(sign.return_annotation)}"

    def callee(*args, **kwargs):
        # Dispatcher -> Python direction: unbox ProcessGroup handles.
        ba = sign.bind(*args, **kwargs)
        for name, value in ba.arguments.items():
            if sign.parameters[name].annotation is torch.distributed.ProcessGroup:
                from .._C import unbox_process_group

                ba.arguments[name] = unbox_process_group(value)
        return fn(*ba.args, **ba.kwargs)

    xformers_lib = get_python_lib()
    xformers_lib.define(definition)
    xformers_lib.impl(op_name, callee, dispatch_key)
    dispatcher_impl = getattr(getattr(torch.ops, xformers_lib.ns), op_name)

    @wraps(fn)  # type: ignore[arg-type]
    def caller(*args, **kwargs):
        # Python -> dispatcher direction: box ProcessGroup handles.
        ba = sign.bind(*args, **kwargs)
        for name, value in ba.arguments.items():
            if sign.parameters[name].annotation is torch.distributed.ProcessGroup:
                from .._C import box_process_group

                ba.arguments[name] = box_process_group(value)
        return dispatcher_impl(*ba.args, **ba.kwargs)

    return caller  # type: ignore


def _has_triton2():
    """Whether an importable triton reports version 2.0 or >= 2.1."""
    if not _is_triton_available():
        return False
    import triton

    tv = TorchVersion(triton.__version__)
    return tv >= (2, 1) or tv == (2, 0)


def _has_triton21():
    """Whether an importable triton reports version >= 2.1."""
    if not _is_triton_available():
        return False
    import triton

    tv = TorchVersion(triton.__version__)
    return tv >= (2, 1)
from typing import Optional, Tuple

import torch
import torch.distributed


def all_reduce(
    x: torch.Tensor, *, process_group: torch.distributed.ProcessGroup
) -> None:
    """Sum *x* in place across *process_group* (no-op for a single rank)."""
    assert x.is_contiguous()

    if process_group.size() == 1:
        return

    torch.distributed.all_reduce(
        tensor=x, op=torch.distributed.ReduceOp.SUM, group=process_group
    )


def gather_along_first_dim_async(
    input_: torch.Tensor, *, process_group: torch.distributed.ProcessGroup
) -> Tuple[torch.Tensor, Optional[torch.distributed.Work]]:
    """Launch an async all-gather over dim 0; return ``(output, handle)``.

    With a single rank the input tensor itself is returned and the
    handle is ``None`` (nothing to wait on).
    """
    assert input_.is_contiguous()
    world_size = process_group.size()
    if world_size == 1:
        return input_, None

    gathered = input_.new_empty((input_.shape[0] * world_size, *input_.shape[1:]))
    work = torch.distributed.all_gather_into_tensor(
        output_tensor=gathered,
        input_tensor=input_,
        group=process_group,
        async_op=True,
    )

    return gathered, work


def reduce_scatter_along_first_dim_async(
    input_: torch.Tensor, *, process_group: torch.distributed.ProcessGroup
) -> Tuple[torch.Tensor, Optional[torch.distributed.Work]]:
    """Launch an async sum reduce-scatter over dim 0; return ``(output, handle)``.

    Dim 0 must be divisible by the group size. With a single rank the
    input tensor itself is returned and the handle is ``None``.
    """
    assert input_.is_contiguous()
    world_size = process_group.size()
    if world_size == 1:
        return input_, None

    assert input_.shape[0] % world_size == 0
    scattered = input_.new_empty((input_.shape[0] // world_size, *input_.shape[1:]))
    work = torch.distributed.reduce_scatter_tensor(
        output=scattered,
        input=input_,
        op=torch.distributed.ReduceOp.SUM,
        group=process_group,
        async_op=True,
    )

    return scattered, work


def gather_along_first_dim(
    input_: torch.Tensor, *, process_group: torch.distributed.ProcessGroup
) -> torch.Tensor:
    """Blocking wrapper around :func:`gather_along_first_dim_async`."""
    output, work = gather_along_first_dim_async(input_, process_group=process_group)
    if work is not None:
        work.wait()
    return output


def reduce_scatter_along_first_dim(
    input_: torch.Tensor, *, process_group: torch.distributed.ProcessGroup
) -> torch.Tensor:
    """Blocking wrapper around :func:`reduce_scatter_along_first_dim_async`."""
    output, work = reduce_scatter_along_first_dim_async(
        input_, process_group=process_group
    )
    if work is not None:
        work.wait()
    return output


class _CopyToModelParallelRegion(torch.autograd.Function):
    # Identity in forward; all-reduce of the gradient in backward.

    @staticmethod
    def forward(  # type: ignore[override]
        ctx, input_: torch.Tensor, process_group: torch.distributed.ProcessGroup
    ) -> torch.Tensor:
        ctx.process_group = process_group
        return input_

    @staticmethod
    def backward(  # type: ignore[override]
        ctx, grad_output: torch.Tensor
    ) -> Tuple[torch.Tensor, None]:
        all_reduce(grad_output, process_group=ctx.process_group)
        return grad_output, None


def copy_to_model_parallel_region(
    x: torch.Tensor, process_group: torch.distributed.ProcessGroup
) -> torch.Tensor:
    """Forward identity whose backward all-reduces gradients across ranks."""
    return _CopyToModelParallelRegion.apply(x, process_group)


class _ReduceFromModelParallelRegion(torch.autograd.Function):
    # In-place all-reduce in forward; identity backward.

    @staticmethod
    def forward(  # type: ignore[override]
        ctx, input_: torch.Tensor, process_group: torch.distributed.ProcessGroup
    ) -> torch.Tensor:
        all_reduce(input_, process_group=process_group)
        # `input_` was modified in place: tell autograd about it.
        ctx.mark_dirty(input_)
        return input_

    @staticmethod
    def backward(  # type: ignore[override]
        ctx, grad_output: torch.Tensor
    ) -> Tuple[torch.Tensor, None]:
        return grad_output, None


def reduce_from_model_parallel_region(
    x: torch.Tensor, process_group: torch.distributed.ProcessGroup
) -> torch.Tensor:
    """All-reduce *x* across ranks in forward; gradients pass through."""
    return _ReduceFromModelParallelRegion.apply(x, process_group)


class _GatherFromSequenceParallelRegion(torch.autograd.Function):
    # All-gather in forward; reduce-scatter of the gradient in backward.

    @staticmethod
    def forward(  # type: ignore[override]
        ctx, x: torch.Tensor, process_group: torch.distributed.ProcessGroup
    ) -> torch.Tensor:
        ctx.process_group = process_group
        return gather_along_first_dim(x, process_group=process_group)

    @staticmethod
    def backward(  # type: ignore[override]
        ctx, grad_output: torch.Tensor
    ) -> Tuple[torch.Tensor, None]:
        grad = reduce_scatter_along_first_dim(
            grad_output, process_group=ctx.process_group
        )
        return grad, None


def gather_from_sequence_parallel_region(
    x: torch.Tensor, process_group: torch.distributed.ProcessGroup
) -> torch.Tensor:
    """All-gather *x* along dim 0 in forward; reduce-scatter gradients."""
    return _GatherFromSequenceParallelRegion.apply(x, process_group)


class _ScatterToSequenceParallelRegion(torch.autograd.Function):
    # Reduce-scatter in forward; all-gather of the gradient in backward.

    @staticmethod
    def forward(  # type: ignore[override]
        ctx, x: torch.Tensor, process_group: torch.distributed.ProcessGroup
    ) -> torch.Tensor:
        ctx.process_group = process_group
        return reduce_scatter_along_first_dim(x, process_group=process_group)

    @staticmethod
    def backward(  # type: ignore[override]
        ctx, grad_output: torch.Tensor
    ) -> Tuple[torch.Tensor, None]:
        grad = gather_along_first_dim(grad_output, process_group=ctx.process_group)
        return grad, None


def scatter_to_sequence_parallel_region(
    x: torch.Tensor, process_group: torch.distributed.ProcessGroup
) -> torch.Tensor:
    """Reduce-scatter *x* along dim 0 in forward; all-gather gradients."""
    return _ScatterToSequenceParallelRegion.apply(x, process_group)
b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/fmha/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fc8ecfe080435c72a97af1f63403ef4b341b0f49 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/fmha/__pycache__/common.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/fmha/__pycache__/cutlass.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/fmha/__pycache__/cutlass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a4542719bd923a5f5bd57b3565cafa77bf001a67 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/fmha/__pycache__/cutlass.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/fmha/__pycache__/decoder.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/fmha/__pycache__/decoder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d3eb5e53bd8e6727332399a3a2338bb4a2491665 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/fmha/__pycache__/decoder.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/fmha/__pycache__/dispatch.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/fmha/__pycache__/dispatch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf1ad9ec96e59dd3aa807aea0dcff45bf293a327 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/fmha/__pycache__/dispatch.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/fmha/__pycache__/flash.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/fmha/__pycache__/flash.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..0170c4b687d4306cb826f4232f2afa707b8a63ae Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/fmha/__pycache__/flash.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/fmha/__pycache__/small_k.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/fmha/__pycache__/small_k.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17b1a87de917f5c7d509f6d0a37cda4ae65e21e0 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/fmha/__pycache__/small_k.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/fmha/__pycache__/triton.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/fmha/__pycache__/triton.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9fe31c76f50044ce5492220860539b25ae238135 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/fmha/__pycache__/triton.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/fmha/attn_bias.py b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/fmha/attn_bias.py new file mode 100644 index 0000000000000000000000000000000000000000..78044f7db51c4c0fc16aa665f42b1d7f7fdd00af --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/fmha/attn_bias.py @@ -0,0 +1,929 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. 
+ + +import math +from dataclasses import dataclass +from typing import Any, Iterable, List, Optional, Sequence, Tuple, Union + +import torch + + +class AttentionBias: + """Base class for a custom bias that can be applied \ + as the attn_bias argument in + :attr:`xformers.ops.memory_efficient_attention`. + + That function has the ability to add a tensor, the + attention bias, to the QK^T matrix before it is used + in the softmax part of the attention calculation. + The attention bias tensor with shape + (B or 1, n_queries, number of keys) + can be given as the attn_bias input. + The most common use case is for an attention bias is + to contain only zeros and negative infinities, which forms + a mask so that some queries only attend to some keys. + + Children of this class define alternative things which can + be used as the attn_bias input to define an attention bias which + forms such a mask, for some common cases. + + When using an :attr:`xformers.ops.AttentionBias` + instead of a :attr:`torch.Tensor`, the mask matrix does + not need to be materialized, and can be + hardcoded into some kernels for better performance. + + See: + + - :attr:`xformers.ops.fmha.attn_bias.LowerTriangularMask` + - :attr:`xformers.ops.fmha.attn_bias.LowerTriangularFromBottomRightMask` + - :attr:`xformers.ops.fmha.attn_bias.LowerTriangularMaskWithTensorBias` + - :attr:`xformers.ops.fmha.attn_bias.BlockDiagonalMask` + - :attr:`xformers.ops.fmha.attn_bias.BlockDiagonalCausalMask` + + """ + + def materialize( + self, + shape: Tuple[int, ...], + dtype: torch.dtype = torch.float32, + device: Union[str, torch.device] = "cpu", + ) -> torch.Tensor: + """ + Materializes the bias as a `torch.Tensor`. This is very slow + and we don't attempt to make it fast. Only use for debugging/testing. 
+ + Shape should be like `[*, q_seqlen, k_seqlen]` + """ + raise NotImplementedError() + + +def _materialize_causal_mask( + shape: Tuple[int, ...], + dtype: torch.dtype = torch.float32, + device: Union[str, torch.device] = "cpu", + *, + window_size: Optional[int] = None, + from_bottomright: bool = False, +) -> torch.Tensor: + create_as = dtype if dtype is not torch.bfloat16 else torch.float32 + tensor = torch.full( # type: ignore + shape, + dtype=create_as, + fill_value=1, + device=device, + ) + + num_queries, num_keys = shape[-2:] + shift = 0 + if from_bottomright: + shift = num_keys - num_queries + + mask = torch.tril(tensor, diagonal=shift).to(dtype) # type: ignore + if window_size is not None: + mask = torch.triu(mask, diagonal=shift - window_size + 1) + mask = torch.log(mask) + return mask.to(dtype) + + +@dataclass +class LocalAttentionFromBottomRightMask(AttentionBias): + """ + A local attention mask + + The query at position :math:`q` can attend the key at position :math:`k` if + :math:`q - window\\_left <= k + s <= q + window\\_right` + + With :math:`s = num\\_queries - num\\_keys` + + :Example: + + .. code-block:: python + + import torch + from xformers.ops import fmha + + bias = fmha.attn_bias.LocalAttentionFromBottomRightMask(window_left=1, window_right=2) + print(bias.materialize(shape=(4, 4)).exp()) + print(bias.materialize(shape=(4, 5)).exp()) + + .. code-block:: text + + # 4x4 + tensor([[1., 1., 1., 0.], + [1., 1., 1., 1.], + [0., 1., 1., 1.], + [0., 0., 1., 1.]]) + + # 4x5 + tensor([[1., 1., 1., 1., 0.], + [0., 1., 1., 1., 1.], + [0., 0., 1., 1., 1.], + [0., 0., 0., 1., 1.]]) + + :Illustration: + + .. 
figure:: /_static/local_attn.png + :width: 240px + + The total window size is :math:`window\\_left + 1 + window\\_right` + """ + + window_left: int + window_right: int + + def __post_init__(self) -> None: + if self.window_left < 0: + raise ValueError( + "Invalid window value passed to " + "`LocalAttentionFromBottomRightMask`: expected" + f"`window_left > 0` but got window_left={self.window_left}" + ) + if self.window_right < 0: + raise ValueError( + "Invalid window value passed to " + "`LocalAttentionFromBottomRightMask`: expected" + f"`window_right > 0` but got window_right={self.window_right}" + ) + + def materialize( + self, + shape: Tuple[int, ...], + dtype: torch.dtype = torch.float32, + device: Union[str, torch.device] = "cpu", + ) -> torch.Tensor: + create_as = dtype if dtype is not torch.bfloat16 else torch.float32 + mask = torch.full( # type: ignore + shape, + dtype=create_as, + fill_value=1, + device=device, + ) + + num_queries, num_keys = shape[-2:] + shift = num_keys - num_queries + + mask = torch.triu(mask, diagonal=shift - self.window_left) + mask = torch.tril(mask, diagonal=shift + self.window_right) + mask = torch.log(mask) + return mask.to(dtype) + + +class LowerTriangularMask(AttentionBias): + """ + A lower-triangular (aka causal) mask + + A query Q cannot attend to a key which is farther from the + initial key than Q is from the initial query. + + See also :attr:`LowerTriangularFromBottomRightMask` if the number + of queries is not equal to the number of keys/values. 
+ """ + + def __init__(self, *tensor_args, **tensor_kwargs) -> None: + # NOTE: Unused arguments, we keep them for backward compatibility + super().__init__() + + def materialize( + self, + shape: Tuple[int, ...], + dtype: torch.dtype = torch.float32, + device: Union[str, torch.device] = "cpu", + ) -> torch.Tensor: + return _materialize_causal_mask(shape, dtype=dtype, device=device) + + def add_bias(self, bias: torch.Tensor) -> "LowerTriangularMaskWithTensorBias": + """ + Creates a new causal mask with an arbitrary ``torch.Tensor`` bias + """ + return LowerTriangularMaskWithTensorBias(bias) + + +class LowerTriangularFromBottomRightMask(AttentionBias): + """ + A causal masking. + + This mask is exactly the same as :attr:`LowerTriangularMask` when there is + the same number of queries and keys. + When the number of queries is different from the number of keys, + it is a triangular mask shifted so that the last query can attend to + the last key. + In other words, a query Q cannot attend to a key which is nearer the + final key than Q is to the final query. + + + .. figure:: /_static/causal_bottom_right.png + + The difference between :attr:`LowerTriangularMask` (left) and + :attr:`LowerTriangularFromBottomRightMask` (right). They become + equivalent if the number of queries equals the number of keys. + """ + + def materialize( + self, + shape: Tuple[int, ...], + dtype: torch.dtype = torch.float32, + device: Union[str, torch.device] = "cpu", + ) -> torch.Tensor: + return _materialize_causal_mask( + shape, dtype=dtype, device=device, from_bottomright=True + ) + + def make_local_attention( + self, window_size: int + ) -> "LowerTriangularFromBottomRightLocalAttentionMask": + """ + Create a new bias which combines local + causal attention. 
+ + See :attr:`LowerTriangularFromBottomRightLocalAttentionMask` + """ + return LowerTriangularFromBottomRightLocalAttentionMask(window_size) + + +@dataclass +class LowerTriangularFromBottomRightLocalAttentionMask( + LowerTriangularFromBottomRightMask +): + """ + A mask that combines both :attr:`LowerTriangularFromBottomRightMask` and + local attention. + + A query whose distance from the final query is X cannot attend to a key + whose distance to the final key is either of: + + * less than X (i.e. "causal attention", same as :attr:`LowerTriangularFromBottomRightMask`) + * greater than X + window_size (i.e. "local attention") + + + .. figure:: /_static/causal_bottom_right_local.png + + The mask from :attr:`LowerTriangularFromBottomRightLocalAttentionMask`. + The green area is calculated, and the grey area is masked out. + """ + + _window_size: int + + def __post_init__(self) -> None: + if self._window_size <= 0: + raise ValueError( + f"Expected `window_size > 0`, but window_size={self._window_size}" + ) + + def materialize( + self, + shape: Tuple[int, ...], + dtype: torch.dtype = torch.float32, + device: Union[str, torch.device] = "cpu", + ) -> torch.Tensor: + return _materialize_causal_mask( + shape, + dtype=dtype, + device=device, + window_size=self._window_size, + from_bottomright=True, + ) + + +class LowerTriangularMaskWithTensorBias(LowerTriangularMask): + """A lower-triangular (aka causal) mask with an additive bias""" + + def __init__(self, bias: torch.Tensor) -> None: + self._bias = bias + + def materialize( + self, + shape: Tuple[int, ...], + dtype: torch.dtype = torch.float32, + device: Union[str, torch.device] = "cpu", + ) -> torch.Tensor: + return super().materialize(shape, dtype=dtype, device=device) + self._bias + + +@dataclass +class _SeqLenInfo: + """ + (Internal) Represents the division of a dimension into blocks. 
+ + For example, to represents a dimension of length 7 divided into + three blocks of lengths 2, 3 and 2, use `from_seqlength([2, 3, 2])`. + The members will be: + max_seqlen: 3 + min_seqlen: 2 + seqstart_py: [0, 2, 5, 7] + seqstart: torch.IntTensor([0, 2, 5, 7]) + """ + + seqstart: torch.Tensor + max_seqlen: int + min_seqlen: int + seqstart_py: List[int] + + def to(self, device: torch.device) -> None: + self.seqstart = self.seqstart.to(device, non_blocking=True) + + def intervals(self) -> Iterable[Tuple[int, int]]: + yield from zip(self.seqstart_py, self.seqstart_py[1:]) + + @classmethod + def from_seqlens(cls, seqlens: Iterable[int]) -> "_SeqLenInfo": + """ + Input tensors are assumed to be in shape [B, M, *] + """ + assert not isinstance(seqlens, torch.Tensor) + seqstart_py = [0] + max_seqlen = -1 + min_seqlen = -1 + for seqlen in seqlens: + min_seqlen = min(min_seqlen, seqlen) if min_seqlen != -1 else seqlen + max_seqlen = max(max_seqlen, seqlen) + seqstart_py.append(seqstart_py[len(seqstart_py) - 1] + seqlen) + seqstart = torch.tensor(seqstart_py, dtype=torch.int32) + return cls( + max_seqlen=max_seqlen, + min_seqlen=min_seqlen, + seqstart=seqstart, + seqstart_py=seqstart_py, + ) + + def split( + self, x: torch.Tensor, batch_sizes: Optional[Sequence[int]] = None + ) -> List[torch.Tensor]: + if self.seqstart_py[-1] != x.shape[1] or x.shape[0] != 1: + raise ValueError( + f"Invalid `torch.Tensor` of shape {x.shape}, expected format " + f"(B, M, *) with B=1 and M={self.seqstart_py[-1]}\n" + f" seqstart: {self.seqstart_py}" + ) + if batch_sizes is None: + batch_sizes = [1] * (len(self.seqstart_py) - 1) + split_chunks = [] + it = 0 + for batch_size in batch_sizes: + split_chunks.append( + self.seqstart_py[it + batch_size] - self.seqstart_py[it] + ) + it += batch_size + return [ + tensor.reshape([bs, -1, *tensor.shape[2:]]) + for bs, tensor in zip(batch_sizes, x.split(split_chunks, dim=1)) + ] + + +@dataclass +class _PaddedSeqLenInfo(_SeqLenInfo): + """ + (Internal) 
Represents the division of a dimension into blocks which are + padded out to the same total length. + + For example, to represent a dimension of length 12 with space for + three blocks of length 4, but where the occupied lengths are + 2, 3 and 2, use `from_seqlens_padded([2, 3, 2], 4)`. + + The layout along the dimension is + + 0 ─► block 0 + block 0 + + + 4 ─► block 1 + block 1 + block 1 + + 8 ─► block 2 + block 2 + + + 12 ─► + + The members will be: + max_seqlen: 3 + min_seqlen: 2 + seqstart_py: [0, 4, 8, 12] + seqstart: torch.IntTensor([0, 4, 8, 12]) + seqlen_py: [2, 3, 2] + seqlen: torch.IntTensor([2, 3, 2]) + padding: 4 + """ + + seqlen: torch.Tensor + seqlen_py: Sequence[int] + padding: int + # From parent: seqstart[i] contains the start position + # of the i-th sequence + # seqstart: torch.Tensor + + def __post_init__(self) -> None: + assert len(self.seqstart_py) == len(self.seqlen_py) + 1 + + def to(self, device: torch.device) -> None: + self.seqlen = self.seqlen.to(device, non_blocking=True) + super().to(device) + + def intervals(self) -> Iterable[Tuple[int, int]]: + for (start, _), length in zip(super().intervals(), self.seqlen_py): + yield start, start + length + + @classmethod + def from_seqlens(cls, seqlens: Iterable[int]) -> "_SeqLenInfo": + raise RuntimeError( + "Use either `_SeqLenInfo.from_seqlens` or `_PaddedSeqLenInfo.from_seqlens_padded`" + ) + + @classmethod + def from_seqlens_padded( + cls, seqlens: Sequence[int], padding: int + ) -> "_PaddedSeqLenInfo": + """ + Input tensors are assumed to be in shape [B, M, *] + seqstart = padding * torch.arange(batch_size) + """ + assert not isinstance(seqlens, torch.Tensor) + assert all(seqlen <= padding for seqlen in seqlens) + seqstart_py = list(range(0, len(seqlens) * padding + 1, padding)) + return cls( + seqlen=torch.tensor(seqlens, dtype=torch.int32), + seqlen_py=seqlens, + max_seqlen=max(seqlens), + min_seqlen=min(seqlens), + seqstart=torch.tensor(seqstart_py, dtype=torch.int32), + 
seqstart_py=seqstart_py, + padding=padding, + ) + + def split( + self, x: torch.Tensor, batch_sizes: Optional[Sequence[int]] = None + ) -> List[torch.Tensor]: + raise NotImplementedError("_PaddedSeqLenInfo.split") + + +@dataclass +class BlockDiagonalMask(AttentionBias): + """ + A block-diagonal mask that can be passed as ``attn_bias`` + argument to :attr:`xformers.ops.memory_efficient_attention`. + + Queries and Keys are each divided into the same number of blocks. + Queries in block i only attend to keys in block i. + + .. figure:: /_static/block_diag_bias.png + + This bias can be used to handle a batch of sequences of + different lengths, via :attr:`BlockDiagonalMask.from_tensor_list` + + :Example: + + .. code-block:: python + + import torch + from xformers.ops import fmha + + K = 16 + dtype = torch.float16 + device = "cuda" + list_x = [ + torch.randn([1, 3, 1, K], dtype=dtype, device=device), + torch.randn([1, 6, 1, K], dtype=dtype, device=device), + torch.randn([1, 2, 1, K], dtype=dtype, device=device), + ] + attn_bias, x = fmha.BlockDiagonalMask.from_tensor_list(list_x) + linear = torch.nn.Linear(K, K * 3).to(device=device, dtype=dtype) + + q, k, v = linear(x).reshape([1, -1, 1, 3, K]).unbind(-2) + out = fmha.memory_efficient_attention(q, k, v, attn_bias=attn_bias) + list_out = attn_bias.split(out) + print(list_out[0].shape) # [1, 3, 1, K] + assert tuple(list_out[0].shape) == (1, 3, 1, K) + + """ + + q_seqinfo: _SeqLenInfo + k_seqinfo: _SeqLenInfo + _batch_sizes: Optional[Sequence[int]] = None + + def _create_block_mask( + self, + shape: Tuple[int, ...], + dtype: torch.dtype = torch.float32, + device: Union[str, torch.device] = "cpu", + ) -> torch.Tensor: + return torch.zeros( + shape, + dtype=dtype, + device=device, + ) + + def materialize( + self, + shape: Tuple[int, ...], + dtype: torch.dtype = torch.float32, + device: Union[str, torch.device] = "cpu", + ) -> torch.Tensor: + """Materialize the attention bias - for debugging & testing""" + assert shape[-1] 
== self.k_seqinfo.seqstart_py[-1], ( + shape[-1], + self.k_seqinfo.seqstart_py[-1], + ) + assert shape[-2] == self.q_seqinfo.seqstart_py[-1], ( + shape[-2], + self.q_seqinfo.seqstart_py[-1], + ) + mask = torch.empty(shape[-2:], dtype=dtype, device=device) + mask.fill_(-math.inf) + for i, ((q_start, q_end), (k_start, k_end)) in enumerate( + zip( + self.q_seqinfo.intervals(), + self.k_seqinfo.intervals(), + ) + ): + mask[q_start:q_end, k_start:k_end] = self._create_block_mask( + (q_end - q_start, k_end - k_start), + dtype=dtype, + device=device, + ) + for _ in range(len(shape) - 2): + mask = mask.unsqueeze(0) + return mask.expand(shape) + + @classmethod + def from_seqlens( + cls, + q_seqlen: Sequence[int], + kv_seqlen: Optional[Sequence[int]] = None, + ) -> "BlockDiagonalMask": + """Creates a :attr:`BlockDiagonalMask` from a list of tensors lengths for query and key/value. + + Args: + q_seqlen (Union[Sequence[int], torch.Tensor]): List or tensor of sequence lengths for query tensors + kv_seqlen (Union[Sequence[int], torch.Tensor], optional): List or tensor of sequence lengths for key/value. + (Defaults to ``q_seqlen``.) + Returns: + BlockDiagonalMask + """ + assert kv_seqlen is None or len(q_seqlen) == len(kv_seqlen) + q_seqinfo = _SeqLenInfo.from_seqlens(q_seqlen) + if kv_seqlen is None or q_seqlen == kv_seqlen: + k_seqinfo = q_seqinfo + else: + k_seqinfo = _SeqLenInfo.from_seqlens(kv_seqlen) + return cls(q_seqinfo=q_seqinfo, k_seqinfo=k_seqinfo) + + @classmethod + def from_tensor_list( + cls, + tensors: Sequence[torch.Tensor], + ) -> Tuple["BlockDiagonalMask", torch.Tensor]: + """Creates a :attr:`BlockDiagonalMask` from a list of tensors, and returns the tensors + concatenated on the sequence length dimension + + .. 
figure:: /_static/block_diag_cat_split.png + + See also :attr:`BlockDiagonalMask.split` to split the returned + :attr:`torch.Tensor` back to a list of tensors of varying sequence length + + Args: + tensors (Sequence[torch.Tensor]): A list of tensors of shape ``[B, M_i, *]``. + All tensors should have the same dimension and the same batch size ``B``, but + they can have different sequence length ``M``. + + Returns: + Tuple[BlockDiagonalMask, torch.Tensor]: The corresponding bias for the attention + along with `tensors` concatenated on the sequence length dimension, with shape ``[1, sum_i{M_i}, *]`` + """ + batch_sizes = [tensor.shape[0] for tensor in tensors] + seqlens = [] + for x in tensors: + for _ in range(x.shape[0]): + seqlens.append(x.shape[1]) + block_diag = cls.from_seqlens(seqlens) + block_diag._batch_sizes = batch_sizes + tensors_bs1 = tuple(x.reshape([1, -1, *x.shape[2:]]) for x in tensors) + concat_tensors = torch.cat(tensors_bs1, dim=1) + return block_diag, concat_tensors + + @classmethod + def from_tensor_lists_qkv( + cls, + tensors_q: Sequence[torch.Tensor], + tensors_k: Sequence[torch.Tensor], + tensors_v: Optional[Sequence[torch.Tensor]] = None, + ) -> Tuple["BlockDiagonalMask", torch.Tensor, torch.Tensor, Optional[torch.Tensor]]: + assert len(tensors_q) == len(tensors_k) + assert tensors_v is None or len(tensors_v) == len(tensors_q) + batch_sizes = [tensor.shape[0] for tensor in tensors_q] + q_seqlens, kv_seqlens = [], [] + for i, (q, k) in enumerate(zip(tensors_q, tensors_k)): + assert q.shape[0] == k.shape[0] + q_seqlens += [q.shape[1]] * q.shape[0] + kv_seqlens += [k.shape[1]] * k.shape[0] + assert tensors_v is None or tensors_v[i].shape[:2] == k.shape[:2] + block_diag = cls.from_seqlens(q_seqlens, kv_seqlens) + block_diag._batch_sizes = batch_sizes + return ( + block_diag, + torch.cat([x.reshape([1, -1, *x.shape[2:]]) for x in tensors_q], dim=1), + torch.cat([x.reshape([1, -1, *x.shape[2:]]) for x in tensors_k], dim=1), + 
torch.cat([x.reshape([1, -1, *x.shape[2:]]) for x in tensors_v], dim=1) + if tensors_v is not None + else None, + ) + + def split_queries(self, tensor: torch.Tensor) -> Sequence[torch.Tensor]: + return self.q_seqinfo.split(tensor, self._batch_sizes) + + def split_kv(self, tensor: torch.Tensor) -> Sequence[torch.Tensor]: + return self.k_seqinfo.split(tensor, self._batch_sizes) + + def split(self, tensor: torch.Tensor) -> Sequence[torch.Tensor]: + """The inverse operation of :attr:`BlockDiagonalCausalMask.from_tensor_list` + + Args: + tensor (torch.Tensor): Tensor of tokens of shape ``[1, sum_i{M_i}, *]`` + + Returns: + Sequence[torch.Tensor]: A list of tokens with possibly different sequence lengths + """ + assert self.q_seqinfo is self.k_seqinfo + return self.q_seqinfo.split(tensor, self._batch_sizes) + + def make_causal(self) -> "BlockDiagonalCausalMask": + """Makes each block causal""" + return BlockDiagonalCausalMask( + q_seqinfo=self.q_seqinfo, + k_seqinfo=self.k_seqinfo, + _batch_sizes=self._batch_sizes, + ) + + def make_causal_from_bottomright(self) -> "BlockDiagonalCausalFromBottomRightMask": + """Makes each block causal with a possible non-causal prefix""" + return BlockDiagonalCausalFromBottomRightMask( + q_seqinfo=self.q_seqinfo, + k_seqinfo=self.k_seqinfo, + _batch_sizes=self._batch_sizes, + ) + + def make_local_attention( + self, window_size: int + ) -> "BlockDiagonalCausalLocalAttentionMask": + """Experimental: Makes each block causal with local attention""" + return BlockDiagonalCausalLocalAttentionMask( + q_seqinfo=self.q_seqinfo, + k_seqinfo=self.k_seqinfo, + _batch_sizes=self._batch_sizes, + _window_size=window_size, + ) + + def make_local_attention_from_bottomright( + self, window_size: int + ) -> "BlockDiagonalCausalLocalAttentionFromBottomRightMask": + """Experimental: Makes each block causal with local attention, start from bottom right""" + return BlockDiagonalCausalLocalAttentionFromBottomRightMask( + q_seqinfo=self.q_seqinfo, + 
k_seqinfo=self.k_seqinfo, + _batch_sizes=self._batch_sizes, + _window_size=window_size, + ) + + +@dataclass +class BlockDiagonalCausalMask(BlockDiagonalMask): + """ + Same as :attr:`xformers.ops.fmha.attn_bias.BlockDiagonalMask`, except that each block is causal. + + Queries and Keys are each divided into the same number of blocks. + A query Q in block i cannot attend to a key which is not in block i, + nor one which is farther from the initial key in block i than Q + is from the initial query in block i. + """ + + def _create_block_mask( + self, + shape: Tuple[int, ...], + dtype: torch.dtype = torch.float32, + device: Union[str, torch.device] = "cpu", + ) -> torch.Tensor: + return LowerTriangularMask().materialize( + shape, + dtype=dtype, + device=device, + ) + + +@dataclass +class BlockDiagonalCausalFromBottomRightMask(BlockDiagonalMask): + """ + Same as :attr:`xformers.ops.fmha.attn_bias.BlockDiagonalMask`, except that each block is causal. + This mask allows for a non-causal prefix + NOTE: Each block should have `num_keys >= num_queries` otherwise the forward pass is not + defined (softmax of vector of `-inf` in the attention) + + Queries and keys are each divided into the same number of blocks. + A query Q in block i cannot attend to a key which is not in block i, + nor one which nearer the final key in block i than Q is to the + final query in block i. + """ + + def __post_init__(self) -> None: + for i, ((q_start, q_end), (k_start, k_end)) in enumerate( + zip( + self.q_seqinfo.intervals(), + self.k_seqinfo.intervals(), + ) + ): + num_queries = q_end - q_start + num_keys = k_end - k_start + if num_keys < num_queries: + raise ValueError( + f"Block #{i} has num_keys={num_keys} and num_queries={num_queries}." 
+ " Expected `num_keys >= num_queries`" + ) + + def _create_block_mask( + self, + shape: Tuple[int, ...], + dtype: torch.dtype = torch.float32, + device: Union[str, torch.device] = "cpu", + ) -> torch.Tensor: + return LowerTriangularFromBottomRightMask().materialize( + shape=shape, dtype=dtype, device=device + ) + + +@dataclass +class BlockDiagonalCausalWithOffsetPaddedKeysMask(AttentionBias): + """ + Same as :attr:`xformers.ops.fmha.attn_bias.BlockDiagonalCausalMask`, + except an offset on causality is allowed for each block and we support padding for k/v + + The keys and values are divided into blocks which are padded out to + the same total length. + For example, if there is space for 12 keys, for three blocks of + max length 4, but we only want to use the first 2, 3 and 2 + of each block, use `kv_padding=4` and `kv_seqlens=[2, 3, 2]`. + The queries are divided into blocks, without padding, of lengths given by + q_seqlen. + + A query Q in block i cannot attend to a key which is not in block i, + nor one which is not in use (i.e. in the padded area), + nor one which is nearer to the final key in block i + than Q is to the final query in block i. + """ + + q_seqinfo: _SeqLenInfo + k_seqinfo: _PaddedSeqLenInfo + causal_diagonal: Any = None # unused. Exists for BC only. 
+ + def _create_block_mask( + self, + shape: Tuple[int, ...], + dtype: torch.dtype = torch.float32, + device: Union[str, torch.device] = "cpu", + ) -> torch.Tensor: + return LowerTriangularFromBottomRightMask().materialize( + shape=shape, dtype=dtype, device=device + ) + + def materialize( + self, + shape: Tuple[int, ...], + dtype: torch.dtype = torch.float32, + device: Union[str, torch.device] = "cpu", + ) -> torch.Tensor: + """Materialize the attention bias - for debugging & testing""" + if shape[-1] != self.k_seqinfo.seqstart_py[-1]: + raise ValueError("k shapes wrong") + if shape[-2] != self.q_seqinfo.seqstart_py[-1]: + raise ValueError("q shapes wrong") + mask = torch.empty(shape[-2:], dtype=dtype, device=device) + mask.fill_(-math.inf) + for i, ((q_start, q_end), (k_start, k_end)) in enumerate( + zip( + self.q_seqinfo.intervals(), + self.k_seqinfo.intervals(), + ) + ): + mask[q_start:q_end, k_start:k_end] = self._create_block_mask( + (q_end - q_start, k_end - k_start), + dtype=dtype, + device=device, + ) + for _ in range(len(shape) - 2): + mask = mask.unsqueeze(0) + return mask.expand(shape) + + @classmethod + def from_seqlens( + cls, + q_seqlen: Sequence[int], + kv_padding: int, + kv_seqlen: Sequence[int], + causal_diagonal: Any = None, + ) -> "BlockDiagonalCausalWithOffsetPaddedKeysMask": + """Creates a :attr:`BlockDiagonalCausalWithOffsetPaddedKeysMask` from a list of tensor + lengths for query and key/value. + + Args: + q_seqlen (Sequence[int]): List or tensor of sequence lengths for query tensors + kv_padding (int): Padding for k/v - also an upperbound on each individual key length + kv_seqlen (Sequence[int]): List or tensor of sequence lengths for key/value. 
+ causal_diagonal: unused, for BC only + Returns: + BlockDiagonalCausalWithOffsetPaddedKeysMask + """ + assert kv_seqlen is None or len(q_seqlen) == len(kv_seqlen), ( + q_seqlen, + kv_seqlen, + ) + q_seqinfo = _SeqLenInfo.from_seqlens(q_seqlen) + k_seqinfo = _PaddedSeqLenInfo.from_seqlens_padded(kv_seqlen, kv_padding) + return cls(q_seqinfo=q_seqinfo, k_seqinfo=k_seqinfo) + + +@dataclass +class BlockDiagonalCausalLocalAttentionMask(BlockDiagonalCausalMask): + """ + (Experimental feature) + Same as :attr:`xformers.ops.fmha.attn_bias.BlockDiagonalCausalMask`. + This makes the mask "local" and the attention pattern banded. + + Query i only attends to keys in its block and cannot attend keys further than "window_size" + from it. + """ + + _window_size: int = 0 # forced due to inheritance and default arguments + + def __post_init__(self): + if self._window_size <= 0: + raise ValueError( + f"Expected `window_size > 0`, but window_size={self._window_size}" + ) + q_seqlen = [ + y - x + for x, y in zip( + self.q_seqinfo.seqstart_py[:-1], self.q_seqinfo.seqstart_py[1:] + ) + ] + kv_seqlen = [ + y - x + for x, y in zip( + self.k_seqinfo.seqstart_py[:-1], self.k_seqinfo.seqstart_py[1:] + ) + ] + for q, k in zip(q_seqlen, kv_seqlen): + if q - self._window_size >= k: + # Each query only attends to keys no further than window_size back. + # When q > k + window_size, there will be a query for which the window doesn't reach any key. 
+ raise RuntimeError( + f"No keys are attended in q_seqlen {q} k_seqlen {k} with sliding window {self._window_size}" + ) + + def _create_block_mask( + self, + shape: Tuple[int, ...], + dtype: torch.dtype = torch.float32, + device: Union[str, torch.device] = "cpu", + ) -> torch.Tensor: + return _materialize_causal_mask( + shape, + dtype=dtype, + device=device, + window_size=self._window_size, + ) + + +@dataclass +class BlockDiagonalCausalLocalAttentionFromBottomRightMask( + BlockDiagonalCausalFromBottomRightMask +): + """ + (Experimental feature) + Same as :attr:`xformers.ops.fmha.attn_bias.BlockDiagonalCausalMask`. + This makes the mask "local" and the attention pattern banded. + + Query i only attends to keys in its block and cannot attend keys further than "window_size" + from it. + """ + + _window_size: int = 0 # forced due to inheritance and default arguments + + def __post_init__(self): + super().__post_init__() + if self._window_size <= 0: + raise ValueError( + f"Expected `window_size > 0`, but window_size={self._window_size}" + ) + + def _create_block_mask( + self, + shape: Tuple[int, ...], + dtype: torch.dtype = torch.float32, + device: Union[str, torch.device] = "cpu", + ) -> torch.Tensor: + return _materialize_causal_mask( + shape, + dtype=dtype, + device=device, + window_size=self._window_size, + from_bottomright=True, + ) diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/fmha/common.py b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/fmha/common.py new file mode 100644 index 0000000000000000000000000000000000000000..ff7dea358763bfd7387eb5ee1e487a5daa72e4fa --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/fmha/common.py @@ -0,0 +1,550 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. 
+ +import math +from dataclasses import dataclass +from functools import partial +from typing import Any, Callable, List, Mapping, Optional, Set, Tuple, Type, Union + +import torch + +from ..._cpp_lib import _built_with_cuda +from ..common import BaseOperator +from .attn_bias import ( + AttentionBias, + BlockDiagonalMask, + LowerTriangularMask, + LowerTriangularMaskWithTensorBias, +) + + +def _is_bias_type_supported_in_BMK(attn_bias_type: Any) -> bool: + # NoneType + if isinstance(None, attn_bias_type): + return True + if attn_bias_type in [LowerTriangularMask, torch.Tensor]: + return True + return False + + +def _attn_bias_apply( + attn_bias: Optional[Union[torch.Tensor, AttentionBias]], + op: Callable[[torch.Tensor], torch.Tensor], +) -> Optional[Union[torch.Tensor, AttentionBias]]: + if isinstance(attn_bias, torch.Tensor): + return op(attn_bias) + if isinstance(attn_bias, LowerTriangularMaskWithTensorBias): + return LowerTriangularMaskWithTensorBias(op(attn_bias._bias)) + return attn_bias + + +@dataclass +class Inputs: + """ + Stores inputs to the `memory_efficient_attention` operators + """ + + query: torch.Tensor + key: torch.Tensor + value: torch.Tensor + attn_bias: Optional[Union[torch.Tensor, AttentionBias]] = None + p: float = 0.0 + scale: Optional[float] = None + + @property + def device(self) -> torch.device: + return self.query.device + + @property + def scale_float(self) -> float: + return self.query.shape[-1] ** (-0.5) if self.scale is None else self.scale + + def get_qkv_in_bmghk(self) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + if self.query.ndim == 5: + return self.query, self.key, self.value + if self.query.ndim == 4: + return ( + self.query.unsqueeze(2), + self.key.unsqueeze(2), + self.value.unsqueeze(2), + ) + if self.value.ndim == 3: + return ( + self.query[:, :, None, None], + self.key[:, :, None, None], + self.value[:, :, None, None], + ) + assert False + + def normalize_bmhk(self) -> Tuple[int, ...]: + if self.query.ndim not in [3, 
4, 5]: + raise ValueError( + f"Invalid shape for query: {self.query.shape}. " + "Expected shape [batch, seqlen, head_groups, num_heads_per_group, K]" + ", [batch, seqlen, num_heads, K], or [batch, seqlen, K]." + ) + if self.value.dtype == torch.int32: + # Quantized K/V case, in which the last dims of Q and K are different. + # NB we currently don't have any implementations for quantized KV with + # SUPPORTS_DIFFERENT_VALUE_EMBED. + output_shape = tuple(self.query.shape) + else: + output_shape = (self.query.shape[:-1]) + (self.value.shape[-1],) + # Convert from legacy format + if self.query.ndim == 3: + self.query = self.query.unsqueeze(2) + self.key = self.key.unsqueeze(2) + self.value = self.value.unsqueeze(2) + self.attn_bias = _attn_bias_apply( + self.attn_bias, partial(torch.unsqueeze, dim=1) + ) + return output_shape + + def validate_inputs(self) -> None: + qkv = (self.query, self.key, self.value) + if self.query.ndim not in (3, 4, 5) or any( + x.ndim != self.query.ndim for x in qkv + ): + raise ValueError( + f"Query/Key/Value should all have BMGHK, BMHK, or BMK shape.\n" + f" query.shape: {self.query.shape}\n" + f" key.shape : {self.key.shape}\n" + f" value.shape: {self.value.shape}" + ) + if any(x.device != self.query.device for x in qkv): + raise ValueError("Query/Key/Value should all be on the same device") + quantized_dtypes = self.key.dtype == self.value.dtype == torch.int32 + non_quantized_dtypes = all(x.dtype == self.query.dtype for x in qkv) + if not (quantized_dtypes or non_quantized_dtypes): + raise ValueError( + "Query/Key/Value should either all have the same dtype, or " + "(in the quantized case) Key/Value should have dtype torch.int32\n" + f" query.dtype: {self.query.dtype}\n" + f" key.dtype : {self.key.dtype}\n" + f" value.dtype: {self.value.dtype}" + ) + # Biases with tensors attached are meant to be in BMHK format + # This would require to permute biases/gradients which can be expensive, + # so let's just forbid it - BMK is a legacy format 
anyway + if self.query.ndim == 3 and not _is_bias_type_supported_in_BMK( + type(self.attn_bias) + ): + raise ValueError( + f"Please provide inputs in BMHK format rather " + f"than BMK when using bias type `{type(self.attn_bias).__name__}`" + ) + attn_bias_t: Optional[torch.Tensor] = None + if isinstance(self.attn_bias, torch.Tensor): + attn_bias_t = self.attn_bias + if isinstance(self.attn_bias, LowerTriangularMaskWithTensorBias): + attn_bias_t = self.attn_bias._bias + if self.query.ndim == 4 and attn_bias_t is not None: + expected_shape = ( + self.query.shape[0], + self.query.shape[2], + self.query.shape[1], + self.key.shape[1], + ) + if attn_bias_t.shape != expected_shape: + raise ValueError( + f"Invalid shape for attention bias: {attn_bias_t.shape} (expected {expected_shape})\n" + f" query.shape: {self.query.shape}\n" + f" key.shape : {self.key.shape}\n" + f" value.shape: {self.value.shape}" + ) + if isinstance(self.attn_bias, BlockDiagonalMask): + if any(x.shape[0] != 1 for x in qkv): + raise ValueError( + f"Expected batch_size=1 when using block-diagonal bias\n" + f" query.shape: {self.query.shape}\n" + f" key.shape : {self.key.shape}\n" + f" value.shape: {self.value.shape}" + ) + if self.p < 0.0 or self.p > 1.0: + raise ValueError(f"Invalid dropout probability: p={self.p}") + # Check that shapes match between inputs + B, Mq = self.query.shape[:2] + K = self.query.shape[-1] + B, Mkv = self.key.shape[:2] + Kv = self.value.shape[-1] + quantized_kv_cache = self.value.dtype == torch.int32 + key_embed_dim = Kv if quantized_kv_cache else K + + valid_shapes = True + if self.query.ndim == 3: # BMK + valid_shapes = ( + self.query.shape == (B, Mq, K) + and self.key.shape == (B, Mkv, K) + and self.value.shape == (B, Mkv, Kv) + ) + H = self.query.shape[-2] + if self.query.ndim == 4: # BMHK + valid_shapes = ( + self.query.shape == (B, Mq, H, K) + and self.key.shape == (B, Mkv, H, key_embed_dim) + and self.value.shape == (B, Mkv, H, Kv) + ) + G = self.query.shape[2] + if 
self.query.ndim == 5: # BMNHK + valid_shapes = ( + self.query.shape == (B, Mq, G, H, K) + and self.key.shape == (B, Mkv, G, H, key_embed_dim) + and self.value.shape == (B, Mkv, G, H, Kv) + ) + if not valid_shapes: + raise ValueError( + f"Incompatible shapes for attention inputs:\n" + f" query.shape: {self.query.shape}\n" + f" key.shape : {self.key.shape}\n" + f" value.shape: {self.value.shape}\n" + "HINT: We don't support broadcasting, please use `expand` " + "yourself before calling `memory_efficient_attention` if you need to" + ) + + +@dataclass +class Context: + lse: torch.Tensor + out: torch.Tensor + op_bw: Optional[Type["AttentionBwOpBase"]] = None + rng_state: Optional[torch.Tensor] = None + + def get_padded_lse(self, pad_to: int, force_pad_inf: bool = False) -> torch.Tensor: + pad_amount = (pad_to - (self.lse.shape[2] % pad_to)) % pad_to + lse = self.lse + if pad_amount > 0: + if force_pad_inf: + lse = lse[:, :, : self.out.shape[1]] + pad_amount = (pad_to - (lse.shape[2] % pad_to)) % pad_to + lse = torch.nn.functional.pad(lse, [0, pad_amount], value=math.inf) + elif force_pad_inf and self.out.shape[1] != lse.shape[2]: + lse[:, :, self.out.shape[1] :].fill_(math.inf) + return lse + + +@dataclass +class Gradients: + dq: torch.Tensor + dk: torch.Tensor + dv: torch.Tensor + # bias gradient. 
None if there is no tensor bias or if it doesn't require grad + db: Optional[torch.Tensor] = None + + +class AttentionOpBase(BaseOperator): + """Base class for any attention operator in xFormers + + See: + + - :attr:`xformers.ops.fmha.cutlass.FwOp` + - :attr:`xformers.ops.fmha.cutlass.BwOp` + - :attr:`xformers.ops.fmha.flash.FwOp` + - :attr:`xformers.ops.fmha.flash.BwOp` + - :attr:`xformers.ops.fmha.triton.FwOp` + - :attr:`xformers.ops.fmha.triton.BwOp` + - :attr:`xformers.ops.fmha.small_k.FwOp` + - :attr:`xformers.ops.fmha.small_k.BwOp` + """ + + OPERATOR: Any + SUPPORTED_DEVICES: Set[str] + CUDA_MINIMUM_COMPUTE_CAPABILITY: Tuple[int, int] = (5, 0) + SUPPORTED_DTYPES: Set[torch.dtype] + SUPPORTED_MAX_K: float + SUPPORTED_ATTN_BIAS_TYPES: Set[Any] = {type(None)} + SUPPORTS_DROPOUT: bool + SUPPORTS_CUSTOM_SCALE: bool = False + SUPPORTS_DIFFERENT_VALUE_EMBED: bool = False + IS_DETERMINISTIC: bool = True + SUPPORTS_BMGHK: bool = False + NAME: str + OPERATOR_CATEGORY = "memory_efficient_attention" + + _TEST_BATCH_SIZES: List[int] = [1, 300] + _TEST_K: List[int] = [32, 128] + + @classmethod + def supports(cls, d: Inputs) -> bool: + return not cls.not_supported_reasons(d) + + @classmethod + def shape_not_supported_reasons( + cls, Mq: int, Mkv: int, K: int, Kv: int + ) -> List[str]: + reasons = [] + if not cls.SUPPORTS_DIFFERENT_VALUE_EMBED and K != Kv: + reasons.append("query.shape[-1] != value.shape[-1]") + if max(K, Kv) > cls.SUPPORTED_MAX_K: + reasons.append( + f"max(query.shape[-1] != value.shape[-1]) > {cls.SUPPORTED_MAX_K}" + ) + return reasons + + @classmethod + def not_supported_reasons(cls, d: Inputs) -> List[str]: + """ + Returns a list of reasons why this is not supported. 
+ The kernel can run these inputs only if the returned list is empty + """ + reasons = cls.shape_not_supported_reasons( + Mq=d.query.shape[1], + Mkv=d.key.shape[1], + K=d.query.shape[-1], + Kv=d.value.shape[-1], + ) + device_type = d.query.device.type + dtype = d.query.dtype + if device_type not in cls.SUPPORTED_DEVICES: + reasons.append(f"device={device_type} (supported: {cls.SUPPORTED_DEVICES})") + if device_type == "cuda" and not _built_with_cuda: + reasons.append("xFormers wasn't build with CUDA support") + if device_type == "cuda": + device_capability = torch.cuda.get_device_capability(d.device) + if device_capability < cls.CUDA_MINIMUM_COMPUTE_CAPABILITY: + reasons.append( + f"requires device with capability > {cls.CUDA_MINIMUM_COMPUTE_CAPABILITY} " + f"but your GPU has capability {device_capability} (too old)" + ) + if dtype not in cls.SUPPORTED_DTYPES: + reasons.append(f"dtype={dtype} (supported: {cls.SUPPORTED_DTYPES})") + if type(d.attn_bias) not in cls.SUPPORTED_ATTN_BIAS_TYPES: + reasons.append(f"attn_bias type is {type(d.attn_bias)}") + if (d.p != 0.0) and not cls.SUPPORTS_DROPOUT: + reasons.append("dropout > 0.0") + if d.scale is not None and not cls.SUPPORTS_CUSTOM_SCALE: + reasons.append("has custom scale") + # bfloat16 is only supported on A100+ + # ... 
although the kernels can still run and give the + # correct result + if dtype is torch.bfloat16 and ( + not device_type.startswith("cuda") + or torch.cuda.get_device_capability(d.query.device)[0] < 8 + ): + reasons.append("bf16 is only supported on A100+ GPUs") + if not cls.is_available(): + reasons.append( + "operator wasn't built - see `python -m xformers.info` for more info" + ) + if not cls.IS_DETERMINISTIC and torch.are_deterministic_algorithms_enabled(): + reasons.append( + "operator is non-deterministic, but `torch.use_deterministic_algorithms` is set" + ) + if not cls.SUPPORTS_BMGHK and d.query.ndim == 5: + reasons.append("operator does not support BMGHK format") + return reasons + + +class AttentionFwOpBase(AttentionOpBase): + ERROR_ATOL: Mapping[torch.dtype, float] = { + torch.float: 3e-4, + torch.half: 4e-3, + torch.bfloat16: 2e-2, + } + ERROR_RTOL: Mapping[torch.dtype, float] = { + torch.float: 2e-5, + torch.half: 4e-4, + torch.bfloat16: 5e-3, + } + + @classmethod + def apply( + cls, inp: Inputs, needs_gradient: bool + ) -> Tuple[torch.Tensor, Optional[Context]]: + raise NotImplementedError() + + @classmethod + def attn_operator_flop( + cls, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + causal: bool = False, + seqstart_k: Optional[torch.Tensor] = None, + seqstart_q: Optional[torch.Tensor] = None, + ) -> int: + """ + Computes total flops for the attention + Assumes inputs in format BMHK + """ + assert query.ndim == 4 + + if seqstart_q is not None: + seqstart_q_py = seqstart_q.tolist() + else: + seqstart_q_py = [0, query.shape[1]] + if seqstart_k is not None: + seqstart_k_py = seqstart_k.tolist() + else: + seqstart_k_py = [0, key.shape[1]] + + total_flop = 0 + for q_start, q_end, k_start, k_end in zip( + seqstart_q_py, seqstart_q_py[1:], seqstart_k_py, seqstart_k_py[1:] + ): + num_q = q_end - q_start + num_kv = k_end - k_start + # (M,K) @ (K,N) GEMM needs M*N*K*2 flop + # Q @ K.transpose + total_flop += num_q * num_kv * 
query.shape[-1] * 2 + # (ignore softmax) + # attn @ V + total_flop += num_q * key.shape[-1] * num_kv * 2 + # Multiply by num_heads and batches + total_flop = total_flop * value.shape[2] * value.shape[0] + if causal: + total_flop //= 2 + return total_flop + + +class AttentionBwOpBase(AttentionOpBase): + # NOTE on tolerances: These are tested for `scales => (1/32)**0.5` + # In the BW pass, imprecisions accumulate in the Q@K.T recalculation + # These imprecisions are multiplied by the `scale` and then exponentiated + # So if the scale is too high, we get a lot of errors + + ERROR_ATOL: Mapping[torch.dtype, float] = { + torch.float: 9e-4, + torch.half: 0.2, + torch.bfloat16: 0.9, + } + ERROR_RTOL: Mapping[torch.dtype, float] = { + torch.float: 1e-4, + torch.half: 2e-2, + torch.bfloat16: 0.1, + } + SUPPORTS_ATTN_BIAS_GRAD = False + + @classmethod + def not_supported_reasons(cls, d: Inputs) -> List[str]: + reasons = super(AttentionBwOpBase, cls).not_supported_reasons(d) + if ( + isinstance(d.attn_bias, torch.Tensor) + and d.attn_bias.requires_grad + and not cls.SUPPORTS_ATTN_BIAS_GRAD + ): + reasons.append( + "Computing the bias gradient is not supported (attn_bias.requires_grad = True)" + ) + + return reasons + + @classmethod + def apply(cls, ctx: Context, inp: Inputs, grad: torch.Tensor) -> Gradients: + raise NotImplementedError() + + @classmethod + def attn_operator_flop( + cls, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + causal: bool = False, + seqstart_k: Optional[torch.Tensor] = None, + seqstart_q: Optional[torch.Tensor] = None, + ) -> int: + """ + Computes total flops for the attention + Assumes inputs in format BMHK + """ + assert query.ndim == 4 + + if seqstart_q is not None: + seqstart_q_py = seqstart_q.tolist() + else: + seqstart_q_py = [0, query.shape[1]] + if seqstart_k is not None: + seqstart_k_py = seqstart_k.tolist() + else: + seqstart_k_py = [0, key.shape[1]] + + total_flop = 0 + for q_start, q_end, k_start, k_end in zip( + 
seqstart_q_py, seqstart_q_py[1:], seqstart_k_py, seqstart_k_py[1:] + ): + num_q = q_end - q_start + num_kv = k_end - k_start + Kqk = query.shape[-1] + Kv = value.shape[-1] + # (M,K) @ (K,N) GEMM needs M*N*K*2 flop + # att = Q @ K.transpose + total_flop += num_q * num_kv * Kqk * 2 + # att @ dO + total_flop += num_kv * num_q * Kv * 2 + # dov = dO @ V + total_flop += num_q * Kv * num_kv * 2 + # dov @ K + total_flop += num_q * Kqk * num_kv * 2 + # dov @ Q + total_flop += num_q * Kqk * num_kv * 2 + # Multiply by num_heads and batches + total_flop = total_flop * value.shape[2] * value.shape[0] + if causal: + total_flop //= 2 + return total_flop + + +AttentionOp = Tuple[ + Optional[Type[AttentionFwOpBase]], Optional[Type[AttentionBwOpBase]] +] + + +@dataclass +class AttentionOpDispatch: + """Dispatcher to automatically select + the best operator to run memory-efficient attention. + + :Deprecated: + + This class is deprecated and will be removed in a later version + """ + + op: AttentionOp + + @classmethod + def from_arguments( + cls, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + attn_bias: Optional[Union[torch.Tensor, AttentionBias]] = None, + p: float = 0.0, + scale: Optional[float] = None, + ) -> "AttentionOpDispatch": + """Here for backward compatibility""" + from .dispatch import _dispatch_bw, _dispatch_fw + + inp = Inputs( + query=query, + key=key, + value=value, + attn_bias=attn_bias, + p=p, + scale=scale, + ) + return AttentionOpDispatch(op=(_dispatch_fw(inp, True), _dispatch_bw(inp))) + + +def bmk2bmhk(tensor, num_heads: int) -> torch.Tensor: + if tensor.ndim == 4: + return tensor + return tensor.reshape( + [tensor.shape[0] // num_heads, num_heads, tensor.shape[1], tensor.shape[2]] + ).permute((0, 2, 1, 3)) + + +def check_lastdim_alignment_stride1( + reasons: List[str], name: str, x: torch.Tensor, alignment: int +) -> None: + if x.shape[-1] % alignment != 0: + reasons.append(f"{name}.shape[-1] % {alignment} != 0") + elif x.stride(-2) % 
alignment != 0: + reasons.append( + f"{name}.stride(-2) % {alignment} != 0 ({name}.stride() = {x.stride()})" + ) + # We can have stride=0 sometimes if dimension=1 + if x.stride(-1) > 1: + reasons.append( + f"{name}.stride(-1) > 1 ({name}.stride() = {x.stride()}) - you should call `.contiguous()` on the input" + ) diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/fmha/cutlass.py b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/fmha/cutlass.py new file mode 100644 index 0000000000000000000000000000000000000000..b1185fc0f3f52f06285c90de70bb8a0d9c322580 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/fmha/cutlass.py @@ -0,0 +1,500 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. + + +from dataclasses import replace +from enum import Enum +from functools import partial +from typing import Any, List, Optional, Set, Tuple, Union + +import torch + +from ..common import get_xformers_operator, register_operator +from . 
import attn_bias +from .attn_bias import ( + AttentionBias, + BlockDiagonalCausalLocalAttentionFromBottomRightMask, + BlockDiagonalCausalLocalAttentionMask, + BlockDiagonalCausalMask, + BlockDiagonalCausalWithOffsetPaddedKeysMask, + BlockDiagonalMask, + LowerTriangularFromBottomRightLocalAttentionMask, + LowerTriangularFromBottomRightMask, + LowerTriangularMask, + LowerTriangularMaskWithTensorBias, +) +from .common import ( + AttentionBwOpBase, + AttentionFwOpBase, + Context, + Gradients, + Inputs, + _attn_bias_apply, + check_lastdim_alignment_stride1, +) + + +def _uses_tensorcores(sm: int, is_half: bool) -> bool: + if sm >= 80: + return True + if sm >= 70: + return is_half + return False + + +def _minimum_gemm_alignment(inp: Inputs) -> int: + if inp.device.type != "cuda": + return 1 + cap = torch.cuda.get_device_capability(inp.device) + sm = cap[0] * 10 + cap[1] + bits_per_scalar = {torch.float: 32, torch.half: 16, torch.bfloat16: 16}[ + inp.query.dtype + ] + uses_tensorcores = _uses_tensorcores(sm, bits_per_scalar == 16) + matmul_alignment_mn = 1 + if sm >= 80: + matmul_alignment_mn = 4 + if uses_tensorcores: + matmul_alignment_mn = max(matmul_alignment_mn, 128 // bits_per_scalar) + return matmul_alignment_mn + + +def _get_seqlen_info( + inp: Inputs, +) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], int, int]: + attn_bias = inp.attn_bias + if isinstance( + attn_bias, (BlockDiagonalMask, BlockDiagonalCausalWithOffsetPaddedKeysMask) + ): + attn_bias.k_seqinfo.to(inp.query.device) + attn_bias.q_seqinfo.to(inp.query.device) + seqstart_k = attn_bias.k_seqinfo.seqstart + seqstart_q = attn_bias.q_seqinfo.seqstart + max_seqlen_q = attn_bias.q_seqinfo.max_seqlen + max_seqlen_k = attn_bias.k_seqinfo.max_seqlen + else: + seqstart_k = None + seqstart_q = None + max_seqlen_q = -1 + max_seqlen_k = -1 + + return seqstart_k, seqstart_q, max_seqlen_q, max_seqlen_k + + +def _get_tensor_bias( + attn_bias: Optional[Union[torch.Tensor, AttentionBias]] +) -> 
Optional[torch.Tensor]: + if isinstance(attn_bias, torch.Tensor): + return attn_bias + elif isinstance(attn_bias, LowerTriangularMaskWithTensorBias): + return attn_bias._bias + return None + + +def _check_bias_alignment( + reasons: List[str], attn_bias: Optional[Union[torch.Tensor, AttentionBias]] +) -> None: + attn_bias_tensor = _get_tensor_bias(attn_bias) + if attn_bias_tensor is not None: + alignment = 128 // torch.finfo(attn_bias_tensor.dtype).bits + show_padding_hint = False + for d in range(attn_bias_tensor.ndim - 1): + if attn_bias_tensor.stride(d) % alignment != 0: + reasons.append( + f"attn_bias.stride(-2) % {alignment} != 0 (attn_bias.stride() = {attn_bias_tensor.stride()})" + ) + show_padding_hint = True + if show_padding_hint: + reasons.append( + """\ +HINT: To use an `attn_bias` with a sequence length that is not a multiple of 8, \ +you need to ensure memory is aligned by slicing a bigger tensor. \ +Example: use `attn_bias = torch.zeros([1, 1, 5, 8])[:,:,:,:5]` instead of `torch.zeros([1, 1, 5, 5])`""" + ) + # We can have stride=0 sometimes if dimension=1 + if attn_bias_tensor.stride(-1) > 1: + reasons.append( + f"attn_bias.stride(-1) > 1 (attn_bias.stride() = {attn_bias_tensor.stride()}) - " + "you should call `.contiguous()` on the bias" + ) + + +class _CustomMaskType(int, Enum): + """ + (Matches CustomMaskType in C++.) 
+ """ + + NoCustomMask = 0 + CausalFromTopLeft = 1 + CausalFromBottomRight = 2 + + +def _custom_mask_type(bias: Optional[Union[torch.Tensor, AttentionBias]]) -> int: + if isinstance( + bias, + ( + LowerTriangularMask, + BlockDiagonalCausalMask, + BlockDiagonalCausalLocalAttentionMask, + ), + ): + return int(_CustomMaskType.CausalFromTopLeft) + if isinstance( + bias, + ( + LowerTriangularFromBottomRightMask, + LowerTriangularFromBottomRightLocalAttentionMask, + attn_bias.BlockDiagonalCausalFromBottomRightMask, + BlockDiagonalCausalWithOffsetPaddedKeysMask, + BlockDiagonalCausalLocalAttentionFromBottomRightMask, + ), + ): + return int(_CustomMaskType.CausalFromBottomRight) + return int(_CustomMaskType.NoCustomMask) + + +@register_operator +class FwOp(AttentionFwOpBase): + """xFormers' MHA kernel based on CUTLASS. + Supports a large number of settings (including without TensorCores, f32 ...) + and GPUs as old as P100 (Sm60) + """ + + OPERATOR = get_xformers_operator("efficient_attention_forward_cutlass") + SUPPORTED_DEVICES: Set[str] = {"cuda"} + SUPPORTED_DTYPES: Set[torch.dtype] = {torch.float, torch.half, torch.bfloat16} + SUPPORTED_MAX_K = 65536 + SUPPORTED_ATTN_BIAS_TYPES: Set[Any] = { + type(None), + torch.Tensor, + LowerTriangularMask, + LowerTriangularFromBottomRightMask, + LowerTriangularFromBottomRightLocalAttentionMask, + LowerTriangularMaskWithTensorBias, + BlockDiagonalMask, + BlockDiagonalCausalMask, + BlockDiagonalCausalWithOffsetPaddedKeysMask, + attn_bias.BlockDiagonalCausalFromBottomRightMask, + attn_bias.BlockDiagonalCausalLocalAttentionMask, + BlockDiagonalCausalLocalAttentionFromBottomRightMask, + } + SUPPORTS_DROPOUT = True + SUPPORTS_CUSTOM_SCALE = True + SUPPORTS_DIFFERENT_VALUE_EMBED = True + SUPPORTS_BMGHK = True + NAME = "cutlassF" + + _TEST_K: List[int] = [ + 32, # 64x64 kernel + 128, # 64x128 kernel + 256, # 64x128 with accumulation in gmem + ] + + @classmethod + def apply( + cls, inp: Inputs, needs_gradient: bool + ) -> 
    @classmethod
    def apply_bmhk(
        cls, inp: Inputs, needs_gradient: bool
    ) -> Tuple[torch.Tensor, Optional[Context]]:
        """Run the CUTLASS forward kernel on BMK/BMHK-shaped inputs.

        Returns ``(out, ctx)`` where ``ctx`` is ``None`` unless
        ``needs_gradient`` is True, in which case it carries the output,
        the logsumexp, and (when dropout is active) the RNG state the
        backward pass needs to replay the dropout mask.
        """
        if type(inp.attn_bias) not in FwOp.SUPPORTED_ATTN_BIAS_TYPES:
            raise NotImplementedError("Unsupported attn_bias type")
        seqstart_k, seqstart_q, max_seqlen_q, _ = _get_seqlen_info(inp)
        out, lse, rng_seed, rng_offset = cls.OPERATOR(
            query=inp.query,
            key=inp.key,
            value=inp.value,
            attn_bias=_get_tensor_bias(inp.attn_bias),
            seqstart_q=seqstart_q,
            seqstart_k=seqstart_k,
            max_seqlen_q=max_seqlen_q,
            dropout_p=inp.p,
            # The logsumexp is only needed for the backward pass
            compute_logsumexp=needs_gradient,
            custom_mask_type=_custom_mask_type(inp.attn_bias),
            scale=inp.scale,
            # Per-batch key lengths are only meaningful for the
            # offset-padded-keys mask; every other bias passes None
            seqlen_k=inp.attn_bias.k_seqinfo.seqlen
            if isinstance(inp.attn_bias, BlockDiagonalCausalWithOffsetPaddedKeysMask)
            else None,
            # Only local-attention masks carry a window size
            window_size=inp.attn_bias._window_size
            if isinstance(
                inp.attn_bias,
                (
                    BlockDiagonalCausalLocalAttentionMask,
                    BlockDiagonalCausalLocalAttentionFromBottomRightMask,
                    LowerTriangularFromBottomRightLocalAttentionMask,
                ),
            )
            else None,
        )
        ctx: Optional[Context] = None
        if needs_gradient:
            ctx = Context(
                out=out,
                lse=lse,
                # cutlass forward is only compatible with cutlass backward if
                # dropout is used (because of the way RNG states are passed and the
                # way random numbers are generated during backward)
                op_bw=BwOp if inp.p != 0 else None,
            )
            if inp.p != 0:
                # Saved on CPU: two int64 values the backward kernel uses to
                # regenerate the identical dropout mask
                ctx.rng_state = torch.tensor(
                    [rng_seed, rng_offset], dtype=torch.int64, device="cpu"
                )
        return out, ctx
    @classmethod
    def not_supported_reasons(cls, d: Inputs) -> List[str]:
        """Return the list of reasons why cutlassB cannot run on ``d``.

        An empty list means the inputs are supported. On top of the base-class
        checks this enforces GEMM alignment on q/k/v and the bias, and rejects
        broadcasted bias tensors when a bias gradient is requested.
        """
        reasons = super(BwOp, cls).not_supported_reasons(d)
        matmul_alignment_mn = _minimum_gemm_alignment(d)

        check_lastdim_alignment_stride1(reasons, "query", d.query, matmul_alignment_mn)
        check_lastdim_alignment_stride1(reasons, "key", d.key, matmul_alignment_mn)
        check_lastdim_alignment_stride1(reasons, "value", d.value, matmul_alignment_mn)
        _check_bias_alignment(reasons, d.attn_bias)
        attn_bias_tensor = _get_tensor_bias(d.attn_bias)

        # Backprop of gradient through broadcasted bias is not supported
        if attn_bias_tensor is not None and attn_bias_tensor.requires_grad:
            # Don't forget that inputs are either in BMK or BMHK!
            if d.query.ndim == 3 and attn_bias_tensor.ndim == 3:
                # BMK inputs: bias must be exactly (B, Mq, Mk)
                expected_bias_shape = (*d.query.shape[:2], d.key.shape[1])
            else:
                # bias is B H Mq Mk
                expected_bias_shape = (
                    d.query.shape[0],
                    d.query.shape[2] if d.query.ndim == 4 else 1,
                    d.query.shape[1],
                    d.key.shape[1],
                )
            if tuple(attn_bias_tensor.shape) != expected_bias_shape:
                reasons.append(
                    "Broadcasting the `attn_bias` tensor is not supported "
                    f"(shape: {tuple(attn_bias_tensor.shape)}"
                    f"/ expected: {expected_bias_shape})"
                )
        return reasons
    @classmethod
    # type: ignore
    def operator_flop(
        cls,
        dO,
        q,
        k,
        v,
        b,
        cu_seqlens_q,
        cu_seqlens_k,
        max_seqlen_q,
        max_seqlen_k,
        logsumexp,
        output,
        dropout_p,
        rng_seed,
        rng_offset,
        custom_mask_type,
        scale,
    ) -> int:
        """FLOP-counter hook for the backward kernel.

        The parameter list mirrors the C++ operator's positional signature;
        only q/k/v, the seqstart vectors and the mask type affect the count.
        """
        return cls.attn_operator_flop(
            q,
            k,
            v,
            seqstart_q=cu_seqlens_q,
            seqstart_k=cu_seqlens_k,
            # Any custom mask (top-left or bottom-right causal) halves the work
            causal=custom_mask_type > 0,
        )
import attn_bias, cutlass, decoder, flash, small_k, triton, triton_splitk +from .common import AttentionBwOpBase, AttentionFwOpBase, Inputs + + +def _is_cutlass_fwd_faster_than_flash(inp: Inputs) -> bool: + return False + + +def _is_triton_fwd_fastest(inp: Inputs) -> bool: + # TODO: fill out + return False + + +T = TypeVar("T", Type[AttentionFwOpBase], Type[AttentionBwOpBase]) + + +def _format_inputs_description(inp: Inputs) -> str: + return f"""query : shape={tuple(inp.query.shape)} ({inp.query.dtype}) +key : shape={tuple(inp.key.shape)} ({inp.key.dtype}) +value : shape={tuple(inp.value.shape)} ({inp.value.dtype}) +attn_bias : {type(inp.attn_bias)} +p : {inp.p}""" + + +def _ensure_op_supports_or_raise(exc_type, name: str, op, inp: Inputs) -> None: + reasons = op.not_supported_reasons(inp) + if not reasons: + return + raise exc_type( + f"""Operator `{name}` does not support inputs: +{textwrap.indent(_format_inputs_description(inp), ' ')} +{_format_not_supported_reasons(op, reasons)}""" + ) + + +def _format_not_supported_reasons(op, reasons: List[str]) -> str: + return f"`{op.NAME}` is not supported because:\n " + "\n ".join(reasons) + + +def _run_priority_list(name: str, priority_list: Sequence[T], inp: Inputs) -> T: + not_supported_reasons: List[List[str]] = [] + for op in priority_list: + not_supported = op.not_supported_reasons(inp) + if not not_supported: + return op + not_supported_reasons.append(not_supported) + + # Let's write a nice message explaining what we tried and why it's not supported + msg = f"""No operator found for `{name}` with inputs: +{textwrap.indent(_format_inputs_description(inp), ' ')}""" + for op, not_supported in zip(priority_list, not_supported_reasons): + msg += "\n" + _format_not_supported_reasons(op, not_supported) + raise NotImplementedError(msg) + + +def _dispatch_fw_priority_list( + inp: Inputs, needs_gradient: bool +) -> Sequence[Type[AttentionFwOpBase]]: + priority_list_ops = deque( + [ + flash.FwOp, + triton.FwOp, + 
def _dispatch_fw_priority_list(
    inp: Inputs, needs_gradient: bool
) -> Sequence[Type[AttentionFwOpBase]]:
    """Build the ordered list of forward operators to try for ``inp``.

    The head of the deque is tried first; heuristics below promote
    specific backends for inference-only, MQA/GQA or variable-seqlen cases.
    """
    priority_list_ops = deque(
        [
            flash.FwOp,
            triton.FwOp,
            cutlass.FwOp,
            small_k.FwOp,
        ]
    )
    if _is_cutlass_fwd_faster_than_flash(inp):
        priority_list_ops.remove(cutlass.FwOp)
        priority_list_ops.appendleft(cutlass.FwOp)
    if _is_triton_fwd_fastest(inp):
        priority_list_ops.remove(triton.FwOp)
        priority_list_ops.appendleft(triton.FwOp)
    if not needs_gradient:
        # MQA/GQA is detected from a broadcast (stride-0) head dimension
        mqa_or_gqa = (
            inp.key.ndim > 3 and inp.key.stride(-2) == 0 and inp.key.shape[-2] > 1
        )
        if not mqa_or_gqa:
            # With multiquery, cutlass is sometimes faster than decoder
            # but it's not currently clear when.
            priority_list_ops.appendleft(decoder.FwOp)
        # Split-KV is useful with MQA
        # for short Q-seqlen / long K-seqlen
        if mqa_or_gqa and inp.query.shape[1] <= 32 and inp.key.shape[1] >= 256:
            parallelism_BH = 0  # BMK
            if inp.query.ndim == 3:
                parallelism_BH = inp.query.shape[0]
            elif inp.query.ndim == 4:  # BMHK
                parallelism_BH = inp.query.shape[0] * inp.query.shape[2]
            elif inp.query.ndim == 5:  # BMGHK
                parallelism_BH = inp.query.shape[0] * inp.query.shape[2]
            # Split-K only pays off when batch*heads parallelism is low
            if parallelism_BH > 0 and parallelism_BH < 64:
                priority_list_ops.appendleft(triton_splitk.FwOp)
    # Without variable seqlen flash is fastest
    if not isinstance(inp.attn_bias, attn_bias.BlockDiagonalMask):
        priority_list_ops.remove(flash.FwOp)
        priority_list_ops.appendleft(flash.FwOp)

    return priority_list_ops
def _dispatch_bw(inp: Inputs) -> Type[AttentionBwOpBase]:
    """Pick the best backward operator for ``inp``.

    flash and cutlass are the main candidates; their relative order is
    decided by ``_is_cutlassB_faster_than_flash``.
    """
    if _is_cutlassB_faster_than_flash(inp):
        candidates: List[Type[AttentionBwOpBase]] = [cutlass.BwOp, flash.BwOp]
    else:
        candidates = [flash.BwOp, cutlass.BwOp]
    # triton.BwOp is excluded: CUDA illegal memory issues, race conditions etc..
    # small_k.BwOp is deprecated, kept only as a last resort
    candidates.append(small_k.BwOp)
    return _run_priority_list(
        "memory_efficient_attention_backward", candidates, inp
    )
import _C_flashattention # type: ignore[attr-defined] + from ..._cpp_lib import _build_metadata + + if _build_metadata is not None: + FLASH_VERSION = _build_metadata.flash_version + except ImportError: + import flash_attn + from flash_attn.flash_attn_interface import flash_attn_cuda as _C_flashattention + + FLASH_VERSION = flash_attn.__version__ + flash_ver_parsed = tuple(int(s) for s in FLASH_VERSION.split(".")[:3]) + if ( + flash_ver_parsed != (2, 3, 6) + and os.environ.get("XFORMERS_IGNORE_FLASH_VERSION_CHECK", "0") != "1" + ): + raise ImportError("Requires Flash attention 2.3.6 for varlen_fwd api") + + # create library so that flash-attn goes through the PyTorch Dispatcher + _flash_lib = torch.library.Library("xformers_flash", "DEF") + + _flash_lib.define( + "flash_fwd(Tensor query, Tensor key, Tensor value, " + "Tensor? cu_seqlens_q, Tensor? cu_seqlens_k, Tensor? seqused_k, " + "int max_seqlen_q, int max_seqlen_k, " + "float p, float softmax_scale, " + "bool is_causal, int window_left, " + "int window_right, bool return_softmax) -> (Tensor, Tensor, Tensor)" + ) + + _flash_lib.define( + "flash_bwd(Tensor dout, Tensor query, Tensor key, Tensor value, " + "Tensor out, Tensor softmax_lse_, Tensor dq, Tensor dk, Tensor dv, " + "Tensor cu_seqlens_q, Tensor cu_seqlens_k, " + "int max_seqlen_q, int max_seqlen_k, " + "float p, float softmax_scale, bool is_causal, " + "int window_left, int window_right, Tensor rng_state) -> (Tensor, Tensor, Tensor)" + ) + + def _flash_fwd( + query, + key, + value, + cu_seq_lens_q, + cu_seq_lens_k, + seqused_k, + max_seq_len_q, + max_seq_len_k, + p, + softmax_scale, + is_causal, + window_left, + window_right, + return_softmax, + ): + if cu_seq_lens_q is None: + assert cu_seq_lens_k is None + assert seqused_k is None + ( + out, + q_padded, + k_padded, + v_padded, + out_padded, + softmax_lse, + p, + rng_state, + ) = _C_flashattention.fwd( + query, + key, + value, + None, # out + p, + softmax_scale, + is_causal, + window_left, # 
window_size_left + window_right, # window_size_right + return_softmax, + None, # rng + ) + else: + out = query.new_empty(query.shape[0], query.shape[1], value.shape[2]) + ( + out, + q_padded, + k_padded, + v_padded, + out_padded, + softmax_lse, + p, + rng_state, + ) = _C_flashattention.varlen_fwd( + query, + key, + value, + out, + cu_seq_lens_q, + cu_seq_lens_k, + seqused_k, + max_seq_len_q, + max_seq_len_k, + p, + softmax_scale, + False, + is_causal, + window_left, + window_right, + return_softmax, + None, + ) + return out, softmax_lse, rng_state + + def _flash_bwd( + grad, + query, + key, + value, + out, + lse, + dq, + dk, + dv, + cu_seq_lens_q, + cu_seq_lens_k, + max_seq_len_q, + max_seq_len_k, + p, + softmax_scale, + is_causal, + window_left, + window_right, + rng_state, + ): + if cu_seq_lens_k is None: + assert cu_seq_lens_q is None + _C_flashattention.bwd( + grad, + query, + key, + value, + out, + lse, + dq, + dk, + dv, + p, + softmax_scale, + is_causal, + window_left, + window_right, + None, + rng_state, + ) + else: + _C_flashattention.varlen_bwd( + grad, + query, + key, + value, + out, + lse, + dq, + dk, + dv, + cu_seq_lens_q, + cu_seq_lens_k, + max_seq_len_q, + max_seq_len_k, + p, + softmax_scale, + False, # zero_tensors + is_causal, + window_left, + window_right, + None, + rng_state, + ) + return dq, dk, dv + + _flash_lib.impl("flash_fwd", _flash_fwd, "CUDA") + _flash_lib.impl("flash_bwd", _flash_bwd, "CUDA") +except ImportError: + pass + + +def _convert_input_format( + inp: Inputs, + supports_mqa: bool, +) -> Tuple[ + Inputs, + Optional[torch.Tensor], + int, + Optional[torch.Tensor], + int, + Optional[torch.Tensor], +]: + assert inp.query.ndim in [4, 5] + query, key, value = inp.query, inp.key, inp.value + batch = query.shape[0] + seqlen_q = query.shape[1] + seqlen_kv = key.shape[1] + head_dim_q = query.shape[-1] + head_dim_v = value.shape[-1] + + attn_bias = inp.attn_bias + if isinstance(attn_bias, BlockDiagonalMask): + # BlockDiagonalMask or 
def _convert_input_format(
    inp: Inputs,
    supports_mqa: bool,
) -> Tuple[
    Inputs,
    Optional[torch.Tensor],
    int,
    Optional[torch.Tensor],
    int,
    Optional[torch.Tensor],
]:
    """Convert BMHK/BMGHK inputs to the layout flash-attention expects.

    Returns ``(inp, cu_seqlen_q, max_seqlen_q, cu_seqlen_k, max_seqlen_k,
    seqused_k)``. For block-diagonal biases the q/k tensors are flattened to
    ``[batch * seqlen, heads, head_dim]`` and cumulative seqlen vectors are
    extracted from the bias (moved to the query's device in place).
    """
    assert inp.query.ndim in [4, 5]
    query, key, value = inp.query, inp.key, inp.value
    batch = query.shape[0]
    seqlen_q = query.shape[1]
    seqlen_kv = key.shape[1]
    head_dim_q = query.shape[-1]
    head_dim_v = value.shape[-1]

    attn_bias = inp.attn_bias
    if isinstance(attn_bias, BlockDiagonalMask):
        # BlockDiagonalMask or BlockDiagonalCausalMask
        attn_bias.k_seqinfo.seqstart = attn_bias.k_seqinfo.seqstart.to(
            inp.query.device, non_blocking=True
        )
        attn_bias.q_seqinfo.seqstart = attn_bias.q_seqinfo.seqstart.to(
            inp.query.device, non_blocking=True
        )
        cu_seqlen_k = attn_bias.k_seqinfo.seqstart
        cu_seqlen_q = attn_bias.q_seqinfo.seqstart
        max_seqlen_q = attn_bias.q_seqinfo.max_seqlen
        max_seqlen_k = attn_bias.k_seqinfo.max_seqlen
        seqused_k = None
    elif isinstance(attn_bias, BlockDiagonalCausalWithOffsetPaddedKeysMask):
        attn_bias.k_seqinfo.seqstart = attn_bias.k_seqinfo.seqstart.to(
            inp.query.device, non_blocking=True
        )
        attn_bias.q_seqinfo.seqstart = attn_bias.q_seqinfo.seqstart.to(
            inp.query.device, non_blocking=True
        )
        attn_bias.k_seqinfo.seqlen = attn_bias.k_seqinfo.seqlen.to(
            inp.query.device, non_blocking=True
        )
        cu_seqlen_k = attn_bias.k_seqinfo.seqstart
        cu_seqlen_q = attn_bias.q_seqinfo.seqstart
        max_seqlen_q = attn_bias.q_seqinfo.max_seqlen
        max_seqlen_k = attn_bias.k_seqinfo.max_seqlen
        # Padded-keys mask: the kernel needs the real per-batch key lengths
        seqused_k = attn_bias.k_seqinfo.seqlen
    else:
        cu_seqlen_k = None
        cu_seqlen_q = None
        seqused_k = None
        max_seqlen_q = inp.query.shape[1]
        max_seqlen_k = inp.key.shape[1]

    if query.ndim == 5:  # QGA
        assert supports_mqa

        # Fold the group/head_in_group dimensions together
        def fold(x):
            # Either the head is replicated
            if x.stride(3) == 0:
                return x[:, :, :, 0]
            # Or we reshape
            return x.reshape(
                [
                    x.shape[0],
                    x.shape[1],
                    -1,
                    x.shape[4],
                ]
            )

        query = fold(query)
        key = fold(key)
        value = fold(value)
    # Optimize for MHA
    if key.ndim == 4 and key.stride(2) == 0 and value.stride(2) == 0 and supports_mqa:
        # Broadcast (stride-0) heads: keep a single head, flash broadcasts itself
        key = key[:, :, :1]
        value = value[:, :, :1]
    # Initially we have `query.shape = [batch, seqlen, num_heads, head_dim_q]`
    # We want format `[batch * seqlen, num_heads, head_dim_q]`
    if cu_seqlen_k is not None:
        query = query.reshape([batch * seqlen_q, -1, head_dim_q])
        key = key.reshape([batch * seqlen_kv, -1, head_dim_q])
        value = value.reshape([batch * seqlen_kv, -1, head_dim_v])
    new_inp = replace(
        inp,
        query=query,
        key=key,
        value=value,
    )
    return new_inp, cu_seqlen_q, max_seqlen_q, cu_seqlen_k, max_seqlen_k, seqused_k
def _window_size(
    attn_bias: Optional[Union[torch.Tensor, AttentionBias]]
) -> Tuple[int, int]:
    """Translate a bias object into flash-attention (left, right) window sizes.

    ``-1`` on either side means "unlimited" in that direction.
    """
    left, right = -1, -1
    causal_local_masks = (
        BlockDiagonalCausalLocalAttentionMask,
        BlockDiagonalCausalLocalAttentionFromBottomRightMask,
        LowerTriangularFromBottomRightLocalAttentionMask,
    )
    if isinstance(attn_bias, causal_local_masks):
        left = attn_bias._window_size - 1
    if isinstance(attn_bias, LocalAttentionFromBottomRightMask):
        left = attn_bias.window_left
        right = attn_bias.window_right
    return (left, right)
+ for k_start, q_start in zip_longest( + d.attn_bias.k_seqinfo.seqstart_py, d.attn_bias.q_seqinfo.seqstart_py + ): + if k_start != q_start: + reasons.append( + "Only support BlockDiagonalCausalMask if equal" + " numbers of keys and queries" + ) + break + elif isinstance(d.attn_bias, LowerTriangularMask): + if d.query.shape[1] != d.key.shape[1]: + reasons.append( + "Only support LowerTriangularMask if equal number of" "keys and queries" + ) + + +def _check_strides_for_bmghk(x: torch.Tensor, name: str, reasons: List[str]) -> None: + """ + We want to be able to collapse the G/H dimensions together + """ + if x.ndim == 5: + stride_g, stride_h = x.stride(2), x.stride(3) + if x.shape[2] == 1: + return + if x.shape[3] == 1 or stride_h == 0: + return + if stride_g != stride_h * x.shape[-2]: + reasons.append( + f"GQA is only supported when the G/H dimensions are contiguous\n" + f" {name}.stride: {x.stride()}\n" + f" {name}.shape : {list(x.shape)}" + ) + + +@register_operator +class FwOp(AttentionFwOpBase): + """Operator that computes memory-efficient attention using \ + `Flash-Attention `_ \ + implementation. 
+ """ + + OPERATOR = get_operator("xformers_flash", "flash_fwd") + SUPPORTED_DEVICES: Set[str] = {"cuda"} + CUDA_MINIMUM_COMPUTE_CAPABILITY = (8, 0) + SUPPORTED_DTYPES: Set[torch.dtype] = {torch.half, torch.bfloat16} + SUPPORTED_MAX_K = 256 + SUPPORTED_ATTN_BIAS_TYPES: Set[Any] = { + type(None), + LowerTriangularMask, + LowerTriangularFromBottomRightMask, + LowerTriangularFromBottomRightLocalAttentionMask, + BlockDiagonalMask, + BlockDiagonalCausalMask, + BlockDiagonalCausalLocalAttentionMask, + BlockDiagonalCausalLocalAttentionFromBottomRightMask, + BlockDiagonalCausalFromBottomRightMask, + BlockDiagonalCausalWithOffsetPaddedKeysMask, + LocalAttentionFromBottomRightMask, + } + SUPPORTS_DROPOUT = True + SUPPORTS_CUSTOM_SCALE = True + SUPPORTS_DIFFERENT_VALUE_EMBED = False + SUPPORTS_BMGHK = True + NAME = f"flshattF@{FLASH_VERSION}" + VERSION = FLASH_VERSION + + @classmethod + def not_supported_reasons(cls, d: Inputs) -> List[str]: + reasons = super(FwOp, cls).not_supported_reasons(d) + check_lastdim_alignment_stride1(reasons, "query", d.query, 8) + _check_needs_no_topleft(d, reasons) + _check_strides_for_bmghk(d.query, "query", reasons) + _check_strides_for_bmghk(d.key, "key", reasons) + _check_strides_for_bmghk(d.value, "value", reasons) + return reasons + + @classmethod + def apply( + cls, inp: Inputs, needs_gradient: bool + ) -> Tuple[torch.Tensor, Optional[Context]]: + return_softmax = False + out_shape = [ + *inp.query.shape[:-1], + inp.value.shape[-1], + ] + # no cumulative seqlen + ( + inp, + cu_seqlens_q, + max_seqlen_q, + cu_seqlens_k, + max_seqlen_k, + seqused_k, + ) = _convert_input_format(inp, supports_mqa=True) + if inp.query.numel() > 0 and inp.key.numel() > 0: + win_left, win_right = _window_size(inp.attn_bias) + out, softmax_lse, rng_state = cls.OPERATOR( + inp.query, + inp.key, + inp.value, + cu_seqlens_q, + cu_seqlens_k, + seqused_k, + max_seqlen_q, + max_seqlen_k, + inp.p, + inp.scale_float, + _is_causal(inp.attn_bias), + window_left=win_left, + 
    @classmethod
    # type: ignore
    def operator_flop(
        cls,
        query,
        key,
        value,
        cu_seq_lens_q,
        cu_seq_lens_k,
        max_seq_len_q,
        max_seq_len_k,
        p,
        softmax_scale,
        causal,
        return_softmax,
    ) -> int:
        """FLOP-counter hook for the flash forward kernel.

        The kernel receives packed tensors without a batch dimension, so a
        batch dim of 1 is re-added before counting.
        """
        return cls.attn_operator_flop(
            query.unsqueeze(0),
            key.unsqueeze(0),
            value.unsqueeze(0),
            causal=causal,
            seqstart_k=cu_seq_lens_k,
            seqstart_q=cu_seq_lens_q,
        )
    @classmethod
    def not_supported_reasons(cls, d: Inputs) -> List[str]:
        """Collect reasons why flash backward cannot run on ``d``.

        Beyond the forward-pass checks, large head dims are restricted to
        GPUs with enough shared memory (A100/H100).
        """
        reasons = super(BwOp, cls).not_supported_reasons(d)
        check_lastdim_alignment_stride1(reasons, "query", d.query, 8)
        _check_needs_no_topleft(d, reasons)
        if d.device.type == "cuda":
            # Due to limited shared-memory, some GPUs are limited in head dimension
            device_capability = torch.cuda.get_device_capability(d.device)
            is_sm80_or_sm90 = device_capability in [(8, 0), (9, 0)]
            if (
                max(d.key.shape[-1], d.query.shape[-1]) > cls.MAX_HEADDIM_SM8x
                and not is_sm80_or_sm90
            ):
                reasons.append(
                    "requires a GPU with compute capability 8.0 "
                    f"(A100) or 9.0 (H100) for 'query.shape[-1] > {cls.MAX_HEADDIM_SM8x}'"
                )
        return reasons
    @classmethod
    def apply(cls, ctx: Context, inp: Inputs, grad: torch.Tensor) -> Gradients:
        """Compute dq/dk/dv for the flash backward pass.

        Converts inputs to flash's packed layout, allocates the gradient
        buffers (in a single chunk when q/k/v share storage), runs the
        backward kernel in place, and reshapes the gradients back to the
        caller's original shapes.
        """
        # Remember the caller-visible shapes before the layout conversion
        dq_shape, dk_shape, dv_shape = inp.query.shape, inp.key.shape, inp.value.shape
        (
            inp,
            cu_seqlens_q,
            max_seqlen_q,
            cu_seqlens_k,
            max_seqlen_k,
            seqused_k,
        ) = _convert_input_format(inp, supports_mqa=False)
        assert ctx.lse.is_contiguous()
        # seqused_k is only produced by the padded-keys mask, which BwOp
        # does not support
        assert seqused_k is None
        ctx_lse = ctx.lse
        assert ctx_lse.shape[2] >= max_seqlen_q
        if max_seqlen_q != ctx_lse.shape[2]:
            # The saved LSE may be padded beyond max_seqlen_q; trim it
            ctx_lse = ctx_lse[:, :, :max_seqlen_q].contiguous()
        kernel_out_shape = [
            *inp.query.shape[:-1],
            inp.value.shape[-1],
        ]

        # Create dq,dk,dv
        # If Q/K/V come from a single QKV tensor, let's put the gradient in the
        # right strides, so we can avoid a `cat`
        if (
            inp.query.shape[0] == inp.key.shape[0]
            and inp.query.shape[-1] == inp.value.shape[-1]
            and _get_storage_base(inp.query) == _get_storage_base(inp.key)
            and _get_storage_base(inp.query) == _get_storage_base(inp.value)
        ):
            # Create one big contiguous chunk
            # This is because q, k and v usually come from a single
            # output of a linear layer that is chunked.
            # Creating the gradients with the right layout saves us
            # a `torch.cat` call in the backward pass
            chunk = torch.empty(
                (*inp.query.shape[0:-2], 3, inp.query.shape[-2], inp.query.shape[-1]),
                dtype=inp.query.dtype,
                device=inp.device,
            )
            grads = Gradients(
                dq=chunk.select(-3, 0),
                dk=chunk.select(-3, 1),
                dv=chunk.select(-3, 2),
            )
        else:
            grads = Gradients(
                dq=torch.empty_like(inp.query),
                dk=torch.empty_like(inp.key),
                dv=torch.empty_like(inp.value),
            )

        assert grad.dtype in cls.SUPPORTED_DTYPES

        # Empty-input corner cases: the kernel is skipped, so zero-fill the
        # buffers that would otherwise stay uninitialized
        if grads.dq.numel() == 0:
            grads.dk.zero_()
            grads.dv.zero_()
        if grads.dv.numel() == 0:
            grads.dq.zero_()
        if grads.dq.numel() and grads.dk.numel():
            win_left, win_right = _window_size(inp.attn_bias)
            cls.OPERATOR(
                grad.reshape(kernel_out_shape).contiguous(),
                inp.query,
                inp.key,
                inp.value,
                ctx.out.reshape(kernel_out_shape),
                ctx_lse,
                grads.dq,
                grads.dk,
                grads.dv,
                cu_seqlens_q,
                cu_seqlens_k,
                max_seqlen_q,
                max_seqlen_k,
                inp.p,
                inp.scale_float,
                _is_causal(inp.attn_bias),
                window_left=win_left,
                window_right=win_right,
                rng_state=ctx.rng_state,
            )
        grads.dq = grads.dq.reshape(dq_shape)
        grads.dk = grads.dk.reshape(dk_shape)
        grads.dv = grads.dv.reshape(dv_shape)
        return grads
b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/fmha/small_k.py @@ -0,0 +1,186 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. + +from typing import Any, List, Mapping, Optional, Set, Tuple, Union + +import torch + +from ..common import get_xformers_operator, register_operator +from .attn_bias import AttentionBias +from .common import ( + AttentionBwOpBase, + AttentionFwOpBase, + Context, + Gradients, + Inputs, + bmk2bmhk, +) + + +def _bmhk2bmk_contiguous(tensor) -> torch.Tensor: + return ( + tensor.permute((0, 2, 1, 3)) + .contiguous() + .view([tensor.shape[0] * tensor.shape[2], tensor.shape[1], tensor.shape[3]]) + .contiguous() + ) + + +def _get_tensor_bias_bmk( + attn_bias: Optional[Union[torch.Tensor, AttentionBias]] +) -> Optional[torch.Tensor]: + if not isinstance(attn_bias, torch.Tensor): + assert attn_bias is None + return None + # BMK -> BMHK + if attn_bias.ndim == 4: + attn_bias = attn_bias.reshape([-1, *attn_bias.shape[2:]]) + return attn_bias + + +@register_operator +class FwOp(AttentionFwOpBase): + """An operator optimized for very small values of K (``K <= 32``) \ + and f32 pre-Ampere as it does not use TensorCores. + Only supports contiguous inputs in BMK format, so an extra reshape \ + or contiguous call might be done. 
+ + :Deprecated: + + This operator is deprecated and should not be used in new code + """ + + OPERATOR = get_xformers_operator("efficient_attention_forward_small_k") + SUPPORTED_DEVICES = {"cuda"} + SUPPORTED_DTYPES = {torch.float} + SUPPORTED_MAX_K: float = 32 + SUPPORTED_ATTN_BIAS_TYPES: Set[Any] = {type(None), torch.Tensor} + SUPPORTS_DROPOUT = True + SUPPORTS_CUSTOM_SCALE = False + NAME = "smallkF" + + BACKWARD_ERROR_ATOL: Mapping[torch.dtype, float] = { + torch.float: 4e-3, + } + # as this kernel is a bit slow, this should make tests run faster + _TEST_BATCH_SIZES = [1, 3] + _TEST_K = [2, 3, 8, 16, 32] + + @classmethod + def not_supported_reasons(cls, d: Inputs) -> List[str]: + reasons = super(FwOp, cls).not_supported_reasons(d) + if isinstance(d.attn_bias, torch.Tensor) and d.attn_bias.stride(1) != 0: + reasons.append("bias with non-zero stride not supported") + buffer_size = 8 + k = d.query.shape[-1] + for pack in [1, 2, 4]: + if (k % pack) == 0 and (k // pack) <= buffer_size: + return reasons + reasons.append(f"unsupported embed per head: {k}") + return reasons + + @classmethod + def apply( + cls, inp: Inputs, needs_gradient: bool + ) -> Tuple[torch.Tensor, Optional[Context]]: + if inp.scale is not None: + raise NotImplementedError("Unsupport custom scale") + num_heads = inp.query.shape[2] + query = _bmhk2bmk_contiguous(inp.query) + key = _bmhk2bmk_contiguous(inp.key) + value = _bmhk2bmk_contiguous(inp.value) + + out, lse, rng_seed, rng_offset = cls.OPERATOR( + query=query, + key=key, + value=value, + compute_logsumexp=needs_gradient, + attn_bias=_get_tensor_bias_bmk(inp.attn_bias), + p=inp.p, + ) + out = bmk2bmhk(out, num_heads) + lse = lse.reshape([lse.shape[0] // num_heads, num_heads, lse.shape[1]]) + if not needs_gradient: + return out, None + ctx = Context(out=out, lse=lse) + if inp.p != 0.0: + ctx.op_bw = BwOp + ctx.rng_state = torch.tensor( + [rng_seed, rng_offset], dtype=torch.int64, device="cpu" + ) + return out, ctx + + +@register_operator +class 
BwOp(AttentionBwOpBase): + __doc__ = FwOp.__doc__ + + OPERATOR = get_xformers_operator("efficient_attention_backward_small_k") + SUPPORTED_DEVICES = FwOp.SUPPORTED_DEVICES + SUPPORTED_DTYPES = FwOp.SUPPORTED_DTYPES + SUPPORTED_MAX_K = FwOp.SUPPORTED_MAX_K + SUPPORTED_ATTN_BIAS_TYPES = FwOp.SUPPORTED_ATTN_BIAS_TYPES + SUPPORTS_DROPOUT = FwOp.SUPPORTS_DROPOUT + SUPPORTS_CUSTOM_SCALE = FwOp.SUPPORTS_CUSTOM_SCALE + SUPPORTS_DIFFERENT_VALUE_EMBED = FwOp.SUPPORTS_DIFFERENT_VALUE_EMBED + + # there is some extra precision loss in the CPU implementation due to an + # extra accumulation step in grad_q, which is not present in the CUDA + # implementation + ERROR_ATOL: Mapping[torch.dtype, float] = { + torch.float: 4e-3, + } + NAME = "smallkB" + + @classmethod + def not_supported_reasons(cls, d: Inputs) -> List[str]: + reasons = super(BwOp, cls).not_supported_reasons(d) + if isinstance(d.attn_bias, torch.Tensor) and d.attn_bias.stride(1) != 0: + reasons.append("bias with non-zero stride not supported") + buffer_size = 8 + k = d.query.shape[-1] + for pack in [1, 2, 4]: + if (k % pack) == 0 and (k // pack) <= buffer_size: + return reasons + reasons.append(f"unsupported embed per head: {k}") + return reasons + + @classmethod + def apply(cls, ctx: Context, inp: Inputs, grad: torch.Tensor) -> Gradients: + num_heads = grad.shape[2] + grad = _bmhk2bmk_contiguous(grad) + query = _bmhk2bmk_contiguous(inp.query) + key = _bmhk2bmk_contiguous(inp.key) + value = _bmhk2bmk_contiguous(inp.value) + out = _bmhk2bmk_contiguous(ctx.out) + + rng_seed = rng_offset = 0 + if inp.p != 0.0: + if ( + ctx.rng_state is None + or ctx.rng_state.dtype != torch.int64 + or ctx.rng_state.device.type != "cpu" + or ctx.rng_state.shape != (2,) + ): + raise NotImplementedError(f"Invalid rng_state: {ctx.rng_state}") + rng_seed, rng_offset = ctx.rng_state.tolist() + grad_q, grad_k, grad_v = cls.OPERATOR( + grad, + query, + key, + value, + # LSE: BHM -> (BH)M + ctx.lse.flatten(end_dim=-2), + out, + 
_get_tensor_bias_bmk(inp.attn_bias), + inp.p, + rng_seed, + rng_offset, + ) + return Gradients( + dq=bmk2bmhk(grad_q, num_heads), + dk=bmk2bmhk(grad_k, num_heads), + dv=bmk2bmhk(grad_v, num_heads), + ) diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/fmha/triton.py b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/fmha/triton.py new file mode 100644 index 0000000000000000000000000000000000000000..2d6e2a059a148dd87ac6c132771c73ba33ecf7a8 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/fmha/triton.py @@ -0,0 +1,201 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. + + +from dataclasses import replace +from typing import TYPE_CHECKING, Any, List, Optional, Set, Tuple + +import torch + +from ... import _is_triton_available +from ..common import register_operator + +# This implementation needs pre-MLIR triton +# The BW pass is not stable/well tested +# And also does not have the latest improvements +if TYPE_CHECKING or (False and _is_triton_available()): + try: + from flash_attn.flash_attn_triton import ( + _flash_attn_backward, + _flash_attn_forward, + ) + except ImportError: + import importlib + import pathlib + import sys + import types + + def import_module_from_path(path: str) -> types.ModuleType: + """Import a module from the given path, w/o __init__.py""" + module_path = pathlib.Path(path).resolve() + module_name = module_path.stem # 'path/x.py' -> 'x' + spec = importlib.util.spec_from_file_location(module_name, module_path) # type: ignore + assert isinstance(spec, importlib.machinery.ModuleSpec) + module = importlib.util.module_from_spec(spec) # type: ignore + sys.modules[module_name] = module + assert isinstance(spec.loader, importlib.abc.Loader) + spec.loader.exec_module(module) + return module + + flash_attn = import_module_from_path( + 
"third_party/flash-attention/flash_attn/flash_attn_triton.py" + ) + _flash_attn_backward = flash_attn._flash_attn_backward + _flash_attn_forward = flash_attn._flash_attn_forward + + triton_flash_backward = _flash_attn_backward + triton_flash_forward = _flash_attn_forward +else: + triton_flash_backward = None + triton_flash_forward = None + +from .attn_bias import LowerTriangularMask +from .common import ( + AttentionBwOpBase, + AttentionFwOpBase, + Context, + Gradients, + Inputs, + check_lastdim_alignment_stride1, +) + + +def _prepare_inputs(inp: Inputs) -> Inputs: + attn_bias = inp.attn_bias + if isinstance(attn_bias, torch.Tensor) and attn_bias.ndim == 3: + B = inp.query.shape[0] + h = attn_bias.shape[0] // B + attn_bias = attn_bias.reshape(B, h, attn_bias.shape[1], attn_bias.shape[2]) + + # Make sure that the last dimension is contiguous + query, key, value = [ + x if x.stride(-1) == 1 else x.contiguous() + for x in [inp.query, inp.key, inp.value] + ] + return replace(inp, attn_bias=attn_bias, query=query, key=key, value=value) + + +@register_operator +class FwOp(AttentionFwOpBase): + """Operator that computes memory-efficient attention using \ + `Tri Dao's `_ \ + implementation, based on + `Phil Tillet's code `_ + """ + + OPERATOR = triton_flash_forward + SUPPORTED_DEVICES = {"cuda"} + CUDA_MINIMUM_COMPUTE_CAPABILITY = (8, 0) + SUPPORTED_DTYPES = {torch.half, torch.bfloat16} + SUPPORTED_MAX_K = 128 + SUPPORTED_ATTN_BIAS_TYPES: Set[Any] = { + type(None), + LowerTriangularMask, + # TODO: backwards accuracy is failing for a few cases, perhaps we want to disable this for now. 
+ # torch.Tensor, + } + SUPPORTS_DROPOUT = False + SUPPORTS_CUSTOM_SCALE = True + NAME = "tritonflashattF" + + @classmethod + def not_supported_reasons(cls, d: Inputs) -> List[str]: + reasons = super(FwOp, cls).not_supported_reasons(d) + check_lastdim_alignment_stride1(reasons, "query", d.query, 8) + check_lastdim_alignment_stride1(reasons, "key", d.key, 8) + check_lastdim_alignment_stride1(reasons, "value", d.value, 8) + if cls.OPERATOR is None: + reasons.append("triton is not available") + if d.device.type == "cuda": + # Has only been tested on 8.0 / 9.0. + # Fails on 7.5 with illegal memory access + if torch.cuda.get_device_capability(d.device) < (8, 0): + reasons.append( + "requires GPU with sm80 minimum compute capacity, e.g., A100/H100/L4" + ) + if _is_triton_available(): + import triton + + if triton.__version__ > "2.0.0": + reasons.append("Only work on pre-MLIR triton for now") + return reasons + + @classmethod + def apply( + cls, inp: Inputs, needs_gradient: bool + ) -> Tuple[torch.Tensor, Optional[Context]]: + inp = _prepare_inputs(inp) + + out, lse, softmax_scale = triton_flash_forward( + q=inp.query, + k=inp.key, + v=inp.value, + bias=inp.attn_bias if isinstance(inp.attn_bias, torch.Tensor) else None, + softmax_scale=inp.scale_float, + causal=isinstance(inp.attn_bias, LowerTriangularMask), + ) + return out, Context(lse=lse, out=out) + + +@register_operator +class BwOp(AttentionBwOpBase): + __doc__ = FwOp.__doc__ + + OPERATOR = triton_flash_backward + SUPPORTED_DEVICES = FwOp.SUPPORTED_DEVICES + CUDA_MINIMUM_COMPUTE_CAPABILITY = FwOp.CUDA_MINIMUM_COMPUTE_CAPABILITY + SUPPORTED_DTYPES = FwOp.SUPPORTED_DTYPES + SUPPORTED_MAX_K = FwOp.SUPPORTED_MAX_K + SUPPORTED_ATTN_BIAS_TYPES = FwOp.SUPPORTED_ATTN_BIAS_TYPES + SUPPORTS_DROPOUT = FwOp.SUPPORTS_DROPOUT + SUPPORTS_CUSTOM_SCALE = FwOp.SUPPORTS_CUSTOM_SCALE + SUPPORTS_DIFFERENT_VALUE_EMBED = FwOp.SUPPORTS_DIFFERENT_VALUE_EMBED + NAME = "tritonflashattB" + + @classmethod + def not_supported_reasons(cls, d: 
Inputs) -> List[str]: + reasons = super(BwOp, cls).not_supported_reasons(d) + check_lastdim_alignment_stride1(reasons, "query", d.query, 8) + check_lastdim_alignment_stride1(reasons, "key", d.key, 8) + check_lastdim_alignment_stride1(reasons, "value", d.value, 8) + if cls.OPERATOR is None: + reasons.append("triton is not available") + if d.device.type == "cuda": + if torch.cuda.get_device_capability(d.device) != (8, 0): + reasons.append("requires A100 GPU") + if _is_triton_available(): + import triton + + if triton.__version__ > "2.0.0": + reasons.append("Only work on pre-MLIR triton for now") + return reasons + + @classmethod + def apply(cls, ctx: Context, inp: Inputs, grad: torch.Tensor) -> Gradients: + inp = _prepare_inputs(inp) + + # Triton's autotune causes the Tensor._version to change, and so Pytorch autograd + # does a memcpy. To avoid this we run in inference_mode, which doesn't track the version. + with torch.inference_mode(): + grads = Gradients( + dq=torch.empty_like(inp.query), + dk=torch.empty_like(inp.key), + dv=torch.empty_like(inp.value), + ) + cls.OPERATOR( + grad, + inp.query, + inp.key, + inp.value, + ctx.out, + ctx.get_padded_lse(128), + grads.dq, + grads.dk, + grads.dv, + bias=inp.attn_bias if isinstance(inp.attn_bias, torch.Tensor) else None, + softmax_scale=inp.scale_float, + causal=isinstance(inp.attn_bias, LowerTriangularMask), + ) + return grads diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/fmha/triton_splitk.py b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/fmha/triton_splitk.py new file mode 100644 index 0000000000000000000000000000000000000000..1c4f6d94218e39864c63fa8a77066cdce32050e8 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/fmha/triton_splitk.py @@ -0,0 +1,740 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. 
+ + +from typing import TYPE_CHECKING, Any, List, Optional, Set, Tuple + +import torch + +from ..common import _has_triton21, register_operator +from .attn_bias import BlockDiagonalCausalWithOffsetPaddedKeysMask +from .common import AttentionFwOpBase, Context, Inputs, check_lastdim_alignment_stride1 + + +def _strides(x: torch.Tensor, *stride_names: str): + assert x.ndim == len(stride_names) + return {f"stride_{s}": x.stride(i) for i, s in enumerate(stride_names)} + + +if TYPE_CHECKING or _has_triton21(): + import triton + import triton.language as tl + + from xformers.triton.vararg_kernel import VAR_ARGS_ARRAY, unroll_varargs + + @triton.jit + def _fwd_kernel_splitK( + Q, + K, + V, + sm_scale, + Out_splitK, # [B, H, split_k, Mq, K] + Metadata, # [B, H, 2, split_k, M_ceil] contains [mi, li] + Seq_len, + stride_qz, + stride_qm, + stride_qg, + stride_qh, + stride_qk, + stride_kz, + stride_kn, + stride_kg, + stride_kh, + stride_kk, + stride_vz, + stride_vn, + stride_vg, + stride_vh, + stride_vk, + stride_osk_zhg, + stride_osk_s, + stride_osk_m, + stride_osk_k, + stride_mzhg, + stride_m2, + stride_ms, + stride_mm, + Z, + N_CTX_Q, + N_CTX_K, + BLOCK_N_PER_SPLIT, + H: tl.constexpr, + G: tl.constexpr, + BLOCK_M: tl.constexpr, + BLOCK_DMODEL: tl.constexpr, + BLOCK_N: tl.constexpr, + BOUNDS_CHECKS_N: tl.constexpr, + USE_SEQ_LEN: tl.constexpr, + PACKED_PER_VAL: tl.constexpr = 1, + N_GROUPS: tl.constexpr = 1, + ): + """This kernel can accept non-quantized or int4-quantized keys/values. + PACKED_PER_VAL determines the quantization type: + - PACKED_PER_VAL == 1 means no quantization + - PACKED_PER_VAL == 8 means 4-bit quantization (8 packed quantized values inside one int32) + For the quantized case K/V should be int32 tensors. + Quantization can be row-wise (when N_GROUPS = 1) or group-wise with N_GROUPS = 2, 4, or 8. 
+ Quantization coefficients are stored at the beginning of the row along the last dimension of K/V + So K[B, H, M, :] has a form + [ quant_coef0, quant_coef1, ...| + group0_quant_value0, group0_quant_value1,... | + group1_quant_value0, group1_quant_value1,...] + where each quant_coef is an int32 which should be interpreted as 2 packed float16: scale and offset. + + Note: this kernel needs to be processed by xformers.triton.vararg_kernel.unroll_varargs + before compilation. That will unroll variables marked with "VAR_ARGS_ARRAY" into lists. + See how FwOp.apply does it below. + """ + tl.static_assert( + (PACKED_PER_VAL == 1 and tl.constexpr(K.dtype.element_ty != tl.int32)) + or (PACKED_PER_VAL == 8 and tl.constexpr(K.dtype.element_ty == tl.int32)), + f"Only 4-bit quantization is supported, K/V should have dtype int32 in " + f"the quantized case: {PACKED_PER_VAL=} {tl.constexpr(K.dtype)=} {tl.constexpr(K.dtype.element_ty)=}", + ) + tl.static_assert( + (((N_GROUPS == 1 or N_GROUPS == 2) or N_GROUPS == 4) or N_GROUPS == 8), + "Number of quantization groups can be 1 (row-wise quantization), 2, 4, or 8.", + ) + + QUANTIZED: tl.constexpr = PACKED_PER_VAL > 1 + PACKED_D_PER_GROUP: tl.constexpr = BLOCK_DMODEL // PACKED_PER_VAL // N_GROUPS + D_PER_GROUP: tl.constexpr = BLOCK_DMODEL // N_GROUPS + + start_m = tl.program_id(0) + off_zhg = tl.program_id(1) + off_z = off_zhg // (H * G) + off_h = (off_zhg // G) % H + off_g = off_zhg % G + splitk_idx = tl.program_id(2) + + lo = splitk_idx * BLOCK_N_PER_SPLIT + if USE_SEQ_LEN: + kv_len = tl.load(Seq_len + off_z) + else: + kv_len = N_CTX_K + hi = tl.minimum((splitk_idx + 1) * BLOCK_N_PER_SPLIT, kv_len) + + Q_block_ptr = tl.make_block_ptr( + base=Q + off_h * stride_qh + off_z * stride_qz + off_g * stride_qg, + shape=(N_CTX_Q, D_PER_GROUP), + strides=(stride_qm, stride_qk), + offsets=(start_m * BLOCK_M, 0), + block_shape=(BLOCK_M, D_PER_GROUP), + order=(1, 0), + ) + + k_base = K + off_h * stride_kh + off_z * stride_kz + off_g * 
stride_kg + # Additional shift by 1 along the last dimension in the quantized case, since + # the first element along that dim contains packed quantization coefficients. + K_block_ptr = tl.make_block_ptr( + base=k_base + stride_kk * QUANTIZED * N_GROUPS, + shape=(PACKED_D_PER_GROUP, hi), + strides=(stride_kk, stride_kn), + offsets=(0, lo), + block_shape=(PACKED_D_PER_GROUP, BLOCK_N), + order=(0, 1), + ) + v_base = V + off_h * stride_vh + off_z * stride_vz + off_g * stride_vg + V_block_ptr = tl.make_block_ptr( + base=v_base + stride_vk * QUANTIZED * N_GROUPS, + shape=(hi, PACKED_D_PER_GROUP), + strides=(stride_vn, stride_vk), + offsets=(lo, 0), + block_shape=(BLOCK_N, PACKED_D_PER_GROUP), + order=(1, 0), + ) + + if QUANTIZED: + # Pointers to quantization coefficients. Even those they are 1D, + # we have to use block pointers, since usual pointers + # don't support boundary checks + K_scale_shift_block_ptr = tl.make_block_ptr( + base=k_base, + shape=(1, hi), + strides=(stride_kk, stride_kn), + offsets=(0, lo), + block_shape=(1, BLOCK_N), + order=(0, 1), + ) + V_scale_shift_block_ptr = tl.make_block_ptr( + base=v_base, + shape=(hi, 1), + strides=(stride_vn, stride_vk), + offsets=(lo, 0), + block_shape=(BLOCK_N, 1), + order=(1, 0), + ) + else: + K_scale_shift_block_ptr = None + V_scale_shift_block_ptr = None + + # initialize pointer to m and l + m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf") + l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + + # Before compilation, this kernel will be processed by xformers.triton.vararg_kernel.unroll_varargs. + # That turns tensors annotated as the one below into lists of tensors of length N_GROUPS. + # This is a solution for Triton native lack of support for lists of tensors. 
+ acc: "VAR_ARGS_ARRAY" # noqa: F821 + + for i in range(len(acc)): # noqa: F821 + acc[i] = tl.zeros([BLOCK_M, D_PER_GROUP], dtype=tl.float32) # noqa: F821 + # scale sm_scale by log_2(e) and use + # 2^x instead of exp in the loop because CSE and LICM + # don't work as expected with `exp` in the loop + qk_scale = sm_scale * 1.44269504 + # load q: it will stay in SRAM throughout + q: "VAR_ARGS_ARRAY" # noqa: F821 + for i in range(len(acc)): # noqa: F821 + q[i] = tl.load( # noqa: F821 + tl.advance(Q_block_ptr, (0, i * D_PER_GROUP)), boundary_check=(0,) + ) + # loop over k, v and update accumulator + for start_n in range(lo, hi, BLOCK_N): + k: "VAR_ARGS_ARRAY" # noqa: F821 + v: "VAR_ARGS_ARRAY" # noqa: F821 + for i in range(len(acc)): # noqa: F821 + k[i], v[i] = load_dequantize_k_v_group( # noqa: F821 + K_block_ptr, + V_block_ptr, + K_scale_shift_block_ptr, + V_scale_shift_block_ptr, + BOUNDS_CHECKS_N, + PACKED_PER_VAL, + PACKED_D_PER_GROUP, + Q.dtype.element_ty, + i, + ) + + # -- compute qk --- + qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) + for i in range(len(acc)): # noqa: F821 + qk += tl.dot(q[i], k[i]) # noqa: F821 + qk *= qk_scale + + # TODO: This is slow, and only needed at the last iteration. + # Maybe we can unroll the last iteration instead? 
+ if BOUNDS_CHECKS_N: + qk = tl.where(tl.arange(0, BLOCK_N) < hi - start_n, qk, float("-inf")) + # -- compute scaling constant --- + m_i_new = tl.maximum(m_i, tl.max(qk, 1)) + alpha = tl.math.exp2(m_i - m_i_new) + p = tl.math.exp2(qk - m_i_new[:, None]) + + # -- update m_i and l_i -- + l_i = l_i * alpha + tl.sum(p, 1) + m_i = m_i_new + p = p.to(Q.dtype.element_ty) + + # -- scale and update acc -- + for i in range(len(acc)): # noqa: F821 + acc[i] *= alpha[:, None] # noqa: F821 + acc[i] += tl.dot(p, v[i]) # noqa: F821 + # update pointers + K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N)) + V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0)) + if PACKED_PER_VAL > 1: + K_scale_shift_block_ptr = tl.advance( + K_scale_shift_block_ptr, (0, BLOCK_N) + ) + V_scale_shift_block_ptr = tl.advance( + V_scale_shift_block_ptr, (BLOCK_N, 0) + ) + + # write back O + O_block_ptr = tl.make_block_ptr( + base=Out_splitK + off_zhg * stride_osk_zhg + splitk_idx * stride_osk_s, + shape=(N_CTX_Q, D_PER_GROUP), + strides=(stride_osk_m, 1), + offsets=(start_m * BLOCK_M, 0), + block_shape=(BLOCK_M, D_PER_GROUP), + order=(1, 0), + ) + for i in range(len(acc)): # noqa: F821 + tl.store( + tl.advance(O_block_ptr, (0, i * D_PER_GROUP)), + acc[i], # noqa: F821 + boundary_check=(0,), + ) + # Write metadata for split-K reduction + Metadata_ptr = ( + Metadata + + off_zhg * stride_mzhg + + splitk_idx * stride_ms + + start_m * BLOCK_M + + tl.arange(0, BLOCK_M) + ) + tl.store(Metadata_ptr, m_i) + tl.store(Metadata_ptr + stride_m2, l_i) + + @triton.jit + def load_dequantize_k_v_group( + K_block_ptr, + V_block_ptr, + K_scale_shift_block_ptr, + V_scale_shift_block_ptr, + BOUNDS_CHECKS_N: tl.constexpr, + PACKED_PER_VAL: tl.constexpr, + PACKED_D_PER_GROUP: tl.constexpr, + dtype: tl.constexpr, + group_id: tl.constexpr, + ): + """Load K/V for a given block. In case of int4-quantized K/V, dequantize them after loading. + If quantization is group-wise, use group_id to advance the pointers to the current group. 
+ """ + # Advance to the current quantization group + K_block_ptr = tl.advance(K_block_ptr, (PACKED_D_PER_GROUP * group_id, 0)) + V_block_ptr = tl.advance(V_block_ptr, (0, PACKED_D_PER_GROUP * group_id)) + + # -- load k, v -- + k = tl.load(K_block_ptr, boundary_check=(1,) if BOUNDS_CHECKS_N else ()) + v = tl.load(V_block_ptr, boundary_check=(0,) if BOUNDS_CHECKS_N else ()) + + if PACKED_PER_VAL > 1: + # K/V are quantized, load quantization coefficients and dequantize + + K_scale_shift_block_ptr = tl.advance(K_scale_shift_block_ptr, (group_id, 0)) + V_scale_shift_block_ptr = tl.advance(V_scale_shift_block_ptr, (0, group_id)) + + k_scale_shift = tl.load( + K_scale_shift_block_ptr, boundary_check=(1,) if BOUNDS_CHECKS_N else () + ) + v_scale_shift = tl.load( + V_scale_shift_block_ptr, boundary_check=(0,) if BOUNDS_CHECKS_N else () + ) + + k_scale, k_shift = cast_uint32_to_half2(k_scale_shift) + v_scale, v_shift = cast_uint32_to_half2(v_scale_shift) + v = dequantize(v, v_scale, v_shift, PACKED_PER_VAL).to(dtype) + k_t = dequantize( + tl.trans(k), + tl.trans(k_scale), + tl.trans(k_shift), + PACKED_PER_VAL, + ).to(dtype) + k = tl.trans(k_t) + return k, v + + @triton.jit + def cast_uint32_to_half2(scale_shift): + """Extract two float16 packed into one int32""" + scale = scale_shift & 0xFFFF + shift = scale_shift >> 16 + scale = scale.to(tl.uint16).to(tl.float16, bitcast=True) + shift = shift.to(tl.uint16).to(tl.float16, bitcast=True) + return scale, shift + + @triton.jit + def dequantize( + x_, + scale, + shift, + PACKED_PER_VAL: tl.constexpr = 8, + ): + """PACKED_PER_VAL is the number of values packed into each element x_. + For example, for int4 quantization and x_ of type int32, PACKED_PER_VAL is 8. 
+ """ + + # Axis along which offsets are applied matters here + # It would be natural to have offsets in shape (BLOCK_N, D // PACKED_PER_VAL, PACKED_PER_VAL) + # and expand K/V to that shape before applying offsets + # However, Triton for some reason considers dim=1 as contiguous when doing tl.view below, and not dim=2 + # Note that tl.view doesn't guarantee the order of elements in the result - thus the code below depends + # on the implementation details which might change in the future. + # Ideally we would like to use tl.reshape, but it's not implemented yet. + # See https://github.com/openai/triton/blob/9055af1a5dadc576804b38dd77ee91dc42af0bf7/python/triton/language/semantic.py#L541 # noqa: E501 + + # x_ : (BLOCK_N, D // PACKED_PER_VAL) + # scale: (BLOCK_N, 1) + # offsets: (PACKED_PER_VAL,) + BLOCK_N: tl.constexpr = x_.shape[0] + BLOCK_DMODEL_PACKED: tl.constexpr = x_.shape[1] + offsets = tl.arange(0, PACKED_PER_VAL) * 4 + quant_offset = ( + x_[:, None, :] >> offsets[None, :, None] + ) # (BLOCK_N, PACKED_PER_VAL, D // PACKED_PER_VAL) + + quant_offset = tl.view( + quant_offset, (BLOCK_N, BLOCK_DMODEL_PACKED * PACKED_PER_VAL) + ) + # Trick - instead of converting int4 to float16 we view it as float16 + # and then multiply by 32768 * 512 == 2**24 + quant_offset = (quant_offset & 0xF).to(tl.uint16).to(tl.float16, bitcast=True) + quant_offset = (quant_offset * 32768.0).to(tl.float16) + scale_512 = scale * 512 + + dequant = quant_offset * scale_512 + shift + return dequant + + @triton.jit + def _splitK_reduce( + Out_splitK, # [B, H, split_k, Mq, K] + Metadata, # [B, H, 2, split_k, M_ceil] contains [mi, li] + Out, # [B, H, M, K] + LSE, # [B, H, M] + split_k, + stride_osk_zhg, + stride_osk_s, + stride_osk_m, + stride_osk_k, + stride_mzhg, + stride_m2, + stride_ms, + stride_mm, + stride_oz, + stride_oh, + stride_og, + stride_om, + stride_ok, + stride_lse_zhg, + stride_lse_m, + BLOCK_SIZE: tl.constexpr, + H: tl.constexpr, + G: tl.constexpr, + ): + off_zhg = 
tl.program_id(0) + off_z = off_zhg // (H * G) + off_h = (off_zhg // G) % H + off_g = off_zhg % G + off_m = tl.program_id(1) + + Out_splitK_ptr = ( + Out_splitK + + stride_osk_zhg * off_zhg + + stride_osk_m * off_m + + tl.arange(0, BLOCK_SIZE) + ) + Metadata_ptr = Metadata + stride_mzhg * off_zhg + off_m + m = tl.load(Metadata_ptr) + l_sum = tl.load(Metadata_ptr + stride_m2) + acc = tl.load(Out_splitK_ptr) + + for split_k_idx in range(1, split_k): + Metadata_ptr = Metadata_ptr + stride_ms + Out_splitK_ptr = Out_splitK_ptr + stride_osk_s + + m_k = tl.load(Metadata_ptr) + l_k = tl.load(Metadata_ptr + stride_m2) + acc_k = tl.load(Out_splitK_ptr) + + m_new = tl.maximum(m, m_k) + if m_k < m: + # Scale incoming values + alpha = tl.math.exp2(m_k - m_new) + acc_k = acc_k * alpha + l_k = l_k * alpha + else: + # Scale our values + alpha = tl.math.exp2(m - m_new) + acc = acc * alpha + l_sum = l_sum * alpha + + m = m_new + l_sum = l_sum + l_k + acc = acc + acc_k + + acc = acc / l_sum + Out_ptr = ( + Out + + stride_oz * off_z + + stride_oh * off_h + + stride_og * off_g + + stride_om * off_m + + tl.arange(0, BLOCK_SIZE) + ) + tl.store(Out_ptr, acc) + + l_ptrs = LSE + off_zhg * stride_lse_zhg + off_m + tl.store(l_ptrs, (m + tl.math.log2(l_sum)) / 1.44269504) + +else: + _fwd_kernel_splitK = None + _splitK_reduce = None + + +@register_operator +class FwOp(AttentionFwOpBase): + """Flash-Attention with Split-K. Supports fused int-4 K/V quantization. + Quantized path will be taken if input K/V have type int32. + + Quantization can be row-wise or group-wise (when cls.NUM_GROUPS > 1) along + the last dimension of K and V. Currently 1, 2, 4, or 8 groups per row are supported. + Quantization coefficients (scale and shift) are represented as two + float16 constants per group, packed into int32. Quantization coefficients of + all groups are placed at the beginning of the row. 
So, if unquantized K/V have head + dimension D, the quantized versions have head dimension D // 8 + NUM_GROUPS + and dtype int32. + Pseudocode for dequantizing one row can look like: + group_size = D // 8 + for i in range(NUM_GROUPS): + group_start = NUM_GROUPS + i * group_size + group_quant = K[..., group_start: group_start + group_size] + scale, shift = unpack_int32_into_float16x2(group_quant[0]) + group_dequant = group_quant[..., 1:] * scale + shift + ... + + """ + + OPERATOR = _fwd_kernel_splitK + SUPPORTED_DEVICES = {"cuda"} + CUDA_MINIMUM_COMPUTE_CAPABILITY = (8, 0) + SUPPORTED_DTYPES = { + torch.half, + torch.bfloat16, + } # Those are dtypes of Q. In the quantized case K/V has dtype int32 + SUPPORTED_MAX_K = 128 + SUPPORTED_ATTN_BIAS_TYPES: Set[Any] = { + type(None), + BlockDiagonalCausalWithOffsetPaddedKeysMask, + } + SUPPORTS_DROPOUT = False + SUPPORTS_CUSTOM_SCALE = True + SUPPORTS_BMGHK = True + NAME = "triton_splitKF" + + SPLIT_K: Optional[int] = None + BLOCK_M = 16 + BLOCK_N = 64 + + NUM_GROUPS = 1 # Default quantization is row-wise + + @classmethod + def shape_not_supported_reasons( + cls, Mq: int, Mkv: int, K: int, Kv: int + ) -> List[str]: + reasons = super().shape_not_supported_reasons(Mq, Mkv, K, Kv) + if K not in {16, 32, 64, 128}: + reasons.append(f"Embed dim {K} not supported") + return reasons + + @classmethod + def not_supported_reasons(cls, d: Inputs) -> List[str]: + reasons = super(FwOp, cls).not_supported_reasons(d) + check_lastdim_alignment_stride1(reasons, "query", d.query, 8) + if d.key.dtype != torch.int32: + check_lastdim_alignment_stride1(reasons, "key", d.key, 8) + check_lastdim_alignment_stride1(reasons, "value", d.value, 8) + if cls.OPERATOR is None: + reasons.append("triton is not available") + if d.device.type == "cuda": + # Has only been tested on 8.0 / 9.0. 
+ if torch.cuda.get_device_capability(d.device) < (8, 0): + reasons.append( + "requires GPU with sm80 minimum compute capacity, e.g., A100/H100/L4" + ) + + q_len = d.query.shape[1] + if isinstance(d.attn_bias, BlockDiagonalCausalWithOffsetPaddedKeysMask): + seqinfo = d.attn_bias.q_seqinfo + if q_len != seqinfo.seqstart_py[-1]: + reasons.append( + f"Expected total {seqinfo.seqstart_py[-1]} queries not {q_len}" + ) + q_len = seqinfo.min_seqlen + if q_len != seqinfo.max_seqlen: + reasons.append( + "Variable query len is not supported in the presence of causal mask." + ) + + if d.key.ndim in [4, 5] and d.key.shape[-2] != 1: + if d.key.stride(-2) == 0 and d.value.stride(-2) == 0 and q_len > 1: + reasons.append("multiquery is only supported with query seqlen=1") + + if d.attn_bias is not None and q_len > 1: + reasons.append( + "query with seqlen > 1 is not supported in the presence of causal mask" + ) + return reasons + + @classmethod + def get_split_k(cls, B: int, H: int, Mk: int) -> int: + """Heuristic for the number of splits""" + bh = max(B * H, 1) # NOTE: Handle B*h=0 case + split_k = max(Mk, 1024) // bh + max_chunk_size = 64 if Mk <= 512 and bh <= 64 else 128 + while split_k > 0 and Mk / split_k < max_chunk_size: + split_k = split_k // 2 + split_k = min(split_k, 64) + split_k = max(split_k, 1) + return split_k + + @classmethod + def apply( + cls, inp: Inputs, needs_gradient: bool + ) -> Tuple[torch.Tensor, Optional[Context]]: + attn_bias = inp.attn_bias + seq_len = None + q, k, v = inp.get_qkv_in_bmghk() + + if attn_bias is not None: + assert isinstance(attn_bias, BlockDiagonalCausalWithOffsetPaddedKeysMask) + # TODO: do we really need to do this cast? 
seems fishy but + # I just copied it from the decoder.py + attn_bias.k_seqinfo.to(inp.query.device) + attn_bias.q_seqinfo.to(inp.query.device) + seq_len = attn_bias.k_seqinfo.seqlen + B = len(seq_len) + G, H, Kq = q.shape[-3:] + Kkv = v.shape[-1] + + # assume kv has been padded + q = q.reshape(B, -1, G, H, Kq) + k = k.reshape(B, -1, G, H, Kkv) + v = v.reshape(B, -1, G, H, Kkv) + + # Transpose in the case of MQA/GQA + mqa_swap_seqlen_head = False + if k.shape[3] > 1 and k.stride(3) == 0 and v.stride(3) == 0: + mqa_swap_seqlen_head = True + assert q.shape[1] == 1 + q = q.transpose(1, 3) + k = k[:, :, :, :1] + v = v[:, :, :, :1] + + if k.dtype == torch.int32: + # Quantized K/V + PACKED_PER_VAL = 8 + Lk = (k.shape[-1] - cls.NUM_GROUPS) * 8 + else: + Lk = k.shape[-1] + PACKED_PER_VAL = 1 + + B, Mk, G, H, Kkv = k.shape + B, M, G, H, Kq = q.shape + assert Lk == Kq, f"Keys have head dim {Lk} but queries have head dim {Kq}" + + BLOCK_M = cls.BLOCK_M + BLOCK_N = cls.BLOCK_N + if cls.SPLIT_K is not None: + split_k = cls.SPLIT_K + else: + # Use heuristics + split_k = cls.get_split_k(B, H, Mk) + + M_ceil = (M + BLOCK_M - 1) // BLOCK_M * BLOCK_M + o_splitk = torch.empty( + [B * G * H, split_k, M_ceil, Kq], dtype=torch.float32, device=q.device + ) + metadata = torch.empty( + [B * G * H, 2, split_k, M_ceil], dtype=torch.float32, device=q.device + ) + lse = torch.empty((B * G * H, M), device=q.device, dtype=torch.float32) + grid = (triton.cdiv(M, BLOCK_M), B * G * H, split_k) + + num_warps = 2 + split_size = (Mk + split_k - 1) // split_k + use_seq_len = seq_len is not None + _fwd_kernel_splitK_unrolled = unroll_varargs( + _fwd_kernel_splitK, N=cls.NUM_GROUPS if PACKED_PER_VAL > 1 else 1 + ) + + _fwd_kernel_splitK_unrolled[grid]( + Q=q, + K=k, + V=v, + sm_scale=inp.scale_float, + Out_splitK=o_splitk, + Metadata=metadata, + Seq_len=seq_len, + **_strides(q, "qz", "qm", "qg", "qh", "qk"), + **_strides(k, "kz", "kn", "kg", "kh", "kk"), + **_strides(v, "vz", "vn", "vg", "vh", "vk"), + 
**_strides(o_splitk, "osk_zhg", "osk_s", "osk_m", "osk_k"), + **_strides(metadata, "mzhg", "m2", "ms", "mm"), + Z=B, + H=H, + G=G, + N_CTX_Q=M, + N_CTX_K=Mk, + BLOCK_N_PER_SPLIT=split_size, + BLOCK_M=BLOCK_M, + BLOCK_N=BLOCK_N, + BLOCK_DMODEL=Lk, + BOUNDS_CHECKS_N=(split_size % BLOCK_N) > 0 or use_seq_len, + USE_SEQ_LEN=use_seq_len, + num_warps=num_warps, + num_stages=1, + PACKED_PER_VAL=PACKED_PER_VAL, + N_GROUPS=cls.NUM_GROUPS if PACKED_PER_VAL > 1 else 1, + ) + + if mqa_swap_seqlen_head: + out = torch.empty( + (B, H, G, M, Kq), device=q.device, dtype=q.dtype + ).transpose(1, 3) + else: + out = torch.empty((B, M, G, H, Kq), device=q.device, dtype=q.dtype) + + # Merge together + grid = (B * G * H, M, 1) + _splitK_reduce[grid]( + o_splitk, + metadata, + out, + lse, + split_k=split_k, + **_strides(o_splitk, "osk_zhg", "osk_s", "osk_m", "osk_k"), + **_strides(metadata, "mzhg", "m2", "ms", "mm"), + **_strides(out, "oz", "om", "og", "oh", "ok"), + **_strides(lse, "lse_zhg", "lse_m"), + BLOCK_SIZE=out.shape[-1], + G=G, + H=H, + # TODO: Tune num_warps + ) + lse = lse.reshape([B, G, H, M]) + if mqa_swap_seqlen_head: + # H/M dimensions have been swapped + out = out.transpose(1, 3) + lse = lse.transpose(2, 3) + if inp.query.ndim == 4: + # BMGHK -> BMHK + assert G == 1 + out = out[:, :, 0] + lse = lse[:, 0] + if Mk == 0: + out.zero_() + + return out, Context(out=out, lse=lse) + + +class FwOp_S1(FwOp): + SPLIT_K = 1 + NAME = "triton_splitK1" + + +class FwOp_S2(FwOp): + SPLIT_K = 2 + NAME = "triton_splitK2" + + +class FwOp_S4(FwOp): + SPLIT_K = 4 + NAME = "triton_splitK4" + + +class FwOp_S8(FwOp): + SPLIT_K = 8 + NAME = "triton_splitK8" + + +class FwOp_S16(FwOp): + SPLIT_K = 16 + NAME = "triton_splitK16" + + +class FwOp_S32(FwOp): + SPLIT_K = 32 + NAME = "triton_splitK32" + + +class FwOp_S64(FwOp): + SPLIT_K = 64 + NAME = "triton_splitK64" + + +class FwOp_S128(FwOp): + SPLIT_K = 128 + NAME = "triton_splitK128" diff --git 
a/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/indexing.py b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/indexing.py new file mode 100644 index 0000000000000000000000000000000000000000..4fafda54406130daf957b428b3e5e446fbdeaf3a --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/indexing.py @@ -0,0 +1,237 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. + +from typing import Optional, Sequence + +import torch + +from xformers.ops._triton import ( + index_select_cat_bwd, + index_select_cat_fwd, + scaled_index_add_bwd, + scaled_index_add_fwd, +) + +from .common import BaseOperator, register_operator + + +# Keeping these operator registry here so that +# it's easy to check if they are available +@register_operator +class ScaledIndexAddFw(BaseOperator): + OPERATOR = scaled_index_add_fwd + OPERATOR_CATEGORY = "indexing" + NAME = "scaled_index_addF" + + +@register_operator +class ScaledIndexAddBw(BaseOperator): + OPERATOR = scaled_index_add_bwd + OPERATOR_CATEGORY = "indexing" + NAME = "scaled_index_addB" + + +@register_operator +class IndexSelect(BaseOperator): + OPERATOR = index_select_cat_fwd + OPERATOR_CATEGORY = "indexing" + NAME = "index_select" + + +class _ScaledIndexAdd(torch.autograd.Function): + @staticmethod + # type: ignore + def forward( + ctx, + x: torch.Tensor, + index: torch.Tensor, + source: torch.Tensor, + scaling: Optional[torch.Tensor], + alpha: float, + ) -> torch.Tensor: + if scaled_index_add_fwd is not None: + scaled_index_add_fwd(x, index, source, scaling, alpha) + else: + raise RuntimeError( + "Triton is needed for forward pass but it is not available!" 
+ ) + + ctx.mark_dirty(x) + ctx.save_for_backward(index, scaling, source) + ctx.source_shape = source.shape + ctx.alpha = alpha + return x + + @staticmethod + @torch.autograd.function.once_differentiable + def backward(ctx, grad_output): + index, scaling, source = ctx.saved_tensors + grad_source = torch.empty_like(source) + grad_scaling = ( + None + if scaling is None + else torch.empty( + ctx.source_shape, dtype=scaling.dtype, device=scaling.device + ) + ) + + if scaled_index_add_bwd is not None: + scaled_index_add_bwd( + grad_output, + grad_source, + grad_scaling, + source, + scaling, + index, + ctx.alpha, + ) + else: + raise RuntimeError( + "Triton is needed for backward pass but it is not available!" + ) + + return ( + grad_output, # gradient of input + None, # gradient of index + grad_source, # gradient of source + grad_scaling, # gradient of scaling + None, # gradient of alpha + ) + + +def scaled_index_add( + input: torch.Tensor, # [B, M, D] + index: torch.Tensor, # [Bi] - int64 + source: torch.Tensor, # [Bi, M, D] + scaling: Optional[torch.Tensor] = None, # [D] + alpha: float = 1.0, +) -> torch.Tensor: + """ + In-place scaling+index_add + + Indices in ``index`` are assumed to be unique + + The max index in ``index`` is assumed to be less than the size of dim0 of ``input``. + + :Note: + + The FW pass is done in-place (``input`` is modified) + + :Equivalent pytorch code: + + .. 
code-block:: python + + return torch.index_add(input, dim=0, source=scaling * src, index=indices, alpha=alpha) + """ + + return _ScaledIndexAdd.apply(input, index, source, scaling, alpha) + + +class _IndexSelectCat(torch.autograd.Function): + @staticmethod + # type: ignore + def forward( + ctx, + *args: torch.Tensor, + ) -> torch.Tensor: + assert len(args) % 2 == 0 + sources = args[: len(args) // 2] + indices = args[len(args) // 2 :] + + output_numel = 0 + for source, index in zip(sources, indices): + num_rows, num_cols = source.shape + num_indices = index.shape[0] + output_numel += num_indices * num_cols + + output = torch.empty( + [output_numel], dtype=sources[0].dtype, device=sources[0].device + ) + + processed_numel = 0 + for source, index in zip(sources, indices): + num_indices = index.shape[0] + num_cols = source.shape[1] + + if index_select_cat_fwd is not None: + index_select_cat_fwd( + output[ + processed_numel : processed_numel + num_indices * num_cols + ].view([num_indices, num_cols]), + source, + index, + ) + else: + raise RuntimeError( + "Triton is needed for forward pass but it is not available!" 
+ ) + + processed_numel += num_indices * num_cols + + ctx.save_for_backward(*indices) + ctx.source_shapes = [source.shape for source in sources] + + return output + + @staticmethod + @torch.autograd.function.once_differentiable + def backward(ctx, grad_output): + indices = ctx.saved_tensors + + gradients = [] + processed_numel = 0 + for source_shape, index in zip(ctx.source_shapes, indices): + num_rows, num_cols = source_shape + num_indices = index.shape[0] + + grad_output_slice = grad_output[ + processed_numel : processed_numel + num_indices * num_cols + ].reshape([num_indices, num_cols]) + processed_numel += num_indices * num_cols + + grad_source_slice = torch.zeros( + [num_rows, num_cols], + dtype=grad_output.dtype, + device=grad_output.device, + ) + + if index_select_cat_bwd is not None: + index_select_cat_bwd( + grad_source_slice, + index, + grad_output_slice, + ) + else: + raise RuntimeError( + "Triton is needed for backward pass but it is not available!" + ) + gradients.append(grad_source_slice) + + return (*gradients, *([None] * len(gradients))) + + +def index_select_cat( + sources: Sequence[torch.Tensor], indices: Sequence[torch.Tensor] +) -> torch.Tensor: + """ + Indices in ``index`` are assumed to be unique + In each (index, source) pair, the max index in ``index`` is assumed to be less than the size of dim0 of ``source`` + + :Example: + + Given: + - ``sources[0]`` of shape ``[S0, D0]`` + - ``indices[0]`` of shape ``[I0]`` + - ``sources[1]`` of shape ``[S1, D1]`` + - ``indices[1]`` of shape ``[I1]`` + returns a ``torch.Tensor`` of shape ``[I0 * D0 + I1 * D1]`` + + :Equivalent pytorch code: + + .. 
code-block:: python + + return torch.cat([s[i.long()].flatten() for s, i in zip(sources, indices)], dim=0) + """ + return _IndexSelectCat.apply(*sources, *indices) diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/rmsnorm.py b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/rmsnorm.py new file mode 100644 index 0000000000000000000000000000000000000000..94a3743fbdf57e98fb1697bfc4f8a391d6a27651 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/rmsnorm.py @@ -0,0 +1,113 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. +from typing import Optional + +import torch +from torch import nn + +from .. import _is_triton_available + + +def rms_norm(x, weight: Optional[torch.Tensor], eps: float = 1e-6): + """ + RMS Normalization along the last dimension. + + This is similar to torch.nn.functional.normalize but with eps being added + instead of max. + + Expects x contiguous of shape (..., dim), and returns normalized data + of the same shape. For each dim-length vector x, the result has + + x / sqrt( x*x.sum() + eps) + + If weights are included, they are a contiguous parameter of length dim + which multiplies the result. + + This functionality is experimental. Its API might be changed without warnings. + Use it at your own risk. + """ + assert _is_triton_available() + from ._triton.rmsnorm_kernels import _rms_norm_forward + + if torch.is_grad_enabled() and ( + x.requires_grad or (weight is not None and weight.requires_grad) + ): + raise ValueError("Gradients not supported.") + + return _rms_norm_forward(x, weight, eps) + + +def rms_norm_add( + x: torch.Tensor, y: torch.Tensor, weight: Optional[torch.Tensor], eps: float = 1e-6 +): + """ + An addition fused with rms_norm. 
+ + z = rms_norm_add(x, y, weight, eps) + + is equivalent to + + x += y + z = rms_norm(x, weight, eps) + + where x, y and z are all contiguous. + + This functionality is experimental. Its API might be changed without warnings. + Use it at your own risk. + """ + if torch.is_grad_enabled() and ( + x.requires_grad + or y.requires_grad + or (weight is not None and weight.requires_grad) + ): + raise ValueError("Gradients not supported.") + assert _is_triton_available() + from ._triton.rmsnorm_kernels import _rms_norm_add_forward + + return _rms_norm_add_forward(x, y, weight, eps) + + +class RMSNorm(torch.nn.Module): + """ + RMS Normalization layer along the last dimension. + + This is similar to torch.nn.functional.normalize but with eps being added + instead of max. + + Expects contiguous input of shape (..., dim), and returns normalized data + of the same shape. For each dim-length vector x, the result has + + x / sqrt( x*x.sum() + eps) + + If weights are included, they are a parameter of length dim which multiplies + the result. + + This functionality is experimental. Its API might be changed without warnings. + Use it at your own risk. + """ + + def __init__(self, dim: int, include_weight: bool = True, eps: float = 1e-6): + super().__init__() + self.eps = eps + if include_weight: + self.weight: Optional[nn.Parameter] = nn.Parameter(torch.ones(dim)) + else: + self.weight = None + + def forward(self, x: torch.Tensor): + return rms_norm(x, self.weight, self.eps) # type: ignore + + def increment_and_forward_(self, x: torch.Tensor, y: torch.Tensor): + """ + An addition fused with forward. 
+ + z = layer.increment_and_forward_(x, y) + + is equivalent to + + x += y + z = layer(x) + """ + return rms_norm_add(x, y, self.weight, self.eps) # type: ignore diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/rope_padded.py b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/rope_padded.py new file mode 100644 index 0000000000000000000000000000000000000000..a78e431b3de60baadbe6bde530126007a718b2a3 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/rope_padded.py @@ -0,0 +1,231 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. +from typing import Optional, Tuple + +import torch + +from xformers.ops.fmha.attn_bias import ( # type: ignore + BlockDiagonalCausalWithOffsetPaddedKeysMask, +) + +from .. import _is_triton_available + + +def rope_padded( + xq: torch.Tensor, + xk: torch.Tensor, + xv: torch.Tensor, + cache_k: torch.Tensor, + cache_v: torch.Tensor, + attn_bias: BlockDiagonalCausalWithOffsetPaddedKeysMask, + *, + theta: float = 10000.0, + out_q: Optional[torch.Tensor] = None, + adjacents: bool = True, + internal_dtype: str = "", +): + """ + Performs RoPE (rotary embeddings) and kv-cache emplacement for a heterogeneous + batch for inference in the style given by + BlockDiagonalCausalWithOffsetPaddedKeysMask. + The batch is concatenated along the sequence dimension, so the + actual dim-0 length of all tensors is 1. + + xq, xk and xv should be (1, slen, n_heads, dim), where + xq's n_heads can differ from xk and xv. + + This function places the roped xk in the right place in cache_k, and + xv (unmodified) in the right place in cache_v, and returns out_q + (the roped xq) such that things are ready to call + + xformers.ops.memory_efficient_attention( + out_q, cache_k, cache_v, attn_bias=attn_bias + ) + + This functionality is experimental. 
Its API might be changed without warnings. + Use it at your own risk. + + Arguments: + xq: tensor of queries to apply rope to + xk: tensor of keys to apply rope to + xv: tensor of values to copy into cache_v + cache_k: cache of keys, MODIFIED IN PLACE + cache_v: cache of values, MODIFIED IN PLACE + attn_bias: details the layout of caches. + Used to determine frequencies for the + RoPE calculation as well as the locations in cache_k and cache_v + to write to. Must be on the device. + adjacents: If True, the inputs are in adjacent pairs along the final dim axis. + This is like the released LLaMA model. + If False, the dim axis is split in two equal pieces. + I.e. the features are ordered with all the real parts before all + the imaginary parts. This matches HuggingFace, e.g. + https://github.com/huggingface/transformers/blob/ + f143037789288ba532dada934a118e648e715738/ + src/transformers/models/llama/modeling_llama.py#L126-L130 + internal_dtype: set to "f32" or "f64" to enforce dtype in the calculation + """ + if torch.is_grad_enabled() and ( + xq.requires_grad + or xk.requires_grad + or xv.requires_grad + or cache_k.requires_grad + or cache_v.requires_grad + or out_q is not None + ): + raise ValueError("Gradients not supported.") + assert _is_triton_available() + import triton + + from ._triton.rope_padded_kernels import _rope_padded_kernel + + n_total_queries = attn_bias.q_seqinfo.seqstart_py[-1] + cache_length = attn_bias.k_seqinfo.seqstart_py[-1] + ndim = xq.ndim + if ndim not in [4, 5]: + raise ValueError("Unexpected xq dimension") + xq_stride = xq.stride() + xk_stride = xk.stride() + xv_stride = xv.stride() + cache_k_stride = cache_k.stride() + cache_v_stride = cache_v.stride() + cache_k_shape = cache_k.shape + xk_shape = xk.shape + n_kv_heads = xk_shape[-2] + expected_kv_heads = n_kv_heads + if xk_stride[-2] == 0: + n_kv_heads = 1 + expected_cache_heads = n_kv_heads + if n_kv_heads == 1 and cache_k_stride[-2] == 0: + # If there's 1 kv head, don't care how 
expanded + # cache_k is. User might expand before or after rope. + expected_cache_heads = cache_k_shape[-2] + + if ndim == 4: + bsz, q_len, n_q_heads, dim = xq.shape + assert q_len == n_total_queries + if xk_shape != (1, n_total_queries, expected_kv_heads, dim): + raise ValueError("unexpected k shape") + if xv.shape != (1, n_total_queries, expected_kv_heads, dim): + raise ValueError("unexpected v shape") + if cache_k_shape != (1, cache_length, expected_cache_heads, dim): + raise ValueError("unexpected cache_k shape") + if cache_v.shape != (1, cache_length, expected_cache_heads, dim): + raise ValueError("unexpected cache_v shape") + n_groups = 1 + out_q_stride: Tuple[int, ...] = (0, n_q_heads * dim, dim, 1) + + else: + bsz, q_len, n_groups, n_q_heads, dim = xq.shape + assert q_len == n_total_queries + if xk_shape != (1, n_total_queries, n_groups, expected_kv_heads, dim): + raise ValueError("unexpected k shape") + if xv.shape != (1, n_total_queries, n_groups, expected_kv_heads, dim): + raise ValueError("unexpected v shape") + if cache_k_shape != (1, cache_length, n_groups, expected_cache_heads, dim): + raise ValueError("unexpected cache_k shape") + if cache_v.shape != (1, cache_length, n_groups, expected_cache_heads, dim): + raise ValueError("unexpected cache_v shape") + out_q_stride = ( + 0, + n_q_heads * dim * n_groups, + n_q_heads * dim, + dim, + 1, + ) + + if bsz != 1: + raise ValueError( + "Expected batch size dimension to be 1 as batches should be concatenated." 
+ ) + if xq_stride[-1] != 1: + raise ValueError("Each q head must be contiguous") + if xk_stride[-1] != 1: + raise ValueError("Each k head must be contiguous") + if xv_stride[-1] != 1: + raise ValueError("Each v head must be contiguous") + if cache_k_stride[-1] != 1: + raise ValueError("Each cache_k head must be contiguous") + if cache_v_stride[-1] != 1: + raise ValueError("Each cache_v head must be contiguous") + n_total_heads = n_q_heads + 2 * n_kv_heads + v_start = n_total_heads - n_kv_heads + k_start = n_q_heads + if out_q is None: + out_q = xq.new_empty(xq.shape) + else: + if out_q.shape != xq.shape: + raise ValueError("Unexpected shape of out_q") + out_q_stride = out_q.stride() + if out_q_stride[-1] != 1: + raise ValueError("Each out_q head must be contiguous") + + assert out_q is not None + + logical_bsz = len(attn_bias.q_seqinfo.seqstart_py) - 1 + + # Less than 64KB per feature: enqueue fused kernel + MAX_FUSED_SIZE = 65536 // xq.element_size() + BLOCK_SIZE = min(MAX_FUSED_SIZE, triton.next_power_of_2(dim)) + BLOCK_SIZE = max(BLOCK_SIZE, 128) + BLOCK_SIZE = min(BLOCK_SIZE, 4096) + # heuristics for number of warps + num_warps = min(max(BLOCK_SIZE // 256, 1), 8) + device = xq.device + # Move these to the right device, like fmha does. + attn_bias.k_seqinfo.to(device) + attn_bias.q_seqinfo.to(device) + seqstartq = attn_bias.q_seqinfo.seqstart + seqstartk = attn_bias.k_seqinfo.seqstart + seqlenk = attn_bias.k_seqinfo.seqlen + assert internal_dtype in ["", "f32", "f64"] + # experiment with the order of dims here. 
+ _rope_padded_kernel[ + (logical_bsz, attn_bias.q_seqinfo.max_seqlen, n_total_heads * n_groups) + ]( + xq, + xk, + xv, + out_q, + cache_k, + cache_v, + seqstartq, + seqstartk, + seqlenk, + theta, + k_start, + v_start, + n_groups, + dim, + xq_stride[1], + xq_stride[2] if ndim == 5 else 0, + xq_stride[-2], + xk_stride[1], + xk_stride[2] if ndim == 5 else 0, + xk_stride[-2], + xv_stride[1], + xv_stride[2] if ndim == 5 else 0, + xv_stride[-2], + cache_k_stride[1], + cache_k_stride[2] if ndim == 5 else 0, + cache_k_stride[-2], + cache_v_stride[1], + cache_v_stride[2] if ndim == 5 else 0, + cache_v_stride[-2], + seqstartq.stride(0), + seqstartk.stride(0), + seqlenk.stride(0), + out_q_stride[1], + out_q_stride[2] if ndim == 5 else 0, + out_q_stride[-2], + internal_dtype, + const_batch_strides=False, + cache_padding_length=0, + seqlenk_shift=0, + BLOCK_SIZE=BLOCK_SIZE, + adjacents=adjacents, + num_warps=num_warps, + ) + return out_q diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/seqpar.py b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/seqpar.py new file mode 100644 index 0000000000000000000000000000000000000000..32210231dbd43a51cd205a05fdb9032434aa4491 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/seqpar.py @@ -0,0 +1,286 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. 
+ + +from typing import Callable, List, Optional, Tuple + +import torch + +from .common import make_pytorch_cuda_operator +from .differentiable_collectives import ( + gather_along_first_dim, + gather_along_first_dim_async, + reduce_scatter_along_first_dim, + reduce_scatter_along_first_dim_async, +) +from .sequence_parallel_fused_ops import ( + fused_allgather_and_anything, + fused_allgather_and_linear, + fused_anything_and_reducescatter, + fused_linear_and_reducescatter, +) +from .tiled_matmul import tiled_matmul_fwd + + +@make_pytorch_cuda_operator +def sequence_parallel_leading_matmul_fwd( + scattered_input: torch.Tensor, + weights: List[torch.Tensor], + fuse: bool, + process_group: torch.distributed.ProcessGroup, +) -> List[torch.Tensor]: + if fuse: + gathered_outputs = fused_allgather_and_linear( + scattered_input, [w.t() for w in weights], group=process_group + ) + else: + gathered_input = gather_along_first_dim( + scattered_input, process_group=process_group + ) + (gathered_outputs,) = tiled_matmul_fwd( + [[gathered_input]], + [[w for w in weights]], + ) + return gathered_outputs + + +@make_pytorch_cuda_operator +def sequence_parallel_leading_matmul_bwd( + scattered_input: torch.Tensor, + weights: List[torch.Tensor], + grad_gathered_outputs: List[torch.Tensor], + fuse: bool, + process_group: torch.distributed.ProcessGroup, +) -> Tuple[torch.Tensor, List[torch.Tensor]]: + mp_size = process_group.size() + + if fuse: + grad_scattered_input = torch.empty_like(scattered_input) + grad_weights = [torch.zeros_like(w) for w in weights] + + grad_gathered_outputss = [ + grad_go.tensor_split(mp_size, dim=0) for grad_go in grad_gathered_outputs + ] + + def my_si_matmul( + grad_gathered_inputs: List[torch.Tensor], + dst_rank: int, + stream_factory: Callable[[], torch.cuda.Stream], + ) -> None: + (grad_gi,) = grad_gathered_inputs + with torch.cuda.stream(stream_factory()): + tiled_matmul_fwd( + [[grad_gos[dst_rank] for grad_gos in grad_gathered_outputss]], + [[w.t()] for w 
in weights], + out=[[grad_gi]], + ) + + fused_anything_and_reducescatter( + my_si_matmul, + [grad_scattered_input], + group=process_group, + ) + + # Each pair of shards of input and grad_output accumulates into the same + # grad_weight. Thus we need to make sure that the in-place addmms are + # sequenced correctly for each of the grad_weights. + events = [torch.cuda.Event() for _ in weights] + + def my_w_matmul( + gathered_inputs_shard: List[torch.Tensor], + src_rank: int, + stream_factory: Callable[[], torch.cuda.Stream], + ) -> None: + (gi_shard,) = gathered_inputs_shard + for grad_gos, grad_w, event in zip( + grad_gathered_outputss, grad_weights, events + ): + with torch.cuda.stream(stream_factory()): + event.wait() + grad_w.t().addmm_(grad_gos[src_rank].t(), gi_shard) + event.record() + + fused_allgather_and_anything( + [scattered_input], + my_w_matmul, + group=process_group, + ) + else: + gathered_input, handle = gather_along_first_dim_async( + scattered_input, process_group=process_group + ) + ((grad_gathered_input,),) = tiled_matmul_fwd( + [[grad_go for grad_go in grad_gathered_outputs]], + [[w.t()] for w in weights], + ) + if handle is not None: + handle.wait() + + grad_scattered_input, handle = reduce_scatter_along_first_dim_async( + grad_gathered_input, process_group=process_group + ) + + grad_weights_tuples = tiled_matmul_fwd( + [[grad_go.t()] for grad_go in grad_gathered_outputs], + [[gathered_input]], + ) + if handle is not None: + handle.wait() + + grad_weights = [grad_w.t() for (grad_w,) in grad_weights_tuples] + + return grad_scattered_input, grad_weights + + +class _SequenceParallelLeadingMatmul(torch.autograd.Function): + @staticmethod + def forward( # type: ignore[override] + ctx, + fuse: bool, + process_group: torch.distributed.ProcessGroup, + scattered_input: torch.Tensor, + *weights: torch.Tensor, + ) -> Tuple[torch.Tensor, ...]: + ctx.save_for_backward(scattered_input, *weights) + ctx.fuse = fuse + ctx.process_group = process_group + 
gathered_output = sequence_parallel_leading_matmul_fwd( + scattered_input, list(weights), fuse, process_group + ) + return tuple(gathered_output) + + @staticmethod + def backward( # type: ignore[override] + ctx, *grad_gathered_outputs: torch.Tensor + ) -> Tuple[Optional[torch.Tensor], ...]: + scattered_input, *weights = ctx.saved_tensors + (grad_scattered_input, grad_weights,) = sequence_parallel_leading_matmul_bwd( + scattered_input, + list(weights), + list(grad_gathered_outputs), + ctx.fuse, + ctx.process_group, + ) + return None, None, grad_scattered_input, *grad_weights + + +def sequence_parallel_leading_matmul( + x: torch.Tensor, + ws: List[torch.Tensor], + *, + fuse: bool, + process_group: torch.distributed.ProcessGroup, +) -> List[torch.Tensor]: + os = _SequenceParallelLeadingMatmul.apply( + fuse, process_group, x.flatten(0, -2), *ws + ) + return [o.view(-1, *x.shape[1:-1], w.shape[1]) for o, w in zip(os, ws)] + + +@make_pytorch_cuda_operator +def sequence_parallel_trailing_matmul_fwd( + gathered_input: torch.Tensor, + weight: torch.Tensor, + fuse: bool, + process_group: torch.distributed.ProcessGroup, +) -> torch.Tensor: + if fuse: + scattered_output = fused_linear_and_reducescatter( + gathered_input, weight.t(), group=process_group + ) + else: + gathered_output = torch.matmul(gathered_input, weight) + scattered_output = reduce_scatter_along_first_dim( + gathered_output, process_group=process_group + ) + return scattered_output + + +@make_pytorch_cuda_operator +def sequence_parallel_trailing_matmul_bwd( + gathered_input: torch.Tensor, + weight: torch.Tensor, + grad_scattered_output: torch.Tensor, + fuse: bool, + process_group: torch.distributed.ProcessGroup, +) -> Tuple[torch.Tensor, torch.Tensor]: + mp_size = process_group.size() + + if fuse: + grad_gathered_input = torch.empty_like(gathered_input) + grad_weight = torch.zeros_like(weight) + + gathered_inputs = gathered_input.tensor_split(mp_size, dim=0) + grad_gathered_inputs = 
grad_gathered_input.tensor_split(mp_size, dim=0) + + def my_gi_and_w_matmul( + grad_gathered_outputs_shard: List[torch.Tensor], + src_rank: int, + stream_factory: Callable[[], torch.cuda.Stream], + ) -> None: + (grad_go_shard,) = grad_gathered_outputs_shard + with torch.cuda.stream(stream_factory()): + torch.matmul( + grad_go_shard, weight.t(), out=grad_gathered_inputs[src_rank] + ) + with torch.cuda.stream(stream_factory()): + grad_weight.t().addmm_(grad_go_shard.t(), gathered_inputs[src_rank]) + + fused_allgather_and_anything( + [grad_scattered_output], + my_gi_and_w_matmul, + group=process_group, + ) + else: + grad_gathered_output = gather_along_first_dim( + grad_scattered_output, process_group=process_group + ) + grad_gathered_input = torch.matmul(grad_gathered_output, weight.t()) + grad_weight = torch.matmul(grad_gathered_output.t(), gathered_input).t() + + return grad_gathered_input, grad_weight + + +class _SequenceParallelTrailingMatmul(torch.autograd.Function): + @staticmethod + def forward( # type: ignore[override] + ctx, + fuse: bool, + process_group: torch.distributed.ProcessGroup, + gathered_input: torch.Tensor, + weight: torch.Tensor, + ) -> torch.Tensor: + ctx.save_for_backward(gathered_input, weight) + ctx.fuse = fuse + ctx.process_group = process_group + scattered_output = sequence_parallel_trailing_matmul_fwd( + gathered_input, weight, fuse, process_group + ) + return scattered_output + + @staticmethod + def backward( # type: ignore[override] + ctx, grad_scattered_output: torch.Tensor + ) -> Tuple[Optional[torch.Tensor], ...]: + gathered_input, weight = ctx.saved_tensors + (grad_gathered_input, grad_weight,) = sequence_parallel_trailing_matmul_bwd( + gathered_input, + weight, + grad_scattered_output, + ctx.fuse, + ctx.process_group, + ) + return None, None, grad_gathered_input, grad_weight + + +def sequence_parallel_trailing_matmul( + x: torch.Tensor, + w: torch.Tensor, + *, + fuse: bool, + process_group: torch.distributed.ProcessGroup, +) -> 
torch.Tensor: + o = _SequenceParallelTrailingMatmul.apply(fuse, process_group, x.flatten(0, -2), w) + return o.view(-1, *x.shape[1:-1], w.shape[1]) diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/sequence_parallel_fused_ops.py b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/sequence_parallel_fused_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..e7e35e758f707955d0b8f5e1dcb6236d9e1f532c --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/sequence_parallel_fused_ops.py @@ -0,0 +1,1170 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. + + +import concurrent.futures +import json +import multiprocessing.connection +import os +from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple, Union, overload + +import torch +import torch.distributed as dist +import torch.multiprocessing.reductions + +from .. import _is_triton_available +from .common import BaseOperator, get_xformers_operator, register_operator + +if _is_triton_available(): + from ._triton.sequence_parallel_fused_kernels import ( + BACKWARDS_WITH_ME_FIRST, + FORWARDS_WITH_ME_LAST, + _launch_triton_matmul, + ) + + TRITON_IS_AVAILABLE = True +else: + TRITON_IS_AVAILABLE = False + + +# The sequence numbers will be communicated as 32-bit integers, due to +# limitations in both CUDA (memset can only operate on 4 bytes at a time at +# most) and Triton (scalar arguments are int32 if they fit). 32 bits are not +# enough to be sure that we'll never see overflow. Moreover, different parts of +# the code use signed or unsigned ints. To be safe, let's simulate overflow +# ourselves, at a value low enough so that it fits both a signed and an unsigned +# 32-bit integer. 
And, in fact, let's make it so low that we're sure we'll hit +# it in our tests, to avoid bugs that only manifest in long-running training. +SEQ_NUM_WRAP_AROUND = 2**8 + + +@register_operator +class WriteValues(BaseOperator): + OPERATOR = get_xformers_operator("write_values") + OPERATOR_CATEGORY = "sequence_parallel_fused" + NAME = "write_values" + + +@register_operator +class WaitValues(BaseOperator): + OPERATOR = get_xformers_operator("wait_values") + OPERATOR_CATEGORY = "sequence_parallel_fused" + NAME = "wait_values" + + +@register_operator +class Memset32bAsync(BaseOperator): + OPERATOR = get_xformers_operator("cuda_memset_32b_async") + OPERATOR_CATEGORY = "sequence_parallel_fused" + NAME = "cuda_memset_32b_async" + + +# We could just send tensors directly on mp.Connections, since PyTorch installs +# the necessary reductions to make it work. However, in the receiving process, +# PyTorch "mounts" the tensor in the CUDA context for the GPU with the **SAME +# INDEX** as on the sender. This works if all processes use CUDA_VISIBLE_DEVICES +# to limit themselves to a single GPU (which thus has index 0 everywhere) but in +# all other cases it's a mess. Hence we use our own reductions (which wrap the +# ones from PyTorch) to use the right devices. + + +def _serialize_cuda_tensor(tensor, device): + assert tensor.device == device + assert device.type == "cuda" + func, args = torch.multiprocessing.reductions.reduce_tensor(tensor) + assert func is torch.multiprocessing.reductions.rebuild_cuda_tensor + assert args[6] == device.index + return args + + +def _deserialize_cuda_tensor(args, device): + return torch.multiprocessing.reductions.rebuild_cuda_tensor( + *args[:6], device.index, *args[7:] + ) + + +# We need all processes to exchange a few strings with their addresses (in order +# to be able to connect to each other). 
The solution for this kind of things in +# PyTorch is a Store (TCPStore or FileStore) but we cannot create one ourselves +# (we don't know which addr/port/file to use, since the default one is already +# being used by PyTorch's global store) nor can we extract one from the +# ProcessGroup (since there's no API to do so). We thus resort to using the PG +# itself to exchange data, which is overkill (we need to store the pickled data +# into tensors and send it to the GPU). On top of that, it introduces one more +# catch: it doesn't work in inference mode because of something about modifying +# tensors inplace. I couldn't find a way to temporarily disable inference mode +# (although it's supposed to be possible) however inference mode is thread-local +# so we can dodge it by offloading the collective call to another thread. I hate +# all this so much. + + +def _exchange_addresses( + listeners: List[multiprocessing.connection.Listener], + group: dist.ProcessGroup, + device: torch.device, +) -> List[List[str]]: + rank = group.rank() + world_size = group.size() + my_addresses: List[str] = [] + for listener in listeners: + addr = listener.address + # The address could be a tuple if the listener weren't a UNIX socket + if isinstance(addr, bytes): + # Shouldn't be bytes, according to docs and typeshed, but... + # https://github.com/python/typeshed/issues/10054 + addr = addr.decode("utf-8") + assert isinstance(addr, str) + my_addresses.append(addr) + if world_size == 1: + return [my_addresses] + # In fact, we can retrieve the store from the ProcessGroup, but only using + # a private API. Hence we catch whatever exception and fall back in case. 
    # (Continuation of a function whose signature is above this chunk.)
    # Preferred path: exchange the UNIX-socket addresses through the process
    # group's TCPStore, if we can reach it through this private API.
    try:
        _, store = torch.distributed.distributed_c10d._world.pg_map.get(
            group, (None, None)
        )
        assert store is not None
        store.set(f"xformers_exchange_addresses_{rank}", json.dumps(my_addresses))
        all_addresses = [
            json.loads(store.get(f"xformers_exchange_addresses_{i}"))
            for i in range(world_size)
        ]
    except Exception:
        # Fallback: all_gather_object over the group itself. Run it on a helper
        # thread bound to our device so the collective uses the right GPU.
        all_addresses = [[""] * (world_size - 1)] * world_size
        with concurrent.futures.ThreadPoolExecutor(
            initializer=torch.cuda.set_device, initargs=(device,)
        ) as e:
            e.submit(
                dist.all_gather_object,
                object_list=all_addresses,
                obj=my_addresses,
                group=group,
            ).result()
    return all_addresses


def _is_fp8_dtype(dt: torch.dtype) -> bool:
    # Detect if it's float8_e4m3fn or float8_e5m2 without mentioning them in
    # order to support old versions of PyTorch that don't define them.
    return dt.is_floating_point and torch.finfo(dt).bits == 8


class _FusedSequenceParallel:
    """Set up a communication ring and perform fused ops on it

    Stores the persistent state needed to support a ring of connections between
    processes, and the logic that can do fused comms + matmuls on it.

    We want to achieve overlap between:
    - a computation which reads from the data we received from a remote GPU
    - and the communication where we send some data to another GPU
    And in order to do that we need some staging buffers and a way to
    synchronize access to them across processes.

    To perform the communication over NVLink we make the processes exchange
    their staging buffers using IPC (Inter-Process Communication) handles, which
    "mounts"/"mmaps" an allocation on one GPU into the virtual address space of
    another GPU: the memory remains backed by the original GPU but the other GPU
    can access it as if it were local. We exchange these IPC handles using
    multiprocessing Connections (and the "reductions" provided by PyTorch),
    which we establish over UNIX domain sockets, whose addresses we exchange by
    using a ProcessGroup.

    To synchronize accesses we use a set of counters/sequence numbers that are
    also allocated in memory shared over IPC handles. Processes signal that they
    completed an operation by launching a kernel that increases that value, and
    they wait for another process to complete an operation by launching a kernel
    that busy-waits for that value to increase. Currently we implement these
    kernels manually, but on recent CUDA drivers (515.43.04+, corresponding to
    CUDA 11.7) we could use standard stream memory operations (see
    https://docs.nvidia.com/cuda/archive/11.7.0/cuda-driver-api/group__CUDA__MEMOP.html).

    We prefer to use these kernels (or the stream memory ops) over IPC events
    because IPC events require signaling between processes at launch time to
    ensure that the wait on one process occurs after the record on another
    process. This signaling means that _launching_ our fused operation becomes a
    synchronization barrier, which can increase the launch overhead. It would
    also behave differently from NCCL, where launching is async and all the
    synchronization happens on device in the kernels. A previous version of this
    code which uses IPC events can be found here:
    https://github.com/fairinternal/xformers/pull/504.

    """

    def __init__(
        self,
        device: torch.device,
        dtype: torch.dtype,
        group: dist.ProcessGroup,
        num_stripes: int,
    ):
        """Establish the socket ring, exchange counter IPC handles, set up streams."""
        self.my_device = device
        self.dtype = dtype
        self.my_rank = group.rank()
        self.world_size = group.size()
        self.num_stripes = num_stripes
        # Cached so _should_use_triton doesn't re-query the driver on every call.
        self.my_device_capability = torch.cuda.get_device_capability(self.my_device)

        # Open connections to all other processes. We exchange addresses via
        # NCCL since we don't have access to a Store.
        listeners = [
            multiprocessing.connection.Listener(family="AF_UNIX", address="", backlog=1)
            for _ in range(self.world_size - 1)
        ]
        # If any process is late, all other ones will block here
        all_addresses = _exchange_addresses(listeners, group, self.my_device)
        self.outgoing_conns = [
            None
            if r == self.my_rank
            else multiprocessing.connection.Client(
                family="AF_UNIX",
                # Mypy wants it to be str, but it actually can also be bytes
                # https://github.com/python/typeshed/issues/10054
                address=all_addresses[r][(r - self.my_rank) % self.world_size - 1],
            )
            for r in range(self.world_size)
        ]
        self.incoming_conns = [
            None
            if r == self.my_rank
            else listeners[(self.my_rank - r) % self.world_size - 1].accept()
            for r in range(self.world_size)
        ]

        # Round-robin cursor over stripes, and the next sequence number to use
        # for each stripe (counters start at 0, so the first op uses 1).
        self.next_stripe = 0
        self.next_seq_nums = [1] * self.num_stripes

        # My staging buffers (lazily sized by _ensure_staging_is_large_enough).
        self.staging = torch.empty((0,), device=self.my_device)

        # (Mmapped view of a handle to) buddies' staging buffers
        self.buddys_staging = [
            torch.empty((0,), device=self.my_device)
        ] * self.world_size

        # Allocate buffers for my inboxes
        self.num_writes_into_my_staging = torch.zeros(
            (self.world_size, self.num_stripes), dtype=torch.int, device=self.my_device
        )
        self.num_reads_from_buddys_staging = torch.zeros(
            (self.world_size, self.num_stripes), dtype=torch.int, device=self.my_device
        )

        # Send my handles to buddies
        for rank, (in_conn, out_conn) in enumerate(
            zip(self.incoming_conns, self.outgoing_conns)
        ):
            if in_conn is not None:
                in_conn.send(
                    _serialize_cuda_tensor(
                        self.num_writes_into_my_staging[rank], self.my_device
                    )
                )
            if out_conn is not None:
                out_conn.send(
                    _serialize_cuda_tensor(
                        self.num_reads_from_buddys_staging[rank], self.my_device
                    )
                )

        # Open buddies' inboxes as my outboxes
        self.num_writes_into_buddys_staging = [
            torch.empty((0,), device=self.my_device)
            if out_conn is None
            else _deserialize_cuda_tensor(out_conn.recv(), self.my_device)
            for out_conn in self.outgoing_conns
        ]
        self.num_reads_from_my_staging = [
            torch.empty((0,), device=self.my_device)
            if in_conn is None
            else _deserialize_cuda_tensor(in_conn.recv(), self.my_device)
            for in_conn in self.incoming_conns
        ]

        self.second_stream = torch.cuda.Stream()
        # CUDA can schedule the matmul and the memcpy at the same time, but it
        # tends to run the matmul first and delay the memcpy, which causes a
        # domino effect. We thus "encourage" it to prioritize the memcpy.
        self.memcpy_stream = torch.cuda.Stream(priority=-1)
        # Use dedicated streams to parallelize other operations.
        self.wait_stream = torch.cuda.Stream(priority=-1)
        self.write_stream = torch.cuda.Stream(priority=-1)

        # Alternates between the caller's stream and second_stream; see
        # make_stream_factory.
        self.next_stream_idx = 0

    def _ensure_staging_is_large_enough(self, num_elements: int, random_init: bool):
        """(Re)allocate staging and re-exchange IPC handles if it grew."""
        # Lazily size up the staging area as needed. (If it's the first call,
        # this will always trigger, since staging starts empty). Once at steady
        # state, staging will be of the right (max) size and never grow again.
        if self.staging.numel() < self.world_size * num_elements:
            # When running with _memcpy=False (i.e., for benchmarks) we must
            # ensure that the staging buffer doesn't contain all zeroes as that
            # makes the matmuls go faster (better L2 compression or something).
            self.staging = torch.empty(
                (self.num_stripes, self.world_size, num_elements),
                device=self.my_device,
                dtype=self.dtype,
            )
            if random_init:
                self.staging.normal_()
            # Ship the new buffer's IPC handle to every buddy, and receive
            # theirs in return (a reallocation invalidates the old handles).
            for rank, in_conn in enumerate(self.incoming_conns):
                if in_conn is not None:
                    in_conn.send(
                        _serialize_cuda_tensor(self.staging[:, rank], self.my_device)
                    )
            self.buddys_staging = [
                torch.empty((0,), device=self.my_device)
                if out_conn is None
                else _deserialize_cuda_tensor(out_conn.recv(), self.my_device)
                for rank, out_conn in enumerate(self.outgoing_conns)
            ]

    def _should_use_triton(self, _triton: bool) -> bool:
        """Whether the fused Triton matmul kernel may be used on this device."""
        if not int(os.getenv("XFORMERS_FUSED_SEQPAR_ENABLE_TRITON", "1")):
            return False
        if not TRITON_IS_AVAILABLE:
            return False
        # Triton seems to be having issues on P100 and V100 GPUs, such as
        # https://github.com/openai/triton/issues/1609
        # https://github.com/openai/triton/issues/1610
        # https://github.com/openai/triton/issues/1257#issuecomment-1532616965
        # and, in recent Triton versions (Jan 2024), returning wrong values.
        if self.my_device_capability < (8, 0):
            return False
        if not _triton:
            return False
        return True

    def make_stream_factory(
        self, current_stream: torch.cuda.Stream
    ) -> Callable[[], torch.cuda.Stream]:
        """Return a factory alternating between current_stream and second_stream."""
        def result():
            stream = [current_stream, self.second_stream][self.next_stream_idx]
            self.next_stream_idx += 1
            self.next_stream_idx %= 2
            return stream

        return result

    def allgather_and_linear(
        self,
        scattered_inputs: List[torch.Tensor],
        my_matmul: Callable[
            [List[torch.Tensor], int, Callable[[], torch.cuda.Stream]], None
        ],
        timeout_s: int,
        _wait: bool = True,
        _memcpy: bool = True,
        _triton: bool = True,
        _is_regular_matmul: bool = False,
        _extra_triton_args: Mapping[str, Any] = {},
    ):
        """Perform a fused all-gather followed by a linear layer

        Pushes our shard to every buddy's staging buffer over IPC-mapped
        memory while running my_matmul on each shard as soon as it is
        available; counter writes/waits order the cross-process accesses.
        """

        assert all(si.device == self.my_device for si in scattered_inputs)
        assert all(si.dtype == self.dtype for si in scattered_inputs)

        scattered_input_numels = [si.numel() for si in scattered_inputs]
        total_scattered_input_numel = sum(scattered_input_numels)
        self._ensure_staging_is_large_enough(
            total_scattered_input_numel, random_init=_memcpy is False
        )

        stripe = self.next_stripe % self.num_stripes
        self.next_stripe += 1

        seq_num = self.next_seq_nums[stripe] % SEQ_NUM_WRAP_AROUND
        prev_seq_num = (seq_num - 1) % SEQ_NUM_WRAP_AROUND
        self.next_seq_nums[stripe] += 1

        stagings = [
            s.view((self.world_size,) + si.shape)
            for s, si in zip(
                self.staging[stripe, :, :total_scattered_input_numel].split(
                    scattered_input_numels, dim=-1
                ),
                scattered_inputs,
            )
        ]
        buddys_stagings = [
            [bs] * len(scattered_inputs)
            if bs.numel() == 0
            else [
                s.view(si.shape)
                for s, si in zip(
                    bs[stripe, :total_scattered_input_numel].split(
                        scattered_input_numels, dim=-1
                    ),
                    scattered_inputs,
                )
            ]
            for bs in self.buddys_staging
        ]

        current_stream = torch.cuda.current_stream()

        self.memcpy_stream.wait_stream(current_stream)

        # Wait for buddy to signal that it read from the data before we
        # overwrite it (this wait matches up with write [B] below).
        if _wait:
            WaitValues.OPERATOR(
                [
                    self.num_reads_from_buddys_staging[
                        (self.my_rank + iter_) % self.world_size, stripe
                    ]
                    for iter_ in range(1, self.world_size)
                ],
                prev_seq_num,
                self.memcpy_stream,
                timeout_s,
            )

        for iter_ in range(1, self.world_size):
            dst_rank = (self.my_rank + iter_) % self.world_size

            if _memcpy:
                with torch.cuda.stream(self.memcpy_stream):
                    for bs, si in zip(buddys_stagings[dst_rank], scattered_inputs):
                        bs.copy_(si)

            self.write_stream.wait_stream(self.memcpy_stream)

            # Signal to buddy that we have written into the data so it can
            # read from it (this write matches up with the wait in Triton
            # or with wait [A] below).
            if _wait:
                Memset32bAsync.OPERATOR(
                    self.num_writes_into_buddys_staging[dst_rank][stripe],
                    seq_num,
                    self.write_stream,
                )

        # If we're doing a regular matmul, we have a faster fused Triton kernel!
        if _is_regular_matmul and self._should_use_triton(_triton):
            # Wait for buddy to signal that it wrote into the data before we
            # read from it (this wait matches up with write [A] above).
            _launch_triton_matmul(
                a_my_shard=scattered_inputs[0].flatten(0, -2),
                a=stagings[0].flatten(0, -2),
                my_rank=self.my_rank,
                world_size=self.world_size,
                wait_counters=self.num_writes_into_my_staging,
                write_counters=None,
                direction=BACKWARDS_WITH_ME_FIRST,
                stripe=stripe,
                seq_num=seq_num,
                num_stripes=self.num_stripes,
                timeout_s=timeout_s,
                _wait=_wait,
                **_extra_triton_args,
            )

        else:
            # Not needed, but it prevents the waits from starting much earlier
            # than the rest of the op, which is confusing when profiling.
            self.wait_stream.wait_stream(current_stream)

            self.second_stream.wait_stream(current_stream)
            stream_factory = self.make_stream_factory(current_stream)

            # Our own shard is already local: compute it first, no wait needed.
            my_matmul(scattered_inputs, self.my_rank, stream_factory)

            for iter_ in range(1, self.world_size):
                src_rank = (self.my_rank - iter_) % self.world_size

                # Wait for buddy to signal that it wrote into the data before we
                # read from it (this wait matches up with write [A] above).
                if _wait:
                    WaitValues.OPERATOR(
                        [self.num_writes_into_my_staging[src_rank, stripe]],
                        seq_num,
                        self.wait_stream,
                        timeout_s,
                    )
                current_stream.wait_stream(self.wait_stream)
                self.second_stream.wait_stream(self.wait_stream)

                my_matmul([s[src_rank] for s in stagings], src_rank, stream_factory)

            current_stream.wait_stream(self.second_stream)

        self.write_stream.wait_stream(current_stream)

        # Signal to buddy that we have read from the data so it can
        # overwrite it (this write matches up with wait [B] above).
        if _wait:
            WriteValues.OPERATOR(
                [
                    self.num_reads_from_my_staging[
                        (self.my_rank - iter_) % self.world_size
                    ][stripe]
                    for iter_ in range(1, self.world_size)
                ],
                seq_num,
                self.write_stream,
            )

    def linear_and_reducescatter(
        self,
        my_matmul: Callable[
            [List[torch.Tensor], int, Callable[[], torch.cuda.Stream]], None
        ],
        gathered_outputs: List[torch.Tensor],
        scattered_outputs: List[torch.Tensor],
        timeout_s: int,
        _wait: bool = True,
        _memcpy: bool = True,
        _triton: bool = True,
        _is_regular_matmul: bool = False,
        _extra_triton_args: Mapping[str, Any] = {},
    ):
        """Perform a fused linear layer followed by a reduce-scatter

        Computes each destination rank's partial result into our own staging
        buffer, lets buddies pull their slices, then sums the slices received
        from buddies into scattered_outputs; counters order the accesses.
        """

        assert all(go.device == self.my_device for go in gathered_outputs)
        assert all(go.dtype == self.dtype for go in gathered_outputs)
        assert all(so.device == self.my_device for so in scattered_outputs)
        assert all(so.dtype == self.dtype for so in scattered_outputs)

        scattered_output_numels = [so.numel() for so in scattered_outputs]
        total_scattered_output_numel = sum(scattered_output_numels)
        self._ensure_staging_is_large_enough(
            total_scattered_output_numel, random_init=_memcpy is False
        )

        stripe = self.next_stripe % self.num_stripes
        self.next_stripe += 1

        seq_num = self.next_seq_nums[stripe] % SEQ_NUM_WRAP_AROUND
        prev_seq_num = (seq_num - 1) % SEQ_NUM_WRAP_AROUND
        self.next_seq_nums[stripe] += 1

        stagings = [
            s.view((self.world_size,) + so.shape)
            for s, so in zip(
                self.staging[stripe, :, :total_scattered_output_numel].split(
                    scattered_output_numels, dim=-1
                ),
                scattered_outputs,
            )
        ]
        buddys_stagings = [
            [bs] * len(scattered_outputs)
            if bs.numel() == 0
            else [
                s.view(so.shape)
                for s, so in zip(
                    bs[stripe, :total_scattered_output_numel].split(
                        scattered_output_numels, dim=-1
                    ),
                    scattered_outputs,
                )
            ]
            for bs in self.buddys_staging
        ]

        current_stream = torch.cuda.current_stream()

        self.wait_stream.wait_stream(current_stream)

        # Wait for buddy to signal that it read from the data before we
        # overwrite it (this wait matches up with write [2] below).
        if _wait:
            WaitValues.OPERATOR(
                [
                    self.num_reads_from_my_staging[
                        (self.my_rank + iter_) % self.world_size
                    ][stripe]
                    for iter_ in range(1, self.world_size)
                ],
                prev_seq_num,
                current_stream,
                timeout_s,
            )

        # If we're doing a regular matmul, we have a faster fused Triton kernel!
        if _is_regular_matmul and self._should_use_triton(_triton):
            # Signal to buddy that we have written into the data so it can
            # read from it (this write matches up with wait [1] below).
            _launch_triton_matmul(
                cs=[s.flatten(0, -2) for s in stagings],
                cs_my_shard=[
                    go[self.my_rank].flatten(0, -2) for go in gathered_outputs
                ],
                my_rank=self.my_rank,
                world_size=self.world_size,
                wait_counters=None,
                write_counters=self.num_writes_into_my_staging,
                direction=FORWARDS_WITH_ME_LAST,
                stripe=stripe,
                seq_num=seq_num,
                num_stripes=self.num_stripes,
                timeout_s=timeout_s,
                _wait=_wait,
                **_extra_triton_args,
            )

        else:
            self.second_stream.wait_stream(current_stream)
            stream_factory = self.make_stream_factory(current_stream)

            for iter_ in range(1, self.world_size):
                dst_rank = (self.my_rank + iter_) % self.world_size

                my_matmul([s[dst_rank] for s in stagings], dst_rank, stream_factory)

                # Signal to buddy that we have written into the data so it can
                # read from it (this write matches up with wait [1] below).
                if _wait:
                    self.write_stream.wait_stream(current_stream)
                    self.write_stream.wait_stream(self.second_stream)
                    WriteValues.OPERATOR(
                        [self.num_writes_into_my_staging[dst_rank, stripe]],
                        seq_num,
                        self.write_stream,
                    )

            # Our own slice is computed last, directly into gathered_outputs.
            my_matmul(
                [o[self.my_rank] for o in gathered_outputs],
                self.my_rank,
                stream_factory,
            )

            current_stream.wait_stream(self.second_stream)

        for iter_ in range(1, self.world_size):
            src_rank = (self.my_rank - iter_) % self.world_size

            # Wait for buddy to signal that it wrote into the data before we
            # read from it (this wait matches up with the write in Triton
            # or with write [1] above).
            if _wait:
                WaitValues.OPERATOR(
                    [self.num_writes_into_buddys_staging[src_rank][stripe]],
                    seq_num,
                    self.wait_stream,
                    timeout_s,
                )

            self.memcpy_stream.wait_stream(self.wait_stream)

            if _memcpy:
                with torch.cuda.stream(self.memcpy_stream):
                    for go, bs in zip(gathered_outputs, buddys_stagings[src_rank]):
                        go[src_rank].copy_(bs)

        current_stream.wait_stream(self.memcpy_stream)

        # The actual "reduce" of the reduce-scatter: sum all ranks'
        # contributions for our slice.
        for go, so in zip(gathered_outputs, scattered_outputs):
            torch.sum(go, dim=0, out=so)

        self.write_stream.wait_stream(current_stream)

        # Signal to buddy that we have read from the data so it can
        # overwrite it (this write matches up with wait [2] above).
        if _wait:
            WriteValues.OPERATOR(
                [
                    self.num_reads_from_buddys_staging[
                        (self.my_rank - iter_) % self.world_size, stripe
                    ]
                    for iter_ in range(1, self.world_size)
                ],
                seq_num,
                self.write_stream,
            )


# We'd store this as an attribute on the PG object itself, but some PGs are
# pybind-bound classes and thus don't support it, so we simulate this as an
# external cache.
CACHE: Dict[Tuple[int, torch.dtype], Optional[_FusedSequenceParallel]] = {}


def _can_ranks_communicate_all_to_all_over_nvlink(group: dist.ProcessGroup) -> bool:
    # FIXME This is currently overly simplistic, must be improved. The following
    # should be enough:
    # - ensure that all ranks are running on the same machine (by exchanging
    #   their /proc/sys/kernel/random/boot_id value)
    # - ensure there's P2P between all pairs of ranks (can_device_access_peer
    #   could help here but it's unclear what happens if target devices aren't
    #   visible? maybe just trying to exchange IPC handles and catching errors
    #   would work?
note that in any case some ranks might succeed while some + # might fail so we need a barrier to have them all make the same decision) + return group.size() <= 8 + + +def _lazy_init( + device: torch.device, dtype: torch.dtype, group: dist.ProcessGroup, num_stripes: int +) -> Optional[_FusedSequenceParallel]: + world_size = group.size() + try: + obj = CACHE[(id(group), dtype)] + except KeyError: + if int(os.environ.get("DISABLE_FUSED_SEQUENCE_PARALLEL", "0")): + obj = None + elif world_size == 1: + obj = None + elif not _can_ranks_communicate_all_to_all_over_nvlink(group): + obj = None + else: + obj = _FusedSequenceParallel(device, dtype, group, num_stripes) + CACHE[(id(group), dtype)] = obj + return obj + + +def _default_stream_factory() -> torch.cuda.Stream: + return torch.cuda.current_stream() + + +@overload +def fused_allgather_and_linear( + scattered_input: torch.Tensor, + weight: torch.Tensor, + *, + group: dist.ProcessGroup, + out: Optional[torch.Tensor] = None, + num_stripes: int = 1, + timeout_s: int = 60 * 60, + scale_scattered_input: Optional[torch.Tensor] = None, + scale_weight: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None, + out_dtype: Optional[torch.dtype] = None, + **private_args_DO_NOT_USE, +) -> torch.Tensor: + ... + + +@overload +def fused_allgather_and_linear( + scattered_input: torch.Tensor, + weight: List[torch.Tensor], + *, + group: dist.ProcessGroup, + out: Optional[List[torch.Tensor]] = None, + num_stripes: int = 1, + timeout_s: int = 60 * 60, + scale_scattered_input: Optional[torch.Tensor] = None, + scale_weight: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None, + out_dtype: Optional[torch.dtype] = None, + **private_args_DO_NOT_USE, +) -> List[torch.Tensor]: + ... 
def fused_allgather_and_linear(
    scattered_input: torch.Tensor,
    weight: Union[torch.Tensor, List[torch.Tensor]],
    *,
    group: dist.ProcessGroup,
    out: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None,
    num_stripes: int = 1,
    timeout_s: int = 60 * 60,
    scale_scattered_input: Optional[torch.Tensor] = None,
    scale_weight: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None,
    out_dtype: Optional[torch.dtype] = None,
    **private_args_DO_NOT_USE,
) -> Union[torch.Tensor, List[torch.Tensor]]:
    """Performs a fused all-gather followed by a linear op

    It is equivalent to the following plain PyTorch code:

    # like scattered_input but with first dim multiplied by group's world size
    gathered_input = scattered_input.new_empty(...)
    dist.all_gather_into_tensor(gathered_input, scattered_input, group=group)
    return torch.nn.functional.linear(gathered_input, weight)

    It achieves this by breaking down the matmul into smaller partial ops (as
    many as the world size), each needing as input a different "contribution"
    to the all-gather (by a different rank), and writing to a different chunk of
    the output. Then, on one stream, it sends the local contribution to all
    other ranks (first one rank over, then two, ...) while, on another stream,
    it launches the sub-matmuls in the order in which the remote contributions
    (which are the sub-matmuls' inputs) are supposed to arrive, so that ideally
    none of the sub-matmuls will ever have to wait.

    The idea comes from this paper: https://arxiv.org/abs/2302.05442

    This method uses a staging buffer, which persists across calls, of the same
    size as the all-gathered input tensor (i.e., the input's size times the
    world size). If multiple inputs of multiple sizes are used, the staging
    buffer will be the maximum needed by any of them. Each call, when it starts,
    must first wait for the previous call to finish using the staging buffer. In
    normal conditions, where there's some other operation between two calls,
    this isn't an issue. However, when doing back-to-back calls (like in
    benchmarks) it can introduce artificial delays. To hide them, we allow using
    more than one staging buffer, which will be cycled through, thus trading
    memory for speed. This can be controlled using the num_stripes argument.

    Supports FP8 gemm for tensor-wise quantized weight and input tensors.
    To enable FP8 gemm:
    1. pass scattered_input and weight as quantized FP8 datatype
    2. pass scale_scattered_input and scale_weight, the scales used to
       quantize input and weight, respectively.
    3. set out_dtype, if not specified, will be inferred from scattered_input type.

    """
    world_size = group.size()
    # Normalize to a list so single-weight and multi-weight calls share a path;
    # the return value is un-listed again at the bottom to match the overloads.
    weights = weight if isinstance(weight, list) else [weight]
    assert (scale_scattered_input is None) == (scale_weight is None)
    if scale_weight is not None:
        assert isinstance(weight, list) == isinstance(scale_weight, list)
        scales_weights = (
            scale_weight if isinstance(scale_weight, list) else [scale_weight]
        )
        assert len(weights) == len(scales_weights)
        assert out_dtype is not None, "output_dtype is required with FP8"
    else:
        # Placeholders, never read on the non-FP8 path.
        scales_weights = [torch.empty(1)] * len(weights)
    assert all(w.ndim == 2 for w in weights)
    assert scattered_input.ndim >= 2
    assert all(scattered_input.shape[-1] == w.shape[-1] for w in weights)
    assert scattered_input.is_contiguous()
    gathered_input_shape = (world_size,) + scattered_input.shape
    gathered_output_shapes = [gathered_input_shape[:-1] + w.shape[:-1] for w in weights]
    if out is not None:
        assert isinstance(out, list) == isinstance(weight, list)
        gathered_outputs = out if isinstance(out, list) else [out]
        assert len(gathered_outputs) == len(gathered_output_shapes)
        assert all(
            go.shape == gos for go, gos in zip(gathered_outputs, gathered_output_shapes)
        )
        assert all(go.is_contiguous() for go in gathered_outputs)
        if out_dtype is not None:
            if isinstance(out, list):
                for o in out:
                    assert o.dtype == out_dtype
            else:
                assert out.dtype == out_dtype
    else:
        gathered_outputs = [
            scattered_input.new_empty(
                gos,
                dtype=out_dtype if out_dtype is not None else scattered_input.dtype,
            )
            for gos in gathered_output_shapes
        ]

    def my_matmul(
        inputs: List[torch.Tensor],
        src_rank: int,
        stream_factory: Callable[[], torch.cuda.Stream],
    ) -> None:
        # Computes one rank's slice of every output; called once per rank by
        # the fused/fallback machinery, with that rank's gathered input shard.
        for w, scale_weight, go in zip(weights, scales_weights, gathered_outputs):
            with torch.cuda.stream(stream_factory()):
                if _is_fp8_dtype(w.dtype):
                    # NOTE(review): uses the older _scaled_mm signature that
                    # takes an (out, amax) tuple — confirm against the torch
                    # version this ships with.
                    output_amax = torch.empty(1, dtype=torch.float32, device=w.device)
                    torch._scaled_mm(
                        inputs[0],
                        w.t(),
                        out_dtype=go[src_rank].dtype,
                        scale_a=scale_scattered_input,
                        scale_b=scale_weight,
                        out=(go[src_rank], output_amax),
                    )
                else:
                    torch.matmul(inputs[0], w.t(), out=go[src_rank])

    # The Triton fast path only handles plain (non-FP8) matmuls.
    _is_regular_matmul = all([not _is_fp8_dtype(w.dtype) for w in weights])
    fused_allgather_and_anything(
        [scattered_input],
        my_matmul,
        group=group,
        num_stripes=num_stripes,
        timeout_s=timeout_s,
        _is_regular_matmul=_is_regular_matmul,
        _extra_triton_args=dict(
            bs=[w.t() for w in weights],
            cs=[go.flatten(0, -2) for go in gathered_outputs],
            cs_my_shard=None,
        ),
        **private_args_DO_NOT_USE,
    )

    if isinstance(weight, list):
        return [go.flatten(0, 1) for go in gathered_outputs]
    else:
        return gathered_outputs[0].flatten(0, 1)


def fused_allgather_and_anything(
    scattered_inputs: List[torch.Tensor],
    my_matmul: Callable[
        [List[torch.Tensor], int, Callable[[], torch.cuda.Stream]], None
    ],
    *,
    group: dist.ProcessGroup,
    num_stripes: int = 1,
    timeout_s: int = 60 * 60,
    **private_args_DO_NOT_USE,
) -> None:
    """Generic fused all-gather: runs my_matmul once per source rank, feeding it
    that rank's gathered shards. Falls back to a plain all-gather + loop when
    the fused ring is unavailable."""
    world_size = group.size()

    if len(scattered_inputs) == 0:
        # Degenerate case: still invoke the callback once per rank.
        for src_rank in range(world_size):
            my_matmul([], src_rank, _default_stream_factory)
        return

    assert all(si.is_contiguous() for si in scattered_inputs)
    assert all(si.device == scattered_inputs[0].device for si in scattered_inputs)
    assert all(si.dtype == scattered_inputs[0].dtype for si in scattered_inputs)

    gathered_input_shapes = [(world_size,) + si.shape for si in scattered_inputs]

    obj = _lazy_init(
        scattered_inputs[0].device, scattered_inputs[0].dtype, group, num_stripes
    )

    if world_size == 1:
        # Nothing to gather: run directly on the local shard.
        my_matmul(scattered_inputs, 0, _default_stream_factory)

    # Fallback
    elif obj is None:
        gathered_inputs = [
            si.new_empty(gis)
            for si, gis in zip(scattered_inputs, gathered_input_shapes)
        ]
        for si, gi in zip(scattered_inputs, gathered_inputs):
            dist.all_gather_into_tensor(output_tensor=gi, input_tensor=si, group=group)
        for src_rank in range(world_size):
            my_matmul(
                [gi[src_rank] for gi in gathered_inputs],
                src_rank,
                _default_stream_factory,
            )

    # Fast path
    else:
        assert scattered_inputs[0].device == obj.my_device
        assert scattered_inputs[0].dtype == obj.dtype
        assert obj.num_stripes == num_stripes
        obj.allgather_and_linear(
            scattered_inputs,
            my_matmul,
            timeout_s=timeout_s,
            _wait=private_args_DO_NOT_USE.get("_wait", True),
            _memcpy=private_args_DO_NOT_USE.get("_memcpy", True),
            _triton=private_args_DO_NOT_USE.get("_triton", True),
            _is_regular_matmul=private_args_DO_NOT_USE.get("_is_regular_matmul", False),
            _extra_triton_args=private_args_DO_NOT_USE.get("_extra_triton_args", {}),
        )


@overload
def fused_linear_and_reducescatter(
    gathered_input: torch.Tensor,
    weight: torch.Tensor,
    *,
    group: dist.ProcessGroup,
    out: Optional[torch.Tensor] = None,
    num_stripes: int = 1,
    timeout_s: int = 60 * 60,
    scale_gathered_input: Optional[torch.Tensor] = None,
    scale_weight: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None,
    out_dtype: Optional[torch.dtype] = None,
    **private_args_DO_NOT_USE,
) -> torch.Tensor:
    ...
+ + +@overload +def fused_linear_and_reducescatter( + gathered_input: torch.Tensor, + weight: List[torch.Tensor], + *, + group: dist.ProcessGroup, + out: Optional[List[torch.Tensor]] = None, + num_stripes: int = 1, + timeout_s: int = 60 * 60, + scale_gathered_input: Optional[torch.Tensor] = None, + scale_weight: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None, + out_dtype: Optional[torch.dtype] = None, + **private_args_DO_NOT_USE, +) -> List[torch.Tensor]: + ... + + +def fused_linear_and_reducescatter( + gathered_input: torch.Tensor, + weight: Union[torch.Tensor, List[torch.Tensor]], + *, + group: dist.ProcessGroup, + out: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None, + num_stripes: int = 1, + timeout_s: int = 60 * 60, + scale_gathered_input: Optional[torch.Tensor] = None, + scale_weight: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None, + out_dtype: Optional[torch.dtype] = None, + **private_args_DO_NOT_USE, +) -> Union[torch.Tensor, List[torch.Tensor]]: + """Performs a fused linear op followed by a reduce-scatter + + It is equivalent to the following plain PyTorch code: + + gathered_output = torch.nn.functional.linear(gathered_input, weight) + # like gathered_output but with first dim divided by group's world size + scattered_output = gathered_output.new_empty(...) + dist.reduce_scatter_tensor(scattered_output, gathered_output, group=group) + + Supports FP8 gemm with tensor-wise quantized weights. To enable FP8 gemm: + 1. pass weight and gathered_input as FP8 tensors + 2. Set `scale_gathered_input` and `scale_weight` to the scales used to quantize + inputs and weight, respectively. + 3. Set out_dtype to the desired output dtype. If not specified, it will be inferred from + gathered_input datatype. 
+ """ + world_size = group.size() + weights = weight if isinstance(weight, list) else [weight] + assert (scale_gathered_input is None) == (scale_weight is None) + if scale_weight is not None: + assert isinstance(weight, list) == isinstance(scale_weight, list) + scales_weights = ( + scale_weight if isinstance(scale_weight, list) else [scale_weight] + ) + assert len(weights) == len(scales_weights) + assert out_dtype is not None, "output_dtype is required with FP8" + else: + scales_weights = [torch.empty(1)] * len(weights) + assert all(w.ndim == 2 for w in weights) + assert gathered_input.ndim >= 2 + assert all(gathered_input.shape[-1] == w.shape[-1] for w in weights) + assert gathered_input.is_contiguous() + assert gathered_input.shape[0] % world_size == 0 + gathered_input = gathered_input.view( + (world_size, gathered_input.shape[0] // world_size) + gathered_input.shape[1:] + ) + gathered_output_shapes = [gathered_input.shape[:-1] + w.shape[:-1] for w in weights] + scattered_output_shapes = [gos[1:] for gos in gathered_output_shapes] + if out is not None: + assert isinstance(out, list) == isinstance(weight, list) + scattered_outputs = out if isinstance(out, list) else [out] + assert len(scattered_outputs) == scattered_output_shapes + assert all(so.device == gathered_input.device for so in scattered_outputs) + assert all(so.dtype == gathered_input.dtype for so in scattered_outputs) + assert all( + so.shape == sos + for so, sos in zip(scattered_outputs, scattered_output_shapes) + ) + if out_dtype is not None: + if isinstance(out, list): + for o in out: + assert o.dtype == out_dtype + else: + assert out.dtype == out_dtype + else: + scattered_outputs = [ + gathered_input.new_empty( + sos, + dtype=out_dtype if out_dtype is not None else gathered_input.dtype, + ) + for sos in scattered_output_shapes + ] + + def my_matmul( + outputs: List[torch.Tensor], + dst_rank: int, + stream_factory: Callable[[], torch.cuda.Stream], + ) -> None: + for w, scale_weight, o in zip(weights, 
scales_weights, outputs): + with torch.cuda.stream(stream_factory()): + if _is_fp8_dtype(w.dtype): + output_amax = torch.empty(1, dtype=torch.float32, device=o.device) + torch._scaled_mm( + gathered_input[dst_rank], + w.t(), + out_dtype=o.dtype, + scale_a=scale_gathered_input, + scale_b=scale_weight, + out=(o, output_amax), + ) + else: + torch.matmul(gathered_input[dst_rank], w.t(), out=o) + + _is_regular_matmul = all([not _is_fp8_dtype(w.dtype) for w in weights]) + fused_anything_and_reducescatter( + my_matmul, + scattered_outputs, + group=group, + num_stripes=num_stripes, + timeout_s=timeout_s, + _is_regular_matmul=_is_regular_matmul, + _extra_triton_args=dict( + a_my_shard=None, + a=gathered_input.flatten(0, -2), + bs=[w.t() for w in weights], + ), + **private_args_DO_NOT_USE, + ) + + if isinstance(weight, list): + return scattered_outputs + else: + return scattered_outputs[0] + + +def fused_anything_and_reducescatter( + my_matmul: Callable[ + [List[torch.Tensor], int, Callable[[], torch.cuda.Stream]], None + ], + scattered_outputs: List[torch.Tensor], + *, + group: dist.ProcessGroup, + num_stripes: int = 1, + timeout_s: int = 60 * 60, + **private_args_DO_NOT_USE, +) -> None: + world_size = group.size() + + if len(scattered_outputs) == 0: + for dst_rank in range(world_size): + my_matmul([], dst_rank, _default_stream_factory) + return + + assert all(so.is_contiguous() for so in scattered_outputs) + assert all(so.device == scattered_outputs[0].device for so in scattered_outputs) + assert all(so.dtype == scattered_outputs[0].dtype for so in scattered_outputs) + + gathered_output_shapes = [(world_size,) + so.shape for so in scattered_outputs] + + obj = _lazy_init( + scattered_outputs[0].device, scattered_outputs[0].dtype, group, num_stripes + ) + + if world_size == 1: + my_matmul(scattered_outputs, 0, _default_stream_factory) + + # Fallback + elif obj is None: + gathered_outputs = [ + so.new_empty(gos) + for so, gos in zip(scattered_outputs, gathered_output_shapes) 
+ ] + for dst_rank in range(world_size): + my_matmul( + [go[dst_rank] for go in gathered_outputs], + dst_rank, + _default_stream_factory, + ) + for go, so in zip(gathered_outputs, scattered_outputs): + dist.reduce_scatter_tensor(output=so, input=go, group=group) + + # Fast path + else: + assert scattered_outputs[0].device == obj.my_device + assert scattered_outputs[0].dtype == obj.dtype + assert obj.num_stripes == num_stripes + gathered_outputs = [ + scattered_outputs[0].new_empty(gos) for gos in gathered_output_shapes + ] + obj.linear_and_reducescatter( + my_matmul, + gathered_outputs, + scattered_outputs, + timeout_s=timeout_s, + _wait=private_args_DO_NOT_USE.get("_wait", True), + _memcpy=private_args_DO_NOT_USE.get("_memcpy", True), + _triton=private_args_DO_NOT_USE.get("_triton", True), + _is_regular_matmul=private_args_DO_NOT_USE.get("_is_regular_matmul", False), + _extra_triton_args=private_args_DO_NOT_USE.get("_extra_triton_args", {}), + ) diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/sp24.py b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/sp24.py new file mode 100644 index 0000000000000000000000000000000000000000..3031ac6ba20c63e6a70485bb98dec994a1054f51 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/sp24.py @@ -0,0 +1,629 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. 
+ +import contextlib +import ctypes +import glob +import warnings +from functools import partial +from pathlib import Path +from typing import Any, Callable, Optional, Tuple, TypeVar, cast + +import torch + +from .common import BaseOperator, get_operator, get_xformers_operator, register_operator + + +@register_operator +class SparsifyBothWays(BaseOperator): + OPERATOR = get_xformers_operator("sparse24_sparsify_both_ways") + OPERATOR_CATEGORY = "sp24" + NAME = "sparse24_sparsify_both_ways" + + +@register_operator +class SparsifyApply(BaseOperator): + OPERATOR = get_xformers_operator("sparse24_apply") + OPERATOR_CATEGORY = "sp24" + NAME = "sparse24_apply" + + +@register_operator +class SparsifyApplyDenseOutput(BaseOperator): + OPERATOR = get_xformers_operator("sparse24_apply_dense_output") + OPERATOR_CATEGORY = "sp24" + NAME = "sparse24_apply_dense_output" + + +@register_operator +class Sp24Gemm(BaseOperator): + OPERATOR = get_xformers_operator("_sparse24_gemm") + OPERATOR_CATEGORY = "sp24" + NAME = "_sparse24_gemm" + + +def _get_cusparselt_lib() -> Optional[str]: + libs = glob.glob( + str(Path(torch._C.__file__).parent / "lib" / "libcusparseLt*.so.0") + ) + if len(libs) != 1: + return None + return libs[0] + + +def _get_cusparselt_torch_version() -> Tuple[int, int, int]: + """ + Returns the version of the cusparselt.so library that ships with pytorch 2.2+ + """ + lib_path = _get_cusparselt_lib() + if lib_path is None: + return (0, 0, 0) + lib = ctypes.CDLL(lib_path) + + def get_version_part(version_part: int) -> int: + value = ctypes.c_int() + ret = lib.cusparseLtGetProperty(version_part, ctypes.byref(value)) + if ret != 0: + return -1 + return value.value + + return (get_version_part(0), get_version_part(1), get_version_part(2)) + + +_cusplt_version = _get_cusparselt_torch_version() +_cusplt_version_str = ".".join(str(v) for v in _cusplt_version) + + +@register_operator +class Sp24GemmCusplt(BaseOperator): + OPERATOR = get_operator("aten", "_cslt_sparse_mm") + 
def _has_cusparseLt() -> bool:
    """Whether the bundled cusparseLt is recent enough (>= 0.4.0) to use.

    Also warns once per call when the version is < 0.5.0, which is usable
    but slower.
    """
    available = _cusplt_version >= (0, 4, 0)
    if available and _cusplt_version < (0, 5, 0):
        # Version 0.5.0 has much better perf because it can fuse the
        # transpose within the GEMM epilogue
        warnings.warn(
            f"You have cusparseLt version {_cusplt_version_str} "
            f"but you get better performance with v0.5.0+ if "
            f"you replace the .so file ({_get_cusparselt_lib()})"
        )
    return available


def sparse24_pointwise_op(
    func, types, args=(), kwargs=None, allow_sparsify_args_list=()
):
    """Apply a pointwise aten op to the packed values of Sparse24Tensors.

    All tensor operands must be Sparse24Tensors sharing the same sparsity
    pattern (same ``threads_masks`` storage); dense tensors at argument
    positions listed in ``allow_sparsify_args_list`` are sparsified to match
    first. The op is applied separately to the row-major (``packed``) and
    column-major (``packed_t``) buffers.
    """
    # Find any sparse operand to use as the reference pattern.
    self = None
    for tensor in args:
        if isinstance(tensor, Sparse24Tensor):
            self = tensor
    assert self is not None
    args_updated = []
    for i, tensor in enumerate(args):
        if isinstance(tensor, torch.Tensor):
            if not isinstance(tensor, Sparse24Tensor):
                if i in allow_sparsify_args_list:
                    tensor = sparsify24_like(tensor, self)
                else:
                    raise ValueError(
                        f"Operation {func.__module__}.{func.__name__} on Sparse24Tensor requires all operands to "
                        f"be Sparse24Tensors, but operand {i} is a {type(tensor)}"
                    )
            # Pointwise ops are only valid when every operand keeps its
            # non-zeros in exactly the same positions: compare the mask
            # storage identity and layout, not the values.
            if (
                tensor.threads_masks is None
                or self.threads_masks is None
                or tensor.threads_masks.data_ptr() != self.threads_masks.data_ptr()
                or tensor.threads_masks.stride() != self.threads_masks.stride()
            ):
                raise ValueError(
                    f"Operation {func.__module__}.{func.__name__} on Sparse24Tensor requires all operands to be "
                    "Sparse24Tensors with the same sparsity pattern"
                )
        args_updated.append(tensor)
    assert isinstance(
        self, Sparse24TensorCutlass
    ), "Only implemented for CUTLASS tensors"
    # Run the op twice: once on the packed buffers, once on the packed
    # transposed buffers, so both orientations stay consistent.
    return Sparse24TensorCutlass(
        self.shape,
        func(
            *[(x.packed if isinstance(x, Sparse24Tensor) else x) for x in args_updated]
        ),
        self.meta,
        func(
            *[
                (x.packed_t if isinstance(x, Sparse24Tensor) else x)
                for x in args_updated
            ]
        ),
        self.meta_t,
        self.threads_masks,
    )
def sparse24_mm(func, types, args=(), kwargs=None) -> torch.Tensor:
    """Dispatch handler for ``aten.mm``/``aten.matmul`` with one sparse operand."""
    assert len(args) == 2
    lhs, rhs = args
    if lhs.ndim != 2 or rhs.ndim != 2:
        raise NotImplementedError(
            "`Sparse24Tensor` matmul: Broadcasting is not implemented"
        )
    if isinstance(lhs, Sparse24Tensor):
        return lhs._mm(rhs)
    # Sparse operand on the right: compute (rhs^T @ lhs^T)^T so the sparse
    # tensor ends up on the left of the hardware GEMM.
    rhs_t = rhs.t()
    assert isinstance(rhs_t, Sparse24Tensor)
    return rhs_t._mm(lhs.t(), prefer_col_major_output=True).t()


def sparse24_addmm(func, types, args=(), kwargs=None) -> torch.Tensor:
    """Dispatch handler for ``aten.addmm`` (``bias + mat1 @ mat2``); only the
    second matrix operand may be sparse."""
    assert len(args) == 3
    bias, mat1, mat2 = args
    if mat1.ndim != 2 or mat2.ndim != 2:
        raise NotImplementedError(
            "`Sparse24Tensor` matmul: Broadcasting is not implemented"
        )
    if bias.ndim != 1:
        raise NotImplementedError(
            f"`Sparse24Tensor` matmul: only bias dim=1 supported. Shape={bias.shape}"
        )
    if isinstance(mat1, Sparse24Tensor):
        raise NotImplementedError(
            "`Sparse24Tensor` matmul: only operand B of `addmm` can be sparse"
        )
    # Same transposition trick as sparse24_mm, carrying the bias through.
    mat2_t = mat2.t()
    assert isinstance(mat2_t, Sparse24Tensor)
    return mat2_t._mm(mat1.t(), bias=bias, prefer_col_major_output=True).t()


def sparse24_linear(func, types, args=(), kwargs=None) -> torch.Tensor:
    """Dispatch handler for ``aten.linear``: ``x @ w.t() (+ bias)``."""
    assert len(args) in [2, 3]
    inputs, weight = args[:2]
    bias = args[2] if len(args) == 3 else None
    if bias is None:
        return inputs @ weight.t()
    return sparse24_addmm(
        func=None,
        types=None,
        args=[bias, inputs, weight.t()],
    )


def sparse24_t(func, types, args=(), kwargs=None) -> torch.Tensor:
    """Dispatch handler for ``aten.t``: swap the packed/packed_t buffers
    instead of materializing a transpose."""
    assert len(args) == 1
    tensor = args[0]
    assert isinstance(tensor, Sparse24Tensor)
    assert len(tensor.shape) == 2
    flipped_shape = (tensor.shape[-1], tensor.shape[0])
    return tensor.__class__(
        flipped_shape,
        packed=tensor.packed_t,
        meta=tensor.meta_t,
        packed_t=tensor.packed,
        meta_t=tensor.meta,
        threads_masks=tensor.threads_masks.transpose(0, 1),
    )


def sparse24_view(func, types, args=(), kwargs=None) -> torch.Tensor:
    """Dispatch handler for ``aten.view``: only the identity view (same
    shape) is supported — the packed layout cannot be reshaped."""
    assert len(args) == 2
    self, shape = args
    if tuple(shape) == self.shape:
        return self
    raise NotImplementedError(
        f"`view` is not implemented for Sparse24Tensor, except for the dummy case (shape={shape})"
    )
def sparse24_detach(func, types, args, kwargs) -> torch.Tensor:
    """Dispatch handler for ``aten.detach``: new wrapper sharing the same
    packed buffers, with ``requires_grad=False``."""
    assert len(args) == 1
    self = args[0]
    return self.__class__(
        shape=self.shape,
        packed=self.packed,
        meta=self.meta,
        packed_t=self.packed_t,
        meta_t=self.meta_t,
        threads_masks=self.threads_masks,
        requires_grad=False,
    )


@contextlib.contextmanager
def no_dispatch():
    """Temporarily disable ``__torch_dispatch__`` so ops inside the block run
    on the underlying tensors instead of re-entering the subclass handlers."""
    guard = torch._C._DisableTorchDispatch()
    try:
        yield
    finally:
        del guard


def fallback_dispatcher(func, types, args, kwargs):
    """Run ``func`` with subclass dispatch disabled (plain aten behavior)."""
    with no_dispatch():
        return func(*args)


# Op -> handler table consulted by Sparse24TensorCutlass.__torch_dispatch__.
SPARSE24_DISPATCH_CUTLASS = {
    torch.ops.aten.is_same_size: fallback_dispatcher,
    torch.ops.aten.detach_: fallback_dispatcher,
    torch.ops.aten.detach: sparse24_detach,
    torch.ops.aten.relu: sparse24_pointwise_op,
    torch.ops.aten.gelu: sparse24_pointwise_op,
    torch.ops.aten.silu: sparse24_pointwise_op,
    torch.ops.aten.mul: partial(
        # `mul` BW in swiglu
        sparse24_pointwise_op,
        allow_sparsify_args_list=(
            0,
            1,
        ),
    ),
    torch.ops.aten.add: sparse24_pointwise_op,
    # Note: for these ops, we allow the gradient to come in as a `torch.Tensor`
    # and we will run the sparsification right before calling the BW aten func
    torch.ops.aten.gelu_backward: partial(
        sparse24_pointwise_op, allow_sparsify_args_list=(0,)
    ),
    torch.ops.aten.silu_backward: partial(
        sparse24_pointwise_op, allow_sparsify_args_list=(0, 1)
    ),
    torch.ops.aten.threshold_backward: partial(  # relu BW
        sparse24_pointwise_op,
        allow_sparsify_args_list=(0,),
    ),
    torch.ops.aten.mm: sparse24_mm,
    torch.ops.aten.matmul: sparse24_mm,
    torch.ops.aten.t: sparse24_t,
    torch.ops.aten.view: sparse24_view,
    torch.ops.aten.linear: sparse24_linear,
}

# Op -> handler table for Sparse24TensorCuSparseLt: a smaller set — the
# pointwise handlers above are only implemented for the CUTLASS layout.
SPARSE24_DISPATCH_CUSPARSELT = {
    torch.ops.aten.is_same_size: fallback_dispatcher,
    torch.ops.aten.detach_: fallback_dispatcher,
    torch.ops.aten.detach: sparse24_detach,
    torch.ops.aten.t: sparse24_t,
    torch.ops.aten.view: sparse24_view,
    torch.ops.aten.mm: sparse24_mm,
    torch.ops.aten.matmul: sparse24_mm,
    torch.ops.aten.addmm: sparse24_addmm,
    torch.ops.aten.linear: sparse24_linear,
}
class Sparse24Tensor(torch.Tensor):
    """Tensor subclass holding a 2:4 (semi-structured) sparse matrix.

    The dense data is never stored; instead the instance carries packed
    non-zero values plus metadata, in both orientations, so that both
    ``A @ B`` and ``A.t() @ B`` can hit the sparse kernels:

    - ``packed`` / ``meta``: row-major packed values and kernel metadata.
    - ``packed_t`` / ``meta_t``: same for the transposed matrix.
    - ``threads_masks``: the sparsity-pattern masks shared by both
      orientations (exact encoding is backend-defined — see the sparsify
      operators).
    """

    packed: torch.Tensor
    meta: torch.Tensor
    packed_t: torch.Tensor
    meta_t: torch.Tensor
    threads_masks: torch.Tensor
    __slots__ = ["packed", "meta", "packed_t", "meta_t", "threads_masks"]

    # We need to update the new method here to tell PyTorch what should be
    # the Tensor corresponding to the wrapper object
    @staticmethod
    def __new__(
        cls,
        shape,
        packed: torch.Tensor,
        meta: torch.Tensor,
        packed_t: torch.Tensor,
        meta_t: torch.Tensor,
        threads_masks: torch.Tensor,
        *,
        requires_grad=False,
    ):
        assert isinstance(packed, torch.Tensor)
        # The wrapper advertises `shape` but shares device/dtype with the
        # packed storage.
        tensor = torch.Tensor._make_wrapper_subclass(  # type: ignore[attr-defined]
            cls,
            shape,
            device=packed.device,
            dtype=packed.dtype,
            requires_grad=requires_grad,
        )
        tensor.packed = packed
        tensor.meta = meta
        tensor.packed_t = packed_t
        tensor.meta_t = meta_t
        tensor.threads_masks = threads_masks
        return tensor

    def __repr__(self):
        return f"{self.__class__.__name__}(shape={self.shape})"

    def _sp24_to_dense(self) -> torch.Tensor:
        """Materialize the dense matrix by multiplying with identity."""
        # Multiply by identity
        # WARN: This is not efficient at all
        e = torch.eye(
            self.shape[1], self.shape[1], device=self.device, dtype=self.dtype
        )
        return self @ e

    def _mm(
        self,
        B: torch.Tensor,
        *,
        prefer_col_major_output: bool = False,
        bias: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        # Backend-specific sparse GEMM; implemented by subclasses.
        raise NotImplementedError()

    # Only __torch_dispatch__ handles ops; the function-level protocol is off.
    __torch_function__ = torch._C._disabled_torch_function_impl

    def __tensor_flatten__(self):
        # Flatten/unflatten protocol used by torch.compile / pytree.
        return self.__slots__, (self.shape, self.requires_grad)

    @classmethod
    def __tensor_unflatten__(cls, inner_tensors, flatten_spec):
        shape, requires_grad = flatten_spec
        return cls(
            shape,
            **inner_tensors,
            requires_grad=requires_grad,
        )
class Sparse24TensorCutlass(Sparse24Tensor):
    """2:4 sparse tensor backed by the CUTLASS kernels (``Sp24Gemm``).

    Supports the op set in ``SPARSE24_DISPATCH_CUTLASS`` (including the
    pointwise ops); does NOT support a fused bias in matmul.
    """

    def _mm(
        self,
        B: torch.Tensor,
        *,
        bias: Optional[torch.Tensor] = None,
        prefer_col_major_output: bool = False,
    ) -> torch.Tensor:
        """Compute ``self @ B`` with the CUTLASS sparse GEMM.

        Raises:
            ValueError: when ``B`` is itself sparse (not supported by HW).
            NotImplementedError: for bias, broadcasting, or shape mismatch.
        """
        if isinstance(B, Sparse24Tensor):
            raise ValueError(
                "`Sparse24Tensor @ Sparse24Tensor` is not supported by the hardware"
            )
        if bias is not None:
            raise NotImplementedError(
                f"`Sparse24Tensor` with backend='{BACKEND_CUTLASS}' does not support matmul with bias. "
                f"Remove the bias, or use backend='{BACKEND_CUSPARSELT}'"
            )
        if self.ndim != 2 or B.ndim != 2:
            raise NotImplementedError(
                f"`{self.__class__.__name__}` matmul: Broadcasting is not implemented"
            )
        if self.shape[1] != B.shape[0]:
            raise NotImplementedError(
                f"`{self.__class__.__name__}` matmul: invalid shapes \
                ({self.shape[0]}, {self.shape[1]}) @ ({B.shape[0]}, {B.shape[1]})"
            )
        # Crop to the logical number of rows — presumably the kernel output
        # may be row-padded (mirrors the cuSPARSELt path); TODO confirm.
        return Sp24Gemm.OPERATOR(self.packed, B, self.meta)[: self.shape[0]]

    @classmethod
    def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
        # Route every aten op through the explicit dispatch table; anything
        # not listed there is unsupported for this subclass.
        if func._overloadpacket not in SPARSE24_DISPATCH_CUTLASS:
            raise NotImplementedError(
                f"{cls.__name__} only supports a specific set of operations, "
                f"can't perform requested op ({func.__name__})"
            )
        return SPARSE24_DISPATCH_CUTLASS[func._overloadpacket](
            func, types, args, kwargs
        )
class Sparse24TensorCuSparseLt(Sparse24Tensor):
    """2:4 sparse tensor backed by the cuSPARSELt kernels
    (``Sp24GemmCusplt``); supports the op set in
    ``SPARSE24_DISPATCH_CUSPARSELT`` and a fused matmul bias."""

    def _mm(
        self,
        B: torch.Tensor,
        *,
        prefer_col_major_output: bool = False,
        bias: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Compute ``self @ B (+ bias)`` with the cuSPARSELt sparse GEMM.

        Args:
            B: dense right-hand operand; 2D, same dtype as ``self``, and its
                second dimension must be a multiple of 8.
            prefer_col_major_output: ask the kernel for a transposed result
                (then transpose back), which can be cheaper for some callers.
            bias: optional 1D bias fused into the GEMM; same dtype as ``self``.

        Raises:
            ValueError: when ``B`` is itself sparse (not supported by HW).
            NotImplementedError: for broadcasting, misaligned shapes, or
                mismatched dtypes.
        """
        if isinstance(B, Sparse24Tensor):
            raise ValueError(
                "`Sparse24Tensor @ Sparse24Tensor` is not supported by the hardware"
            )
        if self.ndim != 2 or B.ndim != 2:
            raise NotImplementedError(
                f"`{self.__class__.__name__}` matmul: Broadcasting is not implemented"
            )
        if self.shape[1] != B.shape[0]:
            raise NotImplementedError(
                f"`{self.__class__.__name__}` matmul: invalid shapes \
                ({self.shape[0]}, {self.shape[1]}) @ ({B.shape[0]}, {B.shape[1]})"
            )
        if B.shape[1] % 8 != 0:
            raise NotImplementedError(
                f"`{self.__class__.__name__}` matmul: trying to do `A={tuple(self.shape)} @ B={tuple(B.shape)}`. "
                "The dense matrix B should have the second dimension aligned to 8."
            )
        if B.dtype != self.dtype:
            raise NotImplementedError(
                f"`{self.__class__.__name__}` matmul: trying to do `A={tuple(self.shape)} @ B={tuple(B.shape)}`, "
                f"with A.dtype={self.dtype} and B.dtype={B.dtype}. "
                "This operation is only supported when A and B have the same data type."
            )
        if bias is not None and bias.dtype != self.dtype:
            # BUGFIX: the second line of this message was a plain string
            # (missing the `f` prefix), so "{self.dtype}"/"{B.dtype}" were
            # emitted literally; it also reported B's dtype instead of the
            # mismatching bias (C) dtype.
            raise NotImplementedError(
                f"`{self.__class__.__name__}` matmul: trying to do `A={tuple(self.shape)} @ B={tuple(B.shape)} + C`, "
                f"with A.dtype=B.dtype={self.dtype} and C.dtype={bias.dtype}. "
                "This operation is only supported when A, B and C have the same data type."
            )
        assert _has_cusparseLt()
        out = Sp24GemmCusplt.OPERATOR(
            self.packed, B, bias=bias, transpose_result=prefer_col_major_output
        )
        if prefer_col_major_output:
            out = out.t()
        # Crop to the logical number of rows — presumably the kernel output
        # may be row-padded (mirrors the CUTLASS path); TODO confirm.
        return out[: self.shape[0]]

    @classmethod
    def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
        # Route every aten op through the explicit dispatch table; anything
        # not listed there is unsupported for this subclass.
        if func._overloadpacket not in SPARSE24_DISPATCH_CUSPARSELT:
            raise NotImplementedError(
                f"{cls.__name__} only supports a specific set of operations, "
                f"can't perform requested op ({func.__name__})"
            )
        return SPARSE24_DISPATCH_CUSPARSELT[func._overloadpacket](
            func, types, args, kwargs
        )
" + f"Expected '{GRADIENT_SP24}' or '{GRADIENT_DENSE}'" + ) + if not isinstance(x, Sparse24Tensor): + (packed, meta, packed_t, meta_t, threads_masks) = SparsifyBothWays.OPERATOR( + x, algorithm=algo, backend=backend + ) + cls = ( + Sparse24TensorCutlass + if backend == BACKEND_CUTLASS + else Sparse24TensorCuSparseLt + ) + out = cls( + x.shape, + packed=packed, + meta=meta, + packed_t=packed_t, + meta_t=meta_t, + threads_masks=threads_masks, + requires_grad=False, + ) + else: + if x.threads_masks is None: + raise ValueError("!!") + out = x + ctx.threads_masks = out.threads_masks + ctx.meta = out.meta + ctx.meta_t = out.meta_t + ctx.dtype = out.dtype + ctx.gradient = gradient + return out + + @staticmethod + def backward(ctx, grad_out: torch.Tensor): # type: ignore[override] + if isinstance(grad_out, Sparse24Tensor): + return grad_out, None, None, None + assert not isinstance(grad_out, Sparse24Tensor) + assert grad_out.dtype == ctx.dtype + if ctx.gradient == GRADIENT_SP24: + packed, packed_t = SparsifyApply.OPERATOR(grad_out, ctx.threads_masks) + grad_in: torch.Tensor = Sparse24TensorCutlass( + grad_out.shape, + packed, + ctx.meta, + packed_t, + ctx.meta_t, + ctx.threads_masks, + requires_grad=grad_out.requires_grad, + ) + elif ctx.gradient == GRADIENT_DENSE: + assert ctx.threads_masks.is_contiguous() + grad_in = SparsifyApplyDenseOutput.OPERATOR(grad_out, ctx.threads_masks) + else: + assert False, f"Unsupported gradient type: {ctx.gradient}" + return ( + grad_in, + None, + None, + None, + ) + + +class _Sparsify24LikeFunc(torch.autograd.Function): + @staticmethod + def forward(ctx, x: torch.Tensor, pattern: Sparse24Tensor, out_dense: bool): # type: ignore[override] + assert isinstance(pattern, Sparse24Tensor) + if not isinstance(pattern, Sparse24TensorCutlass): + raise NotImplementedError( + "`sparsify24_like(x, pattern)` is only implemented for CUTLASS backend" + ) + if not pattern.threads_masks.is_contiguous(): + raise NotImplementedError( + "`sparsify24_like(x, 
class _Sparsify24LikeFunc(torch.autograd.Function):
    """Autograd function sparsifying ``x`` with the pattern of another
    Sparse24Tensor; CUTLASS backend only."""

    @staticmethod
    def forward(ctx, x: torch.Tensor, pattern: Sparse24Tensor, out_dense: bool):  # type: ignore[override]
        assert isinstance(pattern, Sparse24Tensor)
        if not isinstance(pattern, Sparse24TensorCutlass):
            raise NotImplementedError(
                "`sparsify24_like(x, pattern)` is only implemented for CUTLASS backend"
            )
        # The transposed case is normalized away by the sparsify24_like
        # wrapper before .apply is called.
        if not pattern.threads_masks.is_contiguous():
            raise NotImplementedError(
                "`sparsify24_like(x, pattern)` is not implemented when `pattern` is transposed"
            )
        # Reuse the pattern's masks/metadata for both output and gradient.
        ctx.threads_masks = pattern.threads_masks
        ctx.meta = pattern.meta
        ctx.meta_t = pattern.meta_t
        ctx.dtype = pattern.dtype
        if out_dense:
            # Dense output: same values, entries outside the pattern zeroed.
            assert ctx.threads_masks.is_contiguous()
            return SparsifyApplyDenseOutput.OPERATOR(x, ctx.threads_masks)
        packed, packed_t = SparsifyApply.OPERATOR(x, ctx.threads_masks)
        return Sparse24TensorCutlass(
            x.shape,
            packed,
            ctx.meta,
            packed_t,
            ctx.meta_t,
            ctx.threads_masks,
            requires_grad=x.requires_grad,
        )

    @staticmethod
    def backward(ctx, grad_out: torch.Tensor):  # type: ignore[override]
        if isinstance(grad_out, Sparse24Tensor):
            return grad_out, None, None
        assert not isinstance(grad_out, Sparse24Tensor)
        assert grad_out.dtype == ctx.dtype
        # Sparsify the gradient with the same pattern as the forward pass.
        packed, packed_t = SparsifyApply.OPERATOR(grad_out, ctx.threads_masks)
        # One gradient per forward input (x, pattern, out_dense).
        return (
            Sparse24TensorCutlass(
                grad_out.shape,
                packed,
                ctx.meta,
                packed_t,
                ctx.meta_t,
                ctx.threads_masks,
                requires_grad=grad_out.requires_grad,
            ),
            None,
            None,
        )
# `torch._dynamo.allow_in_graph` works as a decorator but breaks mypy, so we
# wrap it with an explicitly typed shim.
F = TypeVar("F", bound=Callable[..., Any])


def allow_in_graph(func: F) -> F:
    """Typed decorator form of ``torch._dynamo.allow_in_graph`` that keeps
    the wrapped callable's static type."""
    return cast(F, torch._dynamo.allow_in_graph(func))


@allow_in_graph
def sparsify24(
    x: torch.Tensor,
    algo: str = "",
    gradient: str = GRADIENT_SP24,
    backend: str = BACKEND_CUTLASS,
) -> Sparse24Tensor:
    """Convert ``x`` to a 2:4 sparse tensor (autograd-aware)."""
    return _Sparsify24Func.apply(x, algo, gradient, backend)


@allow_in_graph
def sparsify24_like(
    x: torch.Tensor, pattern: torch.Tensor, out_dense: bool = False
) -> Sparse24Tensor:
    """Sparsify ``x`` using the existing sparsity pattern of ``pattern``."""
    if not isinstance(pattern, Sparse24Tensor):
        raise ValueError(
            f"`pattern` must be a `Sparse24Tensor` but got a {type(pattern)}"
        )
    if pattern.threads_masks.is_contiguous():
        return _Sparsify24LikeFunc.apply(x, pattern, out_dense)
    # Transposed pattern: sparsify in the contiguous orientation, then
    # transpose the result back.
    return _Sparsify24LikeFunc.apply(x.t(), pattern.t(), out_dense).t()
@register_operator
class DualGemmSiluOp(BaseOperator):
    # Fused kernel computing both linear projections plus silu(x1) * x2 in
    # one pass (the SwiGLU "gate * value" forward).
    OPERATOR = get_xformers_operator("dual_gemm_silu_identity_mul")
    OPERATOR_CATEGORY = "swiglu"
    NAME = "dual_gemm_silu"

    @classmethod
    # type: ignore
    def operator_flop(
        cls, x: torch.Tensor, w1: torch.Tensor, b1, w2: torch.Tensor, b2
    ) -> int:
        """NOTE: we neglect the impact of biases / pointwises"""
        # Two M x K @ K x N GEMMs, 2 flops per multiply-accumulate.
        M, N, K = x.shape[0], w1.shape[0], w1.shape[1]
        return M * N * K * 2 * 2


@register_operator
class GemmFusedSumOp(BaseOperator):
    # GEMM that also reduces one operand (used for fused dw/db in backward).
    OPERATOR = get_xformers_operator("gemm_fused_operand_sum")
    OPERATOR_CATEGORY = "swiglu"
    NAME = "gemm_fused_operand_sum"

    @classmethod
    # type: ignore
    def operator_flop(cls, a: torch.Tensor, b: torch.Tensor, out1, out2) -> int:
        """FLOP count for the single fused GEMM (reduction cost neglected)."""
        M, N, K = a.shape[0], b.shape[1], a.shape[1]
        return M * N * K * 2
class _SwiGLUDecomposedFunc(torch.autograd.Function):
    """
    This is just an example implementation with all
    operations explicited. This implementation is worse
    than pytorch, because pytorch is able to fuse some operations
    (eg the linear forward ...) that are decomposed here.

    The time measurements were made on the ViT-Giant setting:
    - A100/f16
    - input: [4440, 1536]
    - hidden: [4440, 4096]
    """

    NAME = "decomposed"
    # When True, the backward pass runs entirely in float32.
    FORCE_BW_F32 = False

    # NOTE: defined without @staticmethod; it is only ever accessed through
    # the class (`cls._silu_backward`), so it is never bound to an instance.
    def _silu_backward(dy, x):
        # https://github.com/pytorch/pytorch/blob/563b065f5a4b4055fa6b025c2514b566d5fd9439/aten/src/ATen/native/Activation.cpp#L483
        sigm = 1 / (1 + torch.exp(-x.float()))
        return (dy.float() * sigm * (1 + x.float() * (1 - sigm))).to(x.dtype)

    # 952us
    @classmethod
    def forward(cls, ctx, x, w1, b1, w2, b2, w3, b3):
        """SwiGLU forward, one op at a time: linear3(silu(linear1) * linear2)."""
        x1 = x @ w1.transpose(-2, -1) + b1  # 275us
        x2 = x @ w2.transpose(-2, -1) + b2  # 275us
        x3 = F.silu(x1)  # 62us
        x4 = x3 * x2  # 90us
        x5 = x4 @ w3.transpose(-2, -1) + b3  # 250us

        ctx.save_for_backward(x, w1, b1, w2, b2, w3, b3, x1, x2, x3, x4, x5)
        return x5

    # 1900us
    @classmethod
    def backward(cls, ctx, dx5):
        """Hand-written chain rule mirroring the forward decomposition."""
        saved_tensors = ctx.saved_tensors
        if cls.FORCE_BW_F32:
            dx5 = dx5.float()
            saved_tensors = [t.float() for t in ctx.saved_tensors]
        x, w1, b1, w2, b2, w3, b3, x1, x2, x3, x4, x5 = saved_tensors
        dx4 = dx5 @ w3  # 255us (nn)
        dw3 = dx5.transpose(-2, -1) @ x4  # 247us (nt)
        db3 = dx5.sum(0)  # 25us
        dx3 = dx4 * x2  # 88us
        dx2 = dx4 * x3  # 88us
        dx1 = cls._silu_backward(dx3, x1)  # 90us
        dx = dx2 @ w2  # 260us (nn)
        dw2 = dx2.transpose(-2, -1) @ x  # 245us (nt)
        db2 = dx2.sum(0)  # 50us
        dx += dx1 @ w1  # 260us (nn)
        dw1 = dx1.transpose(-2, -1) @ x  # 245us (nt)
        db1 = dx1.sum(0)  # 50us
        return (dx, dw1, db1, dw2, db2, dw3, db3)
Optional[torch.Tensor]]: + if not bias: + return (dy.transpose(-2, -1) @ x), None + db = torch.empty([dy.shape[1]], dtype=dy.dtype, device=dy.device) + dw = torch.empty([dy.shape[1], x.shape[1]], dtype=dy.dtype, device=dy.device) + GemmFusedSumOp.OPERATOR(dy.transpose(-2, -1), x, dw, db) + return dw, db + + @classmethod + @torch.cuda.amp.custom_bwd + def backward(cls, ctx, dx5): + x, w1, w2, w3, x1, x2 = ctx.saved_tensors + w1w2 = stack_or_none([w1, w2], dim=0) + + dx4 = dx5 @ w3 # 255us (nn) + dx1dx2, x4 = torch.ops.xformers.silu_bw_fused(x1, x2, dx4) + dx1, dx2 = dx1dx2.unbind(1) + del x1, x2, dx4 + + dw3, db3 = cls._linear_bw(dx5, x4, bias=ctx.bias[2]) + del x4, dx5 + if w1w2 is not None: + assert dx1dx2.is_contiguous() + assert w1w2.is_contiguous() + w1w2 = w1w2.view([w1.shape[0] * 2, w1.shape[1]]) + dx = dx1dx2.view([dx1.shape[0], 2 * dx1.shape[1]]) @ w1w2 + + # backward of linear1 + linear2 - packed + dw1dw2 = dx1dx2.view([dx1.shape[0], 2 * dx1.shape[1]]).transpose(-2, -1) @ x + dw1dw2, db1db2 = cls._linear_bw( + dx1dx2.view([dx1.shape[0], 2 * dx1.shape[1]]), x, bias=ctx.bias[0] + ) + dw1, dw2 = dw1dw2.view([2, *w1.shape]).unbind(0) + if ctx.bias[0]: + db1db2 = db1db2.view([2, dx1.shape[1]]) + db1, db2 = torch.unbind(db1db2, dim=0) + else: + db1 = db2 = None + else: + dx = dx2 @ w2 # 260us (nn) + torch.addmm( + dx, dx1, w1.to(dx1.dtype), beta=1, alpha=1, out=dx + ) # dx += dx1 @ w1 + dw2, db2 = cls._linear_bw(dx2, x, bias=ctx.bias[1]) + dw1, db1 = cls._linear_bw(dx1, x, bias=ctx.bias[0]) + return (dx, dw1, db1, dw2, db2, dw3, db3) + + +class SwiGLUOp: + """Base class for any swiglu operator in :attr:`xformers.ops.swiglu`""" + + def __init__(self, op, packed_weights: bool, name: str, constraints): + self.NAME = name + self.PACKED_WEIGHTS = packed_weights + self.op = op + self.constraints = constraints + + def supports(self, op: "SwiGLUOpDispatch") -> bool: + if self.PACKED_WEIGHTS and not op.packed_weights: + return False + return all(c(op) for c in 
self.constraints) + + def __call__(self, *args: Optional[torch.Tensor]) -> torch.Tensor: + raise NotImplementedError() + + def __str__(self) -> str: + return f"SwiGLUOp:{self.NAME}" + + +class _ForwardToPythonAutogradFunc(SwiGLUOp): + def supports(self, op: "SwiGLUOpDispatch") -> bool: + # Let's disable autocast in bf16 until this issue is fixed + # https://github.com/pytorch/pytorch/issues/87979 + if op.dtype_autocast_gpu == torch.bfloat16: + return False + return super().supports(op) + + def __call__(self, *args, **kwargs): + return self.op.apply(*args, **kwargs) + + +class _ForwardToFunc(SwiGLUOp): + def __call__(self, *args, **kwargs): + return self.op(*args, **kwargs) + + def info(self): + if self.op.__name__ == "no_such_operator": + return "not built" + return "available" + + +def _eager_functional_swiglu( + x: torch.Tensor, + w1: torch.Tensor, + b1: torch.Tensor, + w2: torch.Tensor, + b2: torch.Tensor, + w3: torch.Tensor, + b3: torch.Tensor, +) -> torch.Tensor: + x1 = F.linear(x, w1, b1) + x2 = F.linear(x, w2, b2) + hidden = F.silu(x1) * x2 + return F.linear(hidden, w3, b3) + + +@dataclass +class SwiGLUOpDispatch: + """Dispatcher to automatically select + the best operator in :attr:`xformers.ops.swiglu` + """ + + device: Union[torch.device, str] + dtype: torch.dtype + dtype_autocast_gpu: Optional[torch.dtype] + packed_weights: bool + bias_enabled: bool + + @property + def op(self) -> SwiGLUOp: + """Computes the best operator + + Returns: + SwiGLUOp: The best operator for the configuration + """ + priorities: Sequence[SwiGLUOp] = [ + SwiGLUPackedFusedOp, + SwiGLUFusedOp, + ] + for op in priorities: + if op.supports(self): + return op + return SwiGLUEagerOp + + @staticmethod + def from_arguments( + x: torch.Tensor, + w1: torch.Tensor, + b1: Optional[torch.Tensor], + w2: torch.Tensor, + b2: Optional[torch.Tensor], + w3: torch.Tensor, + b3: Optional[torch.Tensor], + ) -> "SwiGLUOpDispatch": + return SwiGLUOpDispatch( + device=x.device, + dtype=x.dtype, + 
@dataclass
class SwiGLUOpDispatch:
    """Dispatcher to automatically select
    the best operator in :attr:`xformers.ops.swiglu`
    """

    # Device the inputs live on (torch.device or its string form).
    device: Union[torch.device, str]
    # Input dtype.
    dtype: torch.dtype
    # Autocast target dtype on GPU; falls back to the weight dtype when
    # autocast is disabled.
    dtype_autocast_gpu: Optional[torch.dtype]
    # Whether w1/w2 (and b1/b2) share storage and can be used packed.
    packed_weights: bool
    # Whether all three biases are present.
    bias_enabled: bool

    @property
    def op(self) -> SwiGLUOp:
        """Computes the best operator

        Returns:
            SwiGLUOp: The best operator for the configuration
        """
        # Most specialized first; eager pytorch is the universal fallback.
        priorities: Sequence[SwiGLUOp] = [
            SwiGLUPackedFusedOp,
            SwiGLUFusedOp,
        ]
        for op in priorities:
            if op.supports(self):
                return op
        return SwiGLUEagerOp

    @staticmethod
    def from_arguments(
        x: torch.Tensor,
        w1: torch.Tensor,
        b1: Optional[torch.Tensor],
        w2: torch.Tensor,
        b2: Optional[torch.Tensor],
        w3: torch.Tensor,
        b3: Optional[torch.Tensor],
    ) -> "SwiGLUOpDispatch":
        """Build a dispatch descriptor from the actual swiglu arguments."""
        return SwiGLUOpDispatch(
            device=x.device,
            dtype=x.dtype,
            # Packed only when w1/w2 are adjacent views of one storage.
            packed_weights=stack_or_none((w1, w2), dim=0) is not None,
            dtype_autocast_gpu=torch.get_autocast_gpu_dtype()
            if torch.is_autocast_enabled()
            else w1.dtype,
            bias_enabled=b1 is not None and b2 is not None and b3 is not None,
        )


def _only_sm80(op: SwiGLUOpDispatch) -> bool:
    """Constraint: fused kernels require a CUDA device with compute
    capability >= 8.0 (A100+)."""
    device_type = op.device if isinstance(op.device, str) else op.device.type
    return device_type == "cuda" and torch.cuda.get_device_capability(op.device)[0] >= 8


def _only_half_or_autocast(op: SwiGLUOpDispatch) -> bool:
    """Constraint: inputs must be fp16/bf16, directly or via autocast."""
    HALF_DTYPES = [torch.half, torch.bfloat16]
    return op.dtype in HALF_DTYPES or (
        op.dtype_autocast_gpu is not None and op.dtype_autocast_gpu in HALF_DTYPES
    )


def _bias_enabled(op: SwiGLUOpDispatch) -> bool:
    """Constraint: all three linear layers carry a bias."""
    return op.bias_enabled


# Operator instances, from reference implementation to fully fused C++.
_SwiGLUDecomposedOp = _ForwardToPythonAutogradFunc(
    _SwiGLUDecomposedFunc, False, "decomposed", constraints=[_bias_enabled]
)
SwiGLUFusedOp = _ForwardToPythonAutogradFunc(
    _SwiGLUFusedFunc, False, "fused", constraints=[_only_sm80, _only_half_or_autocast]
)
SwiGLUPackedFusedOp = _ForwardToFunc(
    get_xformers_operator("swiglu_packedw"),
    True,
    "fused.p.cpp",
    constraints=[_only_sm80, _only_half_or_autocast],
)
SwiGLUEagerOp = _ForwardToFunc(
    _eager_functional_swiglu,
    False,
    "eager",
    constraints=[],
)


def _info() -> Dict[str, str]:
    """Build-status map (operator name -> "available"/"not built")."""
    return {op.NAME: op.info() for op in [SwiGLUPackedFusedOp]}
def swiglu(
    x: torch.Tensor,
    w1: torch.Tensor,
    b1: Optional[torch.Tensor],
    w2: torch.Tensor,
    b2: Optional[torch.Tensor],
    w3: torch.Tensor,
    b3: Optional[torch.Tensor],
    *,
    op: Optional[SwiGLUOp] = None,
) -> torch.Tensor:
    """
    Computes a SwiGLU block given the weights/bias of the 3
    linear layers.

    - It is recommended to keep ``op=None`` so the best implementation \
    available for the inputs will be used.

    :Equivalent pytorch code:

    .. code-block:: python

        x1 = F.linear(x, w1, b1)
        x2 = F.linear(x, w2, b2)
        hidden = F.silu(x1) * x2
        return F.linear(hidden, w3, b3)

    :Packing weights:

    To allow faster implementations, it's recommended to have w1/w2 come from the same storage, as in:

    .. code-block:: python

        w1, w2 = xformers.ops.unbind(w12, 0)

    :Supported hardware:

    This operator is only optimized on A100+ on ``torch.half`` or ``torch.bfloat16`` \
    (autocast is supported), and will fallback to a functional pytorch \
    implementation otherwise.
    """

    # Flatten leading batch dimensions; restored on the way out.
    batch_shape = x.shape[:-1]
    x = x.reshape([-1, x.shape[-1]])
    if w1.ndim != 2 or w1.shape != w2.shape:
        raise ValueError(f"Invalid shapes for w1: {w1.shape} / w2: {w2.shape}")
    if b1 is not None:
        if b1.ndim != 1 or b1.shape[0] != w1.shape[0]:
            raise ValueError(f"Invalid shapes for b1: {b1.shape}")
    if b2 is not None:
        if b2.ndim != 1 or b2.shape[0] != w2.shape[0]:
            raise ValueError(f"Invalid shapes for b2: {b2.shape}")
    if w3.ndim != 2 or w3.shape[1] != w2.shape[0]:
        raise ValueError(f"Invalid shape for w3: {w3.shape}")
    if b3 is not None:
        if b3.ndim != 1 or b3.shape[0] != w3.shape[0]:
            raise ValueError(f"Invalid shapes for w3: {w3.shape} / b3: {b3.shape}")

    if op is None:
        # Auto-select the fastest operator supported by these inputs.
        op = SwiGLUOpDispatch.from_arguments(x, w1, b1, w2, b2, w3, b3).op

    if not op.PACKED_WEIGHTS:
        return op(x, w1, b1, w2, b2, w3, b3).reshape([*batch_shape, -1])
    # Packed path: w1/w2 (and b1/b2) must be adjacent views of one storage.
    w1w2 = stack_or_none((w1, w2), dim=0)
    if b1 is not None and b2 is not None:
        b1b2: Optional[torch.Tensor] = stack_or_none((b1, b2), dim=0)
        if b1b2 is None:
            raise NotImplementedError("b1/b2 needs to be properly packed")
    else:
        b1b2 = None
        assert b1 is None and b2 is None

    if w1w2 is None:
        raise NotImplementedError("w1/w2 needs to be properly packed")
    return op(x, w1w2, b1b2, w3, b3).reshape([*batch_shape, -1])
def swiglu_packed(
    x: torch.Tensor,
    w1w2: torch.Tensor,
    b1b2: Optional[torch.Tensor],
    w3: torch.Tensor,
    b3: Optional[torch.Tensor],
    *,
    op: SwiGLUOp,
) -> torch.Tensor:
    """
    Computes a SwiGLU block from pre-packed w1/w2 (and b1/b2) weights.

    :Equivalent pytorch code:

    .. code-block:: python

        x1 = F.linear(x, w1, b1)
        x2 = F.linear(x, w2, b2)
        hidden = F.silu(x1) * x2
        return F.linear(hidden, w3, b3)

    :Supported hardware:

    This operator is only optimized on A100+ on ``torch.half`` or ``torch.bfloat16`` \
    (autocast is supported), and will fallback to a functional pytorch \
    implementation otherwise.
    """
    # Flatten leading batch dimensions; restored on the way out.
    lead_shape = x.shape[:-1]
    flat_x = x.reshape([-1, x.shape[-1]])

    if b3 is not None and (b3.ndim != 1 or b3.shape[0] != w3.shape[0]):
        raise ValueError(f"Invalid shapes for w3: {w3.shape} / b3: {b3.shape}")

    assert op.PACKED_WEIGHTS, "Not implemented PACKED_WEIGHTS"

    out = op(flat_x, w1w2, b1b2, w3, b3)
    return out.reshape([*lead_shape, -1])
+ """ + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + + self.w12: Optional[nn.Linear] + if _pack_weights: + self.w12 = nn.Linear(in_features, 2 * hidden_features, bias=bias) + else: + self.w12 = None + self.w1 = nn.Linear(in_features, hidden_features, bias=bias) + self.w2 = nn.Linear(in_features, hidden_features, bias=bias) + self.w3 = nn.Linear(hidden_features, out_features, bias=bias) + + self.hidden_features = hidden_features + self.out_features = out_features + self.in_features = in_features + self.op: Optional[SwiGLUOp] = None + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Computes :attr:`swiglu` with the module's weights + + Args: + x (torch.Tensor): A Tensor of shape ``[..., in_features]`` + + Returns: + torch.Tensor: A Tensor of shape ``[..., out_features]`` + """ + if self.w12 is not None: + if self.op is not None: + assert ( + self.op.PACKED_WEIGHTS + ), "_pack_weights and self.op.PACKED_WEIGHTS should match" + return swiglu_packed(x, *self._packed_ordered_params(), op=self.op) + + return swiglu(x, *self._ordered_params(), op=self.op) + + def _ordered_params( + self, + ) -> Tuple[ + torch.Tensor, + Optional[torch.Tensor], + torch.Tensor, + Optional[torch.Tensor], + torch.Tensor, + Optional[torch.Tensor], + ]: + """Used for testing - returns ordered arguments for operators""" + b1: Optional[torch.Tensor] + b2: Optional[torch.Tensor] + if self.w12 is not None: + w1w2 = self.w12.weight + b1b2 = self.w12.bias + w1, w2 = unbind( + w1w2.view([2, w1w2.shape[0] // 2, w1w2.shape[1]]), + dim=0, + ) + if b1b2 is not None: + b1, b2 = unbind(b1b2.view([2, b1b2.shape[0] // 2]), dim=0) + else: + b1, b2 = None, None + else: + w1, w2 = self.w1.weight, self.w2.weight + b1, b2 = self.w1.bias, self.w2.bias + + return ( + w1, + b1, + w2, + b2, + self.w3.weight, + self.w3.bias, + ) + + def _packed_ordered_params( + self, + ) -> Tuple[ + torch.Tensor, + Optional[torch.Tensor], + torch.Tensor, + 
Optional[torch.Tensor], + ]: + assert self.w12 is not None, "Packed weights are only available when using w12" + + """Used for testing - returns ordered arguments for packed operators""" + w1w2 = self.w12.weight + b1b2_param = self.w12.bias + + w1w2 = w1w2.view([2, w1w2.shape[0] // 2, w1w2.shape[1]]) + + b1b2: Optional[torch.Tensor] = None + if b1b2_param is not None: + b1b2 = b1b2_param.view([2, b1b2_param.shape[0] // 2]) + + return ( + w1w2, + b1b2, + self.w3.weight, + self.w3.bias, + ) diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/tiled_matmul.py b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/tiled_matmul.py new file mode 100644 index 0000000000000000000000000000000000000000..99cf8a6081dfe0a5ed7526301fbb53715a681387 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/xformers/ops/tiled_matmul.py @@ -0,0 +1,247 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. + + +import os +from typing import List, Optional + +import torch +import torch.multiprocessing.reductions +from torch.utils._pytree import tree_flatten, tree_unflatten +from typing_extensions import Annotated + +from .. import _is_triton_available +from .common import Alias, make_pytorch_operator_for_dispatch_key + +if _is_triton_available(): + from ._triton.tiled_matmul_kernels import _launch_triton_matmul + + TRITON_IS_AVAILABLE = True +else: + TRITON_IS_AVAILABLE = False + + +# Copied over from the sequence parallel fused ops. 
+def _should_use_triton(device: torch.device, dtype: torch.dtype) -> bool: + if not int(os.getenv("XFORMERS_TILED_MATMUL_ENABLE_TRITON", "1")): + return False + if not TRITON_IS_AVAILABLE: + return False + device_capability = torch.cuda.get_device_capability(device) + # Triton seems to be having issues on P100 and V100 GPUs, such as + # https://github.com/openai/triton/issues/1609 + # https://github.com/openai/triton/issues/1610 + # https://github.com/openai/triton/issues/1257#issuecomment-1532616965 + # and, in recent Triton versions (Jan 2024), returning wrong values. + if device_capability < (8, 0): + return False + return True + + +# We can't use make_pytorch_cuda_operator because PyTorch isn't able to inspect +# Tensor[][] args to detect they contain CUDA args. Thus we need to register +# this as a fallback implementation, so it gets invoked regardless of the args. +# See: https://github.com/pytorch/pytorch/issues/113022 +@make_pytorch_operator_for_dispatch_key("") +def tiled_matmul_fwd( + a: List[List[torch.Tensor]], + b: List[List[torch.Tensor]], + out: Optional[List[List[Annotated[torch.Tensor, Alias("a", write=True)]]]] = None, +) -> List[List[Annotated[torch.Tensor, Alias("a", write=True)]]]: + assert len(a) >= 1 and len(a[0]) >= 1 and all(len(row) == len(a[0]) for row in a), ( + "the first operand must be a non-empty two-dimensional regular list of lists " + "of tenors" + ) + assert len(b) >= 1 and len(b[0]) >= 1 and all(len(row) == len(b[0]) for row in b), ( + "the second operand must be a non-empty two-dimensional regular list of lists " + "of tenors" + ) + + m_tiles = len(a) + k_tiles = len(a[0]) + assert len(b) == k_tiles, ( + "the first operand's inner dimension must match the second operand's outer " + f"dimension, got {k_tiles} and {len(b)}" + ) + n_tiles = len(b[0]) + + ms = [a[tile_m][0].shape[0] for tile_m in range(m_tiles)] + ns = [b[0][tile_n].shape[1] for tile_n in range(n_tiles)] + aks = [a[0][tile_k].shape[1] for tile_k in range(k_tiles)] 
+ bks = [b[tile_k][0].shape[0] for tile_k in range(k_tiles)] + + for tile_m in range(m_tiles): + for tile_k in range(k_tiles): + assert a[tile_m][tile_k].shape[0] == ms[tile_m], ( + f"the tensors on row {tile_m} of the first operand must all have the " + f"same size along the m dimension, got {ms[tile_m]} at position 0 and " + f"{a[tile_m][tile_k].shape[0]} at position {tile_k}" + ) + assert a[tile_m][tile_k].shape[1] == aks[tile_k], ( + f"the tensors on column {tile_k} of the first operand must all have " + f"the same size along the k dimension, got {aks[tile_k]} at position 0 " + f"and {a[tile_m][tile_k].shape[1]} at position {tile_m}" + ) + + for tile_n in range(n_tiles): + for tile_k in range(k_tiles): + assert b[tile_k][tile_n].shape[0] == bks[tile_k], ( + f"the tensors on row {tile_k} of the second operand must all have the " + f"same size along the k dimension, got {bks[tile_k]} at position 0 and " + f"{b[tile_k][tile_n].shape[0]} at position {tile_n}" + ) + assert b[tile_k][tile_n].shape[1] == ns[tile_n], ( + f"the tensors on column {tile_n} of the second operand must all have " + f"the same size along the n dimension, got {ns[tile_n]} at position 0 " + f"and {b[tile_k][tile_n].shape[1]} at position {tile_k}" + ) + + for tile_k in range(k_tiles): + assert aks[tile_k] == bks[tile_k], ( + f"the tensors on column {tile_k} of the first operand and those on row " + f"{tile_k} of the second operand must have the same size along the k " + f"dimension, got {aks[tile_k]} and {bks[tile_k]}" + ) + ks = aks + + if out is not None: + assert ( + len(out) >= 1 + and len(out[0]) >= 1 + and all(len(row) == len(out[0]) for row in out) + ), "out must be a non-empty two-dimensional regular list of lists of tenors" + assert len(out) == m_tiles + assert len(out[0]) == n_tiles + cms = [out[tile_m][0].shape[0] for tile_m in range(m_tiles)] + cns = [out[0][tile_n].shape[1] for tile_n in range(n_tiles)] + for tile_m in range(m_tiles): + for tile_n in range(n_tiles): + assert 
out[tile_m][tile_n].shape[0] == cms[tile_m], ( + f"the tensors on row {tile_m} of out must all have the same size " + f"along the m dimension, got {cms[tile_m]} at position 0 and " + f"{out[tile_m][tile_n].shape[0]} at position {tile_n}" + ) + assert out[tile_m][tile_n].shape[1] == cns[tile_n], ( + f"the tensors on column {tile_n} of out must all have the same " + f"size along the k dimension, got {cns[tile_n]} at position 0 and " + f"{out[tile_m][tile_n].shape[1]} at position {tile_m}" + ) + for tile_m in range(m_tiles): + assert cms[tile_m] == ms[tile_m], ( + f"the tensors on row {tile_m} of out and those on row {tile_m} of the " + f"first operand must have the same size along the m dimension, got " + f"{cms[tile_m]} and {ms[tile_m]}" + ) + for tile_n in range(n_tiles): + assert cns[tile_n] == ns[tile_n], ( + f"the tensors on column {tile_n} of out and those on column {tile_n} " + f"of the second operand must have the same size along the n dimension, " + f"got {cns[tile_n]} and {ns[tile_n]}" + ) + c = out + else: + c = [[a[0][0].new_empty((m, n)) for n in ns] for m in ms] + + # TODO We can try merging tiles that come from contiguous memory, using + # stack_or_none, to further improve performance. + + # Because the Triton kernel is hardcoded for maximum three tiles. + # Because, in turn, we aimed this at the fusion of wq/wk/wv. 
+ if ( + m_tiles <= 3 + and k_tiles <= 3 + and n_tiles <= 3 + and _should_use_triton(a[0][0].device, a[0][0].dtype) + ): + _launch_triton_matmul(a, b, c, ms, ns, ks) + else: + for tile_m in range(len(ms)): + for tile_n in range(len(ns)): + torch.mm(a[tile_m][0], b[0][tile_n], out=c[tile_m][tile_n]) + for tile_k in range(1, len(ks)): + c[tile_m][tile_n].addmm_(a[tile_m][tile_k], b[tile_k][tile_n]) + + return c + + +def _transpose(x: List[List[torch.Tensor]]) -> List[List[torch.Tensor]]: + return [[t.t() for t in y] for y in zip(*x)] + + +class _TiledMatmul(torch.autograd.Function): + @staticmethod + def forward(ctx, ab_tree_spec, *ab_tree_values): + ctx.ab_tree_spec = ab_tree_spec + ctx.save_for_backward(*ab_tree_values) + a, b = tree_unflatten(list(ab_tree_values), ab_tree_spec) + + c = tiled_matmul_fwd(a, b) + + c_tree_values, c_tree_spec = tree_flatten(c) + ctx.c_tree_spec = c_tree_spec + return (c_tree_spec,) + tuple(c_tree_values) + + @staticmethod + def backward(ctx, _none, *grad_c_tree_values): + a, b = tree_unflatten(list(ctx.saved_tensors), ctx.ab_tree_spec) + grad_c = tree_unflatten(list(grad_c_tree_values), ctx.c_tree_spec) + + grad_a = tiled_matmul_fwd(grad_c, _transpose(b)) + grad_b = tiled_matmul_fwd(_transpose(a), grad_c) + + grad_ab_tree_values, grad_ab_tree_spec = tree_flatten((grad_a, grad_b)) + return (None,) + tuple(grad_ab_tree_values) + + +def tiled_matmul( + a: List[List[torch.Tensor]], + b: List[List[torch.Tensor]], +) -> List[List[torch.Tensor]]: + """Multiply two matrices given as grids of tiles + + It performs the matmul between A and B, which are given as two-dimensional + grids of tiles (i.e., blocks), represented as lists of lists of tensors. + The output will itself be a matrix in such a form. Formally: + + out[m][n] = sum(a[m][k] @ b[k][n] for k in range(...)) + + with the obvious constraints needed to make it work, in terms of number of + tiles and sizes of each tile. 
+ + The interest of this operator is to improve performance by avoding wave + quantization effects when doing independent matrix multiplications in + series. Sometimes, when these matmuls have one operand in common, this can + also be addressed by concatenating the other operands into a single matrix, + and issuing a single matmul. However this isn't always possible (e.g., might + break the checkpoint format) and it's an anti-pattern, as it obscures the + logic (e.g., changing the modelling code out of performance reasons). This + tiled matmul performs the same computation as if the matrices were merged, + without merging them, simply through a smarter memory addressing scheme. + + The tiled matmul is less generic than a grouped matmul, which can also help + with wave quantization, and doesn't need the matmuls to have the same lhs + or rhs operand. However, a grouped matmul will write the result of each + matmul to a separate output matrix, whereas the tiled matmul allows to add + them together into a single output. This is needed during the backward pass + of a linear layer, and it's the reason we wrote this instead of using a + grouped matmul. + + The tiled matmul is implemented using a custom Triton kernel, which puts + constraints on the strides of the tiles. All rows of A must have the same + K stride, all columns of A must have the same M stride, and so on. + + Currently the tiled matmul supports at most three tiles on each dimension, + although fewer can also be given. This is because we needed it to fuse the + query, key and value weights of an attention layer. This limit can be + increased if needed. + + This operator is differentiable. 
+ + """ + ab_tree_values, ab_tree_spec = tree_flatten((a, b)) + c_tree_spec, *c_tree_values = _TiledMatmul.apply(ab_tree_spec, *ab_tree_values) + c = tree_unflatten(list(c_tree_values), c_tree_spec) + + return c diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/triton/__pycache__/k_activations.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/xformers/triton/__pycache__/k_activations.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..44527745fc08094fa8a884cc72173ef41db16fc6 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/xformers/triton/__pycache__/k_activations.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/triton/__pycache__/k_fused_matmul_fw.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/xformers/triton/__pycache__/k_fused_matmul_fw.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..246f949fac0247a542401a0763663fb6e42c22ec Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/xformers/triton/__pycache__/k_fused_matmul_fw.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/triton/__pycache__/k_softmax.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/xformers/triton/__pycache__/k_softmax.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b16ad0ea0845d8ee51ff03a25193dde7646ac04b Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/xformers/triton/__pycache__/k_softmax.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/xformers/triton/__pycache__/vararg_kernel.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/xformers/triton/__pycache__/vararg_kernel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25300356b095c673f1ad82f2acdf9b196f715d80 Binary files /dev/null and 
b/evalkit_tf437/lib/python3.10/site-packages/xformers/triton/__pycache__/vararg_kernel.cpython-310.pyc differ