fix: trans_a=True also needs to be replaced with tl.trans()
Browse files- flash_attn_triton.py +2 -2
flash_attn_triton.py
CHANGED
|
@@ -491,7 +491,7 @@ def _bwd_kernel_one_col_block(
|
|
| 491 |
# else:
|
| 492 |
# do = tl.load(do_ptrs, mask=(offs_m_curr[:, None] < seqlen_q)
|
| 493 |
# & (offs_d[None, :] < headdim), other=0.0)
|
| 494 |
-
dv += tl.dot(p.to(do.dtype), do, trans_a=True)
|
| 495 |
# compute dp = dot(v, do)
|
| 496 |
# There seems to be a race condition when headdim=48/96, and dq, dk are wrong.
|
| 497 |
# Also wrong for headdim=128, seqlen=(108, 256), and ATOMIC_ADD=True
|
|
@@ -509,7 +509,7 @@ def _bwd_kernel_one_col_block(
|
|
| 509 |
# for BLOCK_HEADDIM=128
|
| 510 |
ds = (p * (dp - Di[:, None]) * softmax_scale).to(q.dtype)
|
| 511 |
# compute dk = dot(ds.T, q)
|
| 512 |
-
dk += tl.dot(ds, q, trans_a=True)
|
| 513 |
# compute dq
|
| 514 |
if not ATOMIC_ADD:
|
| 515 |
if EVEN_M & EVEN_HEADDIM: # Race condition if we just do EVEN_M
|
|
|
|
| 491 |
# else:
|
| 492 |
# do = tl.load(do_ptrs, mask=(offs_m_curr[:, None] < seqlen_q)
|
| 493 |
# & (offs_d[None, :] < headdim), other=0.0)
|
| 494 |
+
dv += tl.dot(tl.trans(p.to(do.dtype)), do) # see issue: https://github.com/Dao-AILab/flash-attention/issues/508
|
| 495 |
# compute dp = dot(v, do)
|
| 496 |
# There seems to be a race condition when headdim=48/96, and dq, dk are wrong.
|
| 497 |
# Also wrong for headdim=128, seqlen=(108, 256), and ATOMIC_ADD=True
|
|
|
|
| 509 |
# for BLOCK_HEADDIM=128
|
| 510 |
ds = (p * (dp - Di[:, None]) * softmax_scale).to(q.dtype)
|
| 511 |
# compute dk = dot(ds.T, q)
|
| 512 |
+
dk += tl.dot(tl.trans(ds), q)
|
| 513 |
# compute dq
|
| 514 |
if not ATOMIC_ADD:
|
| 515 |
if EVEN_M & EVEN_HEADDIM: # Race condition if we just do EVEN_M
|