|
|
|
|
|
|
|
|
|
|
|
|
|
|
#include "flash_common.hpp" |
|
|
|
|
|
std::vector<at::Tensor> |
|
|
mha_fwd(at::Tensor &q, |
|
|
const at::Tensor &k, |
|
|
const at::Tensor &v, |
|
|
std::optional<at::Tensor> &out_, |
|
|
std::optional<at::Tensor> &alibi_slopes_, |
|
|
const float p_dropout, |
|
|
const float softmax_scale, |
|
|
bool is_causal, |
|
|
int window_size_left, |
|
|
int window_size_right, |
|
|
const float softcap, |
|
|
const bool return_softmax, |
|
|
std::optional<at::Generator> gen_); |
|
|
|
|
|
std::vector<at::Tensor> |
|
|
mha_varlen_fwd(at::Tensor &q, |
|
|
const at::Tensor &k, |
|
|
const at::Tensor &v, |
|
|
std::optional<at::Tensor> &out_, |
|
|
const at::Tensor &cu_seqlens_q, |
|
|
const at::Tensor &cu_seqlens_k, |
|
|
std::optional<at::Tensor> &seqused_k, |
|
|
std::optional<const at::Tensor> &leftpad_k_, |
|
|
std::optional<at::Tensor> &block_table_, |
|
|
std::optional<at::Tensor> &alibi_slopes_, |
|
|
int max_seqlen_q, |
|
|
const int max_seqlen_k, |
|
|
const float p_dropout, |
|
|
const float softmax_scale, |
|
|
const bool zero_tensors, |
|
|
bool is_causal, |
|
|
int window_size_left, |
|
|
int window_size_right, |
|
|
const float softcap, |
|
|
const bool return_softmax, |
|
|
std::optional<at::Generator> gen_); |
|
|
|
|
|
std::vector<at::Tensor> |
|
|
mha_bwd(const at::Tensor &dout, |
|
|
const at::Tensor &q, |
|
|
const at::Tensor &k, |
|
|
const at::Tensor &v, |
|
|
const at::Tensor &out, |
|
|
const at::Tensor &softmax_lse, |
|
|
std::optional<at::Tensor> &dq_, |
|
|
std::optional<at::Tensor> &dk_, |
|
|
std::optional<at::Tensor> &dv_, |
|
|
std::optional<at::Tensor> &alibi_slopes_, |
|
|
const float p_dropout, |
|
|
const float softmax_scale, |
|
|
const bool is_causal, |
|
|
int window_size_left, |
|
|
int window_size_right, |
|
|
const float softcap, |
|
|
const bool deterministic, |
|
|
std::optional<at::Generator> gen_, |
|
|
std::optional<at::Tensor> &rng_state); |
|
|
|
|
|
std::vector<at::Tensor> |
|
|
mha_varlen_bwd(const at::Tensor &dout, |
|
|
const at::Tensor &q, |
|
|
const at::Tensor &k, |
|
|
const at::Tensor &v, |
|
|
const at::Tensor &out, |
|
|
const at::Tensor &softmax_lse, |
|
|
std::optional<at::Tensor> &dq_, |
|
|
std::optional<at::Tensor> &dk_, |
|
|
std::optional<at::Tensor> &dv_, |
|
|
const at::Tensor &cu_seqlens_q, |
|
|
const at::Tensor &cu_seqlens_k, |
|
|
std::optional<at::Tensor> &alibi_slopes_, |
|
|
const int max_seqlen_q, |
|
|
const int max_seqlen_k, |
|
|
const float p_dropout, |
|
|
const float softmax_scale, |
|
|
const bool zero_tensors, |
|
|
const bool is_causal, |
|
|
int window_size_left, |
|
|
int window_size_right, |
|
|
const float softcap, |
|
|
const bool deterministic, |
|
|
std::optional<at::Generator> gen_, |
|
|
std::optional<at::Tensor> &rng_state); |
|
|
|
|
|
std::vector<at::Tensor> |
|
|
mha_fwd_kvcache(at::Tensor &q, |
|
|
const at::Tensor &kcache, |
|
|
const at::Tensor &vcache, |
|
|
std::optional<const at::Tensor> &k_, |
|
|
std::optional<const at::Tensor> &v_, |
|
|
std::optional<const at::Tensor> &seqlens_k_, |
|
|
std::optional<const at::Tensor> &rotary_cos_, |
|
|
std::optional<const at::Tensor> &rotary_sin_, |
|
|
std::optional<const at::Tensor> &cache_batch_idx_, |
|
|
std::optional<const at::Tensor> &leftpad_k_, |
|
|
std::optional<at::Tensor> &block_table_, |
|
|
std::optional<at::Tensor> &alibi_slopes_, |
|
|
std::optional<at::Tensor> &out_, |
|
|
const float softmax_scale, |
|
|
bool is_causal, |
|
|
int window_size_left, |
|
|
int window_size_right, |
|
|
const float softcap, |
|
|
bool is_rotary_interleaved, |
|
|
int num_splits); |
|
|
|
|
|
// Python bindings: expose the five attention entry points on the extension
// module. TORCH_EXTENSION_NAME is supplied by the PyTorch build so the
// compiled module matches the name Python imports.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
{
    m.doc() = "FlashAttention";
    m.def("fwd", &mha_fwd, "Forward pass");
    m.def("varlen_fwd", &mha_varlen_fwd, "Forward pass (variable length)");
    m.def("bwd", &mha_bwd, "Backward pass");
    m.def("varlen_bwd", &mha_varlen_bwd, "Backward pass (variable length)");
    m.def("fwd_kvcache", &mha_fwd_kvcache, "Forward pass, with KV-cache");
}
|
|
|