|
|
#include "llama-graph.h" |
|
|
|
|
|
#include "llama-impl.h" |
|
|
#include "llama-batch.h" |
|
|
#include "llama-cparams.h" |
|
|
|
|
|
#include "llama-kv-cache-unified.h" |
|
|
#include "llama-kv-cache-unified-iswa.h" |
|
|
#include "llama-memory-hybrid.h" |
|
|
#include "llama-memory-recurrent.h" |
|
|
|
|
|
#include <cassert> |
|
|
#include <cmath> |
|
|
#include <cstring> |
|
|
|
|
|
void llm_graph_input_embd::set_input(const llama_ubatch * ubatch) { |
|
|
if (ubatch->token) { |
|
|
const int64_t n_tokens = ubatch->n_tokens; |
|
|
|
|
|
ggml_backend_tensor_set(tokens, ubatch->token, 0, n_tokens*ggml_element_size(tokens)); |
|
|
} |
|
|
|
|
|
if (ubatch->embd) { |
|
|
const int64_t n_embd = embd->ne[0]; |
|
|
const int64_t n_tokens = ubatch->n_tokens; |
|
|
|
|
|
ggml_backend_tensor_set(embd, ubatch->embd, 0, n_tokens*n_embd*ggml_element_size(embd)); |
|
|
} |
|
|
} |
|
|
|
|
|
bool llm_graph_input_embd::can_reuse(const llm_graph_params & params) { |
|
|
bool res = true; |
|
|
|
|
|
res &= (!tokens && !params.ubatch.token) || (tokens && tokens->ne[0] == params.ubatch.n_tokens); |
|
|
    res &= (!embd   && !params.ubatch.embd)  || (embd   && embd->ne[1] == params.ubatch.n_tokens); // embd is [n_embd, n_tokens]
|
|
|
|
|
return res; |
|
|
} |
|
|
|
|
|
void llm_graph_input_pos::set_input(const llama_ubatch * ubatch) { |
|
|
if (ubatch->pos && pos) { |
|
|
const int64_t n_tokens = ubatch->n_tokens; |
|
|
|
|
|
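            // M-RoPE (n_pos_per_embd == 4) with plain text tokens: replicate the 1D position into
            // the first three sections and set the fourth to zero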
if (ubatch->token && n_pos_per_embd == 4) { |
|
|
|
|
|
|
|
|
std::vector<llama_pos> pos_data(n_tokens*n_pos_per_embd); |
|
|
|
|
|
for (int i = 0; i < n_tokens; ++i) { |
|
|
pos_data[ i] = ubatch->pos[i]; |
|
|
pos_data[ n_tokens + i] = ubatch->pos[i]; |
|
|
pos_data[2 * n_tokens + i] = ubatch->pos[i]; |
|
|
pos_data[3 * n_tokens + i] = 0; |
|
|
} |
|
|
ggml_backend_tensor_set(pos, pos_data.data(), 0, pos_data.size()*ggml_element_size(pos)); |
|
|
} else { |
|
|
ggml_backend_tensor_set(pos, ubatch->pos, 0, n_tokens*n_pos_per_embd*ggml_element_size(pos)); |
|
|
} |
|
|
} |
|
|
} |
|
|
|
|
|
bool llm_graph_input_pos::can_reuse(const llm_graph_params & params) { |
|
|
bool res = true; |
|
|
|
|
|
res &= pos->ne[0] == params.ubatch.n_tokens; |
|
|
|
|
|
return res; |
|
|
} |
|
|
|
|
|
void llm_graph_input_attn_temp::set_input(const llama_ubatch * ubatch) { |
|
|
if (ubatch->pos && attn_scale) { |
|
|
const int64_t n_tokens = ubatch->n_tokens; |
|
|
|
|
|
std::vector<float> attn_scale_data(n_tokens, 0.0f); |
|
|
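        // attention temperature scaling (used e.g. by Llama 4):
        //   scale_i = 1 + f_attn_temp_scale * log(1 + floor((pos_i + 1) / n_attn_temp_floor_scale))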
for (int i = 0; i < n_tokens; ++i) { |
|
|
const float pos = ubatch->pos[i]; |
|
|
attn_scale_data[i] = std::log( |
|
|
std::floor((pos + 1.0f) / n_attn_temp_floor_scale) + 1.0 |
|
|
) * f_attn_temp_scale + 1.0; |
|
|
} |
|
|
|
|
|
ggml_backend_tensor_set(attn_scale, attn_scale_data.data(), 0, n_tokens*ggml_element_size(attn_scale)); |
|
|
} |
|
|
} |
|
|
|
|
|
void llm_graph_input_pos_bucket::set_input(const llama_ubatch * ubatch) { |
|
|
if (pos_bucket) { |
|
|
const int64_t n_tokens = ubatch->n_tokens; |
|
|
|
|
|
GGML_ASSERT(ggml_backend_buffer_is_host(pos_bucket->buffer)); |
|
|
GGML_ASSERT(!ubatch->equal_seqs()); |
|
|
|
|
|
int32_t * data = (int32_t *) pos_bucket->data; |
|
|
|
|
|
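        // fill the [n_tokens, n_tokens] matrix with T5-style relative position buckets (bidirectional)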
for (int h = 0; h < 1; ++h) { |
|
|
for (int j = 0; j < n_tokens; ++j) { |
|
|
for (int i = 0; i < n_tokens; ++i) { |
|
|
data[h*(n_tokens*n_tokens) + j*n_tokens + i] = llama_relative_position_bucket(ubatch->pos[i], ubatch->pos[j], hparams.n_rel_attn_bkts, true); |
|
|
} |
|
|
} |
|
|
} |
|
|
} |
|
|
} |
|
|
|
|
|
void llm_graph_input_pos_bucket_kv::set_input(const llama_ubatch * ubatch) { |
|
|
if (pos_bucket) { |
|
|
mctx->set_input_pos_bucket(pos_bucket, ubatch); |
|
|
} |
|
|
} |
|
|
|
|
|
void llm_graph_input_out_ids::set_input(const llama_ubatch * ubatch) { |
|
|
GGML_ASSERT(out_ids); |
|
|
|
|
|
const int64_t n_tokens = ubatch->n_tokens; |
|
|
|
|
|
GGML_ASSERT(ggml_backend_buffer_is_host(out_ids->buffer)); |
|
|
int32_t * data = (int32_t *) out_ids->data; |
|
|
|
|
|
if (n_outputs == n_tokens) { |
|
|
for (int i = 0; i < n_tokens; ++i) { |
|
|
data[i] = i; |
|
|
} |
|
|
|
|
|
return; |
|
|
} |
|
|
|
|
|
GGML_ASSERT(ubatch->output); |
|
|
|
|
|
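    // count the tokens that are flagged for output
    // note: the local n_outputs intentionally shadows the member of the same name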
int n_outputs = 0; |
|
|
|
|
|
for (int i = 0; i < n_tokens; ++i) { |
|
|
if (ubatch->output[i]) { |
|
|
data[n_outputs++] = i; |
|
|
} |
|
|
} |
|
|
} |
|
|
|
|
|
bool llm_graph_input_out_ids::can_reuse(const llm_graph_params & params) { |
|
|
bool res = true; |
|
|
|
|
|
res &= n_outputs == params.n_outputs; |
|
|
|
|
|
return res; |
|
|
} |
|
|
|
|
|
void llm_graph_input_mean::set_input(const llama_ubatch * ubatch) { |
|
|
if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_MEAN) { |
|
|
const int64_t n_tokens = ubatch->n_tokens; |
|
|
const int64_t n_seq_tokens = ubatch->n_seq_tokens; |
|
|
const int64_t n_seqs_unq = ubatch->n_seqs_unq; |
|
|
|
|
|
GGML_ASSERT(mean); |
|
|
GGML_ASSERT(ggml_backend_buffer_is_host(mean->buffer)); |
|
|
|
|
|
float * data = (float *) mean->data; |
|
|
memset(mean->data, 0, n_tokens*n_seqs_unq*ggml_element_size(mean)); |
|
|
|
|
|
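        // accumulate the token count per unique sequence; the mean tensor then stores 1/count
        // weights so that the matmul in build_pooling() yields per-sequence mean embeddings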
std::vector<uint64_t> sums(n_seqs_unq, 0); |
|
|
for (int i = 0; i < n_tokens; i += n_seq_tokens) { |
|
|
for (int s = 0; s < ubatch->n_seq_id[i]; ++s) { |
|
|
const llama_seq_id seq_id = ubatch->seq_id[i][s]; |
|
|
const int32_t seq_idx = ubatch->seq_idx[seq_id]; |
|
|
|
|
|
sums[seq_idx] += ubatch->n_seq_tokens; |
|
|
} |
|
|
} |
|
|
|
|
|
std::vector<float> div(n_seqs_unq, 0.0f); |
|
|
for (int s = 0; s < n_seqs_unq; ++s) { |
|
|
const uint64_t sum = sums[s]; |
|
|
if (sum > 0) { |
|
|
div[s] = 1.0f/float(sum); |
|
|
} |
|
|
} |
|
|
|
|
|
for (int i = 0; i < n_tokens; i += n_seq_tokens) { |
|
|
for (int s = 0; s < ubatch->n_seq_id[i]; ++s) { |
|
|
const llama_seq_id seq_id = ubatch->seq_id[i][s]; |
|
|
const int32_t seq_idx = ubatch->seq_idx[seq_id]; |
|
|
|
|
|
for (int j = 0; j < n_seq_tokens; ++j) { |
|
|
data[seq_idx*n_tokens + i + j] = div[seq_idx]; |
|
|
} |
|
|
} |
|
|
} |
|
|
} |
|
|
} |
|
|
|
|
|
void llm_graph_input_cls::set_input(const llama_ubatch * ubatch) { |
|
|
const int64_t n_tokens = ubatch->n_tokens; |
|
|
const int64_t n_seq_tokens = ubatch->n_seq_tokens; |
|
|
const int64_t n_seqs_unq = ubatch->n_seqs_unq; |
|
|
|
|
|
if (cparams.embeddings && ( |
|
|
cparams.pooling_type == LLAMA_POOLING_TYPE_CLS || |
|
|
cparams.pooling_type == LLAMA_POOLING_TYPE_RANK |
|
|
)) { |
|
|
GGML_ASSERT(cls); |
|
|
GGML_ASSERT(ggml_backend_buffer_is_host(cls->buffer)); |
|
|
|
|
|
uint32_t * data = (uint32_t *) cls->data; |
|
|
memset(cls->data, 0, n_seqs_unq*ggml_element_size(cls)); |
|
|
|
|
|
for (int i = 0; i < n_tokens; i += n_seq_tokens) { |
|
|
for (int s = 0; s < ubatch->n_seq_id[i]; ++s) { |
|
|
const llama_seq_id seq_id = ubatch->seq_id[i][s]; |
|
|
const int32_t seq_idx = ubatch->seq_idx[seq_id]; |
|
|
|
|
|
data[seq_idx] = i; |
|
|
} |
|
|
} |
|
|
} |
|
|
|
|
|
if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_LAST) { |
|
|
GGML_ASSERT(cls); |
|
|
GGML_ASSERT(ggml_backend_buffer_is_host(cls->buffer)); |
|
|
|
|
|
uint32_t * data = (uint32_t *) cls->data; |
|
|
memset(cls->data, 0, n_seqs_unq*ggml_element_size(cls)); |
|
|
|
|
|
std::vector<int> last_pos(n_seqs_unq, -1); |
|
|
std::vector<int> last_row(n_seqs_unq, -1); |
|
|
|
|
|
for (int i = 0; i < n_tokens; ++i) { |
|
|
const llama_pos pos = ubatch->pos[i]; |
|
|
|
|
|
for (int s = 0; s < ubatch->n_seq_id[i]; ++s) { |
|
|
const llama_seq_id seq_id = ubatch->seq_id[i][s]; |
|
|
const int32_t seq_idx = ubatch->seq_idx[seq_id]; |
|
|
|
|
|
if (pos >= last_pos[seq_idx]) { |
|
|
last_pos[seq_idx] = pos; |
|
|
last_row[seq_idx] = i; |
|
|
} |
|
|
} |
|
|
} |
|
|
|
|
|
for (int s = 0; s < n_seqs_unq; ++s) { |
|
|
if (last_row[s] >= 0) { |
|
|
data[s] = last_row[s]; |
|
|
} |
|
|
} |
|
|
} |
|
|
} |
|
|
|
|
|
void llm_graph_input_rs::set_input(const llama_ubatch * ubatch) { |
|
|
GGML_UNUSED(ubatch); |
|
|
|
|
|
const int64_t n_rs = mctx->get_n_rs(); |
|
|
|
|
|
if (s_copy) { |
|
|
GGML_ASSERT(ggml_backend_buffer_is_host(s_copy->buffer)); |
|
|
int32_t * data = (int32_t *) s_copy->data; |
|
|
|
|
|
|
|
|
for (uint32_t i = 0; i < n_rs; ++i) { |
|
|
data[i] = mctx->s_copy(i); |
|
|
} |
|
|
} |
|
|
} |
|
|
|
|
|
void llm_graph_input_cross_embd::set_input(const llama_ubatch * ubatch) { |
|
|
GGML_UNUSED(ubatch); |
|
|
|
|
|
if (cross_embd && !cross->v_embd.empty()) { |
|
|
assert(cross_embd->type == GGML_TYPE_F32); |
|
|
|
|
|
ggml_backend_tensor_set(cross_embd, cross->v_embd.data(), 0, ggml_nbytes(cross_embd)); |
|
|
} |
|
|
} |
|
|
|
|
|
void llm_graph_input_attn_no_cache::set_input(const llama_ubatch * ubatch) { |
|
|
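    // without a KV cache, attention runs over the current ubatch only, so the KV length equals n_tokens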
const int64_t n_kv = ubatch->n_tokens; |
|
|
const int64_t n_tokens = ubatch->n_tokens; |
|
|
|
|
|
GGML_ASSERT(kq_mask); |
|
|
GGML_ASSERT(ggml_backend_buffer_is_host(kq_mask->buffer)); |
|
|
|
|
|
float * data = (float *) kq_mask->data; |
|
|
|
|
|
for (int h = 0; h < 1; ++h) { |
|
|
for (int i1 = 0; i1 < n_tokens; ++i1) { |
|
|
const llama_seq_id s1 = ubatch->seq_id[i1][0]; |
|
|
|
|
|
for (int i0 = 0; i0 < n_tokens; ++i0) { |
|
|
float f = -INFINITY; |
|
|
|
|
|
for (int s = 0; s < ubatch->n_seq_id[i0]; ++s) { |
|
|
                    const llama_seq_id s0 = ubatch->seq_id[i0][s]; // check each seq_id of token i0, not just the first
|
|
|
|
|
|
|
|
if (s0 == s1 && (!cparams.causal_attn || ubatch->pos[i0] <= ubatch->pos[i1])) { |
|
|
if (hparams.use_alibi) { |
|
|
f = -std::abs(ubatch->pos[i0] - ubatch->pos[i1]); |
|
|
} else { |
|
|
f = 0.0f; |
|
|
} |
|
|
break; |
|
|
} |
|
|
} |
|
|
|
|
|
data[h*(n_kv*n_tokens) + i1*n_kv + i0] = f; |
|
|
} |
|
|
} |
|
|
} |
|
|
} |
|
|
|
|
|
void llm_graph_input_attn_kv_unified::set_input(const llama_ubatch * ubatch) { |
|
|
mctx->set_input_k_idxs(self_k_idxs, ubatch); |
|
|
mctx->set_input_v_idxs(self_v_idxs, ubatch); |
|
|
|
|
|
mctx->set_input_kq_mask(self_kq_mask, ubatch, cparams.causal_attn); |
|
|
} |
|
|
|
|
|
bool llm_graph_input_attn_kv_unified::can_reuse(const llm_graph_params & params) { |
|
|
const auto * mctx = static_cast<const llama_kv_cache_unified_context *>(params.mctx); |
|
|
|
|
|
this->mctx = mctx; |
|
|
|
|
|
bool res = true; |
|
|
|
|
|
res &= self_k_idxs->ne[0] == params.ubatch.n_tokens; |
|
|
|
|
|
|
|
|
res &= self_kq_mask->ne[0] == mctx->get_n_kv(); |
|
|
res &= self_kq_mask->ne[1] == GGML_PAD(params.ubatch.n_tokens, GGML_KQ_MASK_PAD); |
|
|
|
|
|
res &= mctx->get_supports_set_rows(); |
|
|
|
|
|
return res; |
|
|
} |
|
|
|
|
|
void llm_graph_input_attn_kv_unified_iswa::set_input(const llama_ubatch * ubatch) { |
|
|
mctx->get_base()->set_input_k_idxs(self_k_idxs, ubatch); |
|
|
mctx->get_base()->set_input_v_idxs(self_v_idxs, ubatch); |
|
|
|
|
|
mctx->get_base()->set_input_kq_mask(self_kq_mask, ubatch, cparams.causal_attn); |
|
|
|
|
|
mctx->get_swa()->set_input_k_idxs(self_k_idxs_swa, ubatch); |
|
|
mctx->get_swa()->set_input_v_idxs(self_v_idxs_swa, ubatch); |
|
|
|
|
|
mctx->get_swa()->set_input_kq_mask(self_kq_mask_swa, ubatch, cparams.causal_attn); |
|
|
} |
|
|
|
|
|
bool llm_graph_input_attn_kv_unified_iswa::can_reuse(const llm_graph_params & params) { |
|
|
const auto * mctx = static_cast<const llama_kv_cache_unified_iswa_context *>(params.mctx); |
|
|
|
|
|
this->mctx = mctx; |
|
|
|
|
|
bool res = true; |
|
|
|
|
|
res &= self_k_idxs->ne[0] == params.ubatch.n_tokens; |
|
|
|
|
|
|
|
|
res &= self_k_idxs_swa->ne[0] == params.ubatch.n_tokens; |
|
|
|
|
|
|
|
|
res &= self_kq_mask->ne[0] == mctx->get_base()->get_n_kv(); |
|
|
res &= self_kq_mask->ne[1] == GGML_PAD(params.ubatch.n_tokens, GGML_KQ_MASK_PAD); |
|
|
|
|
|
res &= self_kq_mask_swa->ne[0] == mctx->get_swa()->get_n_kv(); |
|
|
res &= self_kq_mask_swa->ne[1] == GGML_PAD(params.ubatch.n_tokens, GGML_KQ_MASK_PAD); |
|
|
|
|
|
res &= mctx->get_base()->get_supports_set_rows(); |
|
|
|
|
|
return res; |
|
|
} |
|
|
|
|
|
void llm_graph_input_attn_cross::set_input(const llama_ubatch * ubatch) { |
|
|
GGML_ASSERT(cross_kq_mask); |
|
|
|
|
|
const int64_t n_enc = cross_kq_mask->ne[0]; |
|
|
const int64_t n_tokens = ubatch->n_tokens; |
|
|
|
|
|
GGML_ASSERT(ggml_backend_buffer_is_host(cross_kq_mask->buffer)); |
|
|
GGML_ASSERT(!ubatch->equal_seqs()); |
|
|
|
|
|
float * data = (float *) cross_kq_mask->data; |
|
|
|
|
|
for (int h = 0; h < 1; ++h) { |
|
|
for (int i = 0; i < n_tokens; ++i) { |
|
|
for (int j = 0; j < n_enc; ++j) { |
|
|
float f = -INFINITY; |
|
|
|
|
|
for (int s = 0; s < ubatch->n_seq_id[i]; ++s) { |
|
|
const llama_seq_id seq_id = ubatch->seq_id[i][s]; |
|
|
|
|
|
if (cross->seq_ids_enc[j].find(seq_id) != cross->seq_ids_enc[j].end()) { |
|
|
f = 0.0f; |
|
|
} |
|
|
} |
|
|
|
|
|
data[h*(n_enc*n_tokens) + i*n_enc + j] = f; |
|
|
} |
|
|
} |
|
|
|
|
|
for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) { |
|
|
for (int j = 0; j < n_enc; ++j) { |
|
|
data[h*(n_enc*n_tokens) + i*n_enc + j] = -INFINITY; |
|
|
} |
|
|
} |
|
|
} |
|
|
} |
|
|
|
|
|
void llm_graph_input_mem_hybrid::set_input(const llama_ubatch * ubatch) { |
|
|
inp_attn->set_input(ubatch); |
|
|
inp_rs->set_input(ubatch); |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
llm_graph_result::llm_graph_result(int64_t max_nodes) : max_nodes(max_nodes) { |
|
|
reset(); |
|
|
|
|
|
const char * LLAMA_GRAPH_RESULT_DEBUG = getenv("LLAMA_GRAPH_RESULT_DEBUG"); |
|
|
debug = LLAMA_GRAPH_RESULT_DEBUG ? atoi(LLAMA_GRAPH_RESULT_DEBUG) : 0; |
|
|
} |
|
|
|
|
|
int64_t llm_graph_result::get_max_nodes() const { |
|
|
return max_nodes; |
|
|
} |
|
|
|
|
|
void llm_graph_result::reset() { |
|
|
t_tokens = nullptr; |
|
|
t_logits = nullptr; |
|
|
t_embd = nullptr; |
|
|
t_embd_pooled = nullptr; |
|
|
|
|
|
params = {}; |
|
|
|
|
|
inputs.clear(); |
|
|
|
|
|
buf_compute_meta.resize(ggml_tensor_overhead()*max_nodes + ggml_graph_overhead_custom(max_nodes, false)); |
|
|
|
|
|
    ggml_init_params params = {
        /*.mem_size   =*/ buf_compute_meta.size(),
        /*.mem_buffer =*/ buf_compute_meta.data(),
        /*.no_alloc   =*/ true,
    };
|
|
|
|
|
ctx_compute.reset(ggml_init(params)); |
|
|
|
|
|
gf = ggml_new_graph_custom(ctx_compute.get(), max_nodes, false); |
|
|
} |
|
|
|
|
|
void llm_graph_result::set_inputs(const llama_ubatch * ubatch) { |
|
|
for (auto & input : inputs) { |
|
|
input->set_input(ubatch); |
|
|
} |
|
|
} |
|
|
|
|
|
bool llm_graph_result::can_reuse(const llm_graph_params & params) { |
|
|
if (!this->params.allow_reuse(params)) { |
|
|
if (debug > 1) { |
|
|
LLAMA_LOG_DEBUG("%s: cannot reuse graph due to incompatible graph parameters\n", __func__); |
|
|
} |
|
|
|
|
|
return false; |
|
|
} |
|
|
|
|
|
if (debug > 1) { |
|
|
LLAMA_LOG_DEBUG("%s: checking compatibility of %d inputs:\n", __func__, (int) inputs.size()); |
|
|
} |
|
|
|
|
|
bool res = true; |
|
|
|
|
|
for (auto & input : inputs) { |
|
|
const bool cur = input->can_reuse(params); |
|
|
|
|
|
if (debug > 1) { |
|
|
            LLAMA_LOG_DEBUG("%s: can_reuse = %d\n", __func__, cur);
|
|
} |
|
|
|
|
|
res = res && cur; |
|
|
} |
|
|
|
|
|
if (debug > 0) { |
|
|
LLAMA_LOG_DEBUG("%s: can reuse graph = %d\n", __func__, res); |
|
|
} |
|
|
|
|
|
return res; |
|
|
} |
|
|
|
|
|
llm_graph_input_i * llm_graph_result::add_input(llm_graph_input_ptr input) { |
|
|
inputs.emplace_back(std::move(input)); |
|
|
return inputs.back().get(); |
|
|
} |
|
|
|
|
|
void llm_graph_result::set_params(const llm_graph_params & params) { |
|
|
this->params = params; |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
llm_graph_context::llm_graph_context(const llm_graph_params & params) : |
|
|
arch (params.arch), |
|
|
hparams (params.hparams), |
|
|
cparams (params.cparams), |
|
|
ubatch (params.ubatch), |
|
|
n_embd (hparams.n_embd), |
|
|
n_layer (hparams.n_layer), |
|
|
n_rot (hparams.n_rot), |
|
|
n_ctx (cparams.n_ctx), |
|
|
n_head (hparams.n_head()), |
|
|
n_head_kv (hparams.n_head_kv()), |
|
|
n_embd_head_k (hparams.n_embd_head_k), |
|
|
n_embd_k_gqa (hparams.n_embd_k_gqa()), |
|
|
n_embd_head_v (hparams.n_embd_head_v), |
|
|
n_embd_v_gqa (hparams.n_embd_v_gqa()), |
|
|
n_expert (hparams.n_expert), |
|
|
n_expert_used (cparams.warmup ? hparams.n_expert : hparams.n_expert_used), |
|
|
freq_base (cparams.rope_freq_base), |
|
|
freq_scale (cparams.rope_freq_scale), |
|
|
ext_factor (cparams.yarn_ext_factor), |
|
|
attn_factor (cparams.yarn_attn_factor), |
|
|
beta_fast (cparams.yarn_beta_fast), |
|
|
beta_slow (cparams.yarn_beta_slow), |
|
|
norm_eps (hparams.f_norm_eps), |
|
|
norm_rms_eps (hparams.f_norm_rms_eps), |
|
|
n_tokens (ubatch.n_tokens), |
|
|
n_outputs (params.n_outputs), |
|
|
n_ctx_orig (cparams.n_ctx_orig_yarn), |
|
|
pooling_type (cparams.pooling_type), |
|
|
rope_type (hparams.rope_type), |
|
|
sched (params.sched), |
|
|
backend_cpu (params.backend_cpu), |
|
|
cvec (params.cvec), |
|
|
loras (params.loras), |
|
|
mctx (params.mctx), |
|
|
cross (params.cross), |
|
|
cb_func (params.cb), |
|
|
res (params.res), |
|
|
ctx0 (res->get_ctx()), |
|
|
gf (res->get_gf()) { |
|
|
res->set_params(params); |
|
|
} |
|
|
|
|
|
void llm_graph_context::cb(ggml_tensor * cur, const char * name, int il) const { |
|
|
if (cb_func) { |
|
|
cb_func(ubatch, cur, name, il); |
|
|
} |
|
|
} |
|
|
|
|
|
ggml_tensor * llm_graph_context::build_cvec( |
|
|
ggml_tensor * cur, |
|
|
int il) const { |
|
|
return cvec->apply_to(ctx0, cur, il); |
|
|
} |
|
|
|
|
|
ggml_tensor * llm_graph_context::build_lora_mm( |
|
|
ggml_tensor * w, |
|
|
ggml_tensor * cur) const { |
|
|
ggml_tensor * res = ggml_mul_mat(ctx0, w, cur); |
|
|
|
|
|
for (const auto & lora : *loras) { |
|
|
llama_adapter_lora_weight * lw = lora.first->get_weight(w); |
|
|
if (lw == nullptr) { |
|
|
continue; |
|
|
} |
|
|
|
|
|
const float adapter_scale = lora.second; |
|
|
const float scale = lw->get_scale(lora.first->alpha, adapter_scale); |
|
|
|
|
|
ggml_tensor * ab_cur = ggml_mul_mat( |
|
|
ctx0, lw->b, |
|
|
ggml_mul_mat(ctx0, lw->a, cur) |
|
|
); |
|
|
|
|
|
ab_cur = ggml_scale(ctx0, ab_cur, scale); |
|
|
res = ggml_add(ctx0, res, ab_cur); |
|
|
} |
|
|
|
|
|
return res; |
|
|
} |
|
|
|
|
|
ggml_tensor * llm_graph_context::build_lora_mm_id( |
|
|
ggml_tensor * w, |
|
|
ggml_tensor * cur, |
|
|
ggml_tensor * ids) const { |
|
|
ggml_tensor * res = ggml_mul_mat_id(ctx0, w, cur, ids); |
|
|
for (const auto & lora : *loras) { |
|
|
llama_adapter_lora_weight * lw = lora.first->get_weight(w); |
|
|
if (lw == nullptr) { |
|
|
continue; |
|
|
} |
|
|
|
|
|
const float alpha = lora.first->alpha; |
|
|
const float rank = (float) lw->b->ne[0]; |
|
|
const float scale = alpha ? lora.second * alpha / rank : lora.second; |
|
|
|
|
|
ggml_tensor * ab_cur = ggml_mul_mat_id( |
|
|
ctx0, lw->b, |
|
|
ggml_mul_mat_id(ctx0, lw->a, cur, ids), |
|
|
ids |
|
|
); |
|
|
|
|
|
ab_cur = ggml_scale(ctx0, ab_cur, scale); |
|
|
res = ggml_add(ctx0, res, ab_cur); |
|
|
} |
|
|
|
|
|
return res; |
|
|
} |
|
|
|
|
|
ggml_tensor * llm_graph_context::build_norm( |
|
|
ggml_tensor * cur, |
|
|
ggml_tensor * mw, |
|
|
ggml_tensor * mb, |
|
|
llm_norm_type type, |
|
|
int il) const { |
|
|
switch (type) { |
|
|
case LLM_NORM: cur = ggml_norm (ctx0, cur, hparams.f_norm_eps); break; |
|
|
case LLM_NORM_RMS: cur = ggml_rms_norm(ctx0, cur, hparams.f_norm_rms_eps); break; |
|
|
case LLM_NORM_GROUP: |
|
|
{ |
|
|
cur = ggml_reshape_3d(ctx0, cur, cur->ne[0], 1, cur->ne[1]); |
|
|
cur = ggml_group_norm(ctx0, cur, hparams.n_norm_groups, hparams.f_norm_group_eps); |
|
|
cur = ggml_reshape_2d(ctx0, cur, cur->ne[0], cur->ne[2]); |
|
|
} break; |
|
|
} |
|
|
|
|
|
if (mw || mb) { |
|
|
cb(cur, "norm", il); |
|
|
} |
|
|
|
|
|
if (mw) { |
|
|
cur = ggml_mul(ctx0, cur, mw); |
|
|
if (mb) { |
|
|
cb(cur, "norm_w", il); |
|
|
} |
|
|
} |
|
|
|
|
|
if (mb) { |
|
|
cur = ggml_add(ctx0, cur, mb); |
|
|
} |
|
|
|
|
|
return cur; |
|
|
} |
|
|
|
|
|
ggml_tensor * llm_graph_context::build_ffn( |
|
|
ggml_tensor * cur, |
|
|
ggml_tensor * up, |
|
|
ggml_tensor * up_b, |
|
|
ggml_tensor * up_s, |
|
|
ggml_tensor * gate, |
|
|
ggml_tensor * gate_b, |
|
|
ggml_tensor * gate_s, |
|
|
ggml_tensor * down, |
|
|
ggml_tensor * down_b, |
|
|
ggml_tensor * down_s, |
|
|
ggml_tensor * act_scales, |
|
|
llm_ffn_op_type type_op, |
|
|
llm_ffn_gate_type type_gate, |
|
|
int il) const { |
|
|
ggml_tensor * tmp = up ? build_lora_mm(up, cur) : cur; |
|
|
cb(tmp, "ffn_up", il); |
|
|
|
|
|
if (up_b) { |
|
|
tmp = ggml_add(ctx0, tmp, up_b); |
|
|
cb(tmp, "ffn_up_b", il); |
|
|
} |
|
|
|
|
|
if (up_s) { |
|
|
tmp = ggml_mul(ctx0, tmp, up_s); |
|
|
cb(tmp, "ffn_up_s", il); |
|
|
} |
|
|
|
|
|
if (gate) { |
|
|
switch (type_gate) { |
|
|
case LLM_FFN_SEQ: |
|
|
{ |
|
|
cur = build_lora_mm(gate, tmp); |
|
|
cb(cur, "ffn_gate", il); |
|
|
} break; |
|
|
case LLM_FFN_PAR: |
|
|
{ |
|
|
cur = build_lora_mm(gate, cur); |
|
|
cb(cur, "ffn_gate", il); |
|
|
} break; |
|
|
} |
|
|
|
|
|
if (gate_b) { |
|
|
cur = ggml_add(ctx0, cur, gate_b); |
|
|
cb(cur, "ffn_gate_b", il); |
|
|
} |
|
|
|
|
|
if (gate_s) { |
|
|
cur = ggml_mul(ctx0, cur, gate_s); |
|
|
cb(cur, "ffn_gate_s", il); |
|
|
} |
|
|
|
|
|
} else { |
|
|
cur = tmp; |
|
|
} |
|
|
|
|
|
switch (type_op) { |
|
|
case LLM_FFN_SILU: |
|
|
if (gate && type_gate == LLM_FFN_PAR) { |
|
|
cur = ggml_swiglu_split(ctx0, cur, tmp); |
|
|
cb(cur, "ffn_swiglu", il); |
|
|
type_gate = LLM_FFN_SEQ; |
|
|
} else { |
|
|
cur = ggml_silu(ctx0, cur); |
|
|
cb(cur, "ffn_silu", il); |
|
|
} break; |
|
|
case LLM_FFN_GELU: |
|
|
if (gate && type_gate == LLM_FFN_PAR) { |
|
|
cur = ggml_geglu_split(ctx0, cur, tmp); |
|
|
cb(cur, "ffn_geglu", il); |
|
|
type_gate = LLM_FFN_SEQ; |
|
|
} else { |
|
|
cur = ggml_gelu(ctx0, cur); |
|
|
cb(cur, "ffn_gelu", il); |
|
|
if (act_scales != NULL) { |
|
|
cur = ggml_div(ctx0, cur, act_scales); |
|
|
cb(cur, "ffn_act", il); |
|
|
} |
|
|
} break; |
|
|
case LLM_FFN_RELU: |
|
|
if (gate && type_gate == LLM_FFN_PAR) { |
|
|
cur = ggml_reglu_split(ctx0, cur, tmp); |
|
|
cb(cur, "ffn_reglu", il); |
|
|
type_gate = LLM_FFN_SEQ; |
|
|
} else { |
|
|
cur = ggml_relu(ctx0, cur); |
|
|
cb(cur, "ffn_relu", il); |
|
|
} break; |
|
|
case LLM_FFN_RELU_SQR: |
|
|
{ |
|
|
cur = ggml_relu(ctx0, cur); |
|
|
cb(cur, "ffn_relu", il); |
|
|
|
|
|
cur = ggml_sqr(ctx0, cur); |
|
|
cb(cur, "ffn_sqr(relu)", il); |
|
|
} break; |
|
|
case LLM_FFN_SWIGLU: |
|
|
{ |
|
|
cur = ggml_swiglu(ctx0, cur); |
|
|
cb(cur, "ffn_swiglu", il); |
|
|
} break; |
|
|
case LLM_FFN_GEGLU: |
|
|
{ |
|
|
cur = ggml_geglu(ctx0, cur); |
|
|
cb(cur, "ffn_geglu", il); |
|
|
} break; |
|
|
case LLM_FFN_REGLU: |
|
|
{ |
|
|
cur = ggml_reglu(ctx0, cur); |
|
|
cb(cur, "ffn_reglu", il); |
|
|
} break; |
|
|
} |
|
|
|
|
|
if (gate && type_gate == LLM_FFN_PAR) { |
|
|
cur = ggml_mul(ctx0, cur, tmp); |
|
|
cb(cur, "ffn_gate_par", il); |
|
|
} |
|
|
|
|
|
if (down) { |
|
|
cur = build_lora_mm(down, cur); |
|
|
if (arch == LLM_ARCH_GLM4) { |
|
|
|
|
|
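        // note: GLM-4 appears to be numerically unstable with half-precision accumulators here, so force F32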
ggml_mul_mat_set_prec(cur, GGML_PREC_F32); |
|
|
} |
|
|
} |
|
|
|
|
|
    if (down_b) {
        cb(cur, "ffn_down", il);
        cur = ggml_add(ctx0, cur, down_b);
    }
|
|
|
|
|
if (down_s) { |
|
|
cur = ggml_mul(ctx0, cur, down_s); |
|
|
cb(cur, "ffn_down_s", il); |
|
|
} |
|
|
|
|
|
return cur; |
|
|
} |
|
|
|
|
|
ggml_tensor * llm_graph_context::build_moe_ffn( |
|
|
ggml_tensor * cur, |
|
|
ggml_tensor * gate_inp, |
|
|
ggml_tensor * up_exps, |
|
|
ggml_tensor * gate_exps, |
|
|
ggml_tensor * down_exps, |
|
|
ggml_tensor * exp_probs_b, |
|
|
int64_t n_expert, |
|
|
int64_t n_expert_used, |
|
|
llm_ffn_op_type type_op, |
|
|
bool norm_w, |
|
|
bool scale_w, |
|
|
float w_scale, |
|
|
llama_expert_gating_func_type gating_op, |
|
|
int il) const { |
|
|
const int64_t n_embd = cur->ne[0]; |
|
|
const int64_t n_tokens = cur->ne[1]; |
|
|
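    // Llama 4 applies the expert weights to the input before the expert FFN rather than after it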
const bool weight_before_ffn = arch == LLM_ARCH_LLAMA4; |
|
|
|
|
|
ggml_tensor * logits = build_lora_mm(gate_inp, cur); |
|
|
cb(logits, "ffn_moe_logits", il); |
|
|
|
|
|
ggml_tensor * probs = nullptr; |
|
|
switch (gating_op) { |
|
|
case LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX: |
|
|
{ |
|
|
probs = ggml_soft_max(ctx0, logits); |
|
|
} break; |
|
|
case LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID: |
|
|
{ |
|
|
probs = ggml_sigmoid(ctx0, logits); |
|
|
} break; |
|
|
default: |
|
|
GGML_ABORT("fatal error"); |
|
|
} |
|
|
cb(probs, "ffn_moe_probs", il); |
|
|
|
|
|
|
|
|
|
|
|
ggml_tensor * selection_probs = probs; |
|
|
if (exp_probs_b != nullptr) { |
|
|
selection_probs = ggml_add(ctx0, probs, exp_probs_b); |
|
|
cb(selection_probs, "ffn_moe_probs_biased", il); |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
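    // Llama 4 selects experts directly on the raw router logits;
    // the sigmoid output above is only used to weight the experts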
if (arch == LLM_ARCH_LLAMA4) { |
|
|
selection_probs = logits; |
|
|
} |
|
|
|
|
|
|
|
|
ggml_tensor * selected_experts = ggml_top_k(ctx0, selection_probs, n_expert_used); |
|
|
cb(selected_experts->src[0], "ffn_moe_argsort", il); |
|
|
cb(selected_experts, "ffn_moe_topk", il); |
|
|
|
|
|
ggml_tensor * weights = ggml_get_rows(ctx0, |
|
|
ggml_reshape_3d(ctx0, probs, 1, n_expert, n_tokens), selected_experts); |
|
|
cb(weights, "ffn_moe_weights", il); |
|
|
|
|
|
if (norm_w) { |
|
|
weights = ggml_reshape_2d(ctx0, weights, n_expert_used, n_tokens); |
|
|
|
|
|
ggml_tensor * weights_sum = ggml_sum_rows(ctx0, weights); |
|
|
cb(weights_sum, "ffn_moe_weights_sum", il); |
|
|
|
|
|
weights = ggml_div(ctx0, weights, weights_sum); |
|
|
cb(weights, "ffn_moe_weights_norm", il); |
|
|
|
|
|
weights = ggml_reshape_3d(ctx0, weights, 1, n_expert_used, n_tokens); |
|
|
} |
|
|
if (scale_w) { |
|
|
weights = ggml_scale(ctx0, weights, w_scale); |
|
|
cb(weights, "ffn_moe_weights_scaled", il); |
|
|
} |
|
|
|
|
|
cur = ggml_reshape_3d(ctx0, cur, n_embd, 1, n_tokens); |
|
|
|
|
|
if (weight_before_ffn) { |
|
|
|
|
|
ggml_tensor * repeated = ggml_repeat_4d(ctx0, cur, n_embd, n_expert_used, n_tokens, 1); |
|
|
cur = ggml_mul(ctx0, repeated, weights); |
|
|
cb(cur, "ffn_moe_weighted", il); |
|
|
} |
|
|
|
|
|
ggml_tensor * up = build_lora_mm_id(up_exps, cur, selected_experts); |
|
|
cb(up, "ffn_moe_up", il); |
|
|
|
|
|
ggml_tensor * experts = nullptr; |
|
|
if (gate_exps) { |
|
|
cur = build_lora_mm_id(gate_exps, cur, selected_experts); |
|
|
cb(cur, "ffn_moe_gate", il); |
|
|
} else { |
|
|
cur = up; |
|
|
} |
|
|
|
|
|
switch (type_op) { |
|
|
case LLM_FFN_SILU: |
|
|
if (gate_exps) { |
|
|
cur = ggml_swiglu_split(ctx0, cur, up); |
|
|
cb(cur, "ffn_moe_swiglu", il); |
|
|
} else { |
|
|
cur = ggml_silu(ctx0, cur); |
|
|
cb(cur, "ffn_moe_silu", il); |
|
|
} break; |
|
|
case LLM_FFN_GELU: |
|
|
if (gate_exps) { |
|
|
cur = ggml_geglu_split(ctx0, cur, up); |
|
|
cb(cur, "ffn_moe_geglu", il); |
|
|
} else { |
|
|
cur = ggml_gelu(ctx0, cur); |
|
|
cb(cur, "ffn_moe_gelu", il); |
|
|
} break; |
|
|
default: |
|
|
GGML_ABORT("fatal error"); |
|
|
} |
|
|
|
|
|
experts = build_lora_mm_id(down_exps, cur, selected_experts); |
|
|
cb(experts, "ffn_moe_down", il); |
|
|
|
|
|
if (!weight_before_ffn) { |
|
|
experts = ggml_mul(ctx0, experts, weights); |
|
|
        cb(experts, "ffn_moe_weighted", il);
|
|
} |
|
|
|
|
|
ggml_tensor * cur_experts[LLAMA_MAX_EXPERTS] = { nullptr }; |
|
|
|
|
|
assert(n_expert_used > 0); |
|
|
|
|
|
|
|
|
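    // create the per-expert views and order them before the adds below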
for (uint32_t i = 0; i < hparams.n_expert_used; ++i) { |
|
|
cur_experts[i] = ggml_view_2d(ctx0, experts, n_embd, n_tokens, experts->nb[2], i*experts->nb[1]); |
|
|
|
|
|
ggml_build_forward_expand(gf, cur_experts[i]); |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
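    // aggregate the expert outputs
    // note: using hparams.n_expert_used here (instead of the n_expert_used argument, which can be
    //       larger, e.g. during warmup when all experts are activated) keeps the number of view/add
    //       nodes constant, so the graph topology does not change between warmup and decode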
ggml_tensor * moe_out = cur_experts[0]; |
|
|
|
|
|
for (uint32_t i = 1; i < hparams.n_expert_used; ++i) { |
|
|
moe_out = ggml_add(ctx0, moe_out, cur_experts[i]); |
|
|
} |
|
|
|
|
|
if (hparams.n_expert_used == 1) { |
|
|
|
|
|
moe_out = ggml_cont(ctx0, moe_out); |
|
|
} |
|
|
|
|
|
cb(moe_out, "ffn_moe_out", il); |
|
|
|
|
|
return moe_out; |
|
|
} |
|
|
|
|
|
|
|
|
ggml_tensor * llm_graph_context::build_inp_embd(ggml_tensor * tok_embd) const { |
|
|
const int64_t n_embd = hparams.n_embd; |
|
|
|
|
|
auto inp = std::make_unique<llm_graph_input_embd>(); |
|
|
|
|
|
ggml_tensor * cur = nullptr; |
|
|
|
|
|
if (ubatch.token) { |
|
|
inp->tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ubatch.n_tokens); |
|
|
|
|
|
ggml_set_input(inp->tokens); |
|
|
res->t_tokens = inp->tokens; |
|
|
|
|
|
cur = ggml_get_rows(ctx0, tok_embd, inp->tokens); |
|
|
|
|
|
|
|
|
for (const auto & lora : *loras) { |
|
|
llama_adapter_lora_weight * lw = lora.first->get_weight(tok_embd); |
|
|
if (lw == nullptr) { |
|
|
continue; |
|
|
} |
|
|
|
|
|
const float adapter_scale = lora.second; |
|
|
const float scale = lw->get_scale(lora.first->alpha, adapter_scale); |
|
|
|
|
|
ggml_tensor * inpL_delta = ggml_scale(ctx0, ggml_mul_mat( |
|
|
ctx0, lw->b, |
|
|
ggml_get_rows(ctx0, lw->a, inp->tokens) |
|
|
), scale); |
|
|
|
|
|
cur = ggml_add(ctx0, cur, inpL_delta); |
|
|
} |
|
|
} else { |
|
|
inp->embd = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, ubatch.n_tokens); |
|
|
ggml_set_input(inp->embd); |
|
|
|
|
|
cur = inp->embd; |
|
|
} |
|
|
|
|
|
|
|
|
if (hparams.f_embedding_scale != 0.0f) { |
|
|
cur = ggml_scale(ctx0, cur, hparams.f_embedding_scale); |
|
|
} |
|
|
|
|
|
cb(cur, "inp_embd", -1); |
|
|
|
|
|
res->add_input(std::move(inp)); |
|
|
|
|
|
return cur; |
|
|
} |
|
|
|
|
|
ggml_tensor * llm_graph_context::build_inp_pos() const { |
|
|
auto inp = std::make_unique<llm_graph_input_pos>(hparams.n_pos_per_embd()); |
|
|
|
|
|
auto & cur = inp->pos; |
|
|
|
|
|
cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, (int64_t)n_tokens*hparams.n_pos_per_embd()); |
|
|
ggml_set_input(cur); |
|
|
|
|
|
res->add_input(std::move(inp)); |
|
|
|
|
|
return cur; |
|
|
} |
|
|
|
|
|
ggml_tensor * llm_graph_context::build_inp_attn_scale() const { |
|
|
auto inp = std::make_unique<llm_graph_input_attn_temp>(hparams.n_attn_temp_floor_scale, hparams.f_attn_temp_scale); |
|
|
|
|
|
auto & cur = inp->attn_scale; |
|
|
|
|
|
|
|
|
cur = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, 1, 1, n_tokens); |
|
|
ggml_set_input(cur); |
|
|
|
|
|
res->add_input(std::move(inp)); |
|
|
|
|
|
return cur; |
|
|
} |
|
|
|
|
|
ggml_tensor * llm_graph_context::build_inp_out_ids() const { |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
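    // note: the out_ids input is created even when every token is an output, so that the
    //       graph topology (and thus graph reuse) does not depend on the current ubatch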
auto inp = std::make_unique<llm_graph_input_out_ids>(hparams, cparams, n_outputs); |
|
|
|
|
|
auto & cur = inp->out_ids; |
|
|
|
|
|
cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_outputs); |
|
|
ggml_set_input(cur); |
|
|
|
|
|
res->add_input(std::move(inp)); |
|
|
|
|
|
return cur; |
|
|
} |
|
|
|
|
|
ggml_tensor * llm_graph_context::build_inp_mean() const { |
|
|
auto inp = std::make_unique<llm_graph_input_mean>(cparams); |
|
|
|
|
|
auto & cur = inp->mean; |
|
|
|
|
|
cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, ubatch.n_seqs_unq); |
|
|
ggml_set_input(cur); |
|
|
|
|
|
res->add_input(std::move(inp)); |
|
|
|
|
|
return cur; |
|
|
} |
|
|
|
|
|
ggml_tensor * llm_graph_context::build_inp_cls() const { |
|
|
auto inp = std::make_unique<llm_graph_input_cls>(cparams); |
|
|
|
|
|
auto & cur = inp->cls; |
|
|
|
|
|
cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ubatch.n_seqs_unq); |
|
|
ggml_set_input(cur); |
|
|
|
|
|
res->add_input(std::move(inp)); |
|
|
|
|
|
return cur; |
|
|
} |
|
|
|
|
|
ggml_tensor * llm_graph_context::build_inp_cross_embd() const { |
|
|
auto inp = std::make_unique<llm_graph_input_cross_embd>(cross); |
|
|
|
|
|
auto & cur = inp->cross_embd; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
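    // use the encoder output dimensions when available; otherwise fall back to the model
    // hparams (e.g. when the graph is built before the first encode call)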
const auto n_embd = !cross->v_embd.empty() ? cross->n_embd : hparams.n_embd; |
|
|
const auto n_enc = !cross->v_embd.empty() ? cross->n_enc : hparams.n_ctx_train; |
|
|
|
|
|
cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, n_enc); |
|
|
ggml_set_input(cur); |
|
|
|
|
|
res->add_input(std::move(inp)); |
|
|
|
|
|
return cur; |
|
|
} |
|
|
|
|
|
ggml_tensor * llm_graph_context::build_inp_pos_bucket_enc() const { |
|
|
auto inp = std::make_unique<llm_graph_input_pos_bucket>(hparams); |
|
|
|
|
|
auto & cur = inp->pos_bucket; |
|
|
|
|
|
cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_I32, n_tokens, n_tokens); |
|
|
ggml_set_input(cur); |
|
|
|
|
|
res->add_input(std::move(inp)); |
|
|
|
|
|
return cur; |
|
|
} |
|
|
|
|
|
ggml_tensor * llm_graph_context::build_inp_pos_bucket_dec() const { |
|
|
const auto * mctx_cur = static_cast<const llama_kv_cache_unified_context *>(mctx); |
|
|
|
|
|
auto inp = std::make_unique<llm_graph_input_pos_bucket_kv>(hparams, mctx_cur); |
|
|
|
|
|
const auto n_kv = mctx_cur->get_n_kv(); |
|
|
|
|
|
auto & cur = inp->pos_bucket; |
|
|
|
|
|
cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_I32, n_kv, n_tokens); |
|
|
ggml_set_input(cur); |
|
|
|
|
|
res->add_input(std::move(inp)); |
|
|
|
|
|
return cur; |
|
|
} |
|
|
|
|
|
ggml_tensor * llm_graph_context::build_pos_bias(ggml_tensor * pos_bucket, ggml_tensor * attn_rel_b) const { |
|
|
ggml_tensor * pos_bucket_1d = ggml_reshape_1d(ctx0, pos_bucket, pos_bucket->ne[0] * pos_bucket->ne[1]); |
|
|
cb(pos_bucket_1d, "pos_bucket_1d", -1); |
|
|
|
|
|
ggml_tensor * pos_bias = ggml_get_rows(ctx0, attn_rel_b, pos_bucket_1d); |
|
|
|
|
|
pos_bias = ggml_reshape_3d(ctx0, pos_bias, pos_bias->ne[0], pos_bucket->ne[0], pos_bucket->ne[1]); |
|
|
pos_bias = ggml_permute (ctx0, pos_bias, 2, 0, 1, 3); |
|
|
pos_bias = ggml_cont (ctx0, pos_bias); |
|
|
|
|
|
cb(pos_bias, "pos_bias", -1); |
|
|
|
|
|
return pos_bias; |
|
|
} |
|
|
|
|
|
ggml_tensor * llm_graph_context::build_attn_mha( |
|
|
ggml_tensor * q, |
|
|
ggml_tensor * k, |
|
|
ggml_tensor * v, |
|
|
ggml_tensor * kq_b, |
|
|
ggml_tensor * kq_mask, |
|
|
ggml_tensor * v_mla, |
|
|
float kq_scale) const { |
|
|
const bool v_trans = v->nb[1] > v->nb[2]; |
|
|
|
|
|
|
|
|
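    // a unified KV cache with separate per-sequence streams stores K/V with ne[3] == n_stream;
    // reshape Q accordingly so that the batch is split across the same streams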
const auto n_stream = k->ne[3]; |
|
|
|
|
|
q = ggml_reshape_4d(ctx0, q, q->ne[0], q->ne[1], q->ne[2]/n_stream, n_stream); |
|
|
|
|
|
q = ggml_permute(ctx0, q, 0, 2, 1, 3); |
|
|
k = ggml_permute(ctx0, k, 0, 2, 1, 3); |
|
|
v = ggml_permute(ctx0, v, 0, 2, 1, 3); |
|
|
|
|
|
const auto n_kv = k->ne[1]; |
|
|
|
|
|
ggml_tensor * cur; |
|
|
|
|
|
|
|
|
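    // the flash attention path requires a padded KV length (multiple of 256) and no KQ bias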
if (cparams.flash_attn && (n_kv % 256 == 0) && kq_b == nullptr) { |
|
|
GGML_ASSERT(kq_b == nullptr && "Flash attention does not support KQ bias yet"); |
|
|
|
|
|
if (v_trans) { |
|
|
v = ggml_transpose(ctx0, v); |
|
|
} |
|
|
|
|
|
|
|
|
if (k->type == GGML_TYPE_F32) { |
|
|
k = ggml_cast(ctx0, k, GGML_TYPE_F16); |
|
|
} |
|
|
|
|
|
if (v->type == GGML_TYPE_F32) { |
|
|
v = ggml_cast(ctx0, v, GGML_TYPE_F16); |
|
|
} |
|
|
|
|
|
cur = ggml_flash_attn_ext(ctx0, q, k, v, kq_mask, kq_scale, hparams.f_max_alibi_bias, |
|
|
hparams.attn_soft_cap ? hparams.f_attn_logit_softcapping : 0.0f); |
|
|
|
|
|
ggml_flash_attn_ext_set_prec(cur, GGML_PREC_F32); |
|
|
|
|
|
if (v_mla) { |
|
|
#if 0 |
|
|
|
|
|
|
|
|
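            // v_mla could be applied as a matrix-vector multiplication broadcast over the tokens,
            // but ggml matmuls are optimized for large dims 0/1, making this variant inefficient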
cur = ggml_reshape_4d(ctx0, cur, v_mla->ne[0], 1, n_head, n_tokens); |
|
|
cur = ggml_mul_mat(ctx0, v_mla, cur); |
|
|
#else |
|
|
|
|
|
|
|
|
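            // instead, perform a matrix-matrix multiplication with the tokens in dim 1;
            // the surrounding permutes only reinterpret the tensor layout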
cur = ggml_permute(ctx0, cur, 0, 2, 1, 3); |
|
|
cur = ggml_mul_mat(ctx0, v_mla, cur); |
|
|
cur = ggml_permute(ctx0, cur, 0, 2, 1, 3); |
|
|
cur = ggml_cont(ctx0, cur); |
|
|
#endif |
|
|
} |
|
|
|
|
|
cur = ggml_reshape_2d(ctx0, cur, cur->ne[0]*cur->ne[1], cur->ne[2]*cur->ne[3]); |
|
|
} else { |
|
|
ggml_tensor * kq = ggml_mul_mat(ctx0, k, q); |
|
|
|
|
|
|
|
|
|
|
|
ggml_mul_mat_set_prec(kq, GGML_PREC_F32); |
|
|
|
|
|
if (arch == LLM_ARCH_GROK) { |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
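            // Grok soft-caps the attention logits: kq = 30 * tanh(kq * 0.08838834764831845 / 30)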
kq = ggml_tanh(ctx0, ggml_scale(ctx0, kq, 0.08838834764831845f/30.0f)); |
|
|
kq = ggml_scale(ctx0, kq, 30); |
|
|
} |
|
|
|
|
|
if (hparams.attn_soft_cap) { |
|
|
kq = ggml_scale(ctx0, kq, 1.0f / hparams.f_attn_logit_softcapping); |
|
|
kq = ggml_tanh (ctx0, kq); |
|
|
kq = ggml_scale(ctx0, kq, hparams.f_attn_logit_softcapping); |
|
|
} |
|
|
|
|
|
if (kq_b) { |
|
|
kq = ggml_add(ctx0, kq, kq_b); |
|
|
} |
|
|
|
|
|
kq = ggml_soft_max_ext(ctx0, kq, kq_mask, kq_scale, hparams.f_max_alibi_bias); |
|
|
|
|
|
if (!v_trans) { |
|
|
|
|
|
v = ggml_cont(ctx0, ggml_transpose(ctx0, v)); |
|
|
} |
|
|
|
|
|
ggml_tensor * kqv = ggml_mul_mat(ctx0, v, kq); |
|
|
|
|
|
|
|
|
if (v_mla) { |
|
|
kqv = ggml_mul_mat(ctx0, v_mla, kqv); |
|
|
} |
|
|
|
|
|
cur = ggml_permute(ctx0, kqv, 0, 2, 1, 3); |
|
|
|
|
|
|
|
|
cur = ggml_cont_2d(ctx0, cur, cur->ne[0]*cur->ne[1], cur->ne[2]*cur->ne[3]); |
|
|
|
|
|
if (!cparams.offload_kqv) { |
|
|
|
|
|
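            // keep the attention output on the CPU so that the nodes between the KV store and this
            // tensor are not offloaded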
ggml_backend_sched_set_tensor_backend(sched, cur, backend_cpu); |
|
|
} |
|
|
} |
|
|
|
|
|
ggml_build_forward_expand(gf, cur); |
|
|
|
|
|
return cur; |
|
|
} |
|
|
|
|
|
llm_graph_input_attn_no_cache * llm_graph_context::build_attn_inp_no_cache() const { |
|
|
auto inp = std::make_unique<llm_graph_input_attn_no_cache>(hparams, cparams); |
|
|
|
|
|
|
|
|
inp->kq_mask = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_tokens, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD), 1, 1); |
|
|
ggml_set_input(inp->kq_mask); |
|
|
|
|
|
inp->kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->kq_mask, GGML_TYPE_F16) : inp->kq_mask; |
|
|
|
|
|
return (llm_graph_input_attn_no_cache *) res->add_input(std::move(inp)); |
|
|
} |
|
|
|
|
|
ggml_tensor * llm_graph_context::build_attn( |
|
|
llm_graph_input_attn_no_cache * inp, |
|
|
ggml_tensor * wo, |
|
|
ggml_tensor * wo_b, |
|
|
ggml_tensor * q_cur, |
|
|
ggml_tensor * k_cur, |
|
|
ggml_tensor * v_cur, |
|
|
ggml_tensor * kq_b, |
|
|
ggml_tensor * v_mla, |
|
|
float kq_scale, |
|
|
int il) const { |
|
|
GGML_UNUSED(n_tokens); |
|
|
|
|
|
|
|
|
|
|
|
ggml_build_forward_expand(gf, q_cur); |
|
|
ggml_build_forward_expand(gf, k_cur); |
|
|
ggml_build_forward_expand(gf, v_cur); |
|
|
|
|
|
const auto & kq_mask = inp->get_kq_mask(); |
|
|
|
|
|
|
|
|
|
|
|
assert(!ubatch.equal_seqs()); |
|
|
|
|
|
ggml_tensor * q = q_cur; |
|
|
ggml_tensor * k = k_cur; |
|
|
ggml_tensor * v = v_cur; |
|
|
|
|
|
ggml_tensor * cur = build_attn_mha(q, k, v, kq_b, kq_mask, v_mla, kq_scale); |
|
|
cb(cur, "kqv_out", il); |
|
|
|
|
|
if (wo) { |
|
|
cur = build_lora_mm(wo, cur); |
|
|
} |
|
|
|
|
|
    if (wo_b) {
        cur = ggml_add(ctx0, cur, wo_b);
    }
|
|
|
|
|
return cur; |
|
|
} |
|
|
|
|
|
static std::unique_ptr<llm_graph_input_attn_kv_unified> build_attn_inp_kv_unified_impl( |
|
|
ggml_context * ctx0, |
|
|
const llama_ubatch & ubatch, |
|
|
const llama_hparams & hparams, |
|
|
const llama_cparams & cparams, |
|
|
const llama_kv_cache_unified_context * mctx_cur) { |
|
|
|
|
|
auto inp = std::make_unique<llm_graph_input_attn_kv_unified>(hparams, cparams, mctx_cur); |
|
|
|
|
|
{ |
|
|
GGML_ASSERT(hparams.swa_type == LLAMA_SWA_TYPE_NONE && "Use llama_kv_cache_unified_iswa for SWA"); |
|
|
|
|
|
const auto n_kv = mctx_cur->get_n_kv(); |
|
|
const auto n_tokens = ubatch.n_tokens; |
|
|
const auto n_stream = cparams.kv_unified ? 1 : ubatch.n_seqs_unq; |
|
|
|
|
|
inp->self_k_idxs = mctx_cur->build_input_k_idxs(ctx0, ubatch); |
|
|
inp->self_v_idxs = mctx_cur->build_input_v_idxs(ctx0, ubatch); |
|
|
|
|
|
inp->self_kq_mask = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens/n_stream, GGML_KQ_MASK_PAD), 1, n_stream); |
|
|
ggml_set_input(inp->self_kq_mask); |
|
|
|
|
|
inp->self_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask, GGML_TYPE_F16) : inp->self_kq_mask; |
|
|
} |
|
|
|
|
|
return inp; |
|
|
} |
|
|
|
|
|
llm_graph_input_attn_kv_unified * llm_graph_context::build_attn_inp_kv_unified() const { |
|
|
const auto * mctx_cur = static_cast<const llama_kv_cache_unified_context *>(mctx); |
|
|
|
|
|
auto inp = build_attn_inp_kv_unified_impl(ctx0, ubatch, hparams, cparams, mctx_cur); |
|
|
|
|
|
return (llm_graph_input_attn_kv_unified *) res->add_input(std::move(inp)); |
|
|
} |
|
|
|
|
|
ggml_tensor * llm_graph_context::build_attn( |
|
|
llm_graph_input_attn_kv_unified * inp, |
|
|
ggml_tensor * wo, |
|
|
ggml_tensor * wo_b, |
|
|
ggml_tensor * q_cur, |
|
|
ggml_tensor * k_cur, |
|
|
ggml_tensor * v_cur, |
|
|
ggml_tensor * kq_b, |
|
|
ggml_tensor * v_mla, |
|
|
float kq_scale, |
|
|
int il) const { |
|
|
|
|
|
|
|
|
ggml_build_forward_expand(gf, q_cur); |
|
|
ggml_build_forward_expand(gf, k_cur); |
|
|
ggml_build_forward_expand(gf, v_cur); |
|
|
|
|
|
const auto * mctx_cur = inp->mctx; |
|
|
|
|
|
|
|
|
{ |
|
|
const auto & k_idxs = inp->get_k_idxs(); |
|
|
const auto & v_idxs = inp->get_v_idxs(); |
|
|
|
|
|
ggml_build_forward_expand(gf, mctx_cur->cpy_k(ctx0, k_cur, k_idxs, il)); |
|
|
ggml_build_forward_expand(gf, mctx_cur->cpy_v(ctx0, v_cur, v_idxs, il)); |
|
|
} |
|
|
|
|
|
const auto & kq_mask = inp->get_kq_mask(); |
|
|
|
|
|
ggml_tensor * q = q_cur; |
|
|
ggml_tensor * k = mctx_cur->get_k(ctx0, il); |
|
|
ggml_tensor * v = mctx_cur->get_v(ctx0, il); |
|
|
|
|
|
ggml_tensor * cur = build_attn_mha(q, k, v, kq_b, kq_mask, v_mla, kq_scale); |
|
|
cb(cur, "kqv_out", il); |
|
|
|
|
|
if (wo) { |
|
|
cur = build_lora_mm(wo, cur); |
|
|
if (arch == LLM_ARCH_GLM4) { |
|
|
|
|
|
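            // note: as in build_ffn(), GLM-4 appears to need F32 accumulators here to stay numerically stable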
ggml_mul_mat_set_prec(cur, GGML_PREC_F32); |
|
|
} |
|
|
} |
|
|
|
|
|
if (wo_b) { |
|
|
cur = ggml_add(ctx0, cur, wo_b); |
|
|
} |
|
|
|
|
|
return cur; |
|
|
} |
|
|
|
|
|
ggml_tensor * llm_graph_context::build_attn( |
|
|
llm_graph_input_attn_kv_unified_iswa * inp, |
|
|
ggml_tensor * wo, |
|
|
ggml_tensor * wo_b, |
|
|
ggml_tensor * q_cur, |
|
|
ggml_tensor * k_cur, |
|
|
ggml_tensor * v_cur, |
|
|
ggml_tensor * kq_b, |
|
|
ggml_tensor * v_mla, |
|
|
float kq_scale, |
|
|
int il) const { |
|
|
|
|
|
|
|
|
ggml_build_forward_expand(gf, q_cur); |
|
|
|
|
|
if (k_cur) { |
|
|
ggml_build_forward_expand(gf, k_cur); |
|
|
} |
|
|
|
|
|
if (v_cur) { |
|
|
ggml_build_forward_expand(gf, v_cur); |
|
|
} |
|
|
|
|
|
const auto * mctx_iswa = inp->mctx; |
|
|
|
|
|
const bool is_swa = hparams.is_swa(il); |
|
|
|
|
|
const auto * mctx_cur = is_swa ? mctx_iswa->get_swa() : mctx_iswa->get_base(); |
|
|
|
|
|
|
|
|
if (k_cur) { |
|
|
const auto & k_idxs = is_swa ? inp->get_k_idxs_swa() : inp->get_k_idxs(); |
|
|
|
|
|
ggml_build_forward_expand(gf, mctx_cur->cpy_k(ctx0, k_cur, k_idxs, il)); |
|
|
} |
|
|
|
|
|
if (v_cur) { |
|
|
const auto & v_idxs = is_swa ? inp->get_v_idxs_swa() : inp->get_v_idxs(); |
|
|
|
|
|
ggml_build_forward_expand(gf, mctx_cur->cpy_v(ctx0, v_cur, v_idxs, il)); |
|
|
} |
|
|
|
|
|
const auto & kq_mask = is_swa ? inp->get_kq_mask_swa() : inp->get_kq_mask(); |
|
|
|
|
|
ggml_tensor * q = q_cur; |
|
|
ggml_tensor * k = mctx_cur->get_k(ctx0, il); |
|
|
ggml_tensor * v = mctx_cur->get_v(ctx0, il); |
|
|
|
|
|
ggml_tensor * cur = build_attn_mha(q, k, v, kq_b, kq_mask, v_mla, kq_scale); |
|
|
cb(cur, "kqv_out", il); |
|
|
|
|
|
if (wo) { |
|
|
cur = build_lora_mm(wo, cur); |
|
|
} |
|
|
|
|
|
    if (wo_b) {
        cur = ggml_add(ctx0, cur, wo_b);
    }
|
|
|
|
|
return cur; |
|
|
} |
|
|
|
|
|
llm_graph_input_attn_cross * llm_graph_context::build_attn_inp_cross() const { |
|
|
auto inp = std::make_unique<llm_graph_input_attn_cross>(cross); |
|
|
|
|
|
const int32_t n_enc = !cross->v_embd.empty() ? cross->n_enc : hparams.n_ctx_train; |
|
|
|
|
|
inp->cross_kq_mask = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_enc, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD), 1, 1); |
|
|
ggml_set_input(inp->cross_kq_mask); |
|
|
|
|
|
inp->cross_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->cross_kq_mask, GGML_TYPE_F16) : inp->cross_kq_mask; |
|
|
|
|
|
return (llm_graph_input_attn_cross *) res->add_input(std::move(inp)); |
|
|
} |
|
|
|
|
|
ggml_tensor * llm_graph_context::build_attn( |
|
|
llm_graph_input_attn_cross * inp, |
|
|
ggml_tensor * wo, |
|
|
ggml_tensor * wo_b, |
|
|
ggml_tensor * q_cur, |
|
|
ggml_tensor * k_cur, |
|
|
ggml_tensor * v_cur, |
|
|
ggml_tensor * kq_b, |
|
|
ggml_tensor * v_mla, |
|
|
float kq_scale, |
|
|
int il) const { |
|
|
|
|
|
|
|
|
ggml_build_forward_expand(gf, q_cur); |
|
|
ggml_build_forward_expand(gf, k_cur); |
|
|
ggml_build_forward_expand(gf, v_cur); |
|
|
|
|
|
const auto & kq_mask = inp->get_kq_mask_cross(); |
|
|
|
|
|
ggml_tensor * q = q_cur; |
|
|
ggml_tensor * k = k_cur; |
|
|
ggml_tensor * v = v_cur; |
|
|
|
|
|
ggml_tensor * cur = build_attn_mha(q, k, v, kq_b, kq_mask, v_mla, kq_scale); |
|
|
cb(cur, "kqv_out", il); |
|
|
|
|
|
if (wo) { |
|
|
cur = build_lora_mm(wo, cur); |
|
|
} |
|
|
|
|
|
    if (wo_b) {
        cur = ggml_add(ctx0, cur, wo_b);
    }
|
|
|
|
|
return cur; |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
llm_graph_input_attn_kv_unified_iswa * llm_graph_context::build_attn_inp_kv_unified_iswa() const { |
|
|
const auto * mctx_cur = static_cast<const llama_kv_cache_unified_iswa_context *>(mctx); |
|
|
|
|
|
auto inp = std::make_unique<llm_graph_input_attn_kv_unified_iswa>(hparams, cparams, mctx_cur); |
|
|
|
|
|
const auto n_stream = cparams.kv_unified ? 1 : ubatch.n_seqs_unq; |
|
|
|
|
|
{ |
|
|
const auto n_kv = mctx_cur->get_base()->get_n_kv(); |
|
|
|
|
|
inp->self_k_idxs = mctx_cur->get_base()->build_input_k_idxs(ctx0, ubatch); |
|
|
inp->self_v_idxs = mctx_cur->get_base()->build_input_v_idxs(ctx0, ubatch); |
|
|
|
|
|
inp->self_kq_mask = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens/n_stream, GGML_KQ_MASK_PAD), 1, n_stream); |
|
|
ggml_set_input(inp->self_kq_mask); |
|
|
|
|
|
inp->self_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask, GGML_TYPE_F16) : inp->self_kq_mask; |
|
|
} |
|
|
|
|
|
{ |
|
|
GGML_ASSERT(hparams.swa_type != LLAMA_SWA_TYPE_NONE && "Use llama_kv_cache_unified for non-SWA"); |
|
|
|
|
|
const auto n_kv = mctx_cur->get_swa()->get_n_kv(); |
|
|
|
|
|
inp->self_k_idxs_swa = mctx_cur->get_swa()->build_input_k_idxs(ctx0, ubatch); |
|
|
inp->self_v_idxs_swa = mctx_cur->get_swa()->build_input_v_idxs(ctx0, ubatch); |
|
|
|
|
|
inp->self_kq_mask_swa = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens/n_stream, GGML_KQ_MASK_PAD), 1, n_stream); |
|
|
ggml_set_input(inp->self_kq_mask_swa); |
|
|
|
|
|
inp->self_kq_mask_swa_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask_swa, GGML_TYPE_F16) : inp->self_kq_mask_swa; |
|
|
} |
|
|
|
|
|
return (llm_graph_input_attn_kv_unified_iswa *) res->add_input(std::move(inp)); |
|
|
} |
|
|
|
|
|
ggml_tensor * llm_graph_context::build_rs( |
|
|
ggml_tensor * s, |
|
|
ggml_tensor * state_copy, |
|
|
int32_t state_size, |
|
|
int32_t n_seqs, |
|
|
uint32_t n_kv, |
|
|
uint32_t kv_head, |
|
|
uint32_t kv_size, |
|
|
int32_t rs_zero, |
|
|
const llm_graph_get_rows_fn & get_state_rows) const { |
|
|
|
|
|
ggml_tensor * states = ggml_reshape_2d(ctx0, s, state_size, kv_size); |
|
|
|
|
|
|
|
|
|
|
|
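    // zero the single state at row rs_zero, which then serves as the copy source for cells that
    // were cleared (when rs_zero < 0 the view is zero-sized and the scale is a no-op)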
ggml_tensor * state_zero = ggml_view_1d(ctx0, states, state_size*(rs_zero >= 0), rs_zero*states->nb[1]*(rs_zero >= 0)); |
|
|
ggml_build_forward_expand(gf, ggml_scale_inplace(ctx0, state_zero, 0)); |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
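    // gather the output states for the sequences of this ubatch; the first n_seqs entries of
    // state_copy are assumed to be their copy sources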
ggml_tensor * output_states = get_state_rows(ctx0, states, ggml_view_1d(ctx0, state_copy, n_seqs, 0)); |
|
|
ggml_build_forward_expand(gf, output_states); |
|
|
|
|
|
|
|
|
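    // copy the extra states that will not be modified further (the cells between n_seqs and n_kv)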
ggml_tensor * states_extra = ggml_get_rows(ctx0, states, ggml_view_1d(ctx0, state_copy, n_kv - n_seqs, n_seqs*state_copy->nb[0])); |
|
|
ggml_build_forward_expand(gf, |
|
|
ggml_cpy(ctx0, |
|
|
states_extra, |
|
|
ggml_view_1d(ctx0, s, state_size*(n_kv - n_seqs), (kv_head + n_seqs)*state_size*ggml_element_size(s)))); |
|
|
|
|
|
return output_states; |
|
|
} |
|
|
|
|
|
static std::unique_ptr<llm_graph_input_rs> build_rs_inp_impl( |
|
|
ggml_context * ctx0, |
|
|
const llama_memory_recurrent_context * mctx_cur) { |
|
|
|
|
|
auto inp = std::make_unique<llm_graph_input_rs>(mctx_cur); |
|
|
|
|
|
const auto n_rs = mctx_cur->get_n_rs(); |
|
|
|
|
|
inp->s_copy = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_rs); |
|
|
ggml_set_input(inp->s_copy); |
|
|
|
|
|
return inp; |
|
|
} |
|
|
|
|
|
llm_graph_input_rs * llm_graph_context::build_rs_inp() const { |
|
|
const auto * mctx_cur = static_cast<const llama_memory_recurrent_context *>(mctx); |
|
|
|
|
|
auto inp = build_rs_inp_impl(ctx0, mctx_cur); |
|
|
|
|
|
return (llm_graph_input_rs *) res->add_input(std::move(inp)); |
|
|
} |
|
|
|
|
|
ggml_tensor * llm_graph_context::build_rs( |
|
|
llm_graph_input_rs * inp, |
|
|
ggml_tensor * s, |
|
|
int32_t state_size, |
|
|
int32_t n_seqs, |
|
|
const llm_graph_get_rows_fn & get_state_rows) const { |
|
|
const auto * kv_state = inp->mctx; |
|
|
|
|
|
return build_rs(s, inp->s_copy, state_size, n_seqs, kv_state->get_n_rs(), kv_state->get_head(), kv_state->get_size(), kv_state->get_rs_z(), get_state_rows); |
|
|
} |
|
|
|
|
|
ggml_tensor * llm_graph_context::build_rwkv_token_shift_load( |
|
|
llm_graph_input_rs * inp, |
|
|
const llama_ubatch & ubatch, |
|
|
int il) const { |
|
|
const auto * mctx_cur = static_cast<const llama_memory_recurrent_context *>(mctx); |
|
|
|
|
|
const auto token_shift_count = hparams.token_shift_count; |
|
|
|
|
|
const int64_t n_seqs = ubatch.n_seqs; |
|
|
|
|
|
ggml_tensor * token_shift_all = mctx_cur->get_r_l(il); |
|
|
|
|
|
ggml_tensor * token_shift = build_rs( |
|
|
inp, token_shift_all, |
|
|
hparams.n_embd_r(), n_seqs); |
|
|
|
|
|
token_shift = ggml_reshape_3d(ctx0, token_shift, hparams.n_embd, token_shift_count, n_seqs); |
|
|
|
|
|
return token_shift; |
|
|
} |
|
|
|
|
|
ggml_tensor * llm_graph_context::build_rwkv_token_shift_store( |
|
|
ggml_tensor * token_shift, |
|
|
const llama_ubatch & ubatch, |
|
|
int il) const { |
|
|
const auto * mctx_cur = static_cast<const llama_memory_recurrent_context *>(mctx); |
|
|
|
|
|
const auto token_shift_count = hparams.token_shift_count; |
|
|
const auto n_embd = hparams.n_embd; |
|
|
|
|
|
const int64_t n_seqs = ubatch.n_seqs; |
|
|
|
|
|
const auto kv_head = mctx_cur->get_head(); |
|
|
|
|
|
return ggml_cpy( |
|
|
ctx0, |
|
|
ggml_view_1d(ctx0, token_shift, n_embd * n_seqs * token_shift_count, 0), |
|
|
ggml_view_1d(ctx0, mctx_cur->get_r_l(il), hparams.n_embd_r()*n_seqs, hparams.n_embd_r()*kv_head*ggml_element_size(mctx_cur->get_r_l(il))) |
|
|
); |
|
|
} |
|
|
|
|
|
llm_graph_input_mem_hybrid * llm_graph_context::build_inp_mem_hybrid() const { |
|
|
const auto * mctx_cur = static_cast<const llama_memory_hybrid_context *>(mctx); |
|
|
|
|
|
auto inp_rs = build_rs_inp_impl(ctx0, mctx_cur->get_recr()); |
|
|
auto inp_attn = build_attn_inp_kv_unified_impl(ctx0, ubatch, hparams, cparams, mctx_cur->get_attn()); |
|
|
|
|
|
auto inp = std::make_unique<llm_graph_input_mem_hybrid>(std::move(inp_attn), std::move(inp_rs), mctx_cur); |
|
|
|
|
|
return (llm_graph_input_mem_hybrid *) res->add_input(std::move(inp)); |
|
|
} |
|
|
|
|
|
void llm_graph_context::build_pooling( |
|
|
ggml_tensor * cls, |
|
|
ggml_tensor * cls_b, |
|
|
ggml_tensor * cls_out, |
|
|
ggml_tensor * cls_out_b) const { |
|
|
if (!cparams.embeddings) { |
|
|
return; |
|
|
} |
|
|
|
|
|
ggml_tensor * inp = res->t_embd; |
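    // pooling reads the result_norm/result_embd tensor that the model graph stored in res->t_embd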
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
GGML_ASSERT(inp != nullptr && "missing result_norm/result_embd tensor"); |
|
|
|
|
|
ggml_tensor * cur; |
|
|
|
|
|
switch (pooling_type) { |
|
|
case LLAMA_POOLING_TYPE_NONE: |
|
|
{ |
|
|
cur = inp; |
|
|
} break; |
|
|
case LLAMA_POOLING_TYPE_MEAN: |
|
|
{ |
|
|
ggml_tensor * inp_mean = build_inp_mean(); |
|
|
cur = ggml_mul_mat(ctx0, ggml_cont(ctx0, ggml_transpose(ctx0, inp)), inp_mean); |
|
|
} break; |
|
|
case LLAMA_POOLING_TYPE_CLS: |
|
|
case LLAMA_POOLING_TYPE_LAST: |
|
|
{ |
|
|
ggml_tensor * inp_cls = build_inp_cls(); |
|
|
cur = ggml_get_rows(ctx0, inp, inp_cls); |
|
|
} break; |
|
|
case LLAMA_POOLING_TYPE_RANK: |
|
|
{ |
|
|
ggml_tensor * inp_cls = build_inp_cls(); |
|
|
inp = ggml_get_rows(ctx0, inp, inp_cls); |
|
|
|
|
|
if (cls) { |
|
|
|
|
|
|
|
|
cur = ggml_mul_mat(ctx0, cls, inp); |
|
|
if (cls_b) { |
|
|
cur = ggml_add(ctx0, cur, cls_b); |
|
|
} |
|
|
cur = ggml_tanh(ctx0, cur); |
|
|
|
|
|
|
|
|
|
|
|
if (cls_out) { |
|
|
cur = ggml_mul_mat(ctx0, cls_out, cur); |
|
|
if (cls_out_b) { |
|
|
cur = ggml_add(ctx0, cur, cls_out_b); |
|
|
} |
|
|
} |
|
|
} else if (cls_out) { |
|
|
|
|
|
|
|
|
cur = ggml_mul_mat(ctx0, cls_out, inp); |
|
|
if (cls_out_b) { |
|
|
cur = ggml_add(ctx0, cur, cls_out_b); |
|
|
} |
|
|
} else { |
|
|
GGML_ABORT("RANK pooling requires either cls+cls_b or cls_out+cls_out_b"); |
|
|
} |
|
|
} break; |
|
|
default: |
|
|
{ |
|
|
GGML_ABORT("unknown pooling type"); |
|
|
} |
|
|
} |
|
|
|
|
|
cb(cur, "result_embd_pooled", -1); |
|
|
res->t_embd_pooled = cur; |
|
|
|
|
|
ggml_build_forward_expand(gf, cur); |
|
|
} |
|
|
|
|
|
int32_t llama_relative_position_bucket(llama_pos x, llama_pos y, uint64_t n_buckets, bool bidirectional) { |
|
|
|
|
|
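    // T5-style relative position bucketing: half of the buckets hold exact offsets, the other
    // half are logarithmically spaced offsets up to max_distance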
const int64_t max_distance = 128; |
|
|
|
|
|
if (bidirectional) { |
|
|
n_buckets >>= 1; |
|
|
} |
|
|
|
|
|
const int64_t max_exact = n_buckets >> 1; |
|
|
|
|
|
int32_t relative_position = x - y; |
|
|
int32_t relative_bucket = 0; |
|
|
|
|
|
if (bidirectional) { |
|
|
relative_bucket += (relative_position > 0) * n_buckets; |
|
|
relative_position = abs(relative_position); |
|
|
} else { |
|
|
relative_position = -std::min<int32_t>(relative_position, 0); |
|
|
} |
|
|
|
|
|
int32_t relative_position_if_large = floorf(max_exact + logf(1.0 * relative_position / max_exact) * (n_buckets - max_exact) / log(1.0 * max_distance / max_exact)); |
|
|
relative_position_if_large = std::min<int32_t>(relative_position_if_large, n_buckets - 1); |
|
|
relative_bucket += (relative_position < max_exact ? relative_position : relative_position_if_large); |
|
|
|
|
|
return relative_bucket; |
|
|
} |
|
|
|