| #include "llama-graph.h" |
|
|
| #include "llama-impl.h" |
| #include "llama-batch.h" |
| #include "llama-cparams.h" |
|
|
| #include "llama-kv-cache.h" |
| #include "llama-kv-cache-iswa.h" |
| #include "llama-memory-hybrid.h" |
| #include "llama-memory-hybrid-iswa.h" |
| #include "llama-memory-recurrent.h" |
|
|
| #include <cassert> |
| #include <cmath> |
| #include <cstring> |
| #include <numeric> |
| #include <sstream> |
| #include <unordered_set> |
|
|
| |
|
|
| static ggml_tensor * build_kq_mask( |
| ggml_context * ctx, |
| const llama_kv_cache_context * mctx, |
| const llama_ubatch & ubatch, |
| const llama_cparams & cparams) { |
| const auto n_kv = mctx->get_n_kv(); |
| const auto n_tokens = ubatch.n_tokens; |
| const auto n_stream = cparams.kv_unified ? 1 : ubatch.n_seqs_unq; |
|
|
| return ggml_new_tensor_4d(ctx, GGML_TYPE_F32, n_kv, n_tokens/n_stream, 1, n_stream); |
| } |
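|
| // illustrative note: with a split KV cache the ubatch is assumed to spread its |
| // tokens evenly across the streams, so e.g. n_tokens = 8 with n_stream = 2 |
| // yields a mask of shape [n_kv, 4, 1, 2] - one [n_kv, 4] slice per stream |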
|
|
| static bool can_reuse_kq_mask( |
| ggml_tensor * kq_mask, |
| const llama_kv_cache_context * mctx, |
| const llama_ubatch & ubatch, |
| const llama_cparams & cparams) { |
| const auto n_kv = mctx->get_n_kv(); |
| const auto n_tokens = ubatch.n_tokens; |
| const auto n_stream = cparams.kv_unified ? 1 : ubatch.n_seqs_unq; |
|
|
| bool res = true; |
|
|
| res &= (kq_mask->ne[0] == n_kv); |
| res &= (kq_mask->ne[1] == n_tokens/n_stream); |
| res &= (kq_mask->ne[2] == 1); |
| res &= (kq_mask->ne[3] == n_stream); |
|
|
| return res; |
| } |
|
|
| |
|
|
| void llm_graph_input_embd::set_input(const llama_ubatch * ubatch) { |
| if (ubatch->token) { |
| const int64_t n_tokens = ubatch->n_tokens; |
|
|
| ggml_backend_tensor_set(tokens, ubatch->token, 0, n_tokens*ggml_element_size(tokens)); |
| } |
|
|
| if (ubatch->embd) { |
| GGML_ASSERT(n_embd == embd->ne[0]); |
|
|
| const int64_t n_tokens = ubatch->n_tokens; |
|
|
| ggml_backend_tensor_set(embd, ubatch->embd, 0, n_tokens*n_embd*ggml_element_size(embd)); |
| } |
| } |
|
|
| bool llm_graph_input_embd::can_reuse(const llm_graph_params & params) { |
| bool res = true; |
|
|
| res &= (!params.ubatch.token) || (tokens && tokens->ne[0] == params.ubatch.n_tokens); |
| res &= (!params.ubatch.embd) || (embd && embd->ne[1] == params.ubatch.n_tokens); |
|
|
| return res; |
| } |
|
|
| void llm_graph_input_pos::set_input(const llama_ubatch * ubatch) { |
| if (ubatch->pos && pos) { |
| const int64_t n_tokens = ubatch->n_tokens; |
|
|
| if (ubatch->token && n_pos_per_embd == 4) { |
| // M-RoPE with plain text tokens: the first three position sections take the |
| // same value and the fourth section is zero |
| std::vector<llama_pos> pos_data(n_tokens*n_pos_per_embd); |
| // duplicate the text positions into each section |
| for (int i = 0; i < n_tokens; ++i) { |
| pos_data[ i] = ubatch->pos[i]; |
| pos_data[ n_tokens + i] = ubatch->pos[i]; |
| pos_data[2 * n_tokens + i] = ubatch->pos[i]; |
| pos_data[3 * n_tokens + i] = 0; |
| } |
| ggml_backend_tensor_set(pos, pos_data.data(), 0, pos_data.size()*ggml_element_size(pos)); |
| } else { |
| ggml_backend_tensor_set(pos, ubatch->pos, 0, n_tokens*n_pos_per_embd*ggml_element_size(pos)); |
| } |
| } |
| } |
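|
| // worked example (illustrative): with n_pos_per_embd == 4 (M-RoPE) and a text |
| // ubatch at positions {5, 6, 7}, pos_data is laid out section-major as |
| //   [5 6 7, 5 6 7, 5 6 7, 0 0 0] |
| // i.e. the three RoPE sections repeat the text position and the fourth is zero |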
|
|
| bool llm_graph_input_pos::can_reuse(const llm_graph_params & params) { |
| bool res = true; |
|
|
| res &= pos->ne[0] == params.ubatch.n_tokens*n_pos_per_embd; |
|
|
| return res; |
| } |
|
|
| void llm_graph_input_attn_temp::set_input(const llama_ubatch * ubatch) { |
| if (ubatch->pos && attn_scale) { |
| const int64_t n_tokens = ubatch->n_tokens; |
|
|
| GGML_ASSERT(f_attn_temp_scale != 0.0f); |
| GGML_ASSERT(n_attn_temp_floor_scale != 0); |
|
|
| std::vector<float> attn_scale_data(n_tokens, 0.0f); |
| for (int i = 0; i < n_tokens; ++i) { |
| const float pos = ubatch->pos[i]; |
| attn_scale_data[i] = std::log( |
| std::floor((pos + f_attn_temp_offset) / n_attn_temp_floor_scale) + 1.0 |
| ) * f_attn_temp_scale + 1.0; |
| } |
|
|
| ggml_backend_tensor_set(attn_scale, attn_scale_data.data(), 0, n_tokens*ggml_element_size(attn_scale)); |
| } |
| } |
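|
| // worked example (illustrative, assuming n_attn_temp_floor_scale = 8192, |
| // f_attn_temp_offset = 1.0 and f_attn_temp_scale = 0.1): pos = 0 gives |
| //   log(floor(1/8192) + 1) * 0.1 + 1 = 1.0 |
| // while pos = 81919 gives log(11) * 0.1 + 1 ~= 1.24, so attention logits are |
| // scaled up smoothly for tokens deep into a long context |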
|
|
| void llm_graph_input_pos_bucket::set_input(const llama_ubatch * ubatch) { |
| if (pos_bucket) { |
| const int64_t n_tokens = ubatch->n_tokens; |
|
|
| GGML_ASSERT(ggml_backend_buffer_is_host(pos_bucket->buffer)); |
| GGML_ASSERT(!ubatch->equal_seqs()); |
|
|
| int32_t * data = (int32_t *) pos_bucket->data; |
|
|
| for (int j = 0; j < n_tokens; ++j) { |
| for (int i = 0; i < n_tokens; ++i) { |
| data[j*n_tokens + i] = llama_relative_position_bucket(ubatch->pos[i], ubatch->pos[j], hparams.n_rel_attn_bkts, true); |
| } |
| } |
| } |
| } |
|
|
| void llm_graph_input_pos_bucket_kv::set_input(const llama_ubatch * ubatch) { |
| if (pos_bucket) { |
| mctx->set_input_pos_bucket(pos_bucket, ubatch); |
| } |
| } |
|
|
| void llm_graph_input_out_ids::set_input(const llama_ubatch * ubatch) { |
| GGML_ASSERT(out_ids); |
|
|
| const int64_t n_tokens = ubatch->n_tokens; |
|
|
| GGML_ASSERT(ggml_backend_buffer_is_host(out_ids->buffer)); |
| int32_t * data = (int32_t *) out_ids->data; |
|
|
| if (n_outputs == n_tokens) { |
| for (int i = 0; i < n_tokens; ++i) { |
| data[i] = i; |
| } |
|
|
| return; |
| } |
|
|
| GGML_ASSERT(ubatch->output); |
|
|
| int n_outputs = 0; |
|
|
| for (int i = 0; i < n_tokens; ++i) { |
| if (ubatch->output[i]) { |
| data[n_outputs++] = i; |
| } |
| } |
| } |
|
|
| bool llm_graph_input_out_ids::can_reuse(const llm_graph_params & params) { |
| bool res = true; |
|
|
| res &= n_outputs == params.n_outputs; |
|
|
| return res; |
| } |
|
|
| void llm_graph_input_mean::set_input(const llama_ubatch * ubatch) { |
| if (cparams.embeddings && |
| (cparams.pooling_type == LLAMA_POOLING_TYPE_MEAN || |
| cparams.pooling_type == LLAMA_POOLING_TYPE_RANK )) { |
|
|
| const int64_t n_tokens = ubatch->n_tokens; |
| const int64_t n_seq_tokens = ubatch->n_seq_tokens; |
| const int64_t n_seqs_unq = ubatch->n_seqs_unq; |
|
|
| GGML_ASSERT(mean); |
| GGML_ASSERT(ggml_backend_buffer_is_host(mean->buffer)); |
|
|
| float * data = (float *) mean->data; |
| memset(mean->data, 0, n_tokens*n_seqs_unq*ggml_element_size(mean)); |
|
|
| std::vector<uint64_t> sums(n_seqs_unq, 0); |
| for (int i = 0; i < n_tokens; i += n_seq_tokens) { |
| for (int s = 0; s < ubatch->n_seq_id[i]; ++s) { |
| const llama_seq_id seq_id = ubatch->seq_id[i][s]; |
| const int32_t seq_idx = ubatch->seq_idx[seq_id]; |
|
|
| sums[seq_idx] += ubatch->n_seq_tokens; |
| } |
| } |
|
|
| std::vector<float> div(n_seqs_unq, 0.0f); |
| for (int s = 0; s < n_seqs_unq; ++s) { |
| const uint64_t sum = sums[s]; |
| if (sum > 0) { |
| div[s] = 1.0f/float(sum); |
| } |
| } |
|
|
| for (int i = 0; i < n_tokens; i += n_seq_tokens) { |
| for (int s = 0; s < ubatch->n_seq_id[i]; ++s) { |
| const llama_seq_id seq_id = ubatch->seq_id[i][s]; |
| const int32_t seq_idx = ubatch->seq_idx[seq_id]; |
|
|
| for (int j = 0; j < n_seq_tokens; ++j) { |
| data[seq_idx*n_tokens + i + j] = div[seq_idx]; |
| } |
| } |
| } |
| } |
| } |
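|
| // illustrative sketch: for a ubatch with two sequences of lengths 3 and 1, the |
| // resulting matrix (one row per sequence, one column per token) is |
| //   [1/3 1/3 1/3  0] |
| //   [ 0   0   0   1] |
| // so a single ggml_mul_mat() with the token embeddings performs mean pooling |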
|
|
| void llm_graph_input_cls::set_input(const llama_ubatch * ubatch) { |
| const int64_t n_tokens = ubatch->n_tokens; |
| const int64_t n_seqs_unq = ubatch->n_seqs_unq; |
|
|
| if (cparams.embeddings && ( |
| cparams.pooling_type == LLAMA_POOLING_TYPE_CLS || |
| cparams.pooling_type == LLAMA_POOLING_TYPE_RANK || |
| cparams.pooling_type == LLAMA_POOLING_TYPE_LAST |
| )) { |
| GGML_ASSERT(cls); |
| GGML_ASSERT(ggml_backend_buffer_is_host(cls->buffer)); |
|
|
| uint32_t * data = (uint32_t *) cls->data; |
| memset(cls->data, 0, n_seqs_unq*ggml_element_size(cls)); |
|
|
| std::vector<int> target_pos(n_seqs_unq, -1); |
| std::vector<int> target_row(n_seqs_unq, -1); |
|
|
| const bool last = ( |
| cparams.pooling_type == LLAMA_POOLING_TYPE_LAST || |
| (cparams.pooling_type == LLAMA_POOLING_TYPE_RANK && (arch == LLM_ARCH_QWEN3 || arch == LLM_ARCH_QWEN3VL)) |
| ); |
|
|
| for (int i = 0; i < n_tokens; ++i) { |
| const llama_pos pos = ubatch->pos[i]; |
|
|
| for (int s = 0; s < ubatch->n_seq_id[i]; ++s) { |
| const llama_seq_id seq_id = ubatch->seq_id[i][s]; |
| const int32_t seq_idx = ubatch->seq_idx[seq_id]; |
|
|
| if ( |
| (target_pos[seq_idx] == -1) || |
| ( last && pos >= target_pos[seq_idx]) || |
| (!last && pos < target_pos[seq_idx]) |
| ) { |
| target_pos[seq_idx] = pos; |
| target_row[seq_idx] = i; |
| } |
| } |
| } |
|
|
| for (int s = 0; s < n_seqs_unq; ++s) { |
| if (target_row[s] >= 0) { |
| data[s] = target_row[s]; |
| } |
| } |
| } |
| } |
|
|
| void llm_graph_input_rs::set_input(const llama_ubatch * ubatch) { |
| GGML_UNUSED(ubatch); |
|
|
| const int64_t n_rs = mctx->get_n_rs(); |
|
|
| if (s_copy) { |
| GGML_ASSERT(ggml_backend_buffer_is_host(s_copy->buffer)); |
| int32_t * data = (int32_t *) s_copy->data; |
|
|
| // assuming copy destinations ALWAYS happen ONLY on the cells between head and head+n |
| for (uint32_t i = 0; i < n_rs; ++i) { |
| data[i] = mctx->s_copy(i); |
| } |
| } |
| } |
|
|
| bool llm_graph_input_rs::can_reuse(const llm_graph_params & params) { |
| const auto * mctx = static_cast<const llama_memory_recurrent_context *>(params.mctx); |
|
|
| this->mctx = mctx; |
|
|
| bool res = true; |
|
|
| res &= s_copy->ne[0] == mctx->get_n_rs(); |
|
|
| res &= s_copy_main->ne[0] == params.ubatch.n_seqs; |
| res &= s_copy_extra->ne[0] == mctx->get_n_rs() - params.ubatch.n_seqs; |
|
|
| res &= head == mctx->get_head(); |
| res &= rs_z == mctx->get_rs_z(); |
|
|
| return res; |
| } |
|
|
| void llm_graph_input_cross_embd::set_input(const llama_ubatch * ubatch) { |
| GGML_UNUSED(ubatch); |
|
|
| if (cross_embd && !cross->v_embd.empty()) { |
| assert(cross_embd->type == GGML_TYPE_F32); |
|
|
| ggml_backend_tensor_set(cross_embd, cross->v_embd.data(), 0, ggml_nbytes(cross_embd)); |
| } |
| } |
|
|
| static void print_mask(const float * data, int64_t n_tokens, int64_t n_kv, int64_t n_swa, llama_swa_type swa_type) { |
| LLAMA_LOG_DEBUG("%s: === Attention mask ===\n", __func__); |
| const char * swa_type_str = "unknown"; |
|
|
| switch (swa_type) { |
| case LLAMA_SWA_TYPE_NONE: swa_type_str = "LLAMA_SWA_TYPE_NONE"; break; |
| case LLAMA_SWA_TYPE_STANDARD: swa_type_str = "LLAMA_SWA_TYPE_STANDARD"; break; |
| case LLAMA_SWA_TYPE_CHUNKED: swa_type_str = "LLAMA_SWA_TYPE_CHUNKED"; break; |
| case LLAMA_SWA_TYPE_SYMMETRIC: swa_type_str = "LLAMA_SWA_TYPE_SYMMETRIC"; break; |
| } |
|
|
| LLAMA_LOG_DEBUG("%s: n_swa : %d, n_kv: %d, swq_type: %s\n", __func__, (int)n_swa, (int)n_kv, swa_type_str); |
| LLAMA_LOG_DEBUG("%s: '0' = can attend, '∞' = masked\n", __func__); |
| LLAMA_LOG_DEBUG("%s: Rows = query tokens, Columns = key/value tokens\n\n", __func__); |
|
|
| LLAMA_LOG_DEBUG(" "); |
| for (int j = 0; j < std::min((int64_t)20, n_kv); ++j) { |
| LLAMA_LOG_DEBUG("%2d", j); |
| } |
| LLAMA_LOG_DEBUG("\n"); |
|
|
| for (int i = 0; i < std::min((int64_t)20, n_tokens); ++i) { |
| LLAMA_LOG_DEBUG(" %2d ", i); |
| for (int j = 0; j < std::min((int64_t)20, n_kv); ++j) { |
| float val = data[i * n_kv + j]; |
| if (val == -INFINITY) { |
| LLAMA_LOG_DEBUG(" ∞"); |
| } else { |
| LLAMA_LOG_DEBUG(" 0"); |
| } |
| } |
| LLAMA_LOG_DEBUG("\n"); |
| } |
| } |
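|
| // usage sketch: with the debug flag enabled, a 3-token causal mask for a single |
| // sequence prints roughly as |
| //      0  1  2 |
| //   0  0  ∞  ∞ |
| //   1  0  0  ∞ |
| //   2  0  0  0 |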
|
|
| void llm_graph_input_attn_no_cache::set_input(const llama_ubatch * ubatch) { |
| const int64_t n_kv = ubatch->n_tokens; |
| const int64_t n_tokens = ubatch->n_tokens; |
|
|
| const auto fill_mask = [&](float * data, int n_swa, llama_swa_type swa_type) { |
| for (int i1 = 0; i1 < n_tokens; ++i1) { |
| const llama_seq_id s1 = ubatch->seq_id[i1][0]; |
| const llama_pos p1 = ubatch->pos[i1]; |
|
|
| const uint64_t idst = i1*n_kv; |
|
|
| for (int i0 = 0; i0 < n_tokens; ++i0) { |
| const llama_seq_id s0 = ubatch->seq_id[i0][0]; |
| const llama_pos p0 = ubatch->pos[i0]; |
|
|
| // mask out tokens from other sequences |
| if (s0 != s1) { |
| continue; |
| } |
|
|
| // mask out future tokens (causal attention) |
| if (cparams.causal_attn && p0 > p1) { |
| continue; |
| } |
|
|
| // apply sliding-window masking if enabled |
| if (llama_hparams::is_masked_swa(n_swa, swa_type, p0, p1)) { |
| continue; |
| } |
|
|
| data[idst + i0] = hparams.use_alibi ? -std::abs(p0 - p1) : 0.0f; |
| } |
| } |
| }; |
|
|
| { |
| GGML_ASSERT(self_kq_mask); |
| GGML_ASSERT(ggml_backend_buffer_is_host(self_kq_mask->buffer)); |
|
|
| float * data = (float *) self_kq_mask->data; |
|
|
| std::fill(data, data + ggml_nelements(self_kq_mask), -INFINITY); |
|
|
| fill_mask(data, 0, LLAMA_SWA_TYPE_NONE); |
|
|
| if (debug) { |
| print_mask(data, n_tokens, n_kv, 0, LLAMA_SWA_TYPE_NONE); |
| } |
| } |
|
|
| if (hparams.swa_type != LLAMA_SWA_TYPE_NONE) { |
| GGML_ASSERT(self_kq_mask_swa); |
| GGML_ASSERT(ggml_backend_buffer_is_host(self_kq_mask_swa->buffer)); |
|
|
| float * data = (float *) self_kq_mask_swa->data; |
|
|
| std::fill(data, data + ggml_nelements(self_kq_mask_swa), -INFINITY); |
|
|
| fill_mask(data, hparams.n_swa, hparams.swa_type); |
|
|
| if (debug) { |
| print_mask(data, n_tokens, n_kv, hparams.n_swa, hparams.swa_type); |
| } |
| } |
| } |
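|
| // illustrative example: for interleaved tokens with seq ids {0, 1, 0} at |
| // positions {0, 0, 1}, fill_mask produces (causal, no SWA): |
| //   row 0: 0 ∞ ∞ |
| //   row 1: ∞ 0 ∞ |
| //   row 2: 0 ∞ 0 |
| // tokens attend only within their own sequence and never to future positions |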
|
|
| void llm_graph_input_attn_kv::set_input(const llama_ubatch * ubatch) { |
| mctx->set_input_k_idxs(self_k_idxs, ubatch); |
| mctx->set_input_v_idxs(self_v_idxs, ubatch); |
|
|
| mctx->set_input_kq_mask(self_kq_mask, ubatch, cparams.causal_attn); |
| } |
|
|
| bool llm_graph_input_attn_kv::can_reuse(const llm_graph_params & params) { |
| const auto * mctx = static_cast<const llama_kv_cache_context *>(params.mctx); |
|
|
| this->mctx = mctx; |
|
|
| bool res = true; |
|
|
| res &= self_k_idxs->ne[0] == params.ubatch.n_tokens; |
| |
|
|
| res &= can_reuse_kq_mask(self_kq_mask, mctx, params.ubatch, params.cparams); |
|
|
| return res; |
| } |
|
|
| void llm_graph_input_attn_k::set_input(const llama_ubatch * ubatch) { |
| mctx->set_input_k_idxs(self_k_idxs, ubatch); |
|
|
| mctx->set_input_kq_mask(self_kq_mask, ubatch, cparams.causal_attn); |
| } |
|
|
| bool llm_graph_input_attn_k::can_reuse(const llm_graph_params & params) { |
| const auto * mctx = static_cast<const llama_kv_cache_context *>(params.mctx); |
|
|
| this->mctx = mctx; |
|
|
| bool res = true; |
|
|
| res &= self_k_idxs->ne[0] == params.ubatch.n_tokens; |
|
|
| res &= can_reuse_kq_mask(self_kq_mask, mctx, params.ubatch, params.cparams); |
|
|
| return res; |
| } |
|
|
| void llm_graph_input_attn_kv_iswa::set_input(const llama_ubatch * ubatch) { |
| mctx->get_base()->set_input_k_idxs(self_k_idxs, ubatch); |
| mctx->get_base()->set_input_v_idxs(self_v_idxs, ubatch); |
|
|
| mctx->get_base()->set_input_kq_mask(self_kq_mask, ubatch, cparams.causal_attn); |
|
|
| mctx->get_swa()->set_input_k_idxs(self_k_idxs_swa, ubatch); |
| mctx->get_swa()->set_input_v_idxs(self_v_idxs_swa, ubatch); |
|
|
| mctx->get_swa()->set_input_kq_mask(self_kq_mask_swa, ubatch, cparams.causal_attn); |
| } |
|
|
| bool llm_graph_input_attn_kv_iswa::can_reuse(const llm_graph_params & params) { |
| const auto * mctx = static_cast<const llama_kv_cache_iswa_context *>(params.mctx); |
|
|
| this->mctx = mctx; |
|
|
| bool res = true; |
|
|
| res &= self_k_idxs->ne[0] == params.ubatch.n_tokens; |
| |
|
|
| res &= self_k_idxs_swa->ne[0] == params.ubatch.n_tokens; |
| |
|
|
| res &= can_reuse_kq_mask(self_kq_mask, mctx->get_base(), params.ubatch, params.cparams); |
| res &= can_reuse_kq_mask(self_kq_mask_swa, mctx->get_swa(), params.ubatch, params.cparams); |
|
|
| return res; |
| } |
|
|
| void llm_graph_input_attn_cross::set_input(const llama_ubatch * ubatch) { |
| GGML_ASSERT(cross_kq_mask); |
|
|
| const int64_t n_enc = cross_kq_mask->ne[0]; |
| const int64_t n_tokens = ubatch->n_tokens; |
|
|
| GGML_ASSERT(ggml_backend_buffer_is_host(cross_kq_mask->buffer)); |
| GGML_ASSERT(!ubatch->equal_seqs()); |
|
|
| float * data = (float *) cross_kq_mask->data; |
|
|
| for (int i = 0; i < n_tokens; ++i) { |
| GGML_ASSERT(!cross->seq_ids_enc.empty() && "llama_encode must be called first"); |
| for (int j = 0; j < n_enc; ++j) { |
| float f = -INFINITY; |
|
|
| for (int s = 0; s < ubatch->n_seq_id[i]; ++s) { |
| const llama_seq_id seq_id = ubatch->seq_id[i][s]; |
|
|
| if (cross->seq_ids_enc[j].find(seq_id) != cross->seq_ids_enc[j].end()) { |
| f = 0.0f; |
| } |
| } |
|
|
| data[i*n_enc + j] = f; |
| } |
| } |
| } |
|
|
| void llm_graph_input_mem_hybrid::set_input(const llama_ubatch * ubatch) { |
| mctx->get_attn()->set_input_k_idxs(inp_attn->self_k_idxs, ubatch); |
| mctx->get_attn()->set_input_v_idxs(inp_attn->self_v_idxs, ubatch); |
|
|
| mctx->get_attn()->set_input_kq_mask(inp_attn->self_kq_mask, ubatch, cparams.causal_attn); |
|
|
| const int64_t n_rs = mctx->get_recr()->get_n_rs(); |
|
|
| if (inp_rs->s_copy) { |
| GGML_ASSERT(ggml_backend_buffer_is_host(inp_rs->s_copy->buffer)); |
| int32_t * data = (int32_t *) inp_rs->s_copy->data; |
|
|
| // assuming copy destinations ALWAYS happen ONLY on the cells between head and head+n |
| for (uint32_t i = 0; i < n_rs; ++i) { |
| data[i] = mctx->get_recr()->s_copy(i); |
| } |
| } |
| } |
|
|
| bool llm_graph_input_mem_hybrid::can_reuse(const llm_graph_params & params) { |
| const auto * mctx = static_cast<const llama_memory_hybrid_context *>(params.mctx); |
|
|
| this->mctx = mctx; |
|
|
| bool res = true; |
|
|
| res &= inp_attn->self_k_idxs->ne[0] == params.ubatch.n_tokens; |
| |
|
|
| res &= can_reuse_kq_mask(inp_attn->self_kq_mask, mctx->get_attn(), params.ubatch, params.cparams); |
|
|
| res &= inp_rs->s_copy->ne[0] == mctx->get_recr()->get_n_rs(); |
|
|
| res &= inp_rs->s_copy_main->ne[0] == params.ubatch.n_seqs; |
| res &= inp_rs->s_copy_extra->ne[0] == mctx->get_recr()->get_n_rs() - params.ubatch.n_seqs; |
|
|
| res &= inp_rs->head == mctx->get_recr()->get_head(); |
| res &= inp_rs->rs_z == mctx->get_recr()->get_rs_z(); |
|
|
| return res; |
| } |
|
|
| // |
| // K-only variant of the hybrid memory input - used when the attention layers |
| // store only keys (no separate V cache) |
| // |
| void llm_graph_input_mem_hybrid_k::set_input(const llama_ubatch * ubatch) { |
| mctx->get_attn()->set_input_k_idxs(inp_attn->self_k_idxs, ubatch); |
|
|
| mctx->get_attn()->set_input_kq_mask(inp_attn->self_kq_mask, ubatch, cparams.causal_attn); |
|
|
| const int64_t n_rs = mctx->get_recr()->get_n_rs(); |
|
|
| if (inp_rs->s_copy) { |
| GGML_ASSERT(ggml_backend_buffer_is_host(inp_rs->s_copy->buffer)); |
| int32_t * data = (int32_t *) inp_rs->s_copy->data; |
|
|
| // assuming copy destinations ALWAYS happen ONLY on the cells between head and head+n |
| for (uint32_t i = 0; i < n_rs; ++i) { |
| data[i] = mctx->get_recr()->s_copy(i); |
| } |
| } |
| } |
|
|
| bool llm_graph_input_mem_hybrid_k::can_reuse(const llm_graph_params & params) { |
| const auto * mctx = static_cast<const llama_memory_hybrid_context *>(params.mctx); |
|
|
| this->mctx = mctx; |
|
|
| bool res = true; |
|
|
| res &= inp_attn->self_k_idxs->ne[0] == params.ubatch.n_tokens; |
|
|
| res &= can_reuse_kq_mask(inp_attn->self_kq_mask, mctx->get_attn(), params.ubatch, params.cparams); |
|
|
| res &= inp_rs->s_copy->ne[0] == mctx->get_recr()->get_n_rs(); |
|
|
| res &= inp_rs->s_copy_main->ne[0] == params.ubatch.n_seqs; |
| res &= inp_rs->s_copy_extra->ne[0] == mctx->get_recr()->get_n_rs() - params.ubatch.n_seqs; |
|
|
| res &= inp_rs->head == mctx->get_recr()->get_head(); |
| res &= inp_rs->rs_z == mctx->get_recr()->get_rs_z(); |
|
|
| return res; |
| } |
|
|
| void llm_graph_input_mem_hybrid_iswa::set_input(const llama_ubatch * ubatch) { |
| const auto * attn_ctx = mctx->get_attn(); |
|
|
| // base (non-SWA) attention inputs - set only when allocated |
| if (inp_attn->self_k_idxs && inp_attn->self_k_idxs->buffer) { |
| attn_ctx->get_base()->set_input_k_idxs(inp_attn->self_k_idxs, ubatch); |
| attn_ctx->get_base()->set_input_v_idxs(inp_attn->self_v_idxs, ubatch); |
|
|
| attn_ctx->get_base()->set_input_kq_mask(inp_attn->self_kq_mask, ubatch, cparams.causal_attn); |
| } |
|
|
| // SWA attention inputs - set only when allocated |
| if (inp_attn->self_k_idxs_swa && inp_attn->self_k_idxs_swa->buffer) { |
| attn_ctx->get_swa()->set_input_k_idxs(inp_attn->self_k_idxs_swa, ubatch); |
| attn_ctx->get_swa()->set_input_v_idxs(inp_attn->self_v_idxs_swa, ubatch); |
|
|
| attn_ctx->get_swa()->set_input_kq_mask(inp_attn->self_kq_mask_swa, ubatch, cparams.causal_attn); |
| } |
|
|
| const int64_t n_rs = mctx->get_recr()->get_n_rs(); |
|
|
| if (inp_rs->s_copy) { |
| GGML_ASSERT(ggml_backend_buffer_is_host(inp_rs->s_copy->buffer)); |
| int32_t * data = (int32_t *) inp_rs->s_copy->data; |
|
|
| // assuming copy destinations ALWAYS happen ONLY on the cells between head and head+n |
| for (uint32_t i = 0; i < n_rs; ++i) { |
| data[i] = mctx->get_recr()->s_copy(i); |
| } |
| } |
| } |
|
|
| bool llm_graph_input_mem_hybrid_iswa::can_reuse(const llm_graph_params & params) { |
| const auto * mctx = static_cast<const llama_memory_hybrid_iswa_context *>(params.mctx); |
|
|
| this->mctx = mctx; |
|
|
| bool res = true; |
|
|
| const auto * attn_ctx = mctx->get_attn(); |
|
|
| // base (non-SWA) attention inputs |
| if (inp_attn->self_k_idxs && inp_attn->self_k_idxs->buffer) { |
| res &= inp_attn->self_k_idxs->ne[0] == params.ubatch.n_tokens; |
| |
|
|
| res &= can_reuse_kq_mask(inp_attn->self_kq_mask, attn_ctx->get_base(), params.ubatch, params.cparams); |
| } |
|
|
| // SWA attention inputs |
| if (inp_attn->self_k_idxs_swa && inp_attn->self_k_idxs_swa->buffer) { |
| res &= inp_attn->self_k_idxs_swa->ne[0] == params.ubatch.n_tokens; |
| |
|
|
| res &= can_reuse_kq_mask(inp_attn->self_kq_mask_swa, attn_ctx->get_swa(), params.ubatch, params.cparams); |
| } |
|
|
| res &= inp_rs->s_copy->ne[0] == mctx->get_recr()->get_n_rs(); |
|
|
| res &= inp_rs->s_copy_main->ne[0] == params.ubatch.n_seqs; |
| res &= inp_rs->s_copy_extra->ne[0] == mctx->get_recr()->get_n_rs() - params.ubatch.n_seqs; |
|
|
| res &= inp_rs->head == mctx->get_recr()->get_head(); |
| res &= inp_rs->rs_z == mctx->get_recr()->get_rs_z(); |
|
|
| return res; |
| } |
|
|
| void llm_graph_input_sampling::set_input(const llama_ubatch * ubatch) { |
| // find the sequences that produce outputs in this ubatch |
| std::unordered_set<llama_seq_id> active_samplers; |
| for (uint32_t i = 0; i < ubatch->n_tokens; i++) { |
| if (ubatch->output[i]) { |
| llama_seq_id seq_id = ubatch->seq_id[i][0]; |
| active_samplers.insert(seq_id); |
| } |
| } |
|
|
| for (auto seq_id : active_samplers) { |
| if (samplers.find(seq_id) == samplers.end()) { |
| continue; |
| } |
|
|
| auto & sampler = samplers[seq_id]; |
|
|
| if (sampler->iface->backend_set_input) { |
| sampler->iface->backend_set_input(sampler); |
| } |
| } |
| } |
|
|
| bool llm_graph_input_sampling::can_reuse(const llm_graph_params & params) { |
| if (samplers.size() != params.samplers.size()) { |
| return false; |
| } |
|
|
| for (const auto & [seq_id, sampler] : params.samplers) { |
| if (samplers[seq_id] != sampler) { |
| return false; |
| } |
| } |
|
|
| return true; |
| } |
|
|
| // |
| // llm_graph_result |
| // |
|
|
| llm_graph_result::llm_graph_result(int64_t max_nodes) : max_nodes(max_nodes) { |
| reset(); |
|
|
| const char * LLAMA_GRAPH_RESULT_DEBUG = getenv("LLAMA_GRAPH_RESULT_DEBUG"); |
| debug = LLAMA_GRAPH_RESULT_DEBUG ? atoi(LLAMA_GRAPH_RESULT_DEBUG) : 0; |
| } |
|
|
| int64_t llm_graph_result::get_max_nodes() const { |
| return max_nodes; |
| } |
|
|
| void llm_graph_result::reset() { |
| t_inp_tokens = nullptr; |
| t_inp_embd = nullptr; |
| t_logits = nullptr; |
| t_embd = nullptr; |
| t_embd_pooled = nullptr; |
| t_sampled.clear(); |
| t_sampled_probs.clear(); |
| t_sampled_logits.clear(); |
| t_candidates.clear(); |
|
|
| params = {}; |
|
|
| inputs.clear(); |
|
|
| buf_compute_meta.resize(ggml_tensor_overhead()*max_nodes + ggml_graph_overhead_custom(max_nodes, false)); |
|
|
| ggml_init_params params = { |
| /*.mem_size   =*/ buf_compute_meta.size(), |
| /*.mem_buffer =*/ buf_compute_meta.data(), |
| /*.no_alloc   =*/ true, |
| }; |
|
|
| ctx_compute.reset(ggml_init(params)); |
|
|
| gf = ggml_new_graph_custom(ctx_compute.get(), max_nodes, false); |
| } |
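|
| // sizing note: the meta buffer must hold every tensor descriptor plus the graph |
| // bookkeeping itself, e.g. with max_nodes = 8192 this is roughly |
| //   8192*ggml_tensor_overhead() + ggml_graph_overhead_custom(8192, false) |
| // bytes; no tensor data is allocated here since no_alloc is true |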
|
|
| void llm_graph_result::set_inputs(const llama_ubatch * ubatch) { |
| for (auto & input : inputs) { |
| input->set_input(ubatch); |
| } |
| } |
|
|
| void llm_graph_result::set_outputs() { |
| if (t_logits != nullptr) { |
| ggml_set_output(t_logits); |
| } |
| if (t_embd != nullptr) { |
| ggml_set_output(t_embd); |
| } |
| if (t_embd_pooled != nullptr) { |
| ggml_set_output(t_embd_pooled); |
| } |
| for (auto & [seq_id, t] : t_sampled) { |
| if (t != nullptr) { |
| ggml_set_output(t); |
| } |
| } |
| for (auto & [seq_id, t] : t_sampled_probs) { |
| if (t != nullptr) { |
| ggml_set_output(t); |
| } |
| } |
| for (auto & [seq_id, t] : t_sampled_logits) { |
| if (t != nullptr) { |
| ggml_set_output(t); |
| } |
| } |
| for (auto & [seq_id, t] : t_candidates) { |
| if (t != nullptr) { |
| ggml_set_output(t); |
| } |
| } |
| } |
|
|
| bool llm_graph_result::can_reuse(const llm_graph_params & params) { |
| if (!this->params.allow_reuse(params)) { |
| if (debug > 1) { |
| LLAMA_LOG_DEBUG("%s: cannot reuse graph due to incompatible graph parameters\n", __func__); |
| } |
|
|
| return false; |
| } |
|
|
| if (debug > 1) { |
| LLAMA_LOG_DEBUG("%s: checking compatibility of %d inputs:\n", __func__, (int) inputs.size()); |
| } |
|
|
| bool res = true; |
|
|
| for (auto & input : inputs) { |
| const bool cur = input->can_reuse(params); |
|
|
| if (debug > 1) { |
| LLAMA_LOG_DEBUG("%s: can_reuse = %d\n", "placeholder", cur); |
| } |
|
|
| res = res && cur; |
| } |
|
|
| if (debug > 0) { |
| LLAMA_LOG_DEBUG("%s: can reuse graph = %d\n", __func__, res); |
| } |
|
|
| return res; |
| } |
|
|
| llm_graph_input_i * llm_graph_result::add_input(llm_graph_input_ptr input) { |
| inputs.emplace_back(std::move(input)); |
| return inputs.back().get(); |
| } |
|
|
| void llm_graph_result::set_params(const llm_graph_params & params) { |
| this->params = params; |
| } |
|
|
| // |
| // llm_graph_context |
| // |
|
|
| llm_graph_context::llm_graph_context(const llm_graph_params & params) : |
| arch (params.arch), |
| hparams (params.hparams), |
| cparams (params.cparams), |
| ubatch (params.ubatch), |
| n_embd (hparams.n_embd), |
| n_layer (hparams.n_layer), |
| n_rot (hparams.n_rot()), |
| n_ctx (cparams.n_ctx), |
| n_head (hparams.n_head()), |
| n_head_kv (hparams.n_head_kv()), |
| n_embd_head_k (hparams.n_embd_head_k()), |
| n_embd_k_gqa (hparams.n_embd_k_gqa()), |
| n_embd_head_v (hparams.n_embd_head_v()), |
| n_embd_v_gqa (hparams.n_embd_v_gqa()), |
| n_expert (hparams.n_expert), |
| n_expert_used (cparams.warmup ? hparams.n_expert : hparams.n_expert_used), |
| freq_base (cparams.rope_freq_base), |
| freq_scale (cparams.rope_freq_scale), |
| ext_factor (cparams.yarn_ext_factor), |
| attn_factor (cparams.yarn_attn_factor), |
| beta_fast (cparams.yarn_beta_fast), |
| beta_slow (cparams.yarn_beta_slow), |
| norm_eps (hparams.f_norm_eps), |
| norm_rms_eps (hparams.f_norm_rms_eps), |
| n_tokens (ubatch.n_tokens), |
| n_outputs (params.n_outputs), |
| n_ctx_orig (cparams.n_ctx_orig_yarn), |
| pooling_type (cparams.pooling_type), |
| rope_type (hparams.rope_type), |
| sched (params.sched), |
| backend_cpu (params.backend_cpu), |
| cvec (params.cvec), |
| loras (params.loras), |
| mctx (params.mctx), |
| cross (params.cross), |
| samplers (params.samplers), |
| cb_func (params.cb), |
| res (params.res), |
| ctx0 (res->get_ctx()), |
| gf (res->get_gf()) { |
| res->set_params(params); |
| } |
|
|
| void llm_graph_context::cb(ggml_tensor * cur, const char * name, int il) const { |
| if (cb_func) { |
| cb_func(ubatch, cur, name, il); |
| } |
| } |
|
|
| ggml_tensor * llm_graph_context::build_cvec( |
| ggml_tensor * cur, |
| int il) const { |
| return cvec->apply_to(ctx0, cur, il); |
| } |
|
|
| ggml_tensor * llm_graph_context::build_lora_mm( |
| ggml_tensor * w, |
| ggml_tensor * cur, |
| ggml_tensor * w_s) const { |
| ggml_tensor * res = ggml_mul_mat(ctx0, w, cur); |
|
|
| for (const auto & lora : *loras) { |
| llama_adapter_lora_weight * lw = lora.first->get_weight(w); |
| if (lw == nullptr) { |
| continue; |
| } |
|
|
| const float adapter_scale = lora.second; |
| const float scale = lw->get_scale(lora.first->alpha, adapter_scale); |
|
|
| ggml_tensor * ab_cur = ggml_mul_mat( |
| ctx0, lw->b, |
| ggml_mul_mat(ctx0, lw->a, cur) |
| ); |
|
|
| ab_cur = ggml_scale(ctx0, ab_cur, scale); |
| res = ggml_add(ctx0, res, ab_cur); |
| } |
|
|
| if (w_s) { |
| res = ggml_mul(ctx0, res, w_s); |
| } |
|
|
| return res; |
| } |
|
|
| ggml_tensor * llm_graph_context::build_lora_mm_id( |
| ggml_tensor * w, |
| ggml_tensor * cur, |
| ggml_tensor * ids) const { |
| ggml_tensor * res = ggml_mul_mat_id(ctx0, w, cur, ids); |
| for (const auto & lora : *loras) { |
| llama_adapter_lora_weight * lw = lora.first->get_weight(w); |
| if (lw == nullptr) { |
| continue; |
| } |
|
|
| const float alpha = lora.first->alpha; |
| const float rank = (float) lw->b->ne[0]; |
| const float scale = alpha ? lora.second * alpha / rank : lora.second; |
|
|
| ggml_tensor * ab_cur = ggml_mul_mat_id( |
| ctx0, lw->b, |
| ggml_mul_mat_id(ctx0, lw->a, cur, ids), |
| ids |
| ); |
|
|
| ab_cur = ggml_scale(ctx0, ab_cur, scale); |
| res = ggml_add(ctx0, res, ab_cur); |
| } |
|
|
| return res; |
| } |
|
|
| ggml_tensor * llm_graph_context::build_norm( |
| ggml_tensor * cur, |
| ggml_tensor * mw, |
| ggml_tensor * mb, |
| llm_norm_type type, |
| int il) const { |
| switch (type) { |
| case LLM_NORM: cur = ggml_norm (ctx0, cur, hparams.f_norm_eps); break; |
| case LLM_NORM_RMS: cur = ggml_rms_norm(ctx0, cur, hparams.f_norm_rms_eps); break; |
| case LLM_NORM_GROUP: |
| { |
| cur = ggml_reshape_3d(ctx0, cur, cur->ne[0], 1, cur->ne[1]); |
| cur = ggml_group_norm(ctx0, cur, hparams.n_norm_groups, hparams.f_norm_group_eps); |
| cur = ggml_reshape_2d(ctx0, cur, cur->ne[0], cur->ne[2]); |
| } break; |
| } |
|
|
| if (mw || mb) { |
| cb(cur, "norm", il); |
| } |
|
|
| if (mw) { |
| cur = ggml_mul(ctx0, cur, mw); |
| if (mb) { |
| cb(cur, "norm_w", il); |
| } |
| } |
|
|
| if (mb) { |
| cur = ggml_add(ctx0, cur, mb); |
| } |
|
|
| return cur; |
| } |
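|
| // usage sketch (model.layers[il].attn_norm is a hypothetical weight tensor): |
| //   cur = build_norm(cur, model.layers[il].attn_norm, nullptr, LLM_NORM_RMS, il); |
| // applies RMS normalization followed by the elementwise weight; passing mb |
| // additionally adds a bias term |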
|
|
| ggml_tensor * llm_graph_context::build_ffn( |
| ggml_tensor * cur, |
| ggml_tensor * up, |
| ggml_tensor * up_b, |
| ggml_tensor * up_s, |
| ggml_tensor * gate, |
| ggml_tensor * gate_b, |
| ggml_tensor * gate_s, |
| ggml_tensor * down, |
| ggml_tensor * down_b, |
| ggml_tensor * down_s, |
| ggml_tensor * act_scales, |
| llm_ffn_op_type type_op, |
| llm_ffn_gate_type type_gate, |
| int il) const { |
| ggml_tensor * tmp = up ? build_lora_mm(up, cur) : cur; |
| cb(tmp, "ffn_up", il); |
|
|
| if (up_b) { |
| tmp = ggml_add(ctx0, tmp, up_b); |
| cb(tmp, "ffn_up_b", il); |
| } |
|
|
| if (up_s) { |
| tmp = ggml_mul(ctx0, tmp, up_s); |
| cb(tmp, "ffn_up_s", il); |
| } |
|
|
| if (gate) { |
| switch (type_gate) { |
| case LLM_FFN_SEQ: |
| { |
| cur = build_lora_mm(gate, tmp); |
| cb(cur, "ffn_gate", il); |
| } break; |
| case LLM_FFN_PAR: |
| { |
| cur = build_lora_mm(gate, cur); |
| cb(cur, "ffn_gate", il); |
| } break; |
| } |
|
|
| if (gate_b) { |
| cur = ggml_add(ctx0, cur, gate_b); |
| cb(cur, "ffn_gate_b", il); |
| } |
|
|
| if (gate_s) { |
| cur = ggml_mul(ctx0, cur, gate_s); |
| cb(cur, "ffn_gate_s", il); |
| } |
|
|
| } else { |
| cur = tmp; |
| } |
|
|
| switch (type_op) { |
| case LLM_FFN_SILU: |
| if (gate && type_gate == LLM_FFN_PAR) { |
| // clamped swiglu variant (per-layer clamp limits, e.g. STEP35) |
| if (arch == LLM_ARCH_STEP35 && il >= 0) { |
| const float limit = hparams.swiglu_clamp_shexp[il]; |
| constexpr float eps = 1e-6f; |
| if (limit > eps) { |
| ggml_tensor * gate_act = ggml_silu(ctx0, cur); |
| cb(gate_act, "ffn_silu", il); |
| gate_act = ggml_clamp(ctx0, gate_act, -INFINITY, limit); |
| cb(gate_act, "ffn_silu_clamped", il); |
|
|
| tmp = ggml_clamp(ctx0, tmp, -limit, limit); |
| cb(tmp, "ffn_up_clamped", il); |
|
|
| cur = ggml_mul(ctx0, gate_act, tmp); |
| cb(cur, "ffn_swiglu_limited", il); |
| type_gate = LLM_FFN_SEQ; |
| break; |
| } |
| } |
|
|
| cur = ggml_swiglu_split(ctx0, cur, tmp); |
| cb(cur, "ffn_swiglu", il); |
| type_gate = LLM_FFN_SEQ; |
| } else { |
| cur = ggml_silu(ctx0, cur); |
| cb(cur, "ffn_silu", il); |
| } break; |
| case LLM_FFN_GELU: |
| if (gate && type_gate == LLM_FFN_PAR) { |
| cur = ggml_geglu_split(ctx0, cur, tmp); |
| cb(cur, "ffn_geglu", il); |
| type_gate = LLM_FFN_SEQ; |
| } else { |
| cur = ggml_gelu(ctx0, cur); |
| cb(cur, "ffn_gelu", il); |
| if (act_scales != NULL) { |
| cur = ggml_div(ctx0, cur, act_scales); |
| cb(cur, "ffn_act", il); |
| } |
| } break; |
| case LLM_FFN_RELU: |
| if (gate && type_gate == LLM_FFN_PAR) { |
| cur = ggml_reglu_split(ctx0, cur, tmp); |
| cb(cur, "ffn_reglu", il); |
| type_gate = LLM_FFN_SEQ; |
| } else { |
| cur = ggml_relu(ctx0, cur); |
| cb(cur, "ffn_relu", il); |
| } break; |
| case LLM_FFN_RELU_SQR: |
| { |
| cur = ggml_relu(ctx0, cur); |
| cb(cur, "ffn_relu", il); |
|
|
| cur = ggml_sqr(ctx0, cur); |
| cb(cur, "ffn_sqr(relu)", il); |
| } break; |
| case LLM_FFN_SWIGLU: |
| { |
| cur = ggml_swiglu(ctx0, cur); |
| cb(cur, "ffn_swiglu", il); |
| } break; |
| case LLM_FFN_GEGLU: |
| { |
| cur = ggml_geglu(ctx0, cur); |
| cb(cur, "ffn_geglu", il); |
| } break; |
| case LLM_FFN_REGLU: |
| { |
| cur = ggml_reglu(ctx0, cur); |
| cb(cur, "ffn_reglu", il); |
| } break; |
| default: |
| GGML_ABORT("fatal error"); |
| } |
|
|
| if (gate && type_gate == LLM_FFN_PAR) { |
| cur = ggml_mul(ctx0, cur, tmp); |
| cb(cur, "ffn_gate_par", il); |
| } |
|
|
| if (down) { |
| cur = build_lora_mm(down, cur); |
| if (arch == LLM_ARCH_GLM4 || arch == LLM_ARCH_GLM4_MOE || arch == LLM_ARCH_JAIS2) { |
| // GLM4 seems to have numerical issues with half-precision accumulators |
| ggml_mul_mat_set_prec(cur, GGML_PREC_F32); |
| } |
| } |
|
|
| if (down_b) { |
| cb(cur, "ffn_down", il); |
| cur = ggml_add(ctx0, cur, down_b); |
| } |
|
|
| if (down_s) { |
| cur = ggml_mul(ctx0, cur, down_s); |
| cb(cur, "ffn_down_s", il); |
| } |
|
|
| return cur; |
| } |
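|
| // usage sketch (ffn_up/ffn_gate/ffn_down are hypothetical layer weights): |
| //   cur = build_ffn(cur, |
| //           ffn_up,   nullptr, nullptr, |
| //           ffn_gate, nullptr, nullptr, |
| //           ffn_down, nullptr, nullptr, |
| //           nullptr, LLM_FFN_SILU, LLM_FFN_PAR, il); |
| // computes the common SwiGLU block: down(silu(gate(x)) * up(x)) |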
|
|
| ggml_tensor * llm_graph_context::build_moe_ffn( |
| ggml_tensor * cur, |
| ggml_tensor * gate_inp, |
| ggml_tensor * up_exps, |
| ggml_tensor * gate_exps, |
| ggml_tensor * down_exps, |
| ggml_tensor * exp_probs_b, |
| int64_t n_expert, |
| int64_t n_expert_used, |
| llm_ffn_op_type type_op, |
| bool norm_w, |
| float w_scale, |
| llama_expert_gating_func_type gating_op, |
| int il, |
| ggml_tensor * probs_in, |
| ggml_tensor * gate_up_exps, |
| ggml_tensor * up_exps_s, |
| ggml_tensor * gate_exps_s, |
| ggml_tensor * down_exps_s) const { |
| return build_moe_ffn( |
| cur, |
| gate_inp, nullptr, |
| up_exps, nullptr, |
| gate_exps, nullptr, |
| down_exps, nullptr, |
| exp_probs_b, |
| n_expert, |
| n_expert_used, |
| type_op, |
| norm_w, |
| w_scale, |
| gating_op, |
| il, |
| probs_in, |
| gate_up_exps, |
| nullptr, |
| up_exps_s, |
| gate_exps_s, |
| down_exps_s |
| ); |
| } |
|
|
| ggml_tensor * llm_graph_context::build_moe_ffn( |
| ggml_tensor * cur, |
| ggml_tensor * gate_inp, |
| ggml_tensor * gate_inp_b, |
| ggml_tensor * up_exps, |
| ggml_tensor * up_exps_b, |
| ggml_tensor * gate_exps, |
| ggml_tensor * gate_exps_b, |
| ggml_tensor * down_exps, |
| ggml_tensor * down_exps_b, |
| ggml_tensor * exp_probs_b, |
| int64_t n_expert, |
| int64_t n_expert_used, |
| llm_ffn_op_type type_op, |
| bool norm_w, |
| float w_scale, |
| llama_expert_gating_func_type gating_op, |
| int il, |
| ggml_tensor * probs_in, |
| ggml_tensor * gate_up_exps, |
| ggml_tensor * gate_up_exps_b, |
| ggml_tensor * up_exps_s, |
| ggml_tensor * gate_exps_s, |
| ggml_tensor * down_exps_s) const { |
| const int64_t n_embd = cur->ne[0]; |
| const int64_t n_tokens = cur->ne[1]; |
| const bool weight_before_ffn = arch == LLM_ARCH_LLAMA4; |
|
|
| ggml_tensor * logits = nullptr; |
|
|
| if (probs_in == nullptr) { |
| logits = build_lora_mm(gate_inp, cur); |
| cb(logits, "ffn_moe_logits", il); |
| } else { |
| logits = probs_in; |
| } |
|
|
| if (gate_inp_b) { |
| logits = ggml_add(ctx0, logits, gate_inp_b); |
| cb(logits, "ffn_moe_logits_biased", il); |
| } |
|
|
| ggml_tensor * probs = nullptr; |
| switch (gating_op) { |
| case LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX: |
| { |
| probs = ggml_soft_max(ctx0, logits); |
| } break; |
| case LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID: |
| { |
| probs = ggml_sigmoid(ctx0, logits); |
| } break; |
| case LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX_WEIGHT: |
| { |
| probs = logits; |
| } break; |
| default: |
| GGML_ABORT("fatal error"); |
| } |
| cb(probs, "ffn_moe_probs", il); |
|
|
| // add the expert-selection bias (introduced in DeepSeek-V3) |
| // keep probs unbiased since it is used later to compute the expert weights |
| ggml_tensor * selection_probs = probs; |
| if (exp_probs_b != nullptr) { |
| selection_probs = ggml_add(ctx0, probs, exp_probs_b); |
| cb(selection_probs, "ffn_moe_probs_biased", il); |
| } |
|
|
| // llama4 doesn't apply an expert-selection bias and takes the top-k over the |
| // raw logits (sigmoid is applied only after the top-k selection) |
| if (arch == LLM_ARCH_LLAMA4) { |
| selection_probs = logits; |
| } |
|
|
| if (arch == LLM_ARCH_GROVEMOE) { |
| selection_probs = ggml_sigmoid(ctx0, logits); |
| cb(selection_probs, "ffn_moe_probs_biased", il); |
| } |
|
|
| // group-limited expert routing (e.g. DeepSeek-V3): first pick the best expert |
| // groups, then select the top experts only within those groups |
| if (hparams.n_expert_groups > 1 && n_tokens > 0) { |
| const int64_t n_exp_per_group = n_expert / hparams.n_expert_groups; |
|
|
| // reshape to [n_exp_per_group, n_expert_groups, n_tokens] |
| ggml_tensor * selection_groups = ggml_reshape_3d(ctx0, selection_probs, n_exp_per_group, hparams.n_expert_groups, n_tokens); |
|
|
| ggml_tensor * group_scores = ggml_argsort_top_k(ctx0, selection_groups, 2); |
| group_scores = ggml_get_rows(ctx0, ggml_reshape_4d(ctx0, selection_groups, 1, selection_groups->ne[0], selection_groups->ne[1], selection_groups->ne[2]), group_scores); |
|
|
| // the group score is the sum of the top-2 expert scores within the group |
| group_scores = ggml_sum_rows(ctx0, ggml_reshape_3d(ctx0, group_scores, group_scores->ne[1], group_scores->ne[2], group_scores->ne[3])); |
| group_scores = ggml_reshape_2d(ctx0, group_scores, group_scores->ne[1], group_scores->ne[2]); |
|
|
| ggml_tensor * expert_groups = ggml_argsort_top_k(ctx0, group_scores, hparams.n_group_used); |
| cb(expert_groups, "ffn_moe_group_topk", il); |
|
|
| // mask out the experts that are outside the selected groups |
| selection_probs = ggml_get_rows(ctx0, selection_groups, expert_groups); |
| selection_probs = ggml_set_rows(ctx0, ggml_fill(ctx0, selection_groups, -INFINITY), selection_probs, expert_groups); |
| selection_probs = ggml_reshape_2d(ctx0, selection_probs, n_expert, n_tokens); |
| cb(selection_probs, "ffn_moe_probs_masked", il); |
| } |
|
|
| // select the top-k experts |
| ggml_tensor * selected_experts = ggml_argsort_top_k(ctx0, selection_probs, n_expert_used); |
| cb(selected_experts->src[0], "ffn_moe_argsort", il); |
| cb(selected_experts, "ffn_moe_topk", il); |
|
|
| if (arch == LLM_ARCH_GROVEMOE && n_expert != hparams.n_expert) { |
| // map the selected expert ids to their group ("chunk") experts |
| ggml_tensor * f_sel = ggml_cast(ctx0, selected_experts, GGML_TYPE_F32); |
| selected_experts = ggml_cast(ctx0, ggml_scale(ctx0, f_sel, 1.0f / float(hparams.n_group_experts)), GGML_TYPE_I32); |
| probs = ggml_reshape_3d(ctx0, probs, 1, hparams.n_expert, n_tokens); |
| } else { |
| probs = ggml_reshape_3d(ctx0, probs, 1, n_expert, n_tokens); |
| } |
|
|
| ggml_tensor * weights = ggml_get_rows(ctx0, probs, selected_experts); |
| cb(weights, "ffn_moe_weights", il); |
|
|
|
|
| if (gating_op == LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX_WEIGHT) { |
| weights = ggml_reshape_2d(ctx0, weights, n_expert_used, n_tokens); |
| weights = ggml_soft_max(ctx0, weights); |
| weights = ggml_reshape_3d(ctx0, weights, 1, n_expert_used, n_tokens); |
| cb(weights, "ffn_moe_weights_softmax", il); |
| } |
|
|
| if (norm_w) { |
| weights = ggml_reshape_2d(ctx0, weights, n_expert_used, n_tokens); |
|
|
| ggml_tensor * weights_sum = ggml_sum_rows(ctx0, weights); |
| cb(weights_sum, "ffn_moe_weights_sum", il); |
|
|
| // clamp to the smallest normal F16 value to avoid division by zero |
| weights_sum = ggml_clamp(ctx0, weights_sum, 6.103515625e-5, INFINITY); |
| cb(weights_sum, "ffn_moe_weights_sum_clamped", il); |
|
|
| weights = ggml_div(ctx0, weights, weights_sum); |
| cb(weights, "ffn_moe_weights_norm", il); |
|
|
| weights = ggml_reshape_3d(ctx0, weights, 1, n_expert_used, n_tokens); |
| } |
| if (w_scale != 0.0f && w_scale != 1.0f) { |
| weights = ggml_scale(ctx0, weights, w_scale); |
| cb(weights, "ffn_moe_weights_scaled", il); |
| } |
|
|
| // make sure the routing weights are part of the graph before the expert mat-muls |
| ggml_build_forward_expand(gf, weights); |
|
|
| cur = ggml_reshape_3d(ctx0, cur, n_embd, 1, n_tokens); |
|
|
| if (weight_before_ffn) { |
| // repeat cur to [n_embd, n_expert_used, n_tokens] |
| ggml_tensor * repeated = ggml_repeat_4d(ctx0, cur, n_embd, n_expert_used, n_tokens, 1); |
| cur = ggml_mul(ctx0, repeated, weights); |
| cb(cur, "ffn_moe_weighted", il); |
| } |
|
|
| ggml_tensor * up = nullptr; |
| ggml_tensor * experts = nullptr; |
|
|
| if (gate_up_exps) { |
| // fused gate + up projection |
| ggml_tensor * gate_up = build_lora_mm_id(gate_up_exps, cur, selected_experts); |
| cb(gate_up, "ffn_moe_gate_up", il); |
|
|
| if (gate_up_exps_b) { |
| gate_up = ggml_add_id(ctx0, gate_up, gate_up_exps_b, selected_experts); |
| cb(gate_up, "ffn_moe_gate_up_biased", il); |
| } |
|
|
| // optional per-expert scale for the fused gate/up projection |
| if (up_exps_s) { |
| ggml_tensor * s = ggml_reshape_3d(ctx0, up_exps_s, 1, n_expert, 1); |
| s = ggml_repeat_4d(ctx0, s, 1, n_expert, n_tokens, 1); |
| s = ggml_get_rows(ctx0, s, selected_experts); |
| gate_up = ggml_mul(ctx0, gate_up, s); |
| cb(gate_up, "ffn_moe_gate_up_scaled", il); |
| } |
|
|
| const int64_t n_ff = gate_up->ne[0] / 2; |
| cur = ggml_view_3d(ctx0, gate_up, n_ff, gate_up->ne[1], gate_up->ne[2], gate_up->nb[1], gate_up->nb[2], 0); |
| cb(cur, "ffn_moe_gate", il); |
| up = ggml_view_3d(ctx0, gate_up, n_ff, gate_up->ne[1], gate_up->ne[2], gate_up->nb[1], gate_up->nb[2], n_ff * gate_up->nb[0]); |
| cb(up, "ffn_moe_up", il); |
| } else { |
| // separate up projection |
| up = build_lora_mm_id(up_exps, cur, selected_experts); |
| cb(up, "ffn_moe_up", il); |
|
|
| if (up_exps_b) { |
| up = ggml_add_id(ctx0, up, up_exps_b, selected_experts); |
| cb(up, "ffn_moe_up_biased", il); |
| } |
|
|
| // optional per-expert scale for the up projection |
| if (up_exps_s) { |
| ggml_tensor * s = ggml_reshape_3d(ctx0, up_exps_s, 1, n_expert, 1); |
| s = ggml_repeat_4d(ctx0, s, 1, n_expert, n_tokens, 1); |
| s = ggml_get_rows(ctx0, s, selected_experts); |
| up = ggml_mul(ctx0, up, s); |
| cb(up, "ffn_moe_up_scaled", il); |
| } |
|
|
| if (gate_exps) { |
| cur = build_lora_mm_id(gate_exps, cur, selected_experts); |
| cb(cur, "ffn_moe_gate", il); |
| } else { |
| cur = up; |
| } |
|
|
| if (gate_exps_b) { |
| cur = ggml_add_id(ctx0, cur, gate_exps_b, selected_experts); |
| cb(cur, "ffn_moe_gate_biased", il); |
| } |
|
|
| // optional per-expert scale for the gate projection |
| if (gate_exps_s) { |
| ggml_tensor * s = ggml_reshape_3d(ctx0, gate_exps_s, 1, n_expert, 1); |
| s = ggml_repeat_4d(ctx0, s, 1, n_expert, n_tokens, 1); |
| s = ggml_get_rows(ctx0, s, selected_experts); |
| cur = ggml_mul(ctx0, cur, s); |
| cb(cur, "ffn_moe_gate_scaled", il); |
| } |
| } |
|
|
| const bool has_gate = gate_exps || gate_up_exps; |
|
|
| switch (type_op) { |
| case LLM_FFN_SILU: |
| if (gate_exps) { |
| // clamped swiglu variant (per-layer clamp limits, e.g. STEP35) |
| if (arch == LLM_ARCH_STEP35 && il >= 0) { |
| const float limit = hparams.swiglu_clamp_exp[il]; |
| constexpr float eps = 1e-6f; |
| if (limit > eps) { |
| ggml_tensor * gate_act = ggml_silu(ctx0, cur); |
| cb(gate_act, "ffn_moe_silu", il); |
| gate_act = ggml_clamp(ctx0, gate_act, -INFINITY, limit); |
| cb(gate_act, "ffn_moe_silu_clamped", il); |
|
|
| up = ggml_clamp(ctx0, up, -limit, limit); |
| cb(up, "ffn_moe_up_clamped", il); |
|
|
| cur = ggml_mul(ctx0, gate_act, up); |
| cb(cur, "ffn_moe_swiglu_limited", il); |
| break; |
| } |
| } |
| } |
|
|
| if (has_gate) { |
| cur = ggml_swiglu_split(ctx0, cur, up); |
| cb(cur, "ffn_moe_swiglu", il); |
| } else { |
| cur = ggml_silu(ctx0, cur); |
| cb(cur, "ffn_moe_silu", il); |
| } break; |
| case LLM_FFN_GELU: |
| if (has_gate) { |
| cur = ggml_geglu_split(ctx0, cur, up); |
| cb(cur, "ffn_moe_geglu", il); |
| } else { |
| cur = ggml_gelu(ctx0, cur); |
| cb(cur, "ffn_moe_gelu", il); |
| } break; |
| case LLM_FFN_SWIGLU_OAI_MOE: |
| { |
| // swiglu with fixed alpha and limit, as used by gpt-oss |
| constexpr float alpha = 1.702f; |
| constexpr float limit = 7.0f; |
| cur = ggml_swiglu_oai(ctx0, cur, up, alpha, limit); |
| cb(cur, "ffn_moe_swiglu_oai", il); |
| } break; |
| case LLM_FFN_RELU: |
| if (has_gate) { |
| cur = ggml_reglu_split(ctx0, cur, up); |
| cb(cur, "ffn_moe_reglu", il); |
| } else { |
| cur = ggml_relu(ctx0, cur); |
| cb(cur, "ffn_moe_relu", il); |
| } break; |
| case LLM_FFN_RELU_SQR: |
| if (has_gate) { |
| |
| GGML_ABORT("fatal error: gated squared relu not implemented"); |
| } else { |
| cur = ggml_relu(ctx0, cur); |
| cur = ggml_sqr(ctx0, cur); |
| cb(cur, "ffn_moe_relu_sqr", il); |
| } break; |
| default: |
| GGML_ABORT("fatal error"); |
| } |
|
|
| experts = build_lora_mm_id(down_exps, cur, selected_experts); |
| cb(experts, "ffn_moe_down", il); |
|
|
| if (down_exps_b) { |
| experts = ggml_add_id(ctx0, experts, down_exps_b, selected_experts); |
| cb(experts, "ffn_moe_down_biased", il); |
| } |
|
|
| // optional per-expert scale for the down projection |
| if (down_exps_s) { |
| ggml_tensor * s = ggml_reshape_3d(ctx0, down_exps_s, 1, n_expert, 1); |
| s = ggml_repeat_4d(ctx0, s, 1, n_expert, n_tokens, 1); |
| s = ggml_get_rows(ctx0, s, selected_experts); |
| experts = ggml_mul(ctx0, experts, s); |
| cb(experts, "ffn_moe_down_scaled", il); |
| } |
|
|
| if (!weight_before_ffn) { |
| experts = ggml_mul(ctx0, experts, weights); |
| cb(cur, "ffn_moe_weighted", il); |
| } |
|
|
| ggml_tensor * cur_experts[LLAMA_MAX_EXPERTS] = { nullptr }; |
|
|
| assert(n_expert_used > 0); |
|
|
| // order the views before the adds |
| for (uint32_t i = 0; i < hparams.n_expert_used; ++i) { |
| cur_experts[i] = ggml_view_2d(ctx0, experts, n_embd, n_tokens, experts->nb[2], i*experts->nb[1]); |
|
|
| ggml_build_forward_expand(gf, cur_experts[i]); |
| } |
|
|
| // aggregate the experts |
| // note: hparams.n_expert_used is used here instead of the n_expert_used argument, |
| // which can be larger during warmup - this avoids building a large number of |
| // add nodes in the warmup graph |
| ggml_tensor * moe_out = cur_experts[0]; |
|
|
| for (uint32_t i = 1; i < hparams.n_expert_used; ++i) { |
| moe_out = ggml_add(ctx0, moe_out, cur_experts[i]); |
| } |
|
|
| if (hparams.n_expert_used == 1) { |
| // avoid returning a non-contiguous tensor |
| moe_out = ggml_cont(ctx0, moe_out); |
| } |
|
|
| cb(moe_out, "ffn_moe_out", il); |
|
|
| return moe_out; |
| } |
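|
| // informal summary: for each token x the routed result is |
| //   sum over the top-k experts e of w_e * down_e(act(gate_e(x)) * up_e(x)) |
| // where w_e are the (optionally normalized and scaled) router weights; |
| // llama4-style models instead weight the input before the expert FFN |
| // (weight_before_ffn) |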
|
|
| |
| ggml_tensor * llm_graph_context::build_inp_embd(ggml_tensor * tok_embd) const { |
| const int64_t n_embd_inp = hparams.n_embd_inp(); |
| const int64_t n_embd = hparams.n_embd; |
|
|
| assert(n_embd_inp >= n_embd); |
|
|
| auto inp = std::make_unique<llm_graph_input_embd>(n_embd_inp); |
|
|
| inp->tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ubatch.n_tokens); |
| cb(inp->tokens, "inp_tokens", -1); |
| ggml_set_input(inp->tokens); |
| res->t_inp_tokens = inp->tokens; |
|
|
| inp->embd = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd_inp, ubatch.n_tokens); |
| cb(inp->embd, "inp_embd", -1); |
| ggml_set_input(inp->embd); |
|
|
| // build both input branches (token ids and raw embeddings) and select the |
| // active one at the end, so the graph topology does not depend on the ubatch |
| std::array<ggml_tensor *, 2> inps; |
|
|
| // branch 0: token ids |
| { |
| auto & cur = inps[0]; |
|
|
| cur = ggml_get_rows(ctx0, tok_embd, inp->tokens); |
|
|
| // apply lora for embedding tokens if needed |
| for (const auto & lora : *loras) { |
| llama_adapter_lora_weight * lw = lora.first->get_weight(tok_embd); |
| if (lw == nullptr) { |
| continue; |
| } |
|
|
| const float adapter_scale = lora.second; |
| const float scale = lw->get_scale(lora.first->alpha, adapter_scale); |
|
|
| ggml_tensor * inpL_delta = ggml_scale(ctx0, ggml_mul_mat( |
| ctx0, lw->b, |
| ggml_get_rows(ctx0, lw->a, inp->tokens) |
| ), scale); |
|
|
| cur = ggml_add(ctx0, cur, inpL_delta); |
| } |
|
|
| if (n_embd_inp != n_embd) { |
| cur = ggml_pad(ctx0, cur, hparams.n_embd_inp() - n_embd, 0, 0, 0); |
| } |
| } |
|
|
| // branch 1: raw embeddings |
| { |
| auto & cur = inps[1]; |
|
|
| cur = inp->embd; |
| } |
|
|
| assert(ggml_are_same_shape (inps[0], inps[1])); |
| assert(ggml_are_same_stride(inps[0], inps[1])); |
|
|
| ggml_tensor * cur = ggml_build_forward_select(gf, inps.data(), inps.size(), ubatch.token ? 0 : 1); |
|
|
| if (n_embd_inp != n_embd) { |
| cur = ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0); |
| } |
|
|
| res->t_inp_embd = cur; |
|
|
| // apply the optional embedding scale (used by some architectures) |
| if (hparams.f_embedding_scale != 0.0f) { |
| cur = ggml_scale(ctx0, cur, hparams.f_embedding_scale); |
| } |
|
|
| cb(cur, "embd", -1); |
|
|
| res->add_input(std::move(inp)); |
|
|
| // expand the graph here to make sure the selected input branch is not pruned |
| // or reordered by later graph construction |
| ggml_build_forward_expand(gf, cur); |
|
|
| return cur; |
| } |
|
|
| ggml_tensor * llm_graph_context::build_inp_pos() const { |
| auto inp = std::make_unique<llm_graph_input_pos>(hparams.n_pos_per_embd()); |
|
|
| auto & cur = inp->pos; |
|
|
| cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, (int64_t)n_tokens*hparams.n_pos_per_embd()); |
| ggml_set_input(cur); |
|
|
| res->add_input(std::move(inp)); |
|
|
| return cur; |
| } |
|
|
| ggml_tensor * llm_graph_context::build_inp_attn_scale() const { |
| auto inp = std::make_unique<llm_graph_input_attn_temp>(hparams.n_attn_temp_floor_scale, hparams.f_attn_temp_scale, hparams.f_attn_temp_offset); |
|
|
| auto & cur = inp->attn_scale; |
|
|
| // needs to be 1x1xN for broadcasting over the attention scores |
| cur = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, 1, 1, n_tokens); |
| ggml_set_input(cur); |
| ggml_set_name(cur, "attn_scale"); |
|
|
| res->add_input(std::move(inp)); |
|
|
| return cur; |
| } |
|
|
| ggml_tensor * llm_graph_context::build_inp_out_ids() const { |
| // note: the out_ids input is used to gather only the rows that produce outputs |
| // before the final matmuls, sparing computation for the other rows; |
| // it is created even when n_outputs == n_tokens so that the graph topology |
| // stays the same regardless of the number of outputs (helps graph reuse) |
|
|
| auto inp = std::make_unique<llm_graph_input_out_ids>(hparams, cparams, n_outputs); |
|
|
| auto & cur = inp->out_ids; |
|
|
| cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_outputs); |
| ggml_set_input(cur); |
|
|
| res->add_input(std::move(inp)); |
|
|
| return cur; |
| } |
|
|
| ggml_tensor * llm_graph_context::build_inp_mean() const { |
| auto inp = std::make_unique<llm_graph_input_mean>(cparams); |
|
|
| auto & cur = inp->mean; |
|
|
| cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, ubatch.n_seqs_unq); |
| ggml_set_input(cur); |
|
|
| res->add_input(std::move(inp)); |
|
|
| return cur; |
| } |
|
|
| ggml_tensor * llm_graph_context::build_inp_cls() const { |
| auto inp = std::make_unique<llm_graph_input_cls>(cparams, arch); |
|
|
| auto & cur = inp->cls; |
|
|
| cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ubatch.n_seqs_unq); |
| ggml_set_input(cur); |
|
|
| res->add_input(std::move(inp)); |
|
|
| return cur; |
| } |
|
|
| ggml_tensor * llm_graph_context::build_inp_cross_embd() const { |
| auto inp = std::make_unique<llm_graph_input_cross_embd>(cross); |
|
|
| auto & cur = inp->cross_embd; |
|
|
| // use the actual encoder output dimensions when available, otherwise fall back |
| // to the model hyperparameters |
|
|
| const auto n_embd = !cross->v_embd.empty() ? cross->n_embd : hparams.n_embd_inp(); |
| const auto n_enc = !cross->v_embd.empty() ? cross->n_enc : hparams.n_ctx_train; |
|
|
| cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, n_enc); |
| ggml_set_input(cur); |
|
|
| res->add_input(std::move(inp)); |
|
|
| return cur; |
| } |
|
|
| ggml_tensor * llm_graph_context::build_inp_pos_bucket_enc() const { |
| auto inp = std::make_unique<llm_graph_input_pos_bucket>(hparams); |
|
|
| auto & cur = inp->pos_bucket; |
|
|
| cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_I32, n_tokens, n_tokens); |
| ggml_set_input(cur); |
|
|
| res->add_input(std::move(inp)); |
|
|
| return cur; |
| } |
|
|
| ggml_tensor * llm_graph_context::build_inp_pos_bucket_dec() const { |
| const auto * mctx_cur = static_cast<const llama_kv_cache_context *>(mctx); |
|
|
| auto inp = std::make_unique<llm_graph_input_pos_bucket_kv>(hparams, mctx_cur); |
|
|
| const auto n_kv = mctx_cur->get_n_kv(); |
|
|
| auto & cur = inp->pos_bucket; |
|
|
| cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_I32, n_kv, n_tokens); |
| ggml_set_input(cur); |
|
|
| res->add_input(std::move(inp)); |
|
|
| return cur; |
| } |
|
|
| ggml_tensor * llm_graph_context::build_pos_bias(ggml_tensor * pos_bucket, ggml_tensor * attn_rel_b) const { |
| ggml_tensor * pos_bucket_1d = ggml_reshape_1d(ctx0, pos_bucket, pos_bucket->ne[0] * pos_bucket->ne[1]); |
| cb(pos_bucket_1d, "pos_bucket_1d", -1); |
|
|
| ggml_tensor * pos_bias = ggml_get_rows(ctx0, attn_rel_b, pos_bucket_1d); |
|
|
| pos_bias = ggml_reshape_3d(ctx0, pos_bias, pos_bias->ne[0], pos_bucket->ne[0], pos_bucket->ne[1]); |
| pos_bias = ggml_permute (ctx0, pos_bias, 2, 0, 1, 3); |
| pos_bias = ggml_cont (ctx0, pos_bias); |
|
|
| cb(pos_bias, "pos_bias", -1); |
|
|
| return pos_bias; |
| } |
|
|
| ggml_tensor * llm_graph_context::build_attn_mha( |
| ggml_tensor * q, |
| ggml_tensor * k, |
| ggml_tensor * v, |
| ggml_tensor * kq_b, |
| ggml_tensor * kq_mask, |
| ggml_tensor * sinks, |
| ggml_tensor * v_mla, |
| float kq_scale, |
| int il) const { |
| const bool v_trans = v->nb[1] > v->nb[2]; |
|
|
| // split the batch into streams if needed |
| const auto n_stream = k->ne[3]; |
|
|
| q = ggml_view_4d(ctx0, q, q->ne[0], q->ne[1], q->ne[2]/n_stream, n_stream, q->nb[1], q->nb[2], q->nb[3]/n_stream, 0); |
|
|
| q = ggml_permute(ctx0, q, 0, 2, 1, 3); |
| k = ggml_permute(ctx0, k, 0, 2, 1, 3); |
| v = ggml_permute(ctx0, v, 0, 2, 1, 3); |
|
|
| ggml_tensor * cur; |
|
|
| const bool use_flash_attn = cparams.flash_attn && kq_b == nullptr; |
| if (use_flash_attn) { |
| GGML_ASSERT(kq_b == nullptr && "Flash attention does not support KQ bias yet"); |
|
|
| if (v_trans) { |
| v = ggml_transpose(ctx0, v); |
| } |
|
|
| // K/V can be F32 when there is no KV cache (e.g. embedding models) - flash attention needs F16 |
| if (k->type == GGML_TYPE_F32) { |
| k = ggml_cast(ctx0, k, GGML_TYPE_F16); |
| } |
|
|
| if (v->type == GGML_TYPE_F32) { |
| v = ggml_cast(ctx0, v, GGML_TYPE_F16); |
| } |
|
|
| cur = ggml_flash_attn_ext(ctx0, q, k, v, kq_mask, kq_scale, hparams.f_max_alibi_bias, |
| hparams.attn_soft_cap ? hparams.f_attn_logit_softcapping : 0.0f); |
| cb(cur, LLAMA_TENSOR_NAME_FATTN, il); |
|
|
| ggml_flash_attn_ext_add_sinks(cur, sinks); |
| ggml_flash_attn_ext_set_prec (cur, GGML_PREC_F32); |
|
|
| if (v_mla) { |
| #if 0 |
| // v_mla can be applied as a matrix-vector multiplication broadcast over dim 2, |
| // but this is inefficient because dims 0 and 1 are small |
| cur = ggml_reshape_4d(ctx0, cur, v_mla->ne[0], 1, n_head, n_tokens); |
| cur = ggml_mul_mat(ctx0, v_mla, cur); |
| #else |
| // it is better to apply v_mla as a matrix-matrix multiplication with n_tokens |
| // in dim 1 - the permutes are no-ops that only reinterpret the data layout |
| cur = ggml_permute(ctx0, cur, 0, 2, 1, 3); |
| cur = ggml_mul_mat(ctx0, v_mla, cur); |
| cb(cur, "fattn_mla", il); |
| cur = ggml_permute(ctx0, cur, 0, 2, 1, 3); |
| cur = ggml_cont(ctx0, cur); |
| #endif |
| } |
|
|
| cur = ggml_reshape_2d(ctx0, cur, cur->ne[0]*cur->ne[1], cur->ne[2]*cur->ne[3]); |
| } else { |
| ggml_tensor * kq = ggml_mul_mat(ctx0, k, q); |
| cb(kq, "kq", il); |
|
|
| // note: this op tends to require high floating point range |
| // while for some models F16 is enough, for others it is not, so we default to F32 |
| ggml_mul_mat_set_prec(kq, GGML_PREC_F32); |
|
|
| if (arch == LLM_ARCH_GROK) { |
| // grok: kq = f_attn_logit_softcapping * tanh(f_attn_out_scale * kq / f_attn_logit_softcapping) |
| // applied before the softmax below |
|
|
| kq = ggml_tanh(ctx0, ggml_scale(ctx0, kq, hparams.f_attn_out_scale / hparams.f_attn_logit_softcapping)); |
| cb(kq, "kq_tanh", il); |
| kq = ggml_scale(ctx0, kq, hparams.f_attn_logit_softcapping); |
| cb(kq, "kq_scaled", il); |
| } |
|
|
| if (hparams.attn_soft_cap) { |
| kq = ggml_scale(ctx0, kq, 1.0f / hparams.f_attn_logit_softcapping); |
| cb(kq, "kq_scaled_1", il); |
| kq = ggml_tanh (ctx0, kq); |
| cb(kq, "kq_tanh", il); |
| kq = ggml_scale(ctx0, kq, hparams.f_attn_logit_softcapping); |
| cb(kq, "kq_scaled_2", il); |
| } |
|
|
| if (kq_b) { |
| kq = ggml_add(ctx0, kq, kq_b); |
| cb(kq, "kq_plus_kq_b", il); |
| } |
|
|
| kq = ggml_soft_max_ext(ctx0, kq, kq_mask, kq_scale, hparams.f_max_alibi_bias); |
| ggml_soft_max_add_sinks(kq, sinks); |
| cb(kq, "kq_soft_max", il); |
|
|
| if (!v_trans) { |
| // the V tensor is not transposed in memory - transpose it here (ideally this branch is avoided) |
| v = ggml_cont(ctx0, ggml_transpose(ctx0, v)); |
| cb(v, "v_cont", il); |
| } |
|
|
| ggml_tensor * kqv = ggml_mul_mat(ctx0, v, kq); |
| cb(kqv, "kqv", il); |
|
|
| // for MLA with the absorption optimization, we need to "decompress" from MQA back to MHA |
| if (v_mla) { |
| kqv = ggml_mul_mat(ctx0, v_mla, kqv); |
| cb(kqv, "kqv_mla", il); |
| } |
|
|
| cur = ggml_permute(ctx0, kqv, 0, 2, 1, 3); |
|
|
| // merge the heads: [n_embd_head_v, n_head, n_tokens] -> [n_embd_head_v*n_head, n_tokens] |
| cur = ggml_cont_2d(ctx0, cur, cur->ne[0]*cur->ne[1], cur->ne[2]*cur->ne[3]); |
|
|
| if (!cparams.offload_kqv) { |
| // all nodes between the KV store and the attention output are run on the CPU |
| ggml_backend_sched_set_tensor_backend(sched, cur, backend_cpu); |
| } |
| } |
|
|
| ggml_build_forward_expand(gf, cur); |
|
|
| return cur; |
| } |
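|
| // shape walk-through (illustrative): q enters as [n_embd_head, n_head, n_tokens] |
| // and k/v as [n_embd_head, n_head_kv, n_kv]; after the permutes the batched |
| // matmul gives kq of shape [n_kv, n_tokens, n_head] and the final reshape |
| // returns [n_embd_head_v*n_head, n_tokens], ready for the output projection |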
|
|
| llm_graph_input_attn_no_cache * llm_graph_context::build_attn_inp_no_cache() const { |
| auto inp = std::make_unique<llm_graph_input_attn_no_cache>(hparams, cparams); |
|
|
| // without a KV cache the keys are the batch itself, so the mask is [n_tokens, n_tokens] |
| inp->self_kq_mask = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_tokens, n_tokens, 1, 1); |
| ggml_set_input(inp->self_kq_mask); |
|
|
| inp->self_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask, GGML_TYPE_F16) : inp->self_kq_mask; |
|
|
| if (hparams.swa_type != LLAMA_SWA_TYPE_NONE) { |
| inp->self_kq_mask_swa = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_tokens, n_tokens, 1, 1); |
| ggml_set_input(inp->self_kq_mask_swa); |
|
|
| inp->self_kq_mask_swa_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask_swa, GGML_TYPE_F16) : inp->self_kq_mask_swa; |
| } else { |
| inp->self_kq_mask_swa = nullptr; |
| inp->self_kq_mask_swa_cnv = nullptr; |
| } |
|
|
| return (llm_graph_input_attn_no_cache *) res->add_input(std::move(inp)); |
| } |
|
|
| ggml_tensor * llm_graph_context::build_attn( |
| llm_graph_input_attn_no_cache * inp, |
| ggml_tensor * wo, |
| ggml_tensor * wo_b, |
| ggml_tensor * q_cur, |
| ggml_tensor * k_cur, |
| ggml_tensor * v_cur, |
| ggml_tensor * kq_b, |
| ggml_tensor * sinks, |
| ggml_tensor * v_mla, |
| float kq_scale, |
| int il) const { |
| GGML_UNUSED(n_tokens); |
|
|
| // these nodes are added to the graph together so that they are not reordered |
| // by doing so, the number of splits in the graph is reduced |
| ggml_build_forward_expand(gf, q_cur); |
| ggml_build_forward_expand(gf, k_cur); |
| ggml_build_forward_expand(gf, v_cur); |
|
|
| const bool is_swa = hparams.is_swa(il); |
|
|
| const auto & kq_mask = is_swa ? inp->get_kq_mask_swa() : inp->get_kq_mask(); |
|
|
| |
| |
| |
| |
|
|
| ggml_tensor * q = q_cur; |
| ggml_tensor * k = k_cur; |
| ggml_tensor * v = v_cur; |
|
|
| ggml_tensor * cur = build_attn_mha(q, k, v, kq_b, kq_mask, sinks, v_mla, kq_scale, il); |
| cb(cur, "kqv_out", il); |
|
|
| if (wo) { |
| cur = build_lora_mm(wo, cur); |
| } |
|
|
| if (wo_b) { |
| cur = ggml_add(ctx0, cur, wo_b); |
| } |
|
|
| return cur; |
| } |
|
|
| static std::unique_ptr<llm_graph_input_attn_kv> build_attn_inp_kv_impl( |
| ggml_context * ctx0, |
| const llama_ubatch & ubatch, |
| const llama_hparams & hparams, |
| const llama_cparams & cparams, |
| const llama_kv_cache_context * mctx_cur) { |
|
|
| auto inp = std::make_unique<llm_graph_input_attn_kv>(hparams, cparams, mctx_cur); |
|
|
| { |
| GGML_ASSERT(hparams.swa_type == LLAMA_SWA_TYPE_NONE && "Use llama_kv_cache_iswa for SWA"); |
|
|
| inp->self_k_idxs = mctx_cur->build_input_k_idxs(ctx0, ubatch); |
| inp->self_v_idxs = mctx_cur->build_input_v_idxs(ctx0, ubatch); |
|
|
| inp->self_kq_mask = build_kq_mask(ctx0, mctx_cur, ubatch, cparams); |
|
|
| ggml_set_input(inp->self_kq_mask); |
|
|
| inp->self_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask, GGML_TYPE_F16) : inp->self_kq_mask; |
| } |
|
|
| return inp; |
| } |
|
|
| llm_graph_input_attn_kv * llm_graph_context::build_attn_inp_kv() const { |
| const auto * mctx_cur = static_cast<const llama_kv_cache_context *>(mctx); |
|
|
| auto inp = build_attn_inp_kv_impl(ctx0, ubatch, hparams, cparams, mctx_cur); |
|
|
| return (llm_graph_input_attn_kv *) res->add_input(std::move(inp)); |
| } |
|
|
| ggml_tensor * llm_graph_context::build_attn( |
| llm_graph_input_attn_kv * inp, |
| ggml_tensor * wo, |
| ggml_tensor * wo_b, |
| ggml_tensor * q_cur, |
| ggml_tensor * k_cur, |
| ggml_tensor * v_cur, |
| ggml_tensor * kq_b, |
| ggml_tensor * sinks, |
| ggml_tensor * v_mla, |
| float kq_scale, |
| int il) const { |
| GGML_ASSERT(v_mla == nullptr); |
|
|
| // these nodes are added to the graph together so that they are not reordered |
| // by doing so, the number of splits in the graph is reduced |
| ggml_build_forward_expand(gf, q_cur); |
| ggml_build_forward_expand(gf, v_cur); |
| ggml_build_forward_expand(gf, k_cur); |
|
|
| const auto * mctx_cur = inp->mctx; |
|
|
| // store the K and V of the current ubatch into the KV cache |
| { |
| const auto & k_idxs = inp->get_k_idxs(); |
| const auto & v_idxs = inp->get_v_idxs(); |
|
|
| ggml_build_forward_expand(gf, mctx_cur->cpy_k(ctx0, k_cur, k_idxs, il)); |
| ggml_build_forward_expand(gf, mctx_cur->cpy_v(ctx0, v_cur, v_idxs, il)); |
| } |
|
|
| const auto & kq_mask = inp->get_kq_mask(); |
|
|
| ggml_tensor * q = q_cur; |
| ggml_tensor * k = mctx_cur->get_k(ctx0, il); |
| ggml_tensor * v = mctx_cur->get_v(ctx0, il); |
|
|
| ggml_tensor * cur = build_attn_mha(q, k, v, kq_b, kq_mask, sinks, v_mla, kq_scale, il); |
| cb(cur, "kqv_out", il); |
|
|
| if (wo) { |
| cur = build_lora_mm(wo, cur); |
| if (arch == LLM_ARCH_GLM4 || arch == LLM_ARCH_GLM4_MOE || arch == LLM_ARCH_JAIS2) { |
| // these architectures are sensitive to low-precision accumulation in the output projection |
| ggml_mul_mat_set_prec(cur, GGML_PREC_F32); |
| } |
| } |
|
|
| if (wo_b) { |
| cur = ggml_add(ctx0, cur, wo_b); |
| } |
|
|
| return cur; |
| } |
|
|
| static std::unique_ptr<llm_graph_input_attn_k> build_attn_inp_k_impl( |
| ggml_context * ctx0, |
| const llama_ubatch & ubatch, |
| const llama_hparams & hparams, |
| const llama_cparams & cparams, |
| const llama_kv_cache_context * mctx_cur) { |
|
|
| auto inp = std::make_unique<llm_graph_input_attn_k>(hparams, cparams, mctx_cur); |
|
|
| { |
| GGML_ASSERT(hparams.swa_type == LLAMA_SWA_TYPE_NONE && "Use llama_kv_cache_iswa for SWA"); |
|
|
| inp->self_k_idxs = mctx_cur->build_input_k_idxs(ctx0, ubatch); |
|
|
| inp->self_kq_mask = build_kq_mask(ctx0, mctx_cur, ubatch, cparams); |
| ggml_set_input(inp->self_kq_mask); |
|
|
| inp->self_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask, GGML_TYPE_F16) : inp->self_kq_mask; |
| } |
|
|
| return inp; |
| } |
|
|
| llm_graph_input_attn_k * llm_graph_context::build_attn_inp_k() const { |
| const auto * mctx_cur = static_cast<const llama_kv_cache_context *>(mctx); |
|
|
| auto inp = build_attn_inp_k_impl(ctx0, ubatch, hparams, cparams, mctx_cur); |
|
|
| return (llm_graph_input_attn_k *) res->add_input(std::move(inp)); |
| } |
|
|
| ggml_tensor * llm_graph_context::build_attn( |
| llm_graph_input_attn_k * inp, |
| ggml_tensor * wo, |
| ggml_tensor * wo_b, |
| ggml_tensor * q_cur, |
| ggml_tensor * k_cur, |
| ggml_tensor * v_cur, |
| ggml_tensor * kq_b, |
| ggml_tensor * sinks, |
| ggml_tensor * v_mla, |
| float kq_scale, |
| int il) const { |
| // these nodes are added to the graph together so that they are not reordered |
| // by doing so, the number of splits in the graph is reduced |
| ggml_build_forward_expand(gf, q_cur); |
| ggml_build_forward_expand(gf, v_cur); |
| ggml_build_forward_expand(gf, k_cur); |
|
|
| const auto * mctx_cur = inp->mctx; |
|
|
| // store to KV cache (this cache holds K only) |
| { |
| const auto & k_idxs = inp->get_k_idxs(); |
|
|
| ggml_build_forward_expand(gf, mctx_cur->cpy_k(ctx0, k_cur, k_idxs, il)); |
| } |
|
|
| const auto & kq_mask = inp->get_kq_mask(); |
|
|
| ggml_tensor * q = q_cur; |
| ggml_tensor * k = mctx_cur->get_k(ctx0, il); |
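| // K-only cache: V is read as a view of the cached K (its first v_cur->ne[0] elements per entry), so no separate V buffer is stored |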
| ggml_tensor * v = ggml_view_4d(ctx0, k, v_cur->ne[0], k->ne[1], k->ne[2], k->ne[3], k->nb[1], k->nb[2], k->nb[3], 0); |
|
|
| ggml_tensor * cur = build_attn_mha(q, k, v, kq_b, kq_mask, sinks, v_mla, kq_scale, il); |
| cb(cur, "kqv_out", il); |
|
|
| if (wo) { |
| cur = build_lora_mm(wo, cur); |
| if (arch == LLM_ARCH_GLM4 || arch == LLM_ARCH_GLM4_MOE) { |
| // GLM4 seems to have numerical issues with half-precision accumulators |
| ggml_mul_mat_set_prec(cur, GGML_PREC_F32); |
| } |
| } |
|
|
| if (wo_b) { |
| cur = ggml_add(ctx0, cur, wo_b); |
| } |
|
|
| return cur; |
| } |
|
|
| ggml_tensor * llm_graph_context::build_attn( |
| llm_graph_input_attn_kv_iswa * inp, |
| ggml_tensor * wo, |
| ggml_tensor * wo_b, |
| ggml_tensor * q_cur, |
| ggml_tensor * k_cur, |
| ggml_tensor * v_cur, |
| ggml_tensor * kq_b, |
| ggml_tensor * sinks, |
| ggml_tensor * v_mla, |
| float kq_scale, |
| int il) const { |
| // these nodes are added to the graph together so that they are not reordered |
| // by doing so, the number of splits in the graph is reduced |
| ggml_build_forward_expand(gf, q_cur); |
|
|
| if (k_cur) { |
| ggml_build_forward_expand(gf, k_cur); |
| } |
|
|
| if (v_cur) { |
| ggml_build_forward_expand(gf, v_cur); |
| } |
|
|
| const auto * mctx_iswa = inp->mctx; |
|
|
| const bool is_swa = hparams.is_swa(il); |
|
|
| const auto * mctx_cur = is_swa ? mctx_iswa->get_swa() : mctx_iswa->get_base(); |
|
|
| // optionally store to KV cache |
| if (k_cur) { |
| const auto & k_idxs = is_swa ? inp->get_k_idxs_swa() : inp->get_k_idxs(); |
|
|
| ggml_build_forward_expand(gf, mctx_cur->cpy_k(ctx0, k_cur, k_idxs, il)); |
| } |
|
|
| if (v_cur) { |
| const auto & v_idxs = is_swa ? inp->get_v_idxs_swa() : inp->get_v_idxs(); |
|
|
| ggml_build_forward_expand(gf, mctx_cur->cpy_v(ctx0, v_cur, v_idxs, il)); |
| } |
|
|
| const auto & kq_mask = is_swa ? inp->get_kq_mask_swa() : inp->get_kq_mask(); |
|
|
| ggml_tensor * q = q_cur; |
| ggml_tensor * k = mctx_cur->get_k(ctx0, il); |
| ggml_tensor * v = mctx_cur->get_v(ctx0, il); |
|
|
| ggml_tensor * cur = build_attn_mha(q, k, v, kq_b, kq_mask, sinks, v_mla, kq_scale, il); |
| cb(cur, "kqv_out", il); |
|
|
| if (wo) { |
| cur = build_lora_mm(wo, cur); |
| } |
|
|
| if (wo_b) { |
| cur = ggml_add(ctx0, cur, wo_b); |
| } |
|
|
| return cur; |
| } |
|
|
| llm_graph_input_attn_cross * llm_graph_context::build_attn_inp_cross() const { |
| auto inp = std::make_unique<llm_graph_input_attn_cross>(cross); |
|
|
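| // the encoder output may not be available yet - fall back to the training context size for the mask |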
| const int32_t n_enc = !cross->v_embd.empty() ? cross->n_enc : hparams.n_ctx_train; |
|
|
| inp->cross_kq_mask = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_enc, n_tokens, 1, 1); |
| ggml_set_input(inp->cross_kq_mask); |
|
|
| inp->cross_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->cross_kq_mask, GGML_TYPE_F16) : inp->cross_kq_mask; |
|
|
| return (llm_graph_input_attn_cross *) res->add_input(std::move(inp)); |
| } |
|
|
| ggml_tensor * llm_graph_context::build_attn( |
| llm_graph_input_attn_cross * inp, |
| ggml_tensor * wo, |
| ggml_tensor * wo_b, |
| ggml_tensor * q_cur, |
| ggml_tensor * k_cur, |
| ggml_tensor * v_cur, |
| ggml_tensor * kq_b, |
| ggml_tensor * sinks, |
| ggml_tensor * v_mla, |
| float kq_scale, |
| int il) const { |
| // these nodes are added to the graph together so that they are not reordered |
| // by doing so, the number of splits in the graph is reduced |
| ggml_build_forward_expand(gf, q_cur); |
| ggml_build_forward_expand(gf, k_cur); |
| ggml_build_forward_expand(gf, v_cur); |
|
|
| const auto & kq_mask = inp->get_kq_mask_cross(); |
|
|
| ggml_tensor * q = q_cur; |
| ggml_tensor * k = k_cur; |
| ggml_tensor * v = v_cur; |
|
|
| ggml_tensor * cur = build_attn_mha(q, k, v, kq_b, kq_mask, sinks, v_mla, kq_scale, il); |
| cb(cur, "kqv_out", il); |
|
|
| if (wo) { |
| cur = build_lora_mm(wo, cur); |
| } |
|
|
| if (wo_b) { |
| cur = ggml_add(ctx0, cur, wo_b); |
| } |
|
|
| return cur; |
| } |
|
|
| // build the attention inputs for an interleaved SWA cache: the base (non-SWA) and SWA streams each get their own indices and masks |
| llm_graph_input_attn_kv_iswa * llm_graph_context::build_attn_inp_kv_iswa() const { |
| const auto * mctx_cur = static_cast<const llama_kv_cache_iswa_context *>(mctx); |
|
|
| auto inp = std::make_unique<llm_graph_input_attn_kv_iswa>(hparams, cparams, mctx_cur); |
|
|
| { |
| inp->self_k_idxs = mctx_cur->get_base()->build_input_k_idxs(ctx0, ubatch); |
| inp->self_v_idxs = mctx_cur->get_base()->build_input_v_idxs(ctx0, ubatch); |
|
|
| inp->self_kq_mask = build_kq_mask(ctx0, mctx_cur->get_base(), ubatch, cparams); |
| ggml_set_input(inp->self_kq_mask); |
| ggml_set_name(inp->self_kq_mask, "self_kq_mask"); |
|
|
| inp->self_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask, GGML_TYPE_F16) : inp->self_kq_mask; |
| ggml_set_name(inp->self_kq_mask_cnv, "self_kq_mask_cnv"); |
| } |
|
|
| { |
| GGML_ASSERT(hparams.swa_type != LLAMA_SWA_TYPE_NONE && "Use llama_kv_cache for non-SWA"); |
|
|
| inp->self_k_idxs_swa = mctx_cur->get_swa()->build_input_k_idxs(ctx0, ubatch); |
| inp->self_v_idxs_swa = mctx_cur->get_swa()->build_input_v_idxs(ctx0, ubatch); |
|
|
| inp->self_kq_mask_swa = build_kq_mask(ctx0, mctx_cur->get_swa(), ubatch, cparams); |
| ggml_set_input(inp->self_kq_mask_swa); |
| ggml_set_name(inp->self_kq_mask_swa, "self_kq_mask_swa"); |
|
|
| inp->self_kq_mask_swa_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask_swa, GGML_TYPE_F16) : inp->self_kq_mask_swa; |
| ggml_set_name(inp->self_kq_mask_swa_cnv, "self_kq_mask_swa_cnv"); |
| } |
|
|
| return (llm_graph_input_attn_kv_iswa *) res->add_input(std::move(inp)); |
| } |
|
|
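| // a sketch of the state shuffle below, assuming s holds rs_size states of state_size elements each: |
| //   - the state at slot rs_zero (if any) is zeroed, so cleared sequences restart from zeros |
| //   - state_copy_main gathers the n_seqs states used by this ubatch into the returned tensor |
| //   - state_copy_extra writes the remaining n_rs - n_seqs states back after the used ones |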
| ggml_tensor * llm_graph_context::build_rs( |
| ggml_tensor * s, |
| ggml_tensor * state_copy_main, |
| ggml_tensor * state_copy_extra, |
| int32_t state_size, |
| int32_t n_seqs, |
| uint32_t n_rs, |
| uint32_t rs_head, |
| uint32_t rs_size, |
| int32_t rs_zero, |
| const llm_graph_get_rows_fn & get_state_rows) const { |
|
|
| ggml_tensor * states = ggml_reshape_2d(ctx0, s, state_size, rs_size); |
|
|
| // clear a single state, which is then copied to the other cleared states |
| // note: this is a no-op when rs_zero < 0, because the view is zero-sized |
| ggml_tensor * state_zero = ggml_view_1d(ctx0, states, state_size*(rs_zero >= 0), rs_zero*states->nb[1]*(rs_zero >= 0)); |
| ggml_build_forward_expand(gf, ggml_scale_inplace(ctx0, state_zero, 0)); |
|
|
| // copy the states that are used by this ubatch |
| // {state_size, rs_size} -> {state_size, n_seqs} |
| ggml_tensor * output_states = get_state_rows(ctx0, states, state_copy_main); |
| ggml_build_forward_expand(gf, output_states); |
|
|
| // copy the extra states which won't be changed further (slots n_seqs..n_rs) |
| ggml_tensor * states_extra = ggml_get_rows(ctx0, states, state_copy_extra); |
| ggml_build_forward_expand(gf, |
| ggml_cpy(ctx0, |
| states_extra, |
| ggml_view_1d(ctx0, s, state_size*(n_rs - n_seqs), (rs_head + n_seqs)*state_size*ggml_element_size(s)))); |
|
|
| return output_states; |
| } |
|
|
| static std::unique_ptr<llm_graph_input_rs> build_rs_inp_impl( |
| ggml_context * ctx0, |
| const llama_ubatch & ubatch, |
| const llama_memory_recurrent_context * mctx_cur) { |
|
|
| auto inp = std::make_unique<llm_graph_input_rs>(mctx_cur); |
|
|
| const int64_t n_rs = mctx_cur->get_n_rs(); |
| const int64_t n_seqs = ubatch.n_seqs; |
|
|
| inp->s_copy = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_rs); |
| ggml_set_input(inp->s_copy); |
|
|
| inp->s_copy_main = ggml_view_1d(ctx0, inp->s_copy, n_seqs, 0); |
| inp->s_copy_extra = ggml_view_1d(ctx0, inp->s_copy, n_rs - n_seqs, n_seqs * inp->s_copy->nb[0]); |
|
|
| inp->head = mctx_cur->get_head(); |
| inp->rs_z = mctx_cur->get_rs_z(); |
|
|
| return inp; |
| } |
|
|
| llm_graph_input_rs * llm_graph_context::build_rs_inp() const { |
| const auto * mctx_cur = static_cast<const llama_memory_recurrent_context *>(mctx); |
|
|
| auto inp = build_rs_inp_impl(ctx0, ubatch, mctx_cur); |
|
|
| return (llm_graph_input_rs *) res->add_input(std::move(inp)); |
| } |
|
|
| ggml_tensor * llm_graph_context::build_rs( |
| llm_graph_input_rs * inp, |
| ggml_tensor * s, |
| int32_t state_size, |
| int32_t n_seqs, |
| const llm_graph_get_rows_fn & get_state_rows) const { |
| const auto * kv_state = inp->mctx; |
|
|
| return build_rs(s, inp->s_copy_main, inp->s_copy_extra, state_size, n_seqs, |
| kv_state->get_n_rs(), kv_state->get_head(), kv_state->get_size(), kv_state->get_rs_z(), |
| get_state_rows); |
| } |
|
|
| ggml_tensor * llm_graph_context::build_rwkv_token_shift_load( |
| llm_graph_input_rs * inp, |
| const llama_ubatch & ubatch, |
| int il) const { |
| const auto * mctx_cur = static_cast<const llama_memory_recurrent_context *>(mctx); |
|
|
| const auto token_shift_count = hparams.token_shift_count; |
|
|
| const int64_t n_seqs = ubatch.n_seqs; |
|
|
| ggml_tensor * token_shift_all = mctx_cur->get_r_l(il); |
|
|
| ggml_tensor * token_shift = build_rs( |
| inp, token_shift_all, |
| hparams.n_embd_r(), n_seqs); |
|
|
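| // the reshape below implies n_embd_r() == n_embd*token_shift_count: one n_embd-sized slice per shift |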
| token_shift = ggml_reshape_3d(ctx0, token_shift, hparams.n_embd, token_shift_count, n_seqs); |
|
|
| return token_shift; |
| } |
|
|
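| // store the updated token-shift states back into the recurrent buffer at the current head |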
| ggml_tensor * llm_graph_context::build_rwkv_token_shift_store( |
| ggml_tensor * token_shift, |
| const llama_ubatch & ubatch, |
| int il) const { |
| const auto * mctx_cur = static_cast<const llama_memory_recurrent_context *>(mctx); |
|
|
| const auto token_shift_count = hparams.token_shift_count; |
| const auto n_embd = hparams.n_embd; |
|
|
| const int64_t n_seqs = ubatch.n_seqs; |
|
|
| const auto kv_head = mctx_cur->get_head(); |
|
|
| return ggml_cpy( |
| ctx0, |
| ggml_view_1d(ctx0, token_shift, n_embd * n_seqs * token_shift_count, 0), |
| ggml_view_1d(ctx0, mctx_cur->get_r_l(il), hparams.n_embd_r()*n_seqs, hparams.n_embd_r()*kv_head*ggml_element_size(mctx_cur->get_r_l(il))) |
| ); |
| } |
|
|
| llm_graph_input_mem_hybrid * llm_graph_context::build_inp_mem_hybrid() const { |
| const auto * mctx_cur = static_cast<const llama_memory_hybrid_context *>(mctx); |
|
|
| auto inp_rs = build_rs_inp_impl (ctx0, ubatch, mctx_cur->get_recr()); |
| auto inp_attn = build_attn_inp_kv_impl(ctx0, ubatch, hparams, cparams, mctx_cur->get_attn()); |
|
|
| auto inp = std::make_unique<llm_graph_input_mem_hybrid>(cparams, std::move(inp_attn), std::move(inp_rs), mctx_cur); |
|
|
| return (llm_graph_input_mem_hybrid *) res->add_input(std::move(inp)); |
| } |
|
|
| llm_graph_input_mem_hybrid_k * llm_graph_context::build_inp_mem_hybrid_k() const { |
| const auto * mctx_cur = static_cast<const llama_memory_hybrid_context *>(mctx); |
|
|
| auto inp_rs = build_rs_inp_impl (ctx0, ubatch, mctx_cur->get_recr()); |
| auto inp_attn = build_attn_inp_k_impl(ctx0, ubatch, hparams, cparams, mctx_cur->get_attn()); |
|
|
| auto inp = std::make_unique<llm_graph_input_mem_hybrid_k>(cparams, std::move(inp_attn), std::move(inp_rs), mctx_cur); |
|
|
| return (llm_graph_input_mem_hybrid_k *) res->add_input(std::move(inp)); |
| } |
|
|
| llm_graph_input_mem_hybrid_iswa * llm_graph_context::build_inp_mem_hybrid_iswa() const { |
| const auto * mctx_cur = static_cast<const llama_memory_hybrid_iswa_context *>(mctx); |
|
|
| auto inp_rs = build_rs_inp_impl(ctx0, ubatch, mctx_cur->get_recr()); |
|
|
| // the attention side of the hybrid memory is an iSWA KV cache, so its inputs are built explicitly below |
| const auto * attn_ctx = mctx_cur->get_attn(); |
|
|
| auto inp_attn = std::make_unique<llm_graph_input_attn_kv_iswa>(hparams, cparams, attn_ctx); |
|
|
| { |
| inp_attn->self_k_idxs = attn_ctx->get_base()->build_input_k_idxs(ctx0, ubatch); |
| inp_attn->self_v_idxs = attn_ctx->get_base()->build_input_v_idxs(ctx0, ubatch); |
|
|
| inp_attn->self_kq_mask = build_kq_mask(ctx0, attn_ctx->get_base(), ubatch, cparams); |
| ggml_set_input(inp_attn->self_kq_mask); |
|
|
| inp_attn->self_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp_attn->self_kq_mask, GGML_TYPE_F16) : inp_attn->self_kq_mask; |
| } |
|
|
| { |
| inp_attn->self_k_idxs_swa = attn_ctx->get_swa()->build_input_k_idxs(ctx0, ubatch); |
| inp_attn->self_v_idxs_swa = attn_ctx->get_swa()->build_input_v_idxs(ctx0, ubatch); |
|
|
| inp_attn->self_kq_mask_swa = build_kq_mask(ctx0, attn_ctx->get_swa(), ubatch, cparams); |
| ggml_set_input(inp_attn->self_kq_mask_swa); |
|
|
| inp_attn->self_kq_mask_swa_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp_attn->self_kq_mask_swa, GGML_TYPE_F16) : inp_attn->self_kq_mask_swa; |
| } |
|
|
| auto inp = std::make_unique<llm_graph_input_mem_hybrid_iswa>(cparams, std::move(inp_attn), std::move(inp_rs), mctx_cur); |
|
|
| return (llm_graph_input_mem_hybrid_iswa *) res->add_input(std::move(inp)); |
| } |
|
|
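| // optional dense projection(s) applied to the pooled embeddings (used by some embedding models) |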
| void llm_graph_context::build_dense_out( |
| ggml_tensor * dense_2, |
| ggml_tensor * dense_2_b, |
| ggml_tensor * dense_3) const { |
| if (!cparams.embeddings || !(dense_2 || dense_2_b || dense_3)) { |
| return; |
| } |
| ggml_tensor * cur = res->t_embd_pooled != nullptr ? res->t_embd_pooled : res->t_embd; |
| GGML_ASSERT(cur != nullptr && "missing t_embd_pooled/t_embd"); |
|
|
| if (dense_2) { |
| cur = ggml_mul_mat(ctx0, dense_2, cur); |
| } |
| if (dense_2_b) { |
| cur = ggml_add(ctx0, cur, dense_2_b); |
| } |
| if (dense_3) { |
| cur = ggml_mul_mat(ctx0, dense_3, cur); |
| } |
| cb(cur, "result_embd_pooled", -1); |
| res->t_embd_pooled = cur; |
| ggml_build_forward_expand(gf, cur); |
| } |
|
|
|
|
| void llm_graph_context::build_pooling( |
| ggml_tensor * cls, |
| ggml_tensor * cls_b, |
| ggml_tensor * cls_out, |
| ggml_tensor * cls_out_b, |
| ggml_tensor * cls_norm) const { |
| if (!cparams.embeddings) { |
| return; |
| } |
|
|
| ggml_tensor * inp = res->t_embd; |
|
|
| GGML_ASSERT(inp != nullptr && "missing result_norm/result_embd tensor"); |
|
|
| ggml_tensor * cur; |
|
|
| switch (pooling_type) { |
| case LLAMA_POOLING_TYPE_NONE: |
| { |
| cur = inp; |
| } break; |
| case LLAMA_POOLING_TYPE_MEAN: |
| { |
| ggml_tensor * inp_mean = build_inp_mean(); |
| cur = ggml_mul_mat(ctx0, ggml_cont(ctx0, ggml_transpose(ctx0, inp)), inp_mean); |
| } break; |
| case LLAMA_POOLING_TYPE_CLS: |
| case LLAMA_POOLING_TYPE_LAST: |
| { |
| ggml_tensor * inp_cls = build_inp_cls(); |
| cur = ggml_get_rows(ctx0, inp, inp_cls); |
| } break; |
| case LLAMA_POOLING_TYPE_RANK: |
| { |
| if (arch == LLM_ARCH_MODERN_BERT) { |
| // ModernBERT ranks with mean pooling instead of the CLS token |
| ggml_tensor * inp_mean = build_inp_mean(); |
| cur = ggml_mul_mat(ctx0, ggml_cont(ctx0, ggml_transpose(ctx0, inp)), inp_mean); |
| } else { |
| ggml_tensor * inp_cls = build_inp_cls(); |
| cur = ggml_get_rows(ctx0, inp, inp_cls); |
| } |
|
|
| // classification head |
| if (cls) { |
| cur = ggml_mul_mat(ctx0, cls, cur); |
| if (cls_b) { |
| cur = ggml_add(ctx0, cur, cls_b); |
| } |
| if (arch == LLM_ARCH_MODERN_BERT) { |
| cur = ggml_gelu(ctx0, cur); |
| } else { |
| cur = ggml_tanh(ctx0, cur); |
| } |
| if (cls_norm) { |
| // optional normalization of the classification output |
| cur = build_norm(cur, cls_norm, NULL, LLM_NORM, -1); |
| } |
| } |
|
|
| // output projection of the classification head |
| if (cls_out) { |
| cur = ggml_mul_mat(ctx0, cls_out, cur); |
| if (cls_out_b) { |
| cur = ggml_add(ctx0, cur, cls_out_b); |
| } |
| } |
|
|
| // Qwen3 rerankers produce the final score with a softmax |
| if (arch == LLM_ARCH_QWEN3 || arch == LLM_ARCH_QWEN3VL) { |
| cur = ggml_soft_max(ctx0, cur); |
| } |
| } break; |
| default: |
| { |
| GGML_ABORT("unknown pooling type"); |
| } |
| } |
|
|
| cb(cur, "result_embd_pooled", -1); |
| res->t_embd_pooled = cur; |
|
|
| ggml_build_forward_expand(gf, cur); |
| } |
|
|
| void llm_graph_context::build_sampling() const { |
| if (samplers.empty() || !res->t_logits) { |
| return; |
| } |
|
|
| std::array<ggml_tensor *, 2> outs; |
| outs[0] = res->t_logits; |
|
|
| auto inp_sampling = std::make_unique<llm_graph_input_sampling>(samplers); |
| res->add_input(std::move(inp_sampling)); |
|
|
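| // map each sequence that produces output to its row in t_logits, in ubatch order |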
| std::map<llama_seq_id, int32_t> seq_to_logit_row; |
| int32_t logit_row_idx = 0; |
|
|
| for (uint32_t i = 0; i < ubatch.n_tokens; i++) { |
| if (ubatch.output[i]) { |
| llama_seq_id seq_id = ubatch.seq_id[i][0]; |
| seq_to_logit_row[seq_id] = logit_row_idx; |
| logit_row_idx++; |
| } |
| } |
|
|
| GGML_ASSERT(res->t_logits != nullptr && "missing t_logits tensor"); |
|
|
| // pad the logits with one extra (zero) row; the per-sequence views below are taken from this padded copy |
| ggml_tensor * logits_t = ggml_pad(ctx0, res->t_logits, 0, 1, 0, 0); |
|
|
| for (const auto & [seq_id, sampler] : samplers) { |
| const auto it = seq_to_logit_row.find(seq_id); |
|
|
| // a sequence without an output row in this ubatch falls back to row 0 and selects the plain logits output (i_out == 0) |
| const auto row_idx = it != seq_to_logit_row.end() ? it->second : 0; |
| const int i_out = it != seq_to_logit_row.end() ? 1 : 0; |
|
|
| ggml_tensor * logits_seq = ggml_view_1d(ctx0, logits_t, logits_t->ne[0], row_idx * logits_t->nb[1]); |
| ggml_format_name(logits_seq, "logits_seq_%d", seq_id); |
|
|
| struct llama_sampler_data data = { |
| logits_seq, |
| nullptr, |
| nullptr, |
| nullptr, |
| }; |
|
|
| assert(sampler->iface->backend_apply); |
| sampler->iface->backend_apply(sampler, ctx0, gf, &data); |
|
|
| if (data.sampled != nullptr) { |
| res->t_sampled[seq_id] = data.sampled; |
| outs[1] = data.sampled; |
| ggml_build_forward_select(gf, outs.data(), outs.size(), i_out); |
| } |
|
|
| if (data.probs != nullptr) { |
| res->t_sampled_probs[seq_id] = data.probs; |
| outs[1] = data.probs; |
| ggml_build_forward_select(gf, outs.data(), outs.size(), i_out); |
| } |
|
|
| if (data.logits != nullptr) { |
| res->t_sampled_logits[seq_id] = data.logits; |
| outs[1] = data.logits; |
| ggml_build_forward_select(gf, outs.data(), outs.size(), i_out); |
| } |
|
|
| if (data.candidates != nullptr) { |
| res->t_candidates[seq_id] = data.candidates; |
| outs[1] = data.candidates; |
| ggml_build_forward_select(gf, outs.data(), outs.size(), i_out); |
| } |
| } |
|
|
| } |
|
|
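| // relative position bucketing as used by T5: offsets below max_exact get their own bucket, |
| // larger distances are binned logarithmically up to max_distance, and bidirectional models |
| // split the buckets between positive and negative offsets |
| // example (n_buckets = 32, bidirectional = false): distances 0..15 map to buckets 0..15, |
| // and any distance >= 128 saturates at bucket 31 |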
| int32_t llama_relative_position_bucket(llama_pos x, llama_pos y, uint64_t n_buckets, bool bidirectional) { |
| // TODO: move to hparams if a T5 variant appears that uses a different value |
| const int64_t max_distance = 128; |
|
|
| if (bidirectional) { |
| n_buckets >>= 1; |
| } |
|
|
| const int64_t max_exact = n_buckets >> 1; |
|
|
| int32_t relative_position = x - y; |
| int32_t relative_bucket = 0; |
|
|
| if (bidirectional) { |
| relative_bucket += (relative_position > 0) * n_buckets; |
| relative_position = std::abs(relative_position); |
| } else { |
| relative_position = -std::min<int32_t>(relative_position, 0); |
| } |
|
|
| int32_t relative_position_if_large = floorf(max_exact + logf(1.0 * relative_position / max_exact) * (n_buckets - max_exact) / log(1.0 * max_distance / max_exact)); |
| relative_position_if_large = std::min<int32_t>(relative_position_if_large, n_buckets - 1); |
| relative_bucket += (relative_position < max_exact ? relative_position : relative_position_if_large); |
|
|
| return relative_bucket; |
| } |
|
|