| | #include "llama-kv-cache.h"
|
| |
|
| | #include "llama-impl.h"
|
| | #include "llama-io.h"
|
| | #include "llama-model.h"
|
| | #include "llama-context.h"
|
| |
|
| | #include <algorithm>
|
| | #include <cassert>
|
| | #include <cmath>
|
| | #include <cstring>
|
| | #include <limits>
|
| | #include <map>
|
| | #include <stdexcept>
|
| |
|
| |
|
| |
|
| |
|
| |
|
llama_kv_cache::llama_kv_cache(
        const llama_model &     model,
        ggml_type               type_k,
        ggml_type               type_v,
        bool                    v_trans,
        bool                    offload,
        bool                    unified,
        uint32_t                kv_size,
        uint32_t                n_seq_max,
        uint32_t                n_pad,
        uint32_t                n_swa,
        llama_swa_type          swa_type,
        const layer_filter_cb & filter,
        const layer_reuse_cb  & reuse) :
    model(model), hparams(model.hparams), v_trans(v_trans),
    n_seq_max(n_seq_max), n_stream(unified ? 1 : n_seq_max), n_pad(n_pad), n_swa(n_swa), swa_type(swa_type) {

    GGML_ASSERT(kv_size % n_pad == 0);

    const uint32_t n_layer_kv = hparams.n_layer_kv();

    // define a comparator for the buft -> ctx map to ensure that the order is well-defined:
    struct ggml_backend_buft_comparator {
        bool operator()(const ggml_backend_buffer_type_t & lhs, const ggml_backend_buffer_type_t & rhs) const {
            return strcmp(ggml_backend_buft_name(lhs), ggml_backend_buft_name(rhs)) < 0;
        }
    };
    std::map<ggml_backend_buffer_type_t, ggml_context_ptr, ggml_backend_buft_comparator> ctx_map;

    // create a context for each buffer type
    auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
        auto it = ctx_map.find(buft);
        if (it == ctx_map.end()) {
            ggml_init_params params = {
                /*.mem_size   =*/ size_t(2u*(1 + n_stream)*n_layer_kv*ggml_tensor_overhead()),
                /*.mem_buffer =*/ NULL,
                /*.no_alloc   =*/ true,
            };

            ggml_context * ctx = ggml_init(params);
            if (!ctx) {
                return nullptr;
            }

            ctx_map.emplace(buft, ctx);

            return ctx;
        }

        return it->second.get();
    };

    GGML_ASSERT(n_stream == 1 || n_stream == n_seq_max);

    v_heads.resize(n_stream);
    for (uint32_t s = 0; s < n_stream; ++s) {
        v_heads[s] = 0;
    }

    v_cells.resize(n_stream);
    for (uint32_t s = 0; s < n_stream; ++s) {
        v_cells[s].resize(kv_size);
    }

    // by default, all sequence ids are mapped to the 0th stream
    seq_to_stream.resize(LLAMA_MAX_SEQ, 0);

    if (n_stream > 1) {
        seq_to_stream.resize(n_stream, 0);
        for (uint32_t s = 0; s < n_stream; ++s) {
            seq_to_stream[s] = s;
        }
    }

    if (v_trans && hparams.is_n_embd_v_gqa_variable()) {
        LLAMA_LOG_WARN("%s: the V embeddings have different sizes across layers and FA is not enabled - padding V cache to %d\n",
                __func__, hparams.n_embd_v_gqa_max());
    }

    const bool is_mla = hparams.is_mla();

    for (uint32_t il = 0; il < hparams.n_layer; il++) {
        if (!hparams.has_kv(il)) {
            LLAMA_LOG_DEBUG("%s: layer %3d: does not have KV cache\n", __func__, il);
            continue;
        }

        if (filter && !filter(il)) {
            LLAMA_LOG_DEBUG("%s: layer %3d: filtered\n", __func__, il);
            continue;
        }

        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il);
        const uint32_t n_embd_v_gqa = !v_trans ? hparams.n_embd_v_gqa(il) : hparams.n_embd_v_gqa_max();

        const char * dev_name = "CPU";

        ggml_backend_buffer_type_t buft = ggml_backend_cpu_buffer_type();

        if (offload) {
            auto * dev = model.dev_layer(il);
            buft = ggml_backend_dev_buffer_type(dev);

            dev_name = ggml_backend_dev_name(dev);
        }

        LLAMA_LOG_DEBUG("%s: layer %3d: dev = %s\n", __func__, il, dev_name);

        ggml_context * ctx = ctx_for_buft(buft);
        if (!ctx) {
            throw std::runtime_error("failed to create ggml context for kv cache");
        }

        // with MLA the K cache also carries the value data, so no separate V cache is needed
        const bool has_k = true;
        const bool has_v = !is_mla;

        ggml_tensor * k = has_k ? ggml_new_tensor_3d(ctx, type_k, n_embd_k_gqa, kv_size, n_stream) : nullptr;
        ggml_tensor * v = has_v ? ggml_new_tensor_3d(ctx, type_v, n_embd_v_gqa, kv_size, n_stream) : nullptr;

        if (has_k) {
            ggml_format_name(k, "cache_k_l%d", il);
        }
        if (has_v) {
            ggml_format_name(v, "cache_v_l%d", il);
        }

        std::vector<ggml_tensor *> k_stream;
        std::vector<ggml_tensor *> v_stream;

        for (uint32_t s = 0; s < n_stream; ++s) {
            k_stream.push_back(has_k ? ggml_view_2d(ctx, k, n_embd_k_gqa, kv_size, k->nb[1], s*k->nb[2]) : nullptr);
            v_stream.push_back(has_v ? ggml_view_2d(ctx, v, n_embd_v_gqa, kv_size, v->nb[1], s*v->nb[2]) : nullptr);
        }

        map_layer_ids[il] = layers.size();

        layers.push_back({ il, k, v, k_stream, v_stream, });
    }

    if (reuse) {
        LLAMA_LOG_DEBUG("%s: reusing layers:\n", __func__);

        for (uint32_t il = 0; il < hparams.n_layer; il++) {
            const int32_t il_reuse = reuse(il);

            if (il_reuse < 0) {
                LLAMA_LOG_DEBUG("%s: - layer %3d: no reuse\n", __func__, il);
                continue;
            }

            if (filter && !filter(il)) {
                LLAMA_LOG_DEBUG("%s: - layer %3d: filtered\n", __func__, il);
                continue;
            }

            GGML_ASSERT(map_layer_ids.find(il_reuse) != map_layer_ids.end());

            map_layer_ids[il] = map_layer_ids[il_reuse];

            LLAMA_LOG_DEBUG("%s: - layer %3d: reuse layer %d, is_swa = %d\n", __func__, il, il_reuse, hparams.is_swa(il));
        }
    }

    // allocate tensors and initialize the buffers to avoid NaNs in the padding
    for (auto & [buft, ctx] : ctx_map) {
        ggml_backend_buffer_t buf;
        if (model.hparams.no_alloc) {
            // dummy buffer - the tensors are not allocated, but the sizes can still be queried
            buf = ggml_backend_buft_alloc_buffer(buft, 0);
            for (ggml_tensor * t = ggml_get_first_tensor(ctx.get()); t != nullptr; t = ggml_get_next_tensor(ctx.get(), t)) {
                t->buffer = buf;
            }
        } else {
            buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx.get(), buft);
        }
        if (!buf) {
            throw std::runtime_error("failed to allocate buffer for kv cache");
        }

        LLAMA_LOG_INFO("%s: %10s KV buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf)/1024.0/1024.0);

        ggml_backend_buffer_clear(buf, 0);
        ctxs_bufs.emplace_back(std::move(ctx), buf);
    }

    {
        const size_t memory_size_k = size_k_bytes();
        const size_t memory_size_v = size_v_bytes();

        LLAMA_LOG_INFO("%s: size = %7.2f MiB (%6u cells, %3d layers, %2u/%u seqs), K (%s): %7.2f MiB, V (%s): %7.2f MiB\n", __func__,
                (float)(memory_size_k + memory_size_v) / (1024.0f * 1024.0f), kv_size, (int) layers.size(), n_seq_max, n_stream,
                ggml_type_name(type_k), (float)memory_size_k / (1024.0f * 1024.0f),
                ggml_type_name(type_v), (float)memory_size_v / (1024.0f * 1024.0f));
    }

    const char * LLAMA_KV_CACHE_DEBUG = getenv("LLAMA_KV_CACHE_DEBUG");
    debug = LLAMA_KV_CACHE_DEBUG ? atoi(LLAMA_KV_CACHE_DEBUG) : 0;
}

void llama_kv_cache::clear(bool data) {
    for (uint32_t s = 0; s < n_stream; ++s) {
        v_cells[s].reset();
        v_heads[s] = 0;
    }

    if (data) {
        for (auto & [_, buf] : ctxs_bufs) {
            ggml_backend_buffer_clear(buf.get(), 0);
        }
    }
}

bool llama_kv_cache::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_pos p1) {
    GGML_ASSERT(seq_id == -1 || (seq_id >= 0 && (size_t) seq_id < seq_to_stream.size()));

    if (p0 < 0) {
        p0 = 0;
    }

    if (p1 < 0) {
        p1 = std::numeric_limits<llama_pos>::max();
    }

    if (seq_id >= 0) {
        auto & cells = v_cells[seq_to_stream[seq_id]];
        auto & head  = v_heads[seq_to_stream[seq_id]];

        uint32_t new_head = cells.size();

        for (uint32_t i = 0; i < cells.size(); ++i) {
            if (!cells.pos_in(i, p0, p1)) {
                continue;
            }

            if (cells.seq_has(i, seq_id) && cells.seq_rm(i, seq_id)) {
                if (new_head == cells.size()) {
                    new_head = i;
                }
            }
        }

        // If we freed up a slot, set head to it so searching can start there.
        if (new_head != cells.size() && new_head < head) {
            head = new_head;
        }
    } else {
        // match any sequence
        for (uint32_t s = 0; s < n_stream; ++s) {
            auto & cells = v_cells[s];
            auto & head  = v_heads[s];

            uint32_t new_head = cells.size();

            for (uint32_t i = 0; i < cells.size(); ++i) {
                if (!cells.pos_in(i, p0, p1)) {
                    continue;
                }

                cells.rm(i);

                if (new_head == cells.size()) {
                    new_head = i;
                }
            }

            // If we freed up a slot, set head to it so searching can start there.
            if (new_head != cells.size() && new_head < head) {
                head = new_head;
            }
        }
    }

    return true;
}

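// copy the cells of seq_id_src in [p0, p1) to seq_id_dst
// within the same stream this only updates the cell metadata;
// across streams the actual KV buffer data has to be copied as well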
void llama_kv_cache::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) {
    GGML_ASSERT(seq_id_src >= 0 && (size_t) seq_id_src < seq_to_stream.size());
    GGML_ASSERT(seq_id_dst >= 0 && (size_t) seq_id_dst < seq_to_stream.size());

    const auto s0 = seq_to_stream[seq_id_src];
    const auto s1 = seq_to_stream[seq_id_dst];

    if (s0 == s1) {
        // both sequences are in the same stream - a metadata-only copy is enough
        auto & cells = v_cells[s0];

        if (seq_id_src == seq_id_dst) {
            return;
        }

        if (p0 < 0) {
            p0 = 0;
        }

        if (p1 < 0) {
            p1 = std::numeric_limits<llama_pos>::max();
        }

        for (uint32_t i = 0; i < cells.size(); ++i) {
            if (!cells.pos_in(i, p0, p1)) {
                continue;
            }

            if (cells.seq_has(i, seq_id_src)) {
                cells.seq_add(i, seq_id_dst);
            }
        }

        return;
    }

    // cross-stream copies require copying the actual KV buffer data, which is
    // only supported for full buffers
    bool is_full = true;

    if (p0 > 0 && p0 + 1 < (int) get_size()) {
        is_full = false;
    }

    if (p1 > 0 && p1 + 1 < (int) get_size()) {
        is_full = false;
    }

    GGML_ASSERT(is_full && "seq_cp() is only supported for full KV buffers");

    // enqueue the buffer copy - it will be performed during the next update()
    sc_info.ssrc.push_back(s0);
    sc_info.sdst.push_back(s1);

    v_cells[s1].reset();
    for (uint32_t i = 0; i < v_cells[s0].size(); ++i) {
        if (v_cells[s0].seq_has(i, seq_id_src)) {
            llama_pos pos   = v_cells[s0].pos_get(i);
            llama_pos shift = v_cells[s0].get_shift(i);

            llama_kv_cell_ext ext = v_cells[s0].ext_get(i);

            if (shift != 0) {
                pos -= shift;
                assert(pos >= 0);
            }

            v_cells[s1].pos_set(i, pos);
            v_cells[s1].seq_add(i, seq_id_dst);

            if (shift != 0) {
                v_cells[s1].pos_add(i, shift);
            }

            v_cells[s1].ext_set(i, ext);
        }
    }

    v_heads[s1] = v_heads[s0];
}

void llama_kv_cache::seq_keep(llama_seq_id seq_id) {
    GGML_ASSERT(seq_id >= 0 && (size_t) seq_id < seq_to_stream.size());

    auto & cells = v_cells[seq_to_stream[seq_id]];
    auto & head  = v_heads[seq_to_stream[seq_id]];

    uint32_t new_head = cells.size();

    for (uint32_t i = 0; i < cells.size(); ++i) {
        if (cells.seq_keep(i, seq_id)) {
            if (new_head == cells.size()) {
                new_head = i;
            }
        }
    }

    // If we freed up a slot, set head to it so searching can start there.
    if (new_head != cells.size() && new_head < head) {
        head = new_head;
    }
}

void llama_kv_cache::seq_add(llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) {
    GGML_ASSERT(seq_id >= 0 && (size_t) seq_id < seq_to_stream.size());
    GGML_ASSERT(hparams.n_pos_per_embd() == 1 && "seq_add() is only supported for n_pos_per_embd() == 1");

    auto & cells = v_cells[seq_to_stream[seq_id]];
    auto & head  = v_heads[seq_to_stream[seq_id]];

    if (shift == 0) {
        return;
    }

    uint32_t new_head = cells.size();

    if (p0 < 0) {
        p0 = 0;
    }

    if (p1 < 0) {
        p1 = std::numeric_limits<llama_pos>::max();
    }

    // If there is no range then return early to avoid looping over all cells.
    if (p0 == p1) {
        return;
    }

    for (uint32_t i = 0; i < cells.size(); ++i) {
        if (!cells.pos_in(i, p0, p1)) {
            continue;
        }

        if (cells.seq_has(i, seq_id)) {
            if (cells.pos_add(i, shift)) {
                if (new_head == cells.size()) {
                    new_head = i;
                }
            }
        }
    }

    // If we freed up a slot, set head to it so searching can start there.
    // Otherwise we just start the next search from the beginning.
    head = new_head != cells.size() ? new_head : 0;
}

void llama_kv_cache::seq_div(llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) {
    GGML_ASSERT(seq_id >= 0 && (size_t) seq_id < seq_to_stream.size());
    GGML_ASSERT(hparams.n_pos_per_embd() == 1 && "seq_div() is only supported for n_pos_per_embd() == 1");

    auto & cells = v_cells[seq_to_stream[seq_id]];

    if (d == 1) {
        return;
    }

    if (p0 < 0) {
        p0 = 0;
    }

    if (p1 < 0) {
        p1 = std::numeric_limits<llama_pos>::max();
    }

    // If there is no range then return early to avoid looping over all cells.
    if (p0 == p1) {
        return;
    }

    for (uint32_t i = 0; i < cells.size(); ++i) {
        if (!cells.pos_in(i, p0, p1)) {
            continue;
        }

        if (cells.seq_has(i, seq_id)) {
            cells.pos_div(i, d);
        }
    }
}

llama_pos llama_kv_cache::seq_pos_min(llama_seq_id seq_id) const {
    GGML_ASSERT(seq_id >= 0 && (size_t) seq_id < seq_to_stream.size());

    const auto & cells = v_cells[seq_to_stream[seq_id]];

    return cells.seq_pos_min(seq_id);
}

llama_pos llama_kv_cache::seq_pos_max(llama_seq_id seq_id) const {
    GGML_ASSERT(seq_id >= 0 && (size_t) seq_id < seq_to_stream.size());

    const auto & cells = v_cells[seq_to_stream[seq_id]];

    return cells.seq_pos_max(seq_id);
}

std::map<ggml_backend_buffer_type_t, size_t> llama_kv_cache::memory_breakdown() const {
    std::map<ggml_backend_buffer_type_t, size_t> ret;
    for (const auto & [ctx, buf] : ctxs_bufs) {
        ggml_backend_buffer_type_t buft = ggml_backend_buffer_get_type(buf.get());

        if (hparams.no_alloc) {
            // the buffers are empty - report the size they would have if allocated
            GGML_ASSERT(ggml_backend_buffer_get_base(buf.get()) == nullptr);
            ret[buft] += ggml_backend_alloc_ctx_tensors_from_buft_size(ctx.get(), buft);
        } else {
            ret[buft] += ggml_backend_buffer_get_size(buf.get());
        }
    }

    return ret;
}

llama_memory_context_ptr llama_kv_cache::init_batch(
        llama_batch_allocr & balloc,
        uint32_t n_ubatch,
        bool embd_all) {
    GGML_UNUSED(embd_all);

    do {
        balloc.split_reset();

        std::vector<llama_ubatch> ubatches;
        while (true) {
            auto ubatch = n_stream == 1 ? balloc.split_simple(n_ubatch) : balloc.split_equal(n_ubatch, true);

            if (ubatch.n_tokens == 0) {
                break;
            }

            ubatches.push_back(std::move(ubatch));
        }

        if (balloc.get_n_used() < balloc.get_n_tokens()) {
            // failed to find a suitable split
            break;
        }

        auto sinfos = prepare(ubatches);
        if (sinfos.empty()) {
            break;
        }

        return std::make_unique<llama_kv_cache_context>(
                this, std::move(sinfos), std::move(ubatches));
    } while (false);

    return std::make_unique<llama_kv_cache_context>(LLAMA_MEMORY_STATUS_FAILED_PREPARE);
}

llama_memory_context_ptr llama_kv_cache::init_full() {
    return std::make_unique<llama_kv_cache_context>(this);
}

llama_memory_context_ptr llama_kv_cache::init_update(llama_context * lctx, bool optimize) {
    GGML_UNUSED(optimize);

    bool do_shift = get_has_shift();

    return std::make_unique<llama_kv_cache_context>(this, lctx, do_shift, std::move(sc_info));
}

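// dry-run: find a slot for each ubatch, apply it to the cells to validate the
// placement, then restore the original cell state. the returned slot infos are
// used later to actually write the ubatches into the cache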
llama_kv_cache::slot_info_vec_t llama_kv_cache::prepare(const std::vector<llama_ubatch> & ubatches) {
    llama_kv_cache::slot_info_vec_t res;

    struct state_t {
        slot_info sinfo; // slot info for the ubatch

        std::vector<uint32_t> v_heads_old; // old positions of the heads, before placing the ubatch

        std::vector<llama_kv_cells> v_cells; // copy of the old cells, before placing the ubatch
    };

    // remember the old state of the cells so we can restore it in the end
    std::vector<state_t> states;

    bool success = true;

    for (const auto & ubatch : ubatches) {
        // only find a suitable slot for the ubatch. don't modify the cells yet
        const auto sinfo_new = find_slot(ubatch, false);
        if (sinfo_new.empty()) {
            success = false;
            break;
        }

        // remember the position that we found
        res.push_back(sinfo_new);

        // store the old state of the cells in the recovery stack
        {
            state_t state = { sinfo_new, v_heads, {} };

            for (uint32_t s = 0; s < sinfo_new.n_stream(); ++s) {
                auto & cells = v_cells[sinfo_new.strm[s]];

                state.v_cells.push_back(cells.cp(sinfo_new.idxs[s]));
            }

            states.push_back(std::move(state));
        }

        // now emplace the ubatch
        apply_ubatch(sinfo_new, ubatch);
    }

    GGML_ASSERT(!states.empty() || !success);

    // iterate backwards and restore the cells to their original state
    for (auto it = states.rbegin(); it != states.rend(); ++it) {
        const auto & sinfo = it->sinfo;

        for (uint32_t s = 0; s < sinfo.n_stream(); ++s) {
            auto & cells = v_cells[sinfo.strm[s]];
            auto & head  = v_heads[sinfo.strm[s]];

            cells.set(sinfo.idxs[s], it->v_cells[s]);
            head = it->v_heads_old[s];
        }
    }

    if (!success) {
        return {};
    }

    return res;
}

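// perform the deferred cache updates: pending stream copies (queued by cross-stream
// seq_cp) and the K-shift (re-rotating the cached keys after position shifts)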
bool llama_kv_cache::update(llama_context * lctx, bool do_shift, const stream_copy_info & sc_info) {
    bool updated = false;

    auto * sched = lctx->get_sched();

    if (!sc_info.empty()) {
        assert(n_stream > 1 && "stream copy should never happen with a single stream");

        llama_synchronize(lctx);

        const size_t n_copy = sc_info.ssrc.size();

        for (size_t i = 0; i < n_copy; ++i) {
            const auto ssrc = sc_info.ssrc[i];
            const auto sdst = sc_info.sdst[i];

            assert(ssrc < n_stream);
            assert(sdst < n_stream);

            LLAMA_LOG_DEBUG("%s: copying KV buffer: stream %d to stream %d\n", __func__, ssrc, sdst);

            assert(ssrc != sdst);

            for (uint32_t il = 0; il < layers.size(); ++il) {
                const auto & layer = layers[il];

                ggml_backend_tensor_copy(layer.k_stream[ssrc], layer.k_stream[sdst]);

                if (layer.v_stream[ssrc]) {
                    ggml_backend_tensor_copy(layer.v_stream[ssrc], layer.v_stream[sdst]);
                }
            }
        }
    }

    if (do_shift) {
        if (!get_can_shift()) {
            GGML_ABORT("The current KV cache / model configuration does not support K-shift");
        }

        LLAMA_LOG_DEBUG("%s: applying K-shift\n", __func__);

        // apply K-shift if needed
        if (hparams.rope_type != LLAMA_ROPE_TYPE_NONE) {
            ggml_backend_sched_reset(sched);

            auto * res = lctx->get_gf_res_reserve();

            res->reset();

            auto * gf = build_graph_shift(res, lctx);
            if (!ggml_backend_sched_alloc_graph(sched, gf)) {
                LLAMA_LOG_ERROR("%s: failed to allocate compute graph for K-shift\n", __func__);
                return updated;
            }

            res->set_inputs(nullptr);

            if (lctx->graph_compute(gf, false) != GGML_STATUS_SUCCESS) {
                LLAMA_LOG_ERROR("%s: failed to compute K-shift\n", __func__);
                return updated;
            }

            updated = true;
        }

        for (uint32_t s = 0; s < n_stream; ++s) {
            auto & cells = v_cells[s];

            cells.reset_shift();
        }
    }

    return updated;
}

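// find a set of cells that can hold the ubatch
// cont = true requires the cells to form a single contiguous range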
llama_kv_cache::slot_info llama_kv_cache::find_slot(const llama_ubatch & ubatch, bool cont) const {
    if (debug > 0) {
        for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) {
            const auto seq_id    = ubatch.seq_id_unq[s];
            const auto stream_id = seq_to_stream[seq_id];

            const auto & cells = v_cells[stream_id];

            const uint32_t head_cur = v_heads[stream_id];

            LLAMA_LOG_DEBUG("%s: stream[%d], n = %5d, used = %5d, head = %5d, size = %5d, n_swa = %5d\n",
                    __func__, stream_id, cells.used_max_p1(), cells.get_used(), head_cur, get_size(), n_swa);

            if ((debug == 2 && n_swa > 0) || debug > 2) {
                std::string ss;
                for (uint32_t i = 0; i < cells.size(); ++i) {
                    if (cells.is_empty(i)) {
                        ss += '.';
                    } else {
                        assert(cells.seq_count(i) >= 1);

                        if (cells.seq_count(i) == 1) {
                            ss += std::to_string(cells.seq_get(i));
                        } else {
                            ss += 'M';
                        }
                    }
                    if (i%256 == 255) {
                        ss += " *";
                        ss += '\n';
                    }
                }
                LLAMA_LOG_DEBUG("\n%s\n", ss.c_str());
            }

            if ((debug == 2 && n_swa > 0) || debug > 2) {
                std::string ss;
                for (uint32_t i = 0; i < cells.size(); ++i) {
                    std::string cur;
                    if (cells.is_empty(i)) {
                        cur = '.';
                    } else {
                        cur = std::to_string(cells.pos_get(i));
                    }
                    const int n = cur.size();
                    for (int j = 0; j < 5 - n; ++j) {
                        cur += ' ';
                    }
                    ss += cur;
                    if (i%256 == 255) {
                        ss += " *";
                    }
                    if (i%64 == 63) {
                        ss += '\n';
                    }
                }
                LLAMA_LOG_DEBUG("\n%s\n", ss.c_str());
            }

            for (int s = 0; s < LLAMA_MAX_SEQ; ++s) {
                if (cells.seq_pos_min(s) < 0) {
                    continue;
                }

                LLAMA_LOG_DEBUG("%s: stream[%d] min[%d] = %5d, max[%d] = %5d\n", __func__, stream_id, s, cells.seq_pos_min(s), s, cells.seq_pos_max(s));
            }
        }
    }

    uint32_t n_tokens = ubatch.n_tokens;
    uint32_t n_seqs   = 1;

    if (n_stream > 1) {
        GGML_ASSERT(n_tokens % ubatch.n_seqs_unq == 0);

        n_seqs   = ubatch.n_seqs_unq;
        n_tokens = n_tokens / n_seqs;
    }

    slot_info res = {
        /*.s0   =*/ LLAMA_MAX_SEQ,
        /*.s1   =*/ 0,
        /*.strm =*/ { },
        /*.idxs =*/ { },
    };

    res.resize(n_seqs);

    for (uint32_t s = 0; s < n_seqs; ++s) {
        const auto seq_id = ubatch.seq_id_unq[s];

        if (n_stream > 1) {
            GGML_ASSERT(ubatch.n_seq_id[s*n_tokens]    == 1);
            GGML_ASSERT(ubatch.seq_id  [s*n_tokens][0] == seq_id);
        }

        res.s0 = std::min<uint32_t>(res.s0, seq_to_stream[seq_id]);
        res.s1 = std::max<uint32_t>(res.s1, seq_to_stream[seq_id]);

        res.strm[s] = seq_to_stream[seq_id];
        res.idxs[s].reserve(n_tokens);

        const auto & cells = v_cells[seq_to_stream[seq_id]];

        uint32_t head_cur = v_heads[seq_to_stream[seq_id]];

        // if we have enough unused cells before the current head ->
        //   better to start searching from the beginning of the cache, hoping to fill it
        if (head_cur > cells.get_used() + 2*n_tokens) {
            head_cur = 0;
        }

        if (n_tokens > cells.size()) {
            LLAMA_LOG_ERROR("%s: n_tokens = %d > size = %u\n", __func__, n_tokens, cells.size());
            return { };
        }

        uint32_t n_tested = 0;

        // for continuous slots, we test that n_tokens cells fit starting from head_cur
        // for non-continuous slots, we test each cell individually
        const uint32_t n_test = cont ? n_tokens : 1;

        while (true) {
            if (head_cur + n_test > cells.size()) {
                n_tested += cells.size() - head_cur;
                head_cur = 0;
                continue;
            }

            for (uint32_t i = 0; i < n_test; i++) {
                const auto idx = head_cur;

                head_cur++;
                n_tested++;

                // can we use this cell? either:
                // - the cell is empty
                // - the cell is occupied by exactly one sequence and its position
                //   is masked by the SWA (using the current max pos of that sequence)
                bool can_use = cells.is_empty(idx);

                if (!can_use && cells.seq_count(idx) == 1) {
                    const llama_pos pos_cell = cells.pos_get(idx);

                    if (!can_use) {
                        const llama_seq_id seq_id_cell = cells.seq_get(idx);

                        // SWA mask
                        if (llama_hparams::is_masked_swa(n_swa, swa_type, pos_cell, cells.seq_pos_max(seq_id_cell) + 1)) {
                            can_use = true;
                        }
                    }
                }

                if (can_use) {
                    res.idxs[s].push_back(idx);
                } else {
                    if (cont) {
                        break;
                    }
                }
            }

            if (res.idxs[s].size() == n_tokens) {
                break;
            }

            if (cont) {
                res.idxs[s].clear();
            }

            if (n_tested >= cells.size()) {
                // we tested the whole cache without finding a slot
                return { };
            }
        }

        // we didn't find enough cells for this sequence
        if (res.idxs[s].size() < n_tokens) {
            return { };
        }
    }

    assert(res.s1 >= res.s0);

    return res;
}

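// write the ubatch metadata (positions, sequence ids) into the cells of the slot
// and advance the heads past the last written cell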
void llama_kv_cache::apply_ubatch(const slot_info & sinfo, const llama_ubatch & ubatch) {
    // keep track of the max sequence position that we would overwrite with this ubatch
    // for non-SWA cache, this would be always empty
    llama_pos seq_pos_max_rm[LLAMA_MAX_SEQ];
    for (uint32_t s = 0; s < LLAMA_MAX_SEQ; ++s) {
        seq_pos_max_rm[s] = -1;
    }

    assert(ubatch.n_tokens == sinfo.n_stream()*sinfo.size());

    for (uint32_t s = 0; s < sinfo.n_stream(); ++s) {
        for (uint32_t ii = 0; ii < sinfo.size(); ++ii) {
            const uint32_t i = s*sinfo.size() + ii;

            auto & cells = v_cells[sinfo.strm[s]];

            const auto idx = sinfo.idxs[s][ii];

            if (!cells.is_empty(idx)) {
                assert(cells.seq_count(idx) == 1);

                const llama_seq_id seq_id = cells.seq_get(idx);
                const llama_pos   pos     = cells.pos_get(idx);

                seq_pos_max_rm[seq_id] = std::max(seq_pos_max_rm[seq_id], pos);

                cells.rm(idx);
            }

            cells.pos_set(idx, ubatch.pos[i]);

            if (ubatch.is_pos_2d()) {
                llama_kv_cell_ext ext {
                    ubatch.pos[i + ubatch.n_tokens*2],
                    ubatch.pos[i + ubatch.n_tokens],
                };
                cells.ext_set(idx, ext);
            }

            for (int32_t s = 0; s < ubatch.n_seq_id[i]; s++) {
                cells.seq_add(idx, ubatch.seq_id[i][s]);
            }
        }
    }

    // note: we want to preserve the invariant that all positions between [pos_min, pos_max]
    //       for each sequence will be present in the cache, so we have to purge any position
    //       which is less than those we would overwrite
    for (uint32_t s = 0; s < LLAMA_MAX_SEQ; ++s) {
        if (seq_pos_max_rm[s] == -1) {
            continue;
        }

        GGML_ASSERT(s < seq_to_stream.size());

        auto & cells = v_cells[seq_to_stream[s]];

        if (cells.seq_pos_min(s) <= seq_pos_max_rm[s]) {
            LLAMA_LOG_DEBUG("%s: purging positions [%d, %d] of sequence %d from KV cache\n",
                    __func__, cells.seq_pos_min(s), seq_pos_max_rm[s], s);

            seq_rm(s, cells.seq_pos_min(s), seq_pos_max_rm[s] + 1);
        }
    }

    // move the head at the end of the slot
    for (uint32_t s = 0; s < sinfo.n_stream(); ++s) {
        auto & head = v_heads[sinfo.strm[s]];

        head = sinfo.idxs[s].back() + 1;
    }
}

bool llama_kv_cache::get_can_shift() const {
    if (model.arch == LLM_ARCH_STEP35) {
        return false;
    }
    if (hparams.n_pos_per_embd() > 1) {
        return false;
    }
    return true;
}

uint32_t llama_kv_cache::get_size() const {
    const auto & cells = v_cells[seq_to_stream[0]];

    return cells.size();
}

uint32_t llama_kv_cache::get_n_stream() const {
    return n_stream;
}

bool llama_kv_cache::get_has_shift() const {
    bool result = false;

    for (uint32_t s = 0; s < n_stream; ++s) {
        result |= v_cells[s].get_has_shift();
    }

    return result;
}

uint32_t llama_kv_cache::get_n_kv(const slot_info & sinfo) const {
    uint32_t result = 0;

    // pad the n_kv value so that the graph remains constant across batches and can be reused
    const uint32_t n_pad_cur = std::max(n_pad, 256u);

    for (uint32_t s = 0; s < sinfo.n_stream(); ++s) {
        const auto & cells = v_cells[sinfo.strm[s]];

        result = std::max(std::min(cells.size(), std::max(n_pad_cur, GGML_PAD(cells.used_max_p1(), n_pad_cur))), result);
    }

    return result;
}

ggml_tensor * llama_kv_cache::get_k(ggml_context * ctx, int32_t il, uint32_t n_kv, const slot_info & sinfo) const {
    const int32_t ikv = map_layer_ids.at(il);

    auto * k = layers[ikv].k;

    const uint64_t kv_size      = get_size();
    const uint64_t n_embd_k_gqa = k->ne[0];

    assert(n_embd_k_gqa == hparams.n_embd_k_gqa(il));

    const uint32_t ns = sinfo.s1 - sinfo.s0 + 1;

    return ggml_view_4d(ctx, k,
            hparams.n_embd_head_k, hparams.n_head_kv(il), n_kv, ns,
            ggml_row_size(k->type, hparams.n_embd_head_k),
            ggml_row_size(k->type, n_embd_k_gqa),
            ggml_row_size(k->type, n_embd_k_gqa*kv_size),
            ggml_row_size(k->type, n_embd_k_gqa*kv_size)*sinfo.s0);
}

ggml_tensor * llama_kv_cache::get_v(ggml_context * ctx, int32_t il, uint32_t n_kv, const slot_info & sinfo) const {
    const int32_t ikv = map_layer_ids.at(il);

    auto * v = layers[ikv].v;

    const uint64_t kv_size      = get_size();
    const uint64_t n_embd_v_gqa = v->ne[0];

    // the V cache can be padded to the max V embedding size across layers
    assert(n_embd_v_gqa >= hparams.n_embd_v_gqa(il));

    const uint32_t ns = sinfo.s1 - sinfo.s0 + 1;

    if (!v_trans) {
        // note: v->nb[1] <= v->nb[2]
        return ggml_view_4d(ctx, v,
                hparams.n_embd_head_v, hparams.n_head_kv(il), n_kv, ns,
                ggml_row_size(v->type, hparams.n_embd_head_v),
                ggml_row_size(v->type, n_embd_v_gqa),
                ggml_row_size(v->type, n_embd_v_gqa*kv_size),
                ggml_row_size(v->type, n_embd_v_gqa*kv_size)*sinfo.s0);
    }

    // note: v->nb[1] > v->nb[2] (the cache is transposed)
    return ggml_view_4d(ctx, v,
            n_kv, hparams.n_head_kv(il), hparams.n_embd_head_v, ns,
            ggml_row_size(v->type, kv_size*hparams.n_embd_head_v),
            ggml_row_size(v->type, kv_size),
            ggml_row_size(v->type, kv_size*n_embd_v_gqa),
            ggml_row_size(v->type, kv_size*n_embd_v_gqa)*sinfo.s0);
}

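// build the graph nodes that store the K values of the current ubatch into the
// cache, using ggml_set_rows with the destination row indices from k_idxs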
ggml_tensor * llama_kv_cache::cpy_k(ggml_context * ctx, ggml_tensor * k_cur, ggml_tensor * k_idxs, int32_t il, const slot_info & sinfo) const {
    GGML_UNUSED(sinfo);

    const int32_t ikv = map_layer_ids.at(il);

    ggml_tensor * k = layers[ikv].k;

    const int64_t n_embd_head = k_cur->ne[0];
    const int64_t n_head      = k_cur->ne[1];
    const int64_t n_tokens    = k_cur->ne[2];

    const int64_t n_embd_gqa = n_embd_head*n_head;

    // we can merge dims 0 and 1
    GGML_ASSERT(ggml_row_size(k_cur->type, n_embd_head) == k_cur->nb[1]);

    k_cur = ggml_view_2d(ctx, k_cur, n_embd_gqa, n_tokens, k_cur->nb[2], 0);

    const int64_t n_stream = k->ne[2];

    if (n_stream > 1) {
        const int64_t kv_size = get_size();

        assert(n_embd_gqa == k->ne[0]);
        assert(kv_size    == k->ne[1]);

        // merge the buffer across all streams because the idxs are global
        k = ggml_reshape_2d(ctx, k, n_embd_gqa, kv_size*n_stream);
    }

    // store the current K values into the cache
    return ggml_set_rows(ctx, k, k_cur, k_idxs);
}

ggml_tensor * llama_kv_cache::cpy_v(ggml_context * ctx, ggml_tensor * v_cur, ggml_tensor * v_idxs, int32_t il, const slot_info & sinfo) const {
    GGML_UNUSED(sinfo);

    const int32_t ikv = map_layer_ids.at(il);

    auto * v = layers[ikv].v;

    const int64_t n_embd_head = v_cur->ne[0];
    const int64_t n_head      = v_cur->ne[1];
    const int64_t n_tokens    = v_cur->ne[2];

    const int64_t n_embd_gqa = n_embd_head*n_head;

    // we can merge dims 0 and 1
    GGML_ASSERT(ggml_row_size(v_cur->type, n_embd_head) == v_cur->nb[1]);

    const int64_t n_stream = v->ne[2];

    if (!v_trans) {
        v_cur = ggml_view_2d(ctx, v_cur, n_embd_gqa, n_tokens, v_cur->nb[2], 0);

        if (n_stream > 1) {
            const int64_t kv_size = get_size();

            assert(n_embd_gqa == v->ne[0]);
            assert(kv_size    == v->ne[1]);

            // merge the buffer across all streams because the idxs are global
            v = ggml_reshape_2d(ctx, v, n_embd_gqa, kv_size*n_stream);
        }

        return ggml_set_rows(ctx, v, v_cur, v_idxs);
    }

    if (ggml_row_size(v_cur->type, n_embd_gqa) == v_cur->nb[2]) {
        // the data is contiguous across tokens - a reshape is enough
        v_cur = ggml_reshape_2d(ctx, v_cur, n_embd_gqa, n_tokens);
    } else {
        // otherwise -> make a contiguous copy
        v_cur = ggml_cont_2d (ctx, v_cur, n_embd_gqa, n_tokens);
    }

    // pad the V embeddings to the max size across layers, if necessary
    if (n_embd_gqa < v->ne[0]) {
        v_cur = ggml_pad(ctx, v_cur, v->ne[0] - n_embd_gqa, 0, 0, 0);
    }

    // in the transposed layout, each row of the destination view holds a single element
    ggml_tensor * v_view = ggml_reshape_2d(ctx, v, 1, ggml_nelements(v));

    v_cur = ggml_reshape_2d(ctx, v_cur, 1, ggml_nelements(v_cur));

    return ggml_set_rows(ctx, v_view, v_cur, v_idxs);
}

ggml_tensor * llama_kv_cache::build_input_k_idxs(ggml_context * ctx, const llama_ubatch & ubatch) const {
    const uint32_t n_tokens = ubatch.n_tokens;

    ggml_tensor * k_idxs = ggml_new_tensor_1d(ctx, GGML_TYPE_I64, n_tokens);

    ggml_set_input(k_idxs);

    return k_idxs;
}

ggml_tensor * llama_kv_cache::build_input_v_idxs(ggml_context * ctx, const llama_ubatch & ubatch) const {
    const uint32_t n_tokens = ubatch.n_tokens;

    ggml_tensor * v_idxs;

    if (!v_trans) {
        v_idxs = ggml_new_tensor_1d(ctx, GGML_TYPE_I64, n_tokens);
    } else {
        v_idxs = ggml_new_tensor_1d(ctx, GGML_TYPE_I64, n_tokens*hparams.n_embd_v_gqa_max());
    }

    ggml_set_input(v_idxs);

    return v_idxs;
}

void llama_kv_cache::set_input_k_idxs(ggml_tensor * dst, const llama_ubatch * ubatch, const slot_info & sinfo) const {
    const uint32_t n_tokens = ubatch->n_tokens;
    GGML_ASSERT(n_tokens == (int64_t) sinfo.size()*sinfo.n_stream());

    GGML_ASSERT(ggml_backend_buffer_is_host(dst->buffer));
    int64_t * data = (int64_t *) dst->data;

    for (uint32_t s = 0; s < sinfo.n_stream(); ++s) {
        const int64_t offs = sinfo.strm[s]*get_size();

        for (uint32_t i = 0; i < sinfo.size(); ++i) {
            data[s*sinfo.size() + i] = offs + sinfo.idxs[s][i];
        }
    }
}

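// fill the destination row indices for the V cache
// for the transposed layout each (cell, embedding) element needs its own index,
// because the elements of one cell are strided by kv_size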
void llama_kv_cache::set_input_v_idxs(ggml_tensor * dst, const llama_ubatch * ubatch, const slot_info & sinfo) const {
    const uint32_t n_tokens = ubatch->n_tokens;
    GGML_ASSERT(n_tokens == (int64_t) sinfo.size()*sinfo.n_stream());

    GGML_ASSERT(ggml_backend_buffer_is_host(dst->buffer));
    int64_t * data = (int64_t *) dst->data;

    if (!v_trans) {
        for (uint32_t s = 0; s < sinfo.n_stream(); ++s) {
            const int64_t offs = sinfo.strm[s]*get_size();

            for (uint32_t i = 0; i < sinfo.size(); ++i) {
                data[s*sinfo.size() + i] = offs + sinfo.idxs[s][i];
            }
        }
    } else {
        // note: the V cache is transposed
        const int64_t kv_size = get_size();

        const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa_max();

        for (uint32_t s = 0; s < sinfo.n_stream(); ++s) {
            const int64_t offs = sinfo.strm[s]*kv_size*n_embd_v_gqa;

            for (uint32_t i = 0; i < sinfo.size(); ++i) {
                for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
                    data[s*sinfo.size()*n_embd_v_gqa + i*n_embd_v_gqa + j] = offs + j*kv_size + sinfo.idxs[s][i];
                }
            }
        }
    }
}

void llama_kv_cache::set_input_k_shift(ggml_tensor * dst) const {
    GGML_ASSERT(ggml_backend_buffer_is_host(dst->buffer));

    int32_t * data = (int32_t *) dst->data;

    for (uint32_t s = 0; s < n_stream; ++s) {
        const auto & cells = v_cells[s];

        for (uint32_t i = 0; i < cells.size(); ++i) {
            data[s*cells.size() + i] = cells.is_empty(i) ? 0 : cells.get_shift(i);
        }
    }
}

struct args_set_input_kq_mask {
    const llama_hparams & hparams;
    const llama_ubatch  * ubatch;

    const std::vector<llama_kv_cells> & v_cells;
    const std::vector<uint32_t>       & seq_to_stream;

    uint32_t       n_swa;
    llama_swa_type swa_type;

    int64_t n_kv;
    int64_t n_stream;
    int64_t n_tps;
};

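// the KQ mask construction is specialized via template parameters so that the
// causal/SWA/2D/ALiBi checks are resolved at compile time instead of per element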
| | template<bool causal, bool swa, bool is_2d, bool alibi>
|
| | static void set_input_kq_mask_impl(const args_set_input_kq_mask & args, float * data) {
|
| |
|
| | const auto & ubatch = args.ubatch;
|
| |
|
| | const auto & v_cells = args.v_cells;
|
| | const auto & seq_to_stream = args.seq_to_stream;
|
| |
|
| | const uint32_t n_swa = args.n_swa;
|
| | const llama_swa_type swa_type = args.swa_type;
|
| |
|
| | const int64_t n_kv = args.n_kv;
|
| | const int64_t n_stream = args.n_stream;
|
| | const int64_t n_tps = args.n_tps;
|
| |
|
| |
|
| | llama_pos seq_pos_min[LLAMA_MAX_SEQ];
|
| | std::fill(seq_pos_min, seq_pos_min + LLAMA_MAX_SEQ, INT32_MAX);
|
| |
|
| | for (uint32_t i = 0; i < ubatch->n_tokens; ++i) {
|
| | const llama_seq_id seq_id = ubatch->seq_id[i][0];
|
| |
|
| | seq_pos_min[seq_id] = std::min(seq_pos_min[seq_id], ubatch->pos[i]);
|
| | }
|
| |
|
| | for (uint32_t s = 0; s < n_stream; ++s) {
|
| |
|
| | std::unordered_map<llama_seq_id, uint32_t> seq_srct;
|
| | std::unordered_map<llama_seq_id, std::vector<uint32_t>> seq_idxs;
|
| |
|
| | for (uint32_t ii = 0; ii < n_tps; ++ii) {
|
| | const uint32_t i = s*n_tps + ii;
|
| |
|
| | const llama_seq_id seq_id = ubatch->seq_id[i][0];
|
| |
|
| | const auto & cells = v_cells.at(seq_to_stream[seq_id]);
|
| |
|
| | llama_pos p0 = -1;
|
| | const llama_pos p1 = ubatch->pos[i];
|
| |
|
| |
|
| | const llama_pos p1_x = is_2d ? ubatch->pos[i + ubatch->n_tokens*2] : 0;
|
| | const llama_pos p1_y = is_2d ? ubatch->pos[i + ubatch->n_tokens] : 0;
|
| |
|
| | const uint64_t idst = n_kv*i;
|
| |
|
| |
|
| |
|
| |
|
| |
|
| |
|
| |
|
| | bool prev = false;
|
| |
|
| | auto & idxs = seq_idxs[seq_id];
|
| |
|
| | if (!alibi) {
|
| | if (seq_srct.find(seq_id) != seq_srct.end()) {
|
| | const uint32_t srct = seq_srct[seq_id];
|
| |
|
| | const uint64_t idst_prev = n_kv*srct;
|
| |
|
| | std::copy(data + idst_prev, data + idst_prev + n_kv, data + idst);
|
| |
|
| | prev = true;
|
| | } else {
|
| | idxs.clear();
|
| | idxs.reserve(ubatch->n_tokens + n_swa + 32);
|
| |
|
| | seq_srct[seq_id] = i;
|
| | }
|
| | }
|
| |
|
| | for (uint32_t jj = 0; jj < n_kv; ++jj) {
|
| | uint32_t j = jj;
|
| |
|
| |
|
| | if (!alibi) {
|
| | if (prev) {
|
| | if (jj >= idxs.size()) {
|
| | break;
|
| | }
|
| |
|
| | j = idxs[jj];
|
| | }
|
| | }
|
| |
|
| | if (cells.is_empty(j)) {
|
| | goto skip;
|
| | }
|
| |
|
| |
|
| | if (!cells.seq_has(j, seq_id)) {
|
| | goto skip;
|
| | }
|
| |
|
| | p0 = cells.pos_get(j);
|
| |
|
| | if (!alibi) {
|
| | if (!prev) {
|
| |
|
| | if (p0 + (int32_t) (n_swa + 32) >= seq_pos_min[seq_id]) {
|
| | idxs.push_back(j);
|
| | }
|
| | }
|
| | }
|
| |
|
| | if (causal) {
|
| |
|
| | if (p0 > p1) {
|
| | goto skip;
|
| | }
|
| |
|
| |
|
| | if (is_2d) {
|
| | if (p0 == p1) {
|
| | const auto & p0_ext = cells.ext_get(j);
|
| |
|
| | if (p0_ext.is_2d_gt(p1_x, p1_y)) {
|
| | goto skip;
|
| | }
|
| | }
|
| | }
|
| | }
|
| |
|
| |
|
| | if (swa) {
|
| | if (llama_hparams::is_masked_swa(n_swa, swa_type, p0, p1)) {
|
| | goto skip;
|
| | }
|
| | }
|
| |
|
| | if (alibi) {
|
| | data[idst + j] = -std::abs(p0 - p1);
|
| | } else {
|
| | data[idst + j] = 0.0f;
|
| | }
|
| |
|
| | continue;
|
| | skip:
|
| | data[idst + j] = -INFINITY;
|
| | }
|
| | }
|
| | }
|
| | }
|
| |
|
| | template<bool causal, bool swa, bool is_2d>
|
| | static void set_input_kq_mask_impl(const args_set_input_kq_mask & args, float * data) {
|
| | const bool alibi = args.hparams.use_alibi;
|
| | if (alibi) {
|
| | set_input_kq_mask_impl<causal, swa, is_2d, true> (args, data);
|
| | } else {
|
| | set_input_kq_mask_impl<causal, swa, is_2d, false>(args, data);
|
| | }
|
| | }
|
| |
|
| | template<bool causal, bool swa>
|
| | static void set_input_kq_mask_impl(const args_set_input_kq_mask & args, float * data) {
|
| | const bool is_2d = args.ubatch->is_pos_2d();
|
| | if (is_2d) {
|
| | set_input_kq_mask_impl<causal, swa, true> (args, data);
|
| | } else {
|
| | set_input_kq_mask_impl<causal, swa, false>(args, data);
|
| | }
|
| | }
|
| |
|
| | template<bool causal>
|
| | static void set_input_kq_mask_impl(const args_set_input_kq_mask & args, float * data) {
|
| | const bool swa = args.swa_type != LLAMA_SWA_TYPE_NONE;
|
| | if (swa) {
|
| | set_input_kq_mask_impl<causal, true> (args, data);
|
| | } else {
|
| | set_input_kq_mask_impl<causal, false>(args, data);
|
| | }
|
| | }
|
| |
|
| | void llama_kv_cache::set_input_kq_mask(ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const {
|
| | const uint32_t n_tokens = ubatch->n_tokens;
|
| |
|
| | GGML_ASSERT(ggml_backend_buffer_is_host(dst->buffer));
|
| | float * data = (float *) dst->data;
|
| |
|
| | const int64_t n_kv = dst->ne[0];
|
| | const int64_t n_stream = dst->ne[3];
|
| |
|
| | GGML_ASSERT(n_tokens%n_stream == 0);
|
| |
|
| |
|
| | const int64_t n_tps = n_tokens/n_stream;
|
| |
|
| |
|
| |
|
| | const args_set_input_kq_mask args = {
|
| | hparams,
|
| | ubatch,
|
| | v_cells,
|
| | seq_to_stream,
|
| | n_swa,
|
| | swa_type,
|
| | n_kv,
|
| | n_stream,
|
| | n_tps,
|
| | };
|
| |
|
| | if (causal_attn) {
|
| | set_input_kq_mask_impl<true> (args, data);
|
| | } else {
|
| | set_input_kq_mask_impl<false>(args, data);
|
| | }
|
| |
|
| |
|
| |
|
| |
|
| | }
|
| |
|
| | void llama_kv_cache::set_input_pos_bucket(ggml_tensor * dst, const llama_ubatch * ubatch) const {
|
| | const int64_t n_tokens = ubatch->n_tokens;
|
| |
|
| | GGML_ASSERT(n_stream == 1 && "TODO: support multiple streams");
|
| | const auto & cells = v_cells[0];
|
| |
|
| | GGML_ASSERT(ggml_backend_buffer_is_host(dst->buffer));
|
| | GGML_ASSERT(!ubatch->equal_seqs());
|
| |
|
| | int32_t * data = (int32_t *) dst->data;
|
| |
|
| | const int32_t n_kv = dst->ne[0];
|
| |
|
| | for (int h = 0; h < 1; ++h) {
|
| | for (int i = 0; i < n_tokens; ++i) {
|
| | for (int j = 0; j < n_kv; ++j) {
|
| |
|
| | const llama_pos p0 = cells.is_empty(j) ? -1 : cells.pos_get(j);
|
| |
|
| | data[h*(n_kv*n_tokens) + i*n_kv + j] = llama_relative_position_bucket(p0, ubatch->pos[i], hparams.n_rel_attn_bkts, false);
|
| | }
|
| | }
|
| | }
|
| | }
|
| |
|
| | size_t llama_kv_cache::total_size() const {
|
| | size_t size = 0;
|
| |
|
| | for (const auto & [_, buf] : ctxs_bufs) {
|
| | size += ggml_backend_buffer_get_size(buf.get());
|
| | }
|
| |
|
| | return size;
|
| | }
|
| |
|
| | size_t llama_kv_cache::size_k_bytes() const {
|
| | size_t size_k_bytes = 0;
|
| |
|
| | for (const auto & layer : layers) {
|
| | size_k_bytes += ggml_nbytes(layer.k);
|
| | }
|
| |
|
| | return size_k_bytes;
|
| | }
|
| |
|
| | size_t llama_kv_cache::size_v_bytes() const {
|
| | size_t size_v_bytes = 0;
|
| |
|
| | for (const auto & layer : layers) {
|
| | size_v_bytes += layer.v ? ggml_nbytes(layer.v) : 0;
|
| | }
|
| |
|
| | return size_v_bytes;
|
| | }
|
| |
|
| | ggml_tensor * llama_kv_cache::build_rope_shift(
|
| | const llama_cparams & cparams,
|
| | ggml_context * ctx,
|
| | ggml_tensor * cur,
|
| | ggml_tensor * shift,
|
| | ggml_tensor * factors,
|
| | float freq_base,
|
| | float freq_scale) const {
|
| | const auto & n_ctx_orig = cparams.n_ctx_orig_yarn;
|
| |
|
| | const auto & yarn_ext_factor = cparams.yarn_ext_factor;
|
| | const auto & yarn_beta_fast = cparams.yarn_beta_fast;
|
| | const auto & yarn_beta_slow = cparams.yarn_beta_slow;
|
| | const auto & yarn_attn_factor = cparams.yarn_attn_factor;
|
| |
|
| | const auto & n_rot = hparams.n_rot;
|
| | const auto & rope_type = hparams.rope_type == LLAMA_ROPE_TYPE_MROPE || hparams.rope_type == LLAMA_ROPE_TYPE_IMROPE
|
| |
|
| |
|
| |
|
| |
|
| | ? LLAMA_ROPE_TYPE_NEOX
|
| | : hparams.rope_type;
|
| |
|
| | ggml_tensor * tmp;
|
| |
|
| | if (ggml_is_quantized(cur->type)) {
|
| |
|
| | tmp = ggml_cast(ctx, cur, GGML_TYPE_F32);
|
| |
|
| | tmp = ggml_rope_ext(ctx, tmp,
|
| | shift, factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
|
| | yarn_ext_factor, yarn_attn_factor, yarn_beta_fast, yarn_beta_slow);
|
| |
|
| | tmp = ggml_cpy(ctx, tmp, cur);
|
| | } else {
|
| |
|
| | tmp = ggml_rope_ext_inplace(ctx, cur,
|
| | shift, factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
|
| | yarn_ext_factor, yarn_attn_factor, yarn_beta_fast, yarn_beta_slow);
|
| | }
|
| |
|
| | return tmp;
|
| | }
|
| |
|
| | class llm_graph_input_k_shift : public llm_graph_input_i {
|
| | public:
|
| | llm_graph_input_k_shift(const llama_kv_cache * kv_self) : kv_self(kv_self) {}
|
| | virtual ~llm_graph_input_k_shift() = default;
|
| |
|
| | void set_input(const llama_ubatch * ubatch) override;
|
| |
|
| | ggml_tensor * k_shift;
|
| |
|
| | const llama_kv_cache * kv_self;
|
| | };
|
| |
|
| | void llm_graph_input_k_shift::set_input(const llama_ubatch * ubatch) {
|
| | GGML_UNUSED(ubatch);
|
| |
|
| | if (k_shift) {
|
| | kv_self->set_input_k_shift(k_shift);
|
| | }
|
| | }
|
| |
|
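// build a graph that applies the accumulated position shifts to the RoPE section
// of all cached K tensors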
ggml_cgraph * llama_kv_cache::build_graph_shift(llm_graph_result * res, llama_context * lctx) const {
    auto * ctx = res->get_ctx();
    auto * gf  = res->get_gf();

    const auto & n_embd_head_k = hparams.n_embd_head_k;

    const auto & n_rot = hparams.n_rot;

    // for MLA, the non-RoPE part of the head comes first in the cache,
    // so the view below starts at an offset of n_embd_nope elements
    const auto n_embd_nope = hparams.n_lora_kv > 0 ? n_embd_head_k - n_rot : 0;

    auto inp = std::make_unique<llm_graph_input_k_shift>(this);

    inp->k_shift = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, (int64_t) get_size()*n_stream);
    ggml_set_input(inp->k_shift);

    const auto & cparams = lctx->get_cparams();

    for (const auto & layer : layers) {
        const uint32_t il = layer.il;

        const int64_t n_head_kv    = hparams.n_head_kv(il);
        const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(il);

        const float freq_base_l  = model.get_rope_freq_base (cparams, il);
        const float freq_scale_l = model.get_rope_freq_scale(cparams, il);

        ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);

        ggml_tensor * k =
            ggml_view_3d(ctx, layer.k,
                n_rot, n_head_kv, get_size()*n_stream,
                ggml_row_size(layer.k->type, n_embd_head_k),
                ggml_row_size(layer.k->type, n_embd_k_gqa),
                ggml_row_size(layer.k->type, n_embd_nope));

        ggml_tensor * cur = build_rope_shift(cparams, ctx, k, inp->k_shift, rope_factors, freq_base_l, freq_scale_l);

        ggml_build_forward_expand(gf, cur);
    }

    res->add_input(std::move(inp));

    return gf;
}

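// serialization: for each stream, write the number of non-empty cells, followed by
// the cell metadata (positions, sequence ids) and the K/V tensor data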
void llama_kv_cache::state_write(llama_io_write_i & io, llama_seq_id seq_id, llama_state_seq_flags flags) const {
    GGML_UNUSED(flags);

    io.write(&n_stream, sizeof(n_stream));

    for (uint32_t s = 0; s < n_stream; ++s) {
        cell_ranges_t cr { s, {} };

        uint32_t cell_count = 0;

        const auto & cells = v_cells[s];

        // Count the number of cells with the specified seq_id
        // Find all the ranges of cells with this seq id (or all, when -1)
        uint32_t cell_range_begin = cells.size();

        for (uint32_t i = 0; i < cells.size(); ++i) {
            if (!cells.is_empty(i) && (seq_id == -1 || cells.seq_has(i, seq_id))) {
                ++cell_count;
                if (cell_range_begin == cells.size()) {
                    cell_range_begin = i;
                }
            } else {
                if (cell_range_begin != cells.size()) {
                    cr.data.emplace_back(cell_range_begin, i);
                    cell_range_begin = cells.size();
                }
            }
        }

        if (cell_range_begin != cells.size()) {
            cr.data.emplace_back(cell_range_begin, cells.size());
        }

        // DEBUG CHECK: Sum of cell counts in ranges should equal the total cell count
        uint32_t cell_count_check = 0;
        for (const auto & range : cr.data) {
            cell_count_check += range.second - range.first;
        }
        GGML_ASSERT(cell_count == cell_count_check);

        io.write(&cell_count, sizeof(cell_count));

        // skip empty streams
        if (cell_count == 0) {
            continue;
        }

        state_write_meta(io, cr, seq_id);
        state_write_data(io, cr);
    }
}

void llama_kv_cache::state_read(llama_io_read_i & io, llama_seq_id seq_id, llama_state_seq_flags flags) {
    GGML_UNUSED(flags);

    GGML_ASSERT(seq_id == -1 || (seq_id >= 0 && (size_t) seq_id < seq_to_stream.size()));

    uint32_t n_stream_cur;
    io.read_to(&n_stream_cur, sizeof(n_stream_cur));
    if (n_stream_cur != n_stream) {
        throw std::runtime_error("n_stream mismatch");
    }

    for (uint32_t s = 0; s < n_stream; ++s) {
        uint32_t cell_count;
        io.read_to(&cell_count, sizeof(cell_count));

        if (cell_count == 0) {
            continue;
        }

        const uint32_t strm = seq_id == -1 ? s : seq_to_stream[seq_id];

        slot_info sinfo;

        bool res = true;
        res = res && state_read_meta(io, strm, cell_count, sinfo, seq_id);
        res = res && state_read_data(io, strm, cell_count, sinfo);

        if (!res) {
            if (seq_id == -1) {
                clear(true);
            } else {
                seq_rm(seq_id, -1, -1);
            }
            throw std::runtime_error("failed to restore kv cache");
        }
    }
}

void llama_kv_cache::state_write_meta(llama_io_write_i & io, const cell_ranges_t & cr, llama_seq_id seq_id) const {
    const auto & cells = v_cells[cr.strm];

    for (const auto & range : cr.data) {
        for (uint32_t i = range.first; i < range.second; ++i) {
            std::vector<llama_seq_id> seq_ids;

            for (llama_seq_id cur = 0; cur < (int) n_seq_max; ++cur) {
                if (cur == seq_id || seq_id == -1) {
                    if (cells.seq_has(i, cur)) {
                        seq_ids.push_back(cur);
                    }
                }
            }

            const llama_pos pos      = cells.pos_get(i);
            const uint32_t  n_seq_id = seq_ids.size();

            io.write(&pos,      sizeof(pos));
            io.write(&n_seq_id, sizeof(n_seq_id));

            for (const auto & seq_id : seq_ids) {
                io.write(&seq_id, sizeof(seq_id));
            }
        }
    }
}

void llama_kv_cache::state_write_data(llama_io_write_i & io, const cell_ranges_t & cr) const {
    const auto & cells = v_cells[cr.strm];

    const uint32_t v_trans = this->v_trans ? 1 : 0;
    const uint32_t n_layer = layers.size();

    io.write(&v_trans, sizeof(v_trans));
    io.write(&n_layer, sizeof(n_layer));

    // Iterate and write all the keys first, each row is a cell
    // Get whole range at a time
    for (const auto & layer : layers) {
        const uint32_t il = layer.il;

        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il);

        auto * k = layer.k_stream[cr.strm];

        // Write key type
        const int32_t k_type_i = (int32_t) k->type;
        io.write(&k_type_i, sizeof(k_type_i));

        // Write row size of key
        const uint64_t k_size_row = ggml_row_size(k->type, n_embd_k_gqa);
        io.write(&k_size_row, sizeof(k_size_row));

        // Read each range of cells of k_size_row length each and write out
        for (const auto & range : cr.data) {
            const size_t range_size = range.second - range.first;
            const size_t buf_size   = range_size * k_size_row;
            io.write_tensor(k, range.first * k_size_row, buf_size);
        }
    }

    if (!v_trans) {
        for (const auto & layer : layers) {
            const uint32_t il = layer.il;

            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il);

            auto * v = layer.v_stream[cr.strm];
            if (!v) {
                continue;
            }

            // Write value type
            const int32_t v_type_i = (int32_t) v->type;
            io.write(&v_type_i, sizeof(v_type_i));

            // Write row size of value
            const uint64_t v_size_row = ggml_row_size(v->type, n_embd_v_gqa);
            io.write(&v_size_row, sizeof(v_size_row));

            // Read each range of cells of v_size_row length each and write out
            for (const auto & range : cr.data) {
                const size_t range_size = range.second - range.first;
                const size_t buf_size   = range_size * v_size_row;
                io.write_tensor(v, range.first * v_size_row, buf_size);
            }
        }
    } else {
        // When v is transposed, we also need the element size and get the element ranges from each row
        const uint32_t kv_size = cells.size();

        for (const auto & layer : layers) {
            const uint32_t il = layer.il;

            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il);

            auto * v = layer.v_stream[cr.strm];
            if (!v) {
                continue;
            }

            // Write value type
            const int32_t v_type_i = (int32_t) v->type;
            io.write(&v_type_i, sizeof(v_type_i));

            // Write element size
            const uint32_t v_size_el = ggml_type_size(v->type);
            io.write(&v_size_el, sizeof(v_size_el));

            // Write GQA embedding size
            io.write(&n_embd_v_gqa, sizeof(n_embd_v_gqa));

            // For each row, we get the element values of each cell
            for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
                // Read each range of cells of v_size_el length each and write out
                for (const auto & range : cr.data) {
                    const size_t range_size = range.second - range.first;
                    const size_t src_offset = (range.first + j * kv_size) * v_size_el;
                    const size_t buf_size   = range_size * v_size_el;
                    io.write_tensor(v, src_offset, buf_size);
                }
            }
        }
    }
}

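// restore the cell metadata. with a destination sequence id, the cells are placed
// via find_slot/apply_ubatch; otherwise the whole cache is restored in-place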
bool llama_kv_cache::state_read_meta(llama_io_read_i & io, uint32_t strm, uint32_t cell_count, slot_info & sinfo, llama_seq_id dest_seq_id) {
    auto & cells = v_cells[strm];
    auto & head  = v_heads[strm];

    if (dest_seq_id != -1) {
        // single sequence
        seq_rm(dest_seq_id, -1, -1);

        llama_batch_allocr balloc(hparams.n_pos_per_embd());

        llama_ubatch ubatch = balloc.ubatch_reserve(cell_count, 1);

        ubatch.seq_id_unq[0] = dest_seq_id;

        for (uint32_t i = 0; i < cell_count; ++i) {
            llama_pos pos;
            uint32_t  n_seq_id;

            io.read_to(&pos,      sizeof(pos));
            io.read_to(&n_seq_id, sizeof(n_seq_id));

            if (n_seq_id != 1) {
                LLAMA_LOG_ERROR("%s: invalid seq_id-agnostic kv cell\n", __func__);
                return false;
            }

            // read the sequence id, but directly discard it - we will use dest_seq_id instead
            {
                llama_seq_id seq_id;
                io.read_to(&seq_id, sizeof(seq_id));
            }

            ubatch.pos[i]      = pos;
            ubatch.n_seq_id[i] = n_seq_id;
            ubatch.seq_id[i]   = &dest_seq_id;
        }

        sinfo = find_slot(ubatch, false);
        if (sinfo.empty()) {
            LLAMA_LOG_ERROR("%s: failed to find available cells in kv cache\n", __func__);
            return false;
        }

        apply_ubatch(sinfo, ubatch);

        LLAMA_LOG_DEBUG("%s: cell_count = %d, dest_seq_id = %d\n", __func__, cell_count, dest_seq_id);

        // DEBUG CHECK: verify that all the cells received the correct position and sequence id
        GGML_ASSERT(sinfo.n_stream() == 1);
        GGML_ASSERT(sinfo.idxs[0].size() == cell_count);
        for (uint32_t i = 0; i < cell_count; ++i) {
            const uint32_t idx = sinfo.idxs[0][i];
            GGML_ASSERT(cells.pos_get(idx) == ubatch.pos[i]);
            GGML_ASSERT(cells.seq_has(idx, dest_seq_id));
        }
    } else {
        // whole KV cache restore

        if (cell_count > cells.size()) {
            LLAMA_LOG_ERROR("%s: not enough cells in kv cache\n", __func__);
            return false;
        }

        clear(true);

        for (uint32_t i = 0; i < cell_count; ++i) {
            llama_pos pos;
            uint32_t  n_seq_id;

            io.read_to(&pos,      sizeof(pos));
            io.read_to(&n_seq_id, sizeof(n_seq_id));

            cells.pos_set(i, pos);

            for (uint32_t j = 0; j < n_seq_id; ++j) {
                llama_seq_id seq_id;
                io.read_to(&seq_id, sizeof(seq_id));

                if (seq_id < 0 || (uint32_t) seq_id >= n_seq_max) {
                    LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, %u)\n", __func__, seq_id, n_seq_max);
                    return false;
                }

                cells.seq_add(i, seq_id);
            }
        }

        // the slot info covers the first cell_count cells of the stream
        sinfo.s0 = strm;
        sinfo.s1 = strm;
        sinfo.resize(1);
        sinfo.strm[0] = strm;
        sinfo.idxs[0].resize(cell_count);
        for (uint32_t i = 0; i < cell_count; ++i) {
            sinfo.idxs[0][i] = i;
        }

        head = 0;
    }

    return true;
}

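// restore the K/V tensor data into the cells described by sinfo, verifying that the
// tensor types and row sizes match the current cache configuration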
| | bool llama_kv_cache::state_read_data(llama_io_read_i & io, uint32_t strm, uint32_t cell_count, const slot_info & sinfo) {
|
| | auto & cells = v_cells[strm];
|
| |
|
| | uint32_t v_trans;
|
| | uint32_t n_layer;
|
| |
|
| | io.read_to(&v_trans, sizeof(v_trans));
|
| | io.read_to(&n_layer, sizeof(n_layer));
|
| |
|
| | if (n_layer != layers.size()) {
|
| | LLAMA_LOG_ERROR("%s: mismatched layer count (%u instead of %u)\n", __func__, n_layer, (uint32_t) layers.size());
|
| | return false;
|
| | }
|
| |
|
| | if (cell_count > cells.size()) {
|
| | LLAMA_LOG_ERROR("%s: not enough cells in kv cache to restore state (%u > %u)\n", __func__, cell_count, cells.size());
|
| | return false;
|
| | }
|
| |
|
| | if (this->v_trans != (bool) v_trans) {
|
| | LLAMA_LOG_ERROR("%s: incompatible V transposition\n", __func__);
|
| | return false;
|
| | }
|
| |
|
| |
|
| | for (const auto & layer : layers) {
|
| | const uint32_t il = layer.il;
|
| |
|
| | const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il);
|
| |
|
| | auto * k = layer.k_stream[strm];
|
| |
|
| |
|
| | int32_t k_type_i_ref;
|
| | io.read_to(&k_type_i_ref, sizeof(k_type_i_ref));
|
| | const int32_t k_type_i = (int32_t) k->type;
|
| | if (k_type_i != k_type_i_ref) {
|
| | LLAMA_LOG_ERROR("%s: mismatched key type (%d != %d, layer %d)\n", __func__, k_type_i, k_type_i_ref, il);
|
| | return false;
|
| | }
|
| |
|
| |
|
| | uint64_t k_size_row_ref;
|
| | io.read_to(&k_size_row_ref, sizeof(k_size_row_ref));
|
| | const size_t k_size_row = ggml_row_size(k->type, n_embd_k_gqa);
|
| | if (k_size_row != k_size_row_ref) {
|
| | LLAMA_LOG_ERROR("%s: mismatched key row size (%zu != %zu, layer %d)\n", __func__, k_size_row, (size_t) k_size_row_ref, il);
|
| | return false;
|
| | }
|
| |
|
| | if (cell_count) {
|
| | if (sinfo.is_contiguous()) {
|
| |
|
| | ggml_backend_tensor_set(k, io.read(cell_count * k_size_row), sinfo.head() * k_size_row, cell_count * k_size_row);
|
| | } else {
|
| |
|
| | const void * src = io.read(cell_count * k_size_row);
|
| | for (uint32_t i = 0; i < cell_count; ++i) {
|
| | const size_t dst_offset = sinfo.idxs[0][i] * k_size_row;
|
| | ggml_backend_tensor_set(k, (const char*)src + i * k_size_row, dst_offset, k_size_row);
|
| | }
|
| | }
|
| | }
|
| | }
|
| |
|
| | if (!this->v_trans) {
|
| | for (const auto & layer : layers) {
|
| | const uint32_t il = layer.il;
|
| |
|
| | const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il);
|
| |
|
| | auto * v = layer.v_stream[strm];
|
| | if (!v) {
|
| | continue;
|
| | }
|
| |
|
| |
|
| | int32_t v_type_i_ref;
|
| | io.read_to(&v_type_i_ref, sizeof(v_type_i_ref));
|
| | const int32_t v_type_i = (int32_t) v->type;
|
| | if (v_type_i != v_type_i_ref) {
|
| | LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il);
|
| | return false;
|
| | }
|
| |
|
| |
|
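| | // the serialized V row size must match the current embedding width
|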
| | uint64_t v_size_row_ref;
|
| | io.read_to(&v_size_row_ref, sizeof(v_size_row_ref));
|
| | const size_t v_size_row = ggml_row_size(v->type, n_embd_v_gqa);
|
| | if (v_size_row != v_size_row_ref) {
|
| | LLAMA_LOG_ERROR("%s: mismatched value row size (%zu != %zu, layer %d)\n", __func__, v_size_row, (size_t) v_size_row_ref, il);
|
| | return false;
|
| | }
|
| |
|
| | if (cell_count) {
|
| | if (sinfo.is_contiguous()) {
|
| |
|
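| | // contiguous destination - restore the whole range with a single write
|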
| | ggml_backend_tensor_set(v, io.read(cell_count * v_size_row), sinfo.head() * v_size_row, cell_count * v_size_row);
|
| | } else {
|
| |
|
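| | // scattered destination - copy the serialized rows into the cache one cell at a time
|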
| | const void * src = io.read(cell_count * v_size_row);
|
| | for (uint32_t i = 0; i < cell_count; ++i) {
|
| | const size_t dst_offset = sinfo.idxs[0][i] * v_size_row;
|
| | ggml_backend_tensor_set(v, (const char *) src + i * v_size_row, dst_offset, v_size_row);
|
| | }
|
| | }
|
| | }
|
| | }
|
| | } else {
|
| |
|
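| | // the V cache is transposed - each embedding dimension is stored contiguously across all cells
|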
| | for (const auto & layer : layers) {
|
| | const uint32_t il = layer.il;
|
| |
|
| | const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il);
|
| |
|
| | auto * v = layer.v_stream[strm];
|
| | if (!v) {
|
| | continue;
|
| | }
|
| |
|
| |
|
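| | // the serialized V type must match the type of the current cache tensor
|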
| | int32_t v_type_i_ref;
|
| | io.read_to(&v_type_i_ref, sizeof(v_type_i_ref));
|
| | const int32_t v_type_i = (int32_t) v->type;
|
| | if (v_type_i != v_type_i_ref) {
|
| | LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il);
|
| | return false;
|
| | }
|
| |
|
| |
|
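| | // the serialized element size must match the current V type
|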
| | uint32_t v_size_el_ref;
|
| | io.read_to(&v_size_el_ref, sizeof(v_size_el_ref));
|
| | const size_t v_size_el = ggml_type_size(v->type);
|
| | if (v_size_el != v_size_el_ref) {
|
| | LLAMA_LOG_ERROR("%s: mismatched value element size (%zu != %zu, layer %d)\n", __func__, v_size_el, (size_t) v_size_el_ref, il);
|
| | return false;
|
| | }
|
| |
|
| |
|
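| | // the serialized GQA embedding size must match the current layer
|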
| | uint32_t n_embd_v_gqa_ref;
|
| | io.read_to(&n_embd_v_gqa_ref, sizeof(n_embd_v_gqa_ref));
|
| | if (n_embd_v_gqa != n_embd_v_gqa_ref) {
|
| | LLAMA_LOG_ERROR("%s: mismatched GQA embedding size (%u != %u, layer %d)\n", __func__, n_embd_v_gqa, n_embd_v_gqa_ref, il);
|
| | return false;
|
| | }
|
| |
|
| | if (cell_count) {
|
| | if (sinfo.is_contiguous()) {
|
| |
|
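| | // for each dimension j, the destination cells occupy a contiguous run starting at the head offset within row j
|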
| | const uint32_t h = sinfo.head();
|
| | for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
|
| | const size_t dst_offset = (h + j * cells.size()) * v_size_el;
|
| | ggml_backend_tensor_set(v, io.read(cell_count * v_size_el), dst_offset, cell_count * v_size_el);
|
| | }
|
| | } else {
|
| |
|
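| | // for each dimension j, scatter the serialized elements to the destination cells
|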
| | for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
|
| | const void * src = io.read(cell_count * v_size_el);
|
| | for (uint32_t i = 0; i < cell_count; ++i) {
|
| | const size_t dst_offset = (sinfo.idxs[0][i] + j * cells.size()) * v_size_el;
|
| | ggml_backend_tensor_set(v, (const char *) src + i * v_size_el, dst_offset, v_size_el);
|
| | }
|
| | }
|
| | }
|
| | }
|
| | }
|
| | }
|
| |
|
| | return true;
|
| | }
|
| |
|
| |
|
| |
|
| |
|
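| | // llama_kv_cache_context
|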
| |
|
| | llama_kv_cache_context::llama_kv_cache_context(llama_memory_status status) : status(status) {}
|
| |
|
| | llama_kv_cache_context::llama_kv_cache_context(
|
| | llama_kv_cache * kv) : status(LLAMA_MEMORY_STATUS_SUCCESS), kv(kv) {
|
| | n_kv = kv->get_size();
|
| |
|
| | const uint32_t n_stream = kv->get_n_stream();
|
| |
|
| |
|
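| | // build a placeholder slot info spanning all streams - the indices carry no real data and are only needed to construct a graph
|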
| | sinfos.resize(1);
|
| | sinfos[0].s0 = 0;
|
| | sinfos[0].s1 = n_stream - 1;
|
| | sinfos[0].idxs.resize(n_stream);
|
| | for (uint32_t s = 0; s < n_stream; ++s) {
|
| | sinfos[0].strm.push_back(s);
|
| | sinfos[0].idxs[s].resize(1, 0);
|
| | }
|
| | }
|
| |
|
| | llama_kv_cache_context::llama_kv_cache_context(
|
| | llama_kv_cache * kv,
|
| | llama_context * lctx,
|
| | bool do_shift,
|
| | stream_copy_info sc_info) : status(LLAMA_MEMORY_STATUS_SUCCESS), kv(kv), lctx(lctx), do_shift(do_shift), sc_info(std::move(sc_info)) {
|
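| | // nothing to shift and no pending stream copies - report that there is no update to apply
|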
| | if (!do_shift && this->sc_info.empty()) {
|
| | status = LLAMA_MEMORY_STATUS_NO_UPDATE;
|
| | }
|
| | }
|
| |
|
| | llama_kv_cache_context::llama_kv_cache_context(
|
| | llama_kv_cache * kv,
|
| | llama_kv_cache::slot_info_vec_t sinfos,
|
| | std::vector<llama_ubatch> ubatches) : status(LLAMA_MEMORY_STATUS_SUCCESS), kv(kv), sinfos(std::move(sinfos)), ubatches(std::move(ubatches)) {
|
| | }
|
| |
|
| | llama_kv_cache_context::~llama_kv_cache_context() = default;
|
| |
|
| | bool llama_kv_cache_context::next() {
|
| | assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
|
| |
|
| | if (++i_cur >= ubatches.size()) {
|
| | return false;
|
| | }
|
| |
|
| | return true;
|
| | }
|
| |
|
| | bool llama_kv_cache_context::apply() {
|
| | assert(!llama_memory_status_is_fail(status));
|
| |
|
| |
|
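| | // no ubatches - this context represents a cache update (K-shift and/or stream copies)
|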
| | if (ubatches.empty()) {
|
| | kv->update(lctx, do_shift, sc_info);
|
| |
|
| | return true;
|
| | }
|
| |
|
| | kv->apply_ubatch(sinfos[i_cur], ubatches[i_cur]);
|
| | n_kv = kv->get_n_kv(sinfos[i_cur]);
|
| |
|
| | return true;
|
| | }
|
| |
|
| | llama_memory_status llama_kv_cache_context::get_status() const {
|
| | return status;
|
| | }
|
| |
|
| | const llama_ubatch & llama_kv_cache_context::get_ubatch() const {
|
| | assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
|
| |
|
| | return ubatches[i_cur];
|
| | }
|
| |
|
| | uint32_t llama_kv_cache_context::get_n_kv() const {
|
| | return n_kv;
|
| | }
|
| |
|
| | ggml_tensor * llama_kv_cache_context::get_k(ggml_context * ctx, int32_t il) const {
|
| | return kv->get_k(ctx, il, n_kv, sinfos[i_cur]);
|
| | }
|
| |
|
| | ggml_tensor * llama_kv_cache_context::get_v(ggml_context * ctx, int32_t il) const {
|
| | return kv->get_v(ctx, il, n_kv, sinfos[i_cur]);
|
| | }
|
| |
|
| | ggml_tensor * llama_kv_cache_context::cpy_k(ggml_context * ctx, ggml_tensor * k_cur, ggml_tensor * k_idxs, int32_t il) const {
|
| | return kv->cpy_k(ctx, k_cur, k_idxs, il, sinfos[i_cur]);
|
| | }
|
| |
|
| | ggml_tensor * llama_kv_cache_context::cpy_v(ggml_context * ctx, ggml_tensor * v_cur, ggml_tensor * v_idxs, int32_t il) const {
|
| | return kv->cpy_v(ctx, v_cur, v_idxs, il, sinfos[i_cur]);
|
| | }
|
| |
|
| | ggml_tensor * llama_kv_cache_context::build_input_k_idxs(ggml_context * ctx, const llama_ubatch & ubatch) const {
|
| | return kv->build_input_k_idxs(ctx, ubatch);
|
| | }
|
| |
|
| | ggml_tensor * llama_kv_cache_context::build_input_v_idxs(ggml_context * ctx, const llama_ubatch & ubatch) const {
|
| | return kv->build_input_v_idxs(ctx, ubatch);
|
| | }
|
| |
|
| | void llama_kv_cache_context::set_input_k_shift(ggml_tensor * dst) const {
|
| | kv->set_input_k_shift(dst);
|
| | }
|
| |
|
| | void llama_kv_cache_context::set_input_k_idxs(ggml_tensor * dst, const llama_ubatch * ubatch) const {
|
| | kv->set_input_k_idxs(dst, ubatch, sinfos[i_cur]);
|
| | }
|
| |
|
| | void llama_kv_cache_context::set_input_v_idxs(ggml_tensor * dst, const llama_ubatch * ubatch) const {
|
| | kv->set_input_v_idxs(dst, ubatch, sinfos[i_cur]);
|
| | }
|
| |
|
| | void llama_kv_cache_context::set_input_kq_mask(ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const {
|
| | kv->set_input_kq_mask(dst, ubatch, causal_attn);
|
| | }
|
| |
|
| | void llama_kv_cache_context::set_input_pos_bucket(ggml_tensor * dst, const llama_ubatch * ubatch) const {
|
| | kv->set_input_pos_bucket(dst, ubatch);
|
| | }
|
| |
|