|
|
#include "llama-context.h" |
|
|
|
|
|
#include "llama-impl.h" |
|
|
#include "llama-batch.h" |
|
|
#include "llama-io.h" |
|
|
#include "llama-memory.h" |
|
|
#include "llama-mmap.h" |
|
|
#include "llama-model.h" |
|
|
|
|
|
#include <cinttypes> |
|
|
#include <cstring> |
|
|
#include <limits> |
|
|
#include <stdexcept> |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
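//
// llama_context
//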
llama_context::llama_context( |
|
|
const llama_model & model, |
|
|
llama_context_params params) : |
|
|
model(model), |
|
|
balloc(std::make_unique<llama_batch_allocr>(model.hparams.n_pos_per_embd())) { |
|
|
LLAMA_LOG_INFO("%s: constructing llama_context\n", __func__); |
|
|
|
|
|
t_start_us = model.t_start_us; |
|
|
t_load_us = model.t_load_us; |
|
|
|
|
|
const auto & hparams = model.hparams; |
|
|
|
|
|
cparams.n_seq_max = std::max(1u, params.n_seq_max); |
|
|
if (cparams.n_seq_max > LLAMA_MAX_SEQ) { |
|
|
throw std::runtime_error("n_seq_max must be <= " + std::to_string(LLAMA_MAX_SEQ)); |
|
|
} |
|
|
|
|
|
cparams.n_threads = params.n_threads; |
|
|
cparams.n_threads_batch = params.n_threads_batch; |
|
|
cparams.yarn_ext_factor = params.yarn_ext_factor; |
|
|
cparams.yarn_attn_factor = params.yarn_attn_factor; |
|
|
cparams.yarn_beta_fast = params.yarn_beta_fast; |
|
|
cparams.yarn_beta_slow = params.yarn_beta_slow; |
|
|
cparams.defrag_thold = params.defrag_thold; |
|
|
cparams.embeddings = params.embeddings; |
|
|
cparams.offload_kqv = params.offload_kqv; |
|
|
cparams.flash_attn = params.flash_attn; |
|
|
cparams.no_perf = params.no_perf; |
|
|
cparams.pooling_type = params.pooling_type; |
|
|
cparams.warmup = false; |
|
|
|
|
|
cparams.n_ctx = params.n_ctx == 0 ? hparams.n_ctx_train : params.n_ctx; |
|
|
cparams.rope_freq_base = params.rope_freq_base == 0.0f ? hparams.rope_freq_base_train : params.rope_freq_base; |
|
|
cparams.rope_freq_scale = params.rope_freq_scale == 0.0f ? hparams.rope_freq_scale_train : params.rope_freq_scale; |
|
|
|
|
|
cparams.n_ctx_orig_yarn = params.yarn_orig_ctx != 0 ? params.yarn_orig_ctx : |
|
|
hparams.n_ctx_orig_yarn != 0 ? hparams.n_ctx_orig_yarn : |
|
|
hparams.n_ctx_train; |
|
|
|
|
|
cparams.cb_eval = params.cb_eval; |
|
|
cparams.cb_eval_user_data = params.cb_eval_user_data; |
|
|
|
|
|
auto rope_scaling_type = params.rope_scaling_type; |
|
|
if (rope_scaling_type == LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED) { |
|
|
rope_scaling_type = hparams.rope_scaling_type_train; |
|
|
} |
|
|
|
|
|
if (rope_scaling_type == LLAMA_ROPE_SCALING_TYPE_NONE) { |
|
|
cparams.rope_freq_scale = 1.0f; |
|
|
} |
|
|
|
|
|
if (cparams.yarn_ext_factor < 0.0f) { |
|
|
cparams.yarn_ext_factor = rope_scaling_type == LLAMA_ROPE_SCALING_TYPE_YARN ? 1.0f : 0.0f; |
|
|
} |
|
|
|
|
|
cparams.yarn_attn_factor *= hparams.rope_attn_factor; |
|
|
|
|
|
if (cparams.pooling_type == LLAMA_POOLING_TYPE_UNSPECIFIED) { |
|
|
if (hparams.pooling_type == LLAMA_POOLING_TYPE_UNSPECIFIED) { |
|
|
cparams.pooling_type = LLAMA_POOLING_TYPE_NONE; |
|
|
} else { |
|
|
cparams.pooling_type = hparams.pooling_type; |
|
|
} |
|
|
} |
|
|
|
|
|
if (params.attention_type == LLAMA_ATTENTION_TYPE_UNSPECIFIED) { |
|
|
cparams.causal_attn = hparams.causal_attn; |
|
|
} else { |
|
|
cparams.causal_attn = params.attention_type == LLAMA_ATTENTION_TYPE_CAUSAL; |
|
|
} |
|
|
|
|
|
|
|
|
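    // with causal attention, the batch size is limited by the context size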
cparams.n_batch = cparams.causal_attn ? std::min(cparams.n_ctx, params.n_batch) : params.n_batch; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
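    // the batch has to be at least GGML_KQ_MASK_PAD because the KQ mask will be padded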
if (cparams.n_batch < GGML_KQ_MASK_PAD) { |
|
|
LLAMA_LOG_WARN("%s: n_batch is less than GGML_KQ_MASK_PAD - increasing to %d\n", __func__, GGML_KQ_MASK_PAD); |
|
|
cparams.n_batch = GGML_KQ_MASK_PAD; |
|
|
} |
|
|
cparams.n_ubatch = std::min(cparams.n_batch, params.n_ubatch == 0 ? params.n_batch : params.n_ubatch); |
|
|
|
|
|
cparams.op_offload = params.op_offload; |
|
|
cparams.kv_unified = params.kv_unified; |
|
|
|
|
|
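    // the non-unified KV cache relies on ggml_set_rows(), gated here by the LLAMA_SET_ROWS environment variable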
{ |
|
|
const char * LLAMA_SET_ROWS = getenv("LLAMA_SET_ROWS"); |
|
|
const bool supports_set_rows = LLAMA_SET_ROWS ? (atoi(LLAMA_SET_ROWS) != 0) : false; |
|
|
|
|
|
if (!supports_set_rows && !cparams.kv_unified) { |
|
|
LLAMA_LOG_WARN("%s: non-unified KV cache requires ggml_set_rows() - forcing unified KV cache\n", __func__); |
|
|
cparams.kv_unified = true; |
|
|
} |
|
|
} |
|
|
|
|
|
const uint32_t n_ctx_per_seq = cparams.n_ctx / cparams.n_seq_max; |
|
|
|
|
|
LLAMA_LOG_INFO("%s: n_seq_max = %u\n", __func__, cparams.n_seq_max); |
|
|
LLAMA_LOG_INFO("%s: n_ctx = %u\n", __func__, cparams.n_ctx); |
|
|
LLAMA_LOG_INFO("%s: n_ctx_per_seq = %u\n", __func__, n_ctx_per_seq); |
|
|
LLAMA_LOG_INFO("%s: n_batch = %u\n", __func__, cparams.n_batch); |
|
|
LLAMA_LOG_INFO("%s: n_ubatch = %u\n", __func__, cparams.n_ubatch); |
|
|
LLAMA_LOG_INFO("%s: causal_attn = %d\n", __func__, cparams.causal_attn); |
|
|
LLAMA_LOG_INFO("%s: flash_attn = %d\n", __func__, cparams.flash_attn); |
|
|
LLAMA_LOG_INFO("%s: kv_unified = %s\n", __func__, cparams.kv_unified ? "true" : "false"); |
|
|
LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, cparams.rope_freq_base); |
|
|
LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, cparams.rope_freq_scale); |
|
|
|
|
|
if (n_ctx_per_seq < hparams.n_ctx_train) { |
|
|
LLAMA_LOG_WARN("%s: n_ctx_per_seq (%u) < n_ctx_train (%u) -- the full capacity of the model will not be utilized\n", |
|
|
__func__, n_ctx_per_seq, hparams.n_ctx_train); |
|
|
} |
|
|
|
|
|
if (n_ctx_per_seq > hparams.n_ctx_train) { |
|
|
LLAMA_LOG_WARN("%s: n_ctx_per_seq (%u) > n_ctx_train (%u) -- possible training context overflow\n", |
|
|
__func__, n_ctx_per_seq, hparams.n_ctx_train); |
|
|
} |
|
|
|
|
|
if (!params.swa_full && cparams.n_seq_max > 1 && hparams.is_swa_any()) { |
|
|
LLAMA_LOG_WARN("%s: requested n_seq_max (%u) > 1, but swa_full is not enabled -- performance may be degraded: %s\n", |
|
|
__func__, cparams.n_seq_max, "https://github.com/ggml-org/llama.cpp/pull/13845#issuecomment-2924800573"); |
|
|
} |
|
|
|
|
|
if (!hparams.vocab_only) { |
|
|
|
|
|
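        // GPU backends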
for (auto * dev : model.devices) { |
|
|
ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr); |
|
|
if (backend == nullptr) { |
|
|
throw std::runtime_error(format("failed to initialize %s backend", ggml_backend_dev_name(dev))); |
|
|
} |
|
|
backends.emplace_back(backend); |
|
|
} |
|
|
|
|
|
|
|
|
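        // add ACCEL backends (such as BLAS)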
for (size_t i = 0; i < ggml_backend_dev_count(); ++i) { |
|
|
ggml_backend_dev_t dev = ggml_backend_dev_get(i); |
|
|
if (ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_ACCEL) { |
|
|
ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr); |
|
|
if (backend == nullptr) { |
|
|
throw std::runtime_error(format("failed to initialize %s backend", ggml_backend_dev_name(dev))); |
|
|
} |
|
|
backends.emplace_back(backend); |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
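        // add the CPU backend (always last)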
backend_cpu = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr); |
|
|
if (backend_cpu == nullptr) { |
|
|
throw std::runtime_error("failed to initialize CPU backend"); |
|
|
} |
|
|
backends.emplace_back(backend_cpu); |
|
|
|
|
|
|
|
|
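        // collect the ggml_backend_set_n_threads functions exposed by the backends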
for (auto & backend : backends) { |
|
|
ggml_backend_dev_t dev = ggml_backend_get_device(backend.get()); |
|
|
ggml_backend_reg_t reg = dev ? ggml_backend_dev_backend_reg(dev) : nullptr; |
|
|
if (reg) { |
|
|
auto ggml_backend_set_n_threads_fn = (ggml_backend_set_n_threads_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_n_threads"); |
|
|
if (ggml_backend_set_n_threads_fn) { |
|
|
set_n_threads_fns.emplace_back(backend.get(), ggml_backend_set_n_threads_fn); |
|
|
} |
|
|
} |
|
|
} |
|
|
|
|
|
llama_set_abort_callback(this, params.abort_callback, params.abort_callback_data); |
|
|
|
|
|
|
|
|
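        // graph outputs buffer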
{ |
|
|
|
|
|
if ((uint32_t) output_reserve(params.n_seq_max) < params.n_seq_max) { |
|
|
throw std::runtime_error("failed to reserve initial output buffer"); |
|
|
} |
|
|
|
|
|
LLAMA_LOG_INFO("%s: %10s output buffer size = %8.2f MiB\n", __func__, |
|
|
ggml_backend_buffer_name (buf_output.get()), |
|
|
ggml_backend_buffer_get_size(buf_output.get()) / 1024.0 / 1024.0); |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
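    // init the memory module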
if (!hparams.vocab_only) { |
|
|
llama_memory_params params_mem = { |
|
|
params.type_k, |
|
|
params.type_v, |
|
|
params.swa_full, |
|
|
}; |
|
|
|
|
|
memory.reset(model.create_memory(params_mem, cparams)); |
|
|
} |
|
|
|
|
|
|
|
|
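    // init the backend scheduler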
if (!hparams.vocab_only) { |
|
|
LLAMA_LOG_DEBUG("%s: enumerating backends\n", __func__); |
|
|
|
|
|
backend_buft.clear(); |
|
|
backend_ptrs.clear(); |
|
|
|
|
|
for (auto & backend : backends) { |
|
|
auto * buft = ggml_backend_get_default_buffer_type(backend.get()); |
|
|
auto backend_type = ggml_backend_dev_type(ggml_backend_get_device(backend.get())); |
|
|
|
|
|
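            // the CPU backend uses the host buffer type of the first device, if available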
if (backend_type == GGML_BACKEND_DEVICE_TYPE_CPU && !model.devices.empty()) { |
|
|
|
|
|
auto * dev = model.devices[0]; |
|
|
auto * host_buft = ggml_backend_dev_host_buffer_type(dev); |
|
|
if (host_buft) { |
|
|
buft = host_buft; |
|
|
} |
|
|
} |
|
|
|
|
|
backend_buft.push_back(buft); |
|
|
backend_ptrs.push_back(backend.get()); |
|
|
} |
|
|
|
|
|
LLAMA_LOG_DEBUG("%s: backend_ptrs.size() = %zu\n", __func__, backend_ptrs.size()); |
|
|
|
|
|
const size_t max_nodes = this->graph_max_nodes(); |
|
|
|
|
|
LLAMA_LOG_DEBUG("%s: max_nodes = %zu\n", __func__, max_nodes); |
|
|
|
|
|
gf_res_prev.reset(new llm_graph_result(max_nodes)); |
|
|
gf_res_reserve.reset(new llm_graph_result(max_nodes)); |
|
|
|
|
|
|
|
|
|
|
|
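        // pipeline parallelism requires multiple GPU devices, full layer offload with layer splitting,
        // KQV offload and no tensor overrides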
bool pipeline_parallel = |
|
|
model.n_devices() > 1 && |
|
|
model.params.n_gpu_layers > (int) model.hparams.n_layer && |
|
|
model.params.split_mode == LLAMA_SPLIT_MODE_LAYER && |
|
|
cparams.offload_kqv && |
|
|
!model.has_tensor_overrides(); |
|
|
|
|
|
|
|
|
if (pipeline_parallel) { |
|
|
for (auto & backend : backends) { |
|
|
auto dev_type = ggml_backend_dev_type(ggml_backend_get_device(backend.get())); |
|
|
if (dev_type == GGML_BACKEND_DEVICE_TYPE_CPU) { |
|
|
|
|
|
continue; |
|
|
} |
|
|
auto * dev = ggml_backend_get_device(backend.get()); |
|
|
ggml_backend_dev_props props; |
|
|
ggml_backend_dev_get_props(dev, &props); |
|
|
if (!props.caps.async || !props.caps.events) { |
|
|
|
|
|
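                    // the device does not support async compute or events - disable pipeline parallelism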
pipeline_parallel = false; |
|
|
break; |
|
|
} |
|
|
} |
|
|
} |
|
|
|
|
|
sched.reset(ggml_backend_sched_new(backend_ptrs.data(), backend_buft.data(), backend_ptrs.size(), max_nodes, pipeline_parallel, cparams.op_offload)); |
|
|
|
|
|
if (pipeline_parallel) { |
|
|
LLAMA_LOG_INFO("%s: pipeline parallelism enabled (n_copies=%d)\n", __func__, ggml_backend_sched_get_n_copies(sched.get())); |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
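    // reserve worst-case graphs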
if (!hparams.vocab_only && memory) { |
|
|
const uint32_t n_seqs = cparams.kv_unified ? 1 : cparams.n_seq_max; |
|
|
const uint32_t n_tokens = std::min(cparams.n_ctx, cparams.n_ubatch); |
|
|
|
|
|
LLAMA_LOG_DEBUG("%s: worst-case: n_tokens = %d, n_seqs = %d, n_outputs = %d\n", __func__, n_tokens, n_seqs, n_outputs); |
|
|
|
|
|
int n_splits_pp = -1; |
|
|
int n_nodes_pp = -1; |
|
|
|
|
|
int n_splits_tg = -1; |
|
|
int n_nodes_tg = -1; |
|
|
|
|
|
|
|
|
|
|
|
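        // prepare a full memory context for the worst-case graph reservations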
const auto mctx = memory->init_full(); |
|
|
if (!mctx) { |
|
|
throw std::runtime_error("failed to initialize KV cache"); |
|
|
} |
|
|
|
|
|
cross.v_embd.clear(); |
|
|
|
|
|
|
|
|
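        // reserve the prompt processing (pp) graph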
{ |
|
|
auto * gf = graph_reserve(n_tokens, n_seqs, n_tokens, mctx.get()); |
|
|
if (!gf) { |
|
|
throw std::runtime_error("failed to allocate compute pp buffers"); |
|
|
} |
|
|
|
|
|
n_splits_pp = ggml_backend_sched_get_n_splits(sched.get()); |
|
|
n_nodes_pp = ggml_graph_n_nodes(gf); |
|
|
} |
|
|
|
|
|
|
|
|
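        // reserve the token generation (tg) graph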
{ |
|
|
auto * gf = graph_reserve(n_seqs, n_seqs, n_seqs, mctx.get()); |
|
|
if (!gf) { |
|
|
throw std::runtime_error("failed to allocate compute tg buffers"); |
|
|
} |
|
|
|
|
|
n_splits_tg = ggml_backend_sched_get_n_splits(sched.get()); |
|
|
n_nodes_tg = ggml_graph_n_nodes(gf); |
|
|
} |
|
|
|
|
|
|
|
|
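        // reserve the pp graph again to avoid buffer reallocations during inference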
{ |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
auto * gf = graph_reserve(n_tokens, n_seqs, n_tokens, mctx.get()); |
|
|
if (!gf) { |
|
|
throw std::runtime_error("failed to allocate compute pp buffers"); |
|
|
} |
|
|
} |
|
|
|
|
|
for (size_t i = 0; i < backend_ptrs.size(); ++i) { |
|
|
ggml_backend_t backend = backend_ptrs[i]; |
|
|
ggml_backend_buffer_type_t buft = backend_buft[i]; |
|
|
size_t size = ggml_backend_sched_get_buffer_size(sched.get(), backend); |
|
|
if (size > 1) { |
|
|
LLAMA_LOG_INFO("%s: %10s compute buffer size = %8.2f MiB\n", __func__, |
|
|
ggml_backend_buft_name(buft), |
|
|
size / 1024.0 / 1024.0); |
|
|
} |
|
|
} |
|
|
|
|
|
if (n_nodes_pp == n_nodes_tg) { |
|
|
LLAMA_LOG_INFO("%s: graph nodes = %d\n", __func__, n_nodes_pp); |
|
|
} else { |
|
|
LLAMA_LOG_INFO("%s: graph nodes = %d (with bs=%d), %d (with bs=1)\n", __func__, n_nodes_pp, n_tokens, n_nodes_tg); |
|
|
} |
|
|
|
|
|
if (n_splits_pp == n_splits_tg) { |
|
|
LLAMA_LOG_INFO("%s: graph splits = %d\n", __func__, n_splits_pp); |
|
|
} else { |
|
|
LLAMA_LOG_INFO("%s: graph splits = %d (with bs=%d), %d (with bs=1)\n", __func__, n_splits_pp, n_tokens, n_splits_tg); |
|
|
} |
|
|
} |
|
|
} |
|
|
|
|
|
llama_context::~llama_context() { |
|
|
ggml_opt_free(opt_ctx); |
|
|
} |
|
|
|
|
|
void llama_context::synchronize() { |
|
|
ggml_backend_sched_synchronize(sched.get()); |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
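    // add the evaluation to the perf stats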
if (n_queued_tokens == 1) { |
|
|
if (!cparams.no_perf) { |
|
|
t_eval_us += ggml_time_us() - t_compute_start_us; |
|
|
} |
|
|
n_eval++; |
|
|
} else if (n_queued_tokens > 1) { |
|
|
if (!cparams.no_perf) { |
|
|
t_p_eval_us += ggml_time_us() - t_compute_start_us; |
|
|
} |
|
|
n_p_eval += n_queued_tokens; |
|
|
} |
|
|
|
|
|
|
|
|
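    // get a more accurate load time, upon first eval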
if (n_queued_tokens > 0 && !has_evaluated_once) { |
|
|
t_load_us = ggml_time_us() - t_start_us; |
|
|
has_evaluated_once = true; |
|
|
} |
|
|
|
|
|
n_queued_tokens = 0; |
|
|
t_compute_start_us = 0; |
|
|
} |
|
|
|
|
|
const llama_model & llama_context::get_model() const { |
|
|
return model; |
|
|
} |
|
|
|
|
|
const llama_cparams & llama_context::get_cparams() const { |
|
|
return cparams; |
|
|
} |
|
|
|
|
|
ggml_backend_sched_t llama_context::get_sched() const { |
|
|
return sched.get(); |
|
|
} |
|
|
|
|
|
uint32_t llama_context::n_ctx() const { |
|
|
return cparams.n_ctx; |
|
|
} |
|
|
|
|
|
uint32_t llama_context::n_ctx_per_seq() const { |
|
|
return cparams.n_ctx / cparams.n_seq_max; |
|
|
} |
|
|
|
|
|
uint32_t llama_context::n_batch() const { |
|
|
return cparams.n_batch; |
|
|
} |
|
|
|
|
|
uint32_t llama_context::n_ubatch() const { |
|
|
return cparams.n_ubatch; |
|
|
} |
|
|
|
|
|
uint32_t llama_context::n_seq_max() const { |
|
|
return cparams.n_seq_max; |
|
|
} |
|
|
|
|
|
uint32_t llama_context::n_threads() const { |
|
|
return cparams.n_threads; |
|
|
} |
|
|
|
|
|
uint32_t llama_context::n_threads_batch() const { |
|
|
return cparams.n_threads_batch; |
|
|
} |
|
|
|
|
|
llama_memory_t llama_context::get_memory() const { |
|
|
return memory.get(); |
|
|
} |
|
|
|
|
|
|
|
|
void llama_context::kv_self_defrag_sched() { |
|
|
if (!memory) { |
|
|
return; |
|
|
} |
|
|
|
|
|
memory_force_optimize = true; |
|
|
} |
|
|
|
|
|
|
|
|
bool llama_context::kv_self_update(bool optimize) { |
|
|
if (!memory) { |
|
|
return false; |
|
|
} |
|
|
|
|
|
{ |
|
|
|
|
|
optimize |= memory_force_optimize; |
|
|
memory_force_optimize = false; |
|
|
|
|
|
const auto mctx = memory->init_update(this, optimize); |
|
|
switch (mctx->get_status()) { |
|
|
case LLAMA_MEMORY_STATUS_SUCCESS: |
|
|
{ |
|
|
|
|
|
} break; |
|
|
case LLAMA_MEMORY_STATUS_NO_UPDATE: |
|
|
{ |
|
|
|
|
|
return false; |
|
|
} |
|
|
case LLAMA_MEMORY_STATUS_FAILED_PREPARE: |
|
|
case LLAMA_MEMORY_STATUS_FAILED_COMPUTE: |
|
|
{ |
|
|
LLAMA_LOG_ERROR("%s: failed to prepare memory update\n", __func__); |
|
|
return false; |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
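        // invalidate the previous graph result before applying the memory update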
gf_res_prev->reset(); |
|
|
|
|
|
if (!mctx->apply()) { |
|
|
LLAMA_LOG_ERROR("%s: failed to apply memory update\n", __func__); |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
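    // the update succeeded - reserve a new worst-case graph with a full memory context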
{ |
|
|
const auto mctx = memory->init_full(); |
|
|
if (!mctx) { |
|
|
throw std::runtime_error("failed to initialize memory context"); |
|
|
} |
|
|
|
|
|
const uint32_t n_seqs = cparams.kv_unified ? 1 : cparams.n_seq_max; |
|
|
const uint32_t n_tokens = std::min(cparams.n_ctx, cparams.n_ubatch); |
|
|
|
|
|
auto * gf = graph_reserve(n_tokens, n_seqs, n_tokens, mctx.get()); |
|
|
if (!gf) { |
|
|
LLAMA_LOG_ERROR("%s: failed to reserve graph after the memory update\n", __func__); |
|
|
} |
|
|
} |
|
|
|
|
|
return true; |
|
|
} |
|
|
|
|
|
enum llama_pooling_type llama_context::pooling_type() const { |
|
|
return cparams.pooling_type; |
|
|
} |
|
|
|
|
|
float * llama_context::get_logits() { |
|
|
return logits; |
|
|
} |
|
|
|
|
|
float * llama_context::get_logits_ith(int32_t i) { |
|
|
int64_t j = -1; |
|
|
|
|
|
try { |
|
|
if (logits == nullptr) { |
|
|
throw std::runtime_error("no logits"); |
|
|
} |
|
|
|
|
|
if (i < 0) { |
|
|
j = n_outputs + i; |
|
|
if (j < 0) { |
|
|
throw std::runtime_error(format("negative index out of range [0, %d)", n_outputs)); |
|
|
} |
|
|
} else if ((size_t) i >= output_ids.size()) { |
|
|
throw std::runtime_error(format("out of range [0, %zu)", output_ids.size())); |
|
|
} else { |
|
|
j = output_ids[i]; |
|
|
} |
|
|
|
|
|
if (j < 0) { |
|
|
throw std::runtime_error(format("batch.logits[%d] != true", i)); |
|
|
} |
|
|
if (j >= n_outputs) { |
|
|
|
|
|
throw std::runtime_error(format("corrupt output buffer (j=%" PRId64 ", n_outputs=%d)", j, n_outputs)); |
|
|
} |
|
|
|
|
|
return logits + j*model.vocab.n_tokens(); |
|
|
} catch (const std::exception & err) { |
|
|
LLAMA_LOG_ERROR("%s: invalid logits id %d, reason: %s\n", __func__, i, err.what()); |
|
|
#ifndef NDEBUG |
|
|
GGML_ABORT("fatal error"); |
|
|
#else |
|
|
return nullptr; |
|
|
#endif |
|
|
} |
|
|
} |
|
|
|
|
|
float * llama_context::get_embeddings() { |
|
|
return embd; |
|
|
} |
|
|
|
|
|
float * llama_context::get_embeddings_ith(int32_t i) { |
|
|
int64_t j = -1; |
|
|
|
|
|
try { |
|
|
if (embd == nullptr) { |
|
|
throw std::runtime_error("no embeddings"); |
|
|
} |
|
|
|
|
|
if (i < 0) { |
|
|
j = n_outputs + i; |
|
|
if (j < 0) { |
|
|
throw std::runtime_error(format("negative index out of range [0, %d)", n_outputs)); |
|
|
} |
|
|
} else if ((size_t) i >= output_ids.size()) { |
|
|
throw std::runtime_error(format("out of range [0, %zu)", output_ids.size())); |
|
|
} else { |
|
|
j = output_ids[i]; |
|
|
} |
|
|
|
|
|
if (j < 0) { |
|
|
throw std::runtime_error(format("batch.logits[%d] != true", i)); |
|
|
} |
|
|
if (j >= n_outputs) { |
|
|
|
|
|
throw std::runtime_error(format("corrupt output buffer (j=%" PRId64 ", n_outputs=%d)", j, n_outputs)); |
|
|
} |
|
|
|
|
|
return embd + j*model.hparams.n_embd; |
|
|
} catch (const std::exception & err) { |
|
|
LLAMA_LOG_ERROR("%s: invalid embeddings id %d, reason: %s\n", __func__, i, err.what()); |
|
|
#ifndef NDEBUG |
|
|
GGML_ABORT("fatal error"); |
|
|
#else |
|
|
return nullptr; |
|
|
#endif |
|
|
} |
|
|
} |
|
|
|
|
|
float * llama_context::get_embeddings_seq(llama_seq_id seq_id) { |
|
|
auto it = embd_seq.find(seq_id); |
|
|
if (it == embd_seq.end()) { |
|
|
return nullptr; |
|
|
} |
|
|
|
|
|
return it->second.data(); |
|
|
} |
|
|
|
|
|
void llama_context::attach_threadpool( |
|
|
ggml_threadpool_t threadpool, |
|
|
ggml_threadpool_t threadpool_batch) { |
|
|
LLAMA_LOG_DEBUG("%s: call\n", __func__); |
|
|
|
|
|
this->threadpool = threadpool; |
|
|
this->threadpool_batch = threadpool_batch ? threadpool_batch : threadpool; |
|
|
} |
|
|
|
|
|
void llama_context::detach_threadpool() { |
|
|
LLAMA_LOG_DEBUG("%s: call\n", __func__); |
|
|
|
|
|
this->threadpool = nullptr; |
|
|
this->threadpool_batch = nullptr; |
|
|
} |
|
|
|
|
|
void llama_context::set_n_threads(int32_t n_threads, int32_t n_threads_batch) { |
|
|
LLAMA_LOG_DEBUG("%s: n_threads = %d, n_threads_batch = %d\n", __func__, n_threads, n_threads_batch); |
|
|
|
|
|
cparams.n_threads = n_threads; |
|
|
cparams.n_threads_batch = n_threads_batch; |
|
|
} |
|
|
|
|
|
void llama_context::set_abort_callback(bool (*abort_callback)(void * data), void * abort_callback_data) { |
|
|
LLAMA_LOG_DEBUG("%s: call\n", __func__); |
|
|
|
|
|
this->abort_callback = abort_callback; |
|
|
this->abort_callback_data = abort_callback_data; |
|
|
|
|
|
for (auto & backend : backends) { |
|
|
auto * reg = ggml_backend_dev_backend_reg(ggml_backend_get_device(backend.get())); |
|
|
auto * set_abort_callback_fn = (ggml_backend_set_abort_callback_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_abort_callback"); |
|
|
if (set_abort_callback_fn) { |
|
|
set_abort_callback_fn(backend.get(), this->abort_callback, this->abort_callback_data); |
|
|
} |
|
|
} |
|
|
} |
|
|
|
|
|
void llama_context::set_embeddings(bool value) { |
|
|
LLAMA_LOG_DEBUG("%s: value = %d\n", __func__, value); |
|
|
|
|
|
cparams.embeddings = value; |
|
|
} |
|
|
|
|
|
void llama_context::set_causal_attn(bool value) { |
|
|
LLAMA_LOG_DEBUG("%s: value = %d\n", __func__, value); |
|
|
|
|
|
cparams.causal_attn = value; |
|
|
} |
|
|
|
|
|
void llama_context::set_warmup(bool value) { |
|
|
LLAMA_LOG_DEBUG("%s: value = %d\n", __func__, value); |
|
|
|
|
|
cparams.warmup = value; |
|
|
} |
|
|
|
|
|
void llama_context::set_adapter_lora( |
|
|
llama_adapter_lora * adapter, |
|
|
float scale) { |
|
|
LLAMA_LOG_DEBUG("%s: adapter = %p, scale = %f\n", __func__, (void *) adapter, scale); |
|
|
|
|
|
loras[adapter] = scale; |
|
|
} |
|
|
|
|
|
bool llama_context::rm_adapter_lora( |
|
|
llama_adapter_lora * adapter) { |
|
|
LLAMA_LOG_DEBUG("%s: adapter = %p\n", __func__, (void *) adapter); |
|
|
|
|
|
auto pos = loras.find(adapter); |
|
|
if (pos != loras.end()) { |
|
|
loras.erase(pos); |
|
|
return true; |
|
|
} |
|
|
|
|
|
return false; |
|
|
} |
|
|
|
|
|
void llama_context::clear_adapter_lora() { |
|
|
LLAMA_LOG_DEBUG("%s: call\n", __func__); |
|
|
|
|
|
loras.clear(); |
|
|
} |
|
|
|
|
|
bool llama_context::apply_adapter_cvec( |
|
|
const float * data, |
|
|
size_t len, |
|
|
int32_t n_embd, |
|
|
int32_t il_start, |
|
|
int32_t il_end) { |
|
|
LLAMA_LOG_DEBUG("%s: il_start = %d, il_end = %d\n", __func__, il_start, il_end); |
|
|
|
|
|
return cvec.apply(model, data, len, n_embd, il_start, il_end); |
|
|
} |
|
|
|
|
|
llm_graph_result * llama_context::process_ubatch(const llama_ubatch & ubatch, llm_graph_type gtype, llama_memory_context_i * mctx, ggml_status & ret) { |
|
|
if (mctx && !mctx->apply()) { |
|
|
LLAMA_LOG_ERROR("%s: failed to apply memory context\n", __func__); |
|
|
ret = GGML_STATUS_FAILED; |
|
|
return nullptr; |
|
|
} |
|
|
|
|
|
auto * res = gf_res_prev.get(); |
|
|
auto * gf = res->get_gf(); |
|
|
|
|
|
|
|
|
|
|
|
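    // check whether the previous graph can be reused for these parameters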
const auto gparams = graph_params(res, ubatch, mctx, gtype); |
|
|
|
|
|
if (res->can_reuse(gparams)) { |
|
|
|
|
|
|
|
|
n_reused++; |
|
|
} else { |
|
|
res->reset(); |
|
|
|
|
|
ggml_backend_sched_reset(sched.get()); |
|
|
ggml_backend_sched_set_eval_callback(sched.get(), cparams.cb_eval, cparams.cb_eval_user_data); |
|
|
|
|
|
|
|
|
|
|
|
gf = model.build_graph(gparams); |
|
|
|
|
|
|
|
|
|
|
|
if (!gf) { |
|
|
LLAMA_LOG_ERROR("%s: failed to initialize graph\n", __func__); |
|
|
ret = GGML_STATUS_FAILED; |
|
|
return nullptr; |
|
|
} |
|
|
|
|
|
if (!ggml_backend_sched_alloc_graph(sched.get(), gf)) { |
|
|
LLAMA_LOG_ERROR("%s: failed to allocate graph\n", __func__); |
|
|
ret = GGML_STATUS_ALLOC_FAILED; |
|
|
return nullptr; |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
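    // set the input data for the input tensors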
{ |
|
|
|
|
|
|
|
|
res->set_inputs(&ubatch); |
|
|
|
|
|
|
|
|
} |
|
|
|
|
|
const auto status = graph_compute(res->get_gf(), ubatch.n_tokens > 1); |
|
|
if (status != GGML_STATUS_SUCCESS) { |
|
|
LLAMA_LOG_ERROR("%s: failed to compute graph, compute status: %d\n", __func__, status); |
|
|
ret = status; |
|
|
return nullptr; |
|
|
} |
|
|
|
|
|
ret = GGML_STATUS_SUCCESS; |
|
|
|
|
|
return res; |
|
|
} |
|
|
|
|
|
int llama_context::encode(const llama_batch & batch_inp) { |
|
|
GGML_ASSERT((!batch_inp.token && batch_inp.embd) || (batch_inp.token && !batch_inp.embd)); |
|
|
|
|
|
if (batch_inp.n_tokens == 0) { |
|
|
LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__); |
|
|
return -1; |
|
|
} |
|
|
|
|
|
const auto & hparams = model.hparams; |
|
|
|
|
|
const int64_t n_embd = hparams.n_embd; |
|
|
const int32_t n_vocab = model.vocab.n_tokens(); |
|
|
|
|
|
|
|
|
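    // note: the encoder does not use the memory module, so nullptr is passed for it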
if (!balloc->init(batch_inp, model.vocab, nullptr, n_embd, cparams.kv_unified ? LLAMA_MAX_SEQ : cparams.n_seq_max, true)) { |
|
|
LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__); |
|
|
return -1; |
|
|
} |
|
|
|
|
|
const uint32_t n_tokens = balloc->get_n_tokens(); |
|
|
|
|
|
|
|
|
|
|
|
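    // the encoder processes the whole batch as a single ubatch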
const llama_ubatch ubatch = balloc->split_simple(n_tokens); |
|
|
|
|
|
|
|
|
GGML_ASSERT(cparams.n_ubatch >= n_tokens && "encoder requires n_ubatch >= n_tokens"); |
|
|
|
|
|
if (t_compute_start_us == 0) { |
|
|
t_compute_start_us = ggml_time_us(); |
|
|
} |
|
|
|
|
|
|
|
|
embd_seq.clear(); |
|
|
|
|
|
n_queued_tokens += n_tokens; |
|
|
|
|
|
|
|
|
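    // reserve output buffer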
if (output_reserve(n_tokens) < n_tokens) { |
|
|
LLAMA_LOG_ERROR("%s: could not reserve space for batch with %u outputs\n", __func__, n_tokens); |
|
|
return -2; |
|
|
    }
|
|
|
|
|
for (uint32_t i = 0; i < n_tokens; ++i) { |
|
|
output_ids[i] = i; |
|
|
} |
|
|
|
|
|
n_outputs = n_tokens; |
|
|
|
|
|
const auto causal_attn_org = cparams.causal_attn; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
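    // the encoder always uses non-causal attention; the original setting is restored below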
cparams.causal_attn = false; |
|
|
|
|
|
ggml_status status; |
|
|
const auto * res = process_ubatch(ubatch, LLM_GRAPH_TYPE_ENCODER, nullptr, status); |
|
|
|
|
|
cparams.causal_attn = causal_attn_org; |
|
|
|
|
|
if (!res) { |
|
|
switch (status) { |
|
|
case GGML_STATUS_ABORTED: return 2; |
|
|
case GGML_STATUS_ALLOC_FAILED: return -2; |
|
|
case GGML_STATUS_FAILED: return -3; |
|
|
case GGML_STATUS_SUCCESS: GGML_ABORT("should not happen"); |
|
|
} |
|
|
} |
|
|
|
|
|
auto * t_logits = res->get_logits(); |
|
|
auto * t_embd = res->get_embd_pooled() ? res->get_embd_pooled() : res->get_embd(); |
|
|
|
|
|
|
|
|
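    // extract logits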
if (logits && t_logits) { |
|
|
ggml_backend_t backend_res = ggml_backend_sched_get_tensor_backend(sched.get(), t_logits); |
|
|
GGML_ASSERT(backend_res != nullptr); |
|
|
GGML_ASSERT(logits != nullptr); |
|
|
|
|
|
ggml_backend_tensor_get_async(backend_res, t_logits, logits, 0, n_tokens*n_vocab*sizeof(float)); |
|
|
} |
|
|
|
|
|
|
|
|
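    // extract embeddings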
if (embd && t_embd) { |
|
|
ggml_backend_t backend_embd = ggml_backend_sched_get_tensor_backend(sched.get(), t_embd); |
|
|
GGML_ASSERT(backend_embd != nullptr); |
|
|
|
|
|
switch (cparams.pooling_type) { |
|
|
case LLAMA_POOLING_TYPE_NONE: |
|
|
{ |
|
|
|
|
|
GGML_ASSERT(embd != nullptr); |
|
|
|
|
|
GGML_ASSERT(n_tokens*n_embd <= (int64_t) embd_size); |
|
|
ggml_backend_tensor_get_async(backend_embd, t_embd, embd, 0, n_tokens*n_embd*sizeof(float)); |
|
|
} break; |
|
|
case LLAMA_POOLING_TYPE_MEAN: |
|
|
case LLAMA_POOLING_TYPE_CLS: |
|
|
case LLAMA_POOLING_TYPE_LAST: |
|
|
{ |
|
|
|
|
|
auto & embd_seq_out = embd_seq; |
|
|
|
|
|
for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) { |
|
|
const llama_seq_id seq_id = ubatch.seq_id_unq[s]; |
|
|
const int32_t seq_idx = ubatch.seq_idx[seq_id]; |
|
|
|
|
|
embd_seq_out[seq_id].resize(n_embd); |
|
|
ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_embd*seq_idx)*sizeof(float), n_embd*sizeof(float)); |
|
|
} |
|
|
} break; |
|
|
case LLAMA_POOLING_TYPE_RANK: |
|
|
{ |
|
|
|
|
|
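                    // extract the rerank score - n_cls_out floats per sequence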
auto & embd_seq_out = embd_seq; |
|
|
|
|
|
const uint32_t n_cls_out = hparams.n_cls_out; |
|
|
|
|
|
for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) { |
|
|
const llama_seq_id seq_id = ubatch.seq_id_unq[s]; |
|
|
const int32_t seq_idx = ubatch.seq_idx[seq_id]; |
|
|
|
|
|
embd_seq_out[seq_id].resize(n_cls_out); |
|
|
ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_cls_out*seq_idx)*sizeof(float), n_cls_out*sizeof(float)); |
|
|
} |
|
|
} break; |
|
|
case LLAMA_POOLING_TYPE_UNSPECIFIED: |
|
|
{ |
|
|
GGML_ABORT("unknown pooling type"); |
|
|
} |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
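    // store the encoder output so that the decoder cross-attention can use it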
if (model.arch == LLM_ARCH_T5 && t_embd) { |
|
|
|
|
|
|
|
|
synchronize(); |
|
|
|
|
|
cross.n_embd = t_embd->ne[0]; |
|
|
cross.n_enc = t_embd->ne[1]; |
|
|
cross.v_embd.resize(cross.n_embd*cross.n_enc); |
|
|
memcpy(cross.v_embd.data(), embd, ggml_nbytes(t_embd)); |
|
|
|
|
|
const auto & batch = balloc->get_batch(); |
|
|
|
|
|
|
|
|
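        // remember the sequence ids used during the encoding - needed for cross attention later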
cross.seq_ids_enc.resize(n_tokens); |
|
|
for (uint32_t i = 0; i < n_tokens; i++) { |
|
|
cross.seq_ids_enc[i].clear(); |
|
|
|
|
|
for (int s = 0; s < batch.n_seq_id[i]; s++) { |
|
|
const llama_seq_id seq_id = batch.seq_id[i][s]; |
|
|
|
|
|
cross.seq_ids_enc[i].insert(seq_id); |
|
|
} |
|
|
} |
|
|
} |
|
|
|
|
|
return 0; |
|
|
} |
|
|
|
|
|
int llama_context::decode(const llama_batch & batch_inp) { |
|
|
GGML_ASSERT((!batch_inp.token && batch_inp.embd) || (batch_inp.token && !batch_inp.embd)); |
|
|
|
|
|
if (!memory) { |
|
|
LLAMA_LOG_DEBUG("%s: cannot decode batches with this context (calling encode() instead)\n", __func__); |
|
|
return encode(batch_inp); |
|
|
} |
|
|
|
|
|
if (batch_inp.n_tokens == 0) { |
|
|
LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__); |
|
|
return -1; |
|
|
} |
|
|
|
|
|
const auto & vocab = model.vocab; |
|
|
const auto & hparams = model.hparams; |
|
|
|
|
|
const int32_t n_vocab = vocab.n_tokens(); |
|
|
const int64_t n_embd = hparams.n_embd; |
|
|
|
|
|
|
|
|
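    // when computing embeddings, all tokens are output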
const bool output_all = cparams.embeddings; |
|
|
|
|
|
if (!balloc->init(batch_inp, vocab, memory.get(), n_embd, cparams.kv_unified ? LLAMA_MAX_SEQ : cparams.n_seq_max, output_all)) { |
|
|
LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__); |
|
|
return -1; |
|
|
} |
|
|
|
|
|
const uint32_t n_tokens_all = balloc->get_n_tokens(); |
|
|
const uint32_t n_outputs_all = balloc->get_n_outputs(); |
|
|
|
|
|
if (output_all) { |
|
|
|
|
|
if (n_outputs_all != n_tokens_all) { |
|
|
LLAMA_LOG_ERROR("%s: pooled embedding requires that all tokens are output (n_outputs_all = %d, n_tokens_all = %d)\n", |
|
|
__func__, n_outputs_all, n_tokens_all); |
|
|
return -1; |
|
|
} |
|
|
} |
|
|
|
|
|
GGML_ASSERT(n_tokens_all <= cparams.n_batch); |
|
|
|
|
|
GGML_ASSERT((cparams.causal_attn || cparams.n_ubatch >= n_tokens_all) && "non-causal attention requires n_ubatch >= n_tokens"); |
|
|
|
|
|
if (t_compute_start_us == 0) { |
|
|
t_compute_start_us = ggml_time_us(); |
|
|
} |
|
|
n_queued_tokens += n_tokens_all; |
|
|
|
|
|
|
|
|
embd_seq.clear(); |
|
|
|
|
|
bool did_optimize = false; |
|
|
|
|
|
|
|
|
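    // apply any pending memory updates before processing the batch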
kv_self_update(false); |
|
|
|
|
|
llama_memory_context_ptr mctx; |
|
|
|
|
|
while (true) { |
|
|
mctx = memory->init_batch(*balloc, cparams.n_ubatch, output_all); |
|
|
if (!mctx) { |
|
|
return -2; |
|
|
} |
|
|
|
|
|
switch (mctx->get_status()) { |
|
|
case LLAMA_MEMORY_STATUS_SUCCESS: |
|
|
{ |
|
|
} break; |
|
|
case LLAMA_MEMORY_STATUS_NO_UPDATE: |
|
|
{ |
|
|
LLAMA_LOG_ERROR("%s: unexpected memory context status: %d\n", __func__, mctx->get_status()); |
|
|
|
|
|
return -2; |
|
|
} |
|
|
case LLAMA_MEMORY_STATUS_FAILED_PREPARE: |
|
|
{ |
|
|
if (!did_optimize) { |
|
|
did_optimize = true; |
|
|
|
|
|
if (kv_self_update(true)) { |
|
|
LLAMA_LOG_DEBUG("%s: retrying batch size %d after cache optimization\n", __func__, balloc->get_n_tokens()); |
|
|
|
|
|
continue; |
|
|
} |
|
|
} |
|
|
|
|
|
LLAMA_LOG_WARN("%s: failed to find a memory slot for batch of size %d\n", __func__, balloc->get_n_tokens()); |
|
|
|
|
|
return 1; |
|
|
} |
|
|
case LLAMA_MEMORY_STATUS_FAILED_COMPUTE: |
|
|
{ |
|
|
LLAMA_LOG_ERROR("%s: compute failed while preparing batch of size %d\n", __func__, balloc->get_n_tokens()); |
|
|
|
|
|
return -2; |
|
|
} |
|
|
} |
|
|
|
|
|
break; |
|
|
} |
|
|
|
|
|
|
|
|
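    // reserve output buffer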
if (output_reserve(n_outputs_all) < n_outputs_all) { |
|
|
LLAMA_LOG_ERROR("%s: could not reserve space for batch with %d outputs\n", __func__, n_outputs_all); |
|
|
return -2; |
|
|
    }
|
|
|
|
|
int64_t n_outputs_prev = 0; |
|
|
|
|
|
do { |
|
|
const auto & ubatch = mctx->get_ubatch(); |
|
|
|
|
|
|
|
|
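        // count the outputs in this ubatch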
{ |
|
|
int32_t n_outputs_new = 0; |
|
|
|
|
|
if (n_outputs_all == n_tokens_all) { |
|
|
n_outputs_new = ubatch.n_tokens; |
|
|
} else { |
|
|
for (uint32_t i = 0; i < ubatch.n_tokens; i++) { |
|
|
n_outputs_new += (int32_t) (ubatch.output[i] != 0); |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
n_outputs = n_outputs_new; |
|
|
} |
|
|
|
|
|
ggml_status status; |
|
|
const auto * res = process_ubatch(ubatch, LLM_GRAPH_TYPE_DECODER, mctx.get(), status); |
|
|
|
|
|
if (!res) { |
|
|
|
|
|
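            // processing of this ubatch failed or was aborted - remove its positions from the memory to keep it consistent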
llama_pos pos_min[LLAMA_MAX_SEQ]; |
|
|
for (int s = 0; s < LLAMA_MAX_SEQ; ++s) { |
|
|
pos_min[s] = std::numeric_limits<llama_pos>::max(); |
|
|
} |
|
|
|
|
|
for (uint32_t i = 0; i < ubatch.n_tokens; ++i) { |
|
|
const auto & seq_id = ubatch.seq_id[i][0]; |
|
|
|
|
|
pos_min[seq_id] = std::min(pos_min[seq_id], ubatch.pos[i]); |
|
|
} |
|
|
|
|
|
for (int s = 0; s < LLAMA_MAX_SEQ; ++s) { |
|
|
if (pos_min[s] == std::numeric_limits<llama_pos>::max()) { |
|
|
continue; |
|
|
} |
|
|
|
|
|
LLAMA_LOG_WARN("%s: removing KV cache entries for seq_id = %d, pos = [%d, +inf)\n", __func__, s, pos_min[s]); |
|
|
|
|
|
memory->seq_rm(s, pos_min[s], -1); |
|
|
} |
|
|
|
|
|
switch (status) { |
|
|
case GGML_STATUS_ABORTED: return 2; |
|
|
case GGML_STATUS_ALLOC_FAILED: return -2; |
|
|
case GGML_STATUS_FAILED: return -3; |
|
|
case GGML_STATUS_SUCCESS: GGML_ABORT("should not happen"); |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
auto * t_logits = res->get_logits(); |
|
|
auto * t_embd = cparams.embeddings ? res->get_embd() : nullptr; |
|
|
|
|
|
if (t_embd && res->get_embd_pooled()) { |
|
|
t_embd = res->get_embd_pooled(); |
|
|
} |
|
|
|
|
|
|
|
|
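        // extract logits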
if (t_logits && n_outputs > 0) { |
|
|
ggml_backend_t backend_res = ggml_backend_sched_get_tensor_backend(sched.get(), t_logits); |
|
|
GGML_ASSERT(backend_res != nullptr); |
|
|
GGML_ASSERT(logits != nullptr); |
|
|
|
|
|
float * logits_out = logits + n_outputs_prev*n_vocab; |
|
|
|
|
|
if (n_outputs) { |
|
|
GGML_ASSERT( n_outputs_prev + n_outputs <= n_outputs_all); |
|
|
GGML_ASSERT((n_outputs_prev + n_outputs)*n_vocab <= (int64_t) logits_size); |
|
|
ggml_backend_tensor_get_async(backend_res, t_logits, logits_out, 0, n_outputs*n_vocab*sizeof(float)); |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
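        // extract embeddings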
if (t_embd && n_outputs > 0) { |
|
|
ggml_backend_t backend_embd = ggml_backend_sched_get_tensor_backend(sched.get(), t_embd); |
|
|
GGML_ASSERT(backend_embd != nullptr); |
|
|
|
|
|
switch (cparams.pooling_type) { |
|
|
case LLAMA_POOLING_TYPE_NONE: |
|
|
{ |
|
|
|
|
|
GGML_ASSERT(embd != nullptr); |
|
|
float * embd_out = embd + n_outputs_prev*n_embd; |
|
|
|
|
|
if (n_outputs) { |
|
|
GGML_ASSERT( n_outputs_prev + n_outputs <= n_outputs_all); |
|
|
GGML_ASSERT((n_outputs_prev + n_outputs)*n_embd <= (int64_t) embd_size); |
|
|
ggml_backend_tensor_get_async(backend_embd, t_embd, embd_out, 0, n_outputs*n_embd*sizeof(float)); |
|
|
} |
|
|
} break; |
|
|
case LLAMA_POOLING_TYPE_MEAN: |
|
|
case LLAMA_POOLING_TYPE_CLS: |
|
|
case LLAMA_POOLING_TYPE_LAST: |
|
|
{ |
|
|
|
|
|
auto & embd_seq_out = embd_seq; |
|
|
|
|
|
for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) { |
|
|
const llama_seq_id seq_id = ubatch.seq_id_unq[s]; |
|
|
const int32_t seq_idx = ubatch.seq_idx[seq_id]; |
|
|
|
|
|
embd_seq_out[seq_id].resize(n_embd); |
|
|
ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_embd*seq_idx)*sizeof(float), n_embd*sizeof(float)); |
|
|
} |
|
|
} break; |
|
|
case LLAMA_POOLING_TYPE_RANK: |
|
|
{ |
|
|
|
|
|
auto & embd_seq_out = embd_seq; |
|
|
|
|
|
const uint32_t n_cls_out = hparams.n_cls_out; |
|
|
|
|
|
for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) { |
|
|
const llama_seq_id seq_id = ubatch.seq_id_unq[s]; |
|
|
const int32_t seq_idx = ubatch.seq_idx[seq_id]; |
|
|
|
|
|
embd_seq_out[seq_id].resize(n_cls_out); |
|
|
ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_cls_out*seq_idx)*sizeof(float), n_cls_out*sizeof(float)); |
|
|
} |
|
|
} break; |
|
|
case LLAMA_POOLING_TYPE_UNSPECIFIED: |
|
|
{ |
|
|
GGML_ABORT("unknown pooling type"); |
|
|
} |
|
|
} |
|
|
} |
|
|
|
|
|
n_outputs_prev += n_outputs; |
|
|
} while (mctx->next()); |
|
|
|
|
|
|
|
|
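    // set to the total number of outputs in the batch, used by the per-index getters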
n_outputs = n_outputs_all; |
|
|
|
|
|
|
|
|
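    // set output mappings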
if (n_outputs > 0) { |
|
|
bool sorted_output = true; |
|
|
|
|
|
auto & out_ids = balloc->get_out_ids(); |
|
|
|
|
|
GGML_ASSERT(out_ids.size() == (size_t) n_outputs); |
|
|
|
|
|
for (int64_t i = 0; i < n_outputs; ++i) { |
|
|
int64_t out_id = out_ids[i]; |
|
|
output_ids[out_id] = i; |
|
|
if (out_id != i) { |
|
|
sorted_output = false; |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
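        // make the outputs have the same order they had in the user-provided batch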
if (!sorted_output) { |
|
|
const uint32_t n_vocab = model.vocab.n_tokens(); |
|
|
const uint64_t n_embd = model.hparams.n_embd; |
|
|
|
|
|
GGML_ASSERT((size_t) n_outputs == out_ids.size()); |
|
|
|
|
|
|
|
|
|
|
|
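            // selection sort, to minimize the number of swaps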
for (uint32_t i = 0; i < n_outputs - 1; ++i) { |
|
|
uint32_t j_min = i; |
|
|
for (uint32_t j = i + 1; j < n_outputs; ++j) { |
|
|
if (out_ids[j] < out_ids[j_min]) { |
|
|
j_min = j; |
|
|
} |
|
|
} |
|
|
if (j_min == i) { |
|
|
continue; |
|
|
} |
|
|
std::swap(out_ids[i], out_ids[j_min]); |
|
|
if (logits_size > 0) { |
|
|
for (uint32_t k = 0; k < n_vocab; k++) { |
|
|
std::swap(logits[i*n_vocab + k], logits[j_min*n_vocab + k]); |
|
|
} |
|
|
} |
|
|
if (embd_size > 0) { |
|
|
for (uint32_t k = 0; k < n_embd; k++) { |
|
|
std::swap(embd[i*n_embd + k], embd[j_min*n_embd + k]); |
|
|
} |
|
|
} |
|
|
} |
|
|
|
|
|
std::fill(output_ids.begin(), output_ids.end(), -1); |
|
|
|
|
|
for (uint32_t i = 0; i < n_outputs; ++i) { |
|
|
output_ids[out_ids[i]] = i; |
|
|
} |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
return 0; |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
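//
// output
//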
uint32_t llama_context::output_reserve(int32_t n_outputs) { |
|
|
const auto & hparams = model.hparams; |
|
|
const auto & vocab = model.vocab; |
|
|
|
|
|
const int64_t n_outputs_max = std::max<int64_t>(n_outputs, n_seq_max()); |
|
|
|
|
|
const auto n_batch = cparams.n_batch; |
|
|
const auto n_vocab = vocab.n_tokens(); |
|
|
const auto n_embd = hparams.n_embd; |
|
|
|
|
|
bool has_logits = true; |
|
|
bool has_embd = cparams.embeddings; |
|
|
|
|
|
|
|
|
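    // T5 needs both logits and embeddings buffers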
if (model.arch == LLM_ARCH_T5) { |
|
|
has_logits = true; |
|
|
has_embd = true; |
|
|
} |
|
|
|
|
|
logits_size = has_logits ? n_vocab*n_outputs_max : 0; |
|
|
embd_size = has_embd ? n_embd*n_outputs_max : 0; |
|
|
|
|
|
if (output_ids.empty()) { |
|
|
|
|
|
output_ids.resize(n_batch); |
|
|
} |
|
|
|
|
|
const size_t prev_size = buf_output ? ggml_backend_buffer_get_size(buf_output.get()) : 0; |
|
|
const size_t new_size = (logits_size + embd_size) * sizeof(float); |
|
|
|
|
|
|
|
|
|
|
|
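    // allocate only when more than the current capacity is required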
if (!buf_output || prev_size < new_size) { |
|
|
if (buf_output) { |
|
|
#ifndef NDEBUG |
|
|
|
|
|
LLAMA_LOG_INFO("%s: reallocating output buffer from size %.02f MiB to %.02f MiB\n", __func__, prev_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0); |
|
|
#endif |
|
|
buf_output = nullptr; |
|
|
logits = nullptr; |
|
|
embd = nullptr; |
|
|
} |
|
|
|
|
|
auto * buft = ggml_backend_cpu_buffer_type(); |
|
|
|
|
|
auto * output_dev = model.dev_output(); |
|
|
auto * output_dev_host_buft = output_dev ? ggml_backend_dev_host_buffer_type(output_dev) : nullptr; |
|
|
if (output_dev_host_buft) { |
|
|
buft = output_dev_host_buft; |
|
|
} |
|
|
buf_output.reset(ggml_backend_buft_alloc_buffer(buft, new_size)); |
|
|
if (buf_output == nullptr) { |
|
|
LLAMA_LOG_ERROR("%s: failed to allocate output buffer of size %.2f MiB\n", __func__, new_size / (1024.0 * 1024.0)); |
|
|
return 0; |
|
|
} |
|
|
} |
|
|
|
|
|
float * output_base = (float *) ggml_backend_buffer_get_base(buf_output.get()); |
|
|
|
|
|
logits = has_logits ? output_base : nullptr; |
|
|
embd = has_embd ? output_base + logits_size : nullptr; |
|
|
|
|
|
|
|
|
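    // set all ids as invalid (negative)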
std::fill(output_ids.begin(), output_ids.end(), -1); |
|
|
|
|
|
this->n_outputs = 0; |
|
|
|
|
|
return n_outputs_max; |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
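//
// graph
//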
uint32_t llama_context::graph_max_nodes() const { |
|
|
return std::max<uint32_t>(1024u, 8u*model.n_tensors()); |
|
|
} |
|
|
|
|
|
llm_graph_result * llama_context::get_gf_res_reserve() const { |
|
|
return static_cast<llm_graph_result *>(gf_res_reserve.get()); |
|
|
} |
|
|
|
|
|
ggml_cgraph * llama_context::graph_reserve(uint32_t n_tokens, uint32_t n_seqs, uint32_t n_outputs, const llama_memory_context_i * mctx) { |
|
|
LLAMA_LOG_DEBUG("%s: reserving a graph for ubatch with n_tokens = %4u, n_seqs = %2u, n_outputs = %4u\n", __func__, n_tokens, n_seqs, n_outputs); |
|
|
|
|
|
if (n_tokens % n_seqs != 0) { |
|
|
n_tokens = ((n_tokens + (n_seqs - 1)) / n_seqs) * n_seqs; |
|
|
n_outputs = std::min(n_outputs, n_tokens); |
|
|
|
|
|
LLAMA_LOG_DEBUG("%s: making n_tokens a multiple of n_seqs - n_tokens = %u, n_seqs = %u, n_outputs = %u\n", __func__, n_tokens, n_seqs, n_outputs); |
|
|
} |
|
|
|
|
|
ggml_backend_sched_reset(sched.get()); |
|
|
|
|
|
|
|
|
gf_res_prev->reset(); |
|
|
|
|
|
|
|
|
|
|
|
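    // temporarily override n_outputs while building the worst-case graph; restored below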
const auto save_n_outputs = this->n_outputs; |
|
|
|
|
|
this->n_outputs = n_outputs; |
|
|
|
|
|
llama_batch_allocr balloc(model.hparams.n_pos_per_embd()); |
|
|
llama_ubatch ubatch = balloc.ubatch_reserve(n_tokens/n_seqs, n_seqs); |
|
|
|
|
|
auto * res = gf_res_reserve.get(); |
|
|
|
|
|
const auto gparams = graph_params(res, ubatch, mctx, LLM_GRAPH_TYPE_DEFAULT); |
|
|
|
|
|
res->reset(); |
|
|
|
|
|
auto * gf = model.build_graph(gparams); |
|
|
|
|
|
this->n_outputs = save_n_outputs; |
|
|
|
|
|
|
|
|
if (!ggml_backend_sched_reserve(sched.get(), gf)) { |
|
|
LLAMA_LOG_ERROR("%s: failed to allocate compute buffers\n", __func__); |
|
|
return nullptr; |
|
|
} |
|
|
|
|
|
return gf; |
|
|
} |
|
|
|
|
|
llm_graph_params llama_context::graph_params( |
|
|
llm_graph_result * res, |
|
|
const llama_ubatch & ubatch, |
|
|
const llama_memory_context_i * mctx, |
|
|
llm_graph_type gtype) const { |
|
|
return { |
|
|
model.arch, |
|
|
model.hparams, |
|
|
cparams, |
|
|
ubatch, |
|
|
gtype, |
|
|
sched.get(), |
|
|
backend_cpu, |
|
|
&cvec, |
|
|
&loras, |
|
|
mctx, |
|
|
&cross, |
|
|
n_outputs, |
|
|
graph_get_cb(), |
|
|
res, |
|
|
}; |
|
|
} |
|
|
|
|
|
ggml_status llama_context::graph_compute( |
|
|
ggml_cgraph * gf, |
|
|
bool batched) { |
|
|
int n_threads = batched ? cparams.n_threads_batch : cparams.n_threads; |
|
|
ggml_threadpool_t tp = batched ? threadpool_batch : threadpool; |
|
|
|
|
|
if (backend_cpu != nullptr) { |
|
|
auto * reg = ggml_backend_dev_backend_reg(ggml_backend_get_device(backend_cpu)); |
|
|
auto * set_threadpool_fn = (decltype(ggml_backend_cpu_set_threadpool) *) ggml_backend_reg_get_proc_address(reg, "ggml_backend_cpu_set_threadpool"); |
|
|
set_threadpool_fn(backend_cpu, tp); |
|
|
} |
|
|
|
|
|
|
|
|
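    // set the number of threads for all the backends that support it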
for (const auto & set_n_threads_fn : set_n_threads_fns) { |
|
|
set_n_threads_fn.second(set_n_threads_fn.first, n_threads); |
|
|
} |
|
|
|
|
|
auto status = ggml_backend_sched_graph_compute_async(sched.get(), gf); |
|
|
if (status != GGML_STATUS_SUCCESS) { |
|
|
LLAMA_LOG_ERROR("%s: ggml_backend_sched_graph_compute_async failed with error %d\n", __func__, status); |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
return status; |
|
|
} |
|
|
|
|
|
llm_graph_cb llama_context::graph_get_cb() const { |
|
|
return [&](const llama_ubatch & ubatch, ggml_tensor * cur, const char * name, int il) { |
|
|
if (il >= 0) { |
|
|
ggml_format_name(cur, "%s-%d", name, il); |
|
|
} else { |
|
|
ggml_set_name(cur, name); |
|
|
} |
|
|
|
|
|
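        // when KQV offload is disabled, run the attention output ("kqv_merged_cont") on the CPU backend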
if (!cparams.offload_kqv) { |
|
|
if (strcmp(name, "kqv_merged_cont") == 0) { |
|
|
|
|
|
ggml_backend_sched_set_tensor_backend(sched.get(), cur, backend_cpu); |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
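        // assign the "norm" ops to the backend of their layer, when that backend supports them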
const bool full_offload = model.params.n_gpu_layers > (int) model.hparams.n_layer; |
|
|
if (ubatch.n_tokens < 32 || full_offload) { |
|
|
if (il != -1 && strcmp(name, "norm") == 0) { |
|
|
const auto & dev_layer = model.dev_layer(il); |
|
|
for (const auto & backend : backends) { |
|
|
if (ggml_backend_get_device(backend.get()) == dev_layer) { |
|
|
if (ggml_backend_supports_op(backend.get(), cur)) { |
|
|
ggml_backend_sched_set_tensor_backend(sched.get(), cur, backend.get()); |
|
|
} |
|
|
} |
|
|
} |
|
|
} |
|
|
} |
|
|
}; |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
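//
// state save/load
//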
class llama_io_write_dummy : public llama_io_write_i { |
|
|
public: |
|
|
llama_io_write_dummy() = default; |
|
|
|
|
|
void write(const void * , size_t size) override { |
|
|
size_written += size; |
|
|
} |
|
|
|
|
|
void write_tensor(const ggml_tensor * , size_t , size_t size) override { |
|
|
size_written += size; |
|
|
} |
|
|
|
|
|
size_t n_bytes() override { |
|
|
return size_written; |
|
|
} |
|
|
|
|
|
private: |
|
|
size_t size_written = 0; |
|
|
}; |
|
|
|
|
|
class llama_io_write_buffer : public llama_io_write_i { |
|
|
public: |
|
|
llama_io_write_buffer( |
|
|
uint8_t * p, size_t len) : ptr(p), buf_size(len) {} |
|
|
|
|
|
void write(const void * src, size_t size) override { |
|
|
if (size > buf_size) { |
|
|
throw std::runtime_error("unexpectedly reached end of buffer"); |
|
|
} |
|
|
memcpy(ptr, src, size); |
|
|
ptr += size; |
|
|
size_written += size; |
|
|
buf_size -= size; |
|
|
} |
|
|
|
|
|
void write_tensor(const ggml_tensor * tensor, size_t offset, size_t size) override { |
|
|
if (size > buf_size) { |
|
|
throw std::runtime_error("unexpectedly reached end of buffer"); |
|
|
} |
|
|
ggml_backend_tensor_get(tensor, ptr, offset, size); |
|
|
ptr += size; |
|
|
size_written += size; |
|
|
buf_size -= size; |
|
|
} |
|
|
|
|
|
size_t n_bytes() override { |
|
|
return size_written; |
|
|
} |
|
|
|
|
|
private: |
|
|
uint8_t * ptr; |
|
|
size_t buf_size = 0; |
|
|
size_t size_written = 0; |
|
|
}; |
|
|
|
|
|
class llama_io_read_buffer : public llama_io_read_i { |
|
|
public: |
|
|
llama_io_read_buffer(const uint8_t * p, size_t len) : ptr(p), buf_size(len) {} |
|
|
|
|
|
const uint8_t * read(size_t size) override { |
|
|
const uint8_t * base_ptr = ptr; |
|
|
if (size > buf_size) { |
|
|
throw std::runtime_error("unexpectedly reached end of buffer"); |
|
|
} |
|
|
ptr += size; |
|
|
size_read += size; |
|
|
buf_size -= size; |
|
|
return base_ptr; |
|
|
} |
|
|
|
|
|
void read_to(void * dst, size_t size) override { |
|
|
memcpy(dst, read(size), size); |
|
|
} |
|
|
|
|
|
size_t n_bytes() override { |
|
|
return size_read; |
|
|
} |
|
|
|
|
|
private: |
|
|
const uint8_t * ptr; |
|
|
size_t buf_size = 0; |
|
|
size_t size_read = 0; |
|
|
}; |
|
|
|
|
|
class llama_io_write_file : public llama_io_write_i { |
|
|
public: |
|
|
llama_io_write_file(llama_file * f) : file(f) {} |
|
|
|
|
|
void write(const void * src, size_t size) override { |
|
|
file->write_raw(src, size); |
|
|
size_written += size; |
|
|
} |
|
|
|
|
|
void write_tensor(const ggml_tensor * tensor, size_t offset, size_t size) override { |
|
|
temp_buffer.resize(size); |
|
|
ggml_backend_tensor_get(tensor, temp_buffer.data(), offset, size); |
|
|
write(temp_buffer.data(), temp_buffer.size()); |
|
|
} |
|
|
|
|
|
size_t n_bytes() override { |
|
|
return size_written; |
|
|
} |
|
|
|
|
|
private: |
|
|
llama_file * file; |
|
|
size_t size_written = 0; |
|
|
std::vector<uint8_t> temp_buffer; |
|
|
}; |
|
|
|
|
|
class llama_io_read_file : public llama_io_read_i { |
|
|
public: |
|
|
llama_io_read_file(llama_file * f) : file(f) {} |
|
|
|
|
|
void read_to(void * dst, size_t size) override { |
|
|
file->read_raw(dst, size); |
|
|
size_read += size; |
|
|
} |
|
|
|
|
|
const uint8_t * read(size_t size) override { |
|
|
temp_buffer.resize(size); |
|
|
read_to(temp_buffer.data(), size); |
|
|
return temp_buffer.data(); |
|
|
} |
|
|
|
|
|
size_t n_bytes() override { |
|
|
return size_read; |
|
|
} |
|
|
|
|
|
private: |
|
|
llama_file * file; |
|
|
size_t size_read = 0; |
|
|
std::vector<uint8_t> temp_buffer; |
|
|
}; |
|
|
|
|
|
size_t llama_context::state_get_size() { |
|
|
llama_io_write_dummy io; |
|
|
try { |
|
|
return state_write_data(io); |
|
|
} catch (const std::exception & err) { |
|
|
LLAMA_LOG_ERROR("%s: error getting state size: %s\n", __func__, err.what()); |
|
|
return 0; |
|
|
} |
|
|
} |
|
|
|
|
|
size_t llama_context::state_get_data(uint8_t * dst, size_t size) { |
|
|
llama_io_write_buffer io(dst, size); |
|
|
try { |
|
|
return state_write_data(io); |
|
|
} catch (const std::exception & err) { |
|
|
LLAMA_LOG_ERROR("%s: error saving state: %s\n", __func__, err.what()); |
|
|
return 0; |
|
|
} |
|
|
} |
|
|
|
|
|
size_t llama_context::state_set_data(const uint8_t * src, size_t size) { |
|
|
llama_io_read_buffer io(src, size); |
|
|
try { |
|
|
return state_read_data(io); |
|
|
} catch (const std::exception & err) { |
|
|
LLAMA_LOG_ERROR("%s: error loading state: %s\n", __func__, err.what()); |
|
|
return 0; |
|
|
} |
|
|
} |
|
|
|
|
|
size_t llama_context::state_seq_get_size(llama_seq_id seq_id) { |
|
|
llama_io_write_dummy io; |
|
|
try { |
|
|
return state_seq_write_data(io, seq_id); |
|
|
} catch (const std::exception & err) { |
|
|
LLAMA_LOG_ERROR("%s: error getting state size: %s\n", __func__, err.what()); |
|
|
return 0; |
|
|
} |
|
|
} |
|
|
|
|
|
size_t llama_context::state_seq_get_data(llama_seq_id seq_id, uint8_t * dst, size_t size) { |
|
|
llama_io_write_buffer io(dst, size); |
|
|
try { |
|
|
return state_seq_write_data(io, seq_id); |
|
|
} catch (const std::exception & err) { |
|
|
LLAMA_LOG_ERROR("%s: error saving state: %s\n", __func__, err.what()); |
|
|
return 0; |
|
|
} |
|
|
} |
|
|
|
|
|
size_t llama_context::state_seq_set_data(llama_seq_id seq_id, const uint8_t * src, size_t size) { |
|
|
llama_io_read_buffer io(src, size); |
|
|
try { |
|
|
return state_seq_read_data(io, seq_id); |
|
|
} catch (const std::exception & err) { |
|
|
LLAMA_LOG_ERROR("%s: error loading state: %s\n", __func__, err.what()); |
|
|
return 0; |
|
|
} |
|
|
} |
|
|
|
|
|
bool llama_context::state_load_file(const char * filepath, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) { |
|
|
llama_file file(filepath, "rb"); |
|
|
|
|
|
|
|
|
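    // sanity checks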
{ |
|
|
const uint32_t magic = file.read_u32(); |
|
|
const uint32_t version = file.read_u32(); |
|
|
|
|
|
if (magic != LLAMA_SESSION_MAGIC || version != LLAMA_SESSION_VERSION) { |
|
|
LLAMA_LOG_ERROR("%s: unknown (magic, version) for session file: %08x, %08x\n", __func__, magic, version); |
|
|
return false; |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
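    // load the prompt tokens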
{ |
|
|
const uint32_t n_token_count = file.read_u32(); |
|
|
|
|
|
if (n_token_count > n_token_capacity) { |
|
|
LLAMA_LOG_ERROR("%s: token count in session file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity); |
|
|
return false; |
|
|
} |
|
|
|
|
|
file.read_raw(tokens_out, sizeof(llama_token) * n_token_count); |
|
|
*n_token_count_out = n_token_count; |
|
|
} |
|
|
|
|
|
|
|
|
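    // restore the context state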
{ |
|
|
const size_t n_state_size_cur = file.size() - file.tell(); |
|
|
|
|
|
        llama_io_read_file io(&file);
|
|
const size_t n_read = state_read_data(io); |
|
|
|
|
|
if (n_read != n_state_size_cur) { |
|
|
LLAMA_LOG_ERROR("%s: did not read all of the session file data! size %zu, got %zu\n", __func__, n_state_size_cur, n_read); |
|
|
return false; |
|
|
} |
|
|
} |
|
|
|
|
|
return true; |
|
|
} |
|
|
|
|
|
bool llama_context::state_save_file(const char * filepath, const llama_token * tokens, size_t n_token_count) { |
|
|
llama_file file(filepath, "wb"); |
|
|
|
|
|
file.write_u32(LLAMA_SESSION_MAGIC); |
|
|
file.write_u32(LLAMA_SESSION_VERSION); |
|
|
|
|
|
|
|
|
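    // save the prompt tokens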
file.write_u32((uint32_t) n_token_count); |
|
|
file.write_raw(tokens, sizeof(llama_token) * n_token_count); |
|
|
|
|
|
|
|
|
llama_io_write_file io(&file); |
|
|
state_write_data(io); |
|
|
|
|
|
return true; |
|
|
} |
|
|
|
|
|
size_t llama_context::state_seq_load_file(llama_seq_id seq_id, const char * filepath, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) { |
|
|
llama_file file(filepath, "rb"); |
|
|
|
|
|
|
|
|
{ |
|
|
const uint32_t magic = file.read_u32(); |
|
|
const uint32_t version = file.read_u32(); |
|
|
|
|
|
if (magic != LLAMA_STATE_SEQ_MAGIC || version != LLAMA_STATE_SEQ_VERSION) { |
|
|
LLAMA_LOG_ERROR("%s: unknown (magic, version) for sequence state file: %08x, %08x\n", __func__, magic, version); |
|
|
return 0; |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
{ |
|
|
const uint32_t n_token_count = file.read_u32(); |
|
|
|
|
|
if (n_token_count > n_token_capacity) { |
|
|
LLAMA_LOG_ERROR("%s: token count in sequence state file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity); |
|
|
return 0; |
|
|
} |
|
|
|
|
|
file.read_raw(tokens_out, sizeof(llama_token) * n_token_count); |
|
|
*n_token_count_out = n_token_count; |
|
|
} |
|
|
|
|
|
|
|
|
{ |
|
|
const size_t state_size = file.size() - file.tell(); |
|
|
llama_io_read_file io(&file); |
|
|
const size_t nread = state_seq_read_data(io, seq_id); |
|
|
if (!nread) { |
|
|
LLAMA_LOG_ERROR("%s: failed to restore sequence state\n", __func__); |
|
|
return 0; |
|
|
} |
|
|
GGML_ASSERT(nread <= state_size); |
|
|
GGML_ASSERT(nread + sizeof(uint32_t) * 3 + sizeof(llama_token) * *n_token_count_out == file.tell()); |
|
|
} |
|
|
|
|
|
return file.tell(); |
|
|
} |
|
|
|
|
|
size_t llama_context::state_seq_save_file(llama_seq_id seq_id, const char * filepath, const llama_token * tokens, size_t n_token_count) { |
|
|
llama_file file(filepath, "wb"); |
|
|
|
|
|
file.write_u32(LLAMA_STATE_SEQ_MAGIC); |
|
|
file.write_u32(LLAMA_STATE_SEQ_VERSION); |
|
|
|
|
|
|
|
|
file.write_u32((uint32_t) n_token_count); |
|
|
file.write_raw(tokens, sizeof(llama_token) * n_token_count); |
|
|
|
|
|
|
|
|
llama_io_write_file io(&file); |
|
|
state_seq_write_data(io, seq_id); |
|
|
|
|
|
const size_t res = file.tell(); |
|
|
GGML_ASSERT(res == sizeof(uint32_t) * 3 + sizeof(llama_token) * n_token_count + io.n_bytes()); |
|
|
|
|
|
return res; |
|
|
} |
|
|
|
|
|
size_t llama_context::state_write_data(llama_io_write_i & io) { |
|
|
LLAMA_LOG_DEBUG("%s: writing state\n", __func__); |
|
|
|
|
|
|
|
|
{ |
|
|
LLAMA_LOG_DEBUG("%s: - writing model info\n", __func__); |
|
|
|
|
|
const std::string arch_str = llm_arch_name(model.arch); |
|
|
io.write_string(arch_str); |
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
{ |
|
|
LLAMA_LOG_DEBUG("%s: - writing output ids\n", __func__); |
|
|
|
|
|
const auto n_outputs = this->n_outputs; |
|
|
const auto & output_ids = this->output_ids; |
|
|
|
|
|
std::vector<int32_t> w_output_pos; |
|
|
|
|
|
w_output_pos.resize(n_outputs); |
|
|
|
|
|
|
|
|
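        // build the inverse mapping: w_output_pos[output index] = batch position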
for (size_t i = 0; i < n_batch(); ++i) { |
|
|
|
|
|
int64_t pos = output_ids[i]; |
|
|
if (pos >= 0) { |
|
|
GGML_ASSERT(pos < n_outputs); |
|
|
w_output_pos[pos] = i; |
|
|
} |
|
|
} |
|
|
|
|
|
io.write(&n_outputs, sizeof(n_outputs)); |
|
|
|
|
|
if (n_outputs) { |
|
|
io.write(w_output_pos.data(), n_outputs * sizeof(int32_t)); |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
{ |
|
|
LLAMA_LOG_DEBUG("%s: - writing logits\n", __func__); |
|
|
|
|
|
const uint64_t logits_size = std::min((uint64_t) this->logits_size, (uint64_t) n_outputs * model.vocab.n_tokens()); |
|
|
|
|
|
io.write(&logits_size, sizeof(logits_size)); |
|
|
|
|
|
if (logits_size) { |
|
|
io.write(logits, logits_size * sizeof(float)); |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
{ |
|
|
LLAMA_LOG_DEBUG("%s: - writing embeddings\n", __func__); |
|
|
|
|
|
const uint64_t embd_size = std::min((uint64_t) this->embd_size, (uint64_t) n_outputs * model.hparams.n_embd); |
|
|
|
|
|
io.write(&embd_size, sizeof(embd_size)); |
|
|
|
|
|
if (embd_size) { |
|
|
io.write(embd, embd_size * sizeof(float)); |
|
|
} |
|
|
} |
|
|
|
|
|
if (memory != nullptr) { |
|
|
LLAMA_LOG_DEBUG("%s: - writing KV self\n", __func__); |
|
|
memory->state_write(io); |
|
|
} |
|
|
|
|
|
return io.n_bytes(); |
|
|
} |
|
|
|
|
|
size_t llama_context::state_read_data(llama_io_read_i & io) { |
|
|
LLAMA_LOG_DEBUG("%s: reading state\n", __func__); |
|
|
|
|
|
|
|
|
{ |
|
|
LLAMA_LOG_DEBUG("%s: - reading model info\n", __func__); |
|
|
|
|
|
const std::string cur_arch_str = llm_arch_name(model.arch); |
|
|
|
|
|
std::string arch_str; |
|
|
io.read_string(arch_str); |
|
|
if (cur_arch_str != arch_str) { |
|
|
throw std::runtime_error(format("wrong model arch: '%s' instead of '%s'", arch_str.c_str(), cur_arch_str.c_str())); |
|
|
} |
|
|
|
|
|
} |
|
|
|
|
|
|
|
|
{ |
|
|
LLAMA_LOG_DEBUG("%s: - reading output ids\n", __func__); |
|
|
|
|
|
auto n_outputs = this->n_outputs; |
|
|
io.read_to(&n_outputs, sizeof(n_outputs)); |
|
|
|
|
|
if (n_outputs > output_reserve(n_outputs)) { |
|
|
throw std::runtime_error("could not reserve outputs"); |
|
|
} |
|
|
|
|
|
std::vector<int32_t> output_pos; |
|
|
|
|
|
if (n_outputs) { |
|
|
output_pos.resize(n_outputs); |
|
|
io.read_to(output_pos.data(), n_outputs * sizeof(int32_t)); |
|
|
|
|
|
for (int32_t i = 0; i < (int32_t) output_pos.size(); ++i) { |
|
|
int32_t id = output_pos[i]; |
|
|
if ((uint32_t) id >= n_batch()) { |
|
|
throw std::runtime_error(format("invalid output id, %d does not fit in batch size of %u", id, n_batch())); |
|
|
} |
|
|
this->output_ids[id] = i; |
|
|
} |
|
|
|
|
|
this->n_outputs = n_outputs; |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
{ |
|
|
LLAMA_LOG_DEBUG("%s: - reading logits\n", __func__); |
|
|
|
|
|
uint64_t logits_size; |
|
|
io.read_to(&logits_size, sizeof(logits_size)); |
|
|
|
|
|
if (this->logits_size < logits_size) { |
|
|
throw std::runtime_error("logits buffer too small"); |
|
|
} |
|
|
|
|
|
if (logits_size) { |
|
|
io.read_to(this->logits, logits_size * sizeof(float)); |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
{ |
|
|
LLAMA_LOG_DEBUG("%s: - reading embeddings\n", __func__); |
|
|
|
|
|
uint64_t embd_size; |
|
|
io.read_to(&embd_size, sizeof(embd_size)); |
|
|
|
|
|
if (this->embd_size < embd_size) { |
|
|
throw std::runtime_error("embeddings buffer too small"); |
|
|
} |
|
|
|
|
|
if (embd_size) { |
|
|
io.read_to(this->embd, embd_size * sizeof(float)); |
|
|
} |
|
|
} |
|
|
|
|
|
if (memory) { |
|
|
LLAMA_LOG_DEBUG("%s: - reading KV self\n", __func__); |
|
|
|
|
|
memory->state_read(io); |
|
|
} |
|
|
|
|
|
return io.n_bytes(); |
|
|
} |
|
|
|
|
|
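// per-sequence state contains only the memory module (KV cache) data for the given sequence;
// logits and embeddings are not included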
size_t llama_context::state_seq_write_data(llama_io_write_i & io, llama_seq_id seq_id) { |
|
|
GGML_UNUSED(seq_id); |
|
|
|
|
|
if (memory) { |
|
|
memory->state_write(io, seq_id); |
|
|
} |
|
|
|
|
|
return io.n_bytes(); |
|
|
} |
|
|
|
|
|
size_t llama_context::state_seq_read_data(llama_io_read_i & io, llama_seq_id seq_id) { |
|
|
GGML_UNUSED(seq_id); |
|
|
|
|
|
if (memory) { |
|
|
memory->state_read(io, seq_id); |
|
|
} |
|
|
|
|
|
return io.n_bytes(); |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
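// convert the internal microsecond counters to milliseconds; the eval counts are clamped to >= 1
// so that the perf printing code can divide by them safely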
llama_perf_context_data llama_context::perf_get_data() const { |
|
|
llama_perf_context_data data = {}; |
|
|
|
|
|
data.t_start_ms = 1e-3 * t_start_us; |
|
|
data.t_load_ms = 1e-3 * t_load_us; |
|
|
data.t_p_eval_ms = 1e-3 * t_p_eval_us; |
|
|
data.t_eval_ms = 1e-3 * t_eval_us; |
|
|
data.n_p_eval = std::max(1, n_p_eval); |
|
|
data.n_eval = std::max(1, n_eval); |
|
|
data.n_reused = std::max(0, n_reused); |
|
|
|
|
|
return data; |
|
|
} |
|
|
|
|
|
void llama_context::perf_reset() { |
|
|
t_start_us = ggml_time_us(); |
|
|
t_eval_us = n_eval = 0; |
|
|
t_p_eval_us = n_p_eval = 0; |
|
|
n_reused = 0; |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
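// mark a tensor as a trainable parameter: only F32 tensors that pass the user filter are
// considered, and the token embedding / RoPE frequency tensors are always excluded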
static void llama_set_param(struct ggml_tensor * tensor, llama_opt_param_filter param_filter, void * userdata) { |
|
|
if (!tensor || tensor->type != GGML_TYPE_F32) { |
|
|
return; |
|
|
} |
|
|
if (!param_filter(tensor, userdata)) { |
|
|
return; |
|
|
} |
|
|
if (strcmp(tensor->name, "token_embd.weight") == 0) { |
|
|
return; |
|
|
} |
|
|
if (strcmp(tensor->name, "rope_freqs.weight") == 0) { |
|
|
return; |
|
|
} |
|
|
ggml_set_param(tensor); |
|
|
} |
|
|
|
|
|
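// set up the optimization context: n_ctx_train defines the window size per datapoint and must be
// a multiple of n_batch, which in turn must be a multiple of n_ubatch
// (the gradient accumulation period is n_batch/n_ubatch)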
void llama_context::opt_init(struct llama_model * model, struct llama_opt_params lopt_params) { |
|
|
GGML_ASSERT(!opt_ctx); |
|
|
model->hparams.n_ctx_train = lopt_params.n_ctx_train > 0 ? lopt_params.n_ctx_train : n_ctx(); |
|
|
const uint32_t n_batch = std::min(this->n_batch(), model->hparams.n_ctx_train); |
|
|
const uint32_t n_ubatch = std::min(this->n_ubatch(), n_batch); |
|
|
GGML_ASSERT(model->hparams.n_ctx_train % n_batch == 0); |
|
|
GGML_ASSERT(n_batch % n_ubatch == 0); |
|
|
|
|
|
ggml_opt_params opt_params = ggml_opt_default_params(sched.get(), GGML_OPT_LOSS_TYPE_CROSS_ENTROPY); |
|
|
opt_params.opt_period = n_batch / n_ubatch; |
|
|
opt_params.get_opt_pars = lopt_params.get_opt_pars; |
|
|
opt_params.get_opt_pars_ud = lopt_params.get_opt_pars_ud; |
|
|
|
|
|
opt_ctx = ggml_opt_init(opt_params); |
|
|
|
|
|
llama_opt_param_filter param_filter = lopt_params.param_filter; |
|
|
void * param_filter_ud = lopt_params.param_filter_ud; |
|
|
|
|
|
|
|
|
llama_set_param(model->type_embd, param_filter, param_filter_ud); |
|
|
llama_set_param(model->pos_embd, param_filter, param_filter_ud); |
|
|
llama_set_param(model->tok_norm, param_filter, param_filter_ud); |
|
|
llama_set_param(model->tok_norm_b, param_filter, param_filter_ud); |
|
|
llama_set_param(model->output_norm, param_filter, param_filter_ud); |
|
|
llama_set_param(model->output_norm_b, param_filter, param_filter_ud); |
|
|
llama_set_param(model->output, param_filter, param_filter_ud); |
|
|
llama_set_param(model->output_b, param_filter, param_filter_ud); |
|
|
llama_set_param(model->output_norm_enc, param_filter, param_filter_ud); |
|
|
llama_set_param(model->cls, param_filter, param_filter_ud); |
|
|
llama_set_param(model->cls_b, param_filter, param_filter_ud); |
|
|
llama_set_param(model->cls_out, param_filter, param_filter_ud); |
|
|
llama_set_param(model->cls_out_b, param_filter, param_filter_ud); |
|
|
|
|
|
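// llama_layer is treated here as a plain array of ggml_tensor pointers, so every per-layer
// tensor can be registered with a single loop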
for (struct llama_layer & layer : model->layers) { |
|
|
for (size_t i = 0; i < sizeof(layer)/sizeof(struct ggml_tensor *); ++i) { |
|
|
llama_set_param(reinterpret_cast<struct ggml_tensor **>(&layer)[i], param_filter, param_filter_ud); |
|
|
} |
|
|
} |
|
|
} |
|
|
|
|
|
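// run one datapoint (a window of n_ctx_train tokens) through the optimizer: the window is split
// into batches and micro-batches, targets come from the sparse label vector (one target token id
// per position), and ggml_opt_eval() performs the forward (and, when training, backward) pass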
void llama_context::opt_epoch_iter( |
|
|
ggml_opt_dataset_t dataset, |
|
|
ggml_opt_result_t result, |
|
|
const std::vector<llama_token> & tokens, |
|
|
const std::vector<llama_token> & labels_sparse, |
|
|
llama_batch & batch, |
|
|
ggml_opt_epoch_callback callback, |
|
|
bool train, |
|
|
int64_t idata_in_loop, |
|
|
int64_t ndata_in_loop, |
|
|
int64_t t_loop_start) { |
|
|
GGML_ASSERT(opt_ctx); |
|
|
const uint32_t n_ctx = llama_model_n_ctx_train(&model); |
|
|
const uint32_t n_batch = std::min(this->n_batch(), n_ctx); |
|
|
const uint32_t n_ubatch = std::min(this->n_ubatch(), n_batch); |
|
|
|
|
|
memory->clear(true); |
|
|
|
|
|
for (uint32_t pos_ctx = 0; pos_ctx < n_ctx; pos_ctx += n_batch) { |
|
|
batch.n_tokens = n_batch; |
|
|
for (uint32_t pos_batch = 0; pos_batch < n_batch; ++pos_batch) { |
|
|
batch.token [pos_batch] = tokens[pos_ctx + pos_batch]; |
|
|
batch.pos [pos_batch] = pos_ctx + pos_batch; |
|
|
batch.n_seq_id[pos_batch] = 1; |
|
|
batch.seq_id [pos_batch][0] = 0; |
|
|
batch.logits [pos_batch] = true; |
|
|
} |
|
|
|
|
|
if (!balloc->init(batch, model.vocab, nullptr, model.hparams.n_embd, cparams.kv_unified ? LLAMA_MAX_SEQ : cparams.n_seq_max, true)) { |
|
|
LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__); |
|
|
return; |
|
|
} |
|
|
|
|
|
const uint32_t n_tokens_all = balloc->get_n_tokens(); |
|
|
|
|
|
n_queued_tokens += n_tokens_all; |
|
|
|
|
|
embd_seq.clear(); |
|
|
|
|
|
uint32_t n_outputs_all = n_tokens_all; |
|
|
|
|
|
auto mctx = memory->init_batch(*balloc, cparams.n_ubatch, true); |
|
|
if (!mctx || mctx->get_status() != LLAMA_MEMORY_STATUS_SUCCESS) { |
|
|
LLAMA_LOG_ERROR("%s: could not initialize batch\n", __func__); |
|
|
break; |
|
|
} |
|
|
|
|
|
|
|
|
if (output_reserve(n_outputs_all) < n_outputs_all) {
LLAMA_LOG_ERROR("%s: could not reserve space for batch with %u outputs\n", __func__, n_outputs_all);
GGML_ABORT("TODO: handle this error");
}
|
|
|
|
|
uint32_t pos_batch = 0; |
|
|
do { |
|
|
const auto & ubatch = mctx->get_ubatch(); |
|
|
|
|
|
n_outputs = ubatch.n_tokens; |
|
|
|
|
|
if (!mctx->apply()) { |
|
|
LLAMA_LOG_ERROR("%s: failed to update the memory context\n", __func__); |
|
|
break; |
|
|
} |
|
|
|
|
|
auto * res = gf_res_prev.get(); |
|
|
|
|
|
const auto gparams = graph_params(res, ubatch, mctx.get(), LLM_GRAPH_TYPE_DEFAULT); |
|
|
|
|
|
res->reset(); |
|
|
|
|
|
auto * gf = model.build_graph(gparams); |
|
|
|
|
|
struct ggml_context * ctx_compute_opt; |
|
|
{ |
|
|
const size_t size_gf = ggml_graph_size(gf); |
|
|
const size_t size_meta = 4*size_gf*ggml_tensor_overhead() + 2*ggml_graph_overhead_custom(size_gf, true); |
|
|
struct ggml_init_params params = {
/*.mem_size   =*/ size_meta,
/*.mem_buffer =*/ nullptr,
/*.no_alloc   =*/ true,
};
|
|
ctx_compute_opt = ggml_init(params); |
|
|
} |
|
|
ggml_opt_prepare_alloc(opt_ctx, ctx_compute_opt, gf, res->get_tokens(), res->get_logits()); |
|
|
ggml_opt_alloc(opt_ctx, train); |
|
|
|
|
|
res->set_inputs(&ubatch); |
|
|
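// convert the sparse labels (one target token id per position) into one-hot rows of the
// optimizer's label tensor for this micro-batch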
{ |
|
|
struct ggml_tensor * labels = ggml_opt_labels(opt_ctx); |
|
|
GGML_ASSERT(labels->ne[1] == n_ubatch); |
|
|
ggml_set_zero(labels); |
|
|
const float onef = 1.0f; |
|
|
for (uint32_t pos_ubatch = 0; pos_ubatch < n_ubatch; ++pos_ubatch) { |
|
|
const uint32_t ilabel = pos_ctx + pos_batch + pos_ubatch; |
|
|
GGML_ASSERT(labels_sparse[ilabel] < labels->ne[0]); |
|
|
ggml_backend_tensor_set(labels, &onef, (pos_ubatch*labels->ne[0] + labels_sparse[ilabel])*sizeof(float), sizeof(float)); |
|
|
} |
|
|
} |
|
|
ggml_opt_eval(opt_ctx, result); |
|
|
if (callback) { |
|
|
callback(train, opt_ctx, dataset, result, idata_in_loop + (pos_ctx + pos_batch)/n_ubatch + 1, ndata_in_loop, t_loop_start); |
|
|
} |
|
|
ggml_free(ctx_compute_opt); |
|
|
|
|
|
pos_batch += ubatch.n_tokens; |
|
|
} while (mctx->next()); |
|
|
} |
|
|
} |
|
|
|
|
|
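// one full pass over the dataset: the first idata_split datapoints are used for training,
// the remainder for evaluation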
void llama_context::opt_epoch( |
|
|
ggml_opt_dataset_t dataset, |
|
|
ggml_opt_result_t result_train, |
|
|
ggml_opt_result_t result_eval, |
|
|
int64_t idata_split, |
|
|
ggml_opt_epoch_callback callback_train, |
|
|
ggml_opt_epoch_callback callback_eval) { |
|
|
const uint32_t n_ctx = this->n_ctx(); |
|
|
const uint32_t n_batch = std::min(cparams.n_batch, n_ctx); |
|
|
const uint32_t n_ubatch = std::min(cparams.n_ubatch, n_batch); |
|
|
const int64_t ndata = ggml_opt_dataset_ndata(dataset); |
|
|
|
|
|
GGML_ASSERT(idata_split >= 0); |
|
|
GGML_ASSERT(idata_split <= ndata); |
|
|
|
|
|
const uint32_t ubatch_per_ctx = n_ctx / n_ubatch; |
|
|
|
|
|
struct llama_batch batch = llama_batch_init(n_batch, 0, 1); |
|
|
std::vector<llama_token> tokens(n_ctx); |
|
|
std::vector<llama_token> labels_sparse(n_ctx); |
|
|
|
|
|
int64_t idata = 0; |
|
|
|
|
|
int64_t t_loop_start = ggml_time_us(); |
|
|
int64_t ndata_in_loop = idata_split*ubatch_per_ctx; |
|
|
for (; idata < idata_split; ++idata) { |
|
|
constexpr bool train = true; |
|
|
const int64_t idata_in_loop = idata*ubatch_per_ctx; |
|
|
|
|
|
ggml_opt_dataset_get_batch_host(dataset, tokens.data(), n_ctx*sizeof(llama_token), labels_sparse.data(), idata); |
|
|
opt_epoch_iter(dataset, result_train, tokens, labels_sparse, batch, |
|
|
callback_train, train, idata_in_loop, ndata_in_loop, t_loop_start); |
|
|
} |
|
|
|
|
|
t_loop_start = ggml_time_us(); |
|
|
ndata_in_loop = (ndata - idata_split)*ubatch_per_ctx; |
|
|
for (; idata < ndata; ++idata) { |
|
|
constexpr bool train = false; |
|
|
const int64_t idata_in_loop = (idata - idata_split)*ubatch_per_ctx; |
|
|
|
|
|
ggml_opt_dataset_get_batch_host(dataset, tokens.data(), n_ctx*sizeof(llama_token), labels_sparse.data(), idata); |
|
|
opt_epoch_iter(dataset, result_eval, tokens, labels_sparse, batch, |
|
|
callback_eval, train, idata_in_loop, ndata_in_loop, t_loop_start); |
|
|
} |
|
|
|
|
|
llama_batch_free(batch); |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
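// note: the initializer below is positional - the values correspond one-to-one to the fields of
// llama_context_params in the order they are declared in llama.h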
llama_context_params llama_context_default_params() { |
|
|
llama_context_params result = { |
|
|
512, |
|
|
2048, |
|
|
512, |
|
|
1, |
|
|
GGML_DEFAULT_N_THREADS, |
|
|
GGML_DEFAULT_N_THREADS, |
|
|
LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED, |
|
|
LLAMA_POOLING_TYPE_UNSPECIFIED, |
|
|
LLAMA_ATTENTION_TYPE_UNSPECIFIED, |
|
|
0.0f, |
|
|
0.0f, |
|
|
-1.0f, |
|
|
1.0f, |
|
|
32.0f, |
|
|
1.0f, |
|
|
0, |
|
|
-1.0f, |
|
|
nullptr, |
|
|
nullptr, |
|
|
GGML_TYPE_F16, |
|
|
GGML_TYPE_F16, |
|
|
nullptr, |
|
|
nullptr, |
|
|
false, |
|
|
true, |
|
|
false, |
|
|
true, |
|
|
true, |
|
|
true, |
|
|
false, |
|
|
}; |
|
|
|
|
|
return result; |
|
|
} |
|
|
|
|
|
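// validate the parameters that cannot be fixed up later and construct the context;
// any exception thrown by the constructor is converted into a nullptr return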
llama_context * llama_init_from_model( |
|
|
llama_model * model, |
|
|
llama_context_params params) { |
|
|
if (!model) { |
|
|
LLAMA_LOG_ERROR("%s: model cannot be NULL\n", __func__); |
|
|
return nullptr; |
|
|
} |
|
|
|
|
|
if (params.n_batch == 0 && params.n_ubatch == 0) { |
|
|
LLAMA_LOG_ERROR("%s: n_batch and n_ubatch cannot both be zero\n", __func__); |
|
|
return nullptr; |
|
|
} |
|
|
|
|
|
if (params.n_ctx == 0 && model->hparams.n_ctx_train == 0) { |
|
|
LLAMA_LOG_ERROR("%s: n_ctx and model->hparams.n_ctx_train cannot both be zero\n", __func__); |
|
|
return nullptr; |
|
|
} |
|
|
|
|
|
if (params.flash_attn && model->arch == LLM_ARCH_GROK) { |
|
|
LLAMA_LOG_WARN("%s: flash_attn is not compatible with Grok - forcing off\n", __func__); |
|
|
params.flash_attn = false; |
|
|
} |
|
|
|
|
|
if (ggml_is_quantized(params.type_v) && !params.flash_attn) { |
|
|
LLAMA_LOG_ERROR("%s: V cache quantization requires flash_attn\n", __func__); |
|
|
return nullptr; |
|
|
} |
|
|
|
|
|
try { |
|
|
auto * ctx = new llama_context(*model, params); |
|
|
return ctx; |
|
|
} catch (const std::exception & err) { |
|
|
LLAMA_LOG_ERROR("%s: failed to initialize the context: %s\n", __func__, err.what()); |
|
|
} |
|
|
|
|
|
return nullptr; |
|
|
} |
|
|
|
|
|
|
|
|
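// deprecated name kept for backwards compatibility - use llama_init_from_model() instead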
llama_context * llama_new_context_with_model( |
|
|
llama_model * model, |
|
|
llama_context_params params) { |
|
|
return llama_init_from_model(model, params); |
|
|
} |
|
|
|
|
|
void llama_free(llama_context * ctx) { |
|
|
delete ctx; |
|
|
} |
|
|
|
|
|
uint32_t llama_n_ctx(const llama_context * ctx) { |
|
|
return ctx->n_ctx(); |
|
|
} |
|
|
|
|
|
uint32_t llama_n_batch(const llama_context * ctx) { |
|
|
return ctx->n_batch(); |
|
|
} |
|
|
|
|
|
uint32_t llama_n_ubatch(const llama_context * ctx) { |
|
|
return ctx->n_ubatch(); |
|
|
} |
|
|
|
|
|
uint32_t llama_n_seq_max(const llama_context * ctx) { |
|
|
return ctx->n_seq_max(); |
|
|
} |
|
|
|
|
|
const llama_model * llama_get_model(const llama_context * ctx) { |
|
|
return &ctx->get_model(); |
|
|
} |
|
|
|
|
|
|
|
|
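// deprecated: returns the context memory as a llama_kv_cache, or nullptr if the memory module
// is not a KV cache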
llama_kv_cache * llama_get_kv_self(llama_context * ctx) { |
|
|
return dynamic_cast<llama_kv_cache *>(ctx->get_memory()); |
|
|
} |
|
|
|
|
|
|
|
|
void llama_kv_self_update(llama_context * ctx) { |
|
|
ctx->kv_self_update(false); |
|
|
} |
|
|
|
|
|
enum llama_pooling_type llama_pooling_type(const llama_context * ctx) { |
|
|
return ctx->pooling_type(); |
|
|
} |
|
|
|
|
|
void llama_attach_threadpool( |
|
|
llama_context * ctx, |
|
|
ggml_threadpool_t threadpool, |
|
|
ggml_threadpool_t threadpool_batch) { |
|
|
ctx->attach_threadpool(threadpool, threadpool_batch); |
|
|
} |
|
|
|
|
|
void llama_detach_threadpool(llama_context * ctx) { |
|
|
ctx->detach_threadpool(); |
|
|
} |
|
|
|
|
|
void llama_set_n_threads(llama_context * ctx, int32_t n_threads, int32_t n_threads_batch) { |
|
|
ctx->set_n_threads(n_threads, n_threads_batch); |
|
|
} |
|
|
|
|
|
int32_t llama_n_threads(llama_context * ctx) { |
|
|
return ctx->n_threads(); |
|
|
} |
|
|
|
|
|
int32_t llama_n_threads_batch(llama_context * ctx) { |
|
|
return ctx->n_threads_batch(); |
|
|
} |
|
|
|
|
|
void llama_set_abort_callback(llama_context * ctx, bool (*abort_callback)(void * data), void * abort_callback_data) { |
|
|
ctx->set_abort_callback(abort_callback, abort_callback_data); |
|
|
} |
|
|
|
|
|
void llama_set_embeddings(llama_context * ctx, bool embeddings) { |
|
|
ctx->set_embeddings(embeddings); |
|
|
} |
|
|
|
|
|
void llama_set_causal_attn(llama_context * ctx, bool causal_attn) { |
|
|
ctx->set_causal_attn(causal_attn); |
|
|
} |
|
|
|
|
|
void llama_set_warmup(llama_context * ctx, bool warmup) { |
|
|
ctx->set_warmup(warmup); |
|
|
} |
|
|
|
|
|
void llama_synchronize(llama_context * ctx) { |
|
|
ctx->synchronize(); |
|
|
} |
|
|
|
|
|
float * llama_get_logits(llama_context * ctx) { |
|
|
ctx->synchronize(); |
|
|
|
|
|
return ctx->get_logits(); |
|
|
} |
|
|
|
|
|
float * llama_get_logits_ith(llama_context * ctx, int32_t i) { |
|
|
ctx->synchronize(); |
|
|
|
|
|
return ctx->get_logits_ith(i); |
|
|
} |
|
|
|
|
|
float * llama_get_embeddings(llama_context * ctx) { |
|
|
ctx->synchronize(); |
|
|
|
|
|
return ctx->get_embeddings(); |
|
|
} |
|
|
|
|
|
float * llama_get_embeddings_ith(llama_context * ctx, int32_t i) { |
|
|
ctx->synchronize(); |
|
|
|
|
|
return ctx->get_embeddings_ith(i); |
|
|
} |
|
|
|
|
|
float * llama_get_embeddings_seq(llama_context * ctx, llama_seq_id seq_id) { |
|
|
ctx->synchronize(); |
|
|
|
|
|
return ctx->get_embeddings_seq(seq_id); |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
int32_t llama_set_adapter_lora( |
|
|
llama_context * ctx, |
|
|
llama_adapter_lora * adapter, |
|
|
float scale) { |
|
|
ctx->set_adapter_lora(adapter, scale); |
|
|
|
|
|
return 0; |
|
|
} |
|
|
|
|
|
int32_t llama_rm_adapter_lora( |
|
|
llama_context * ctx, |
|
|
llama_adapter_lora * adapter) { |
|
|
bool res = ctx->rm_adapter_lora(adapter); |
|
|
|
|
|
return res ? 0 : -1; |
|
|
} |
|
|
|
|
|
void llama_clear_adapter_lora(llama_context * ctx) { |
|
|
ctx->clear_adapter_lora(); |
|
|
} |
|
|
|
|
|
int32_t llama_apply_adapter_cvec( |
|
|
llama_context * ctx, |
|
|
const float * data, |
|
|
size_t len, |
|
|
int32_t n_embd, |
|
|
int32_t il_start, |
|
|
int32_t il_end) { |
|
|
bool res = ctx->apply_adapter_cvec(data, len, n_embd, il_start, il_end); |
|
|
|
|
|
return res ? 0 : -1; |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
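//
// memory (KV cache) manipulation API
//
// the llama_memory_* functions below are thin, null-safe wrappers around the memory interface.
//
// Example (sketch, hypothetical variables n_keep/n_discard): shrink sequence 0 by dropping
// n_discard tokens after the first n_keep ones and shifting the remainder back:
//
//   llama_memory_t mem = llama_get_memory(ctx);
//   llama_memory_seq_rm (mem, 0, n_keep, n_keep + n_discard);
//   llama_memory_seq_add(mem, 0, n_keep + n_discard, -1, -n_discard);
//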
llama_memory_t llama_get_memory(const struct llama_context * ctx) { |
|
|
return ctx->get_memory(); |
|
|
} |
|
|
|
|
|
void llama_memory_clear(llama_memory_t mem, bool data) { |
|
|
if (!mem) { |
|
|
return; |
|
|
} |
|
|
|
|
|
mem->clear(data); |
|
|
} |
|
|
|
|
|
bool llama_memory_seq_rm( |
|
|
llama_memory_t mem, |
|
|
llama_seq_id seq_id, |
|
|
llama_pos p0, |
|
|
llama_pos p1) { |
|
|
if (!mem) { |
|
|
return true; |
|
|
} |
|
|
|
|
|
return mem->seq_rm(seq_id, p0, p1); |
|
|
} |
|
|
|
|
|
void llama_memory_seq_cp( |
|
|
llama_memory_t mem, |
|
|
llama_seq_id seq_id_src, |
|
|
llama_seq_id seq_id_dst, |
|
|
llama_pos p0, |
|
|
llama_pos p1) { |
|
|
if (!mem) { |
|
|
return; |
|
|
} |
|
|
|
|
|
mem->seq_cp(seq_id_src, seq_id_dst, p0, p1); |
|
|
} |
|
|
|
|
|
void llama_memory_seq_keep( |
|
|
llama_memory_t mem, |
|
|
llama_seq_id seq_id) { |
|
|
if (!mem) { |
|
|
return; |
|
|
} |
|
|
|
|
|
mem->seq_keep(seq_id); |
|
|
} |
|
|
|
|
|
void llama_memory_seq_add( |
|
|
llama_memory_t mem, |
|
|
llama_seq_id seq_id, |
|
|
llama_pos p0, |
|
|
llama_pos p1, |
|
|
llama_pos delta) { |
|
|
if (!mem) { |
|
|
return; |
|
|
} |
|
|
|
|
|
mem->seq_add(seq_id, p0, p1, delta); |
|
|
} |
|
|
|
|
|
void llama_memory_seq_div( |
|
|
llama_memory_t mem, |
|
|
llama_seq_id seq_id, |
|
|
llama_pos p0, |
|
|
llama_pos p1, |
|
|
int d) { |
|
|
if (!mem) { |
|
|
return; |
|
|
} |
|
|
|
|
|
mem->seq_div(seq_id, p0, p1, d); |
|
|
} |
|
|
|
|
|
llama_pos llama_memory_seq_pos_min( |
|
|
llama_memory_t mem, |
|
|
llama_seq_id seq_id) { |
|
|
if (!mem) { |
|
|
return -1; |
|
|
} |
|
|
|
|
|
return mem->seq_pos_min(seq_id); |
|
|
} |
|
|
|
|
|
llama_pos llama_memory_seq_pos_max( |
|
|
llama_memory_t mem, |
|
|
llama_seq_id seq_id) { |
|
|
if (!mem) { |
|
|
return -1; |
|
|
} |
|
|
|
|
|
return mem->seq_pos_max(seq_id); |
|
|
} |
|
|
|
|
|
bool llama_memory_can_shift(llama_memory_t mem) { |
|
|
if (!mem) { |
|
|
return false; |
|
|
} |
|
|
|
|
|
return mem->get_can_shift(); |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
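// deprecated: approximate the number of tokens in the cache by summing the position ranges of
// every sequence; llama_kv_self_used_cells() below returns the same approximation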
int32_t llama_kv_self_n_tokens(const llama_context * ctx) { |
|
|
const auto * kv = llama_get_memory(ctx); |
|
|
if (!kv) { |
|
|
return 0; |
|
|
} |
|
|
|
|
|
int32_t res = 0; |
|
|
|
|
|
for (uint32_t s = 0; s < ctx->get_cparams().n_seq_max; s++) { |
|
|
const llama_pos p0 = kv->seq_pos_min(s); |
|
|
const llama_pos p1 = kv->seq_pos_max(s); |
|
|
|
|
|
if (p0 >= 0) { |
|
|
res += (p1 - p0) + 1; |
|
|
} |
|
|
} |
|
|
|
|
|
return res; |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
int32_t llama_kv_self_used_cells(const llama_context * ctx) { |
|
|
const auto * kv = llama_get_memory(ctx); |
|
|
if (!kv) { |
|
|
return 0; |
|
|
} |
|
|
|
|
|
int32_t res = 0; |
|
|
|
|
|
for (uint32_t s = 0; s < ctx->get_cparams().n_seq_max; s++) { |
|
|
const llama_pos p0 = kv->seq_pos_min(s); |
|
|
const llama_pos p1 = kv->seq_pos_max(s); |
|
|
|
|
|
if (p0 >= 0) { |
|
|
res += (p1 - p0) + 1; |
|
|
} |
|
|
} |
|
|
|
|
|
return res; |
|
|
} |
|
|
|
|
|
|
|
|
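// deprecated llama_kv_self_* wrappers - they simply forward to the llama_memory_* API above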
void llama_kv_self_clear(llama_context * ctx) { |
|
|
auto * kv = llama_get_memory(ctx); |
|
|
if (!kv) { |
|
|
return; |
|
|
} |
|
|
|
|
|
llama_memory_clear(kv, true); |
|
|
} |
|
|
|
|
|
|
|
|
bool llama_kv_self_seq_rm( |
|
|
llama_context * ctx, |
|
|
llama_seq_id seq_id, |
|
|
llama_pos p0, |
|
|
llama_pos p1) { |
|
|
auto * kv = llama_get_memory(ctx); |
|
|
if (!kv) { |
|
|
return true; |
|
|
} |
|
|
|
|
|
return llama_memory_seq_rm(kv, seq_id, p0, p1); |
|
|
} |
|
|
|
|
|
|
|
|
void llama_kv_self_seq_cp( |
|
|
llama_context * ctx, |
|
|
llama_seq_id seq_id_src, |
|
|
llama_seq_id seq_id_dst, |
|
|
llama_pos p0, |
|
|
llama_pos p1) { |
|
|
auto * kv = llama_get_memory(ctx); |
|
|
if (!kv) { |
|
|
return; |
|
|
} |
|
|
|
|
|
llama_memory_seq_cp(kv, seq_id_src, seq_id_dst, p0, p1); |
|
|
} |
|
|
|
|
|
|
|
|
void llama_kv_self_seq_keep(llama_context * ctx, llama_seq_id seq_id) { |
|
|
auto * kv = llama_get_memory(ctx); |
|
|
if (!kv) { |
|
|
return; |
|
|
} |
|
|
|
|
|
llama_memory_seq_keep(kv, seq_id); |
|
|
} |
|
|
|
|
|
|
|
|
void llama_kv_self_seq_add( |
|
|
llama_context * ctx, |
|
|
llama_seq_id seq_id, |
|
|
llama_pos p0, |
|
|
llama_pos p1, |
|
|
llama_pos delta) { |
|
|
auto * kv = llama_get_memory(ctx); |
|
|
if (!kv) { |
|
|
return; |
|
|
} |
|
|
|
|
|
llama_memory_seq_add(kv, seq_id, p0, p1, delta); |
|
|
} |
|
|
|
|
|
|
|
|
void llama_kv_self_seq_div( |
|
|
llama_context * ctx, |
|
|
llama_seq_id seq_id, |
|
|
llama_pos p0, |
|
|
llama_pos p1, |
|
|
int d) { |
|
|
auto * kv = llama_get_memory(ctx); |
|
|
if (!kv) { |
|
|
return; |
|
|
} |
|
|
|
|
|
llama_memory_seq_div(kv, seq_id, p0, p1, d); |
|
|
} |
|
|
|
|
|
|
|
|
llama_pos llama_kv_self_seq_pos_min(llama_context * ctx, llama_seq_id seq_id) { |
|
|
auto * kv = llama_get_memory(ctx); |
|
|
if (!kv) { |
|
|
return -1; |
|
|
} |
|
|
|
|
|
return llama_memory_seq_pos_min(kv, seq_id); |
|
|
} |
|
|
|
|
|
|
|
|
llama_pos llama_kv_self_seq_pos_max(llama_context * ctx, llama_seq_id seq_id) { |
|
|
auto * kv = llama_get_memory(ctx); |
|
|
if (!kv) { |
|
|
return -1; |
|
|
} |
|
|
|
|
|
return llama_memory_seq_pos_max(kv, seq_id); |
|
|
} |
|
|
|
|
|
|
|
|
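// note: defragmentation is only scheduled here - it is applied during a later memory update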
void llama_kv_self_defrag(llama_context * ctx) { |
|
|
|
|
|
ctx->kv_self_defrag_sched(); |
|
|
} |
|
|
|
|
|
|
|
|
bool llama_kv_self_can_shift(const llama_context * ctx) { |
|
|
auto * kv = llama_get_memory(ctx); |
|
|
if (!kv) { |
|
|
return false; |
|
|
} |
|
|
|
|
|
return llama_memory_can_shift(kv); |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
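// old state / session API - thin wrappers kept for backwards compatibility around the
// llama_state_* functions below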
size_t llama_get_state_size(llama_context * ctx) { |
|
|
return llama_state_get_size(ctx); |
|
|
} |
|
|
|
|
|
|
|
|
size_t llama_copy_state_data(llama_context * ctx, uint8_t * dst) { |
|
|
return llama_state_get_data(ctx, dst, -1); |
|
|
} |
|
|
|
|
|
|
|
|
size_t llama_set_state_data(llama_context * ctx, const uint8_t * src) { |
|
|
return llama_state_set_data(ctx, src, -1); |
|
|
} |
|
|
|
|
|
|
|
|
bool llama_load_session_file(llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) { |
|
|
return llama_state_load_file(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out); |
|
|
} |
|
|
|
|
|
|
|
|
bool llama_save_session_file(llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) { |
|
|
return llama_state_save_file(ctx, path_session, tokens, n_token_count); |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
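// Example (sketch, error handling omitted): snapshot and later restore the full context state:
//
//   std::vector<uint8_t> buf(llama_state_get_size(ctx));
//   const size_t n_written = llama_state_get_data(ctx, buf.data(), buf.size());
//   ...
//   llama_state_set_data(ctx, buf.data(), n_written);
//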
size_t llama_state_get_size(llama_context * ctx) { |
|
|
return ctx->state_get_size(); |
|
|
} |
|
|
|
|
|
size_t llama_state_get_data(llama_context * ctx, uint8_t * dst, size_t size) { |
|
|
ctx->synchronize(); |
|
|
|
|
|
return ctx->state_get_data(dst, size); |
|
|
} |
|
|
|
|
|
|
|
|
size_t llama_state_set_data(llama_context * ctx, const uint8_t * src, size_t size) { |
|
|
ctx->synchronize(); |
|
|
|
|
|
return ctx->state_set_data(src, size); |
|
|
} |
|
|
|
|
|
bool llama_state_load_file(llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) { |
|
|
ctx->synchronize(); |
|
|
|
|
|
try { |
|
|
return ctx->state_load_file(path_session, tokens_out, n_token_capacity, n_token_count_out); |
|
|
} catch (const std::exception & err) { |
|
|
LLAMA_LOG_ERROR("%s: error loading session file: %s\n", __func__, err.what()); |
|
|
return false; |
|
|
} |
|
|
} |
|
|
|
|
|
bool llama_state_save_file(llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) { |
|
|
ctx->synchronize(); |
|
|
|
|
|
try { |
|
|
return ctx->state_save_file(path_session, tokens, n_token_count); |
|
|
} catch (const std::exception & err) { |
|
|
LLAMA_LOG_ERROR("%s: error saving session file: %s\n", __func__, err.what()); |
|
|
return false; |
|
|
} |
|
|
} |
|
|
|
|
|
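// per-sequence variants: serialize or restore the state of a single sequence id only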
size_t llama_state_seq_get_size(llama_context * ctx, llama_seq_id seq_id) { |
|
|
return ctx->state_seq_get_size(seq_id); |
|
|
} |
|
|
|
|
|
size_t llama_state_seq_get_data(llama_context * ctx, uint8_t * dst, size_t size, llama_seq_id seq_id) { |
|
|
ctx->synchronize(); |
|
|
|
|
|
return ctx->state_seq_get_data(seq_id, dst, size); |
|
|
} |
|
|
|
|
|
size_t llama_state_seq_set_data(llama_context * ctx, const uint8_t * src, size_t size, llama_seq_id seq_id) { |
|
|
ctx->synchronize(); |
|
|
|
|
|
return ctx->state_seq_set_data(seq_id, src, size); |
|
|
} |
|
|
|
|
|
size_t llama_state_seq_save_file(llama_context * ctx, const char * filepath, llama_seq_id seq_id, const llama_token * tokens, size_t n_token_count) { |
|
|
ctx->synchronize(); |
|
|
|
|
|
try { |
|
|
return ctx->state_seq_save_file(seq_id, filepath, tokens, n_token_count); |
|
|
} catch (const std::exception & err) { |
|
|
LLAMA_LOG_ERROR("%s: error saving sequence state file: %s\n", __func__, err.what()); |
|
|
return 0; |
|
|
} |
|
|
} |
|
|
|
|
|
size_t llama_state_seq_load_file(llama_context * ctx, const char * filepath, llama_seq_id dest_seq_id, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) { |
|
|
ctx->synchronize(); |
|
|
|
|
|
try { |
|
|
return ctx->state_seq_load_file(dest_seq_id, filepath, tokens_out, n_token_capacity, n_token_count_out); |
|
|
} catch (const std::exception & err) { |
|
|
LLAMA_LOG_ERROR("%s: error loading sequence state file: %s\n", __func__, err.what()); |
|
|
return 0; |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
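// note: for llama_decode() a return value of 1 is not treated as an error here - it indicates
// that the batch could not be placed in memory (e.g. no free KV cache slot) and can be retried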
int32_t llama_encode( |
|
|
llama_context * ctx, |
|
|
llama_batch batch) { |
|
|
const int ret = ctx->encode(batch); |
|
|
if (ret != 0) { |
|
|
LLAMA_LOG_ERROR("%s: failed to encode, ret = %d\n", __func__, ret); |
|
|
} |
|
|
|
|
|
return ret; |
|
|
} |
|
|
|
|
|
int32_t llama_decode( |
|
|
llama_context * ctx, |
|
|
llama_batch batch) { |
|
|
const int ret = ctx->decode(batch); |
|
|
if (ret != 0 && ret != 1) { |
|
|
LLAMA_LOG_ERROR("%s: failed to decode, ret = %d\n", __func__, ret); |
|
|
} |
|
|
|
|
|
return ret; |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
llama_perf_context_data llama_perf_context(const llama_context * ctx) { |
|
|
llama_perf_context_data data = {}; |
|
|
|
|
|
if (ctx == nullptr) { |
|
|
return data; |
|
|
} |
|
|
|
|
|
data = ctx->perf_get_data(); |
|
|
|
|
|
return data; |
|
|
} |
|
|
|
|
|
void llama_perf_context_print(const llama_context * ctx) { |
|
|
const auto data = llama_perf_context(ctx); |
|
|
|
|
|
const double t_end_ms = 1e-3 * ggml_time_us(); |
|
|
|
|
|
LLAMA_LOG_INFO("%s: load time = %10.2f ms\n", __func__, data.t_load_ms); |
|
|
LLAMA_LOG_INFO("%s: prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n", |
|
|
__func__, data.t_p_eval_ms, data.n_p_eval, data.t_p_eval_ms / data.n_p_eval, 1e3 / data.t_p_eval_ms * data.n_p_eval); |
|
|
LLAMA_LOG_INFO("%s: eval time = %10.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n", |
|
|
__func__, data.t_eval_ms, data.n_eval, data.t_eval_ms / data.n_eval, 1e3 / data.t_eval_ms * data.n_eval); |
|
|
LLAMA_LOG_INFO("%s: total time = %10.2f ms / %5d tokens\n", __func__, (t_end_ms - data.t_start_ms), (data.n_p_eval + data.n_eval)); |
|
|
LLAMA_LOG_INFO("%s: graphs reused = %10d\n", __func__, data.n_reused); |
|
|
} |
|
|
|
|
|
void llama_perf_context_reset(llama_context * ctx) { |
|
|
ctx->perf_reset(); |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
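// default parameter filter: consider every tensor trainable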
bool llama_opt_param_filter_all(const struct ggml_tensor * tensor, void * userdata) { |
|
|
GGML_UNUSED(tensor); |
|
|
GGML_UNUSED(userdata); |
|
|
return true; |
|
|
} |
|
|
|
|
|
void llama_opt_init(struct llama_context * ctx, struct llama_model * model, struct llama_opt_params lopt_params) { |
|
|
ctx->opt_init(model, lopt_params); |
|
|
} |
|
|
|
|
|
void llama_opt_epoch( |
|
|
struct llama_context * ctx, |
|
|
ggml_opt_dataset_t dataset, |
|
|
ggml_opt_result_t result_train, |
|
|
ggml_opt_result_t result_eval, |
|
|
int64_t idata_split, |
|
|
ggml_opt_epoch_callback callback_train, |
|
|
ggml_opt_epoch_callback callback_eval) { |
|
|
ctx->opt_epoch( |
|
|
dataset, |
|
|
result_train, |
|
|
result_eval, |
|
|
idata_split, |
|
|
callback_train, |
|
|
callback_eval); |
|
|
} |
|
|
|