| #include "llama-sampler.h" |
|
|
| #include "llama-impl.h" |
| #include "llama-vocab.h" |
| #include "llama-grammar.h" |
|
|
| #include "ggml-cpp.h" |
|
|
| #include <array> |
| #include <algorithm> |
| #include <cassert> |
| #include <cfloat> |
| #include <chrono> |
| #include <cmath> |
| #include <cstdlib> |
| #include <cstring> |
| #include <ctime> |
| #include <numeric> |
| #include <random> |
| #include <unordered_map> |
| #include <stdexcept> |
|
|
| // the ring buffer works similarly to std::deque, but with a fixed capacity |
| template<typename T> |
| struct ring_buffer { |
| ring_buffer(size_t cap) : capacity(cap), data(cap) {} |
|
|
| T & front() { |
| if (sz == 0) { |
| throw std::runtime_error("ring buffer is empty"); |
| } |
| return data[first]; |
| } |
|
|
| const T & front() const { |
| if (sz == 0) { |
| throw std::runtime_error("ring buffer is empty"); |
| } |
| return data[first]; |
| } |
|
|
| T & back() { |
| if (sz == 0) { |
| throw std::runtime_error("ring buffer is empty"); |
| } |
| return data[pos]; |
| } |
|
|
| const T & back() const { |
| if (sz == 0) { |
| throw std::runtime_error("ring buffer is empty"); |
| } |
| return data[pos]; |
| } |
|
|
| void push_back(const T & value) { |
| if (capacity == 0) { |
| throw std::runtime_error("ring buffer: capacity is zero"); |
| } |
|
|
| if (sz == capacity) { |
| // advance the start when buffer is full |
| first = (first + 1) % capacity; |
| } else { |
| sz++; |
| } |
| data[pos] = value; |
| pos = (pos + 1) % capacity; |
| } |
|
|
| T pop_front() { |
| if (sz == 0) { |
| throw std::runtime_error("ring buffer is empty"); |
| } |
| T value = data[first]; |
| first = (first + 1) % capacity; |
| sz--; |
| return value; |
| } |
|
|
| //T & operator[](size_t i) { |
| //    if (i >= sz) { |
| //        throw std::runtime_error("ring buffer: index out of bounds"); |
| //    } |
| //    return data[(first + i) % capacity]; |
| //} |
|
|
| //const T & at(size_t i) const { |
| //    if (i >= sz) { |
| //        throw std::runtime_error("ring buffer: index out of bounds"); |
| //    } |
| //    return data[(first + i) % capacity]; |
| //} |
|
|
| const T & rat(size_t i) const { |
| if (i >= sz) { |
| throw std::runtime_error("ring buffer: index out of bounds"); |
| } |
| return data[(first + sz - i - 1) % capacity]; |
| } |
|
|
| std::vector<T> to_vector() const { |
| std::vector<T> result; |
| result.reserve(sz); |
| for (size_t i = 0; i < sz; i++) { |
| result.push_back(data[(first + i) % capacity]); |
| } |
| return result; |
| } |
|
|
| void clear() { |
| // here only reset the status of the buffer |
| sz = 0; |
| first = 0; |
| pos = 0; |
| } |
|
|
| bool empty() const { |
| return sz == 0; |
| } |
|
|
| size_t size() const { |
| return sz; |
| } |
|
|
| size_t capacity = 0; |
| size_t sz = 0; |
| size_t first = 0; |
| size_t pos = 0; |
|
|
| std::vector<T> data; |
| }; |
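|
| // usage sketch (illustrative only, not part of the library): |
| // ring_buffer<llama_token> prev(3); |
| // prev.push_back(1); prev.push_back(2); prev.push_back(3); |
| // prev.push_back(4); // overwrites 1: contents are now {2, 3, 4} |
| // prev.rat(0); // == 4 (most recent) |
| // prev.rat(2); // == 2 (oldest) |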
|
|
| // bucket sort of the top npartial candidates by logit (descending) into res; cur is not modified |
| static void llama_token_data_array_partial_sort(const llama_token_data_array & cur, int npartial, std::vector<llama_token_data> & res) { |
| static const auto comp = [](const llama_token_data & a, const llama_token_data & b) { |
| return a.logit > b.logit; |
| }; |
|
|
| constexpr int nbuckets = 128; |
| constexpr float bucket_low = -10.0f; |
| constexpr float bucket_high = 10.0f; |
| constexpr float bucket_scale = nbuckets/(bucket_high - bucket_low); |
| constexpr float bucket_inter = -bucket_low * bucket_scale; |
|
|
| std::vector<int> bucket_idx; |
| std::vector<int> histo(nbuckets, 0); |
|
|
| std::vector<llama_token_data*> bucket_ptrs; |
|
|
| bucket_idx.reserve(cur.size); |
|
|
| for (int i = 0; i < (int)cur.size; ++i) { |
| const float val = cur.data[i].logit; |
| int ib = int(bucket_scale * val + bucket_inter); |
| ib = std::max(0, std::min(nbuckets - 1, ib)); |
| bucket_idx.push_back(ib); |
| ++histo[ib]; |
| } |
| int nhave = 0; |
| int ib = nbuckets - 1; |
| for ( ; ib >= 0; --ib) { |
| nhave += histo[ib]; |
| if (nhave >= npartial) { |
| break; |
| } |
| } |
| res.resize(nhave); |
| auto * ptr = res.data(); |
| bucket_ptrs.reserve(nbuckets - ib); |
| for (int j = nbuckets - 1; j >= ib; --j) { |
| bucket_ptrs.push_back(ptr); |
| ptr += histo[j]; |
| } |
| for (int i = 0; i < (int)cur.size; ++i) { |
| int j = bucket_idx[i]; |
| if (j >= ib) { |
| *bucket_ptrs[nbuckets - 1 - j]++ = cur.data[i]; |
| } |
| } |
|
|
| ptr = res.data(); |
| int ndone = 0; |
| for (int j = nbuckets - 1; j > ib; --j) { |
| std::sort(ptr, ptr + histo[j], comp); |
| ptr += histo[j]; |
| ndone += histo[j]; |
| } |
| std::partial_sort(ptr, ptr + npartial - ndone, ptr + histo[ib], comp); |
| } |
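|
| // worked example of the bucket mapping above (illustrative): |
| // with nbuckets = 128, bucket_low = -10, bucket_high = 10: |
| //   bucket_scale = 128/20 = 6.4, bucket_inter = 64 |
| //   a logit of 3.5 maps to ib = int(6.4*3.5 + 64) = int(86.4) = 86 |
| // buckets are then scanned from the highest (127) down until npartial tokens are covered, |
| // and only those buckets actually get sorted |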
|
|
| // in-place variant: keep the top npartial candidates, sorted by logit in descending order |
| static void llama_token_data_array_partial_sort_inplace(llama_token_data_array * cur_p, int npartial) { |
| static const auto comp = [](const llama_token_data & a, const llama_token_data & b) { |
| return a.logit > b.logit; |
| }; |
|
|
| if (npartial <= 128) { |
| std::partial_sort(cur_p->data, cur_p->data + npartial, cur_p->data + cur_p->size, comp); |
|
|
| cur_p->size = npartial; |
| cur_p->sorted = true; |
|
|
| return; |
| } |
|
|
| std::vector<llama_token_data> tmp; |
|
|
| llama_token_data_array_partial_sort(*cur_p, npartial, tmp); |
|
|
| std::copy(tmp.data(), tmp.data() + npartial, cur_p->data); |
|
|
| cur_p->size = npartial; |
| cur_p->sorted = true; |
| } |
|
|
| static int llama_sample_dist(llama_token_data_array * cur_p, std::mt19937 & rng) { |
| // iterator for the probabilities |
| #ifdef __GNUC__ |
| #pragma GCC diagnostic push |
| #pragma GCC diagnostic ignored "-Wunused-local-typedefs" |
| #endif |
|
|
| struct probs_iterator { |
| typedef std::input_iterator_tag iterator_category; |
| typedef float value_type; |
| typedef float * pointer; |
| typedef float & reference; |
| typedef ptrdiff_t difference_type; |
|
|
| const llama_token_data * data; |
|
|
| bool operator==(const probs_iterator & other) const { return data == other.data; } |
| bool operator!=(const probs_iterator & other) const { return data != other.data; } |
| const float & operator*() const { return data->p; } |
| probs_iterator & operator++() { ++data; return *this; } |
| probs_iterator operator++(int) { probs_iterator tmp = *this; ++data; return tmp; } |
| }; |
|
|
| #ifdef __GNUC__ |
| #pragma GCC diagnostic pop |
| #endif |
|
|
| std::discrete_distribution<int> dist(probs_iterator{cur_p->data}, probs_iterator{cur_p->data + cur_p->size}); |
|
|
| return dist(rng); |
| } |
|
|
| /* |
| static void llama_log_softmax(float * array, size_t size) { |
| float max_l = *std::max_element(array, array + size); |
| float sum = 0.f; |
| for (size_t i = 0; i < size; i++) { |
| float p = expf(array[i] - max_l); |
| sum += p; |
| array[i] = p; |
| } |
|
| for (size_t i = 0; i < size; i++) { |
| array[i] = logf(array[i] / sum); |
| } |
| } |
| */ |
|
|
| static void llama_sampler_temp_impl(llama_token_data_array * cur_p, float temp) { |
| if (temp <= 0.0f) { |
| // find the token with the highest logit and set the rest to -inf |
| size_t max_i = 0; |
| float max_l = cur_p->data[0].logit; |
|
|
| for (size_t i = 1; i < cur_p->size; ++i) { |
| if (cur_p->data[i ].logit > max_l) { |
| cur_p->data[max_i].logit = -INFINITY; |
| max_i = i; |
| max_l = cur_p->data[i].logit; |
| } else { |
| cur_p->data[i].logit = -INFINITY; |
| } |
| } |
|
|
| return; |
| } |
|
|
| for (size_t i = 0; i < cur_p->size; ++i) { |
| cur_p->data[i].logit /= temp; |
| } |
| } |
|
|
| static void llama_sampler_softmax_impl(llama_token_data_array * cur_p, bool do_sort) { |
| GGML_ASSERT(cur_p->size > 0); |
|
|
| // sort the candidates by logit if requested (full sort) |
| if (do_sort && !cur_p->sorted) { |
| llama_token_data_array_partial_sort_inplace(cur_p, cur_p->size); |
| } |
|
|
| float max_l = cur_p->data[0].logit; |
| if (!cur_p->sorted) { |
| for (size_t i = 1; i < cur_p->size; ++i) { |
| max_l = std::max(max_l, cur_p->data[i].logit); |
| } |
| } |
|
|
| float cum_sum = 0.0f; |
|
|
| for (size_t i = 0; i < cur_p->size; ++i) { |
| float p = expf(cur_p->data[i].logit - max_l); |
| cur_p->data[i].p = p; |
| cum_sum += p; |
| } |
|
|
| for (size_t i = 0; i < cur_p->size; ++i) { |
| cur_p->data[i].p /= cum_sum; |
| } |
| } |
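|
| // numeric sketch (illustrative): logits {2.0, 0.0} -> max_l = 2.0 -> exp{0.0, -2.0} = {1.0, 0.135} |
| // -> cum_sum = 1.135 -> probs {0.881, 0.119}; subtracting max_l keeps expf from overflowing |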
|
|
| static void llama_sampler_top_k_impl(llama_token_data_array * cur_p, int32_t k) { |
| // keep only the k candidates with the highest logits |
| // k <= 0 means no truncation |
|
|
| if (k <= 0) { |
| return; |
| } |
|
|
| k = std::min(k, (int) cur_p->size); |
|
|
| // sort the top k scores in descending order if not sorted yet |
| if (!cur_p->sorted) { |
| llama_token_data_array_partial_sort_inplace(cur_p, k); |
| } |
|
|
| cur_p->size = k; |
| } |
|
|
| static uint32_t get_rng_seed(uint32_t seed) { |
| if (seed == LLAMA_DEFAULT_SEED) { |
| // use system clock if std::random_device is not a true RNG |
| static bool is_rd_prng = std::random_device().entropy() == 0; |
| if (is_rd_prng) { |
| return (uint32_t) std::chrono::system_clock::now().time_since_epoch().count(); |
| } |
| std::random_device rd; |
| return rd(); |
| } |
| return seed; |
| } |
|
|
| // llama_sampler API |
|
|
| struct llama_sampler * llama_sampler_init( |
| struct llama_sampler_i * iface, |
| llama_sampler_context_t ctx) { |
| return new llama_sampler { |
| iface, |
| ctx, |
| }; |
| } |
|
|
| const char * llama_sampler_name(const struct llama_sampler * smpl) { |
| if (!smpl->iface) { |
| return "(null)"; |
| } |
|
|
| return smpl->iface->name(smpl); |
| } |
|
|
| void llama_sampler_accept(struct llama_sampler * smpl, llama_token token) { |
| if (!smpl) { |
| return; |
| } |
|
|
| if (smpl->iface->accept) { |
| smpl->iface->accept(smpl, token); |
| } |
| } |
|
|
| void llama_sampler_apply(struct llama_sampler * smpl, struct llama_token_data_array * cur_p) { |
| if (!smpl) { |
| return; |
| } |
|
|
| GGML_ASSERT(smpl->iface->apply); |
| smpl->iface->apply(smpl, cur_p); |
| } |
|
|
| void llama_sampler_reset(struct llama_sampler * smpl) { |
| if (!smpl) { |
| return; |
| } |
|
|
| if (smpl->iface->reset) { |
| smpl->iface->reset(smpl); |
| } |
| } |
|
|
| struct llama_sampler * llama_sampler_clone(const struct llama_sampler * smpl) { |
| if (!smpl) { |
| return nullptr; |
| } |
|
|
| if (smpl->iface->clone) { |
| return smpl->iface->clone(smpl); |
| } |
|
|
| if (smpl->ctx == nullptr) { |
| return llama_sampler_init( |
| smpl->iface, |
| nullptr |
| ); |
| } |
|
|
| GGML_ABORT("the sampler does not support cloning"); |
| } |
|
|
| void llama_sampler_free(struct llama_sampler * smpl) { |
| if (smpl == nullptr) { |
| return; |
| } |
|
|
| if (smpl->iface->free) { |
| smpl->iface->free(smpl); |
| } |
|
|
| delete smpl; |
| } |
|
|
| // empty |
|
|
| struct llama_sampler_empty { |
| const char * name; |
| }; |
|
|
| static struct llama_sampler * llama_sampler_init_empty(const char * name); |
|
|
| static const char * llama_sampler_empty_name(const struct llama_sampler * smpl) { |
| auto * ctx = (llama_sampler_empty *) smpl->ctx; |
| return ctx->name; |
| } |
|
|
| static void llama_sampler_empty_accept(struct llama_sampler * smpl, llama_token token) { |
| GGML_UNUSED(smpl); |
| GGML_UNUSED(token); |
| } |
|
|
| static void llama_sampler_empty_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) { |
| GGML_UNUSED(smpl); |
| GGML_UNUSED(cur_p); |
| } |
|
|
| static void llama_sampler_empty_reset(struct llama_sampler * smpl) { |
| GGML_UNUSED(smpl); |
| } |
|
|
| static struct llama_sampler * llama_sampler_empty_clone(const struct llama_sampler * smpl) { |
| auto * ctx = (llama_sampler_empty *) smpl->ctx; |
| return llama_sampler_init_empty(ctx->name); |
| } |
|
|
| static void llama_sampler_empty_free(struct llama_sampler * smpl) { |
| delete (llama_sampler_empty *) smpl->ctx; |
| } |
|
|
| static bool llama_sampler_empty_backend_init( |
| struct llama_sampler * smpl, |
| ggml_backend_buffer_type_t buft) { |
| GGML_UNUSED(smpl); |
| GGML_UNUSED(buft); |
|
|
| return true; |
| } |
|
|
| static void llama_sampler_empty_backend_accept( |
| struct llama_sampler * smpl, |
| ggml_context * ctx, |
| ggml_cgraph * gf, |
| struct ggml_tensor * selected_token) { |
| GGML_UNUSED(smpl); |
| GGML_UNUSED(ctx); |
| GGML_UNUSED(gf); |
| GGML_UNUSED(selected_token); |
| } |
|
|
| static void llama_sampler_empty_backend_apply( |
| struct llama_sampler * smpl, |
| struct ggml_context * ctx, |
| struct ggml_cgraph * gf, |
| struct llama_sampler_data * data) { |
| GGML_UNUSED(smpl); |
| GGML_UNUSED(ctx); |
| GGML_UNUSED(gf); |
| GGML_UNUSED(data); |
| } |
|
|
| static void llama_sampler_empty_backend_set_input(struct llama_sampler * smpl) { |
| GGML_UNUSED(smpl); |
| } |
|
|
| static struct llama_sampler_i llama_sampler_empty_i = { |
| llama_sampler_empty_name, |
| llama_sampler_empty_accept, |
| llama_sampler_empty_apply, |
| llama_sampler_empty_reset, |
| llama_sampler_empty_clone, |
| llama_sampler_empty_free, |
| llama_sampler_empty_backend_init, |
| llama_sampler_empty_backend_accept, |
| llama_sampler_empty_backend_apply, |
| llama_sampler_empty_backend_set_input, |
| }; |
|
|
| struct llama_sampler * llama_sampler_init_empty(const char * name) { |
| return llama_sampler_init( |
| &llama_sampler_empty_i, |
| new llama_sampler_empty { |
| name, |
| } |
| ); |
| } |
|
|
| // common base for samplers that can also run on a ggml backend; |
| // tracks whether backend support has been probed and prefixes the reported name |
| // with '+' (runs on the backend) or '-' (falls back to the CPU implementation) |
| |
| struct llama_sampler_backend { |
| llama_sampler_backend(const char * name) : name(name), name_ext(name), is_init(false), support(false) {} |
|
|
| const char * get_name() { |
| if (!is_init) { |
| return name.c_str(); |
| } |
|
|
| if (support) { |
| name_ext = "+" + name; |
| } else { |
| name_ext = "-" + name; |
| } |
|
|
| return name_ext.c_str(); |
| } |
|
|
| void init(bool support) { |
| GGML_ASSERT(this->is_init == false); |
|
|
| this->is_init = true; |
| this->support = support; |
| } |
|
|
| private: |
| std::string name; |
| std::string name_ext; |
|
|
| bool is_init; |
| bool support; |
| }; |
|
|
| // probe device support: build a throwaway graph with the sampler's ops and check every node |
| static bool llama_sampler_backend_support( |
| llama_sampler * smpl, |
| ggml_backend_buffer_type_t buft) { |
| auto * device = ggml_backend_buft_get_device(buft); |
| if (!device) { |
| // no device associated with the buffer type (e.g. host memory) -> assume support |
| return true; |
| } |
|
|
| ggml_init_params params = { |
| 128*ggml_tensor_overhead() + ggml_graph_overhead(), |
| NULL, |
| true, |
| }; |
|
|
| ggml_context_ptr ctx_ptr { ggml_init(params) }; |
| if (!ctx_ptr) { |
| throw std::runtime_error(format("failed to create ggml context")); |
| } |
|
|
| ggml_context * ctx = ctx_ptr.get(); |
|
|
| const int64_t n = 1024*1024; |
|
|
| llama_sampler_data data = { |
| ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n), |
| nullptr, |
| nullptr, |
| ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n), |
| }; |
|
|
| ggml_cgraph * gf = ggml_new_graph(ctx); |
|
|
| smpl->iface->backend_apply(smpl, ctx, gf, &data); |
|
|
| if (data.logits) { |
| ggml_build_forward_expand(gf, data.logits); |
| } |
|
|
| if (data.probs) { |
| ggml_build_forward_expand(gf, data.probs); |
| } |
|
|
| if (data.sampled) { |
| ggml_build_forward_expand(gf, data.sampled); |
| } |
|
|
| if (data.candidates) { |
| ggml_build_forward_expand(gf, data.candidates); |
| } |
|
|
| for (int i = 0; i < ggml_graph_n_nodes(gf); i++) { |
| struct ggml_tensor * op = ggml_graph_node(gf, i); |
|
|
| if (!ggml_backend_dev_supports_op(device, op)) { |
| LLAMA_LOG_WARN("%s: device '%s' does not have support for op %s needed for sampler '%s'\n", |
| __func__, ggml_backend_dev_name(device), ggml_op_name(op->op), smpl->iface->name(smpl)); |
|
|
| return false; |
| } |
| } |
|
|
| return true; |
| } |
|
|
| // sampler chain |
|
|
| static const char * llama_sampler_chain_name(const struct llama_sampler * /*smpl*/) { |
| return "chain"; |
| } |
|
|
| static void llama_sampler_chain_accept(struct llama_sampler * smpl, llama_token token) { |
| auto * chain = (llama_sampler_chain *) smpl->ctx; |
|
|
| time_meas tm(chain->t_sample_us, chain->params.no_perf); |
|
|
| for (auto & smpl : chain->samplers) { |
| llama_sampler_accept(smpl.ptr, token); |
| } |
|
|
| chain->n_sample++; |
| } |
|
|
| static void llama_sampler_chain_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) { |
| auto * chain = (llama_sampler_chain *) smpl->ctx; |
|
|
| time_meas tm(chain->t_sample_us, chain->params.no_perf); |
|
|
| bool is_backend = chain->is_init; |
|
|
| for (auto & smpl : chain->samplers) { |
| if (is_backend && smpl.is_backend) { |
| continue; |
| } |
|
|
| is_backend = false; |
|
|
| if (smpl.ptr->iface->apply == nullptr) { |
| continue; |
| } |
|
|
| llama_sampler_apply(smpl.ptr, cur_p); |
| } |
| } |
|
|
| static void llama_sampler_chain_reset(struct llama_sampler * smpl) { |
| auto * chain = (llama_sampler_chain *) smpl->ctx; |
|
|
| for (auto & smpl : chain->samplers) { |
| llama_sampler_reset(smpl.ptr); |
| } |
| } |
|
|
| static struct llama_sampler * llama_sampler_chain_clone(const struct llama_sampler * smpl) { |
| const auto * chain_src = (const llama_sampler_chain *) smpl->ctx; |
|
|
| auto * result = llama_sampler_chain_init(chain_src->params); |
|
|
| for (const auto & smpl : chain_src->samplers) { |
| llama_sampler_chain_add(result, llama_sampler_clone(smpl.ptr)); |
| } |
|
|
| return result; |
| } |
|
|
| static void llama_sampler_chain_free(struct llama_sampler * smpl) { |
| auto * chain = (llama_sampler_chain *) smpl->ctx; |
|
|
| for (auto & smpl : chain->samplers) { |
| llama_sampler_free(smpl.ptr); |
| } |
|
|
| delete chain; |
| } |
|
|
| static bool llama_sampler_chain_backend_init( |
| struct llama_sampler * smpl, |
| ggml_backend_buffer_type_t buft) { |
| auto * chain = (llama_sampler_chain *) smpl->ctx; |
|
|
| GGML_ASSERT(chain->is_init == false && "llama_sampler_chain_backend_init() called twice"); |
|
|
| chain->is_init = true; |
|
|
| bool res = true; |
|
|
| for (auto & smpl : chain->samplers) { |
| bool res_cur = true; |
|
|
| // a sampler runs on the backend only if it implements backend_init and the |
| // device supports all the ops it needs; the first sampler that does not |
| // forces the rest of the chain onto the CPU path (see llama_sampler_chain_apply) |
| if (smpl.ptr->iface->backend_init) { |
| if (!smpl.ptr->iface->backend_init(smpl.ptr, buft)) { |
| res_cur = false; |
| } |
| } else { |
| res_cur = false; |
| } |
|
|
| smpl.is_backend = res_cur; |
|
|
| res = res && res_cur; |
| } |
|
|
| return res; |
| } |
|
|
| static void llama_sampler_chain_backend_accept( |
| struct llama_sampler * smpl, |
| ggml_context * ctx, |
| ggml_cgraph * gf, |
| struct ggml_tensor * selected_token) { |
| auto * chain = (llama_sampler_chain *) smpl->ctx; |
|
|
| for (auto & smpl : chain->samplers) { |
| if (!smpl.is_backend) { |
| break; |
| } |
|
|
| if (smpl.ptr->iface->backend_accept) { |
| smpl.ptr->iface->backend_accept(smpl.ptr, ctx, gf, selected_token); |
| } |
| } |
| } |
|
|
| static void llama_sampler_chain_backend_apply( |
| struct llama_sampler * smpl, |
| struct ggml_context * ctx, |
| struct ggml_cgraph * gf, |
| struct llama_sampler_data * data) { |
| auto * chain = (llama_sampler_chain *) smpl->ctx; |
|
|
| GGML_ASSERT(chain->is_init && "llama_sampler_chain_backend_init() not called"); |
|
|
| for (auto & smpl : chain->samplers) { |
| if (!smpl.is_backend) { |
| break; |
| } |
|
|
| if (smpl.ptr->iface->backend_apply) { |
| smpl.ptr->iface->backend_apply(smpl.ptr, ctx, gf, data); |
| } |
| } |
| } |
|
|
| static void llama_sampler_chain_backend_set_input(struct llama_sampler * smpl) { |
| auto * chain = (llama_sampler_chain *) smpl->ctx; |
|
|
| for (auto & smpl : chain->samplers) { |
| if (!smpl.is_backend) { |
| break; |
| } |
|
|
| if (smpl.ptr->iface->backend_set_input) { |
| smpl.ptr->iface->backend_set_input(smpl.ptr); |
| } |
| } |
| } |
|
|
| static struct llama_sampler_i llama_sampler_chain_i = { |
| llama_sampler_chain_name, |
| llama_sampler_chain_accept, |
| llama_sampler_chain_apply, |
| llama_sampler_chain_reset, |
| llama_sampler_chain_clone, |
| llama_sampler_chain_free, |
| llama_sampler_chain_backend_init, |
| llama_sampler_chain_backend_accept, |
| llama_sampler_chain_backend_apply, |
| llama_sampler_chain_backend_set_input, |
| }; |
|
|
| struct llama_sampler * llama_sampler_chain_init(struct llama_sampler_chain_params params) { |
| return llama_sampler_init( |
| &llama_sampler_chain_i, |
| new llama_sampler_chain { |
| params, |
| false, |
| {}, |
| {}, |
| 0, |
| 0, |
| } |
| ); |
| } |
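|
| // usage sketch (assumes the llama.h public API; illustrative only): |
| // llama_sampler_chain_params params = llama_sampler_chain_default_params(); |
| // llama_sampler * chain = llama_sampler_chain_init(params); |
| // llama_sampler_chain_add(chain, llama_sampler_init_top_k(40)); |
| // llama_sampler_chain_add(chain, llama_sampler_init_temp(0.8f)); |
| // llama_sampler_chain_add(chain, llama_sampler_init_dist(LLAMA_DEFAULT_SEED)); |
| // llama_token tok = llama_sampler_sample(chain, lctx, -1); // lctx: a llama_context |
| // llama_sampler_free(chain); // also frees the chained samplers |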
|
|
| llama_token llama_sampler_sample(struct llama_sampler * smpl, struct llama_context * ctx, int32_t idx) { |
| const llama_token sampled_token = llama_get_sampled_token_ith (ctx, idx); |
| const float * sampled_probs = llama_get_sampled_probs_ith (ctx, idx); |
| const float * sampled_logits = llama_get_sampled_logits_ith (ctx, idx); |
| const llama_token * sampled_ids = llama_get_sampled_candidates_ith(ctx, idx); |
|
|
| // the backend sampler graph may have already selected a token - in that case skip the CPU samplers |
| if (sampled_token != LLAMA_TOKEN_NULL) { |
| LLAMA_LOG_DEBUG("%s: Backend sampler selected token for idx %d. Skipping CPU samplers\n", __func__, idx); |
| return sampled_token; |
| } |
|
|
| const llama_model * model = llama_get_model(ctx); |
| const llama_vocab * vocab = llama_model_get_vocab(model); |
|
|
| const int n_vocab = llama_vocab_n_tokens(vocab); |
|
|
| // when sampling from a chain, reuse its pre-allocated candidates buffer to avoid per-call allocations |
| std::vector<llama_token_data> * cur_ptr; |
| std::vector<llama_token_data> cur_local; |
|
|
| if (smpl->iface == &llama_sampler_chain_i) { |
| auto * chain = (llama_sampler_chain *) smpl->ctx; |
| cur_ptr = &chain->cur; |
| } else { |
| cur_ptr = &cur_local; |
| } |
|
|
| auto & cur = *cur_ptr; |
|
|
| if (sampled_probs) { |
| const uint32_t sampled_probs_count = llama_get_sampled_probs_count_ith(ctx, idx); |
| cur.resize(sampled_probs_count); |
| for (uint32_t i = 0; i < sampled_probs_count; ++i) { |
| cur[i] = llama_token_data{sampled_ids[i], sampled_logits[i], sampled_probs[i]}; |
| } |
| } else if (sampled_logits) { |
| const uint32_t sampled_logits_count = llama_get_sampled_logits_count_ith(ctx, idx); |
| cur.resize(sampled_logits_count); |
| for (uint32_t i = 0; i < sampled_logits_count; i++) { |
| cur[i] = llama_token_data{sampled_ids[i], sampled_logits[i], 0.0f}; |
| } |
| } else { |
| const auto * logits = llama_get_logits_ith(ctx, idx); |
| GGML_ASSERT(logits != nullptr); |
| cur.resize(n_vocab); |
| for (llama_token token_id = 0; token_id < n_vocab; token_id++) { |
| cur[token_id] = llama_token_data{token_id, logits[token_id], 0.0f}; |
| } |
| } |
|
|
| llama_token_data_array cur_p = { |
| cur.data(), |
| cur.size(), |
| -1, |
| false, |
| }; |
|
|
| llama_sampler_apply(smpl, &cur_p); |
|
|
| GGML_ASSERT(cur_p.selected >= 0 && cur_p.selected < (int32_t) cur_p.size); |
|
|
| auto token = cur_p.data[cur_p.selected].id; |
|
|
| llama_sampler_accept(smpl, token); |
|
|
| return token; |
| } |
|
|
|
|
| void llama_sampler_chain_add(struct llama_sampler * chain, struct llama_sampler * smpl) { |
| auto * p = (llama_sampler_chain *) chain->ctx; |
| p->samplers.push_back({ |
| false, |
| smpl, |
| }); |
| } |
|
|
| struct llama_sampler * llama_sampler_chain_get(struct llama_sampler * chain, int32_t i) { |
| if (chain == nullptr) { |
| return nullptr; |
| } |
|
|
| if (chain->iface != &llama_sampler_chain_i) { |
| return nullptr; |
| } |
|
|
| if (i == -1) { |
| return chain; |
| } |
|
|
| const auto * p = (const llama_sampler_chain *) chain->ctx; |
|
|
| if (i < 0 || (size_t) i >= p->samplers.size()) { |
| return nullptr; |
| } |
|
|
| return p->samplers[i].ptr; |
| } |
|
|
| struct llama_sampler * llama_sampler_chain_remove(struct llama_sampler * chain, int32_t i) { |
| auto * p = (llama_sampler_chain *) chain->ctx; |
|
|
| if (i < 0 || (size_t) i >= p->samplers.size()) { |
| return nullptr; |
| } |
|
|
| auto * result = p->samplers[i].ptr; |
| p->samplers.erase(p->samplers.begin() + i); |
|
|
| return result; |
| } |
|
|
| int llama_sampler_chain_n(const struct llama_sampler * chain) { |
| const auto * p = (const llama_sampler_chain *) chain->ctx; |
|
|
| return p->samplers.size(); |
| } |
|
|
| // samplers |
|
|
| // greedy |
|
|
| struct llama_sampler_greedy : public llama_sampler_backend { |
| }; |
|
|
| static const char * llama_sampler_greedy_name(const struct llama_sampler * smpl) { |
| auto * sctx = (llama_sampler_greedy *) smpl->ctx; |
| return sctx->get_name(); |
| } |
|
|
| static void llama_sampler_greedy_reset(struct llama_sampler * smpl) { |
| auto * ctx = (llama_sampler_greedy *) smpl->ctx; |
| GGML_UNUSED(ctx); |
| } |
|
|
| static struct llama_sampler * llama_sampler_greedy_clone(const struct llama_sampler * smpl) { |
| const auto * ctx = (const llama_sampler_greedy *) smpl->ctx; |
| auto * result = llama_sampler_init_greedy(); |
|
|
| // copy the state |
| { |
| auto * result_ctx = (llama_sampler_greedy *) result->ctx; |
|
|
| GGML_UNUSED(ctx); |
| GGML_UNUSED(result_ctx); |
| } |
|
|
| return result; |
| } |
|
|
| static void llama_sampler_greedy_free(struct llama_sampler * smpl) { |
| delete (llama_sampler_greedy *) smpl->ctx; |
| } |
|
|
| static void llama_sampler_greedy_apply(struct llama_sampler * /*smpl*/, llama_token_data_array * cur_p) { |
| cur_p->selected = 0; |
| for (size_t i = 1; i < cur_p->size; ++i) { |
| if (cur_p->data[i].logit > cur_p->data[cur_p->selected].logit) { |
| cur_p->selected = i; |
| } |
| } |
| } |
|
|
| static bool llama_sampler_greedy_backend_init( |
| struct llama_sampler * smpl, |
| ggml_backend_buffer_type_t buft) { |
| auto * sctx = (llama_sampler_greedy *) smpl->ctx; |
|
|
| const bool res = llama_sampler_backend_support(smpl, buft); |
|
|
| sctx->init(res); |
|
|
| return res; |
| } |
|
|
| static void llama_sampler_greedy_backend_apply( |
| struct llama_sampler * smpl, |
| struct ggml_context * ctx, |
| struct ggml_cgraph * gf, |
| struct llama_sampler_data * data) { |
| GGML_UNUSED(gf); |
| GGML_UNUSED(smpl); |
|
|
| struct ggml_tensor * curl = ggml_argmax(ctx, data->logits); |
| ggml_set_name(curl, "greedy_argmax"); |
|
|
| data->sampled = curl; |
| } |
|
|
| static struct llama_sampler_i llama_sampler_greedy_i = { |
| llama_sampler_greedy_name, |
| nullptr, |
| llama_sampler_greedy_apply, |
| llama_sampler_greedy_reset, |
| llama_sampler_greedy_clone, |
| llama_sampler_greedy_free, |
| llama_sampler_greedy_backend_init, |
| nullptr, |
| llama_sampler_greedy_backend_apply, |
| nullptr, |
| }; |
|
|
| struct llama_sampler * llama_sampler_init_greedy() { |
| return llama_sampler_init( |
| &llama_sampler_greedy_i, |
| new llama_sampler_greedy { |
| ("greedy"), |
| } |
| ); |
| } |
|
|
| // dist |
|
|
| struct llama_sampler_dist : public llama_sampler_backend { |
| const uint32_t seed; |
| uint32_t seed_cur; |
|
|
| std::mt19937 rng; |
|
|
| ggml_tensor * inp_uniform; |
| }; |
|
|
| static const char * llama_sampler_dist_name(const struct llama_sampler * smpl) { |
| auto * sctx = (llama_sampler_dist *) smpl->ctx; |
| return sctx->get_name(); |
| } |
|
|
| static void llama_sampler_dist_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) { |
| auto * ctx = (llama_sampler_dist *) smpl->ctx; |
|
|
| // edge cases |
| if (cur_p->size == 0) { |
| cur_p->selected = -1; |
| return; |
| } |
|
|
| cur_p->selected = 0; |
|
|
| if (cur_p->size == 1) { |
| cur_p->data[0].p = 1.0f; |
| return; |
| } |
|
|
| // find max logit for numerical stability |
| float max_l = cur_p->data[0].logit; |
| if (!cur_p->sorted) { |
| for (size_t i = 1; i < cur_p->size; ++i) { |
| max_l = std::max(max_l, cur_p->data[i].logit); |
| } |
| } |
|
|
| // apply softmax to obtain the probabilities; keep the unnormalized running sum |
| double sum_cum = 0.0; |
| for (size_t i = 0; i < cur_p->size; ++i) { |
| float p = expf(cur_p->data[i].logit - max_l); |
| cur_p->data[i].p = p; |
| sum_cum += p; |
| } |
|
|
| #if 1 |
| // fused path: sample via the inverse CDF while normalizing the probs in the same pass |
| // (faster than normalizing first and sampling after, see the reference path below) |
| std::uniform_real_distribution<double> dist(0.0, 1.0); |
| const double rnd = dist(ctx->rng); |
|
|
| double sum_run = 0.0; |
| const double sum_tgt = sum_cum*rnd; |
|
|
| bool found = false; |
| for (size_t i = 0; i < cur_p->size; ++i) { |
| if (!found) { |
| // accumulate until the running sum reaches the target |
| sum_run += cur_p->data[i].p; |
| if (sum_run >= sum_tgt) { |
| cur_p->selected = i; |
| found = true; |
| } |
| } |
|
|
| // normalize |
| cur_p->data[i].p /= sum_cum; |
| } |
|
|
| // due to floating-point rounding the loop may fail to reach sum_tgt - fall back to the last token |
| assert(found); |
| if (!found) { |
| cur_p->selected = cur_p->size - 1; |
| } |
| #else |
| // reference path: normalize first, then sample with std::discrete_distribution |
| for (size_t i = 0; i < cur_p->size; ++i) { |
| cur_p->data[i].p /= sum_cum; |
| } |
|
|
| cur_p->selected = llama_sample_dist(cur_p, ctx->rng); |
| #endif |
| } |
|
|
| static void llama_sampler_dist_reset(struct llama_sampler * smpl) { |
| auto * ctx = (llama_sampler_dist *) smpl->ctx; |
| ctx->seed_cur = get_rng_seed(ctx->seed); |
| ctx->rng.seed(ctx->seed_cur); |
| } |
|
|
| static struct llama_sampler * llama_sampler_dist_clone(const struct llama_sampler * smpl) { |
| const auto * ctx = (const llama_sampler_dist *) smpl->ctx; |
| auto * result = llama_sampler_init_dist(ctx->seed); |
|
|
| // copy the state |
| { |
| auto * result_ctx = (llama_sampler_dist *) result->ctx; |
|
|
| result_ctx->rng = ctx->rng; |
| } |
|
|
| return result; |
| } |
|
|
| static void llama_sampler_dist_free(struct llama_sampler * smpl) { |
| delete (llama_sampler_dist *) smpl->ctx; |
| } |
|
|
| static bool llama_sampler_dist_backend_init( |
| struct llama_sampler * smpl, |
| ggml_backend_buffer_type_t buft) { |
| auto * sctx = (llama_sampler_dist *) smpl->ctx; |
|
|
| const bool res = llama_sampler_backend_support(smpl, buft); |
|
|
| sctx->init(res); |
|
|
| return res; |
| } |
|
|
| static void llama_sampler_dist_backend_apply( |
| struct llama_sampler * smpl, |
| struct ggml_context * ctx, |
| struct ggml_cgraph * gf, |
| struct llama_sampler_data * data) { |
| GGML_UNUSED(gf); |
|
|
| auto * sctx = (llama_sampler_dist *) smpl->ctx; |
|
|
| sctx->inp_uniform = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1); |
| ggml_set_name (sctx->inp_uniform, "uniform"); |
| ggml_set_input(sctx->inp_uniform); |
|
|
| struct ggml_tensor * probs = ggml_soft_max(ctx, data->logits); |
| ggml_set_name(probs, "dist_probs"); |
|
|
| struct ggml_tensor * cumsum = ggml_cumsum(ctx, probs); |
| ggml_set_name(cumsum, "dist_cumsum"); |
|
|
| // inverse CDF sampling on the graph: |
| // diff[i] = cumsum[i] - u, negative for every token before the sampled one |
| struct ggml_tensor * diff = ggml_sub(ctx, cumsum, sctx->inp_uniform); |
| ggml_set_name(diff, "dist_diff"); |
|
|
| // step(): 1.0 where cumsum > u (the sampled token and everything after it), 0.0 before it |
| struct ggml_tensor * mask = ggml_step(ctx, diff); |
| ggml_set_name(mask, "dist_mask"); |
|
|
| // the number of ones in the mask equals n - sampled_index |
| struct ggml_tensor * idxf = ggml_sum(ctx, mask); |
| ggml_set_name(idxf, "dist_index_f32"); |
|
|
| // recover the index: idx = n - count; cast to i32 for ggml_get_rows |
| struct ggml_tensor * idx = ggml_cast(ctx, ggml_scale_bias(ctx, idxf, -1.0f, mask->ne[0]), GGML_TYPE_I32); |
| ggml_set_name(idx, "dist_index_i32"); |
|
|
| // map the local index back to a vocab token id if earlier samplers narrowed the candidates |
| struct ggml_tensor * sampled_token = idx; |
| if (data->candidates != nullptr) { |
| struct ggml_tensor * candidates = ggml_reshape_2d(ctx, data->candidates, 1, ggml_nelements(data->candidates)); |
|
|
| sampled_token = ggml_get_rows(ctx, candidates, idx); |
| ggml_set_name(sampled_token, "dist_sampled_token"); |
| } |
|
|
| data->sampled = sampled_token; |
| data->probs = probs; |
| } |
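|
| // worked example of the graph above (illustrative numbers): |
| // probs = {0.2, 0.5, 0.3} -> cumsum = {0.2, 0.7, 1.0}; with u = 0.6: |
| // diff = {-0.4, 0.1, 0.4} -> step = {0, 1, 1} -> sum = 2 -> idx = 3 - 2 = 1, |
| // i.e. the token whose CDF interval (0.2, 0.7] contains u |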
|
|
| static void llama_sampler_dist_backend_set_input(struct llama_sampler * smpl) { |
| auto * sctx = (llama_sampler_dist *) smpl->ctx; |
|
|
| GGML_ASSERT(sctx->inp_uniform != nullptr); |
|
|
| // draw u ~ U(0,1) on the host and upload it as the graph input; |
| // the graph then selects the token whose CDF interval contains u (see backend_apply above) |
| std::uniform_real_distribution<float> dist(0.0f, 1.0f); |
| const float rnd = dist(sctx->rng); |
|
|
| ggml_backend_tensor_set(sctx->inp_uniform, &rnd, 0, sizeof(float)); |
| } |
|
|
| static struct llama_sampler_i llama_sampler_dist_i = { |
| llama_sampler_dist_name, |
| nullptr, |
| llama_sampler_dist_apply, |
| llama_sampler_dist_reset, |
| llama_sampler_dist_clone, |
| llama_sampler_dist_free, |
| llama_sampler_dist_backend_init, |
| nullptr, |
| llama_sampler_dist_backend_apply, |
| llama_sampler_dist_backend_set_input, |
| }; |
|
|
| struct llama_sampler * llama_sampler_init_dist(uint32_t seed) { |
| auto seed_cur = get_rng_seed(seed); |
| return llama_sampler_init( |
| &llama_sampler_dist_i, |
| new llama_sampler_dist { |
| ("dist"), |
| seed, |
| seed_cur, |
| std::mt19937(seed_cur), |
| nullptr, |
| } |
| ); |
| } |
|
|
| // top-k |
|
|
| struct llama_sampler_top_k : public llama_sampler_backend { |
| const int32_t k; |
| }; |
|
|
| static const char * llama_sampler_top_k_name(const struct llama_sampler * smpl) { |
| auto * sctx = (llama_sampler_top_k *) smpl->ctx; |
| return sctx->get_name(); |
| } |
|
|
| static void llama_sampler_top_k_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) { |
| auto * ctx = (llama_sampler_top_k *) smpl->ctx; |
| llama_sampler_top_k_impl(cur_p, ctx->k); |
| } |
|
|
| static struct llama_sampler * llama_sampler_top_k_clone(const struct llama_sampler * smpl) { |
| const auto * ctx = (const llama_sampler_top_k *) smpl->ctx; |
| return llama_sampler_init_top_k(ctx->k); |
| } |
|
|
| static void llama_sampler_top_k_free(struct llama_sampler * smpl) { |
| delete (llama_sampler_top_k *) smpl->ctx; |
| } |
|
|
| static bool llama_sampler_top_k_backend_init( |
| struct llama_sampler * smpl, |
| ggml_backend_buffer_type_t buft) { |
| auto * sctx = (llama_sampler_top_k *) smpl->ctx; |
|
|
| const bool res = llama_sampler_backend_support(smpl, buft); |
|
|
| sctx->init(res); |
|
|
| return res; |
| } |
|
|
| static void llama_sampler_top_k_backend_apply( |
| struct llama_sampler * smpl, |
| struct ggml_context * ctx, |
| struct ggml_cgraph * gf, |
| struct llama_sampler_data * data) { |
| auto * sctx = (llama_sampler_top_k *) smpl->ctx; |
|
|
| struct ggml_tensor * top_k = ggml_top_k(ctx, data->logits, sctx->k); |
| ggml_set_name(top_k, "top_k"); |
|
|
| if (data->candidates) { |
| struct ggml_tensor * candidates_rows = ggml_reshape_2d(ctx, data->candidates, 1, data->candidates->ne[0]); |
| data->candidates = ggml_get_rows(ctx, candidates_rows, top_k); |
| data->candidates = ggml_reshape_1d(ctx, data->candidates, sctx->k); |
| ggml_set_name(data->candidates, "top_k_candidates"); |
| } else { |
| data->candidates = top_k; |
| } |
|
|
| struct ggml_tensor * logits_rows = ggml_reshape_2d(ctx, data->logits, 1, data->logits->ne[0]); |
| struct ggml_tensor * top_k_rows = ggml_get_rows(ctx, logits_rows, top_k); |
| data->logits = ggml_reshape_1d(ctx, top_k_rows, sctx->k); |
| ggml_set_name(top_k_rows, "top_k_rows"); |
|
|
| GGML_UNUSED(gf); |
| } |
|
|
| static struct llama_sampler_i llama_sampler_top_k_i = { |
| llama_sampler_top_k_name, |
| nullptr, |
| llama_sampler_top_k_apply, |
| nullptr, |
| llama_sampler_top_k_clone, |
| llama_sampler_top_k_free, |
| llama_sampler_top_k_backend_init, |
| nullptr, |
| llama_sampler_top_k_backend_apply, |
| nullptr, |
| }; |
|
|
| struct llama_sampler * llama_sampler_init_top_k(int32_t k) { |
| const bool is_empty = (k <= 0); |
|
|
| if (is_empty) { |
| return llama_sampler_init_empty("?top-k"); |
| } |
|
|
| return llama_sampler_init( |
| &llama_sampler_top_k_i, |
| new llama_sampler_top_k { |
| ("top-k"), |
| k, |
| } |
| ); |
| } |
|
|
| // top-p |
|
|
| struct llama_sampler_top_p : public llama_sampler_backend { |
| const float p; |
| const size_t min_keep; |
|
|
| std::vector<llama_token_data> buf_sort; |
| }; |
|
|
| static const char * llama_sampler_top_p_name(const struct llama_sampler * smpl) { |
| auto * sctx = (llama_sampler_top_p *) smpl->ctx; |
| return sctx->get_name(); |
| } |
|
|
| static void llama_sampler_top_p_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) { |
| auto * ctx = (llama_sampler_top_p *) smpl->ctx; |
|
|
| if (ctx->p >= 1.0f) { |
| return; |
| } |
|
|
| llama_sampler_softmax_impl(cur_p, false); |
|
|
| size_t k = cur_p->size; |
| auto * pdata = cur_p->data; |
|
|
| auto & buf_sort = ctx->buf_sort; |
|
|
| // if not sorted, sort only a small top prefix first and extend it on demand |
| if (!cur_p->sorted && cur_p->size > 1024) { |
| k = std::min<size_t>(256, cur_p->size); |
| llama_token_data_array_partial_sort(*cur_p, k, buf_sort); |
| pdata = buf_sort.data(); |
| } else if (!cur_p->sorted) { |
| // small candidate set - sort in place |
| llama_token_data_array_partial_sort_inplace(cur_p, k); |
| } |
|
|
| // compute the cumulative probabilities |
| float cum_sum = 0.0f; |
| size_t last_idx = cur_p->size; |
|
|
| for (size_t i = 0; i < cur_p->size; ++i) { |
| cum_sum += pdata[i].p; |
|
|
| // check if the running sum is at least p, or if we have kept at least min_keep tokens |
| if (cum_sum >= ctx->p && i + 1 >= ctx->min_keep) { |
| last_idx = i + 1; |
| break; |
| } |
|
|
| // the sorted prefix is exhausted without reaching p - sort the whole array and continue |
| if (!cur_p->sorted && i == k - 1) { |
| k = cur_p->size; |
| llama_token_data_array_partial_sort(*cur_p, k, buf_sort); |
| pdata = buf_sort.data(); |
| } |
| } |
|
|
| // copy the sorted prefix back into cur_p |
| if (!cur_p->sorted) { |
| std::copy(buf_sort.data(), buf_sort.data() + last_idx, cur_p->data); |
| cur_p->sorted = true; |
| } |
|
|
| cur_p->size = last_idx; |
| } |
|
|
| static struct llama_sampler * llama_sampler_top_p_clone(const struct llama_sampler * smpl) { |
| const auto * ctx = (const llama_sampler_top_p *) smpl->ctx; |
| return llama_sampler_init_top_p(ctx->p, ctx->min_keep); |
| } |
|
|
| static void llama_sampler_top_p_free(struct llama_sampler * smpl) { |
| delete (llama_sampler_top_p *) smpl->ctx; |
| } |
|
|
| static bool llama_sampler_top_p_backend_init( |
| struct llama_sampler * smpl, |
| ggml_backend_buffer_type_t buft) { |
| auto * sctx = (llama_sampler_top_p *) smpl->ctx; |
|
|
| const bool res = llama_sampler_backend_support(smpl, buft); |
|
|
| sctx->init(res); |
|
|
| return res; |
| } |
|
|
| static void llama_sampler_top_p_backend_apply( |
| struct llama_sampler * smpl, |
| struct ggml_context * ctx, |
| struct ggml_cgraph * gf, |
| struct llama_sampler_data * data) { |
| auto * sctx = (llama_sampler_top_p *) smpl->ctx; |
|
|
| auto ggml_sort = [ctx](struct ggml_tensor * a, struct ggml_tensor * b) { |
| GGML_ASSERT(ggml_nrows(a) == 1); |
| struct ggml_tensor * a_reshaped = ggml_reshape_2d(ctx, a, 1, a->ne[0]); |
| struct ggml_tensor * a_sorted = ggml_get_rows(ctx, a_reshaped, b); |
| return ggml_reshape_1d(ctx, a_sorted, a->ne[0]); |
| }; |
|
|
| // sort the logits in descending order |
| struct ggml_tensor * sorted_idx = ggml_argsort(ctx, data->logits, GGML_SORT_ORDER_DESC); |
| ggml_set_name(sorted_idx, "top_p_sorted_idx"); |
|
|
| // gather the logits in sorted order |
| struct ggml_tensor * sorted_logits = ggml_sort(data->logits, sorted_idx); |
| ggml_set_name(sorted_logits, "top_p_sorted_logits"); |
|
|
| struct ggml_tensor * softmax = ggml_soft_max(ctx, sorted_logits); |
| ggml_set_name(softmax, "top_p_softmax"); |
|
|
| // keep the candidate ids aligned with the sorted order |
| if (data->candidates) { |
| data->candidates = ggml_sort(data->candidates, sorted_idx); |
| } else { |
| data->candidates = sorted_idx; |
| } |
| ggml_set_name(data->candidates, "top_p_candidates"); |
|
|
| // cumulative distribution over the sorted probabilities |
| struct ggml_tensor * cdf = ggml_cumsum(ctx, softmax); |
| ggml_set_name(cdf, "top_p_cdf"); |
|
|
| // cdf_scaled[i] = p - cdf[i]: positive while still inside the nucleus |
| struct ggml_tensor * cdf_scaled = ggml_scale_bias(ctx, cdf, -1.0f, sctx->p); |
| ggml_set_name(cdf_scaled, "top_p_cdf_scaled"); |
|
|
| struct ggml_tensor * mask = ggml_step(ctx, cdf_scaled); |
| ggml_set_name(mask, "top_p_mask"); |
|
|
| // count the tokens with cdf < p - this is the index of the token that crosses p |
| struct ggml_tensor * idxf = ggml_sum(ctx, mask); |
| ggml_set_name(idxf, "top_p_index_f32"); |
|
|
| // clamp to a valid index |
| idxf = ggml_clamp(ctx, idxf, 0.0f, mask->ne[0] - 1); |
|
|
| // a tensor holding 1.0, used as the source row for ggml_set_rows below |
| struct ggml_tensor * ones = ggml_scale_bias(ctx, idxf, 0.0f, 1.0f); |
| ggml_set_name(ones, "top_p_ones"); |
|
|
| // force-keep the token whose cumulative prob crosses p, so the kept mass reaches at least p |
| struct ggml_tensor * mask_reshaped = ggml_reshape_2d(ctx, mask, 1, mask->ne[0]); |
|
|
| mask_reshaped = ggml_set_rows(ctx, mask_reshaped, ones, ggml_cast(ctx, idxf, GGML_TYPE_I32)); |
| mask = ggml_reshape_1d(ctx, mask_reshaped, mask->ne[0]); |
|
|
| // log(mask): 0 for kept tokens, -inf for the filtered tail; |
| // adding it to the sorted logits masks out everything past the nucleus |
| struct ggml_tensor * top_p_bias = ggml_log(ctx, mask); |
| ggml_set_name(top_p_bias, "top_p_bias"); |
|
|
| data->logits = ggml_add(ctx, sorted_logits, top_p_bias); |
| ggml_set_name(data->logits, "top_p_logits"); |
|
|
| GGML_UNUSED(gf); |
| } |
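|
| // worked example (illustrative): sorted probs {0.5, 0.3, 0.2}, p = 0.7: |
| // cdf = {0.5, 0.8, 1.0}; p - cdf = {0.2, -0.1, -0.3}; step -> {1, 0, 0}; sum = 1, |
| // so token 1 is the one that crosses p; after set_rows the mask is {1, 1, 0} and |
| // log(mask) = {0, 0, -inf} keeps exactly the nucleus {0.5, 0.3} |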
|
|
| static struct llama_sampler_i llama_sampler_top_p_i = { |
| llama_sampler_top_p_name, |
| nullptr, |
| llama_sampler_top_p_apply, |
| nullptr, |
| llama_sampler_top_p_clone, |
| llama_sampler_top_p_free, |
| llama_sampler_top_p_backend_init, |
| nullptr, |
| llama_sampler_top_p_backend_apply, |
| nullptr, |
| }; |
|
|
| struct llama_sampler * llama_sampler_init_top_p(float p, size_t min_keep) { |
| const bool is_empty = p >= 1.0f; |
|
|
| if (is_empty) { |
| return llama_sampler_init_empty("?top-p"); |
| } |
|
|
| return llama_sampler_init( |
| &llama_sampler_top_p_i, |
| new llama_sampler_top_p { |
| ("top-p"), |
| p, |
| min_keep, |
| {}, |
| } |
| ); |
| } |
|
|
| // min-p |
|
|
| struct llama_sampler_min_p : public llama_sampler_backend { |
| const float p; |
| const size_t min_keep; |
| }; |
|
|
| static const char * llama_sampler_min_p_name(const struct llama_sampler * smpl) { |
| auto * sctx = (llama_sampler_min_p *) smpl->ctx; |
| return sctx->get_name(); |
| } |
|
|
| static void llama_sampler_min_p_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) { |
| auto * ctx = (llama_sampler_min_p *) smpl->ctx; |
|
|
| if (ctx->p <= 0.0f || !cur_p->size) { |
| return; |
| } |
|
|
| bool min_p_applied = false; |
|
|
| // if the candidates aren't sorted, try the unsorted implementation first |
| if (!cur_p->sorted) { |
| std::vector<llama_token_data> filtered_tokens; |
|
|
| float max_logit = -FLT_MAX; |
| for (size_t i = 0; i < cur_p->size; ++i) { |
| max_logit = std::max(max_logit, cur_p->data[i].logit); |
| } |
| const float min_logit = max_logit + logf(ctx->p); |
|
|
| for (size_t i = 0; i < cur_p->size; ++i) { |
| if (cur_p->data[i].logit >= min_logit) { |
| filtered_tokens.push_back(cur_p->data[i]); |
| } |
| } |
|
|
| // if we have enough values the operation was a success |
| if (!filtered_tokens.empty() && filtered_tokens.size() >= ctx->min_keep) { |
| std::copy(filtered_tokens.begin(), filtered_tokens.end(), cur_p->data); |
| cur_p->size = filtered_tokens.size(); |
| min_p_applied = true; |
| } |
| } |
|
|
| // if the candidates are sorted or the unsorted implementation failed, use this implementation |
| if (!min_p_applied) { |
| // sort the logits in descending order |
| if (!cur_p->sorted) { |
| llama_token_data_array_partial_sort_inplace(cur_p, cur_p->size); |
| } |
|
|
| const float min_logit = cur_p->data[0].logit + logf(ctx->p); // min logit for p_i >= p * p_max |
| size_t i = 1; // first token always matches |
|
|
| for (; i < cur_p->size; ++i) { |
| if (cur_p->data[i].logit < min_logit && i >= ctx->min_keep) { |
| break; |
| } |
| } |
|
|
| // resize the output vector to keep only the matching tokens |
| cur_p->size = i; |
| } |
| } |
|
|
| static struct llama_sampler * llama_sampler_min_p_clone(const struct llama_sampler * smpl) { |
| const auto * ctx = (const llama_sampler_min_p *) smpl->ctx; |
| return llama_sampler_init_min_p(ctx->p, ctx->min_keep); |
| } |
|
|
| static void llama_sampler_min_p_free(struct llama_sampler * smpl) { |
| delete (llama_sampler_min_p *) smpl->ctx; |
| } |
|
|
| static bool llama_sampler_min_p_backend_init( |
| struct llama_sampler * smpl, |
| ggml_backend_buffer_type_t buft) { |
| auto * sctx = (llama_sampler_min_p *) smpl->ctx; |
|
|
| const bool res = llama_sampler_backend_support(smpl, buft); |
|
|
| sctx->init(res); |
|
|
| return res; |
| } |
|
|
| static void llama_sampler_min_p_backend_apply( |
| struct llama_sampler * smpl, |
| struct ggml_context * ctx, |
| struct ggml_cgraph * gf, |
| struct llama_sampler_data * data) { |
| auto * sctx = (llama_sampler_min_p *) smpl->ctx; |
|
|
| struct ggml_tensor * max_idx = ggml_argmax(ctx, data->logits); |
| ggml_set_name(max_idx, "max_idx"); |
|
|
| struct ggml_tensor * logits_rows = ggml_reshape_2d(ctx, data->logits, 1, data->logits->ne[0]); |
| ggml_set_name(logits_rows, "logits_rows"); |
|
|
| struct ggml_tensor * max_logit = ggml_get_rows(ctx, logits_rows, max_idx); |
| ggml_set_name(max_logit, "max_logit"); |
|
|
| // threshold = max_logit + log(p), i.e. keep tokens with p_i >= p * p_max |
| struct ggml_tensor * threshold = ggml_scale_bias(ctx, max_logit, 1.0f, logf(sctx->p)); |
| ggml_set_name(threshold, "min_p_threshold"); |
|
|
| // logits below the threshold become negative after the subtraction |
| struct ggml_tensor * sub = ggml_sub(ctx, data->logits, threshold); |
|
|
| // step(): 1.0 for logits above the threshold, 0.0 otherwise |
| struct ggml_tensor * mask = ggml_step(ctx, sub); |
| ggml_set_name(mask, "min_p_mask"); |
|
|
| // log(mask): 0 for kept tokens, -inf for filtered ones |
| struct ggml_tensor * min_p_bias = ggml_log(ctx, mask); |
| ggml_set_name(min_p_bias, "min_p_bias"); |
|
|
| data->logits = ggml_add(ctx, data->logits, min_p_bias); |
| ggml_set_name(data->logits, "min_p_logits"); |
|
|
| GGML_UNUSED(gf); |
| } |
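|
| // worked example (illustrative): p = 0.1, max logit = 5.0 -> threshold = 5.0 + ln(0.1) = 2.70; |
| // a logit of 3.0 passes (mask 1, bias 0) while a logit of 2.0 is filtered (mask 0, bias -inf) |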
|
|
| static struct llama_sampler_i llama_sampler_min_p_i = { |
| llama_sampler_min_p_name, |
| nullptr, |
| llama_sampler_min_p_apply, |
| nullptr, |
| llama_sampler_min_p_clone, |
| llama_sampler_min_p_free, |
| llama_sampler_min_p_backend_init, |
| nullptr, |
| llama_sampler_min_p_backend_apply, |
| nullptr, |
| }; |
|
|
| struct llama_sampler * llama_sampler_init_min_p(float p, size_t min_keep) { |
| const bool is_empty = (p <= 0.0f); |
|
|
| if (is_empty) { |
| return llama_sampler_init_empty("?min-p"); |
| } |
|
|
| return llama_sampler_init( |
| &llama_sampler_min_p_i, |
| new llama_sampler_min_p { |
| ("min-p"), |
| p, |
| min_keep, |
| } |
| ); |
| } |
|
|
| // typical |
|
|
| struct llama_sampler_typical { |
| const float p; |
| const size_t min_keep; |
| }; |
|
|
| static const char * llama_sampler_typical_name(const struct llama_sampler * /*smpl*/) { |
| return "typical"; |
| } |
|
|
| static void llama_sampler_typical_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) { |
| auto * ctx = (llama_sampler_typical *) smpl->ctx; |
|
|
| // Reference implementation: |
| // https://github.com/huggingface/transformers/compare/main...cimeister:typical-sampling:typical-pr |
| if (ctx->p >= 1.0f) { |
| return; |
| } |
|
|
| // Compute the softmax of logits and calculate entropy |
| llama_sampler_softmax_impl(cur_p, true); |
|
|
| float entropy = 0.0f; |
| for (size_t i = 0; i < cur_p->size; ++i) { |
| entropy += -cur_p->data[i].p * logf(cur_p->data[i].p); |
| } |
|
|
| // Compute the absolute difference between negative log probability and entropy for each candidate |
| std::vector<float> shifted_scores; |
| for (size_t i = 0; i < cur_p->size; ++i) { |
| float shifted_score = fabsf(-logf(cur_p->data[i].p) - entropy); |
| shifted_scores.push_back(shifted_score); |
| } |
|
|
| // Sort tokens based on the shifted_scores and their corresponding indices |
| std::vector<size_t> indices(cur_p->size); |
| std::iota(indices.begin(), indices.end(), 0); |
|
|
| std::sort(indices.begin(), indices.end(), [&](size_t a, size_t b) { |
| return shifted_scores[a] < shifted_scores[b]; |
| }); |
|
|
| // Compute the cumulative probabilities |
| float cum_sum = 0.0f; |
| size_t last_idx = indices.size(); |
|
|
| for (size_t i = 0; i < indices.size(); ++i) { |
| size_t idx = indices[i]; |
| cum_sum += cur_p->data[idx].p; |
|
|
| // Check if the running sum is greater than typical or if we have kept at least min_keep tokens |
| if (cum_sum > ctx->p && (ctx->min_keep == 0 || i >= ctx->min_keep - 1)) { |
| last_idx = i + 1; |
| break; |
| } |
| } |
|
|
| // Resize the output vector to keep only the locally typical tokens |
| std::vector<llama_token_data> cur_p_new; |
| for (size_t i = 0; i < last_idx; ++i) { |
| size_t idx = indices[i]; |
| cur_p_new.push_back(cur_p->data[idx]); |
| } |
|
|
| // Replace the data in cur_p with the cur_p_new data |
| std::copy(cur_p_new.begin(), cur_p_new.end(), cur_p->data); |
| cur_p->size = cur_p_new.size(); |
| cur_p->sorted = false; |
| } |
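|
| // intuition (sketch): a token is "locally typical" when its surprise -log p is close to the |
| // distribution's entropy H; e.g. with H = 2.0, a token with p = e^-2 has shifted score 0 and |
| // sorts first, while both very likely and very unlikely tokens rank later |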
|
|
| static struct llama_sampler * llama_sampler_typical_clone(const struct llama_sampler * smpl) { |
| const auto * ctx = (const llama_sampler_typical *) smpl->ctx; |
| return llama_sampler_init_typical(ctx->p, ctx->min_keep); |
| } |
|
|
| static void llama_sampler_typical_free(struct llama_sampler * smpl) { |
| delete (llama_sampler_typical *) smpl->ctx; |
| } |
|
|
| static struct llama_sampler_i llama_sampler_typical_i = { |
| llama_sampler_typical_name, |
| nullptr, |
| llama_sampler_typical_apply, |
| nullptr, |
| llama_sampler_typical_clone, |
| llama_sampler_typical_free, |
| nullptr, |
| nullptr, |
| nullptr, |
| nullptr, |
| }; |
|
|
| struct llama_sampler * llama_sampler_init_typical(float p, size_t min_keep) { |
| const bool is_empty = (p >= 1.0f); |
|
|
| if (is_empty) { |
| return llama_sampler_init_empty("?typical"); |
| } |
|
|
| return llama_sampler_init( |
| &llama_sampler_typical_i, |
| new llama_sampler_typical { |
| p, |
| min_keep, |
| } |
| ); |
| } |
|
|
| // temp |
|
|
| struct llama_sampler_temp : public llama_sampler_backend { |
| const float temp; |
| }; |
|
|
| static const char * llama_sampler_temp_name(const struct llama_sampler * smpl) { |
| auto * sctx = (llama_sampler_temp *) smpl->ctx; |
| return sctx->get_name(); |
| } |
|
|
| static void llama_sampler_temp_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) { |
| const auto * ctx = (llama_sampler_temp *) smpl->ctx; |
|
|
| llama_sampler_temp_impl(cur_p, ctx->temp); |
| } |
|
|
| static struct llama_sampler * llama_sampler_temp_clone(const struct llama_sampler * smpl) { |
| const auto * ctx = (const llama_sampler_temp *) smpl->ctx; |
| return llama_sampler_init_temp(ctx->temp); |
| } |
|
|
| static void llama_sampler_temp_free(struct llama_sampler * smpl) { |
| delete (llama_sampler_temp *) smpl->ctx; |
| } |
|
|
| static void llama_sampler_backend_temp_sampling( |
| struct ggml_context * ctx, |
| struct ggml_cgraph * gf, |
| struct llama_sampler_data * data, |
| float temp) { |
| if (temp <= 0.0f) { |
| // temp <= 0.0: select the token with the highest logit (greedy) |
| struct ggml_tensor * max_idx = ggml_argmax(ctx, data->logits); |
| ggml_set_name(max_idx, "temp_max_idx"); |
|
|
| if (data->candidates) { |
| struct ggml_tensor * candidates_rows = ggml_reshape_2d(ctx, data->candidates, 1, data->candidates->ne[0]); |
| data->candidates = ggml_get_rows(ctx, candidates_rows, max_idx); |
| } else { |
| data->candidates = max_idx; |
| } |
|
|
| struct ggml_tensor * logits_rows = ggml_reshape_2d(ctx, data->logits, 1, data->logits->ne[0]); |
| data->logits = ggml_get_rows(ctx, logits_rows, max_idx); |
|
|
| return; |
| } |
|
|
| data->logits = ggml_scale(ctx, data->logits, 1.0f / temp); |
|
|
| GGML_UNUSED(gf); |
| } |
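|
| // numeric sketch: logits {2.0, 1.0} at temp = 0.5 become {4.0, 2.0}, sharpening the |
| // softmax ratio from e^1 = 2.7 to e^2 = 7.4; temp > 1 flattens the distribution instead |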
|
|
| static bool llama_sampler_temp_backend_init( |
| struct llama_sampler * smpl, |
| ggml_backend_buffer_type_t buft) { |
| auto * sctx = (llama_sampler_temp *) smpl->ctx; |
|
|
| const bool res = llama_sampler_backend_support(smpl, buft); |
|
|
| sctx->init(res); |
|
|
| return res; |
| } |
|
|
| static void llama_sampler_temp_backend_apply( |
| struct llama_sampler * smpl, |
| struct ggml_context * ctx, |
| struct ggml_cgraph * gf, |
| struct llama_sampler_data * data) { |
| auto * sctx = (llama_sampler_temp *) smpl->ctx; |
| llama_sampler_backend_temp_sampling(ctx, gf, data, sctx->temp); |
| } |
|
|
| static struct llama_sampler_i llama_sampler_temp_i = { |
| llama_sampler_temp_name, |
| nullptr, |
| llama_sampler_temp_apply, |
| nullptr, |
| llama_sampler_temp_clone, |
| llama_sampler_temp_free, |
| llama_sampler_temp_backend_init, |
| nullptr, |
| llama_sampler_temp_backend_apply, |
| nullptr, |
| }; |
|
|
| struct llama_sampler * llama_sampler_init_temp(float temp) { |
| const bool is_empty = temp == 1.0f; |
|
|
| if (is_empty) { |
| return llama_sampler_init_empty("?temp"); |
| } |
|
|
| return llama_sampler_init( |
| &llama_sampler_temp_i, |
| new llama_sampler_temp { |
| ("temp"), |
| temp, |
| } |
| ); |
| } |
|
|
| // temp-ext |
|
|
| struct llama_sampler_temp_ext : public llama_sampler_backend { |
| const float temp; |
| const float delta; |
| const float exponent; |
| }; |
|
|
| static const char * llama_sampler_temp_ext_name(const struct llama_sampler * smpl) { |
| auto * sctx = (llama_sampler_temp_ext *) smpl->ctx; |
| return sctx->get_name(); |
| } |
|
|
| static void llama_sampler_temp_ext_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) { |
| auto * ctx = (llama_sampler_temp_ext *) smpl->ctx; |
| if (ctx->delta > 0) { |
| const float min_temp = std::max(0.0f, ctx->temp - ctx->delta); |
| const float max_temp = ctx->temp + ctx->delta; |
|
|
| float exponent_val = ctx->exponent; |
|
|
| // no need to do anything if there is only one (or zero) candidates |
| if (cur_p->size <= 1) { |
| return; |
| } |
|
|
| // Calculate maximum possible entropy |
| float max_entropy = -logf(1.0f / cur_p->size); |
|
|
| llama_sampler_softmax_impl(cur_p, true); |
|
|
| // Calculate entropy of the softmax probabilities |
| float entropy = 0.0f; |
| for (size_t i = 0; i < cur_p->size; ++i) { |
| float prob = cur_p->data[i].p; |
| if (prob > 0.0f) { |
| entropy -= prob * logf(prob); |
| } |
| } |
|
|
| // Normalize the entropy (max_entropy cannot be 0 here because we checked cur_p->size != 1 above) |
| float normalized_entropy = entropy / max_entropy; |
|
|
| // Map the normalized entropy to the desired temperature range using the power function |
| float dyn_temp = min_temp + (max_temp - min_temp) * powf(normalized_entropy, exponent_val); |
|
|
| #ifdef DEBUG |
| LLAMA_LOG_INFO("Your text maxtemp value is: %f\n", max_temp); |
| LLAMA_LOG_INFO("Entropy: %f\n", entropy); |
| LLAMA_LOG_INFO("Max Possible Entropy: %f\n", max_entropy); |
| LLAMA_LOG_INFO("Normalized Entropy: %f\n", normalized_entropy); |
| LLAMA_LOG_INFO("Exponent: %f\n", exponent_val); |
| LLAMA_LOG_INFO("Dynamic Temperature (dyn_temp): %f\n", dyn_temp); |
| #endif |
|
|
| // Apply the dynamically calculated temperature scaling |
| llama_sampler_temp_impl(cur_p, dyn_temp); |
|
|
| // Re-compute softmax probabilities after scaling logits with dynamic temperature |
| const double max_l_double = cur_p->data[0].logit; |
|
|
| double cum_sum_double = 0.0; |
| for (size_t i = 0; i < cur_p->size; ++i) { |
| double p = exp(cur_p->data[i].logit - max_l_double); |
| cur_p->data[i].p = p; |
| cum_sum_double += p; |
| } |
|
|
| for (size_t i = 0; i < cur_p->size; ++i) { |
| cur_p->data[i].p /= cum_sum_double; |
| } |
|
|
| #ifdef DEBUG |
| // Print the updated top 25 probabilities after temperature scaling |
| LLAMA_LOG_INFO("\nUpdated Top 25 Probabilities After Dynamic Temperature Scaling (in percentages):\n"); |
| for (size_t i = 0; i < 25 && i < cur_p->size; ++i) { |
| LLAMA_LOG_INFO("Token %zu: %f%%\n", i + 1, cur_p->data[i].p * 100.0f); |
| } |
| #endif |
| } else { |
| llama_sampler_temp_impl(cur_p, ctx->temp); |
| } |
| } |
|
|
| static struct llama_sampler * llama_sampler_temp_ext_clone(const struct llama_sampler * smpl) { |
| const auto * ctx = (const llama_sampler_temp_ext *) smpl->ctx; |
| return llama_sampler_init_temp_ext(ctx->temp, ctx->delta, ctx->exponent); |
| } |
|
|
| static void llama_sampler_temp_ext_free(struct llama_sampler * smpl) { |
| delete (llama_sampler_temp_ext *) smpl->ctx; |
| } |
|
|
| static bool llama_sampler_temp_ext_backend_init( |
| struct llama_sampler * smpl, |
| ggml_backend_buffer_type_t buft) { |
| auto * sctx = (llama_sampler_temp_ext *) smpl->ctx; |
|
|
| const bool res = llama_sampler_backend_support(smpl, buft); |
|
|
| sctx->init(res); |
|
|
| return res; |
| } |
|
|
| static void llama_sampler_temp_ext_backend_apply( |
| struct llama_sampler * smpl, |
| struct ggml_context * ctx, |
| struct ggml_cgraph * gf, |
| struct llama_sampler_data * data) { |
| auto * sctx = (llama_sampler_temp_ext *) smpl->ctx; |
|
|
| // delta <= 0 disables dynamic scaling and temp <= 0 is the greedy path - use the static implementation |
| if (sctx->delta <= 0.0f || sctx->temp <= 0.0f) { |
| llama_sampler_backend_temp_sampling(ctx, gf, data, sctx->temp); |
| return; |
| } |
|
|
| // entropy-based dynamic temperature, mirroring the CPU implementation above |
| const float min_temp = std::max(0.0f, sctx->temp - sctx->delta); |
| const float max_temp = sctx->temp + sctx->delta; |
| const float max_entropy = logf(data->logits->ne[0]); |
|
|
| // softmax to obtain the probabilities |
| struct ggml_tensor * probs = ggml_soft_max(ctx, data->logits); |
| ggml_set_name(probs, "temp_ext_softmax_probs"); |
|
|
| // clamp to avoid log(0) on zero probabilities |
| struct ggml_tensor * probs_clamped = ggml_clamp(ctx, probs, 1e-10f, 1.0f); |
| ggml_set_name(probs_clamped, "temp_ext_probs_clamped"); |
|
|
| // entropy = -sum(p * log(p)) |
| struct ggml_tensor * log_probs = ggml_log(ctx, probs_clamped); |
| struct ggml_tensor * p_log_p = ggml_mul(ctx, probs_clamped, log_probs); |
| struct ggml_tensor * sum_p_log_p = ggml_sum(ctx, p_log_p); |
| struct ggml_tensor * entropy = ggml_scale(ctx, sum_p_log_p, -1.0f); |
| ggml_set_name(log_probs, "temp_ext_log_probs"); |
| ggml_set_name(p_log_p, "temp_ext_p_log_p"); |
| ggml_set_name(sum_p_log_p, "temp_ext_sum_p_log_p"); |
| ggml_set_name(entropy, "temp_ext_entropy"); |
|
|
| // normalize by the maximum possible entropy, log(n) |
| struct ggml_tensor * norm_entropy = ggml_scale(ctx, entropy, 1.0f / max_entropy); |
| ggml_set_name(norm_entropy, "temp_ext_norm_entropy"); |
|
|
| // ggml has no pow(x, e) op, so compute norm_entropy^exponent as exp(exponent * log(norm_entropy)) |
| struct ggml_tensor * log_norm_entropy = ggml_log(ctx, norm_entropy); |
| struct ggml_tensor * scaled_log = ggml_scale(ctx, log_norm_entropy, sctx->exponent); |
| struct ggml_tensor * pow_entropy = ggml_exp(ctx, scaled_log); |
| // dyn_temp = min_temp + (max_temp - min_temp) * norm_entropy^exponent |
| struct ggml_tensor * dyn_temp = ggml_scale_bias(ctx, pow_entropy, max_temp - min_temp, min_temp); |
| ggml_set_name(log_norm_entropy, "temp_ext_log_norm_entropy"); |
| ggml_set_name(scaled_log, "temp_ext_scaled_log"); |
| ggml_set_name(pow_entropy, "temp_ext_pow_entropy"); |
| ggml_set_name(dyn_temp, "temp_ext_dyn_temp"); |
|
|
| // scale the logits by the dynamic temperature |
| struct ggml_tensor * scaled_logits = ggml_div(ctx, data->logits, dyn_temp); |
| ggml_set_name(scaled_logits, "temp_ext_scaled_logits"); |
|
|
| data->logits = scaled_logits; |
| } |
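|
| // numeric sketch: a uniform distribution has norm_entropy = 1 -> dyn_temp = max_temp, |
| // while a sharply peaked one has norm_entropy -> 0 -> dyn_temp -> min_temp |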
|
|
| static struct llama_sampler_i llama_sampler_temp_ext_i = { |
| llama_sampler_temp_ext_name, |
| nullptr, |
| llama_sampler_temp_ext_apply, |
| nullptr, |
| llama_sampler_temp_ext_clone, |
| llama_sampler_temp_ext_free, |
| llama_sampler_temp_ext_backend_init, |
| nullptr, |
| llama_sampler_temp_ext_backend_apply, |
| nullptr, |
| }; |
|
|
| struct llama_sampler * llama_sampler_init_temp_ext(float temp, float delta, float exponent) { |
| const bool is_empty = temp == 1.0f && delta <= 0.0f; |
|
|
| if (is_empty) { |
| return llama_sampler_init_empty("?temp-ext"); |
| } |
|
|
| auto * res = llama_sampler_init( |
| &llama_sampler_temp_ext_i, |
| new llama_sampler_temp_ext { |
| ("temp-ext"), |
| temp, |
| delta, |
| exponent, |
| } |
| ); |
|
|
| return res; |
| } |
|
|
| // xtc |
|
|
| struct llama_sampler_xtc { |
| const float probability; |
| const float threshold; |
| const size_t min_keep; |
|
|
| const uint32_t seed; |
| uint32_t seed_cur; |
|
|
| std::mt19937 rng; |
| }; |
|
|
| static const char * llama_sampler_xtc_name(const struct llama_sampler * /*smpl*/) { |
| return "xtc"; |
| } |
|
|
| static void llama_sample_xtc_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) { |
| auto * ctx = (llama_sampler_xtc *) smpl->ctx; |
|
|
| if (ctx->probability <= 0.0f |
| || ctx->threshold > 0.5f |
| || cur_p->size < 2) { |
| return; |
| } |
|
|
| std::uniform_real_distribution<float> distribution(0.0f, 1.0f); |
| float chance = distribution(ctx->rng); |
| if (chance > ctx->probability) { |
| return; |
| } |
|
|
| llama_sampler_softmax_impl(cur_p, true); |
|
|
| int pos_last = 0; |
|
|
| for (size_t i = 0; i < cur_p->size; ++i) { |
| if (cur_p->data[i].p >= ctx->threshold) { |
| pos_last = i; |
| } else { |
| break; |
| } |
| } |
|
|
| if (cur_p->size - pos_last >= ctx->min_keep && pos_last > 0) { |
| cur_p->data += pos_last; |
| cur_p->size -= pos_last; |
| } |
| } |
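|
| // worked example (illustrative): probs {0.4, 0.3, 0.2, 0.1}, threshold = 0.25, min_keep = 1: |
| // tokens 0 and 1 are above the threshold -> pos_last = 1 -> the top token is cut, |
| // leaving {0.3, 0.2, 0.1} ("exclude top choices") |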
|
|
| static struct llama_sampler * llama_sampler_xtc_clone(const struct llama_sampler * smpl) { |
| const auto * ctx = (const llama_sampler_xtc *) smpl->ctx; |
| auto * result = llama_sampler_init_xtc(ctx->probability, ctx->threshold, ctx->min_keep, ctx->seed); |
|
|
| // copy the state |
| { |
| auto * result_ctx = (llama_sampler_xtc *) result->ctx; |
|
|
| result_ctx->rng = ctx->rng; |
| } |
|
|
| return result; |
| } |
|
|
| static void llama_sampler_xtc_free(struct llama_sampler * smpl) { |
| delete (llama_sampler_xtc *) smpl->ctx; |
| } |
|
|
| static void llama_sampler_xtc_reset(struct llama_sampler * smpl) { |
| auto * ctx = (llama_sampler_xtc *) smpl->ctx; |
| ctx->seed_cur = get_rng_seed(ctx->seed); |
| ctx->rng.seed(ctx->seed_cur); |
| } |
|
|
| static struct llama_sampler_i llama_sampler_xtc_i = { |
| llama_sampler_xtc_name, |
| nullptr, |
| llama_sample_xtc_apply, |
| llama_sampler_xtc_reset, |
| llama_sampler_xtc_clone, |
| llama_sampler_xtc_free, |
| nullptr, |
| nullptr, |
| nullptr, |
| nullptr, |
| }; |
|
|
| struct llama_sampler * llama_sampler_init_xtc(float p, float t, size_t min_keep, uint32_t seed) { |
| const bool is_empty = (p <= 0.0f || t > 0.5f); |
|
|
| if (is_empty) { |
| return llama_sampler_init_empty("?xtc"); |
| } |
|
|
| const auto seed_cur = get_rng_seed(seed); |
|
|
| return llama_sampler_init( |
| &llama_sampler_xtc_i, |
| new llama_sampler_xtc { |
| p, |
| t, |
| min_keep, |
| seed, |
| seed_cur, |
| std::mt19937(seed_cur), |
| } |
| ); |
| } |
|
|
| |
|
|
| struct llama_sampler_mirostat { |
| const int32_t n_vocab; |
|
|
| const uint32_t seed; |
| uint32_t seed_cur; |
|
|
| const float tau; |
| const float eta; |
|
|
| const int32_t m; |
|
|
| float mu; |
|
|
| std::mt19937 rng; |
| }; |
|
|
| static const char * llama_sampler_mirostat_name(const struct llama_sampler * ) { |
| return "mirostat"; |
| } |
|
|
| static void llama_sampler_mirostat_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) { |
| auto * ctx = (llama_sampler_mirostat *) smpl->ctx; |
|
|
| llama_sampler_softmax_impl(cur_p, true); |
|
|
| |
    // estimate s_hat (the Zipf exponent) from the most probable m tokens
    float s_hat = 0.0f;
    float sum_ti_bi = 0.0f;
    float sum_ti_sq = 0.0f;
| for (size_t i = 0; i < size_t(ctx->m - 1) && i < cur_p->size - 1; ++i) { |
| float t_i = logf(float(i + 2) / float(i + 1)); |
| float b_i = logf(cur_p->data[i].p / cur_p->data[i + 1].p); |
| sum_ti_bi += t_i * b_i; |
| sum_ti_sq += t_i * t_i; |
| } |
| s_hat = sum_ti_bi / sum_ti_sq; |
|
|
| |
    // compute k from the estimated s_hat and the target surprise value
    float epsilon_hat = s_hat - 1;
    float k = powf((epsilon_hat * powf(2, ctx->mu)) / (1 - powf(ctx->n_vocab, -epsilon_hat)), 1 / s_hat);
|
|
| llama_sampler_top_k_impl(cur_p, std::max(int(k), 1)); |
|
|
| llama_sampler_softmax_impl(cur_p, true); |
|
|
| const int idx = llama_sample_dist(cur_p, ctx->rng); |
|
|
| cur_p->selected = idx; |
|
|
| float observed_surprise = -log2f(cur_p->data[idx].p); |
| float e = observed_surprise - ctx->tau; |
|
|
| |
    // update mu using the learning rate and error
    ctx->mu = ctx->mu - ctx->eta * e;
| } |
|
|
| static struct llama_sampler * llama_sampler_mirostat_clone(const struct llama_sampler * smpl) { |
| const auto * ctx = (const llama_sampler_mirostat *) smpl->ctx; |
| auto * result = llama_sampler_init_mirostat(ctx->n_vocab, ctx->seed, ctx->tau, ctx->eta, ctx->m); |
|
|
| |
| { |
        auto * result_ctx = (llama_sampler_mirostat *) result->ctx;
|
|
| result_ctx->mu = ctx->mu; |
| result_ctx->rng = ctx->rng; |
| } |
|
|
| return result; |
| } |
|
|
| static void llama_sampler_mirostat_reset(struct llama_sampler * smpl) { |
| auto * ctx = (llama_sampler_mirostat *) smpl->ctx; |
| ctx->mu = 2.0f*ctx->tau; |
| ctx->seed_cur = get_rng_seed(ctx->seed); |
| ctx->rng.seed(ctx->seed_cur); |
| } |
|
|
| static void llama_sampler_mirostat_free(struct llama_sampler * smpl) { |
| delete (llama_sampler_mirostat *) smpl->ctx; |
| } |
|
|
| static struct llama_sampler_i llama_sampler_mirostat_i = { |
| llama_sampler_mirostat_name, |
| nullptr, |
| llama_sampler_mirostat_apply, |
| llama_sampler_mirostat_reset, |
| llama_sampler_mirostat_clone, |
| llama_sampler_mirostat_free, |
| nullptr, |
| nullptr, |
| nullptr, |
| nullptr, |
| }; |
|
|
| struct llama_sampler * llama_sampler_init_mirostat(int32_t n_vocab, uint32_t seed, float tau, float eta, int32_t m) { |
| const auto seed_cur = get_rng_seed(seed); |
|
|
| return llama_sampler_init( |
| &llama_sampler_mirostat_i, |
| new llama_sampler_mirostat { |
| n_vocab, |
| seed, |
| seed_cur, |
| tau, |
| eta, |
| m, |
| 2.0f*tau, |
| std::mt19937(seed_cur), |
| } |
| ); |
| } |
|
|
| |
|
|
| struct llama_sampler_mirostat_v2 { |
| const uint32_t seed; |
| uint32_t seed_cur; |
|
|
| const float tau; |
| const float eta; |
|
|
| float mu; |
|
|
| std::mt19937 rng; |
| }; |
|
|
| static const char * llama_sampler_mirostat_v2_name(const struct llama_sampler * ) { |
| return "mirostat-v2"; |
| } |
|
|
| static void llama_sampler_mirostat_v2_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) { |
| auto * ctx = (llama_sampler_mirostat_v2 *) smpl->ctx; |
|
|
| llama_sampler_softmax_impl(cur_p, true); |
|
|
| |
    // truncate the candidates whose surprise (-log2 p) exceeds mu
    cur_p->size = std::distance(cur_p->data, std::find_if(cur_p->data, cur_p->data + cur_p->size, [&](const llama_token_data & candidate) {
        return -log2f(candidate.p) > ctx->mu;
    }));
|
|
| if (cur_p->size == 0) { |
| cur_p->size = 1; |
| } |
|
|
| |
    // normalize the probabilities of the remaining candidates
    llama_sampler_softmax_impl(cur_p, true);
|
|
| const int idx = llama_sample_dist(cur_p, ctx->rng); |
|
|
| cur_p->selected = idx; |
|
|
| float observed_surprise = -log2f(cur_p->data[idx].p); |
| float e = observed_surprise - ctx->tau; |
|
|
| |
    // update mu using the learning rate and error
    ctx->mu = ctx->mu - ctx->eta * e;
| } |
|
|
| static void llama_sampler_mirostat_v2_reset(struct llama_sampler * smpl) { |
| auto * ctx = (llama_sampler_mirostat_v2 *) smpl->ctx; |
| ctx->mu = 2.0f*ctx->tau; |
| ctx->seed_cur = get_rng_seed(ctx->seed); |
| ctx->rng.seed(ctx->seed_cur); |
| } |
|
|
| static struct llama_sampler * llama_sampler_mirostat_v2_clone(const struct llama_sampler * smpl) { |
| const auto * ctx = (const llama_sampler_mirostat_v2 *) smpl->ctx; |
|
|
| auto * result = llama_sampler_init_mirostat_v2(ctx->seed, ctx->tau, ctx->eta); |
|
|
| |
| { |
| auto * result_ctx = (llama_sampler_mirostat_v2 *) result->ctx; |
|
|
| result_ctx->mu = ctx->mu; |
| result_ctx->rng = ctx->rng; |
| } |
|
|
| return result; |
| } |
|
|
| static void llama_sampler_mirostat_v2_free(struct llama_sampler * smpl) { |
| delete (llama_sampler_mirostat_v2 *) smpl->ctx; |
| } |
|
|
| static struct llama_sampler_i llama_sampler_mirostat_v2_i = { |
| llama_sampler_mirostat_v2_name, |
| nullptr, |
| llama_sampler_mirostat_v2_apply, |
| llama_sampler_mirostat_v2_reset, |
| llama_sampler_mirostat_v2_clone, |
| llama_sampler_mirostat_v2_free, |
| nullptr, |
| nullptr, |
| nullptr, |
| nullptr, |
| }; |
|
|
| struct llama_sampler * llama_sampler_init_mirostat_v2(uint32_t seed, float tau, float eta) { |
| auto seed_cur = get_rng_seed(seed); |
| return llama_sampler_init( |
| &llama_sampler_mirostat_v2_i, |
| new llama_sampler_mirostat_v2 { |
| seed, |
| seed_cur, |
| tau, |
| eta, |
| 2.0f*tau, |
| std::mt19937(seed_cur), |
| } |
| ); |
| } |
|
|
| |
|
|
| struct llama_sampler_grammar { |
| const struct llama_vocab * vocab; |
|
|
| std::string grammar_str; |
| std::string grammar_root; |
|
|
| struct llama_grammar * grammar; |
| }; |
|
|
| static const char * llama_sampler_grammar_name(const struct llama_sampler * ) { |
| return "grammar"; |
| } |
|
|
| static void llama_sampler_grammar_accept_impl(struct llama_sampler * smpl, llama_token token) { |
| auto * ctx = (llama_sampler_grammar *) smpl->ctx; |
| if (ctx->grammar) { |
| llama_grammar_accept_impl(*ctx->grammar, token); |
| } |
| } |
|
|
| static void llama_sampler_grammar_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) { |
| auto * ctx = (llama_sampler_grammar *) smpl->ctx; |
| if (ctx->grammar) { |
| llama_grammar_apply_impl(*ctx->grammar, cur_p); |
| } |
| } |
|
|
| |
| static struct llama_sampler * llama_sampler_init_grammar_impl( |
| const struct llama_vocab * vocab, |
| const char * grammar_str, |
| const char * grammar_root, |
| bool lazy, |
| const char ** trigger_words, |
| size_t num_trigger_words, |
| const llama_token * trigger_tokens, |
| size_t num_trigger_tokens, |
| const char ** trigger_patterns, |
| size_t num_trigger_patterns); |
|
|
| static void llama_sampler_grammar_reset(struct llama_sampler * smpl) { |
| auto * ctx = (llama_sampler_grammar *) smpl->ctx; |
| if (!ctx->grammar) { |
| return; |
| } |
|
|
| std::vector<const char *> trigger_patterns_c; |
| trigger_patterns_c.reserve(ctx->grammar->trigger_patterns.size()); |
| for (auto & trigger_pattern : ctx->grammar->trigger_patterns) { |
| trigger_patterns_c.push_back(trigger_pattern.pattern.c_str()); |
| } |
|
|
| auto * grammar_new = llama_grammar_init_impl(ctx->grammar->vocab, ctx->grammar_str.c_str(), ctx->grammar_root.c_str(), |
| ctx->grammar->lazy, trigger_patterns_c.data(), trigger_patterns_c.size(), |
| ctx->grammar->trigger_tokens.data(), ctx->grammar->trigger_tokens.size()); |
|
|
| llama_grammar_free_impl(ctx->grammar); |
| ctx->grammar = grammar_new; |
| } |
|
|
| static struct llama_sampler * llama_sampler_grammar_clone(const struct llama_sampler * smpl) { |
| const auto * ctx = (const llama_sampler_grammar *) smpl->ctx; |
|
|
| auto * result = llama_sampler_init_grammar_impl(ctx->vocab, nullptr, nullptr, false, nullptr, 0, nullptr, 0, nullptr, 0); |
| GGML_ASSERT(result); |
|
|
| |
| { |
| auto * result_ctx = (llama_sampler_grammar *) result->ctx; |
|
|
| if (ctx->grammar) { |
| result_ctx->grammar_str = ctx->grammar_str; |
| result_ctx->grammar_root = ctx->grammar_root; |
|
|
| result_ctx->grammar = llama_grammar_clone_impl(*ctx->grammar); |
| } |
| } |
|
|
| return result; |
| } |
|
|
| static void llama_sampler_grammar_free(struct llama_sampler * smpl) { |
| const auto * ctx = (llama_sampler_grammar *) smpl->ctx; |
|
|
| if (ctx->grammar) { |
| llama_grammar_free_impl(ctx->grammar); |
| } |
|
|
| delete ctx; |
| } |
|
|
| static struct llama_sampler_i llama_sampler_grammar_i = { |
| llama_sampler_grammar_name, |
| llama_sampler_grammar_accept_impl, |
| llama_sampler_grammar_apply, |
| llama_sampler_grammar_reset, |
| llama_sampler_grammar_clone, |
| llama_sampler_grammar_free, |
| nullptr, |
| nullptr, |
| nullptr, |
| nullptr, |
| }; |
|
|
| static struct llama_sampler * llama_sampler_init_grammar_impl( |
| const struct llama_vocab * vocab, |
| const char * grammar_str, |
| const char * grammar_root, |
| bool lazy, |
| const char ** trigger_words, |
| size_t num_trigger_words, |
| const llama_token * trigger_tokens, |
| size_t num_trigger_tokens, |
| const char ** trigger_patterns, |
| size_t num_trigger_patterns) { |
| auto * ctx = new llama_sampler_grammar; |
|
|
| if (grammar_str != nullptr && grammar_str[0] != '\0') { |
| std::string trigger_pattern; |
| llama_grammar * grammar = nullptr; |
| |
        // trigger words are combined into a single trigger regex pattern, with
        // regex metacharacters escaped
        if (trigger_words != nullptr && num_trigger_words > 0) {
| GGML_ASSERT(trigger_patterns == nullptr && num_trigger_patterns == 0); |
| trigger_pattern = "[\\s\\S]*?("; |
| for (size_t i = 0; i < num_trigger_words; ++i) { |
| static const std::regex special_chars("[.^$|()*+?\\[\\]{}\\\\]"); |
| if (i > 0) { |
| trigger_pattern += "|"; |
| } |
| trigger_pattern += std::regex_replace(trigger_words[i], special_chars, "\\$0"); |
| } |
| trigger_pattern += ")[\\s\\S]*"; |
|
|
| std::array<const char *, 1> tmp_trigger_patterns = { trigger_pattern.c_str() }; |
| grammar = llama_grammar_init_impl(vocab, grammar_str, grammar_root, lazy, tmp_trigger_patterns.data(), tmp_trigger_patterns.size(), trigger_tokens, num_trigger_tokens); |
| } else { |
| grammar = llama_grammar_init_impl(vocab, grammar_str, grammar_root, lazy, trigger_patterns, num_trigger_patterns, trigger_tokens, num_trigger_tokens); |
| } |
| *ctx = { |
| vocab, |
| grammar_str, |
| grammar_root, |
| grammar, |
| }; |
| if (!ctx->grammar) { |
| delete ctx; |
| return nullptr; |
| } |
| } else { |
| *ctx = { |
| vocab, |
| {}, |
| {}, |
| nullptr, |
| }; |
| } |
|
|
| return llama_sampler_init( |
| &llama_sampler_grammar_i, |
| ctx |
| ); |
| } |
|
|
| struct llama_sampler * llama_sampler_init_grammar( |
| const struct llama_vocab * vocab, |
| const char * grammar_str, |
| const char * grammar_root) { |
| return llama_sampler_init_grammar_impl(vocab, grammar_str, grammar_root, false, nullptr, 0, nullptr, 0, nullptr, 0); |
| } |
|
|
| struct llama_sampler * llama_sampler_init_grammar_lazy( |
| const struct llama_vocab * vocab, |
| const char * grammar_str, |
| const char * grammar_root, |
| const char ** trigger_words, |
| size_t num_trigger_words, |
| const llama_token * trigger_tokens, |
| size_t num_trigger_tokens) { |
| return llama_sampler_init_grammar_impl(vocab, grammar_str, grammar_root, true, trigger_words, num_trigger_words, trigger_tokens, num_trigger_tokens, nullptr, 0); |
| } |
|
|
| struct llama_sampler * llama_sampler_init_grammar_lazy_patterns( |
| const struct llama_vocab * vocab, |
| const char * grammar_str, |
| const char * grammar_root, |
| const char ** trigger_patterns, |
| size_t num_trigger_patterns, |
| const llama_token * trigger_tokens, |
| size_t num_trigger_tokens) { |
| return llama_sampler_init_grammar_impl(vocab, grammar_str, grammar_root, true, nullptr, 0, trigger_tokens, num_trigger_tokens, trigger_patterns, num_trigger_patterns); |
| } |
|
|
| |
|
|
| struct llama_sampler_penalties { |
| const int32_t penalty_last_n; |
| const float penalty_repeat; |
| const float penalty_freq; |
| const float penalty_present; |
|
|
| ring_buffer<llama_token> prev; |
|
|
| |
| std::unordered_map<llama_token, int> token_count; |
| }; |
|
|
| static const char * llama_sampler_penalties_name(const struct llama_sampler * ) { |
| return "penalties"; |
| } |
|
|
| static void llama_sampler_penalties_accept(struct llama_sampler * smpl, llama_token token) { |
| auto * ctx = (llama_sampler_penalties *) smpl->ctx; |
| if (ctx->penalty_last_n == 0) { |
| return; |
| } |
|
|
| ctx->token_count[token]++; |
|
|
| |
    // if the ring buffer is full, evict the oldest token from the window counts
    if (ctx->prev.size() >= (size_t) ctx->penalty_last_n) {
| const auto old = ctx->prev.front(); |
|
|
| ctx->token_count[old]--; |
| if (ctx->token_count[old] == 0) { |
| ctx->token_count.erase(old); |
| } |
| } |
|
|
| ctx->prev.push_back(token); |
|
|
| #if 0 |
| |
| std::unordered_map<llama_token, int> tmp; |
| for (int i = 0; i < std::min<int>(ctx->penalty_last_n, ctx->prev.size()); ++i) { |
| tmp[ctx->prev.rat(i)]++; |
| } |
|
|
| assert(ctx->token_count == tmp); |
| #endif |
| } |
|
|
| static void llama_sampler_penalties_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) { |
| auto * ctx = (llama_sampler_penalties *) smpl->ctx; |
|
|
| if ((ctx->penalty_last_n == 0) || |
| (ctx->penalty_repeat == 1.0f && ctx->penalty_freq == 0.0f && ctx->penalty_present == 0.0f)) { |
| return; |
| } |
|
|
| |
| for (size_t i = 0; i < cur_p->size; ++i) { |
| const auto token_iter = ctx->token_count.find(cur_p->data[i].id); |
| if (token_iter == ctx->token_count.end()) { |
| continue; |
| } |
|
|
| const int count = token_iter->second; |
|
|
| assert(count > 0 && count <= ctx->penalty_last_n); |
|
|
| |
| |
        // the original paper only divides by the penalty, but that would make tokens
        // with negative logits *more* likely; the usual fix is to multiply negative
        // logits by the penalty instead
        if (cur_p->data[i].logit <= 0) {
            cur_p->data[i].logit *= ctx->penalty_repeat;
        } else {
            cur_p->data[i].logit /= ctx->penalty_repeat;
        }
|
|
        // frequency and presence penalties
        cur_p->data[i].logit -= float(count) * ctx->penalty_freq + float(count > 0) * ctx->penalty_present;
| } |
|
|
| cur_p->sorted = false; |
| } |
|
|
| static void llama_sampler_penalties_reset(struct llama_sampler * smpl) { |
| auto * ctx = (llama_sampler_penalties *) smpl->ctx; |
| ctx->prev.clear(); |
| ctx->token_count.clear(); |
| } |
|
|
| static struct llama_sampler * llama_sampler_penalties_clone(const struct llama_sampler * smpl) { |
| const auto * ctx = (const llama_sampler_penalties *) smpl->ctx; |
| auto * result = llama_sampler_init_penalties( |
| ctx->penalty_last_n, |
| ctx->penalty_repeat, |
| ctx->penalty_freq, |
| ctx->penalty_present); |
|
|
| |
| { |
| auto * result_ctx = (llama_sampler_penalties *) result->ctx; |
|
|
        // copy both the window and its derived counts so the clone stays consistent
        result_ctx->prev        = ctx->prev;
        result_ctx->token_count = ctx->token_count;
| } |
|
|
| return result; |
| } |
|
|
| static void llama_sampler_penalties_free(struct llama_sampler * smpl) { |
| delete (llama_sampler_penalties *) smpl->ctx; |
| } |
|
|
| static struct llama_sampler_i llama_sampler_penalties_i = { |
| llama_sampler_penalties_name, |
| llama_sampler_penalties_accept, |
| llama_sampler_penalties_apply, |
| llama_sampler_penalties_reset, |
| llama_sampler_penalties_clone, |
| llama_sampler_penalties_free, |
| nullptr, |
| nullptr, |
| nullptr, |
| nullptr, |
| }; |
|
|
| struct llama_sampler * llama_sampler_init_penalties( |
| int32_t penalty_last_n, |
| float penalty_repeat, |
| float penalty_freq, |
| float penalty_present) { |
| penalty_last_n = std::max(penalty_last_n, 0); |
|
|
| const bool is_empty = (penalty_last_n == 0 || (penalty_repeat == 1.0f && penalty_freq == 0.0f && penalty_present == 0.0f)); |
|
|
| if (is_empty) { |
| return llama_sampler_init_empty("?penalties"); |
| } |
|
|
| return llama_sampler_init( |
| &llama_sampler_penalties_i, |
| new llama_sampler_penalties { |
| penalty_last_n, |
| penalty_repeat, |
| penalty_freq, |
| penalty_present, |
| ring_buffer<llama_token>(penalty_last_n), |
| {}, |
| } |
| ); |
| } |
|
|
| |
|
|
| struct llama_sampler_top_n_sigma { |
| const float n; |
| }; |
|
|
| static const char * llama_sampler_top_n_sigma_name(const struct llama_sampler * ) { |
| return "top-n-sigma"; |
| } |
|
|
| static void llama_sampler_top_n_sigma_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) { |
| auto * ctx = (llama_sampler_top_n_sigma *) smpl->ctx; |
|
|
| if (ctx->n <= 0.0f || cur_p->size <= 1) { |
| return; |
| } |
|
|
| |
    // find the max logit and compute the mean over the valid (finite) logits
    float max = cur_p->data[0].logit;
    float logits_sum = 0;
    size_t valid_count = 0;
    for (size_t i = 0; i < cur_p->size; ++i) {
        // ignore tokens that were already masked out
        if (cur_p->data[i].logit != -INFINITY) {
            max = std::max(max, cur_p->data[i].logit);
            logits_sum += cur_p->data[i].logit;
            valid_count++;
        }
    }
    float mean = valid_count > 0 ? logits_sum/valid_count : 0;
|
|
| |
    // compute the standard deviation of the valid logits
    float acc = 0;
    for (size_t i = 0; i < cur_p->size; ++i) {
        if (cur_p->data[i].logit != -INFINITY) {
            acc += pow(cur_p->data[i].logit - mean, 2);
        }
    }
    float std = valid_count > 0 ? sqrt(acc/valid_count) : 0;
|
|
| |
    // mask out the candidates more than n standard deviations below the max
    for (size_t i = 0; i < cur_p->size; ++i) {
        if (cur_p->data[i].logit < max - (ctx->n * std)) {
            cur_p->data[i].logit = -INFINITY;
        }
    }
|
|
| llama_sampler_softmax_impl(cur_p, true); |
| } |
|
|
| static struct llama_sampler * llama_sampler_top_n_sigma_clone(const struct llama_sampler * smpl) { |
| const auto * ctx = (const llama_sampler_top_n_sigma *) smpl->ctx; |
| return llama_sampler_init_top_n_sigma(ctx->n); |
| } |
|
|
| static void llama_sampler_top_n_sigma_free(struct llama_sampler * smpl) { |
| delete (llama_sampler_top_n_sigma *) smpl->ctx; |
| } |
|
|
| static struct llama_sampler_i llama_sampler_top_n_sigma_i = { |
| llama_sampler_top_n_sigma_name, |
| nullptr, |
| llama_sampler_top_n_sigma_apply, |
| nullptr, |
| llama_sampler_top_n_sigma_clone, |
| llama_sampler_top_n_sigma_free, |
| nullptr, |
| nullptr, |
| nullptr, |
| nullptr, |
| }; |
|
|
| struct llama_sampler * llama_sampler_init_top_n_sigma(float n) { |
| const bool is_empty = (n <= 0.0f); |
|
|
| if (is_empty) { |
| return llama_sampler_init_empty("?top-n-sigma"); |
| } |
|
|
| return llama_sampler_init( |
| &llama_sampler_top_n_sigma_i, |
| new llama_sampler_top_n_sigma { |
| n, |
| } |
| ); |
| } |
|
|
| |
|
|
| struct llama_sampler_dry { |
| int32_t total_context_size; |
|
|
| const float dry_multiplier; |
| const float dry_base; |
| const int32_t dry_allowed_length; |
| const int32_t dry_penalty_last_n; |
|
|
| std::unordered_multimap<llama_token, std::vector<llama_token>> dry_processed_breakers; |
| std::vector<int> dry_repeat_count; |
| std::unordered_map<llama_token, int> dry_max_token_repeat; |
| ring_buffer<llama_token> last_tokens; |
| }; |
|
|
| |
// for a DRY sequence-breaker string, find every token whose piece overlaps it:
// either the token piece contains the whole string, or a suffix of the piece
// matches a prefix of the string (the untokenized remainder becomes the tail)
static void get_overlapping_token_sequences(const llama_vocab & vocab, const std::string & str, std::unordered_multimap<llama_token, std::vector<llama_token>> & token_sequences, int max_tail_len = -1) {
    for (llama_token token_id = 0; token_id < (llama_token) vocab.n_tokens(); token_id++) {
        std::string word = vocab.detokenize({token_id}, true);
        if (word.find(str) != std::string::npos) {
            // the token piece contains the entire breaker string -> empty tail
            token_sequences.emplace(token_id, std::vector<llama_token>());
        } else {
            size_t word_len = word.size();
            size_t str_len = str.size();
            // start at -1 so the first find() begins at position 0 (the unsigned wraparound is intentional)
            size_t pos = -1;
            while ((pos = word.find(str[0], pos + 1)) != std::string::npos) {
| bool match = true; |
| size_t i; |
| for (i = 1; i < str_len && i + pos < word_len; ++i) { |
| if (word[pos + i] != str[i]) { |
| match = false; |
| break; |
| } |
| } |
| if (match) { |
| std::vector<llama_token> tokenization = vocab.tokenize(str.substr(i), false, false); |
| if (max_tail_len >= 0 && tokenization.size() > (size_t)max_tail_len) { |
| tokenization.resize(max_tail_len); |
| } |
|
|
| |
| auto its = token_sequences.equal_range(token_id); |
| bool found = false; |
| for (auto it = its.first; it != its.second; ++it) { |
| if (tokenization == it->second) { |
| found = true; |
| break; |
| } |
| } |
| if (!found) { |
| token_sequences.emplace(token_id, tokenization); |
| } |
| } |
| } |
| } |
| } |
| } |
|
|
| static const char * llama_sampler_dry_name(const struct llama_sampler * ) { |
| return "dry"; |
| } |
|
|
| static void llama_sampler_dry_accept(struct llama_sampler * smpl, llama_token token) { |
| auto * ctx = (llama_sampler_dry *) smpl->ctx; |
| if (ctx->dry_multiplier == 0.0f || ctx->dry_base < 1.0f || ctx->dry_penalty_last_n == 0) { |
| return; |
| } |
|
|
| ctx->last_tokens.push_back(token); |
| } |
|
|
| |
| static void llama_sampler_dry_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) { |
| auto * ctx = (llama_sampler_dry *) smpl->ctx; |
|
|
| if (ctx->dry_multiplier == 0.0f || ctx->dry_base < 1.0f || ctx->dry_penalty_last_n == 0) { |
| return; |
| } |
|
|
| int32_t effective_dry_penalty_last_n = (ctx->dry_penalty_last_n == -1) ? ctx->total_context_size : std::max(ctx->dry_penalty_last_n, 0); |
| int last_n_repeat = std::min(std::min((int)ctx->last_tokens.size(), effective_dry_penalty_last_n), ctx->total_context_size); |
|
|
| if (last_n_repeat <= ctx->dry_allowed_length) { |
| return; |
| } |
|
|
| ctx->dry_repeat_count.assign(last_n_repeat, 0); |
| ctx->dry_max_token_repeat.clear(); |
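
    // the scan below proceeds in four steps:
    //   1. find the nearest sequence breaker and derive rep_limit, the furthest
    //      back a repetition match is allowed to reach
    //   2. compute, for each position in the window, the length of the repeat
    //      ending there (a Z-algorithm pass over the reversed token window)
    //   3. record, per token, the longest repeat that token would continue
    //   4. subtract dry_multiplier * dry_base^(len - dry_allowed_length) from the
    //      logits of the affected candidates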
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
    // step 1: look for restart (sequence breaker) sequences in the recent tokens;
    // if one is found, limit how far back repetition matching may reach
    int rep_limit = last_n_repeat;
    for (int i = 0; i < last_n_repeat; ++i) {
        llama_token token = ctx->last_tokens.rat(i);
        auto its = ctx->dry_processed_breakers.equal_range(token);
        if (its.first == ctx->dry_processed_breakers.end()) {
            continue;
        }
        int longest_match = -1;
        for (auto it = its.first; it != its.second; ++it) {
            // the stored tail does not include the head token, so seq_len is the
            // restart sequence length minus one; for a single-token breaker the
            // tail is empty and seq_len is 0
            int seq_len = (int)it->second.size();
            if (seq_len > longest_match && seq_len <= (int)i) {
                // check whether the tokens preceding position i match the tail
                bool match = true;
                for (int offset = 0; offset < seq_len; ++offset) {
                    // the extra -1 is because the head token was already matched
                    if (it->second[offset] != ctx->last_tokens.rat(i - offset - 1)) {
                        match = false;
                        break;
                    }
                }
                if (match) {
                    longest_match = seq_len;
                }
            }
        }
        if (longest_match >= 0) {
            // a restart sequence ends here: repetitions must not reach past it
            rep_limit = i - longest_match;
            break;
        }
    }
    if (rep_limit < ctx->dry_allowed_length) {
        return;
    }
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| { |
| const int last = last_n_repeat - 1; |
|
|
| int rt = 0; |
| int lt = 0; |
|
|
| for (int k = 1; k < last_n_repeat; ++k) { |
            if (k > rt) {
                // k is outside the current Z-box: compute the match length naively
                int n = 0;
| while (n + k < last_n_repeat && ctx->last_tokens.rat(n) == ctx->last_tokens.rat(n+k)) { |
| ++n; |
| } |
| ctx->dry_repeat_count[last - k] = std::min(n, rep_limit); |
| if (n > 0) { |
| lt = k; |
| rt = k + n - 1; |
| } |
            } else {
                // k is inside the current Z-box: try to reuse the value computed
                // for the mirrored position

                int p = k - lt;                  // mirrored pair index of k
                int right_part_len = rt - k + 1; // length of the Z-box part after position k
|
|
| if (ctx->dry_repeat_count[last - p] < right_part_len) { |
| int n = std::min(ctx->dry_repeat_count[last - p], rep_limit); |
| ctx->dry_repeat_count[last - k] = n; |
                } else {
                    // the repeat may extend beyond the Z-box: extend it naively
                    int i = rt + 1;
| while (i < last_n_repeat && ctx->last_tokens.rat(i) == ctx->last_tokens.rat(i - k)) { |
| i += 1; |
| } |
|
|
| int n = std::min(i - k, rep_limit); |
| ctx->dry_repeat_count[last - k] = n; |
| lt = k; |
| rt = i - 1; |
| } |
| } |
| } |
| } |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| for (int i = 0; i < last_n_repeat - 1; ++i) { |
        int repeat_len = ctx->dry_repeat_count[i];
        if (repeat_len >= ctx->dry_allowed_length) {
            // this position closes a repeat, so the token that followed it in the
            // context would continue that repeat; repeat_len counts only the tokens
            // already in the context, not the continuation token itself
            llama_token token = ctx->last_tokens.rat(last_n_repeat - 2 - i);

            // track the maximum repeat length ending in this token
            const auto& it = ctx->dry_max_token_repeat.find(token);
| if (it == ctx->dry_max_token_repeat.end() || it->second < repeat_len) { |
| ctx->dry_max_token_repeat[token] = repeat_len; |
| } |
| } |
| } |
|
|
| |
|
|
| |
| |
    // step 4: apply the penalties, capping the exponent so powf cannot overflow
    // (FLOAT_MAX_LOG is ln(FLT_MAX) for 32-bit floats)
    const float FLOAT_MAX_LOG = 88.7228391f;
    int max_exponent = 0;
    if (ctx->dry_base > 1.000001f) {
        max_exponent = FLOAT_MAX_LOG / std::log(ctx->dry_base);
    }
|
|
| for (size_t i = 0; i < cur_p->size; ++i) { |
| const auto& af_kvp = ctx->dry_max_token_repeat.find(cur_p->data[i].id); |
| if (af_kvp != ctx->dry_max_token_repeat.end()) { |
| |
            // check whether this candidate is itself a single-token sequence breaker
            auto range = ctx->dry_processed_breakers.equal_range(cur_p->data[i].id);
| bool is_single_token_breaker = false; |
|
|
| for (auto it = range.first; it != range.second; ++it) { |
| if (it->second.empty()) { |
| is_single_token_breaker = true; |
| break; |
| } |
| } |
|
|
| |
            // single-token sequence breakers are never penalized
            if (!is_single_token_breaker) {
| int repeat_exp = af_kvp->second - ctx->dry_allowed_length; |
| if (max_exponent > 0 && repeat_exp > max_exponent) { |
| repeat_exp = max_exponent; |
| } |
| float penalty = ctx->dry_multiplier * std::pow(ctx->dry_base, repeat_exp); |
| cur_p->data[i].logit -= penalty; |
| } |
| } |
| } |
|
|
| cur_p->sorted = false; |
| } |
|
|
| static void llama_sampler_dry_reset(struct llama_sampler * smpl) { |
| auto * ctx = (llama_sampler_dry *) smpl->ctx; |
| ctx->last_tokens.clear(); |
| ctx->dry_repeat_count.clear(); |
| ctx->dry_max_token_repeat.clear(); |
| } |
|
|
| static struct llama_sampler * llama_sampler_dry_clone(const struct llama_sampler * smpl) { |
| const auto * ctx = (llama_sampler_dry *) smpl->ctx; |
|
|
| llama_vocab dummy_vocab; |
|
|
| |
    // the vocab is only needed to process raw sequence breakers; the already
    // processed breakers are copied below, so a dummy vocab is sufficient here
    auto * result = llama_sampler_init_dry(&dummy_vocab, ctx->total_context_size, ctx->dry_multiplier, ctx->dry_base, ctx->dry_allowed_length, ctx->dry_penalty_last_n, NULL, 0);
|
|
| |
| { |
| auto * result_ctx = (llama_sampler_dry *) result->ctx; |
| result_ctx->dry_processed_breakers = ctx->dry_processed_breakers; |
| result_ctx->dry_repeat_count = ctx->dry_repeat_count; |
| result_ctx->dry_max_token_repeat = ctx->dry_max_token_repeat; |
| result_ctx->last_tokens = ctx->last_tokens; |
| } |
|
|
| return result; |
| } |
|
|
| static void llama_sampler_dry_free(struct llama_sampler * smpl) { |
| delete (llama_sampler_dry *) smpl->ctx; |
| } |
|
|
| static struct llama_sampler_i llama_sampler_dry_i = { |
| llama_sampler_dry_name, |
| llama_sampler_dry_accept, |
| llama_sampler_dry_apply, |
| llama_sampler_dry_reset, |
| llama_sampler_dry_clone, |
| llama_sampler_dry_free, |
| nullptr, |
| nullptr, |
| nullptr, |
| nullptr, |
| }; |
|
|
| struct llama_sampler * llama_sampler_init_dry(const struct llama_vocab * vocab, int32_t n_ctx_train, float dry_multiplier, float dry_base, int32_t dry_allowed_length, int32_t dry_penalty_last_n, const char** seq_breakers, size_t num_breakers) { |
| int32_t effective_dry_penalty_last_n = (dry_penalty_last_n == -1) ? n_ctx_train : std::max(dry_penalty_last_n, 0); |
| std::unordered_multimap<llama_token, std::vector<llama_token>> processed_breakers; |
| const int MAX_CHAR_LEN = 40; |
| const int MAX_SEQ_LEN = 20; |
|
|
| const bool dry_enabled = (dry_multiplier != 0.0f && dry_base >= 1.0f && dry_penalty_last_n != 0); |
|
|
| if (!dry_enabled) { |
| return llama_sampler_init_empty("?dry"); |
| } |
|
|
    // dry_enabled is guaranteed at this point by the early return above
    if (seq_breakers != nullptr && num_breakers > 0) {
| |
| for (size_t i = 0; i < num_breakers; ++i) { |
| if (seq_breakers[i] == nullptr || std::strlen(seq_breakers[i]) == 0) { |
| LLAMA_LOG_WARN("skipping null or empty DRY sequence breaker at index %zu\n", i); |
| continue; |
| } |
|
|
            std::string sequence_break(seq_breakers[i]);
|
|
| if (sequence_break.size() > MAX_CHAR_LEN) { |
| LLAMA_LOG_WARN("truncating DRY sequence breaker to %d characters\n", MAX_CHAR_LEN); |
| sequence_break.resize(MAX_CHAR_LEN); |
| } |
|
|
| get_overlapping_token_sequences(*vocab, sequence_break, processed_breakers, MAX_SEQ_LEN); |
| } |
| } |
|
|
| return llama_sampler_init( |
| &llama_sampler_dry_i, |
| new llama_sampler_dry { |
| n_ctx_train, |
| dry_multiplier, |
| dry_base, |
| dry_allowed_length, |
| dry_penalty_last_n, |
| std::move(processed_breakers), |
            std::vector<int>(effective_dry_penalty_last_n, 0),
            {},
            ring_buffer<llama_token>(effective_dry_penalty_last_n),
| } |
| ); |
| } |
|
|
| |
| struct llama_sampler * llama_sampler_init_dry_testing(int32_t context_size, float dry_multiplier, float dry_base, int32_t dry_allowed_length, int32_t dry_penalty_last_n, const std::vector<std::vector<llama_token>>& seq_breakers) { |
| llama_vocab dummy_vocab; |
| auto * result = llama_sampler_init_dry(&dummy_vocab, context_size, dry_multiplier, dry_base, dry_allowed_length, dry_penalty_last_n, NULL, 0); |
| auto * ctx = (llama_sampler_dry *) result->ctx; |
|
|
| |
| ctx->dry_processed_breakers.clear(); |
| if (seq_breakers.empty()) { |
| LLAMA_LOG_WARN("empty DRY sequence breakers list in llama_sampler_init_dry_testing\n"); |
| } else { |
| for (const auto& breaker : seq_breakers) { |
| if (breaker.empty()) { |
| LLAMA_LOG_WARN("skipping DRY empty sequence breaker\n"); |
| continue; |
| } |
| llama_token head_token = breaker[0]; |
| std::vector<llama_token> tail_tokens(breaker.begin() + 1, breaker.end()); |
| ctx->dry_processed_breakers.emplace(head_token, std::move(tail_tokens)); |
| } |
|
|
| if (ctx->dry_processed_breakers.empty()) { |
| LLAMA_LOG_WARN("no valid DRY sequence breakers processed in llama_sampler_init_dry_testing\n"); |
| } |
| } |
|
|
| return result; |
| } |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| struct llama_sampler_adaptive_p { |
| const float target; |
| const float decay; |
| const uint32_t seed; |
| uint32_t seed_cur; |
| std::mt19937 rng; |
| float weighted_sum; |
| float total_weight; |
| std::vector<float> original_probs; |
| llama_token pending_token_id; |
| int32_t pending_token_idx; |
| }; |
|
|
| |
// shape of the logit remapping curve used by adaptive-p: candidates whose
// probability sits near the adapted target receive a logit close to
// PEAK_LOGIT_VALUE, decaying with the distance measured in units of
// DISTRIBUTION_WIDTH
static constexpr float DISTRIBUTION_WIDTH = 0.3f;
static constexpr float PEAK_LOGIT_VALUE = 5.0f;
static constexpr float SHARPNESS = 10.0f;
static constexpr float INV_WIDTH = 1.0f / DISTRIBUTION_WIDTH;
|
|
| static const char * llama_sampler_adaptive_p_name(const struct llama_sampler * ) { |
| return "adaptive-p"; |
| } |
|
|
| static void llama_sampler_adaptive_p_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) { |
| auto * ctx = (llama_sampler_adaptive_p *) smpl->ctx; |
|
|
| llama_sampler_softmax_impl(cur_p, false); |
|
|
    if (ctx->target < 0.0f) {
        // a negative target disables adaptation: sample from the unmodified distribution
        cur_p->selected = llama_sample_dist(cur_p, ctx->rng);
        return;
    }
|
|
| |
    // save the original probabilities; the accepted token's original probability
    // feeds the running average used to adapt the target
    ctx->original_probs.resize(cur_p->size);
| for (size_t i = 0; i < cur_p->size; ++i) { |
| ctx->original_probs[i] = cur_p->data[i].p; |
| } |
|
|
| |
    // adapt the target so the exponentially-weighted average of accepted
    // probabilities is steered toward the configured target
    auto target = std::clamp(ctx->target, 0.0f, 1.0f);
| float adapted_target = std::clamp( |
| ctx->total_weight == 0.0f ? target : 2.0f * target - (ctx->weighted_sum / ctx->total_weight), |
| 0.0f, 1.0f |
| ); |
|
|
| |
| |
| |
| |
| |
| |
    // remap the logits: the peak lands on probabilities equal to the adapted
    // target and falls off smoothly with the normalized distance from it
    for (size_t i = 0; i < cur_p->size; ++i) {
        if (cur_p->data[i].logit == -INFINITY) {
            // keep already-masked tokens masked
            continue;
        }
        float dist = std::abs((cur_p->data[i].p - adapted_target) * INV_WIDTH);
        cur_p->data[i].logit = PEAK_LOGIT_VALUE - SHARPNESS * dist * dist / (1.0f + dist);
    }
|
|
| |
    // renormalize and sample from the remapped distribution
    llama_sampler_softmax_impl(cur_p, false);
| const int idx = llama_sample_dist(cur_p, ctx->rng); |
| cur_p->selected = idx; |
|
|
| |
    // remember the sampled candidate so that accept() can update the running average
    ctx->pending_token_id = cur_p->data[idx].id;
    ctx->pending_token_idx = idx;
| } |
|
|
| static void llama_sampler_adaptive_p_accept(struct llama_sampler * smpl, llama_token token) { |
| auto * ctx = (llama_sampler_adaptive_p *) smpl->ctx; |
| if (ctx->pending_token_id == token) { |
| GGML_ASSERT(ctx->pending_token_id != LLAMA_TOKEN_NULL); |
| GGML_ASSERT(ctx->pending_token_idx != -1); |
| |
        // fold the accepted token's original probability into the decayed average
        ctx->weighted_sum = ctx->original_probs[ctx->pending_token_idx] + ctx->decay * ctx->weighted_sum;
| ctx->total_weight = 1.0f + ctx->decay * ctx->total_weight; |
| } |
| ctx->pending_token_id = LLAMA_TOKEN_NULL; |
| ctx->pending_token_idx = -1; |
| } |
|
|
| static void llama_sampler_adaptive_p_reset(struct llama_sampler * smpl) { |
| auto * ctx = (llama_sampler_adaptive_p *) smpl->ctx; |
| |
| |
| |
    // start the running average at its steady state, as if the target had always
    // been hit exactly
    ctx->weighted_sum = ctx->target / (1.0f - ctx->decay);
    ctx->total_weight = 1.0f / (1.0f - ctx->decay);
| ctx->pending_token_id = LLAMA_TOKEN_NULL; |
| ctx->pending_token_idx = -1; |
| ctx->seed_cur = get_rng_seed(ctx->seed); |
| ctx->rng.seed(ctx->seed_cur); |
| } |
|
|
| static struct llama_sampler * llama_sampler_adaptive_p_clone(const struct llama_sampler * smpl) { |
| const auto * ctx = (const llama_sampler_adaptive_p *) smpl->ctx; |
| auto * result = llama_sampler_init_adaptive_p(ctx->target, ctx->decay, ctx->seed); |
| auto * result_ctx = (llama_sampler_adaptive_p *) result->ctx; |
|
|
| |
    // copy the state, including the RNG, so the clone continues the same sequence
    result_ctx->rng = ctx->rng;
    result_ctx->weighted_sum = ctx->weighted_sum;
    result_ctx->total_weight = ctx->total_weight;
    result_ctx->pending_token_id = ctx->pending_token_id;
    result_ctx->pending_token_idx = ctx->pending_token_idx;
|
|
| return result; |
| } |
|
|
| static void llama_sampler_adaptive_p_free(struct llama_sampler * smpl) { |
| delete (llama_sampler_adaptive_p *) smpl->ctx; |
| } |
|
|
| static struct llama_sampler_i llama_sampler_adaptive_p_i = { |
| llama_sampler_adaptive_p_name, |
| llama_sampler_adaptive_p_accept, |
| llama_sampler_adaptive_p_apply, |
| llama_sampler_adaptive_p_reset, |
| llama_sampler_adaptive_p_clone, |
| llama_sampler_adaptive_p_free, |
| nullptr, |
| nullptr, |
| nullptr, |
| nullptr, |
| }; |
|
|
| struct llama_sampler * llama_sampler_init_adaptive_p( |
| float target, |
| float decay, |
| uint32_t seed |
| ) { |
| auto seed_cur = get_rng_seed(seed); |
| float clamped_decay = std::clamp(decay, 0.0f, 0.99f); |
| return llama_sampler_init( |
| &llama_sampler_adaptive_p_i, |
| new llama_sampler_adaptive_p { |
| target, |
| clamped_decay, |
| seed, |
| seed_cur, |
| std::mt19937(seed_cur), |
| target / (1.0f - clamped_decay), |
| 1.0f / (1.0f - clamped_decay), |
| {}, |
| LLAMA_TOKEN_NULL, |
| -1 |
| } |
| ); |
| } |
|
|
| |
|
|
| struct llama_sampler_logit_bias : public llama_sampler_backend { |
| const int32_t n_vocab; |
|
|
| const std::vector<llama_logit_bias> logit_bias; |
|
|
| std::vector<llama_logit_bias> to_search; |
|
|
| struct ggml_tensor * inp_logit_bias; |
| struct ggml_tensor * inp_logit_idxs; |
| }; |
|
|
| static const char * llama_sampler_logit_bias_name(const struct llama_sampler * smpl) { |
| auto * ctx = (llama_sampler_logit_bias *) smpl->ctx; |
| return ctx->get_name(); |
| } |
|
|
| static void llama_sampler_logit_bias_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) { |
| auto * ctx = (llama_sampler_logit_bias *) smpl->ctx; |
|
|
| if (ctx->logit_bias.empty()) { |
| return; |
| } |
|
|
| ctx->to_search.clear(); |
|
|
| |
    // fast path: the token id doubles as the index into cur_p while the candidates
    // are still in vocab order
    for (const auto & lb : ctx->logit_bias) {
| if (lb.token >= 0 && cur_p->size > (size_t) lb.token && cur_p->data[lb.token].id == lb.token) { |
| cur_p->data[lb.token].logit += lb.bias; |
| } else { |
| ctx->to_search.push_back(lb); |
| } |
| } |
|
|
| if (ctx->to_search.empty()) { |
| return; |
| } |
|
|
| |
    // slow path: search for the remaining tokens by id
    for (size_t i = 0; i < cur_p->size; ++i) {
| for (const auto & lb : ctx->to_search) { |
| if (cur_p->data[i].id == lb.token) { |
| cur_p->data[i].logit += lb.bias; |
| break; |
| } |
| } |
| } |
| } |
|
|
| static struct llama_sampler * llama_sampler_logit_bias_clone(const struct llama_sampler * smpl) { |
| const auto * ctx = (const llama_sampler_logit_bias *) smpl->ctx; |
| return llama_sampler_init_logit_bias(ctx->n_vocab, ctx->logit_bias.size(), ctx->logit_bias.data()); |
| } |
|
|
| static void llama_sampler_logit_bias_free(struct llama_sampler * smpl) { |
| delete (llama_sampler_logit_bias *) smpl->ctx; |
| } |
|
|
| static void llama_sampler_logit_bias_backend_apply( |
| struct llama_sampler * smpl, |
| struct ggml_context * ctx, |
| struct ggml_cgraph * gf, |
| struct llama_sampler_data * data) { |
    GGML_UNUSED(gf);
|
|
| auto * sctx = (llama_sampler_logit_bias *) smpl->ctx; |
| if (sctx->logit_bias.empty()) { |
| return; |
| } |
|
|
| const size_t n = sctx->logit_bias.size(); |
|
|
| sctx->inp_logit_bias = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 1, n); |
| ggml_set_name(sctx->inp_logit_bias, "logit_bias"); |
| ggml_set_input(sctx->inp_logit_bias); |
|
|
| sctx->inp_logit_idxs = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n); |
| ggml_set_name(sctx->inp_logit_idxs, "logit_idxs"); |
| ggml_set_input(sctx->inp_logit_idxs); |
|
|
    // build a sparse bias tensor: start from zeros shaped like the logits, scatter
    // the bias values into the rows selected by the token indices, then add it
    ggml_tensor * cur = ggml_fill(ctx, data->logits, 0.0f);

    cur = ggml_reshape_2d(ctx, cur, 1, ggml_nelements(cur));
    cur = ggml_set_rows(ctx, cur, sctx->inp_logit_bias, sctx->inp_logit_idxs);
    cur = ggml_reshape_1d(ctx, cur, ggml_nelements(cur));
|
|
| data->logits = ggml_add(ctx, data->logits, cur); |
| } |
|
|
| static void llama_sampler_logit_bias_backend_set_input(struct llama_sampler * smpl) { |
| auto * sctx = (llama_sampler_logit_bias *) smpl->ctx; |
| if (sctx->logit_bias.empty()) { |
| return; |
| } |
|
|
| GGML_ASSERT(sctx->inp_logit_bias != nullptr); |
| GGML_ASSERT(sctx->inp_logit_idxs != nullptr); |
|
|
| const size_t n = sctx->logit_bias.size(); |
|
|
| std::vector<float> data_logit_bias(n, 0.0f); |
| std::vector<int32_t> data_logit_idxs(n, 0); |
| for (size_t i = 0; i < n; ++i) { |
| const auto & lb = sctx->logit_bias[i]; |
| GGML_ASSERT(lb.token >= 0 && lb.token < (int32_t) sctx->n_vocab); |
| data_logit_bias[i] = lb.bias; |
| data_logit_idxs[i] = lb.token; |
| } |
|
|
| ggml_backend_tensor_set(sctx->inp_logit_bias, data_logit_bias.data(), 0, ggml_nbytes(sctx->inp_logit_bias)); |
| ggml_backend_tensor_set(sctx->inp_logit_idxs, data_logit_idxs.data(), 0, ggml_nbytes(sctx->inp_logit_idxs)); |
| } |
|
|
| static bool llama_sampler_logit_bias_backend_init( |
| struct llama_sampler * smpl, |
| ggml_backend_buffer_type_t buft) { |
| GGML_UNUSED(buft); |
|
|
| auto * sctx = (llama_sampler_logit_bias *) smpl->ctx; |
|
|
| sctx->init(true); |
|
|
    return true;
| } |
|
|
| static struct llama_sampler_i llama_sampler_logit_bias_i = { |
| llama_sampler_logit_bias_name, |
| nullptr, |
| llama_sampler_logit_bias_apply, |
| nullptr, |
| llama_sampler_logit_bias_clone, |
| llama_sampler_logit_bias_free, |
| llama_sampler_logit_bias_backend_init, |
| nullptr, |
| llama_sampler_logit_bias_backend_apply, |
| llama_sampler_logit_bias_backend_set_input, |
| }; |
|
|
| struct llama_sampler * llama_sampler_init_logit_bias( |
| int32_t n_vocab, |
| int32_t n_logit_bias, |
| const llama_logit_bias * logit_bias) { |
| const bool is_empty = n_logit_bias <= 0; |
|
|
| if (is_empty) { |
| return llama_sampler_init_empty("?logit-bias"); |
| } |
|
|
| return llama_sampler_init( |
| &llama_sampler_logit_bias_i, |
| new llama_sampler_logit_bias { |
| ("logit-bias"), |
| n_vocab, |
| std::vector<llama_logit_bias>(logit_bias, logit_bias + n_logit_bias), |
| {}, |
| nullptr, |
| nullptr, |
| } |
| ); |
| } |
|
|
| |
|
|
| |
|
|
| struct llama_sampler_infill { |
| const struct llama_vocab * vocab; |
|
|
| std::vector<char> buf0; |
| std::vector<char> buf1; |
| }; |
|
|
| static const char * llama_sampler_infill_name(const struct llama_sampler * ) { |
| return "infill"; |
| } |
|
|
| static void llama_sampler_infill_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) { |
| auto * ctx = (llama_sampler_infill *) smpl->ctx; |
|
|
| llama_sampler_softmax_impl(cur_p, true); |
|
|
| #if defined(GGML_DEBUG_SAMPLER_INFILL) |
| #define LOG_DBG_CUR LLAMA_LOG_DEBUG |
| #else |
| #define LOG_DBG_CUR(...) |
| #endif |
|
|
| for (size_t i = 0; i < cur_p->size; ++i) { |
| LOG_DBG_CUR("%s: cur_p[%3zu] = { id: %6d, p: %.6f, logit: %6.3f }\n", __func__, i, cur_p->data[i].id, cur_p->data[i].p, cur_p->data[i].logit); |
| } |
|
|
| float p_txt_sum = 0.0f; |
| float p_eog_sum = 0.0f; |
|
|
| for (size_t i = 0; i < cur_p->size; ++i) { |
| if (ctx->vocab->is_eog(cur_p->data[i].id)) { |
| p_eog_sum += cur_p->data[i].p; |
| } else { |
| p_txt_sum += cur_p->data[i].p; |
| } |
| } |
|
|
| const float rat = p_eog_sum == 0.0 ? INFINITY : p_txt_sum / p_eog_sum; GGML_UNUSED(rat); |
|
|
| LOG_DBG_CUR("%s: p_txt_sum = %.2f, p_eog_sum = %.2f, rat = %.2f, n = %zu\n", __func__, p_txt_sum, p_eog_sum, rat, cur_p->size); |
|
|
    // if the probability mass of the EOG tokens is high relative to the
    // (count-scaled) text mass, restrict sampling to the EOG tokens
    if (3*p_eog_sum*cur_p->size > p_txt_sum) {
        LOG_DBG_CUR("%s: the ratio p_txt/p_eog = %.2f is too low -> sampling EOG\n", __func__, p_txt_sum/p_eog_sum);
|
|
| |
| const auto size_org = cur_p->size; |
|
|
| cur_p->size = 0; |
|
|
| float p_sum = 0.0f; |
|
|
| for (size_t i = 0; i < size_org; ++i) { |
| if (ctx->vocab->is_eog(cur_p->data[i].id)) { |
| p_sum += cur_p->data[i].p; |
|
|
| cur_p->data[cur_p->size++] = cur_p->data[i]; |
| } |
| } |
|
|
| |
        // renormalize the remaining EOG probabilities
        for (size_t i = 0; i < cur_p->size; ++i) {
| cur_p->data[i].p /= p_sum; |
| } |
|
|
| return; |
| } |
|
|
| size_t n_combined = 0; GGML_UNUSED(n_combined); |
|
|
| |
    // combine tokens where one is a byte prefix of the other, accumulating the
    // probability mass onto the more likely token
    for (size_t i0 = 0; i0 < cur_p->size; ++i0) {
| for (size_t i1 = 0; i1 < cur_p->size; ++i1) { |
| if (cur_p->data[i0].logit == -INFINITY) { |
| break; |
| } |
|
|
| if (i0 == i1 || cur_p->data[i1].logit == -INFINITY) { |
| continue; |
| } |
|
|
            int len0 = ctx->vocab->token_to_piece(cur_p->data[i0].id, ctx->buf0.data(), ctx->buf0.size(), 0, false);
            if (len0 < 0) {
                // a negative result is the negated required buffer size
                ctx->buf0.resize(-len0);
                len0 = ctx->vocab->token_to_piece(cur_p->data[i0].id, ctx->buf0.data(), ctx->buf0.size(), 0, false);
                assert(len0 > 0);
            }
|
|
            int len1 = ctx->vocab->token_to_piece(cur_p->data[i1].id, ctx->buf1.data(), ctx->buf1.size(), 0, false);
            if (len1 < 0) {
                ctx->buf1.resize(-len1);
                len1 = ctx->vocab->token_to_piece(cur_p->data[i1].id, ctx->buf1.data(), ctx->buf1.size(), 0, false);
                assert(len1 > 0);
            }
|
|
| |
            // token i0 is a byte prefix of token i1 -> merge them
            if (len0 > 0 && len0 <= len1 && memcmp(ctx->buf0.data(), ctx->buf1.data(), len0) == 0) {
| int dst = i0; |
| int src = i1; |
|
|
| |
                // merge into the token with the higher probability
                if (cur_p->data[i1].p > cur_p->data[i0].p) {
| std::swap(dst, src); |
| } |
|
|
| cur_p->data[dst].p += cur_p->data[src].p; |
| cur_p->data[src].logit = -INFINITY; |
| cur_p->data[src].p = 0.0f; |
|
|
| n_combined++; |
| } |
| } |
| } |
|
|
| size_t n_non_eog = 0; |
|
|
| size_t size_org = cur_p->size; |
|
|
| float p_sum = 0.0f; |
| float thold = 0.2f; |
|
|
| cur_p->size = 0; |
|
|
| LOG_DBG_CUR("%s: n_combined = %zu, applying thold = %.3f\n", __func__, n_combined, thold); |
|
|
| for (size_t i = 0; i < size_org; ++i) { |
| const bool is_eog = ctx->vocab->is_eog(cur_p->data[i].id); |
|
|
| if (cur_p->data[i].p < thold && !is_eog) { |
| continue; |
| } |
|
|
| if (!is_eog) { |
| ++n_non_eog; |
| } |
|
|
| p_sum += cur_p->data[i].p; |
|
|
| |
| cur_p->data[cur_p->size++] = cur_p->data[i]; |
| } |
|
|
| LOG_DBG_CUR("%s: n_non_eog = %zu\n", __func__, n_non_eog); |
|
|
| |
    // if no non-EOG token survived the threshold, force an end-of-generation token
    if (n_non_eog == 0) {
| cur_p->size = 1; |
| cur_p->data[0].id = ctx->vocab->token_eot(); |
| if (cur_p->data[0].id == LLAMA_TOKEN_NULL) { |
| cur_p->data[0].id = ctx->vocab->token_eos(); |
| } |
| cur_p->data[0].logit = 1.0f; |
|
|
| GGML_ASSERT(cur_p->data[0].id != LLAMA_TOKEN_NULL); |
|
|
| return; |
| } |
|
|
| |
    // renormalize the surviving candidates
    for (size_t i = 0; i < cur_p->size; ++i) {
| cur_p->data[i].p /= p_sum; |
|
|
| LOG_DBG_CUR("%s: cur_p[%3zu] = { id: %6d, p: %.6f, logit: %6.3f }\n", __func__, i, cur_p->data[i].id, cur_p->data[i].p, cur_p->data[i].logit); |
| } |
|
|
| size_org = cur_p->size; |
| p_sum = 0.0f; |
| thold = 1.0/(n_non_eog + 1); |
|
|
| cur_p->size = 0; |
|
|
| LOG_DBG_CUR("%s: applying thold = %.3f\n", __func__, thold); |
|
|
| for (size_t i = 0; i < size_org; ++i) { |
| const bool is_eog = ctx->vocab->is_eog(cur_p->data[i].id); |
|
|
| if (cur_p->data[i].p < thold && !is_eog) { |
| continue; |
| } |
|
|
| p_sum += cur_p->data[i].p; |
|
|
| cur_p->data[cur_p->size++] = cur_p->data[i]; |
| } |
|
|
| |
| for (size_t i = 0; i < cur_p->size; ++i) { |
| cur_p->data[i].p /= p_sum; |
|
|
| LOG_DBG_CUR("%s: cur_p[%3zu] = { id: %6d, p: %.6f, logit: %6.3f }\n", __func__, i, cur_p->data[i].id, cur_p->data[i].p, cur_p->data[i].logit); |
| } |
|
|
| #undef LOG_DBG_CUR |
| } |
|
|
| static struct llama_sampler * llama_sampler_infill_clone(const struct llama_sampler * smpl) { |
| const auto * ctx = (const llama_sampler_infill *) smpl->ctx; |
| return llama_sampler_init_infill(ctx->vocab); |
| } |
|
|
| static void llama_sampler_infill_free(struct llama_sampler * smpl) { |
| delete (llama_sampler_infill *) smpl->ctx; |
| } |
|
|
| static struct llama_sampler_i llama_sampler_infill_i = { |
| llama_sampler_infill_name, |
| nullptr, |
| llama_sampler_infill_apply, |
| nullptr, |
| llama_sampler_infill_clone, |
| llama_sampler_infill_free, |
| nullptr, |
| nullptr, |
| nullptr, |
| nullptr, |
| }; |
|
|
| struct llama_sampler * llama_sampler_init_infill(const struct llama_vocab * vocab) { |
| return llama_sampler_init( |
| &llama_sampler_infill_i, |
| new llama_sampler_infill { |
| vocab, |
| std::vector<char>(512), |
| std::vector<char>(512), |
| } |
| ); |
| } |
|
|
| |
|
|
| uint32_t llama_sampler_get_seed(const struct llama_sampler * smpl) { |
| if (smpl->iface == &llama_sampler_dist_i) { |
| return ((const llama_sampler_dist *) smpl->ctx)->seed_cur; |
| } |
|
|
| if (smpl->iface == &llama_sampler_mirostat_i) { |
| return ((const llama_sampler_mirostat *) smpl->ctx)->seed_cur; |
| } |
|
|
| if (smpl->iface == &llama_sampler_mirostat_v2_i) { |
| return ((const llama_sampler_mirostat_v2 *) smpl->ctx)->seed_cur; |
| } |
|
|
| if (smpl->iface == &llama_sampler_chain_i) { |
| const auto * ctx = (const llama_sampler_chain *) smpl->ctx; |
| for (auto it = ctx->samplers.rbegin(); it != ctx->samplers.rend(); ++it) { |
| const uint32_t seed = llama_sampler_get_seed(it->ptr); |
| if (seed != LLAMA_DEFAULT_SEED) { |
| return seed; |
| } |
| } |
| } |
|
|
| return LLAMA_DEFAULT_SEED; |
| } |
|
|
| |
|
|
| struct llama_perf_sampler_data llama_perf_sampler(const struct llama_sampler * chain) { |
| struct llama_perf_sampler_data data = {}; |
|
|
| if (chain == nullptr || chain->iface != &llama_sampler_chain_i) { |
| GGML_ABORT("%s: invalid sampler passed - requires a sampler created with llama_sampler_chain_init()\n", __func__); |
| } |
|
|
| const auto * ctx = (const struct llama_sampler_chain *) chain->ctx; |
|
|
| data.t_sample_ms = 1e-3 * ctx->t_sample_us; |
| data.n_sample = std::max(0, ctx->n_sample); |
|
|
| return data; |
| } |
|
|
| void llama_perf_sampler_print(const struct llama_sampler * chain) { |
| const auto data = llama_perf_sampler(chain); |
|
|
| LLAMA_LOG_INFO("%s: samplers time = %10.2f ms / %5d runs\n", __func__, data.t_sample_ms, data.n_sample); |
| } |
|
|
| void llama_perf_sampler_reset(struct llama_sampler * chain) { |
| if (chain == nullptr || chain->iface != &llama_sampler_chain_i) { |
| GGML_ABORT("%s: invalid sampler passed - requires a sampler created with llama_sampler_chain_init()\n", __func__); |
| } |
|
|
| auto * ctx = (struct llama_sampler_chain *) chain->ctx; |
|
|
| ctx->t_sample_us = 0; |
| ctx->n_sample = 0; |
| } |
|
|