| #pragma once |
|
|
| #include "llama.h" |
|
|
| #include <vector> |
|
|
| struct llama_vocab; |
| struct llama_grammar; |
|
|
| |
|
|
| struct llama_sampler_chain { |
| llama_sampler_chain_params params; |
|
|
| |
| bool is_init = false; |
|
|
| struct info { |
| bool is_backend; |
|
|
| llama_sampler * ptr; |
| }; |
|
|
| std::vector<info> samplers; |
|
|
| |
| std::vector<llama_token_data> cur; |
|
|
| |
|
|
| mutable int64_t t_sample_us; |
|
|
| mutable int32_t n_sample; |
| }; |
|
|
// Testing-only constructor for the DRY repetition-penalty sampler:
// accepts pre-tokenized sequence breakers directly, so tests can build
// the sampler without going through a vocab.
struct llama_sampler * llama_sampler_init_dry_testing(
        int32_t context_size,
        float dry_multiplier,
        float dry_base,
        int32_t dry_allowed_length,
        int32_t dry_penalty_last_n,
        const std::vector<std::vector<llama_token>> & seq_breakers);
|
|