#pragma once

#include "llama.h"

#include "ggml-cpp.h"

#include <memory> // for the std::shared_ptr / std::unique_ptr aliases below
#include <string>
#include <unordered_map>
#include <vector>

//
// llama_adapter_cvec
//
// a control vector: one direction per layer, added to the hidden state
struct llama_adapter_cvec {
    ggml_tensor * tensor_for(int il) const;

    ggml_tensor * apply_to(ggml_context * ctx, ggml_tensor * cur, int il) const;

    // load the raw float data (n_embd floats per layer) for layers [il_start, il_end]
    bool apply(
            const llama_model & model,
            const float * data,
            size_t len,
            int32_t n_embd,
            int32_t il_start,
            int32_t il_end);

private:
    bool init(const llama_model & model);

    int32_t layer_start = -1;
    int32_t layer_end   = -1;

    std::vector<ggml_context_ptr>        ctxs;
    std::vector<ggml_backend_buffer_ptr> bufs;

    std::vector<ggml_tensor *> tensors; // per layer
};
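
// A usage sketch, assuming the public llama.h control-vector API (a minimal
// illustration, not the exact call path inside the library):
//
//   // data holds n_embd floats per layer for layers [il_start, il_end];
//   // a nullptr data clears the currently applied control vector
//   llama_apply_adapter_cvec(ctx, data, len, n_embd, il_start, il_end);
//
// During graph build, the per-layer direction is then added to the hidden
// state via apply_to(ctx0, cur, il).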

using llama_adapter_cvec_ptr = std::shared_ptr<llama_adapter_cvec>;

//
// llama_adapter_lora
//
// a single LoRA weight pair: the low-rank update w' = w + scale * (b @ a)
struct llama_adapter_lora_weight {
    ggml_tensor * a = nullptr;
    ggml_tensor * b = nullptr;

    // get the actual scale based on the adapter's rank and alpha
    float get_scale(float alpha, float adapter_scale) const {
        const float rank  = (float) b->ne[0];
        const float scale = alpha ? adapter_scale * alpha / rank : adapter_scale;
        return scale;
    }

    llama_adapter_lora_weight() = default;
    llama_adapter_lora_weight(ggml_tensor * a, ggml_tensor * b) : a(a), b(b) {}
};
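
// Worked example for get_scale(): with a rank-16 adapter (b->ne[0] == 16),
// alpha == 32 and adapter_scale == 1.0f, the effective scale is
// 1.0 * 32 / 16 = 2.0, i.e. the usual LoRA alpha/rank scaling. If the
// adapter carries no alpha (alpha == 0), adapter_scale is returned unchanged.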

struct llama_adapter_lora {
    // map tensor name to the corresponding lora_a / lora_b pair
    std::unordered_map<std::string, llama_adapter_lora_weight> ab_map;

    std::vector<ggml_context_ptr>        ctxs;
    std::vector<ggml_backend_buffer_ptr> bufs;

    float alpha;

    // gguf metadata of the adapter file
    std::unordered_map<std::string, std::string> gguf_kv;

    // activated lora (aLoRA): token sequence that triggers the adapter
    std::vector<llama_token> alora_invocation_tokens;

    llama_adapter_lora() = default;
    ~llama_adapter_lora() = default;

    llama_adapter_lora_weight * get_weight(ggml_tensor * w);

    // upper bound on the number of graph nodes needed to apply the adapter
    // (6 per A/B pair)
    uint32_t get_n_nodes() const {
        return ab_map.size() * 6u;
    }
};
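
// A usage sketch, assuming the public llama.h adapter API (a minimal
// illustration; "adapter.gguf" is a placeholder path):
//
//   llama_adapter_lora * adapter = llama_adapter_lora_init(model, "adapter.gguf");
//   if (adapter != nullptr) {
//       // the scale here is the adapter_scale passed into get_scale() above
//       llama_set_adapter_lora(ctx, adapter, 1.0f);
//       // ... decode as usual, then detach and free:
//       llama_rm_adapter_lora(ctx, adapter);
//       llama_adapter_lora_free(adapter);
//   }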

using llama_adapter_loras     = std::unordered_map<llama_adapter_lora *, float>;
using llama_adapter_loras_ptr = std::unique_ptr<llama_adapter_loras>;
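
// Example: a context can track several adapters at once, each mapped to its
// own user-facing scale (a sketch; adapter_style and adapter_task are
// hypothetical, already-loaded adapters):
//
//   llama_adapter_loras loras;
//   loras[adapter_style] = 0.5f; // blend two LoRAs at different strengths
//   loras[adapter_task]  = 1.0f;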