| #include "llama-model-loader.h" |
|
|
| #include "ggml-alloc.h" |
| #include "ggml.h" |
| #include "gguf.h" |
| #include "llama-hparams.h" |
|
|
| #include <algorithm> |
| #include <array> |
| #include <cinttypes> |
| #include <cstdint> |
| #include <cstring> |
| #include <future> |
| #include <regex> |
|
|
| static const size_t kiB = 1024; |
| static const size_t MiB = 1024*kiB; |
| static const size_t GiB = 1024*MiB; |
|
|
| const char * llama_file_version_name(llama_fver version) { |
| switch (version) { |
| case GGUF_FILE_VERSION_V1: return "GGUF V1 (support until nov 2023)"; |
| case GGUF_FILE_VERSION_V2: return "GGUF V2"; |
| case GGUF_FILE_VERSION_V3: return "GGUF V3 (latest)"; |
| } |
|
|
| return "unknown"; |
| } |
|
|
| static std::string llama_model_ftype_name(llama_ftype ftype) { |
| if (ftype & LLAMA_FTYPE_GUESSED) { |
| return llama_model_ftype_name((enum llama_ftype) (ftype & ~LLAMA_FTYPE_GUESSED)) + " (guessed)"; |
| } |
|
|
| switch (ftype) { |
| case LLAMA_FTYPE_ALL_F32: return "all F32"; |
| case LLAMA_FTYPE_MOSTLY_F16: return "F16"; |
| case LLAMA_FTYPE_MOSTLY_BF16: return "BF16"; |
| case LLAMA_FTYPE_MOSTLY_Q4_0: return "Q4_0"; |
| case LLAMA_FTYPE_MOSTLY_Q4_1: return "Q4_1"; |
| case LLAMA_FTYPE_MOSTLY_Q5_0: return "Q5_0"; |
| case LLAMA_FTYPE_MOSTLY_Q5_1: return "Q5_1"; |
| case LLAMA_FTYPE_MOSTLY_Q8_0: return "Q8_0"; |
| case LLAMA_FTYPE_MOSTLY_MXFP4_MOE: return "MXFP4 MoE"; |
| case LLAMA_FTYPE_MOSTLY_NVFP4: return "NVFP4"; |
| case LLAMA_FTYPE_MOSTLY_Q2_K: return "Q2_K - Medium"; |
| case LLAMA_FTYPE_MOSTLY_Q2_K_S: return "Q2_K - Small"; |
| case LLAMA_FTYPE_MOSTLY_Q3_K_S: return "Q3_K - Small"; |
| case LLAMA_FTYPE_MOSTLY_Q3_K_M: return "Q3_K - Medium"; |
| case LLAMA_FTYPE_MOSTLY_Q3_K_L: return "Q3_K - Large"; |
| case LLAMA_FTYPE_MOSTLY_Q4_K_S: return "Q4_K - Small"; |
| case LLAMA_FTYPE_MOSTLY_Q4_K_M: return "Q4_K - Medium"; |
| case LLAMA_FTYPE_MOSTLY_Q5_K_S: return "Q5_K - Small"; |
| case LLAMA_FTYPE_MOSTLY_Q5_K_M: return "Q5_K - Medium"; |
| case LLAMA_FTYPE_MOSTLY_Q6_K: return "Q6_K"; |
| case LLAMA_FTYPE_MOSTLY_TQ1_0: return "TQ1_0 - 1.69 bpw ternary"; |
| case LLAMA_FTYPE_MOSTLY_TQ2_0: return "TQ2_0 - 2.06 bpw ternary"; |
| case LLAMA_FTYPE_MOSTLY_IQ2_XXS: return "IQ2_XXS - 2.0625 bpw"; |
| case LLAMA_FTYPE_MOSTLY_IQ2_XS: return "IQ2_XS - 2.3125 bpw"; |
| case LLAMA_FTYPE_MOSTLY_IQ2_S: return "IQ2_S - 2.5 bpw"; |
| case LLAMA_FTYPE_MOSTLY_IQ2_M: return "IQ2_M - 2.7 bpw"; |
| case LLAMA_FTYPE_MOSTLY_IQ3_XS: return "IQ3_XS - 3.3 bpw"; |
| case LLAMA_FTYPE_MOSTLY_IQ3_XXS: return "IQ3_XXS - 3.0625 bpw"; |
| case LLAMA_FTYPE_MOSTLY_IQ1_S: return "IQ1_S - 1.5625 bpw"; |
| case LLAMA_FTYPE_MOSTLY_IQ1_M: return "IQ1_M - 1.75 bpw"; |
| case LLAMA_FTYPE_MOSTLY_IQ4_NL: return "IQ4_NL - 4.5 bpw"; |
| case LLAMA_FTYPE_MOSTLY_IQ4_XS: return "IQ4_XS - 4.25 bpw"; |
| case LLAMA_FTYPE_MOSTLY_IQ3_S: return "IQ3_S - 3.4375 bpw"; |
| case LLAMA_FTYPE_MOSTLY_IQ3_M: return "IQ3_S mix - 3.66 bpw"; |
|
|
| default: return "unknown, may not work"; |
| } |
| } |
|
|
| |
| |
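// return the list of splits for a given path
// for example, given "<name>-00002-of-00004.gguf", returns the list of all 4 splits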
| static std::vector<std::string> llama_get_list_splits(const std::string & path, const int idx, const int n_split) { |
| std::vector<std::string> paths; |
| std::string split_prefix; |
| std::vector<char> buf(llama_path_max(), 0); |
|
|
| { |
| int ret = llama_split_prefix(buf.data(), buf.size(), path.c_str(), idx, n_split); |
| if (!ret) { |
| throw std::runtime_error(format("invalid split file name: %s", path.c_str())); |
| } |
| split_prefix = std::string(buf.data(), ret); |
| } |
|
|
| if (split_prefix.empty()) { |
| throw std::runtime_error(format("invalid split file: %s", path.c_str())); |
| } |
|
|
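// generate the paths to all splits from the common prefix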
| for (int idx = 0; idx < n_split; ++idx) { |
| int ret = llama_split_path(buf.data(), buf.size(), split_prefix.c_str(), idx, n_split); |
| paths.push_back(std::string(buf.data(), ret)); |
| } |
|
|
| return paths; |
| } |
|
|
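// templated helpers for reading typed values out of GGUF metadata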
| namespace GGUFMeta { |
| template <typename T, gguf_type gt_, T (*gfun)(const gguf_context *, const int64_t)> |
| struct GKV_Base_Type { |
| static constexpr gguf_type gt = gt_; |
|
|
| static T getter(const gguf_context * ctx, const int kid) { |
| return gfun(ctx, kid); |
| } |
| }; |
|
|
| template<typename T> struct GKV_Base; |
|
|
| template<> struct GKV_Base<bool >: GKV_Base_Type<bool, GGUF_TYPE_BOOL, gguf_get_val_bool> {}; |
| template<> struct GKV_Base<uint8_t >: GKV_Base_Type<uint8_t, GGUF_TYPE_UINT8, gguf_get_val_u8 > {}; |
| template<> struct GKV_Base<uint16_t >: GKV_Base_Type<uint16_t, GGUF_TYPE_UINT16, gguf_get_val_u16 > {}; |
| template<> struct GKV_Base<uint32_t >: GKV_Base_Type<uint32_t, GGUF_TYPE_UINT32, gguf_get_val_u32 > {}; |
| template<> struct GKV_Base<uint64_t >: GKV_Base_Type<uint64_t, GGUF_TYPE_UINT64, gguf_get_val_u64 > {}; |
| template<> struct GKV_Base<int8_t >: GKV_Base_Type<int8_t, GGUF_TYPE_INT8, gguf_get_val_i8 > {}; |
| template<> struct GKV_Base<int16_t >: GKV_Base_Type<int16_t, GGUF_TYPE_INT16, gguf_get_val_i16 > {}; |
| template<> struct GKV_Base<int32_t >: GKV_Base_Type<int32_t, GGUF_TYPE_INT32, gguf_get_val_i32 > {}; |
| template<> struct GKV_Base<int64_t >: GKV_Base_Type<int64_t, GGUF_TYPE_INT64, gguf_get_val_i64 > {}; |
| template<> struct GKV_Base<float >: GKV_Base_Type<float, GGUF_TYPE_FLOAT32, gguf_get_val_f32 > {}; |
| template<> struct GKV_Base<double >: GKV_Base_Type<double, GGUF_TYPE_FLOAT64, gguf_get_val_f64 > {}; |
| template<> struct GKV_Base<const char *>: GKV_Base_Type<const char *, GGUF_TYPE_STRING, gguf_get_val_str > {}; |
|
|
| template<> struct GKV_Base<std::string> { |
| static constexpr gguf_type gt = GGUF_TYPE_STRING; |
|
|
| static std::string getter(const gguf_context * ctx, const int kid) { |
| return gguf_get_val_str(ctx, kid); |
| } |
| }; |
|
|
| struct ArrayInfo { |
| const gguf_type gt; |
| const size_t length; |
| const void * data; |
| }; |
|
|
| template<> struct GKV_Base<ArrayInfo> { |
| public: |
| static constexpr gguf_type gt = GGUF_TYPE_ARRAY; |
| static ArrayInfo getter(const gguf_context *ctx, const int k) { |
| const enum gguf_type arr_type = gguf_get_arr_type(ctx, k); |
| return ArrayInfo { |
| arr_type, |
| size_t(gguf_get_arr_n(ctx, k)), |
| arr_type == GGUF_TYPE_STRING ? nullptr : gguf_get_arr_data(ctx, k), |
| }; |
| } |
| }; |
|
|
| template<typename T> |
| class GKV : public GKV_Base<T> { |
| GKV() = delete; |
|
|
| public: |
| static T get_kv(const gguf_context * ctx, const int k) { |
| const enum gguf_type kt = gguf_get_kv_type(ctx, k); |
|
|
| if (kt != GKV::gt) { |
| throw std::runtime_error(format("key %s has wrong type %s but expected type %s", |
| gguf_get_key(ctx, k), gguf_type_name(kt), gguf_type_name(GKV::gt))); |
| } |
| return GKV::getter(ctx, k); |
| } |
|
|
| static const char * override_type_to_str(const llama_model_kv_override_type ty) { |
| switch (ty) { |
| case LLAMA_KV_OVERRIDE_TYPE_BOOL: return "bool"; |
| case LLAMA_KV_OVERRIDE_TYPE_INT: return "int"; |
| case LLAMA_KV_OVERRIDE_TYPE_FLOAT: return "float"; |
| case LLAMA_KV_OVERRIDE_TYPE_STR: return "str"; |
| } |
| return "unknown"; |
| } |
|
|
| static bool validate_override(const llama_model_kv_override_type expected_type, const struct llama_model_kv_override * ovrd) { |
| if (!ovrd) { return false; } |
| if (ovrd->tag == expected_type) { |
| LLAMA_LOG_INFO("%s: Using metadata override (%5s) '%s' = ", |
| __func__, override_type_to_str(ovrd->tag), ovrd->key); |
| switch (ovrd->tag) { |
| case LLAMA_KV_OVERRIDE_TYPE_BOOL: { |
| LLAMA_LOG_INFO("%s\n", ovrd->val_bool ? "true" : "false"); |
| } break; |
| case LLAMA_KV_OVERRIDE_TYPE_INT: { |
| LLAMA_LOG_INFO("%" PRId64 "\n", ovrd->val_i64); |
| } break; |
| case LLAMA_KV_OVERRIDE_TYPE_FLOAT: { |
| LLAMA_LOG_INFO("%.6f\n", ovrd->val_f64); |
| } break; |
| case LLAMA_KV_OVERRIDE_TYPE_STR: { |
| LLAMA_LOG_INFO("%s\n", ovrd->val_str); |
| } break; |
default:
// should not be possible to end up here, but guard against it just in case
throw std::runtime_error(
| format("Unsupported attempt to override %s type for metadata key %s\n", |
| override_type_to_str(ovrd->tag), ovrd->key)); |
| } |
| return true; |
| } |
| LLAMA_LOG_WARN("%s: Warning: Bad metadata override type for key '%s', expected %s but got %s\n", |
| __func__, ovrd->key, override_type_to_str(expected_type), override_type_to_str(ovrd->tag)); |
| return false; |
| } |
|
|
| template<typename OT> |
| static typename std::enable_if<std::is_same<OT, bool>::value, bool>::type |
| try_override(OT & target, const struct llama_model_kv_override * ovrd) { |
| if (validate_override(LLAMA_KV_OVERRIDE_TYPE_BOOL, ovrd)) { |
| target = ovrd->val_bool; |
| return true; |
| } |
| return false; |
| } |
|
|
| template<typename OT> |
| static typename std::enable_if<!std::is_same<OT, bool>::value && std::is_integral<OT>::value, bool>::type |
| try_override(OT & target, const struct llama_model_kv_override * ovrd) { |
| if (validate_override(LLAMA_KV_OVERRIDE_TYPE_INT, ovrd)) { |
| target = ovrd->val_i64; |
| return true; |
| } |
| return false; |
| } |
|
|
| template<typename OT> |
| static typename std::enable_if<std::is_floating_point<OT>::value, bool>::type |
try_override(OT & target, const struct llama_model_kv_override * ovrd) {
| if (validate_override(LLAMA_KV_OVERRIDE_TYPE_FLOAT, ovrd)) { |
| target = ovrd->val_f64; |
| return true; |
| } |
| return false; |
| } |
|
|
| template<typename OT> |
| static typename std::enable_if<std::is_same<OT, std::string>::value, bool>::type |
try_override(OT & target, const struct llama_model_kv_override * ovrd) {
| if (validate_override(LLAMA_KV_OVERRIDE_TYPE_STR, ovrd)) { |
| target = ovrd->val_str; |
| return true; |
| } |
| return false; |
| } |
|
|
| static bool set(const gguf_context * ctx, const int k, T & target, const struct llama_model_kv_override * ovrd = nullptr) { |
| if (try_override<T>(target, ovrd)) { |
| return true; |
| } |
| if (k < 0) { return false; } |
| target = get_kv(ctx, k); |
| return true; |
| } |
|
|
| static bool set(const gguf_context * ctx, const char * key, T & target, const struct llama_model_kv_override * ovrd = nullptr) { |
| return set(ctx, gguf_find_key(ctx, key), target, ovrd); |
| } |
|
|
| static bool set(const gguf_context * ctx, const std::string & key, T & target, const struct llama_model_kv_override * ovrd = nullptr) { |
| return set(ctx, key.c_str(), target, ovrd); |
| } |
| }; |
| } |
|
|
| template<typename T> |
| typename std::enable_if<std::is_integral<T>::value, bool>::type |
| llama_model_loader::get_arr_n(const std::string & key, T & result, bool required) { |
| const int kid = gguf_find_key(metadata, key.c_str()); |
|
|
| if (kid < 0) { |
| if (required) { |
| throw std::runtime_error(format("key not found in model: %s", key.c_str())); |
| } |
| return false; |
| } |
|
|
| struct GGUFMeta::ArrayInfo arr_info = |
| GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(metadata, kid); |
|
|
|
|
| result = arr_info.length; |
| return true; |
| } |
|
|
| template<typename T> |
| typename std::enable_if<std::is_integral<T>::value, bool>::type |
| llama_model_loader::get_arr_n(enum llm_kv kid, T & result, bool required) { |
| return get_arr_n(llm_kv(kid), result, required); |
| } |
|
|
| template bool llama_model_loader::get_arr_n(enum llm_kv kid, uint32_t & result, bool required); |
|
|
| template<typename T> |
| bool llama_model_loader::get_arr(const std::string & key, std::vector<T> & result, bool required) { |
| const gguf_context * ctx = metadata; |
| const int kid = gguf_find_key(ctx, key.c_str()); |
|
|
| if (kid < 0 || gguf_get_kv_type(ctx, kid) != GGUF_TYPE_ARRAY) { |
| if (required) { |
| throw std::runtime_error(format("array key not found in model: %s", key.c_str())); |
| } |
| return false; |
| } |
|
|
| struct GGUFMeta::ArrayInfo arr_info = |
| GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(ctx, kid); |
|
|
| switch (arr_info.gt) { |
| case GGUF_TYPE_UINT32: |
| case GGUF_TYPE_INT32: GGML_ASSERT((std::is_same<T, int32_t>::value) || |
| (std::is_same<T, uint32_t>::value)); break; |
| case GGUF_TYPE_FLOAT32: GGML_ASSERT((std::is_same<T, float>::value)); break; |
| case GGUF_TYPE_STRING: GGML_ASSERT((std::is_same<T, std::string>::value)); break; |
| default: |
| throw std::runtime_error(format("%s is not a string/float32/uint32/int32 array", key.c_str())); |
| } |
|
|
| if constexpr (std::is_same<T, std::string>::value) { |
| const size_t n_items = gguf_get_arr_n(ctx, kid); |
| result.clear(); |
|
|
| for (size_t i = 0; i < n_items; i++) { |
| const T value = gguf_get_arr_str(ctx, kid, i); |
| result.emplace_back(value); |
| } |
| } else { |
result.assign((const T *) arr_info.data, (const T *) arr_info.data + arr_info.length);
| } |
|
|
| return true; |
| } |
|
|
| template<typename T, size_t N_MAX> |
| bool llama_model_loader::get_arr(const std::string & key, std::array<T, N_MAX> & result, bool required) { |
| const gguf_context * ctx = metadata; |
| const int kid = gguf_find_key(ctx, key.c_str()); |
|
|
| if (kid < 0 || gguf_get_kv_type(ctx, kid) != GGUF_TYPE_ARRAY) { |
| if (required) { |
| throw std::runtime_error(format("array key not found in model: %s", key.c_str())); |
| } |
| return false; |
| } |
|
|
| struct GGUFMeta::ArrayInfo arr_info = |
| GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(ctx, kid); |
|
|
| switch (arr_info.gt) { |
| case GGUF_TYPE_BOOL: |
| case GGUF_TYPE_UINT32: |
| case GGUF_TYPE_INT32: GGML_ASSERT((std::is_same<T, int32_t>::value) || |
| (std::is_same<T, uint32_t>::value)); break; |
| case GGUF_TYPE_FLOAT32: GGML_ASSERT((std::is_same<T, float>::value)); break; |
| case GGUF_TYPE_STRING: GGML_ASSERT((std::is_same<T, std::string>::value)); break; |
| default: |
| throw std::runtime_error(format("%s is not a string/float32/uint32/int32 array", key.c_str())); |
| } |
|
|
| if (arr_info.length > N_MAX) { |
| throw std::runtime_error(format("array length %u for key %s exceeds max %u", (uint32_t) arr_info.length, key.c_str(), (uint32_t) N_MAX)); |
| } |
|
|
| if constexpr (std::is_same<T, std::string>::value) { |
| const size_t n_items = gguf_get_arr_n(ctx, kid); |
|
|
| for (size_t i = 0; i < n_items; i++) { |
| const T value = gguf_get_arr_str(ctx, kid, i); |
| result[i] = value; |
| } |
| } else { |
| if (arr_info.gt == GGUF_TYPE_BOOL) { |
| std::transform((const bool *)arr_info.data, (const bool *)arr_info.data + arr_info.length, result.begin(), [](bool x) { |
| return static_cast<T>(x); |
| }); |
| } else { |
| std::copy((const T*)arr_info.data, (const T *)arr_info.data + arr_info.length, result.begin()); |
| } |
| } |
|
|
| return true; |
| } |
|
|
| template<typename T> |
| bool llama_model_loader::get_arr(enum llm_kv kid, T & result, bool required) { |
| return get_arr(llm_kv(kid), result, required); |
| } |
|
|
| template bool llama_model_loader::get_arr<std::vector<std::string>>(enum llm_kv kid, std::vector<std::string> & result, bool required); |
|
|
| template<typename T> |
| bool llama_model_loader::get_key(const std::string & key, T & result, bool required) { |
| auto it = kv_overrides.find(key); |
|
|
| const struct llama_model_kv_override * override = |
| it != kv_overrides.end() ? &it->second : nullptr; |
|
|
| const bool found = GGUFMeta::GKV<T>::set(metadata, key, result, override); |
|
|
| if (required && !found) { |
| throw std::runtime_error(format("key not found in model: %s", key.c_str())); |
| } |
|
|
| return found; |
| } |
|
|
| template<typename T> |
| bool llama_model_loader::get_key(enum llm_kv kid, T & result, bool required) { |
| return get_key(llm_kv(kid), result, required); |
| } |
|
|
| template bool llama_model_loader::get_key<bool> (enum llm_kv kid, bool & result, bool required); |
| template bool llama_model_loader::get_key<float> (enum llm_kv kid, float & result, bool required); |
| template bool llama_model_loader::get_key<uint32_t> (enum llm_kv kid, uint32_t & result, bool required); |
| template bool llama_model_loader::get_key<std::string>(enum llm_kv kid, std::string & result, bool required); |
|
|
| template<> |
| bool llama_model_loader::get_key(enum llm_kv kid, enum llama_pooling_type & result, bool required) { |
| uint32_t tmp; |
| const bool found = get_key(kid, tmp, required); |
| if (found) { |
| result = (enum llama_pooling_type) tmp; |
| } else { |
| result = LLAMA_POOLING_TYPE_UNSPECIFIED; |
| } |
| return found; |
| } |
|
|
| |
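// get an array of n <= N_MAX elements, or a single scalar repeated n times, e.g.
// (illustrative, `ml` being a loader instance):
//     std::array<uint32_t, 512> n_head_arr;
//     ml.get_key_or_arr(LLM_KV_ATTENTION_HEAD_COUNT, n_head_arr, hparams.n_layer);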
| template<typename T, size_t N_MAX> |
| bool llama_model_loader::get_key_or_arr(const std::string & key, std::array<T, N_MAX> & result, uint32_t n, bool required) { |
| const int kid = gguf_find_key(metadata, key.c_str()); |
|
|
| if (kid < 0) { |
| if (required) { |
| throw std::runtime_error(format("key not found in model: %s", key.c_str())); |
| } |
| return false; |
| } |
|
|
| if (n > N_MAX) { |
| throw std::runtime_error(format("n > N_MAX: %u > %u for key %s", (uint32_t) n, (uint32_t) N_MAX, key.c_str())); |
| } |
|
|
| if (gguf_get_kv_type(metadata, kid) == GGUF_TYPE_ARRAY) { |
| struct GGUFMeta::ArrayInfo arr_info = |
| GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(metadata, kid); |
|
|
| if (n != arr_info.length) { |
| throw std::runtime_error(format("key %s has wrong array length; expected %u, got %u", key.c_str(), n, (uint32_t) arr_info.length)); |
| } |
|
|
| return get_arr(key, result, required); |
| } |
|
|
| T value; |
|
|
| bool ok = get_key(key, value, required); |
| if (!ok) { |
| return false; |
| } |
|
|
| for (uint32_t i = 0; i < n; i++) { |
| result[i] = value; |
| } |
|
|
| return true; |
| } |
|
|
| template<typename T> |
| bool llama_model_loader::get_key_or_arr(enum llm_kv kid, T & result, uint32_t n, bool required) { |
| return get_key_or_arr(llm_kv(kid), result, n, required); |
| } |
|
|
| bool llama_model_loader::get_key_or_arr(enum llm_kv kid, uint32_t & result, bool required) { |
| const std::string key = llm_kv(kid); |
|
|
| const int id = gguf_find_key(metadata, key.c_str()); |
|
|
| if (id < 0) { |
| if (required) { |
| throw std::runtime_error(format("key not found in model: %s", key.c_str())); |
| } |
| return false; |
| } |
|
|
| |
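// scalar overload: an array value cannot be collapsed into a single scalar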
| if (gguf_get_kv_type(metadata, id) == GGUF_TYPE_ARRAY) { |
| if (required) { |
| throw std::runtime_error(format("expected scalar, found array for key: %s", key.c_str())); |
| } |
| return false; |
| } |
|
|
| return get_key(key, result, required); |
| } |
|
|
| |
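// explicit instantiations for the array variants used by the model loaders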
| template bool llama_model_loader::get_key_or_arr<std::array<int, 4>>(enum llm_kv kid, std::array<int, 4> & result, uint32_t n, bool required); |
| template bool llama_model_loader::get_key_or_arr<std::array<uint32_t, 512>>(enum llm_kv kid, std::array<uint32_t, 512> & result, uint32_t n, bool required); |
| template bool llama_model_loader::get_key_or_arr<std::array<float, 512>>(enum llm_kv kid, std::array<float, 512> & result, uint32_t n, bool required); |
|
|
|
|
| llama_model_loader::llama_model_loader( |
| struct gguf_context * meta, |
| llama_model_set_tensor_data_t set_tensor_data, |
| void * set_tensor_data_ud, |
| const std::string & fname, |
| std::vector<std::string> & splits, |
| bool use_mmap, |
| bool use_direct_io, |
| bool check_tensors, |
| bool no_alloc, |
| const llama_model_kv_override * param_overrides_p, |
| const llama_model_tensor_buft_override * param_tensor_buft_overrides_p) |
| : metadata(meta), set_tensor_data(set_tensor_data), set_tensor_data_ud(set_tensor_data_ud) { |
| int trace = 0; |
| if (getenv("LLAMA_TRACE")) { |
| trace = atoi(getenv("LLAMA_TRACE")); |
| } |
|
|
| if (param_overrides_p != nullptr) { |
| for (const struct llama_model_kv_override * p = param_overrides_p; p->key[0] != 0; p++) { |
| kv_overrides.insert({std::string(p->key), *p}); |
| } |
| } |
|
|
| tensor_buft_overrides = param_tensor_buft_overrides_p; |
|
|
| if (!fname.empty()) { |
| |
| struct ggml_context * ctx = NULL; |
struct gguf_init_params params = {
/*.no_alloc =*/ true,
/*.ctx      =*/ &ctx,
};
|
|
| metadata_ptr.reset(gguf_init_from_file(fname.c_str(), params)); |
| metadata = metadata_ptr.get(); |
| if (metadata == nullptr) { |
| throw std::runtime_error(format("%s: failed to load model from %s", __func__, fname.c_str())); |
| } |
|
|
| get_key(llm_kv(LLM_KV_GENERAL_ARCHITECTURE), arch_name, false); |
| llm_kv = LLM_KV(llm_arch_from_string(arch_name)); |
|
|
| files.emplace_back(new llama_file(fname.c_str(), "rb", use_direct_io)); |
| contexts.emplace_back(ctx); |
|
|
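// mmap and direct I/O are mutually exclusive - prefer direct I/O when the file actually supports it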
| if (use_mmap && use_direct_io) { |
| if (files.back()->has_direct_io()) { |
| LLAMA_LOG_WARN("%s: direct I/O is enabled, disabling mmap\n", __func__); |
| use_mmap = false; |
| } else { |
| LLAMA_LOG_WARN("%s: direct I/O is not available, using mmap\n", __func__); |
| use_direct_io = false; |
|
|
| |
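// reopen the file without direct I/O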
| files.pop_back(); |
| files.emplace_back(new llama_file(fname.c_str(), "rb", false)); |
| } |
| } |
|
|
| |
| |
| |
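// build an index of all tensor weights; the data offsets are taken from the
// GGUF context of the file that actually contains each tensor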
| for (ggml_tensor * cur = ggml_get_first_tensor(ctx); cur; cur = ggml_get_next_tensor(ctx, cur)) { |
std::string tensor_name = std::string(cur->name);
// make sure there are no duplicate tensor names
if (weights_map.find(tensor_name) != weights_map.end()) {
| throw std::runtime_error(format("invalid model: tensor '%s' is duplicated", ggml_get_name(cur))); |
| } |
| n_elements += ggml_nelements(cur); |
| n_bytes += ggml_nbytes(cur); |
| weights_map.emplace(tensor_name, llama_tensor_weight(files.back().get(), 0, metadata, cur)); |
| } |
| uint16_t n_split = 0; |
| get_key(llm_kv(LLM_KV_SPLIT_COUNT), n_split, false); |
|
|
| |
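// load the metadata of the additional split files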
| if (n_split > 1) { |
| |
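// make sure the main file is loaded first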
| uint16_t idx = 0; |
| const std::string kv_split_no = llm_kv(LLM_KV_SPLIT_NO); |
| get_key(kv_split_no, idx); |
| if (idx != 0) { |
| throw std::runtime_error(format("illegal split file idx: %d (file: %s), model must be loaded with the first split", idx, fname.c_str())); |
| } |
|
|
| |
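// generate the list of split paths if the user did not provide one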
| if (splits.empty()) { |
| splits = llama_get_list_splits(fname, idx, n_split); |
| } |
|
|
| |
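// in case the user provided a custom list of splits, check that it matches the expected count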
| if (n_split != (uint16_t)splits.size()) { |
| throw std::runtime_error(format("invalid split count, given: %zu splits, but expected %d", splits.size(), n_split)); |
| } |
|
|
| if (trace > 0) { |
| LLAMA_LOG_INFO("%s: loading additional %d GGUFs\n", __func__, n_split); |
| } |
|
|
| |
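// load the remaining splits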
| for (idx = 1; idx < n_split; idx++) { |
| const char * fname_split = splits[idx].c_str(); |
|
|
struct gguf_init_params split_params = {
/*.no_alloc =*/ true,
/*.ctx      =*/ &ctx,
};
| gguf_context_ptr ctx_gguf { gguf_init_from_file(fname_split, split_params) }; |
| if (!ctx_gguf) { |
| throw std::runtime_error(format("%s: failed to load GGUF split from %s", __func__, fname_split)); |
| } |
|
|
| |
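// verify that the split index stored in the file matches its position in the list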
| { |
| const int kid = gguf_find_key(ctx_gguf.get(), kv_split_no.c_str()); |
| if (kid < 0) { |
| throw std::runtime_error(format("missing key %s in GGUF split %s", kv_split_no.c_str(), fname_split)); |
| } |
| int idx_gguf = gguf_get_val_u16(ctx_gguf.get(), kid); |
| if (idx_gguf != idx) { |
| throw std::runtime_error(format("invalid split file idx: %d (file: %s), expected %d", idx_gguf, fname_split, idx)); |
| } |
| } |
|
|
| files.emplace_back(new llama_file(fname_split, "rb", use_direct_io)); |
| contexts.emplace_back(ctx); |
|
|
| |
| for (ggml_tensor * cur = ggml_get_first_tensor(ctx); cur; cur = ggml_get_next_tensor(ctx, cur)) { |
std::string tensor_name = std::string(cur->name);
// make sure there are no duplicate tensor names
if (weights_map.find(tensor_name) != weights_map.end()) {
| throw std::runtime_error(format("invalid model: tensor '%s' is duplicated", ggml_get_name(cur))); |
| } |
| n_elements += ggml_nelements(cur); |
| n_bytes += ggml_nbytes(cur); |
| weights_map.emplace(tensor_name, llama_tensor_weight(files.back().get(), idx, ctx_gguf.get(), cur)); |
| } |
| } |
|
|
| get_key(llm_kv(LLM_KV_SPLIT_TENSORS_COUNT), n_tensors); |
|
|
| |
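// sanity check: every tensor declared in the split metadata must have been indexed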
| { |
| const int n_tensors_loaded = (int) weights_map.size(); |
| if (n_tensors != n_tensors_loaded) { |
| throw std::runtime_error(format("corrupted model: %d tensors expected but %d found", n_tensors, n_tensors_loaded)); |
| } |
| } |
|
|
| LLAMA_LOG_INFO("%s: additional %d GGUFs metadata loaded.\n", __func__, n_split - 1); |
| } |
| } else { |
| get_key(llm_kv(LLM_KV_GENERAL_ARCHITECTURE), arch_name, false); |
| llm_kv = LLM_KV(llm_arch_from_string(arch_name)); |
| } |
|
|
| n_kv = gguf_get_n_kv(metadata); |
| n_tensors = weights_map.size(); |
|
|
| fver = (enum llama_fver) gguf_get_version(metadata); |
|
|
| LLAMA_LOG_INFO("%s: loaded meta data with %d key-value pairs and %d tensors from %s (version %s)\n", |
| __func__, n_kv, n_tensors, fname.c_str(), llama_file_version_name(fver)); |
|
|
| |
| |
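// determine the file type from the most common tensor type and dump the metadata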
| { |
| std::map<enum ggml_type, uint32_t> n_type; |
|
|
| uint32_t n_type_max = 0; |
| enum ggml_type type_max = GGML_TYPE_F32; |
|
|
| for (const auto & it : weights_map) { |
| const llama_tensor_weight & w = it.second; |
| const ggml_tensor * tensor = w.tensor; |
|
|
| enum ggml_type type = tensor->type; |
|
|
| n_type[type]++; |
|
|
| if (n_type_max < n_type[type]) { |
| n_type_max = n_type[type]; |
| type_max = type; |
| } |
|
|
| if (trace > 0) { |
| const uint16_t sid = w.idx; |
| LLAMA_LOG_INFO("%s: - tensor split %2d: %32s %-8s [ %s ] %8.2f MiB\n", __func__, |
| sid, ggml_get_name(tensor), ggml_type_name(type), llama_format_tensor_shape(tensor).c_str(), |
| ggml_nbytes(tensor)/1024.0f/1024.0f); |
| } |
| } |
|
|
| switch (type_max) { |
| case GGML_TYPE_F32: ftype = LLAMA_FTYPE_ALL_F32; break; |
| case GGML_TYPE_F16: ftype = LLAMA_FTYPE_MOSTLY_F16; break; |
| case GGML_TYPE_BF16: ftype = LLAMA_FTYPE_MOSTLY_BF16; break; |
| case GGML_TYPE_Q4_0: ftype = LLAMA_FTYPE_MOSTLY_Q4_0; break; |
| case GGML_TYPE_Q4_1: ftype = LLAMA_FTYPE_MOSTLY_Q4_1; break; |
| case GGML_TYPE_Q5_0: ftype = LLAMA_FTYPE_MOSTLY_Q5_0; break; |
| case GGML_TYPE_Q5_1: ftype = LLAMA_FTYPE_MOSTLY_Q5_1; break; |
| case GGML_TYPE_Q8_0: ftype = LLAMA_FTYPE_MOSTLY_Q8_0; break; |
| case GGML_TYPE_Q2_K: ftype = LLAMA_FTYPE_MOSTLY_Q2_K; break; |
| case GGML_TYPE_Q3_K: ftype = LLAMA_FTYPE_MOSTLY_Q3_K_M; break; |
| case GGML_TYPE_Q4_K: ftype = LLAMA_FTYPE_MOSTLY_Q4_K_M; break; |
| case GGML_TYPE_Q5_K: ftype = LLAMA_FTYPE_MOSTLY_Q5_K_M; break; |
| case GGML_TYPE_Q6_K: ftype = LLAMA_FTYPE_MOSTLY_Q6_K; break; |
| case GGML_TYPE_TQ1_0: ftype = LLAMA_FTYPE_MOSTLY_TQ1_0; break; |
| case GGML_TYPE_TQ2_0: ftype = LLAMA_FTYPE_MOSTLY_TQ2_0; break; |
| case GGML_TYPE_IQ2_XXS: ftype = LLAMA_FTYPE_MOSTLY_IQ2_XXS; break; |
| case GGML_TYPE_IQ2_XS: ftype = LLAMA_FTYPE_MOSTLY_IQ2_XS; break; |
| case GGML_TYPE_IQ2_S: ftype = LLAMA_FTYPE_MOSTLY_IQ2_S; break; |
| case GGML_TYPE_IQ3_XXS: ftype = LLAMA_FTYPE_MOSTLY_IQ3_XXS; break; |
| case GGML_TYPE_IQ1_S: ftype = LLAMA_FTYPE_MOSTLY_IQ1_S; break; |
| case GGML_TYPE_IQ1_M: ftype = LLAMA_FTYPE_MOSTLY_IQ1_M; break; |
| case GGML_TYPE_IQ4_NL: ftype = LLAMA_FTYPE_MOSTLY_IQ4_NL; break; |
| case GGML_TYPE_IQ4_XS: ftype = LLAMA_FTYPE_MOSTLY_IQ4_XS; break; |
| case GGML_TYPE_IQ3_S: ftype = LLAMA_FTYPE_MOSTLY_IQ3_S; break; |
| case GGML_TYPE_NVFP4: ftype = LLAMA_FTYPE_MOSTLY_NVFP4; break; |
| default: |
| { |
| LLAMA_LOG_WARN("%s: unknown type %s\n", __func__, ggml_type_name(type_max)); |
| ftype = LLAMA_FTYPE_ALL_F32; |
| } break; |
| } |
|
|
| |
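// mark the file type as guessed; an explicit general.file_type key (read below) takes precedence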
| ftype = (llama_ftype) (ftype | LLAMA_FTYPE_GUESSED); |
|
|
| { |
| uint32_t ftype_val = 0; |
| if (get_key(LLM_KV_GENERAL_FILE_TYPE, ftype_val, false)) { |
| ftype = (llama_ftype) ftype_val; |
| } |
| } |
|
|
| LLAMA_LOG_INFO("%s: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n", __func__); |
|
|
| for (int i = 0; i < n_kv; i++) { |
| const char * name = gguf_get_key(metadata, i); |
| const enum gguf_type type = gguf_get_kv_type(metadata, i); |
| const std::string type_name = |
| type == GGUF_TYPE_ARRAY |
| ? format("%s[%s,%zu]", gguf_type_name(type), gguf_type_name(gguf_get_arr_type(metadata, i)), gguf_get_arr_n(metadata, i)) |
| : gguf_type_name(type); |
|
|
| std::string value = gguf_kv_to_str(metadata, i); |
| const size_t MAX_VALUE_LEN = 40; |
| if (value.size() > MAX_VALUE_LEN) { |
| value = format("%s...", value.substr(0, MAX_VALUE_LEN - 3).c_str()); |
| } |
| replace_all(value, "\n", "\\n"); |
|
|
| LLAMA_LOG_INFO("%s: - kv %3d: %42s %-16s = %s\n", __func__, i, name, type_name.c_str(), value.c_str()); |
| } |
|
|
| |
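// print the tensor counts per type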
| for (auto & kv : n_type) { |
| if (kv.second == 0) { |
| continue; |
| } |
|
|
| LLAMA_LOG_INFO("%s: - type %4s: %4d tensors\n", __func__, ggml_type_name(kv.first), kv.second); |
| } |
| } |
|
|
| if (!llama_mmap::SUPPORTED) { |
| LLAMA_LOG_WARN("%s: mmap is not supported on this platform\n", __func__); |
| use_mmap = false; |
| } |
|
|
| this->use_mmap = use_mmap; |
| this->use_direct_io = use_direct_io; |
| this->check_tensors = check_tensors; |
| this->no_alloc = no_alloc; |
| } |
|
|
| std::string llama_model_loader::get_arch_name() const { |
| return arch_name; |
| } |
|
|
| enum llm_arch llama_model_loader::get_arch() const { |
| return llm_kv.arch; |
| } |
|
|
| const llama_model_loader::llama_tensor_weight * llama_model_loader::get_weight(const char * name) const { |
| auto pos = weights_map.find(name); |
| if (pos != weights_map.end()) { |
| return &pos->second; |
| } |
|
|
| return nullptr; |
| } |
|
|
| const llama_model_loader::llama_tensor_weight & llama_model_loader::require_weight(const char * name) const { |
| const llama_tensor_weight * weight = get_weight(name); |
| if (!weight) { |
| throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name)); |
| } |
| return *weight; |
| } |
|
|
| struct ggml_tensor * llama_model_loader::get_tensor_meta(const char * name) const { |
| const auto * weight = get_weight(name); |
| if (!weight) { |
| return nullptr; |
| } |
| return weight->tensor; |
| } |
|
|
| struct ggml_tensor * llama_model_loader::require_tensor_meta(const std::string & name) const { |
| struct ggml_tensor * tensor = get_tensor_meta(name.c_str()); |
| if (!tensor) { |
| throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str())); |
| } |
| return tensor; |
| } |
|
|
| const struct ggml_tensor * llama_model_loader::check_tensor_dims(const std::string & name, const std::vector<int64_t> & ne, bool required) const { |
| const struct ggml_tensor * cur = get_tensor_meta(name.c_str()); |
|
|
| if (cur == NULL) { |
| if (!required) { |
| return NULL; |
| } |
| throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str())); |
| } |
|
|
| { |
| bool is_ok = true; |
| for (size_t i = 0; i < GGML_MAX_DIMS; ++i) { |
| if ((i < ne.size() && ne[i] != cur->ne[i]) || (i >= ne.size() && cur->ne[i] != 1)) { |
| is_ok = false; |
| break; |
| } |
| } |
| if (!is_ok) { |
| throw std::runtime_error( |
| format("%s: tensor '%s' has wrong shape; expected %s, got %s", |
| __func__, name.c_str(), |
| llama_format_tensor_shape(ne).c_str(), |
| llama_format_tensor_shape(cur).c_str())); |
| } |
| } |
|
|
| return cur; |
| } |
|
|
| |
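// check if the weight tensor, used with the given op, is supported by the buffer type on this device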
| static bool weight_buft_supported(const llama_hparams & hparams, ggml_tensor * w, ggml_op op, ggml_backend_buffer_type_t buft, ggml_backend_dev_t dev) { |
| GGML_ASSERT(w != nullptr); |
|
|
| if (op == GGML_OP_NONE) { |
| return true; |
| } |
|
|
ggml_init_params params = {
/*.mem_size   =*/ ggml_tensor_overhead()*8,
/*.mem_buffer =*/ NULL,
/*.no_alloc   =*/ true,
};
| ggml_context_ptr ctx_ptr { ggml_init(params) }; |
| if (!ctx_ptr) { |
| throw std::runtime_error(format("failed to create ggml context")); |
| } |
| ggml_context * ctx = ctx_ptr.get(); |
|
|
| ggml_tensor * op_tensor = nullptr; |
|
|
| switch (op) { |
| case GGML_OP_GET_ROWS: |
| { |
| ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 512); |
| op_tensor = ggml_get_rows(ctx, w, b); |
| } break; |
| case GGML_OP_MUL_MAT: |
| { |
| ggml_tensor * b = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, w->ne[0], 512, w->ne[2], w->ne[3]); |
| op_tensor = ggml_mul_mat(ctx, w, b); |
| } break; |
| case GGML_OP_MUL_MAT_ID: |
| { |
| const int n_expert_used = hparams.n_expert_used; |
| GGML_ASSERT(n_expert_used > 0); |
| ggml_tensor * b = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, w->ne[0], n_expert_used, 512); |
| ggml_tensor * ids = ggml_new_tensor_2d(ctx, GGML_TYPE_I32, n_expert_used, 512); |
| op_tensor = ggml_mul_mat_id(ctx, w, b, ids); |
| } break; |
| case GGML_OP_ADD: |
| { |
| ggml_tensor * a = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, w->ne[0], w->ne[1], w->ne[2], w->ne[3]); |
| op_tensor = ggml_add(ctx, a, w); |
| } break; |
| case GGML_OP_ADD_ID: |
| { |
| const int n_expert_used = hparams.n_expert_used; |
| GGML_ASSERT(n_expert_used > 0); |
| ggml_tensor * a = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, w->ne[0], n_expert_used, 512); |
| ggml_tensor * c = ggml_new_tensor_2d(ctx, GGML_TYPE_I32, n_expert_used, 512); |
| op_tensor = ggml_add_id(ctx, a, w, c); |
| } break; |
| case GGML_OP_MUL: |
| { |
| ggml_tensor * a = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, w->ne[0], w->ne[1], w->ne[2], w->ne[3]); |
| op_tensor = ggml_mul(ctx, a, w); |
| } break; |
| case GGML_OP_DIV: |
| { |
| ggml_tensor * a = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, w->ne[0]); |
| op_tensor = ggml_div(ctx, a, w); |
| } break; |
| case GGML_OP_ROPE: |
| { |
| const int n_embd_head = hparams.n_embd_head_v(); |
| const int n_head = hparams.n_head(); |
| ggml_tensor * a = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, n_embd_head, n_head, 512); |
| ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 512); |
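// zeroed rope parameters are enough here - only backend op support is being probed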
| op_tensor = ggml_rope_ext( |
| ctx, a, b, w, |
| 0, 0, 0, 0, 0, |
| 0, 0, 0, 0 |
| ); |
|
|
| } break; |
| case GGML_OP_SSM_CONV: |
| { |
| const int64_t n_seq_tokens = 512; |
| const int64_t n_seqs = 3; |
| ggml_tensor * conv_x = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, w->ne[0] - 1 + n_seq_tokens, w->ne[1], n_seqs); |
| op_tensor = ggml_ssm_conv(ctx, conv_x, w); |
| } break; |
| case GGML_OP_SSM_SCAN: |
| { |
| |
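// w is ssm_a; its first dimension is 1 for Mamba-2, in which case d_state comes from the hparams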
| const int64_t d_state = w->ne[0] == 1 ? hparams.ssm_d_state : w->ne[0]; |
| const int64_t n_head = w->ne[1]; |
| const int64_t head_dim = hparams.ssm_d_inner / n_head; |
| const int64_t n_group = hparams.ssm_n_group ? hparams.ssm_n_group : 1; |
| const int64_t n_seq_tokens = 512; |
| const int64_t n_seqs = 3; |
| ggml_tensor * s = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, d_state, head_dim, n_head, n_seqs); |
| ggml_tensor * x = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, head_dim, n_head, n_seq_tokens, n_seqs); |
| ggml_tensor * dt = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, n_head, n_seq_tokens, n_seqs); |
| ggml_tensor * B = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, d_state, n_group, n_seq_tokens, n_seqs); |
| ggml_tensor * C = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, d_state, n_group, n_seq_tokens, n_seqs); |
| ggml_tensor * ids = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n_seqs); |
| op_tensor = ggml_ssm_scan(ctx, s, x, dt, w, B, C, ids); |
| } break; |
| case GGML_OP_RWKV_WKV6: |
| { |
| |
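// placeholder sizes - only op support is probed, the actual shapes do not matter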
| const int64_t S = 123; |
| const int64_t H = 123; |
| const int64_t n_tokens = 123; |
| const int64_t n_seqs = 123; |
| ggml_tensor * k = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, S, H, n_tokens); |
| ggml_tensor * v = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, S, H, n_tokens); |
| ggml_tensor * r = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, S, H, n_tokens); |
| ggml_tensor * tf = w; |
| ggml_tensor * td = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, S, H, n_tokens); |
| ggml_tensor * state = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, S, n_seqs, S, H); |
| op_tensor = ggml_rwkv_wkv6(ctx, k, v, r, tf, td, state); |
| } break; |
| case GGML_OP_IM2COL: |
| { |
| const int n_embd_inp = hparams.n_embd_inp(); |
| ggml_tensor * b = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, n_embd_inp, w->ne[1], 1, 1); |
| op_tensor = ggml_im2col(ctx, w, b, 1, 0, 0, 0, 1, 0, false, GGML_TYPE_F16); |
| } break; |
| case GGML_OP_SCALE: |
| { |
| op_tensor = ggml_scale(ctx, w, 1.0f); |
| } break; |
| default: |
| GGML_ABORT("%s: missing test for op %s for tensor %s", __func__, ggml_op_name(op), w->name); |
| } |
|
|
| |
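// create a temporary dummy buffer for the tensor so that supports_op can check the buffer type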
| GGML_ASSERT(w->buffer == nullptr); |
| w->buffer = ggml_backend_buft_alloc_buffer(buft, 0); |
| bool op_supported = ggml_backend_dev_supports_op(dev, op_tensor); |
| ggml_backend_buffer_free(w->buffer); |
| w->buffer = nullptr; |
|
|
| return op_supported; |
| } |
|
|
| |
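// find the first buffer type in the list that supports the op for this tensor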
| static ggml_backend_buffer_type_t select_weight_buft(const llama_hparams & hparams, ggml_tensor * tensor, ggml_op op, const buft_list_t * buft_list) { |
| GGML_ASSERT(!buft_list->empty()); |
| for (const auto & cur : *buft_list) { |
| ggml_backend_dev_t cur_dev = cur.first; |
| ggml_backend_buffer_type_t cur_buft = cur.second; |
| if (weight_buft_supported(hparams, tensor, op, cur_buft, cur_dev)) { |
| return cur_buft; |
| } |
| } |
|
|
| return nullptr; |
| } |
|
|
| struct ggml_tensor * llama_model_loader::create_tensor( |
| const llama_hparams & hparams, const buft_list_t * buft_list_cpu, const buft_list_t * buft_list_input, const buft_list_t * buft_list_output, |
| const buft_list_t * buft_list_layer, const LLM_TN_IMPL & tn, const std::initializer_list<int64_t> & ne, int flags) { |
| auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * { |
| auto it = ctx_map.find(buft); |
| if (it == ctx_map.end()) { |
| |
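// upper bound on the tensor count for this context: one extra for a duplicated
// output tensor, two per layer, and extra headroom for models created without files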
| int max_n_tensors = n_tensors; |
| max_n_tensors += 1; |
| max_n_tensors += hparams.n_layer*2; |
| if (files.empty()) { |
| max_n_tensors += hparams.n_layer*256; |
| } |
| const size_t ctx_size = ggml_tensor_overhead()*max_n_tensors; |
|
|
ggml_init_params params = {
/*.mem_size   =*/ ctx_size,
/*.mem_buffer =*/ NULL,
/*.no_alloc   =*/ true,
};
|
|
| ggml_context * ctx = ggml_init(params); |
| if (!ctx) { |
| throw std::runtime_error(format("failed to create ggml context")); |
| } |
|
|
| ctx_map.emplace(buft, ctx); |
|
|
| return ctx; |
| } |
| return it->second.get(); |
| }; |
|
|
| auto buft_for_tensor = [&](ggml_tensor * t_meta) -> ggml_backend_buffer_type_t { |
| if (!t_meta) { |
| if (flags & TENSOR_NOT_REQUIRED) { |
| return nullptr; |
| } |
| throw std::runtime_error(format("missing tensor '%s'", tn.str().c_str())); |
| } |
|
|
| |
| |
| |
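// some models use the token embedding tensor as the output; since it is then used
// in a different layer and with a different op, use the tensor info of the output instead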
| llm_tensor tn_tensor = tn.tensor; |
| if (tn.tensor == LLM_TENSOR_TOKEN_EMBD && (flags & TENSOR_DUPLICATED)) { |
| tn_tensor = LLM_TENSOR_OUTPUT; |
| } |
|
|
| llm_tensor_info info; |
| try { |
| info = llm_tensor_info_for(tn_tensor); |
| } catch (const std::out_of_range & e) { |
| throw std::runtime_error(format("missing tensor info mapping for %s", tn.str().c_str())); |
| } |
|
|
| |
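// skip unused tensors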
| if (info.op == GGML_OP_NONE || (flags & TENSOR_SKIP)) { |
| const size_t nbytes = ggml_nbytes(t_meta); |
| LLAMA_LOG_WARN("model has unused tensor %s (size = %zu bytes) -- ignoring\n", tn.str().c_str(), nbytes); |
|
|
| size_data -= nbytes; |
| n_created++; |
|
|
| return nullptr; |
| } |
|
|
| |
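// bias tensors are checked against GGML_OP_ADD / GGML_OP_ADD_ID instead of the op of their parent tensor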
| ggml_op op; |
| bool bias = tn.suffix != nullptr && strcmp(tn.suffix, "bias") == 0; |
| if (bias) { |
| if (info.op == GGML_OP_MUL_MAT_ID) { |
| op = GGML_OP_ADD_ID; |
| } else { |
| op = GGML_OP_ADD; |
| } |
| } else { |
| op = info.op; |
| } |
|
|
| |
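// sanity check: the presence of a layer id must match the tensor's layer type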
| if (info.layer == LLM_TENSOR_LAYER_INPUT || info.layer == LLM_TENSOR_LAYER_OUTPUT) { |
| if (tn.bid != -1) { |
| GGML_ABORT("input/output layer tensor %s used with a layer number", tn.str().c_str()); |
| } |
| } else { |
| if (tn.bid == -1) { |
| GGML_ABORT("repeating layer tensor %s used without a layer number", tn.str().c_str()); |
| } |
| } |
|
|
| |
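// select the buffer type list that matches the tensor's layer type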
| const buft_list_t * buft_list; |
| switch (info.layer) { |
| case LLM_TENSOR_LAYER_INPUT: |
| buft_list = buft_list_input; |
| break; |
| case LLM_TENSOR_LAYER_OUTPUT: |
| buft_list = buft_list_output; |
| break; |
| case LLM_TENSOR_LAYER_REPEATING: |
| GGML_ASSERT(buft_list_layer != nullptr); |
| buft_list = buft_list_layer; |
| break; |
| default: |
| GGML_ABORT("invalid layer %d for tensor %s", info.layer, tn.str().c_str()); |
| } |
|
|
| ggml_backend_buffer_type_t buft = nullptr; |
|
|
| |
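// apply user-provided buffer type overrides, matched by regex against the tensor name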
| if (tensor_buft_overrides) { |
| std::string tensor_name = tn.str(); |
| for (const auto * overrides = tensor_buft_overrides; overrides->pattern != nullptr; ++overrides) { |
| std::regex pattern(overrides->pattern); |
| if (std::regex_search(tensor_name, pattern)) { |
| if (overrides->buft == ggml_backend_cpu_buffer_type()) { |
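// when overriding to the CPU buffer type, still pick the best supported entry from the CPU list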
| |
| buft = select_weight_buft(hparams, t_meta, op, buft_list_cpu); |
| } else { |
| buft = overrides->buft; |
| } |
|
|
| LLAMA_LOG_DEBUG("tensor %s (%zu MiB %s) buffer type overridden to %s\n", |
| tensor_name.c_str(), |
| ggml_nbytes(t_meta) / 1024 / 1024, ggml_type_name(t_meta->type), |
| ggml_backend_buft_name(buft)); |
| break; |
| } |
| } |
| } |
|
|
| if (!buft) { |
| buft = select_weight_buft(hparams, t_meta, op, buft_list); |
| if (!buft) { |
| throw std::runtime_error(format("failed to find a compatible buffer type for tensor %s", tn.str().c_str())); |
| } |
| } |
|
|
| |
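// avoid using a host buffer when using mmap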
| auto * buft_dev = ggml_backend_buft_get_device(buft); |
| if (use_mmap && buft_dev && buft == ggml_backend_dev_host_buffer_type(buft_dev)) { |
| auto * cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU); |
| if (!cpu_dev) { |
| throw std::runtime_error("no CPU backend found"); |
| } |
| buft = ggml_backend_dev_buffer_type(cpu_dev); |
| } |
|
|
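// keep track of tensors that could not be placed on their preferred buffer type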
| if (buft != buft_list->front().second) { |
| if (n_tensors_moved == 0) { |
| first_tensor_moved_name = t_meta->name; |
| first_tensor_moved_type_name = ggml_type_name(t_meta->type); |
| first_moved_from_buft = buft_list->front().second; |
| first_moved_to_buft = buft; |
| } |
| n_tensors_moved++; |
| } |
|
|
| return buft; |
| }; |
|
|
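// no files: fabricate the tensor metadata from the requested shape (virtual model)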
| if (files.empty()) { |
| if (flags & TENSOR_SKIP_IF_VIRTUAL) { |
| return nullptr; |
| } |
| ggml_type type = GGML_TYPE_F32; |
| const int64_t tid = gguf_find_tensor(metadata, tn.str().c_str()); |
| if (tid != -1) { |
| type = gguf_get_tensor_type(metadata, tid); |
| } |
|
|
| |
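// treat optional tensors with a non-positive requested dimension as absent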
| if (flags & TENSOR_NOT_REQUIRED) { |
| for (size_t dim = 0; dim < ne.size(); dim++) { |
| if (ne.begin()[dim] <= 0) { |
| return nullptr; |
| } |
| } |
| } |
|
|
| ggml_tensor t_meta; |
| memset(&t_meta, 0, sizeof(ggml_tensor)); |
| t_meta.type = type; |
| for (size_t dim = 0; dim < GGML_MAX_DIMS; dim++) { |
| t_meta.ne[dim] = dim < ne.size() ? ne.begin()[dim] : 1; |
| GGML_ASSERT(t_meta.ne[dim] >= 1); |
| t_meta.nb[dim] = dim == 0 ? ggml_type_size(type) : t_meta.ne[dim-1]*t_meta.nb[dim-1]; |
| GGML_ASSERT(t_meta.nb[dim] >= 1); |
| } |
| ggml_set_name(&t_meta, tn.str().c_str()); |
|
|
| ggml_backend_buffer_type_t buft = buft_for_tensor(&t_meta); |
| GGML_ASSERT(buft != nullptr); |
| ggml_context * ctx = ctx_for_buft(buft); |
| ggml_tensor * ret = ggml_dup_tensor(ctx, &t_meta); |
| ggml_set_name(ret, tn.str().c_str()); |
| return ret; |
| } |
|
|
| ggml_tensor * t_meta = get_tensor_meta(tn.str().c_str()); |
| ggml_backend_buffer_type_t buft = buft_for_tensor(t_meta); |
| if (buft == nullptr) { |
| return nullptr; |
| } |
| ggml_context * ctx = ctx_for_buft(buft); |
|
|
| |
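// if the tensor is a duplicate, return the instance already created in this context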
| if (flags & TENSOR_DUPLICATED) { |
| ggml_tensor * t = ggml_get_tensor(ctx, tn.str().c_str()); |
| if (t) { |
| return t; |
| } |
| } |
|
|
| LLAMA_LOG_DEBUG("%s: loading tensor %s\n", __func__, tn.str().c_str()); |
| const struct ggml_tensor * cur = check_tensor_dims(tn.str(), ne, !(flags & TENSOR_NOT_REQUIRED)); |
|
|
| if (cur == NULL) { |
| return NULL; |
| } |
|
|
| const bool duplicated = flags & TENSOR_DUPLICATED; |
|
|
| struct ggml_tensor * tensor = ggml_dup_tensor(ctx, cur); |
| ggml_set_name(tensor, ggml_get_name(cur)); |
|
|
| if (duplicated) { |
| size_data += ggml_nbytes(cur); |
| } else { |
| n_created++; |
| } |
|
|
| return tensor; |
| } |
|
|
| struct ggml_tensor * llama_model_loader::create_tensor_as_view(struct ggml_context * ctx, struct ggml_tensor * base, const std::string & name, const std::initializer_list<int64_t> & ne, size_t offset, bool required) { |
| const struct ggml_tensor * cur = check_tensor_dims(name, ne, required); |
|
|
| if (cur == NULL) { |
| return NULL; |
| } |
|
|
| if (cur->type != base->type) { |
| throw std::runtime_error(format("%s: tensor '%s' has wrong type; expected %s, got %s", __func__, name.c_str(), ggml_type_name(base->type), ggml_type_name(cur->type))); |
| } |
|
|
| std::array<int64_t, GGML_MAX_DIMS> dims; |
| for (size_t i = 0; i < GGML_MAX_DIMS; ++i) { |
| dims[i] = i < ne.size() ? ne.begin()[i] : 1; |
| } |
|
|
| struct ggml_tensor * tensor = ggml_view_4d(ctx, base, |
| dims[0], dims[1], dims[2], dims[3], |
| cur->nb[1], cur->nb[2], cur->nb[3], |
| offset); |
|
|
| ggml_set_name(tensor, name.c_str()); |
|
|
| n_created++; |
|
|
| return tensor; |
| } |
|
|
| void llama_model_loader::done_getting_tensors() const { |
| if (n_created != n_tensors) { |
| throw std::runtime_error(format("%s: wrong number of tensors; expected %d, got %d", __func__, n_tensors, n_created)); |
| } |
| if (n_tensors_moved > 0) { |
| LLAMA_LOG_DEBUG("%s: tensor '%s' (%s) (and %zu others) cannot be used with preferred buffer type %s, using %s instead\n", |
| __func__, first_tensor_moved_name.c_str(), first_tensor_moved_type_name.c_str(), n_tensors_moved - 1, |
| ggml_backend_buft_name(first_moved_from_buft), ggml_backend_buft_name(first_moved_to_buft)); |
| } |
| } |
|
|
| void llama_model_loader::init_mappings(bool prefetch, llama_mlocks * mlock_mmaps) { |
| if (use_mmap) { |
| mappings.reserve(files.size()); |
| mmaps_used.reserve(files.size()); |
| for (const auto & file : files) { |
| bool is_numa = false; |
|
|
| auto * dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU); |
| if (dev) { |
| auto * reg = ggml_backend_dev_backend_reg(dev); |
| auto * is_numa_fn = (decltype(ggml_is_numa) *) ggml_backend_reg_get_proc_address(reg, "ggml_backend_cpu_is_numa"); |
| if (is_numa_fn) { |
| is_numa = is_numa_fn(); |
| } |
| } |
|
|
| std::unique_ptr<llama_mmap> mapping = std::make_unique<llama_mmap>(file.get(), prefetch ? -1 : 0, is_numa); |
| mmaps_used.emplace_back(mapping->size(), 0); |
| if (mlock_mmaps) { |
| std::unique_ptr<llama_mlock> mlock_mmap(new llama_mlock()); |
| mlock_mmap->init(mapping->addr()); |
| mlock_mmaps->emplace_back(std::move(mlock_mmap)); |
| } |
| mappings.emplace_back(std::move(mapping)); |
| } |
| } |
|
|
| |
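// compute the total size of all tensors for progress reporting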
| for (const auto & it : weights_map) { |
| size_data += ggml_nbytes(it.second.tensor); |
| } |
| } |
|
|
| void llama_model_loader::get_mapping_range(size_t * first, size_t * last, void ** addr, int idx, ggml_context * ctx) const { |
| GGML_ASSERT(!mappings.empty()); |
| const auto & mapping = mappings.at(idx); |
|
|
| *first = mapping->size(); |
| *last = 0; |
| *addr = mapping->addr(); |
| for (ggml_tensor * tensor = ggml_get_first_tensor(ctx); tensor; tensor = ggml_get_next_tensor(ctx, tensor)) { |
| const auto * weight = get_weight(ggml_get_name(tensor)); |
| if (!weight || weight->idx != idx) { |
| continue; |
| } |
| *first = std::min(*first, weight->offs); |
| *last = std::max(*last, weight->offs + ggml_nbytes(tensor)); |
| } |
| } |
|
|
| void llama_model_loader::load_data_for(struct ggml_tensor * cur) const { |
| const auto & w = require_weight(ggml_get_name(cur)); |
|
|
| if (use_mmap) { |
| const auto & mapping = mappings.at(w.idx); |
| if (cur->data == nullptr) { |
| cur->data = (uint8_t *)mapping->addr() + w.offs; |
| } else { |
| memcpy(cur->data, (uint8_t *)mapping->addr() + w.offs, ggml_nbytes(cur)); |
| } |
| } else { |
| GGML_ASSERT(cur->data != nullptr); |
| GGML_ASSERT(w.idx < files.size()); |
| const auto & file = files.at(w.idx); |
| file->seek(w.offs, SEEK_SET); |
| file->read_raw(cur->data, ggml_nbytes(cur)); |
| } |
|
|
| if (check_tensors && !ggml_validate_row_data(cur->type, cur->data, ggml_nbytes(cur))) { |
| throw std::runtime_error(format("tensor '%s' has invalid data", ggml_get_name(cur))); |
| } |
| } |
|
|
| bool llama_model_loader::load_all_data( |
| struct ggml_context * ctx, |
| llama_buf_map & bufs, |
| llama_mlocks * lmlocks, |
| llama_progress_callback progress_callback, |
| void * progress_callback_user_data) { |
| if (files.empty()) { |
| for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != nullptr; t = ggml_get_next_tensor(ctx, t)) { |
| set_tensor_data(t, set_tensor_data_ud); |
| } |
| return true; |
| } |
| GGML_ASSERT(size_data != 0 && "call init_mappings() first"); |
|
|
| std::vector<no_init<uint8_t>> read_buf; |
| std::vector<std::future<std::pair<ggml_tensor *, bool>>> validation_result; |
|
|
| |
| |
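// use a small pool of staging buffers for the async uploads; 4 buffers is assumed
// to be a reasonable default for a single NVMe drive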
| constexpr size_t n_buffers = 4; |
|
|
| size_t alignment = 1; |
| for (const auto & file : files) { |
| alignment = std::max(file->read_alignment(), alignment); |
| } |
|
|
| |
| |
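// buffers used for direct I/O reads must be aligned, so reserve extra space for the alignment padding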
| const size_t buffer_size = alignment != 1 ? 64 * 1024 * 1024 + 2 * alignment : 1 * 1024 * 1024; |
|
|
| std::vector<ggml_backend_buffer_t> host_buffers; |
| std::vector<ggml_backend_event_t> events; |
| std::vector<void *> host_ptrs; |
| size_t buffer_idx = 0; |
| ggml_backend_t upload_backend = [&](const char * func) -> ggml_backend_t { |
| if (use_mmap || check_tensors) { |
| return nullptr; |
| } |
| |
| |
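// when not using mmap, upload the tensors from pinned memory to the device asynchronously;
// first check that the backend supports all the features required for that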
| auto * buf = bufs.count(0) ? bufs.at(0) : nullptr; |
| if (!buf) { |
| LLAMA_LOG_DEBUG("%s: no buffer found for async uploads\n", func); |
| return nullptr; |
| } |
|
|
| auto * buft = ggml_backend_buffer_get_type(buf); |
| auto * dev = ggml_backend_buft_get_device(buft); |
| if (!dev) { |
| LLAMA_LOG_DEBUG("%s: no device found for buffer type %s for async uploads\n", func, |
| ggml_backend_buft_name(buft)); |
| return nullptr; |
| } |
|
|
| if (buft != ggml_backend_dev_buffer_type(dev)) { |
| LLAMA_LOG_DEBUG("%s: buffer type %s is not the default buffer type for device %s for async uploads\n", func, |
| ggml_backend_buft_name(buft), ggml_backend_dev_name(dev)); |
| return nullptr; |
| } |
|
|
| ggml_backend_dev_props props; |
| ggml_backend_dev_get_props(dev, &props); |
| if (!props.caps.async || !props.caps.host_buffer || !props.caps.events) { |
| LLAMA_LOG_DEBUG("%s: device %s does not support async, host buffers or events\n", func, |
| ggml_backend_dev_name(dev)); |
| return nullptr; |
| } |
|
|
| auto * host_buft = ggml_backend_dev_host_buffer_type(dev); |
| if (!host_buft) { |
| LLAMA_LOG_DEBUG("%s: no host buffer type found for device %s\n", func, |
| ggml_backend_dev_name(dev)); |
| return nullptr; |
| } |
|
|
| |
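// the backend qualifies - allocate pinned host buffers and events for synchronization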
| for (size_t idx = 0; idx < n_buffers; ++idx) { |
| auto * buf = ggml_backend_buft_alloc_buffer(host_buft, buffer_size); |
|
|
| if (!buf) { |
| LLAMA_LOG_DEBUG("%s: failed to allocate host buffer for async uploads for device %s\n", func, |
| ggml_backend_dev_name(dev)); |
| return nullptr; |
| } |
|
|
| host_buffers.emplace_back(buf); |
| host_ptrs.emplace_back(ggml_backend_buffer_get_base(buf)); |
|
|
| auto * event = ggml_backend_event_new(dev); |
| if (!event) { |
| LLAMA_LOG_DEBUG("%s: failed to create event for async uploads for device %s\n", func, |
| ggml_backend_dev_name(dev)); |
| return nullptr; |
| } |
|
|
| events.emplace_back(event); |
| } |
|
|
| ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr); |
| if (!backend) { |
| LLAMA_LOG_DEBUG("%s: failed to initialize backend for device %s for async uploads\n", func, |
| ggml_backend_dev_name(dev)); |
| return nullptr; |
| } |
|
|
| return backend; |
| }(__func__); |
|
|
| if (upload_backend) { |
| LLAMA_LOG_DEBUG("%s: using async uploads for device %s, buffer type %s, backend %s\n", __func__, |
| ggml_backend_dev_name(ggml_backend_get_device(upload_backend)), |
| ggml_backend_buft_name(ggml_backend_buffer_get_type(bufs.at(0))), |
| ggml_backend_name(upload_backend)); |
| } |
|
|
| for (struct ggml_tensor * cur = ggml_get_first_tensor(ctx); cur != NULL; cur = ggml_get_next_tensor(ctx, cur)) { |
| const auto * weight = get_weight(ggml_get_name(cur)); |
| if (weight == nullptr) { |
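// this can happen with split experts models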
| |
| continue; |
| } |
|
|
| if (progress_callback) { |
| if (!progress_callback((float) size_done / size_data, progress_callback_user_data)) { |
| return false; |
| } |
| } |
|
|
| size_t n_size = ggml_nbytes(cur); |
|
|
| if (use_mmap) { |
| const auto & mapping = mappings.at(weight->idx); |
| ggml_backend_buffer_t buf_mmap = nullptr; |
| if (bufs.count(weight->idx)) { |
| buf_mmap = bufs.at(weight->idx); |
| } |
| uint8_t * data = (uint8_t *) mapping->addr() + weight->offs; |
|
|
| if (check_tensors) { |
| validation_result.emplace_back(std::async(std::launch::async, [cur, data, n_size] { |
| return std::make_pair(cur, ggml_validate_row_data(cur->type, data, n_size)); |
| })); |
| } |
|
|
| GGML_ASSERT(buf_mmap || cur->data); |
| if (buf_mmap && cur->data == nullptr) { |
| ggml_backend_tensor_alloc(buf_mmap, cur, data); |
| if (lmlocks) { |
| const auto & lmlock = lmlocks->at(weight->idx); |
| lmlock->grow_to(weight->offs + n_size); |
| } |
|
|
| auto & mmap_used = mmaps_used[weight->idx]; |
| mmap_used.first = std::min(mmap_used.first, weight->offs); |
| mmap_used.second = std::max(mmap_used.second, weight->offs + n_size); |
| } else { |
| ggml_backend_tensor_set(cur, data, 0, n_size); |
| } |
| } else { |
| const auto & file = files.at(weight->idx); |
|
|
| if (ggml_backend_buffer_is_host(cur->buffer)) { |
| file->seek(weight->offs, SEEK_SET); |
| file->read_raw(cur->data, n_size); |
| if (check_tensors) { |
| validation_result.emplace_back(std::async(std::launch::async, [cur, n_size] { |
| return std::make_pair(cur, ggml_validate_row_data(cur->type, cur->data, n_size)); |
| })); |
| } |
| } else { |
| |
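// stream the tensor through the pinned staging buffers, uploading each chunk asynchronously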
| if (upload_backend) { |
| size_t offset = weight->offs; |
| alignment = file->read_alignment(); |
| size_t aligned_offset = offset & ~(alignment - 1); |
| size_t offset_from_alignment = offset - aligned_offset; |
| file->seek(aligned_offset, SEEK_SET); |
|
|
| |
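// expand the read window outward to the file alignment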
| size_t read_start = aligned_offset; |
| size_t read_end = (offset + n_size + alignment - 1) & ~(alignment - 1); |
|
|
| size_t bytes_read = 0; |
| size_t data_read = 0; |
|
|
| while (bytes_read < read_end - read_start) { |
| size_t read_size = std::min<size_t>(buffer_size, read_end - read_start - bytes_read); |
|
|
| |
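// align the destination pointer inside the staging buffer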
| uintptr_t ptr_dest_aligned = (reinterpret_cast<uintptr_t>(host_ptrs[buffer_idx]) + alignment - 1) & ~(alignment - 1); |
|
|
| |
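// wait until the previous upload from this staging buffer has completed before reusing it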
| ggml_backend_event_synchronize(events[buffer_idx]); |
|
|
| |
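// read the next chunk from disk into the staging buffer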
| file->read_raw_unsafe(reinterpret_cast<void *>(ptr_dest_aligned), read_size); |
|
|
| |
| uintptr_t ptr_data = ptr_dest_aligned; |
| size_t data_to_copy = read_size; |
|
|
| |
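// the first chunk may begin before the tensor data - skip the leading alignment padding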
| if (bytes_read == 0) { |
| ptr_data += offset_from_alignment; |
| data_to_copy -= offset_from_alignment; |
| } |
|
|
| |
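// the last chunk may extend past the tensor data - trim the trailing alignment padding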
| if (aligned_offset + bytes_read + read_size > offset + n_size) { |
| data_to_copy -= (read_end - (offset + n_size)); |
| } |
|
|
| |
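// start the async upload and record an event so the staging buffer can be reused safely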
| ggml_backend_tensor_set_async(upload_backend, cur, |
| reinterpret_cast<void *>(ptr_data), data_read, data_to_copy); |
| ggml_backend_event_record(events[buffer_idx], upload_backend); |
|
|
| data_read += data_to_copy; |
| bytes_read += read_size; |
|
|
| ++buffer_idx; |
| buffer_idx %= n_buffers; |
| } |
| } else { |
| read_buf.resize(n_size); |
| file->seek(weight->offs, SEEK_SET); |
| file->read_raw(read_buf.data(), n_size); |
| ggml_backend_tensor_set(cur, read_buf.data(), 0, n_size); |
| if (check_tensors && !ggml_validate_row_data(cur->type, read_buf.data(), n_size)) { |
| throw std::runtime_error(format("tensor '%s' has invalid data", ggml_get_name(cur))); |
| } |
| } |
| } |
| } |
|
|
| size_done += n_size; |
| } |
|
|
| |
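// free the temporary resources used for the async uploads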
| for (auto * event : events) { |
| ggml_backend_event_synchronize(event); |
| ggml_backend_event_free(event); |
| } |
| for (auto * buf : host_buffers) { |
| ggml_backend_buffer_free(buf); |
| } |
| ggml_backend_free(upload_backend); |
|
|
| |
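// collect the results of the async tensor validations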
| bool validation_failed = false; |
| for (auto & future : validation_result) { |
| auto result = future.get(); |
| if (!result.second) { |
| LLAMA_LOG_ERROR("%s: tensor '%s' has invalid data\n", __func__, ggml_get_name(result.first)); |
| validation_failed = true; |
| } |
| } |
| if (validation_failed) { |
| throw std::runtime_error("found tensors with invalid data"); |
| } |
|
|
| |
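// check if this was the last call and perform the final cleanup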
| if (size_done >= size_data) { |
| |
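// unmap the mmap fragments that are no longer needed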
| if (use_mmap) { |
| for (uint32_t idx = 0; idx < mappings.size(); idx++) { |
| const auto & mmap_used = mmaps_used.at(idx); |
| auto & mapping = mappings.at(idx); |
| mapping->unmap_fragment(0, mmap_used.first); |
| if (mmap_used.second != 0) { |
| mapping->unmap_fragment(mmap_used.second, mapping->size()); |
| } |
| } |
| } |
| if (progress_callback) { |
| |
| |
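// even though the model is done loading, still honor cancellation so the caller can free its allocations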
| return progress_callback(1.0f, progress_callback_user_data); |
| } |
| } |
|
|
| return true; |
| } |
|
|
| std::string llama_model_loader::ftype_name() const { |
| return llama_model_ftype_name(ftype); |
| } |
|
|
| void llama_model_loader::print_info() const { |
| LLAMA_LOG_INFO("%s: file format = %s\n", __func__, llama_file_version_name(fver)); |
| LLAMA_LOG_INFO("%s: file type = %s\n", __func__, llama_model_ftype_name(ftype).c_str()); |
| if (n_bytes < GiB) { |
| LLAMA_LOG_INFO("%s: file size = %.2f MiB (%.2f BPW) \n", __func__, n_bytes/1024.0/1024.0, n_bytes*8.0/n_elements); |
| } else { |
| LLAMA_LOG_INFO("%s: file size = %.2f GiB (%.2f BPW) \n", __func__, n_bytes/1024.0/1024.0/1024.0, n_bytes*8.0/n_elements); |
| } |
| } |
|
|