#include "arg.h" |
|
|
#include "common.h" |
|
|
#include "log.h" |
|
|
#include "llama.h" |
|
|
|
|
|
#include <cmath> |
|
|
#include <cstdio> |
|
|
#include <cstring> |
|
|
#include <ctime> |
|
|
#include <vector> |
|
|
|
|
|
#if defined(_MSC_VER) |
|
|
#pragma warning(disable: 4244 4267) |
|
|
#endif |
|
|
|
|
|

int main(int argc, char ** argv) {
    common_params params;

    params.escape = false;

    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_PERPLEXITY)) {
        return 1;
    }
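
    // training updates the weights in place, so mmap'ed (read-only) weights and
    // non-f32 KV cache types cannot be used; force-override these settings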
    if (params.use_mmap) {
        LOG_INF("%s: force disabling memory mapping because it would result in read-only pointers to the weights\n", __func__);
        params.use_mmap = false;
    }
    if (params.cache_type_k != GGML_TYPE_F32) {
        LOG_INF("%s: force changing k cache type to f32 due to a lack of f16 support for OUT_PROD\n", __func__);
        params.cache_type_k = GGML_TYPE_F32;
    }
    if (params.cache_type_v != GGML_TYPE_F32) {
        LOG_INF("%s: force changing v cache type to f32 due to a lack of f16 support for OUT_PROD\n", __func__);
        params.cache_type_v = GGML_TYPE_F32;
    }
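
    // initialize logging, the ggml backends, and NUMA support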
    common_init();
    llama_backend_init();
    llama_numa_init(params.numa);
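
    // load the model and create the llama_context from the parsed CLI parameters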
    common_init_result llama_init = common_init_from_params(params);

    llama_model_ptr   & model = llama_init.model;
    llama_context_ptr & ctx   = llama_init.context;

    if (model == NULL) {
        LOG_ERR("%s: unable to load model\n", __func__);
        return 1;
    }
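
    // print system information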
    {
        LOG_INF("\n");
        LOG_INF("%s\n", common_params_get_system_info(params).c_str());
    }
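
    // fraction of the data held out for validation; the rest is used for training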
    constexpr float val_split = 0.05f;
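
    // tokenize the training text and build a dataset from it
    // (assumed: the third argument is the stride between sequences, here half the context size)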
    std::vector<llama_token> tokens = common_tokenize(ctx.get(), params.prompt, true);
    ggml_opt_dataset_t dataset = common_opt_dataset_init(ctx.get(), tokens, llama_n_ctx(ctx.get())/2);
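
    // start from the default optimizer settings and lower the AdamW learning rate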
    struct ggml_opt_optimizer_params optimizer_params = ggml_opt_get_default_optimizer_params(nullptr);
    optimizer_params.adamw.alpha = 1e-7f; // learning rate
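
    // the positional initializers below are annotated with the llama_opt_params
    // fields they presumably set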
    struct llama_opt_params lopt_params {
        /*n_ctx_train     =*/ 0, // assumed: 0 means use the n_ctx of the context
        /*param_filter    =*/ llama_opt_param_filter_all, // train all model parameters
        /*param_filter_ud =*/ nullptr,
        /*get_opt_pars    =*/ ggml_opt_get_constant_optimizer_params, // same optimizer params on every call
        /*get_opt_pars_ud =*/ &optimizer_params,
    };
    llama_opt_init(ctx.get(), model.get(), lopt_params);
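
    // number of dataset entries used for training; the remainder forms the validation split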
    const int64_t idata_split = ggml_opt_dataset_ndata(dataset) * (1.0f - val_split);
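
    // accumulators for the statistics of the training and validation passes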
    ggml_opt_result_t result_train = ggml_opt_result_init();
    ggml_opt_result_t result_eval  = ggml_opt_result_init();
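
    // run two epochs: each epoch trains on the entries before idata_split and
    // evaluates on the rest, printing a progress bar for both phases; the
    // accumulated statistics are reset between epochs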
    for (int epoch = 0; epoch < 2; ++epoch) {
        llama_opt_epoch(ctx.get(), dataset, result_train, result_eval, idata_split,
            ggml_opt_epoch_callback_progress_bar, ggml_opt_epoch_callback_progress_bar);
        fprintf(stderr, "\n");

        ggml_opt_result_reset(result_train);
        ggml_opt_result_reset(result_eval);
    }
    ggml_opt_result_free(result_train);
    ggml_opt_result_free(result_eval);
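
    // write the fine-tuned weights back to disk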
    llama_model_save_to_file(model.get(), "finetuned-model.gguf");

    llama_backend_free();

    return 0;
}