Training in progress, step 200, checkpoint
- checkpoint-200/README.md +21 -0
- checkpoint-200/adapter_config.json +26 -0
- checkpoint-200/adapter_model.bin +3 -0
- checkpoint-200/adapter_model/adapter_model/README.md +21 -0
- checkpoint-200/adapter_model/adapter_model/adapter_config.json +26 -0
- checkpoint-200/adapter_model/adapter_model/adapter_model.bin +3 -0
- checkpoint-200/added_tokens.json +8 -0
- checkpoint-200/optimizer.pt +3 -0
- checkpoint-200/rng_state.pth +3 -0
- checkpoint-200/scheduler.pt +3 -0
- checkpoint-200/special_tokens_map.json +11 -0
- checkpoint-200/tokenizer.model +3 -0
- checkpoint-200/tokenizer_config.json +74 -0
- checkpoint-200/trainer_state.json +210 -0
- checkpoint-200/training_args.bin +3 -0
checkpoint-200/README.md
ADDED
@@ -0,0 +1,21 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.4.0
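The quantization fields in the card above map one-to-one onto `transformers`' `BitsAndBytesConfig`. A minimal sketch of rebuilding that config to load the base model (the `llm_int8_*` values in the card are the library defaults, so they are omitted here):

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# QLoRA-style 4-bit quantization, mirroring the card: NF4 weights,
# double quantization, bfloat16 compute.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

base = AutoModelForCausalLM.from_pretrained(
    "SkunkworksAI/Mistralic-7B-1",  # base model named in adapter_config.json
    quantization_config=bnb_config,
    device_map="auto",
)
```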
checkpoint-200/adapter_config.json
ADDED
@@ -0,0 +1,26 @@
+{
+  "auto_mapping": null,
+  "base_model_name_or_path": "SkunkworksAI/Mistralic-7B-1",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 16.0,
+  "lora_dropout": 0.1,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 64,
+  "revision": null,
+  "target_modules": [
+    "k_proj",
+    "down_proj",
+    "v_proj",
+    "q_proj",
+    "o_proj",
+    "up_proj",
+    "gate_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
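This is a LoRA adapter with rank 64, alpha 16, and dropout 0.1, targeting all seven attention and MLP projections of the Mistral architecture. A sketch of attaching it to the base model with `peft`; the local `./checkpoint-200` path is an assumption about where the checkpoint directory was downloaded:

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM

base = AutoModelForCausalLM.from_pretrained(
    "SkunkworksAI/Mistralic-7B-1",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
# Reads adapter_config.json and adapter_model.bin from the checkpoint dir.
model = PeftModel.from_pretrained(base, "./checkpoint-200")
model.eval()  # matches "inference_mode": true in the config
```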
checkpoint-200/adapter_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:778a29baab199ce9ecb992060f28f7b204f45b356fac78efcb211c5f874cae2f
+size 335706314
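The `.bin`/`.pt`/`.pth` entries in this commit are Git LFS pointers rather than the binaries themselves: a spec-version line, a sha256 object id, and the blob size in bytes. A small sketch (hypothetical helper, local paths assumed) that verifies a downloaded blob against such a pointer:

```python
import hashlib
from pathlib import Path

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    """Check a downloaded file's size and sha256 against its Git LFS pointer."""
    # Pointer lines look like "oid sha256:<hex>" and "size <bytes>".
    fields = dict(
        line.split(" ", 1) for line in Path(pointer_path).read_text().splitlines()
    )
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])
    blob = Path(blob_path).read_bytes()
    return len(blob) == expected_size and hashlib.sha256(blob).hexdigest() == expected_oid
```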
checkpoint-200/adapter_model/adapter_model/README.md
ADDED
@@ -0,0 +1,21 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.4.0
checkpoint-200/adapter_model/adapter_model/adapter_config.json
ADDED
@@ -0,0 +1,26 @@
+{
+  "auto_mapping": null,
+  "base_model_name_or_path": "SkunkworksAI/Mistralic-7B-1",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 16.0,
+  "lora_dropout": 0.1,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 64,
+  "revision": null,
+  "target_modules": [
+    "k_proj",
+    "down_proj",
+    "v_proj",
+    "q_proj",
+    "o_proj",
+    "up_proj",
+    "gate_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
checkpoint-200/adapter_model/adapter_model/adapter_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:778a29baab199ce9ecb992060f28f7b204f45b356fac78efcb211c5f874cae2f
+size 335706314
checkpoint-200/added_tokens.json
ADDED
@@ -0,0 +1,8 @@
+{
+  "</s>": 2,
+  "<s>": 1,
+  "<unk>": 0,
+  "<|im_end|>": 32000,
+  "<|im_start|>": 32001,
+  "[PAD]": 32002
+}
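Ids 32000-32002 extend the stock 32k Mistral vocabulary with the ChatML markers and a pad token, so the embedding matrix has to be resized before the tokenizer and model can be used together. A sketch, assuming the base model still has a 32000-row embedding:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("./checkpoint-200")  # picks up added_tokens.json
model = AutoModelForCausalLM.from_pretrained("SkunkworksAI/Mistralic-7B-1")

model.resize_token_embeddings(len(tok))  # 32000 -> 32003 rows
assert tok.convert_tokens_to_ids("<|im_start|>") == 32001
```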
checkpoint-200/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aa3a94f01b5cd14fc5cb8514ae029ecec733a29023e43ea5f5b8f674421dcebb
+size 1342452986
checkpoint-200/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b761bb1f0f7296d39bc57c9f1b11904cf801043bd68f3b7e4444184903524d80
+size 14180
checkpoint-200/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:859ff0676471245c9481ca25d6d6778d1c7963c39b7877af46bb8ca30a9ead21
+size 1064
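Together, `optimizer.pt`, `rng_state.pth`, and `scheduler.pt` are what let `transformers.Trainer` resume this run exactly where it stopped, rather than just reloading adapter weights. A sketch, assuming `model`, `args`, and `train_dataset` are rebuilt as in the original run (their construction is not shown here):

```python
from transformers import Trainer

trainer = Trainer(model=model, args=args, train_dataset=train_dataset)
# Restores optimizer moments, LR-schedule position, and RNG state from the
# checkpoint directory before continuing past global step 200.
trainer.train(resume_from_checkpoint="experts/mistralic-expert-3/checkpoint-200")
```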
checkpoint-200/special_tokens_map.json
ADDED
@@ -0,0 +1,11 @@
+{
+  "additional_special_tokens": [
+    "<unk>",
+    "<s>",
+    "</s>"
+  ],
+  "bos_token": "<s>",
+  "eos_token": "</s>",
+  "pad_token": "[PAD]",
+  "unk_token": "<unk>"
+}
checkpoint-200/tokenizer.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+size 493443
checkpoint-200/tokenizer_config.json
ADDED
@@ -0,0 +1,74 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    },
+    "32000": {
+      "content": "<|im_end|>",
+      "lstrip": true,
+      "normalized": true,
+      "rstrip": true,
+      "single_word": false,
+      "special": false
+    },
+    "32001": {
+      "content": "<|im_start|>",
+      "lstrip": true,
+      "normalized": true,
+      "rstrip": true,
+      "single_word": false,
+      "special": false
+    },
+    "32002": {
+      "content": "[PAD]",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "additional_special_tokens": [
+    "<unk>",
+    "<s>",
+    "</s>"
+  ],
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+  "legacy": true,
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "[PAD]",
+  "padding_side": "right",
+  "sp_model_kwargs": {},
+  "spaces_between_special_tokens": false,
+  "tokenizer_class": "LlamaTokenizer",
+  "tokenizer_file": null,
+  "trust_remote_code": false,
+  "unk_token": "<unk>",
+  "use_default_system_prompt": true,
+  "use_fast": true
+}
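The `<|im_start|>`/`<|im_end|>` pair in `added_tokens_decoder` suggests ChatML-style prompting. The exact template is not recorded anywhere in the checkpoint, so the formatter below is an assumption, not a documented interface:

```python
# Hypothetical ChatML-style prompt builder; the layout is inferred from the
# special tokens above, not confirmed by the checkpoint.
def chatml(system: str, user: str) -> str:
    return (
        f"<|im_start|>system\n{system}<|im_end|>\n"
        f"<|im_start|>user\n{user}<|im_end|>\n"
        f"<|im_start|>assistant\n"
    )

prompt = chatml("You are a helpful assistant.", "Summarize Git LFS in one line.")
```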
checkpoint-200/trainer_state.json
ADDED
@@ -0,0 +1,210 @@
+{
+  "best_metric": 0.43659043312072754,
+  "best_model_checkpoint": "experts/mistralic-expert-3/checkpoint-200",
+  "epoch": 0.07897334649555775,
+  "eval_steps": 200,
+  "global_step": 200,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.0,
+      "learning_rate": 0.0002,
+      "loss": 0.5036,
+      "step": 10
+    },
+    {
+      "epoch": 0.01,
+      "learning_rate": 0.0002,
+      "loss": 0.4814,
+      "step": 20
+    },
+    {
+      "epoch": 0.01,
+      "learning_rate": 0.0002,
+      "loss": 0.4493,
+      "step": 30
+    },
+    {
+      "epoch": 0.02,
+      "learning_rate": 0.0002,
+      "loss": 0.4445,
+      "step": 40
+    },
+    {
+      "epoch": 0.02,
+      "learning_rate": 0.0002,
+      "loss": 0.4996,
+      "step": 50
+    },
+    {
+      "epoch": 0.02,
+      "learning_rate": 0.0002,
+      "loss": 0.4473,
+      "step": 60
+    },
+    {
+      "epoch": 0.03,
+      "learning_rate": 0.0002,
+      "loss": 0.4851,
+      "step": 70
+    },
+    {
+      "epoch": 0.03,
+      "learning_rate": 0.0002,
+      "loss": 0.4328,
+      "step": 80
+    },
+    {
+      "epoch": 0.04,
+      "learning_rate": 0.0002,
+      "loss": 0.4486,
+      "step": 90
+    },
+    {
+      "epoch": 0.04,
+      "learning_rate": 0.0002,
+      "loss": 0.4732,
+      "step": 100
+    },
+    {
+      "epoch": 0.04,
+      "learning_rate": 0.0002,
+      "loss": 0.4512,
+      "step": 110
+    },
+    {
+      "epoch": 0.05,
+      "learning_rate": 0.0002,
+      "loss": 0.4638,
+      "step": 120
+    },
+    {
+      "epoch": 0.05,
+      "learning_rate": 0.0002,
+      "loss": 0.4559,
+      "step": 130
+    },
+    {
+      "epoch": 0.06,
+      "learning_rate": 0.0002,
+      "loss": 0.4411,
+      "step": 140
+    },
+    {
+      "epoch": 0.06,
+      "learning_rate": 0.0002,
+      "loss": 0.4187,
+      "step": 150
+    },
+    {
+      "epoch": 0.06,
+      "learning_rate": 0.0002,
+      "loss": 0.4495,
+      "step": 160
+    },
+    {
+      "epoch": 0.07,
+      "learning_rate": 0.0002,
+      "loss": 0.4172,
+      "step": 170
+    },
+    {
+      "epoch": 0.07,
+      "learning_rate": 0.0002,
+      "loss": 0.4359,
+      "step": 180
+    },
+    {
+      "epoch": 0.08,
+      "learning_rate": 0.0002,
+      "loss": 0.4444,
+      "step": 190
+    },
+    {
+      "epoch": 0.08,
+      "learning_rate": 0.0002,
+      "loss": 0.4625,
+      "step": 200
+    },
+    {
+      "epoch": 0.08,
+      "eval_loss": 0.43659043312072754,
+      "eval_runtime": 125.7967,
+      "eval_samples_per_second": 7.949,
+      "eval_steps_per_second": 3.975,
+      "step": 200
+    },
+    {
+      "epoch": 0.08,
+      "mmlu_eval_accuracy": 0.59599123107443,
+      "mmlu_eval_accuracy_abstract_algebra": 0.45454545454545453,
+      "mmlu_eval_accuracy_anatomy": 0.42857142857142855,
+      "mmlu_eval_accuracy_astronomy": 0.6875,
+      "mmlu_eval_accuracy_business_ethics": 0.6363636363636364,
+      "mmlu_eval_accuracy_clinical_knowledge": 0.5862068965517241,
+      "mmlu_eval_accuracy_college_biology": 0.625,
+      "mmlu_eval_accuracy_college_chemistry": 0.375,
+      "mmlu_eval_accuracy_college_computer_science": 0.45454545454545453,
+      "mmlu_eval_accuracy_college_mathematics": 0.5454545454545454,
+      "mmlu_eval_accuracy_college_medicine": 0.5909090909090909,
+      "mmlu_eval_accuracy_college_physics": 0.45454545454545453,
+      "mmlu_eval_accuracy_computer_security": 0.5454545454545454,
+      "mmlu_eval_accuracy_conceptual_physics": 0.46153846153846156,
+      "mmlu_eval_accuracy_econometrics": 0.5833333333333334,
+      "mmlu_eval_accuracy_electrical_engineering": 0.625,
+      "mmlu_eval_accuracy_elementary_mathematics": 0.4634146341463415,
+      "mmlu_eval_accuracy_formal_logic": 0.21428571428571427,
+      "mmlu_eval_accuracy_global_facts": 0.4,
+      "mmlu_eval_accuracy_high_school_biology": 0.625,
+      "mmlu_eval_accuracy_high_school_chemistry": 0.36363636363636365,
+      "mmlu_eval_accuracy_high_school_computer_science": 0.6666666666666666,
+      "mmlu_eval_accuracy_high_school_european_history": 0.7777777777777778,
+      "mmlu_eval_accuracy_high_school_geography": 0.8181818181818182,
+      "mmlu_eval_accuracy_high_school_government_and_politics": 0.7142857142857143,
+      "mmlu_eval_accuracy_high_school_macroeconomics": 0.5581395348837209,
+      "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
+      "mmlu_eval_accuracy_high_school_microeconomics": 0.5769230769230769,
+      "mmlu_eval_accuracy_high_school_physics": 0.058823529411764705,
+      "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
+      "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
+      "mmlu_eval_accuracy_high_school_us_history": 0.7727272727272727,
+      "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
+      "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
+      "mmlu_eval_accuracy_human_sexuality": 0.5833333333333334,
+      "mmlu_eval_accuracy_international_law": 0.8461538461538461,
+      "mmlu_eval_accuracy_jurisprudence": 0.6363636363636364,
+      "mmlu_eval_accuracy_logical_fallacies": 0.7222222222222222,
+      "mmlu_eval_accuracy_machine_learning": 0.45454545454545453,
+      "mmlu_eval_accuracy_management": 0.8181818181818182,
+      "mmlu_eval_accuracy_marketing": 0.92,
+      "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
+      "mmlu_eval_accuracy_miscellaneous": 0.7558139534883721,
+      "mmlu_eval_accuracy_moral_disputes": 0.5526315789473685,
+      "mmlu_eval_accuracy_moral_scenarios": 0.33,
+      "mmlu_eval_accuracy_nutrition": 0.696969696969697,
+      "mmlu_eval_accuracy_philosophy": 0.7352941176470589,
+      "mmlu_eval_accuracy_prehistory": 0.5428571428571428,
+      "mmlu_eval_accuracy_professional_accounting": 0.5161290322580645,
+      "mmlu_eval_accuracy_professional_law": 0.40588235294117647,
+      "mmlu_eval_accuracy_professional_medicine": 0.6451612903225806,
+      "mmlu_eval_accuracy_professional_psychology": 0.6086956521739131,
+      "mmlu_eval_accuracy_public_relations": 0.5,
+      "mmlu_eval_accuracy_security_studies": 0.6666666666666666,
+      "mmlu_eval_accuracy_sociology": 0.9545454545454546,
+      "mmlu_eval_accuracy_us_foreign_policy": 0.8181818181818182,
+      "mmlu_eval_accuracy_virology": 0.5555555555555556,
+      "mmlu_eval_accuracy_world_religions": 0.8421052631578947,
+      "mmlu_loss": 1.5606910698407623,
+      "step": 200
+    }
+  ],
+  "logging_steps": 10,
+  "max_steps": 7596,
+  "num_train_epochs": 3,
+  "save_steps": 200,
+  "total_flos": 9.152149079851008e+16,
+  "trial_name": null,
+  "trial_params": null
+}
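`trainer_state.json` is plain JSON, so the loss curve and the step-200 evaluation (eval_loss 0.4366, mean MMLU accuracy ~0.596) can be pulled back out for comparison across checkpoints. A small sketch:

```python
import json

with open("checkpoint-200/trainer_state.json") as f:
    state = json.load(f)

# Training-loss entries carry "loss"; eval entries carry "eval_loss".
train_losses = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_losses = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(f"best eval_loss {state['best_metric']:.4f} at {state['best_model_checkpoint']}")
print(f"progress: step {state['global_step']} of {state['max_steps']}")
```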
checkpoint-200/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c5ab8ef2a53a353c81e0e27307b4f9b37e7b4be919328eb7405b3b561276e06a
+size 6456