Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
- Area/README.md +62 -0
- Area/adapter_config.json +34 -0
- Area/all_results.json +8 -0
- Area/config.json +34 -0
- Area/dpo/README.md +72 -0
- Area/generation_config.json +6 -0
- Area/model.safetensors.index.json +298 -0
- Area/special_tokens_map.json +23 -0
- Area/tokenizer.json +0 -0
- Area/tokenizer_config.json +196 -0
- Area/train_results.json +8 -0
- Area/trainer_log.jsonl +59 -0
- Area/trainer_state.json +912 -0
- Area/training_loss.png +0 -0
- Area/training_rewards_accuracies.png +0 -0
- Power/checkpoint-100/rng_state_0.pth +3 -0
- Power/checkpoint-100/rng_state_1.pth +3 -0
- Power/checkpoint-100/rng_state_2.pth +3 -0
- Power/checkpoint-100/rng_state_3.pth +3 -0
- Power/checkpoint-100/rng_state_4.pth +3 -0
- Power/checkpoint-100/rng_state_5.pth +3 -0
- Power/checkpoint-100/rng_state_6.pth +3 -0
- Power/checkpoint-100/rng_state_7.pth +3 -0
- Power/checkpoint-100/scheduler.pt +3 -0
- Power/checkpoint-100/training_args.bin +3 -0
- Power/checkpoint-150/rng_state_0.pth +3 -0
- Power/checkpoint-150/rng_state_1.pth +3 -0
- Power/checkpoint-150/rng_state_2.pth +3 -0
- Power/checkpoint-150/rng_state_3.pth +3 -0
- Power/checkpoint-150/rng_state_4.pth +3 -0
- Power/checkpoint-150/rng_state_5.pth +3 -0
- Power/checkpoint-150/rng_state_6.pth +3 -0
- Power/checkpoint-150/rng_state_7.pth +3 -0
- Power/checkpoint-150/scheduler.pt +3 -0
- Power/checkpoint-150/training_args.bin +3 -0
- Power/checkpoint-200/rng_state_0.pth +3 -0
- Power/checkpoint-200/rng_state_1.pth +3 -0
- Power/checkpoint-200/rng_state_2.pth +3 -0
- Power/checkpoint-200/rng_state_3.pth +3 -0
- Power/checkpoint-200/rng_state_4.pth +3 -0
- Power/checkpoint-200/rng_state_5.pth +3 -0
- Power/checkpoint-200/rng_state_6.pth +3 -0
- Power/checkpoint-200/rng_state_7.pth +3 -0
- Power/checkpoint-200/scheduler.pt +3 -0
- Power/checkpoint-200/training_args.bin +3 -0
- Power/checkpoint-250/rng_state_0.pth +3 -0
- Power/checkpoint-250/rng_state_1.pth +3 -0
- Power/checkpoint-250/rng_state_2.pth +3 -0
- Power/checkpoint-250/rng_state_3.pth +3 -0
- Power/checkpoint-250/rng_state_4.pth +3 -0
Area/README.md
ADDED
@@ -0,0 +1,62 @@
+---
+base_model: deepseek-ai/deepseek-coder-6.7b-instruct
+library_name: peft
+license: other
+tags:
+- llama-factory
+- lora
+- generated_from_trainer
+model-index:
+- name: Area
+  results: []
+---
+
+<!-- This model card has been generated automatically according to the information the Trainer had access to. You
+should probably proofread and complete it, then remove this comment. -->
+
+# Area
+
+This model is a fine-tuned version of [deepseek-ai/deepseek-coder-6.7b-instruct](https://huggingface.co/deepseek-ai/deepseek-coder-6.7b-instruct) on the area dataset.
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- learning_rate: 5e-06
+- train_batch_size: 1
+- eval_batch_size: 8
+- seed: 42
+- distributed_type: multi-GPU
+- num_devices: 8
+- gradient_accumulation_steps: 8
+- total_train_batch_size: 64
+- total_eval_batch_size: 64
+- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+- lr_scheduler_type: cosine
+- lr_scheduler_warmup_ratio: 0.1
+- num_epochs: 20.0
+
+### Training results
+
+
+
+### Framework versions
+
+- PEFT 0.12.0
+- Transformers 4.45.2
+- Pytorch 2.4.1+cu124
+- Datasets 2.21.0
+- Tokenizers 0.20.0
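For readers who want to try the adapter described by this card, a minimal loading sketch follows. It assumes the Area folder above (with its adapter_config.json and adapter weights) has been downloaded locally; the `./Area` path is illustrative, not an official model id.

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

# Sketch only: load the base model in bf16, then attach the LoRA adapter.
base = AutoModelForCausalLM.from_pretrained(
    "deepseek-ai/deepseek-coder-6.7b-instruct",
    torch_dtype=torch.bfloat16,
)
model = PeftModel.from_pretrained(base, "./Area")  # path to this repo's folder
tokenizer = AutoTokenizer.from_pretrained("./Area")
```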
Area/adapter_config.json
ADDED
@@ -0,0 +1,34 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "deepseek-ai/deepseek-coder-6.7b-instruct",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 16,
+  "lora_dropout": 0.0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "gate_proj",
+    "v_proj",
+    "up_proj",
+    "down_proj",
+    "q_proj",
+    "k_proj",
+    "o_proj"
+  ],
+  "task_type": "CAUSAL_LM",
+  "use_dora": false,
+  "use_rslora": false
+}
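The configuration above is a standard LoRA setup: rank 8, alpha 16, no dropout, applied to every attention and MLP projection. A hedged sketch of the equivalent `peft.LoraConfig` (field names per PEFT 0.12.0, the version listed in the model card):

```python
from peft import LoraConfig

# Mirrors adapter_config.json above; a sketch, not the exact training script.
lora_config = LoraConfig(
    r=8,
    lora_alpha=16,
    lora_dropout=0.0,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["gate_proj", "v_proj", "up_proj", "down_proj",
                    "q_proj", "k_proj", "o_proj"],
)
```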
Area/all_results.json
ADDED
@@ -0,0 +1,8 @@
+{
+  "epoch": 19.414225941422593,
+  "total_flos": 1.9755070908268544e+18,
+  "train_loss": 0.6148436521661693,
+  "train_runtime": 4276.8425,
+  "train_samples_per_second": 8.922,
+  "train_steps_per_second": 0.136
+}
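As a quick consistency check on these numbers: 0.136 steps/s × 4276.8 s ≈ 582 optimizer steps, matching the 580 total steps reported in trainer_log.jsonl below, and the effective batch size of 1 per device × 8 GPUs × 8 gradient-accumulation steps = 64 matches the model card.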
Area/config.json
ADDED
@@ -0,0 +1,34 @@
+{
+  "_name_or_path": "deepseek-ai/deepseek-coder-6.7b-instruct",
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 32013,
+  "eos_token_id": 32021,
+  "head_dim": 128,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 11008,
+  "max_position_embeddings": 16384,
+  "mlp_bias": false,
+  "model_type": "llama",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 32,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": {
+    "factor": 4.0,
+    "rope_type": "linear",
+    "type": "linear"
+  },
+  "rope_theta": 100000,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.45.2",
+  "use_cache": true,
+  "vocab_size": 32256
+}
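Note the `rope_scaling` block: linear scaling with factor 4.0 stretches the rotary position indices so the model reaches the 16,384-token `max_position_embeddings`. A simplified sketch of what linear scaling does to the rotary angles (illustrative only, not the exact transformers implementation):

```python
import torch

def rope_angles(positions, head_dim=128, base=100000.0, factor=4.0):
    # Inverse frequencies from rope_theta; linear scaling divides the
    # position index by the factor before computing the angles.
    inv_freq = 1.0 / (base ** (torch.arange(0, head_dim, 2).float() / head_dim))
    return torch.outer(positions.float() / factor, inv_freq)

angles = rope_angles(torch.arange(16384))
print(angles.shape)  # torch.Size([16384, 64])
```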
Area/dpo/README.md
ADDED
@@ -0,0 +1,72 @@
+---
+base_model: deepseek-ai/deepseek-coder-6.7b-instruct
+library_name: peft
+license: other
+tags:
+- llama-factory
+- lora
+- generated_from_trainer
+model-index:
+- name: dpo
+  results: []
+---
+
+<!-- This model card has been generated automatically according to the information the Trainer had access to. You
+should probably proofread and complete it, then remove this comment. -->
+
+# dpo
+
+This model is a fine-tuned version of [deepseek-ai/deepseek-coder-6.7b-instruct](https://huggingface.co/deepseek-ai/deepseek-coder-6.7b-instruct) on the area dataset.
+It achieves the following results on the evaluation set:
+- Loss: 0.6819
+- Rewards/chosen: 0.0319
+- Rewards/rejected: -0.0669
+- Rewards/accuracies: 0.6667
+- Rewards/margins: 0.0988
+- Logps/rejected: -82.3286
+- Logps/chosen: -161.5981
+- Logits/rejected: 1.6967
+- Logits/chosen: 1.8235
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- learning_rate: 5e-06
+- train_batch_size: 1
+- eval_batch_size: 1
+- seed: 42
+- distributed_type: multi-GPU
+- num_devices: 8
+- gradient_accumulation_steps: 8
+- total_train_batch_size: 64
+- total_eval_batch_size: 8
+- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+- lr_scheduler_type: cosine
+- lr_scheduler_warmup_ratio: 0.1
+- num_epochs: 10.0
+
+### Training results
+
+
+
+### Framework versions
+
+- PEFT 0.12.0
+- Transformers 4.45.2
+- Pytorch 2.4.1+cu124
+- Datasets 2.21.0
+- Tokenizers 0.20.0
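A note on reading the metrics above: Rewards/margins is simply Rewards/chosen minus Rewards/rejected, i.e. 0.0319 − (−0.0669) = 0.0988, and Rewards/accuracies = 0.6667 is the fraction of evaluation pairs where the chosen response received the higher implicit reward. In standard DPO the implicit reward is β · log(π(y|x) / π_ref(y|x)), so a positive chosen reward alongside a negative rejected reward indicates the policy moved in the intended direction relative to the reference model.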
Area/generation_config.json
ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 32013,
+  "eos_token_id": 32021,
+  "transformers_version": "4.45.2"
+}
Area/model.safetensors.index.json
ADDED
@@ -0,0 +1,298 @@
+{
+  "metadata": {
+    "total_size": 13481025536
+  },
+  "weight_map": {
+    "lm_head.weight": "model-00003-of-00003.safetensors",
+    "model.embed_tokens.weight": "model-00001-of-00003.safetensors",
+    "model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.10.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.10.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.10.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.13.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.13.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.13.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.18.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.18.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.18.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.20.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.21.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.21.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.21.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.22.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.22.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.22.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.23.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.24.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.25.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.25.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.25.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.26.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.26.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.26.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.27.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.27.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.27.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.28.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.28.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.28.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.29.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.29.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.29.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.3.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.3.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.3.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.30.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.30.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.30.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.31.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.31.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.31.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.6.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.6.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.6.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.7.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.7.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.7.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.8.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.8.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.8.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.input_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.norm.weight": "model-00003-of-00003.safetensors"
+  }
+}
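The index above maps each tensor name to one of the three shards (total_size ≈ 13.48 GB, consistent with a 6.7B-parameter model in bfloat16). A minimal sketch of resolving and loading a single weight by hand, assuming the shard files sit next to the index:

```python
import json
from safetensors import safe_open

# Resolve a tensor name to its shard via the index, then load just that tensor.
with open("Area/model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.layers.0.self_attn.q_proj.weight"
shard = index["weight_map"][name]  # "model-00001-of-00003.safetensors"
with safe_open(f"Area/{shard}", framework="pt") as st:
    tensor = st.get_tensor(name)
print(tensor.shape)  # 4096 x 4096 for q_proj at hidden_size 4096
```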
Area/special_tokens_map.json
ADDED
@@ -0,0 +1,23 @@
+{
+  "bos_token": {
+    "content": "<|begin▁of▁sentence|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|EOT|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<|end▁of▁sentence|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
Area/tokenizer.json
ADDED
The diff for this file is too large to render. See the raw diff.
Area/tokenizer_config.json
ADDED
@@ -0,0 +1,196 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "add_prefix_space": null,
+  "added_tokens_decoder": {
+    "32000": {
+      "content": "õ",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "32001": {
+      "content": "÷",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "32002": {
+      "content": "Á",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "32003": {
+      "content": "ý",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "32004": {
+      "content": "À",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "32005": {
+      "content": "ÿ",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "32006": {
+      "content": "ø",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "32007": {
+      "content": "ú",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "32008": {
+      "content": "þ",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "32009": {
+      "content": "ü",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "32010": {
+      "content": "ù",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "32011": {
+      "content": "ö",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "32012": {
+      "content": "û",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "32013": {
+      "content": "<|begin▁of▁sentence|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "32014": {
+      "content": "<|end▁of▁sentence|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "32015": {
+      "content": "<|fim▁hole|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "32016": {
+      "content": "<|fim▁begin|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "32017": {
+      "content": "<|fim▁end|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "32018": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "32019": {
+      "content": "<|User|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "32020": {
+      "content": "<|Assistant|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "32021": {
+      "content": "<|EOT|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<|begin▁of▁sentence|>",
+  "chat_template": "{{ '<|begin▁of▁sentence|>' }}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% endif %}{% if system_message is defined %}{{ system_message + '\n\n' }}{% endif %}{% for message in loop_messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ 'User: ' + content + '\n\nAssistant:' }}{% elif message['role'] == 'assistant' %}{{ content + '<|EOT|>' }}{% endif %}{% endfor %}",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|EOT|>",
+  "legacy": true,
+  "model_max_length": 16384,
+  "pad_token": "<|end▁of▁sentence|>",
+  "padding_side": "left",
+  "sp_model_kwargs": {},
+  "split_special_tokens": false,
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": null,
+  "use_default_system_prompt": false
+}
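The `chat_template` above emits the BOS token, an optional system message, and then alternating `User:`/`Assistant:` turns, closing each assistant turn with `<|EOT|>`. A short sketch of rendering it, assuming the Area folder is available locally:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./Area")
messages = [
    {"role": "system", "content": "You are a helpful coding assistant."},
    {"role": "user", "content": "Write a function that computes an area."},
]
print(tok.apply_chat_template(messages, tokenize=False))
# <|begin▁of▁sentence|>You are a helpful coding assistant.
#
# User: Write a function that computes an area.
#
# Assistant:
```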
Area/train_results.json
ADDED
@@ -0,0 +1,8 @@
+{
+  "epoch": 19.414225941422593,
+  "total_flos": 1.9755070908268544e+18,
+  "train_loss": 0.6148436521661693,
+  "train_runtime": 4276.8425,
+  "train_samples_per_second": 8.922,
+  "train_steps_per_second": 0.136
+}
Area/trainer_log.jsonl
ADDED
@@ -0,0 +1,59 @@
+{"current_steps": 10, "total_steps": 580, "loss": 0.6938, "accuracy": 0.32499998807907104, "learning_rate": 8.620689655172415e-07, "epoch": 0.33472803347280333, "percentage": 1.72, "elapsed_time": "0:01:17", "remaining_time": "1:13:49"}
+{"current_steps": 20, "total_steps": 580, "loss": 0.6931, "accuracy": 0.48750001192092896, "learning_rate": 1.724137931034483e-06, "epoch": 0.6694560669456067, "percentage": 3.45, "elapsed_time": "0:02:26", "remaining_time": "1:08:25"}
+{"current_steps": 30, "total_steps": 580, "loss": 0.6946, "accuracy": 0.44999998807907104, "learning_rate": 2.5862068965517246e-06, "epoch": 1.00418410041841, "percentage": 5.17, "elapsed_time": "0:03:39", "remaining_time": "1:07:01"}
+{"current_steps": 40, "total_steps": 580, "loss": 0.6939, "accuracy": 0.4625000059604645, "learning_rate": 3.448275862068966e-06, "epoch": 1.3389121338912133, "percentage": 6.9, "elapsed_time": "0:04:57", "remaining_time": "1:06:52"}
+{"current_steps": 50, "total_steps": 580, "loss": 0.6938, "accuracy": 0.550000011920929, "learning_rate": 4.310344827586207e-06, "epoch": 1.6736401673640167, "percentage": 8.62, "elapsed_time": "0:06:12", "remaining_time": "1:05:52"}
+{"current_steps": 60, "total_steps": 580, "loss": 0.6952, "accuracy": 0.4375, "learning_rate": 4.999818897894192e-06, "epoch": 2.00836820083682, "percentage": 10.34, "elapsed_time": "0:07:21", "remaining_time": "1:03:49"}
+{"current_steps": 70, "total_steps": 580, "loss": 0.6937, "accuracy": 0.5625, "learning_rate": 4.9934830787948756e-06, "epoch": 2.3430962343096233, "percentage": 12.07, "elapsed_time": "0:08:36", "remaining_time": "1:02:42"}
+{"current_steps": 80, "total_steps": 580, "loss": 0.6927, "accuracy": 0.574999988079071, "learning_rate": 4.978118375700895e-06, "epoch": 2.6778242677824267, "percentage": 13.79, "elapsed_time": "0:09:53", "remaining_time": "1:01:52"}
+{"current_steps": 90, "total_steps": 580, "loss": 0.6923, "accuracy": 0.5, "learning_rate": 4.953780424089803e-06, "epoch": 3.01255230125523, "percentage": 15.52, "elapsed_time": "0:11:05", "remaining_time": "1:00:25"}
+{"current_steps": 100, "total_steps": 580, "loss": 0.6894, "accuracy": 0.6000000238418579, "learning_rate": 4.920557351506409e-06, "epoch": 3.3472803347280333, "percentage": 17.24, "elapsed_time": "0:12:20", "remaining_time": "0:59:15"}
+{"current_steps": 110, "total_steps": 580, "loss": 0.6889, "accuracy": 0.5375000238418579, "learning_rate": 4.878569458453592e-06, "epoch": 3.6820083682008367, "percentage": 18.97, "elapsed_time": "0:13:35", "remaining_time": "0:58:03"}
+{"current_steps": 120, "total_steps": 580, "loss": 0.6886, "accuracy": 0.6625000238418579, "learning_rate": 4.827968782785062e-06, "epoch": 4.01673640167364, "percentage": 20.69, "elapsed_time": "0:14:45", "remaining_time": "0:56:36"}
+{"current_steps": 130, "total_steps": 580, "loss": 0.6847, "accuracy": 0.612500011920929, "learning_rate": 4.7689385491773934e-06, "epoch": 4.351464435146443, "percentage": 22.41, "elapsed_time": "0:16:02", "remaining_time": "0:55:30"}
+{"current_steps": 140, "total_steps": 580, "loss": 0.6809, "accuracy": 0.574999988079071, "learning_rate": 4.70169250567482e-06, "epoch": 4.686192468619247, "percentage": 24.14, "elapsed_time": "0:17:09", "remaining_time": "0:53:56"}
+{"current_steps": 150, "total_steps": 580, "loss": 0.6773, "accuracy": 0.5375000238418579, "learning_rate": 4.626474149709127e-06, "epoch": 5.02092050209205, "percentage": 25.86, "elapsed_time": "0:18:26", "remaining_time": "0:52:53"}
+{"current_steps": 160, "total_steps": 580, "loss": 0.673, "accuracy": 0.625, "learning_rate": 4.54355584639723e-06, "epoch": 5.355648535564853, "percentage": 27.59, "elapsed_time": "0:19:41", "remaining_time": "0:51:41"}
+{"current_steps": 170, "total_steps": 580, "loss": 0.6679, "accuracy": 0.637499988079071, "learning_rate": 4.45323784230908e-06, "epoch": 5.690376569037657, "percentage": 29.31, "elapsed_time": "0:20:53", "remaining_time": "0:50:23"}
+{"current_steps": 180, "total_steps": 580, "loss": 0.6621, "accuracy": 0.5874999761581421, "learning_rate": 4.355847178277025e-06, "epoch": 6.02510460251046, "percentage": 31.03, "elapsed_time": "0:22:11", "remaining_time": "0:49:18"}
+{"current_steps": 190, "total_steps": 580, "loss": 0.6538, "accuracy": 0.75, "learning_rate": 4.2517365051833564e-06, "epoch": 6.359832635983263, "percentage": 32.76, "elapsed_time": "0:23:18", "remaining_time": "0:47:49"}
+{"current_steps": 200, "total_steps": 580, "loss": 0.657, "accuracy": 0.699999988079071, "learning_rate": 4.141282807014034e-06, "epoch": 6.694560669456067, "percentage": 34.48, "elapsed_time": "0:24:33", "remaining_time": "0:46:38"}
+{"current_steps": 210, "total_steps": 580, "loss": 0.6462, "accuracy": 0.762499988079071, "learning_rate": 4.024886035802432e-06, "epoch": 7.02928870292887, "percentage": 36.21, "elapsed_time": "0:25:47", "remaining_time": "0:45:26"}
+{"current_steps": 220, "total_steps": 580, "loss": 0.6413, "accuracy": 0.6875, "learning_rate": 3.9029676634059565e-06, "epoch": 7.364016736401673, "percentage": 37.93, "elapsed_time": "0:26:55", "remaining_time": "0:44:03"}
+{"current_steps": 230, "total_steps": 580, "loss": 0.6346, "accuracy": 0.675000011920929, "learning_rate": 3.7759691553595214e-06, "epoch": 7.698744769874477, "percentage": 39.66, "elapsed_time": "0:28:16", "remaining_time": "0:43:01"}
+{"current_steps": 240, "total_steps": 580, "loss": 0.6377, "accuracy": 0.612500011920929, "learning_rate": 3.6443503723320837e-06, "epoch": 8.03347280334728, "percentage": 41.38, "elapsed_time": "0:29:30", "remaining_time": "0:41:48"}
+{"current_steps": 250, "total_steps": 580, "loss": 0.6216, "accuracy": 0.800000011920929, "learning_rate": 3.508587904974522e-06, "epoch": 8.368200836820083, "percentage": 43.1, "elapsed_time": "0:30:44", "remaining_time": "0:40:35"}
+{"current_steps": 260, "total_steps": 580, "loss": 0.6193, "accuracy": 0.6625000238418579, "learning_rate": 3.3691733481883693e-06, "epoch": 8.702928870292887, "percentage": 44.83, "elapsed_time": "0:32:00", "remaining_time": "0:39:24"}
+{"current_steps": 270, "total_steps": 580, "loss": 0.6201, "accuracy": 0.7124999761581421, "learning_rate": 3.226611521064278e-06, "epoch": 9.03765690376569, "percentage": 46.55, "elapsed_time": "0:33:06", "remaining_time": "0:38:00"}
+{"current_steps": 280, "total_steps": 580, "loss": 0.6157, "accuracy": 0.6625000238418579, "learning_rate": 3.0814186389357765e-06, "epoch": 9.372384937238493, "percentage": 48.28, "elapsed_time": "0:34:20", "remaining_time": "0:36:48"}
+{"current_steps": 290, "total_steps": 580, "loss": 0.5972, "accuracy": 0.7875000238418579, "learning_rate": 2.9341204441673267e-06, "epoch": 9.707112970711297, "percentage": 50.0, "elapsed_time": "0:35:29", "remaining_time": "0:35:29"}
+{"current_steps": 300, "total_steps": 580, "loss": 0.6038, "accuracy": 0.7875000238418579, "learning_rate": 2.785250302445062e-06, "epoch": 10.0418410041841, "percentage": 51.72, "elapsed_time": "0:36:44", "remaining_time": "0:34:17"}
+{"current_steps": 310, "total_steps": 580, "loss": 0.5897, "accuracy": 0.762499988079071, "learning_rate": 2.6353472714635443e-06, "epoch": 10.376569037656903, "percentage": 53.45, "elapsed_time": "0:37:56", "remaining_time": "0:33:02"}
+{"current_steps": 320, "total_steps": 580, "loss": 0.5951, "accuracy": 0.7124999761581421, "learning_rate": 2.4849541490017868e-06, "epoch": 10.711297071129707, "percentage": 55.17, "elapsed_time": "0:39:10", "remaining_time": "0:31:49"}
+{"current_steps": 330, "total_steps": 580, "loss": 0.5895, "accuracy": 0.762499988079071, "learning_rate": 2.3346155074564712e-06, "epoch": 11.04602510460251, "percentage": 56.9, "elapsed_time": "0:40:29", "remaining_time": "0:30:40"}
+{"current_steps": 340, "total_steps": 580, "loss": 0.577, "accuracy": 0.8374999761581421, "learning_rate": 2.184875721949277e-06, "epoch": 11.380753138075313, "percentage": 58.62, "elapsed_time": "0:41:40", "remaining_time": "0:29:25"}
+{"current_steps": 350, "total_steps": 580, "loss": 0.5825, "accuracy": 0.7250000238418579, "learning_rate": 2.0362769991485514e-06, "epoch": 11.715481171548117, "percentage": 60.34, "elapsed_time": "0:42:58", "remaining_time": "0:28:14"}
+{"current_steps": 360, "total_steps": 580, "loss": 0.5819, "accuracy": 0.7749999761581421, "learning_rate": 1.8893574139429226e-06, "epoch": 12.05020920502092, "percentage": 62.07, "elapsed_time": "0:44:12", "remaining_time": "0:27:00"}
+{"current_steps": 370, "total_steps": 580, "loss": 0.5737, "accuracy": 0.75, "learning_rate": 1.744648961076068e-06, "epoch": 12.384937238493723, "percentage": 63.79, "elapsed_time": "0:45:23", "remaining_time": "0:25:45"}
+{"current_steps": 380, "total_steps": 580, "loss": 0.5655, "accuracy": 0.8125, "learning_rate": 1.602675628797636e-06, "epoch": 12.719665271966527, "percentage": 65.52, "elapsed_time": "0:46:39", "remaining_time": "0:24:33"}
+{"current_steps": 390, "total_steps": 580, "loss": 0.5654, "accuracy": 0.800000011920929, "learning_rate": 1.4639515015056205e-06, "epoch": 13.05439330543933, "percentage": 67.24, "elapsed_time": "0:47:54", "remaining_time": "0:23:20"}
+{"current_steps": 400, "total_steps": 580, "loss": 0.5713, "accuracy": 0.7749999761581421, "learning_rate": 1.328978898250525e-06, "epoch": 13.389121338912133, "percentage": 68.97, "elapsed_time": "0:49:08", "remaining_time": "0:22:06"}
+{"current_steps": 410, "total_steps": 580, "loss": 0.5569, "accuracy": 0.824999988079071, "learning_rate": 1.198246553841744e-06, "epoch": 13.723849372384937, "percentage": 70.69, "elapsed_time": "0:50:19", "remaining_time": "0:20:51"}
+{"current_steps": 420, "total_steps": 580, "loss": 0.567, "accuracy": 0.675000011920929, "learning_rate": 1.0722278491423998e-06, "epoch": 14.05857740585774, "percentage": 72.41, "elapsed_time": "0:51:29", "remaining_time": "0:19:36"}
+{"current_steps": 430, "total_steps": 580, "loss": 0.5605, "accuracy": 0.737500011920929, "learning_rate": 9.513790969606926e-07, "epoch": 14.393305439330543, "percentage": 74.14, "elapsed_time": "0:52:44", "remaining_time": "0:18:23"}
+{"current_steps": 440, "total_steps": 580, "loss": 0.5549, "accuracy": 0.75, "learning_rate": 8.361378897445643e-07, "epoch": 14.728033472803347, "percentage": 75.86, "elapsed_time": "0:53:53", "remaining_time": "0:17:08"}
+{"current_steps": 450, "total_steps": 580, "loss": 0.5606, "accuracy": 0.7875000238418579, "learning_rate": 7.269215150626391e-07, "epoch": 15.06276150627615, "percentage": 77.59, "elapsed_time": "0:55:09", "remaining_time": "0:15:56"}
+{"current_steps": 460, "total_steps": 580, "loss": 0.5545, "accuracy": 0.8500000238418579, "learning_rate": 6.241254446089942e-07, "epoch": 15.397489539748953, "percentage": 79.31, "elapsed_time": "0:56:22", "remaining_time": "0:14:42"}
+{"current_steps": 470, "total_steps": 580, "loss": 0.5525, "accuracy": 0.7749999761581421, "learning_rate": 5.281219022030423e-07, "epoch": 15.732217573221757, "percentage": 81.03, "elapsed_time": "0:57:34", "remaining_time": "0:13:28"}
+{"current_steps": 480, "total_steps": 580, "loss": 0.5499, "accuracy": 0.8500000238418579, "learning_rate": 4.392585159698087e-07, "epoch": 16.06694560669456, "percentage": 82.76, "elapsed_time": "0:58:50", "remaining_time": "0:12:15"}
+{"current_steps": 490, "total_steps": 580, "loss": 0.5471, "accuracy": 0.699999988079071, "learning_rate": 3.578570595810274e-07, "epoch": 16.401673640167363, "percentage": 84.48, "elapsed_time": "0:59:58", "remaining_time": "0:11:00"}
+{"current_steps": 500, "total_steps": 580, "loss": 0.5539, "accuracy": 0.7250000238418579, "learning_rate": 2.8421228711503127e-07, "epoch": 16.736401673640167, "percentage": 86.21, "elapsed_time": "1:01:15", "remaining_time": "0:09:48"}
+{"current_steps": 510, "total_steps": 580, "loss": 0.549, "accuracy": 0.887499988079071, "learning_rate": 2.1859086575439225e-07, "epoch": 17.07112970711297, "percentage": 87.93, "elapsed_time": "1:02:31", "remaining_time": "0:08:34"}
+{"current_steps": 520, "total_steps": 580, "loss": 0.5567, "accuracy": 0.8374999761581421, "learning_rate": 1.6123041018599766e-07, "epoch": 17.405857740585773, "percentage": 89.66, "elapsed_time": "1:03:43", "remaining_time": "0:07:21"}
+{"current_steps": 530, "total_steps": 580, "loss": 0.555, "accuracy": 0.824999988079071, "learning_rate": 1.1233862220001168e-07, "epoch": 17.740585774058577, "percentage": 91.38, "elapsed_time": "1:05:03", "remaining_time": "0:06:08"}
|
| 54 |
+
{"current_steps": 540, "total_steps": 580, "loss": 0.5493, "accuracy": 0.8125, "learning_rate": 7.209253860320897e-08, "epoch": 18.07531380753138, "percentage": 93.1, "elapsed_time": "1:06:15", "remaining_time": "0:04:54"}
|
| 55 |
+
{"current_steps": 550, "total_steps": 580, "loss": 0.5472, "accuracy": 0.8500000238418579, "learning_rate": 4.063789016999331e-08, "epoch": 18.410041841004183, "percentage": 94.83, "elapsed_time": "1:07:30", "remaining_time": "0:03:40"}
|
| 56 |
+
{"current_steps": 560, "total_steps": 580, "loss": 0.5574, "accuracy": 0.824999988079071, "learning_rate": 1.808857395232788e-08, "epoch": 18.744769874476987, "percentage": 96.55, "elapsed_time": "1:08:43", "remaining_time": "0:02:27"}
|
| 57 |
+
{"current_steps": 570, "total_steps": 580, "loss": 0.5463, "accuracy": 0.7749999761581421, "learning_rate": 4.526240859345499e-09, "epoch": 19.07949790794979, "percentage": 98.28, "elapsed_time": "1:10:01", "remaining_time": "0:01:13"}
|
| 58 |
+
{"current_steps": 580, "total_steps": 580, "loss": 0.5464, "accuracy": 0.824999988079071, "learning_rate": 0.0, "epoch": 19.414225941422593, "percentage": 100.0, "elapsed_time": "1:11:14", "remaining_time": "0:00:00"}
|
| 59 |
+
{"current_steps": 580, "total_steps": 580, "epoch": 19.414225941422593, "percentage": 100.0, "elapsed_time": "1:11:15", "remaining_time": "0:00:00"}
|
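Each line of `trainer_log.jsonl` above is a self-contained JSON record, so the two plots shipped with this run (`training_loss.png`, `training_rewards_accuracies.png`) can be reproduced from it directly. A minimal sketch, assuming only `matplotlib` is installed; the script and its output filename are illustrative, not files from this repository:

```python
import json

import matplotlib.pyplot as plt

# Read one JSON object per line from the training log.
with open("Area/trainer_log.jsonl") as f:
    records = [json.loads(line) for line in f if line.strip()]

# The final summary record has no "loss"/"accuracy" keys; keep per-step rows.
rows = [r for r in records if "loss" in r]
steps = [r["current_steps"] for r in rows]

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
ax1.plot(steps, [r["loss"] for r in rows])
ax1.set(xlabel="step", ylabel="loss", title="training loss")
ax2.plot(steps, [r["accuracy"] for r in rows])
ax2.set(xlabel="step", ylabel="rewards/accuracies", title="reward accuracy")
fig.tight_layout()
fig.savefig("training_curves.png")  # hypothetical output, not a repo file
```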
Area/trainer_state.json
ADDED
@@ -0,0 +1,912 @@
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 19.414225941422593,
  "eval_steps": 500,
  "global_step": 580,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.33472803347280333,
      "grad_norm": 0.4676998555660248,
      "learning_rate": 8.620689655172415e-07,
      "logits/chosen": 1.7077701091766357,
      "logits/rejected": 1.8646482229232788,
      "logps/chosen": -85.7728271484375,
      "logps/rejected": -88.1952896118164,
      "loss": 0.6938,
      "rewards/accuracies": 0.32499998807907104,
      "rewards/chosen": -0.004770822823047638,
      "rewards/margins": -0.007881464436650276,
      "rewards/rejected": 0.0031106427777558565,
      "step": 10
    },
    {
      "epoch": 0.6694560669456067,
      "grad_norm": 0.43230223655700684,
      "learning_rate": 1.724137931034483e-06,
      "logits/chosen": 1.8023220300674438,
      "logits/rejected": 1.8210970163345337,
      "logps/chosen": -78.37618255615234,
      "logps/rejected": -75.44720458984375,
      "loss": 0.6931,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.0056939031928777695,
      "rewards/margins": -0.003848772030323744,
      "rewards/rejected": -0.001845130929723382,
      "step": 20
    },
    {
      "epoch": 1.00418410041841,
      "grad_norm": 2.431753396987915,
      "learning_rate": 2.5862068965517246e-06,
      "logits/chosen": 1.8643144369125366,
      "logits/rejected": 1.8500900268554688,
      "logps/chosen": -86.84412384033203,
      "logps/rejected": -90.78925323486328,
      "loss": 0.6946,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": -0.0008674233104102314,
      "rewards/margins": -0.008064134046435356,
      "rewards/rejected": 0.0071967123076319695,
      "step": 30
    },
    {
      "epoch": 1.3389121338912133,
      "grad_norm": 0.5327289700508118,
      "learning_rate": 3.448275862068966e-06,
      "logits/chosen": 1.7701479196548462,
      "logits/rejected": 1.7749736309051514,
      "logps/chosen": -83.61552429199219,
      "logps/rejected": -72.89176177978516,
      "loss": 0.6939,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": -0.0019066383829340339,
      "rewards/margins": -0.0035008196718990803,
      "rewards/rejected": 0.0015941811725497246,
      "step": 40
    },
    {
      "epoch": 1.6736401673640167,
      "grad_norm": 0.5017096996307373,
      "learning_rate": 4.310344827586207e-06,
      "logits/chosen": 1.7465788125991821,
      "logits/rejected": 1.7925498485565186,
      "logps/chosen": -84.85249328613281,
      "logps/rejected": -88.79469299316406,
      "loss": 0.6938,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": 0.0003656863118521869,
      "rewards/margins": 0.00012080222222721204,
      "rewards/rejected": 0.00024488387862220407,
      "step": 50
    },
    {
      "epoch": 2.00836820083682,
      "grad_norm": 0.4994066059589386,
      "learning_rate": 4.999818897894192e-06,
      "logits/chosen": 1.8359416723251343,
      "logits/rejected": 1.814234733581543,
      "logps/chosen": -79.98396301269531,
      "logps/rejected": -66.29866027832031,
      "loss": 0.6952,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": 0.00456579215824604,
      "rewards/margins": -0.002460189163684845,
      "rewards/rejected": 0.007025980856269598,
      "step": 60
    },
    {
      "epoch": 2.3430962343096233,
      "grad_norm": 0.6425362825393677,
      "learning_rate": 4.9934830787948756e-06,
      "logits/chosen": 1.7550818920135498,
      "logits/rejected": 1.7802051305770874,
      "logps/chosen": -83.60951232910156,
      "logps/rejected": -70.81080627441406,
      "loss": 0.6937,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": 0.004059882368892431,
      "rewards/margins": 0.00932090263813734,
      "rewards/rejected": -0.005261021666228771,
      "step": 70
    },
    {
      "epoch": 2.6778242677824267,
      "grad_norm": 0.5591297149658203,
      "learning_rate": 4.978118375700895e-06,
      "logits/chosen": 1.7742525339126587,
      "logits/rejected": 1.8790937662124634,
      "logps/chosen": -86.7696533203125,
      "logps/rejected": -84.90727233886719,
      "loss": 0.6927,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": 0.0033963967580348253,
      "rewards/margins": 0.005500240251421928,
      "rewards/rejected": -0.0021038432605564594,
      "step": 80
    },
    {
      "epoch": 3.01255230125523,
      "grad_norm": 0.6774541735649109,
      "learning_rate": 4.953780424089803e-06,
      "logits/chosen": 1.8815631866455078,
      "logits/rejected": 1.911620855331421,
      "logps/chosen": -92.14398193359375,
      "logps/rejected": -82.83556365966797,
      "loss": 0.6923,
      "rewards/accuracies": 0.5,
      "rewards/chosen": 0.007645039353519678,
      "rewards/margins": 0.005134005565196276,
      "rewards/rejected": 0.0025110342539846897,
      "step": 90
    },
    {
      "epoch": 3.3472803347280333,
      "grad_norm": 0.6908814907073975,
      "learning_rate": 4.920557351506409e-06,
      "logits/chosen": 1.7718560695648193,
      "logits/rejected": 1.8823477029800415,
      "logps/chosen": -91.2622299194336,
      "logps/rejected": -96.467529296875,
      "loss": 0.6894,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": 0.004941435065120459,
      "rewards/margins": 0.01582186669111252,
      "rewards/rejected": -0.010880433022975922,
      "step": 100
    },
    {
      "epoch": 3.6820083682008367,
      "grad_norm": 0.6954344511032104,
      "learning_rate": 4.878569458453592e-06,
      "logits/chosen": 1.6916942596435547,
      "logits/rejected": 1.7603965997695923,
      "logps/chosen": -84.66748046875,
      "logps/rejected": -103.5328369140625,
      "loss": 0.6889,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.003184904810041189,
      "rewards/margins": 0.007628746330738068,
      "rewards/rejected": -0.01081365067511797,
      "step": 110
    },
    {
      "epoch": 4.01673640167364,
      "grad_norm": 0.6846771240234375,
      "learning_rate": 4.827968782785062e-06,
      "logits/chosen": 1.8839175701141357,
      "logits/rejected": 1.956974744796753,
      "logps/chosen": -76.89198303222656,
      "logps/rejected": -95.03202056884766,
      "loss": 0.6886,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": 0.009448022581636906,
      "rewards/margins": 0.018458742648363113,
      "rewards/rejected": -0.009010720066726208,
      "step": 120
    },
    {
      "epoch": 4.351464435146443,
      "grad_norm": 0.762366771697998,
      "learning_rate": 4.7689385491773934e-06,
      "logits/chosen": 1.8294252157211304,
      "logits/rejected": 1.8144447803497314,
      "logps/chosen": -87.03471374511719,
      "logps/rejected": -73.86766052246094,
      "loss": 0.6847,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.01010747067630291,
      "rewards/margins": 0.01468165498226881,
      "rewards/rejected": -0.024789121001958847,
      "step": 130
    },
    {
      "epoch": 4.686192468619247,
      "grad_norm": 0.7172746658325195,
      "learning_rate": 4.70169250567482e-06,
      "logits/chosen": 1.7118749618530273,
      "logits/rejected": 1.8259985446929932,
      "logps/chosen": -68.12081146240234,
      "logps/rejected": -75.31085205078125,
      "loss": 0.6809,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.014076923951506615,
      "rewards/margins": 0.014298361726105213,
      "rewards/rejected": -0.028375286608934402,
      "step": 140
    },
    {
      "epoch": 5.02092050209205,
      "grad_norm": 0.7188512682914734,
      "learning_rate": 4.626474149709127e-06,
      "logits/chosen": 1.8864549398422241,
      "logits/rejected": 1.8488889932632446,
      "logps/chosen": -101.3166275024414,
      "logps/rejected": -76.73490905761719,
      "loss": 0.6773,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.021979983896017075,
      "rewards/margins": 0.023767748847603798,
      "rewards/rejected": -0.04574773460626602,
      "step": 150
    },
    {
      "epoch": 5.355648535564853,
      "grad_norm": 0.8493714928627014,
      "learning_rate": 4.54355584639723e-06,
      "logits/chosen": 1.7861169576644897,
      "logits/rejected": 1.7927452325820923,
      "logps/chosen": -83.28593444824219,
      "logps/rejected": -76.04238891601562,
      "loss": 0.673,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.0320693664252758,
      "rewards/margins": 0.04263792932033539,
      "rewards/rejected": -0.07470729202032089,
      "step": 160
    },
    {
      "epoch": 5.690376569037657,
      "grad_norm": 1.1378726959228516,
      "learning_rate": 4.45323784230908e-06,
      "logits/chosen": 1.8622329235076904,
      "logits/rejected": 1.875741958618164,
      "logps/chosen": -72.01219177246094,
      "logps/rejected": -77.62818908691406,
      "loss": 0.6679,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.05134737491607666,
      "rewards/margins": 0.049249742180109024,
      "rewards/rejected": -0.10059712082147598,
      "step": 170
    },
    {
      "epoch": 6.02510460251046,
      "grad_norm": 0.847227156162262,
      "learning_rate": 4.355847178277025e-06,
      "logits/chosen": 1.9445765018463135,
      "logits/rejected": 2.0065901279449463,
      "logps/chosen": -76.14964294433594,
      "logps/rejected": -83.71626281738281,
      "loss": 0.6621,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.05519520118832588,
      "rewards/margins": 0.06803703308105469,
      "rewards/rejected": -0.12323222309350967,
      "step": 180
    },
    {
      "epoch": 6.359832635983263,
      "grad_norm": 0.8368040919303894,
      "learning_rate": 4.2517365051833564e-06,
      "logits/chosen": 1.8116003274917603,
      "logits/rejected": 1.8839362859725952,
      "logps/chosen": -82.7087631225586,
      "logps/rejected": -67.3785629272461,
      "loss": 0.6538,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.03855596110224724,
      "rewards/margins": 0.11562293767929077,
      "rewards/rejected": -0.15417888760566711,
      "step": 190
    },
    {
      "epoch": 6.694560669456067,
      "grad_norm": 0.8636891841888428,
      "learning_rate": 4.141282807014034e-06,
      "logits/chosen": 1.7250430583953857,
      "logits/rejected": 1.775418996810913,
      "logps/chosen": -68.51094055175781,
      "logps/rejected": -79.88996124267578,
      "loss": 0.657,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.11151214689016342,
      "rewards/margins": 0.07863004505634308,
      "rewards/rejected": -0.1901421844959259,
      "step": 200
    },
    {
      "epoch": 7.02928870292887,
      "grad_norm": 0.945520281791687,
      "learning_rate": 4.024886035802432e-06,
      "logits/chosen": 1.7329381704330444,
      "logits/rejected": 1.799551010131836,
      "logps/chosen": -81.49295043945312,
      "logps/rejected": -87.97386169433594,
      "loss": 0.6462,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.09862186014652252,
      "rewards/margins": 0.10664049535989761,
      "rewards/rejected": -0.20526234805583954,
      "step": 210
    },
    {
      "epoch": 7.364016736401673,
      "grad_norm": 0.9736716151237488,
      "learning_rate": 3.9029676634059565e-06,
      "logits/chosen": 1.8787791728973389,
      "logits/rejected": 1.8832632303237915,
      "logps/chosen": -83.21014404296875,
      "logps/rejected": -76.4449691772461,
      "loss": 0.6413,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.07923021167516708,
      "rewards/margins": 0.11194779723882675,
      "rewards/rejected": -0.19117799401283264,
      "step": 220
    },
    {
      "epoch": 7.698744769874477,
      "grad_norm": 0.9238549470901489,
      "learning_rate": 3.7759691553595214e-06,
      "logits/chosen": 1.9044986963272095,
      "logits/rejected": 1.8805242776870728,
      "logps/chosen": -90.17919158935547,
      "logps/rejected": -86.05476379394531,
      "loss": 0.6346,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.0993318110704422,
      "rewards/margins": 0.12289313971996307,
      "rewards/rejected": -0.22222498059272766,
      "step": 230
    },
    {
      "epoch": 8.03347280334728,
      "grad_norm": 1.0052326917648315,
      "learning_rate": 3.6443503723320837e-06,
      "logits/chosen": 1.873815894126892,
      "logits/rejected": 1.8809674978256226,
      "logps/chosen": -74.86824035644531,
      "logps/rejected": -81.79768371582031,
      "loss": 0.6377,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.17602138221263885,
      "rewards/margins": 0.09737586975097656,
      "rewards/rejected": -0.2733972668647766,
      "step": 240
    },
    {
      "epoch": 8.368200836820083,
      "grad_norm": 1.0167930126190186,
      "learning_rate": 3.508587904974522e-06,
      "logits/chosen": 1.868101716041565,
      "logits/rejected": 1.8685039281845093,
      "logps/chosen": -94.48133850097656,
      "logps/rejected": -94.00556182861328,
      "loss": 0.6216,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.1260211318731308,
      "rewards/margins": 0.21002164483070374,
      "rewards/rejected": -0.3360427916049957,
      "step": 250
    },
    {
      "epoch": 8.702928870292887,
      "grad_norm": 0.9955905079841614,
      "learning_rate": 3.3691733481883693e-06,
      "logits/chosen": 1.7905946969985962,
      "logits/rejected": 1.7784850597381592,
      "logps/chosen": -94.9794921875,
      "logps/rejected": -84.99113464355469,
      "loss": 0.6193,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.18853916227817535,
      "rewards/margins": 0.19185101985931396,
      "rewards/rejected": -0.3803902268409729,
      "step": 260
    },
    {
      "epoch": 9.03765690376569,
      "grad_norm": 0.943289577960968,
      "learning_rate": 3.226611521064278e-06,
      "logits/chosen": 1.8119779825210571,
      "logits/rejected": 1.817567229270935,
      "logps/chosen": -80.78002166748047,
      "logps/rejected": -85.42848205566406,
      "loss": 0.6201,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.16673071682453156,
      "rewards/margins": 0.19530437886714935,
      "rewards/rejected": -0.3620350658893585,
      "step": 270
    },
    {
      "epoch": 9.372384937238493,
      "grad_norm": 0.9868927597999573,
      "learning_rate": 3.0814186389357765e-06,
      "logits/chosen": 1.816300392150879,
      "logits/rejected": 1.7763961553573608,
      "logps/chosen": -77.19395446777344,
      "logps/rejected": -68.16056060791016,
      "loss": 0.6157,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.21062004566192627,
      "rewards/margins": 0.19489461183547974,
      "rewards/rejected": -0.4055147171020508,
      "step": 280
    },
    {
      "epoch": 9.707112970711297,
      "grad_norm": 1.0557409524917603,
      "learning_rate": 2.9341204441673267e-06,
      "logits/chosen": 1.8154022693634033,
      "logits/rejected": 1.842792272567749,
      "logps/chosen": -84.95299530029297,
      "logps/rejected": -82.22926330566406,
      "loss": 0.5972,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.25716403126716614,
      "rewards/margins": 0.2636396288871765,
      "rewards/rejected": -0.5208036303520203,
      "step": 290
    },
    {
      "epoch": 10.0418410041841,
      "grad_norm": 1.0787400007247925,
      "learning_rate": 2.785250302445062e-06,
      "logits/chosen": 1.8255424499511719,
      "logits/rejected": 1.8123111724853516,
      "logps/chosen": -94.89508056640625,
      "logps/rejected": -99.48709869384766,
      "loss": 0.6038,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.25526899099349976,
      "rewards/margins": 0.24981728196144104,
      "rewards/rejected": -0.5050862431526184,
      "step": 300
    },
    {
      "epoch": 10.376569037656903,
      "grad_norm": 0.9934303164482117,
      "learning_rate": 2.6353472714635443e-06,
      "logits/chosen": 1.6809011697769165,
      "logits/rejected": 1.6883894205093384,
      "logps/chosen": -83.52108001708984,
      "logps/rejected": -89.83765411376953,
      "loss": 0.5897,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.20843425393104553,
      "rewards/margins": 0.2531076967716217,
      "rewards/rejected": -0.46154195070266724,
      "step": 310
    },
    {
      "epoch": 10.711297071129707,
      "grad_norm": 1.0989857912063599,
      "learning_rate": 2.4849541490017868e-06,
      "logits/chosen": 1.8184492588043213,
      "logits/rejected": 1.8702239990234375,
      "logps/chosen": -76.5416030883789,
      "logps/rejected": -87.98771667480469,
      "loss": 0.5951,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.26176151633262634,
      "rewards/margins": 0.17908410727977753,
      "rewards/rejected": -0.44084563851356506,
      "step": 320
    },
    {
      "epoch": 11.04602510460251,
      "grad_norm": 1.1377506256103516,
      "learning_rate": 2.3346155074564712e-06,
      "logits/chosen": 1.7781150341033936,
      "logits/rejected": 1.7945045232772827,
      "logps/chosen": -94.21954345703125,
      "logps/rejected": -87.9439926147461,
      "loss": 0.5895,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.3070668578147888,
      "rewards/margins": 0.26864537596702576,
      "rewards/rejected": -0.5757122039794922,
      "step": 330
    },
    {
      "epoch": 11.380753138075313,
      "grad_norm": 1.1378768682479858,
      "learning_rate": 2.184875721949277e-06,
      "logits/chosen": 1.7889184951782227,
      "logits/rejected": 1.7863588333129883,
      "logps/chosen": -77.19974517822266,
      "logps/rejected": -74.92039489746094,
      "loss": 0.577,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -0.2269454449415207,
      "rewards/margins": 0.3004661798477173,
      "rewards/rejected": -0.5274116396903992,
      "step": 340
    },
    {
      "epoch": 11.715481171548117,
      "grad_norm": 1.0102859735488892,
      "learning_rate": 2.0362769991485514e-06,
      "logits/chosen": 1.6868798732757568,
      "logits/rejected": 1.743287444114685,
      "logps/chosen": -80.24299621582031,
      "logps/rejected": -91.3736343383789,
      "loss": 0.5825,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.2850210964679718,
      "rewards/margins": 0.30632922053337097,
      "rewards/rejected": -0.5913503170013428,
      "step": 350
    },
    {
      "epoch": 12.05020920502092,
      "grad_norm": 1.0598143339157104,
      "learning_rate": 1.8893574139429226e-06,
      "logits/chosen": 1.6859172582626343,
      "logits/rejected": 1.7635313272476196,
      "logps/chosen": -88.87372589111328,
      "logps/rejected": -93.02122497558594,
      "loss": 0.5819,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.317017138004303,
      "rewards/margins": 0.2608889639377594,
      "rewards/rejected": -0.5779060125350952,
      "step": 360
    },
    {
      "epoch": 12.384937238493723,
      "grad_norm": 1.0845669507980347,
      "learning_rate": 1.744648961076068e-06,
      "logits/chosen": 1.7980632781982422,
      "logits/rejected": 1.8205795288085938,
      "logps/chosen": -83.31995391845703,
      "logps/rejected": -87.28046417236328,
      "loss": 0.5737,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.3205794394016266,
      "rewards/margins": 0.24214692413806915,
      "rewards/rejected": -0.5627263784408569,
      "step": 370
    },
    {
      "epoch": 12.719665271966527,
      "grad_norm": 1.180468201637268,
      "learning_rate": 1.602675628797636e-06,
      "logits/chosen": 1.8093292713165283,
      "logits/rejected": 1.8195394277572632,
      "logps/chosen": -99.64051055908203,
      "logps/rejected": -82.13116455078125,
      "loss": 0.5655,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -0.28717803955078125,
      "rewards/margins": 0.355060875415802,
      "rewards/rejected": -0.6422389149665833,
      "step": 380
    },
    {
      "epoch": 13.05439330543933,
      "grad_norm": 1.0443357229232788,
      "learning_rate": 1.4639515015056205e-06,
      "logits/chosen": 1.7365925312042236,
      "logits/rejected": 1.7514903545379639,
      "logps/chosen": -96.92813110351562,
      "logps/rejected": -83.5425796508789,
      "loss": 0.5654,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.3064219653606415,
      "rewards/margins": 0.38930463790893555,
      "rewards/rejected": -0.6957265138626099,
      "step": 390
    },
    {
      "epoch": 13.389121338912133,
      "grad_norm": 1.2010602951049805,
      "learning_rate": 1.328978898250525e-06,
      "logits/chosen": 1.7649974822998047,
      "logits/rejected": 1.7864068746566772,
      "logps/chosen": -78.26787567138672,
      "logps/rejected": -82.81205749511719,
      "loss": 0.5713,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.32541388273239136,
      "rewards/margins": 0.34498968720436096,
      "rewards/rejected": -0.6704035997390747,
      "step": 400
    },
    {
      "epoch": 13.723849372384937,
      "grad_norm": 1.656805396080017,
      "learning_rate": 1.198246553841744e-06,
      "logits/chosen": 1.6543798446655273,
      "logits/rejected": 1.700377106666565,
      "logps/chosen": -88.14425659179688,
      "logps/rejected": -86.7034912109375,
      "loss": 0.5569,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.307409405708313,
      "rewards/margins": 0.346424400806427,
      "rewards/rejected": -0.65383380651474,
      "step": 410
    },
    {
      "epoch": 14.05857740585774,
      "grad_norm": 1.3017597198486328,
      "learning_rate": 1.0722278491423998e-06,
      "logits/chosen": 1.861400842666626,
      "logits/rejected": 1.7919048070907593,
      "logps/chosen": -101.43531036376953,
      "logps/rejected": -77.99034881591797,
      "loss": 0.567,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.34600889682769775,
      "rewards/margins": 0.2590746283531189,
      "rewards/rejected": -0.6050835251808167,
      "step": 420
    },
    {
      "epoch": 14.393305439330543,
      "grad_norm": 1.0269688367843628,
      "learning_rate": 9.513790969606926e-07,
      "logits/chosen": 1.809483289718628,
      "logits/rejected": 1.8456366062164307,
      "logps/chosen": -102.51566314697266,
      "logps/rejected": -104.2486343383789,
      "loss": 0.5605,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.33636271953582764,
      "rewards/margins": 0.3720197379589081,
      "rewards/rejected": -0.7083825469017029,
      "step": 430
    },
    {
      "epoch": 14.728033472803347,
      "grad_norm": 1.0176596641540527,
      "learning_rate": 8.361378897445643e-07,
      "logits/chosen": 1.8125699758529663,
      "logits/rejected": 1.819248914718628,
      "logps/chosen": -83.56723022460938,
      "logps/rejected": -81.31576538085938,
      "loss": 0.5549,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.321399986743927,
      "rewards/margins": 0.2679772973060608,
      "rewards/rejected": -0.5893772840499878,
      "step": 440
    },
    {
      "epoch": 15.06276150627615,
      "grad_norm": 1.1313916444778442,
      "learning_rate": 7.269215150626391e-07,
      "logits/chosen": 1.7356494665145874,
      "logits/rejected": 1.793581247329712,
      "logps/chosen": -80.0768051147461,
      "logps/rejected": -79.23683166503906,
      "loss": 0.5606,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.34324222803115845,
      "rewards/margins": 0.3662562370300293,
      "rewards/rejected": -0.7094983458518982,
      "step": 450
    },
    {
      "epoch": 15.397489539748953,
      "grad_norm": 1.1418852806091309,
      "learning_rate": 6.241254446089942e-07,
      "logits/chosen": 1.7430709600448608,
      "logits/rejected": 1.7953869104385376,
      "logps/chosen": -82.4347152709961,
      "logps/rejected": -82.4286880493164,
      "loss": 0.5545,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -0.3615425229072571,
      "rewards/margins": 0.35559993982315063,
      "rewards/rejected": -0.7171424031257629,
      "step": 460
    },
    {
      "epoch": 15.732217573221757,
      "grad_norm": 1.1025134325027466,
      "learning_rate": 5.281219022030423e-07,
      "logits/chosen": 1.655470848083496,
      "logits/rejected": 1.692399024963379,
      "logps/chosen": -101.62381744384766,
      "logps/rejected": -96.6092300415039,
      "loss": 0.5525,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.29976749420166016,
      "rewards/margins": 0.3946015238761902,
      "rewards/rejected": -0.6943690776824951,
      "step": 470
    },
    {
      "epoch": 16.06694560669456,
      "grad_norm": 1.2202156782150269,
      "learning_rate": 4.392585159698087e-07,
      "logits/chosen": 1.7376701831817627,
      "logits/rejected": 1.793471097946167,
      "logps/chosen": -89.61154174804688,
      "logps/rejected": -94.71632385253906,
      "loss": 0.5499,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -0.31332069635391235,
      "rewards/margins": 0.4189217984676361,
      "rewards/rejected": -0.7322424650192261,
      "step": 480
    },
    {
      "epoch": 16.401673640167363,
      "grad_norm": 1.1149810552597046,
      "learning_rate": 3.578570595810274e-07,
      "logits/chosen": 1.7718391418457031,
      "logits/rejected": 1.8273475170135498,
      "logps/chosen": -83.96971130371094,
      "logps/rejected": -92.80200958251953,
      "loss": 0.5471,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.2852213978767395,
      "rewards/margins": 0.3351207673549652,
      "rewards/rejected": -0.6203421354293823,
      "step": 490
    },
    {
      "epoch": 16.736401673640167,
      "grad_norm": 1.0757339000701904,
      "learning_rate": 2.8421228711503127e-07,
      "logits/chosen": 1.7726856470108032,
      "logits/rejected": 1.836022973060608,
      "logps/chosen": -78.27845764160156,
      "logps/rejected": -83.48805236816406,
      "loss": 0.5539,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.34817034006118774,
      "rewards/margins": 0.2852809429168701,
      "rewards/rejected": -0.6334512829780579,
      "step": 500
    },
    {
      "epoch": 17.07112970711297,
      "grad_norm": 1.1488370895385742,
      "learning_rate": 2.1859086575439225e-07,
      "logits/chosen": 1.7012341022491455,
      "logits/rejected": 1.78024423122406,
      "logps/chosen": -84.58718872070312,
      "logps/rejected": -86.95068359375,
      "loss": 0.549,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -0.291012704372406,
      "rewards/margins": 0.4876977801322937,
      "rewards/rejected": -0.7787104845046997,
      "step": 510
    },
    {
      "epoch": 17.405857740585773,
      "grad_norm": 1.1710774898529053,
      "learning_rate": 1.6123041018599766e-07,
      "logits/chosen": 1.7162996530532837,
      "logits/rejected": 1.7306814193725586,
      "logps/chosen": -93.86546325683594,
      "logps/rejected": -84.5029067993164,
      "loss": 0.5567,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -0.26384133100509644,
      "rewards/margins": 0.3909001052379608,
      "rewards/rejected": -0.6547414064407349,
      "step": 520
    },
    {
      "epoch": 17.740585774058577,
      "grad_norm": 1.0457745790481567,
      "learning_rate": 1.1233862220001168e-07,
      "logits/chosen": 1.729026436805725,
      "logits/rejected": 1.7938979864120483,
      "logps/chosen": -86.14263916015625,
      "logps/rejected": -109.89949798583984,
      "loss": 0.555,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.37593385577201843,
      "rewards/margins": 0.41020965576171875,
      "rewards/rejected": -0.7861436009407043,
      "step": 530
    },
    {
      "epoch": 18.07531380753138,
      "grad_norm": 1.1671756505966187,
      "learning_rate": 7.209253860320897e-08,
      "logits/chosen": 1.797410011291504,
      "logits/rejected": 1.7258751392364502,
      "logps/chosen": -101.58478546142578,
      "logps/rejected": -81.59870910644531,
      "loss": 0.5493,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -0.36429405212402344,
      "rewards/margins": 0.3445179760456085,
      "rewards/rejected": -0.7088119983673096,
      "step": 540
    },
    {
      "epoch": 18.410041841004183,
      "grad_norm": 1.0843878984451294,
      "learning_rate": 4.063789016999331e-08,
      "logits/chosen": 1.7526544332504272,
      "logits/rejected": 1.7694238424301147,
      "logps/chosen": -91.79996490478516,
      "logps/rejected": -88.14305877685547,
      "loss": 0.5472,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -0.432400643825531,
      "rewards/margins": 0.34698057174682617,
      "rewards/rejected": -0.7793812155723572,
      "step": 550
    },
    {
      "epoch": 18.744769874476987,
      "grad_norm": 1.1687026023864746,
      "learning_rate": 1.808857395232788e-08,
      "logits/chosen": 1.8021119832992554,
      "logits/rejected": 1.8404823541641235,
      "logps/chosen": -101.54361724853516,
      "logps/rejected": -102.21680450439453,
      "loss": 0.5574,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.32780179381370544,
      "rewards/margins": 0.34786924719810486,
      "rewards/rejected": -0.6756710410118103,
      "step": 560
    },
    {
      "epoch": 19.07949790794979,
      "grad_norm": 1.3505330085754395,
      "learning_rate": 4.526240859345499e-09,
      "logits/chosen": 1.7214359045028687,
      "logits/rejected": 1.7881813049316406,
      "logps/chosen": -74.24882507324219,
      "logps/rejected": -99.3295669555664,
      "loss": 0.5463,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.4312325417995453,
      "rewards/margins": 0.3562263250350952,
      "rewards/rejected": -0.7874588966369629,
      "step": 570
    },
    {
      "epoch": 19.414225941422593,
      "grad_norm": 1.1412781476974487,
      "learning_rate": 0.0,
      "logits/chosen": 1.8473981618881226,
      "logits/rejected": 1.784250259399414,
      "logps/chosen": -86.49626159667969,
      "logps/rejected": -79.94244384765625,
      "loss": 0.5464,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.3426724970340729,
      "rewards/margins": 0.37268632650375366,
      "rewards/rejected": -0.7153588533401489,
      "step": 580
    },
    {
      "epoch": 19.414225941422593,
      "step": 580,
      "total_flos": 1.9755070908268544e+18,
      "train_loss": 0.6148436521661693,
      "train_runtime": 4276.8425,
      "train_samples_per_second": 8.922,
      "train_steps_per_second": 0.136
    }
  ],
  "logging_steps": 10,
  "max_steps": 580,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.9755070908268544e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}
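The `trainer_state.json` above duplicates the log in structured form and adds run-level totals, which makes quick sanity checks easy. A minimal sketch; the gradient-accumulation guess at the end is an inference from the numbers shown, not a field stored in the file:

```python
import json

with open("Area/trainer_state.json") as f:
    state = json.load(f)

# 580 optimizer steps over ~19.41 epochs -> roughly 30 steps per epoch.
steps_per_epoch = state["global_step"] / state["epoch"]

summary = state["log_history"][-1]  # run-level record appended after training
total_samples = summary["train_samples_per_second"] * summary["train_runtime"]
samples_per_step = total_samples / state["global_step"]  # ~66 here

print(f"steps/epoch ~ {steps_per_epoch:.1f}")
print(f"samples per optimizer step ~ {samples_per_step:.0f}")
# With "train_batch_size": 1, ~66 samples per step points at a fairly large
# world size x gradient accumulation product (e.g. 8 processes x accumulation 8
# would give 64); the exact split is not recorded in this file.
```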
Area/training_loss.png
ADDED
Area/training_rewards_accuracies.png
ADDED
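The checkpoint files that follow are committed as Git LFS pointers: three text lines (`version`, `oid sha256:<hash>`, `size <bytes>`) standing in for the binary payload. A minimal sketch of reading one pointer back, assuming only the standard library; `read_lfs_pointer` is a hypothetical helper, not something shipped in this repository:

```python
from pathlib import Path

def read_lfs_pointer(path: str) -> dict:
    """Split each 'key value' line of a Git LFS pointer file into a dict."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

ptr = read_lfs_pointer("Power/checkpoint-100/rng_state_0.pth")
assert ptr["version"] == "https://git-lfs.github.com/spec/v1"
print(ptr["oid"], ptr["size"])  # e.g. sha256:36d2a203... 15984

# After `git lfs pull`, the same path holds the real payload, whose sha256
# digest should match the hash recorded after "oid sha256:" in the pointer.
```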
Power/checkpoint-100/rng_state_0.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:36d2a2034ebb05cb71c510897f2795b31164e50f17b270bc25d2be3ad9a17b22
size 15984
Power/checkpoint-100/rng_state_1.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:060dfdb1c49102cbdc8868a6031e68787601b4ccd782f3fb9b137e20c1fd2c7a
size 15984
Power/checkpoint-100/rng_state_2.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:af01895cb66e616591f2e4baa8dcd8151530eab133c73571ccb31c74f35422ce
size 15984
Power/checkpoint-100/rng_state_3.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:677921992b1e0cef3aee776f245975003d22f51d9bd6ed20f248ded1deb72fa9
size 15984
Power/checkpoint-100/rng_state_4.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d69353c629541c690c5471f8ec05fdab2bfecf3d37afaa436bc45939da6db68f
size 15984
Power/checkpoint-100/rng_state_5.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8e40ba6668cc03c9162c68a933d164bf38ae2d196a9a6fec03ae615491201185
size 15984
Power/checkpoint-100/rng_state_6.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:870968fea834e24b2e099cf3e4fe1e3fb8caf38d8f8e5b790d7d47386d4d05f5
size 15984
Power/checkpoint-100/rng_state_7.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e9e19618bee7c6ef43256fea25abe19bca88535eb1e7dc213cde8929ae4e8180
size 15984
Power/checkpoint-100/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e21768344077de7283e0a762f5a93feaa55939330da210b2bcb6eaf7c851baab
size 1064
Power/checkpoint-100/training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2272138a6d1778027395e1d713502fac46aa791206abab20d5ab48f8f0656b43
size 5368
Power/checkpoint-150/rng_state_0.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5a0ef6f96a48e59aa52c4b471312c2a62378c19acc7ebbae839612b03a7d775a
size 15984
Power/checkpoint-150/rng_state_1.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ab11d533c0fdad46ea8b8e295ba5fdb705e078eeb88cc28f37d82913508766e9
size 15984
Power/checkpoint-150/rng_state_2.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:615c168147e3465ce5bfab6da2ff4afc68566ce00ec0f0c6c9fc988038a58d0a
size 15984
Power/checkpoint-150/rng_state_3.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:79f71e8f8674ecaef9f8cdcbf7ac457a8b8ff15b12694ba2a2fffcb4b43f0f08
size 15984
Power/checkpoint-150/rng_state_4.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:88cf6d674dab5545c300a55135f08ca935730a3d35e2c419fb0b333f19482c19
size 15984
Power/checkpoint-150/rng_state_5.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2754f2cd8824702f027870d93748b3c0491b0ecd30f1e3d8e937116b2be6151f
size 15984
Power/checkpoint-150/rng_state_6.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1385124ac55604598f45ea6e2d141f29456647d3e7c10d12ca64ec93d312be8d
size 15984
Power/checkpoint-150/rng_state_7.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:416538efaec7391fa8fe782fb15146b83e5612d9e1961292c34c53e964806873
size 15984
Power/checkpoint-150/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:37138d063e175e1129c3713960f6dfddf9a9ad325090fba9333435f600bb74eb
size 1064
Power/checkpoint-150/training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2272138a6d1778027395e1d713502fac46aa791206abab20d5ab48f8f0656b43
size 5368
Power/checkpoint-200/rng_state_0.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a7d7e02ffb4d440dce7ab4ce0b5617578ec9ce3672acee7434ed6f1153f1ae0c
size 15984
Power/checkpoint-200/rng_state_1.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c5b40ca759e432b2688f021b81291d74a40f56a205e9842119f7e772275eebd3
size 15984
Power/checkpoint-200/rng_state_2.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cdaef955ddd36d6bc1c40584113dd6205483e2aa85b02439b8b27e82e02a8359
size 15984
Power/checkpoint-200/rng_state_3.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:10b14ae5db356e6512538751d6b386c190754e307cc99cd652d5c6dd891e1f82
size 15984
Power/checkpoint-200/rng_state_4.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f26e28be26826eeeed244b77185c67b443ac185175f8d4bf5ba94caa8b271bc5
size 15984
Power/checkpoint-200/rng_state_5.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:847cedc1d6ca26f299a132c2ade9754887374acb9d98f26594a85d4c7742d474
size 15984
Power/checkpoint-200/rng_state_6.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bcd043d1690ae0ff6991b03322799a0b28f021427b15fd9f1e5ed8b9905d9307
size 15984
Power/checkpoint-200/rng_state_7.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:772190f7e6667c865d25fc72da7bdd1b5d39f46fe03bb5c2d754aee1ad3c99c7
size 15984
Power/checkpoint-200/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a0936a508163d235cb9e3127fe72e024004a23629fa930579330444ee4094dff
size 1064
Power/checkpoint-200/training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2272138a6d1778027395e1d713502fac46aa791206abab20d5ab48f8f0656b43
size 5368
Power/checkpoint-250/rng_state_0.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ae9162e03c562553a5d9d13120f544d3c47ea71bb39aa44e18253675e17ed4a4
size 15984
Power/checkpoint-250/rng_state_1.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4809456871b3a40c8db7e0926a9db11b01149a1d483fb29b16fc69dabaf36c6f
size 15984
Power/checkpoint-250/rng_state_2.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4bb6bcf25ff148b74eea7dd4895fc42e9433538fff5d75f0d2ae6cb0c2fdadf0
size 15984
Power/checkpoint-250/rng_state_3.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0f00ea04cd1a52c539d9cc948ac8a04676d6b99702acd09149565f781806f63f
size 15984
Power/checkpoint-250/rng_state_4.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5571fb2fc1b413792b01ac691c759786855573992bab1d14875faccdaf8c881e
size 15984