yugh committed
Commit: 690a808
1 parent: f003a64
Initial upload of music entity classification model

Files changed:
- README.md +135 -3
- all_results.json +15 -0
- config.json +47 -0
- eval_results.json +9 -0
- model.safetensors +3 -0
- special_tokens_map.json +37 -0
- tokenizer.json +0 -0
- tokenizer_config.json +56 -0
- train_results.json +9 -0
- trainer_state.json +123 -0
- training_args.bin +3 -0
- vocab.txt +0 -0
README.md
CHANGED

---
library_name: transformers
license: apache-2.0
base_model: hfl/chinese-roberta-wwm-ext
tags:
- generated_from_trainer
metrics:
- accuracy
model-index:
- name: music_ent_classification
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# music_ent_classification

This model is a fine-tuned version of [hfl/chinese-roberta-wwm-ext](https://huggingface.co/hfl/chinese-roberta-wwm-ext) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1230
- Accuracy: 0.9662

## Model description

A Chinese sequence classifier for music entity mentions. Per `config.json`, it is a `BertForSequenceClassification` head over `hfl/chinese-roberta-wwm-ext` that assigns one of five labels: `Artist`, `Event`, `Group`, `None`, or `Work`.
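For quick experiments, the checkpoint can be loaded through the standard `transformers` pipeline. A minimal sketch; the repo id `yugh/music_ent_classification` is inferred from this upload and may need adjusting:

```python
from transformers import pipeline

# Repo id assumed from this upload; a local checkpoint path also works.
clf = pipeline("text-classification", model="yugh/music_ent_classification")

# Labels come from config.json: Artist, Event, Group, None, Work.
print(clf("周杰伦"))  # hypothetical input; e.g. [{'label': 'Artist', 'score': ...}]
```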

## Intended uses & limitations

More information needed

## Training and evaluation data

Local JSON files were used (`./data/*.train.json` for training, `./data/*.test.json` for validation; see the training command below). Per `all_results.json`, the training set has 6,900 examples and the evaluation set 888.

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training (see the `TrainingArguments` sketch after this list):
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 8
- seed: 42
- distributed_type: multi-GPU
- num_devices: 4
- total_train_batch_size: 128 (32 per device × 4 GPUs)
- total_eval_batch_size: 32 (8 per device × 4 GPUs)
- optimizer: adamw_torch with betas=(0.9, 0.999) and epsilon=1e-08; no additional optimizer arguments
- lr_scheduler_type: linear
- num_epochs: 5.0
- mixed_precision_training: Native AMP
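For reference, a sketch of the equivalent `TrainingArguments` (per-device values; the totals above come from the 4-GPU torchrun launch):

```python
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="./models/music_ent_classification",
    learning_rate=2e-5,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=8,
    seed=42,
    lr_scheduler_type="linear",
    num_train_epochs=5.0,
    fp16=True,                # Native AMP mixed precision
    logging_steps=50,
    save_strategy="epoch",
    eval_strategy="epoch",
)
```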

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.4408        | 1.0   | 54   | 0.1763          | 0.9459   |
| 0.1407        | 2.0   | 108  | 0.1221          | 0.9628   |
| 0.0762        | 3.0   | 162  | 0.1123          | 0.9640   |
| 0.0563        | 4.0   | 216  | 0.1226          | 0.9718   |
| 0.0423        | 5.0   | 270  | 0.1230          | 0.9662   |

Validation accuracy peaked at epoch 4 (0.9718); the headline numbers above are from the final epoch-5 checkpoint.
### Framework versions

- Transformers 4.57.5
- Pytorch 2.6.0+cu124
- Datasets 2.19.0
- Tokenizers 0.22.2

### Training

```bash
export WANDB_MODE=disabled           # disable W&B so no interactive login is needed
export CUDA_VISIBLE_DEVICES=0,1,2,3  # make sure all 4 V100s are visible

# Variables
model="hfl/chinese-roberta-wwm-ext"
transformers_root="transformers"

output_dir="./models/music_ent_classification"
mkdir -p ${output_dir}

# Launch with torchrun
torchrun --nproc_per_node=4 \
  ${transformers_root}/examples/pytorch/text-classification/run_classification.py \
    --model_name_or_path ${model} \
    --train_file "./data/*.train.json" \
    --validation_file "./data/*.test.json" \
    --trust_remote_code True \
    --do_train \
    --do_eval \
    --shuffle_train_dataset \
    --metric_name accuracy \
    --text_column_name sentence1 \
    --label_column_name label \
    --max_seq_length 256 \
    --per_device_train_batch_size 32 \
    --learning_rate 2e-5 \
    --num_train_epochs 5 \
    --logging_steps 50 \
    --save_strategy epoch \
    --eval_strategy epoch \
    --fp16 True \
    --output_dir ${output_dir} \
    --overwrite_output_dir
```

### Inference

```bash
# MODEL_PATH, TRAIN_DATA, INPUT_FILE and OUTPUT_DIR must be set beforehand.
# TRAIN_DATA is passed as --train_file/--validation_file only so the script can
# rebuild the label mapping; predictions are produced for INPUT_FILE alone.

# Launch distributed inference (one process per visible GPU)
torchrun --nproc_per_node=$(echo $CUDA_VISIBLE_DEVICES | tr ',' '\n' | wc -l) \
  transformers/examples/pytorch/text-classification/run_classification.py \
    --model_name_or_path "${MODEL_PATH}" \
    --train_file "${TRAIN_DATA}" \
    --validation_file "${TRAIN_DATA}" \
    --test_file "${INPUT_FILE}" \
    --text_column_name "sentence1" \
    --label_column_name "label" \
    --do_predict \
    --max_seq_length 128 \
    --per_device_eval_batch_size 256 \
    --output_dir "${OUTPUT_DIR}" \
    --fp16 True \
    --trust_remote_code True \
    --overwrite_output_dir

if [ $? -eq 0 ]; then
  echo "✅ [Infer] Inference finished."
else
  echo "❌ [Infer] Inference failed."
  exit 1
fi
```
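As a lightweight alternative to the torchrun script, the checkpoint can be queried directly in Python. A minimal sketch, assuming the fine-tuned weights live in `./models/music_ent_classification` (the `output_dir` used above; the sample inputs are hypothetical):

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Path assumed from the training script's output_dir; a Hub repo id works too.
path = "./models/music_ent_classification"
tokenizer = AutoTokenizer.from_pretrained(path)
model = AutoModelForSequenceClassification.from_pretrained(path)
model.eval()

texts = ["周杰伦", "告白气球"]  # hypothetical entity mentions
inputs = tokenizer(texts, padding=True, truncation=True,
                   max_length=256, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# config.json maps ids to labels: 0 Artist, 1 Event, 2 Group, 3 None, 4 Work
probs = logits.softmax(dim=-1)
for text, p in zip(texts, probs):
    idx = int(p.argmax())
    print(f"{text} -> {model.config.id2label[idx]} ({p[idx]:.3f})")
```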
all_results.json
ADDED

```json
{
  "epoch": 5.0,
  "eval_accuracy": 0.9662162162162162,
  "eval_loss": 0.1229998990893364,
  "eval_runtime": 0.6589,
  "eval_samples": 888,
  "eval_samples_per_second": 1347.673,
  "eval_steps_per_second": 42.494,
  "total_flos": 4546681584484352.0,
  "train_loss": 0.1416260372709345,
  "train_runtime": 63.0434,
  "train_samples": 6900,
  "train_samples_per_second": 547.242,
  "train_steps_per_second": 4.283
}
```
config.json
ADDED

```json
{
  "architectures": [
    "BertForSequenceClassification"
  ],
  "attention_probs_dropout_prob": 0.1,
  "classifier_dropout": null,
  "directionality": "bidi",
  "dtype": "float32",
  "finetuning_task": "text-classification",
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "id2label": {
    "0": "Artist",
    "1": "Event",
    "2": "Group",
    "3": "None",
    "4": "Work"
  },
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "label2id": {
    "Artist": 0,
    "Event": 1,
    "Group": 2,
    "None": 3,
    "Work": 4
  },
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "bert",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "output_past": true,
  "pad_token_id": 0,
  "pooler_fc_size": 768,
  "pooler_num_attention_heads": 12,
  "pooler_num_fc_layers": 3,
  "pooler_size_per_head": 128,
  "pooler_type": "first_token_transform",
  "position_embedding_type": "absolute",
  "problem_type": "single_label_classification",
  "transformers_version": "4.57.5",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 21128
}
```
eval_results.json
ADDED

```json
{
  "epoch": 5.0,
  "eval_accuracy": 0.9662162162162162,
  "eval_loss": 0.1229998990893364,
  "eval_runtime": 0.6589,
  "eval_samples": 888,
  "eval_samples_per_second": 1347.673,
  "eval_steps_per_second": 42.494
}
```
model.safetensors
ADDED

```
version https://git-lfs.github.com/spec/v1
oid sha256:f81dac2e12d926959a3d38ac532d770c4f94d30901a67d02e22e315b8856202e
size 409109468
```
special_tokens_map.json
ADDED

```json
{
  "cls_token": {
    "content": "[CLS]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "mask_token": {
    "content": "[MASK]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "[PAD]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "sep_token": {
    "content": "[SEP]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "[UNK]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
```
tokenizer.json
ADDED

The diff for this file is too large to render. See raw diff.
tokenizer_config.json
ADDED

```json
{
  "added_tokens_decoder": {
    "0": {
      "content": "[PAD]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "100": {
      "content": "[UNK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "101": {
      "content": "[CLS]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "102": {
      "content": "[SEP]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "103": {
      "content": "[MASK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "clean_up_tokenization_spaces": false,
  "cls_token": "[CLS]",
  "do_lower_case": true,
  "extra_special_tokens": {},
  "mask_token": "[MASK]",
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "strip_accents": null,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "BertTokenizer",
  "unk_token": "[UNK]"
}
```
train_results.json
ADDED

```json
{
  "epoch": 5.0,
  "total_flos": 4546681584484352.0,
  "train_loss": 0.1416260372709345,
  "train_runtime": 63.0434,
  "train_samples": 6900,
  "train_samples_per_second": 547.242,
  "train_steps_per_second": 4.283
}
```
trainer_state.json
ADDED

```json
{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 270,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.9259259259259259,
      "grad_norm": 2.8517868518829346,
      "learning_rate": 1.6370370370370374e-05,
      "loss": 0.4408,
      "step": 50
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9459459459459459,
      "eval_loss": 0.1763220876455307,
      "eval_runtime": 0.5083,
      "eval_samples_per_second": 1747.017,
      "eval_steps_per_second": 55.086,
      "step": 54
    },
    {
      "epoch": 1.8518518518518519,
      "grad_norm": 1.7455177307128906,
      "learning_rate": 1.2666666666666667e-05,
      "loss": 0.1407,
      "step": 100
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9628378378378378,
      "eval_loss": 0.1221194714307785,
      "eval_runtime": 0.6023,
      "eval_samples_per_second": 1474.32,
      "eval_steps_per_second": 46.488,
      "step": 108
    },
    {
      "epoch": 2.7777777777777777,
      "grad_norm": 3.1013050079345703,
      "learning_rate": 8.962962962962963e-06,
      "loss": 0.0762,
      "step": 150
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.963963963963964,
      "eval_loss": 0.11229722201824188,
      "eval_runtime": 0.6443,
      "eval_samples_per_second": 1378.158,
      "eval_steps_per_second": 43.455,
      "step": 162
    },
    {
      "epoch": 3.7037037037037037,
      "grad_norm": 1.3716905117034912,
      "learning_rate": 5.259259259259259e-06,
      "loss": 0.0563,
      "step": 200
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9718468468468469,
      "eval_loss": 0.12264640629291534,
      "eval_runtime": 0.6481,
      "eval_samples_per_second": 1370.16,
      "eval_steps_per_second": 43.203,
      "step": 216
    },
    {
      "epoch": 4.62962962962963,
      "grad_norm": 0.5899084806442261,
      "learning_rate": 1.5555555555555558e-06,
      "loss": 0.0423,
      "step": 250
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9662162162162162,
      "eval_loss": 0.1229998990893364,
      "eval_runtime": 0.6595,
      "eval_samples_per_second": 1346.563,
      "eval_steps_per_second": 42.459,
      "step": 270
    },
    {
      "epoch": 5.0,
      "step": 270,
      "total_flos": 4546681584484352.0,
      "train_loss": 0.1416260372709345,
      "train_runtime": 63.0434,
      "train_samples_per_second": 547.242,
      "train_steps_per_second": 4.283
    }
  ],
  "logging_steps": 50,
  "max_steps": 270,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4546681584484352.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}
```
training_args.bin
ADDED

```
version https://git-lfs.github.com/spec/v1
oid sha256:666711e834597c1913fb306f6b190201f3dff2fb420c5ad8cb7dd0f8dab59c59
size 5496
```
vocab.txt
ADDED

The diff for this file is too large to render. See raw diff.