ZhaoCamera committed
Commit 0f6053f · verified · 1 Parent(s): e742225

Upload 114 files

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. .gitattributes +8 -0
  2. Qwen2.5-7B-Instruct-lora-2/README.md +59 -0
  3. Qwen2.5-7B-Instruct-lora-2/adapter_config.json +39 -0
  4. Qwen2.5-7B-Instruct-lora-2/adapter_model.safetensors +3 -0
  5. Qwen2.5-7B-Instruct-lora-2/added_tokens.json +24 -0
  6. Qwen2.5-7B-Instruct-lora-2/all_results.json +13 -0
  7. Qwen2.5-7B-Instruct-lora-2/checkpoint-1000/README.md +202 -0
  8. Qwen2.5-7B-Instruct-lora-2/checkpoint-1000/adapter_config.json +39 -0
  9. Qwen2.5-7B-Instruct-lora-2/checkpoint-1000/adapter_model.safetensors +3 -0
  10. Qwen2.5-7B-Instruct-lora-2/checkpoint-1000/added_tokens.json +24 -0
  11. Qwen2.5-7B-Instruct-lora-2/checkpoint-1000/merges.txt +0 -0
  12. Qwen2.5-7B-Instruct-lora-2/checkpoint-1000/optimizer.pt +3 -0
  13. Qwen2.5-7B-Instruct-lora-2/checkpoint-1000/rng_state.pth +3 -0
  14. Qwen2.5-7B-Instruct-lora-2/checkpoint-1000/scheduler.pt +3 -0
  15. Qwen2.5-7B-Instruct-lora-2/checkpoint-1000/special_tokens_map.json +31 -0
  16. Qwen2.5-7B-Instruct-lora-2/checkpoint-1000/tokenizer.json +3 -0
  17. Qwen2.5-7B-Instruct-lora-2/checkpoint-1000/tokenizer_config.json +209 -0
  18. Qwen2.5-7B-Instruct-lora-2/checkpoint-1000/trainer_state.json +752 -0
  19. Qwen2.5-7B-Instruct-lora-2/checkpoint-1000/training_args.bin +3 -0
  20. Qwen2.5-7B-Instruct-lora-2/checkpoint-1000/vocab.json +0 -0
  21. Qwen2.5-7B-Instruct-lora-2/checkpoint-1500/README.md +202 -0
  22. Qwen2.5-7B-Instruct-lora-2/checkpoint-1500/adapter_config.json +39 -0
  23. Qwen2.5-7B-Instruct-lora-2/checkpoint-1500/adapter_model.safetensors +3 -0
  24. Qwen2.5-7B-Instruct-lora-2/checkpoint-1500/added_tokens.json +24 -0
  25. Qwen2.5-7B-Instruct-lora-2/checkpoint-1500/merges.txt +0 -0
  26. Qwen2.5-7B-Instruct-lora-2/checkpoint-1500/optimizer.pt +3 -0
  27. Qwen2.5-7B-Instruct-lora-2/checkpoint-1500/rng_state.pth +3 -0
  28. Qwen2.5-7B-Instruct-lora-2/checkpoint-1500/scheduler.pt +3 -0
  29. Qwen2.5-7B-Instruct-lora-2/checkpoint-1500/special_tokens_map.json +31 -0
  30. Qwen2.5-7B-Instruct-lora-2/checkpoint-1500/tokenizer.json +3 -0
  31. Qwen2.5-7B-Instruct-lora-2/checkpoint-1500/tokenizer_config.json +209 -0
  32. Qwen2.5-7B-Instruct-lora-2/checkpoint-1500/trainer_state.json +1111 -0
  33. Qwen2.5-7B-Instruct-lora-2/checkpoint-1500/training_args.bin +3 -0
  34. Qwen2.5-7B-Instruct-lora-2/checkpoint-1500/vocab.json +0 -0
  35. Qwen2.5-7B-Instruct-lora-2/checkpoint-2000/README.md +202 -0
  36. Qwen2.5-7B-Instruct-lora-2/checkpoint-2000/adapter_config.json +39 -0
  37. Qwen2.5-7B-Instruct-lora-2/checkpoint-2000/adapter_model.safetensors +3 -0
  38. Qwen2.5-7B-Instruct-lora-2/checkpoint-2000/added_tokens.json +24 -0
  39. Qwen2.5-7B-Instruct-lora-2/checkpoint-2000/merges.txt +0 -0
  40. Qwen2.5-7B-Instruct-lora-2/checkpoint-2000/optimizer.pt +3 -0
  41. Qwen2.5-7B-Instruct-lora-2/checkpoint-2000/rng_state.pth +3 -0
  42. Qwen2.5-7B-Instruct-lora-2/checkpoint-2000/scheduler.pt +3 -0
  43. Qwen2.5-7B-Instruct-lora-2/checkpoint-2000/special_tokens_map.json +31 -0
  44. Qwen2.5-7B-Instruct-lora-2/checkpoint-2000/tokenizer.json +3 -0
  45. Qwen2.5-7B-Instruct-lora-2/checkpoint-2000/tokenizer_config.json +209 -0
  46. Qwen2.5-7B-Instruct-lora-2/checkpoint-2000/trainer_state.json +1470 -0
  47. Qwen2.5-7B-Instruct-lora-2/checkpoint-2000/training_args.bin +3 -0
  48. Qwen2.5-7B-Instruct-lora-2/checkpoint-2000/vocab.json +0 -0
  49. Qwen2.5-7B-Instruct-lora-2/checkpoint-2500/README.md +202 -0
  50. Qwen2.5-7B-Instruct-lora-2/checkpoint-2500/adapter_config.json +39 -0
.gitattributes CHANGED
@@ -39,3 +39,11 @@ Llama-3.1-8B-Instruct-lora/checkpoint-93/tokenizer.json filter=lfs diff=lfs merg
  Llama-3.1-8B-Instruct-lora/tokenizer.json filter=lfs diff=lfs merge=lfs -text
  Llama3.1-8B-Chinese-Chat-lora/checkpoint-93/tokenizer.json filter=lfs diff=lfs merge=lfs -text
  Llama3.1-8B-Chinese-Chat-lora/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ Qwen2.5-7B-Instruct-lora-2/checkpoint-1000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ Qwen2.5-7B-Instruct-lora-2/checkpoint-1500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ Qwen2.5-7B-Instruct-lora-2/checkpoint-2000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ Qwen2.5-7B-Instruct-lora-2/checkpoint-2500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ Qwen2.5-7B-Instruct-lora-2/checkpoint-3000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ Qwen2.5-7B-Instruct-lora-2/checkpoint-3036/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ Qwen2.5-7B-Instruct-lora-2/checkpoint-500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ Qwen2.5-7B-Instruct-lora-2/tokenizer.json filter=lfs diff=lfs merge=lfs -text
Qwen2.5-7B-Instruct-lora-2/README.md ADDED
@@ -0,0 +1,59 @@
+ ---
+ library_name: peft
+ license: other
+ base_model: /data/zhaoguoyu/Experiments/mgtd-sys/detector/ckpt/Qwen2.5-7B-Instruct
+ tags:
+ - llama-factory
+ - lora
+ - generated_from_trainer
+ model-index:
+ - name: Qwen2.5-7B-Instruct-lora-2
+ results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # Qwen2.5-7B-Instruct-lora-2
+
+ This model is a fine-tuned version of [/data/zhaoguoyu/Experiments/mgtd-sys/detector/ckpt/Qwen2.5-7B-Instruct](https://huggingface.co//data/zhaoguoyu/Experiments/mgtd-sys/detector/ckpt/Qwen2.5-7B-Instruct) on the nlpcc25_task1_train dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 4
+ - eval_batch_size: 4
+ - seed: 42
+ - gradient_accumulation_steps: 8
+ - total_train_batch_size: 32
+ - optimizer: Use adamw_torch with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 3.0
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - PEFT 0.15.0
+ - Transformers 4.50.0
+ - Pytorch 2.6.0+cu124
+ - Datasets 3.4.1
+ - Tokenizers 0.21.0
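The card above describes a LoRA adapter trained with LLaMA-Factory on Qwen2.5-7B-Instruct (effective batch size 32 = 4 per device × 8 gradient-accumulation steps). A minimal sketch of loading such an adapter for inference with the framework versions listed, assuming the public Qwen/Qwen2.5-7B-Instruct checkpoint stands in for the private base_model path:

```python
# Sketch: attach the uploaded LoRA adapter to its base model with peft.
# "Qwen/Qwen2.5-7B-Instruct" is an assumed stand-in for the local base path
# recorded in the card; "Qwen2.5-7B-Instruct-lora-2" is the adapter directory
# from this commit.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-7B-Instruct", torch_dtype="auto", device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen2.5-7B-Instruct-lora-2")

# PeftModel reads adapter_config.json and adapter_model.safetensors from the directory.
model = PeftModel.from_pretrained(base, "Qwen2.5-7B-Instruct-lora-2")
model.eval()
```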
Qwen2.5-7B-Instruct-lora-2/adapter_config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "/data/zhaoguoyu/Experiments/mgtd-sys/detector/ckpt/Qwen2.5-7B-Instruct",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "k_proj",
+ "v_proj",
+ "up_proj",
+ "down_proj",
+ "o_proj",
+ "gate_proj",
+ "q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
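For reference, the adapter_config.json above corresponds roughly to the following peft LoraConfig: rank-8 LoRA over all seven attention and MLP projections, with alpha/r = 16/8 = 2.0 scaling. A sketch showing only the non-default fields, with names as in PEFT 0.15:

```python
from peft import LoraConfig

# Rank-8 LoRA on every attention and MLP projection of the Qwen2.5 blocks.
lora_config = LoraConfig(
    r=8,
    lora_alpha=16,
    lora_dropout=0.0,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                    "gate_proj", "up_proj", "down_proj"],
)
```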
Qwen2.5-7B-Instruct-lora-2/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d8402ed41b02a34fcdc1ecd7ce937af493a89532e451516627b0222e9652253e
+ size 80792096
Qwen2.5-7B-Instruct-lora-2/added_tokens.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "</tool_call>": 151658,
+ "<tool_call>": 151657,
+ "<|box_end|>": 151649,
+ "<|box_start|>": 151648,
+ "<|endoftext|>": 151643,
+ "<|file_sep|>": 151664,
+ "<|fim_middle|>": 151660,
+ "<|fim_pad|>": 151662,
+ "<|fim_prefix|>": 151659,
+ "<|fim_suffix|>": 151661,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644,
+ "<|image_pad|>": 151655,
+ "<|object_ref_end|>": 151647,
+ "<|object_ref_start|>": 151646,
+ "<|quad_end|>": 151651,
+ "<|quad_start|>": 151650,
+ "<|repo_name|>": 151663,
+ "<|video_pad|>": 151656,
+ "<|vision_end|>": 151653,
+ "<|vision_pad|>": 151654,
+ "<|vision_start|>": 151652
+ }
Qwen2.5-7B-Instruct-lora-2/all_results.json ADDED
@@ -0,0 +1,13 @@
+ {
+ "epoch": 2.9975308641975307,
+ "eval_nlpcc25_task1_dev_accuracy": 0.9996825396825397,
+ "eval_nlpcc25_task1_dev_loss": 0.0018848935142159462,
+ "eval_nlpcc25_task1_dev_runtime": 2528.4122,
+ "eval_nlpcc25_task1_dev_samples_per_second": 1.107,
+ "eval_nlpcc25_task1_dev_steps_per_second": 0.277,
+ "total_flos": 1.7445311627903631e+18,
+ "train_loss": 0.07892912521831204,
+ "train_runtime": 224440.5917,
+ "train_samples_per_second": 0.433,
+ "train_steps_per_second": 0.014
+ }
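all_results.json holds the final aggregate train and dev metrics. A small sketch of reading them back; the derived figure is plain arithmetic on the values above, and the path assumes the repository layout shown in this commit:

```python
import json

with open("Qwen2.5-7B-Instruct-lora-2/all_results.json") as f:
    results = json.load(f)

print(results["eval_nlpcc25_task1_dev_accuracy"])  # 0.9996825396825397
print(results["train_loss"])                       # 0.07892912521831204
print(results["train_runtime"] / 3600)             # ~62.3 hours of training
```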
Qwen2.5-7B-Instruct-lora-2/checkpoint-1000/README.md ADDED
@@ -0,0 +1,202 @@
+ ---
+ base_model: /data/zhaoguoyu/Experiments/mgtd-sys/detector/ckpt/Qwen2.5-7B-Instruct
+ library_name: peft
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.15.0
Qwen2.5-7B-Instruct-lora-2/checkpoint-1000/adapter_config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "/data/zhaoguoyu/Experiments/mgtd-sys/detector/ckpt/Qwen2.5-7B-Instruct",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "k_proj",
+ "v_proj",
+ "up_proj",
+ "down_proj",
+ "o_proj",
+ "gate_proj",
+ "q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
Qwen2.5-7B-Instruct-lora-2/checkpoint-1000/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:07a0cd322b506ab56eb0a762c21b8071538ab563c1e744e68e713c7801acee0d
+ size 80792096
Qwen2.5-7B-Instruct-lora-2/checkpoint-1000/added_tokens.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "</tool_call>": 151658,
+ "<tool_call>": 151657,
+ "<|box_end|>": 151649,
+ "<|box_start|>": 151648,
+ "<|endoftext|>": 151643,
+ "<|file_sep|>": 151664,
+ "<|fim_middle|>": 151660,
+ "<|fim_pad|>": 151662,
+ "<|fim_prefix|>": 151659,
+ "<|fim_suffix|>": 151661,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644,
+ "<|image_pad|>": 151655,
+ "<|object_ref_end|>": 151647,
+ "<|object_ref_start|>": 151646,
+ "<|quad_end|>": 151651,
+ "<|quad_start|>": 151650,
+ "<|repo_name|>": 151663,
+ "<|video_pad|>": 151656,
+ "<|vision_end|>": 151653,
+ "<|vision_pad|>": 151654,
+ "<|vision_start|>": 151652
+ }
Qwen2.5-7B-Instruct-lora-2/checkpoint-1000/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
Qwen2.5-7B-Instruct-lora-2/checkpoint-1000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b492af21df6ec85d758309ada19101d73ff7e6a41717161ed0e54a0a43da2426
+ size 161810282
Qwen2.5-7B-Instruct-lora-2/checkpoint-1000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9196a1e708bf24d6abba41cce3f8558820acc3e50f9394c5955e29eb41ffea3d
+ size 14244
Qwen2.5-7B-Instruct-lora-2/checkpoint-1000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5918d346fe9060e659615446cea815a9b715c8fa69cf77cf3b71de31c3c3de85
+ size 1064
Qwen2.5-7B-Instruct-lora-2/checkpoint-1000/special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "eos_token": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
Qwen2.5-7B-Instruct-lora-2/checkpoint-1000/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa
+ size 11421896
Qwen2.5-7B-Instruct-lora-2/checkpoint-1000/tokenizer_config.json ADDED
@@ -0,0 +1,209 @@
+ {
+ "add_bos_token": false,
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|im_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151645": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151646": {
+ "content": "<|object_ref_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|object_ref_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151648": {
+ "content": "<|box_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151649": {
+ "content": "<|box_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "<tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "</tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "bos_token": null,
+ "chat_template": "{%- if tools %}\n    {{- '<|im_start|>system\\n' }}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- messages[0]['content'] }}\n    {%- else %}\n        {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n    {%- endif %}\n    {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n    {%- for tool in tools %}\n        {{- \"\\n\" }}\n        {{- tool | tojson }}\n    {%- endfor %}\n    {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n    {%- if messages[0]['role'] == 'system' %}\n        {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n    {%- else %}\n        {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n    {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n    {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n        {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n    {%- elif message.role == \"assistant\" %}\n        {{- '<|im_start|>' + message.role }}\n        {%- if message.content %}\n            {{- '\\n' + message.content }}\n        {%- endif %}\n        {%- for tool_call in message.tool_calls %}\n            {%- if tool_call.function is defined %}\n                {%- set tool_call = tool_call.function %}\n            {%- endif %}\n            {{- '\\n<tool_call>\\n{\"name\": \"' }}\n            {{- tool_call.name }}\n            {{- '\", \"arguments\": ' }}\n            {{- tool_call.arguments | tojson }}\n            {{- '}\\n</tool_call>' }}\n        {%- endfor %}\n        {{- '<|im_end|>\\n' }}\n    {%- elif message.role == \"tool\" %}\n        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n            {{- '<|im_start|>user' }}\n        {%- endif %}\n        {{- '\\n<tool_response>\\n' }}\n        {{- message.content }}\n        {{- '\\n</tool_response>' }}\n        {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n            {{- '<|im_end|>\\n' }}\n        {%- endif %}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|im_end|>",
+ "errors": "replace",
+ "extra_special_tokens": {},
+ "model_max_length": 131072,
+ "pad_token": "<|endoftext|>",
+ "padding_side": "right",
+ "split_special_tokens": false,
+ "tokenizer_class": "Qwen2Tokenizer",
+ "unk_token": null
+ }
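The long chat_template entry above is a Jinja template that transformers renders through apply_chat_template; it wraps each turn in <|im_start|>/<|im_end|> markers and injects the default Qwen system prompt when none is given. A minimal sketch, assuming the checkpoint directory is available locally:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Qwen2.5-7B-Instruct-lora-2/checkpoint-1000")
messages = [{"role": "user", "content": "Hello!"}]

# Render the prompt string exactly as the template above dictates.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# <|im_start|>system
# You are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>
# <|im_start|>user
# Hello!<|im_end|>
# <|im_start|>assistant
```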
Qwen2.5-7B-Instruct-lora-2/checkpoint-1000/trainer_state.json ADDED
@@ -0,0 +1,752 @@
+ {
+ "best_global_step": null,
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.9876543209876543,
+ "eval_steps": 500,
+ "global_step": 1000,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.009876543209876543,
+ "grad_norm": 7.599112033843994,
+ "learning_rate": 1.6447368421052632e-06,
+ "loss": 4.2059,
+ "step": 10
+ },
+ {
+ "epoch": 0.019753086419753086,
+ "grad_norm": 6.403433322906494,
+ "learning_rate": 3.2894736842105265e-06,
+ "loss": 4.1463,
+ "step": 20
+ },
+ {
+ "epoch": 0.02962962962962963,
+ "grad_norm": 9.776185989379883,
+ "learning_rate": 4.9342105263157895e-06,
+ "loss": 4.1087,
+ "step": 30
+ },
+ {
+ "epoch": 0.03950617283950617,
+ "grad_norm": 5.836405277252197,
+ "learning_rate": 6.578947368421053e-06,
+ "loss": 3.7657,
+ "step": 40
+ },
+ {
+ "epoch": 0.04938271604938271,
+ "grad_norm": 4.362349033355713,
+ "learning_rate": 8.223684210526317e-06,
+ "loss": 3.2134,
+ "step": 50
+ },
+ {
+ "epoch": 0.05925925925925926,
+ "grad_norm": 5.1398701667785645,
+ "learning_rate": 9.868421052631579e-06,
+ "loss": 2.6238,
+ "step": 60
+ },
+ {
+ "epoch": 0.0691358024691358,
+ "grad_norm": 8.642521858215332,
+ "learning_rate": 1.1513157894736843e-05,
+ "loss": 1.5929,
+ "step": 70
+ },
+ {
+ "epoch": 0.07901234567901234,
+ "grad_norm": 1.0219271183013916,
+ "learning_rate": 1.3157894736842106e-05,
+ "loss": 0.1491,
+ "step": 80
+ },
+ {
+ "epoch": 0.08888888888888889,
+ "grad_norm": 0.4035845100879669,
+ "learning_rate": 1.4802631578947368e-05,
+ "loss": 0.0206,
+ "step": 90
+ },
+ {
+ "epoch": 0.09876543209876543,
+ "grad_norm": 0.6767832636833191,
+ "learning_rate": 1.6447368421052635e-05,
+ "loss": 0.0073,
+ "step": 100
+ },
+ {
+ "epoch": 0.10864197530864197,
+ "grad_norm": 0.014764077961444855,
+ "learning_rate": 1.8092105263157896e-05,
+ "loss": 0.0011,
+ "step": 110
+ },
+ {
+ "epoch": 0.11851851851851852,
+ "grad_norm": 0.15152056515216827,
+ "learning_rate": 1.9736842105263158e-05,
+ "loss": 0.0005,
+ "step": 120
+ },
+ {
+ "epoch": 0.12839506172839507,
+ "grad_norm": 0.06782546639442444,
+ "learning_rate": 2.1381578947368423e-05,
+ "loss": 0.0014,
+ "step": 130
+ },
+ {
+ "epoch": 0.1382716049382716,
+ "grad_norm": 0.03198217228055,
+ "learning_rate": 2.3026315789473685e-05,
+ "loss": 0.0009,
+ "step": 140
+ },
+ {
+ "epoch": 0.14814814814814814,
+ "grad_norm": 0.02691480703651905,
+ "learning_rate": 2.4671052631578947e-05,
+ "loss": 0.0024,
+ "step": 150
+ },
+ {
+ "epoch": 0.1580246913580247,
+ "grad_norm": 0.0057847509160637856,
+ "learning_rate": 2.6315789473684212e-05,
+ "loss": 0.0001,
+ "step": 160
+ },
+ {
+ "epoch": 0.16790123456790124,
+ "grad_norm": 0.003663571085780859,
+ "learning_rate": 2.7960526315789477e-05,
+ "loss": 0.0002,
+ "step": 170
+ },
+ {
+ "epoch": 0.17777777777777778,
+ "grad_norm": 0.004447426181286573,
+ "learning_rate": 2.9605263157894735e-05,
+ "loss": 0.0003,
+ "step": 180
+ },
+ {
+ "epoch": 0.18765432098765433,
+ "grad_norm": 0.10373878479003906,
+ "learning_rate": 3.125e-05,
+ "loss": 0.0006,
+ "step": 190
+ },
+ {
+ "epoch": 0.19753086419753085,
+ "grad_norm": 0.002619905164465308,
+ "learning_rate": 3.289473684210527e-05,
+ "loss": 0.0006,
+ "step": 200
+ },
+ {
+ "epoch": 0.2074074074074074,
+ "grad_norm": 0.007042820565402508,
+ "learning_rate": 3.4539473684210524e-05,
+ "loss": 0.0015,
+ "step": 210
+ },
+ {
+ "epoch": 0.21728395061728395,
+ "grad_norm": 0.0016206403961405158,
+ "learning_rate": 3.618421052631579e-05,
+ "loss": 0.0049,
+ "step": 220
+ },
+ {
+ "epoch": 0.2271604938271605,
+ "grad_norm": 0.001877307309769094,
+ "learning_rate": 3.7828947368421054e-05,
+ "loss": 0.0001,
+ "step": 230
+ },
+ {
+ "epoch": 0.23703703703703705,
+ "grad_norm": 0.003795365337282419,
+ "learning_rate": 3.9473684210526316e-05,
+ "loss": 0.0001,
+ "step": 240
+ },
+ {
+ "epoch": 0.24691358024691357,
+ "grad_norm": 0.008357529528439045,
+ "learning_rate": 4.111842105263158e-05,
+ "loss": 0.004,
+ "step": 250
+ },
+ {
+ "epoch": 0.25679012345679014,
+ "grad_norm": 0.026294540613889694,
+ "learning_rate": 4.2763157894736847e-05,
+ "loss": 0.0002,
+ "step": 260
+ },
+ {
+ "epoch": 0.26666666666666666,
+ "grad_norm": 0.08903225511312485,
+ "learning_rate": 4.440789473684211e-05,
+ "loss": 0.0005,
+ "step": 270
+ },
+ {
+ "epoch": 0.2765432098765432,
+ "grad_norm": 0.05173858627676964,
+ "learning_rate": 4.605263157894737e-05,
+ "loss": 0.0003,
+ "step": 280
+ },
+ {
+ "epoch": 0.28641975308641976,
+ "grad_norm": 0.036562711000442505,
+ "learning_rate": 4.769736842105263e-05,
+ "loss": 0.0012,
+ "step": 290
+ },
+ {
+ "epoch": 0.2962962962962963,
+ "grad_norm": 0.008022695779800415,
+ "learning_rate": 4.9342105263157894e-05,
+ "loss": 0.0036,
+ "step": 300
+ },
+ {
+ "epoch": 0.30617283950617286,
+ "grad_norm": 0.0021413813810795546,
+ "learning_rate": 4.999940495590975e-05,
+ "loss": 0.0006,
+ "step": 310
+ },
+ {
+ "epoch": 0.3160493827160494,
+ "grad_norm": 0.015379426069557667,
+ "learning_rate": 4.999576867793816e-05,
+ "loss": 0.0092,
+ "step": 320
+ },
+ {
+ "epoch": 0.32592592592592595,
+ "grad_norm": 0.003894766792654991,
+ "learning_rate": 4.9988827182291254e-05,
+ "loss": 0.001,
+ "step": 330
+ },
+ {
+ "epoch": 0.3358024691358025,
+ "grad_norm": 0.0016231742920354009,
+ "learning_rate": 4.997858138685056e-05,
+ "loss": 0.0001,
+ "step": 340
+ },
+ {
+ "epoch": 0.345679012345679,
+ "grad_norm": 0.0015260468935593963,
+ "learning_rate": 4.996503264642876e-05,
+ "loss": 0.0016,
+ "step": 350
+ },
+ {
+ "epoch": 0.35555555555555557,
+ "grad_norm": 0.03330058977007866,
+ "learning_rate": 4.994818275259052e-05,
+ "loss": 0.0013,
+ "step": 360
+ },
+ {
+ "epoch": 0.3654320987654321,
+ "grad_norm": 0.002741268603131175,
+ "learning_rate": 4.992803393341563e-05,
+ "loss": 0.0017,
+ "step": 370
+ },
+ {
+ "epoch": 0.37530864197530867,
+ "grad_norm": 0.004990574903786182,
+ "learning_rate": 4.9904588853204365e-05,
+ "loss": 0.0001,
+ "step": 380
+ },
+ {
+ "epoch": 0.3851851851851852,
+ "grad_norm": 0.013514043763279915,
+ "learning_rate": 4.9877850612125173e-05,
+ "loss": 0.0001,
+ "step": 390
+ },
+ {
+ "epoch": 0.3950617283950617,
+ "grad_norm": 0.0015880317660048604,
+ "learning_rate": 4.984782274580476e-05,
+ "loss": 0.0001,
+ "step": 400
+ },
+ {
+ "epoch": 0.4049382716049383,
+ "grad_norm": 0.002702023135498166,
+ "learning_rate": 4.981450922486053e-05,
+ "loss": 0.0001,
+ "step": 410
+ },
+ {
+ "epoch": 0.4148148148148148,
+ "grad_norm": 0.0020927605219185352,
+ "learning_rate": 4.977791445437559e-05,
+ "loss": 0.0001,
+ "step": 420
+ },
+ {
+ "epoch": 0.4246913580246914,
+ "grad_norm": 0.0007626357255503535,
+ "learning_rate": 4.973804327331625e-05,
+ "loss": 0.0002,
+ "step": 430
+ },
+ {
+ "epoch": 0.4345679012345679,
+ "grad_norm": 0.0026151000056415796,
+ "learning_rate": 4.969490095389213e-05,
+ "loss": 0.0,
+ "step": 440
+ },
+ {
+ "epoch": 0.4444444444444444,
+ "grad_norm": 0.0007740291184745729,
+ "learning_rate": 4.964849320085905e-05,
+ "loss": 0.0,
+ "step": 450
+ },
+ {
+ "epoch": 0.454320987654321,
+ "grad_norm": 0.0005287025705911219,
+ "learning_rate": 4.9598826150764656e-05,
+ "loss": 0.0,
+ "step": 460
+ },
+ {
+ "epoch": 0.4641975308641975,
+ "grad_norm": 0.0004778858565259725,
+ "learning_rate": 4.9545906371137e-05,
+ "loss": 0.007,
+ "step": 470
+ },
+ {
+ "epoch": 0.4740740740740741,
+ "grad_norm": 0.0007034554146230221,
+ "learning_rate": 4.9489740859616094e-05,
+ "loss": 0.0,
+ "step": 480
+ },
+ {
+ "epoch": 0.4839506172839506,
+ "grad_norm": 0.0006262129172682762,
+ "learning_rate": 4.9430337043028604e-05,
+ "loss": 0.0,
+ "step": 490
+ },
+ {
+ "epoch": 0.49382716049382713,
+ "grad_norm": 0.00045443352428264916,
+ "learning_rate": 4.9367702776405806e-05,
+ "loss": 0.0,
+ "step": 500
+ },
+ {
+ "epoch": 0.49382716049382713,
+ "eval_nlpcc25_task1_dev_accuracy": 0.9995238095238096,
+ "eval_nlpcc25_task1_dev_loss": 0.0022831459064036608,
+ "eval_nlpcc25_task1_dev_runtime": 2528.8573,
+ "eval_nlpcc25_task1_dev_samples_per_second": 1.107,
+ "eval_nlpcc25_task1_dev_steps_per_second": 0.277,
+ "step": 500
+ },
+ {
+ "epoch": 0.5037037037037037,
+ "grad_norm": 0.0003733730991370976,
+ "learning_rate": 4.930184634194488e-05,
+ "loss": 0.0,
+ "step": 510
+ },
+ {
+ "epoch": 0.5135802469135803,
+ "grad_norm": 0.0003028757928404957,
+ "learning_rate": 4.923277644791378e-05,
+ "loss": 0.0,
+ "step": 520
+ },
+ {
+ "epoch": 0.5234567901234568,
+ "grad_norm": 0.00028728952747769654,
+ "learning_rate": 4.9160502227499704e-05,
+ "loss": 0.0,
+ "step": 530
+ },
+ {
+ "epoch": 0.5333333333333333,
+ "grad_norm": 0.0003644278913270682,
+ "learning_rate": 4.908503323760143e-05,
+ "loss": 0.0,
+ "step": 540
+ },
+ {
+ "epoch": 0.5432098765432098,
+ "grad_norm": 0.0005966073367744684,
+ "learning_rate": 4.900637945756554e-05,
+ "loss": 0.0,
+ "step": 550
+ },
+ {
+ "epoch": 0.5530864197530864,
+ "grad_norm": 0.000367969973012805,
+ "learning_rate": 4.892455128786692e-05,
+ "loss": 0.0,
+ "step": 560
+ },
+ {
+ "epoch": 0.562962962962963,
+ "grad_norm": 0.0002780740906018764,
+ "learning_rate": 4.8839559548733436e-05,
+ "loss": 0.0077,
+ "step": 570
+ },
+ {
+ "epoch": 0.5728395061728395,
+ "grad_norm": 0.00031342540751211345,
+ "learning_rate": 4.875141547871519e-05,
+ "loss": 0.0,
+ "step": 580
+ },
+ {
+ "epoch": 0.582716049382716,
+ "grad_norm": 0.000353014562278986,
+ "learning_rate": 4.866013073319841e-05,
+ "loss": 0.0,
+ "step": 590
+ },
+ {
+ "epoch": 0.5925925925925926,
+ "grad_norm": 0.00029066085699014366,
+ "learning_rate": 4.856571738286426e-05,
+ "loss": 0.0,
+ "step": 600
+ },
+ {
+ "epoch": 0.6024691358024692,
+ "grad_norm": 0.00024000390840228647,
+ "learning_rate": 4.8468187912092744e-05,
+ "loss": 0.0,
+ "step": 610
+ },
+ {
+ "epoch": 0.6123456790123457,
+ "grad_norm": 0.0003632376901805401,
+ "learning_rate": 4.836755521731183e-05,
+ "loss": 0.0,
+ "step": 620
+ },
+ {
+ "epoch": 0.6222222222222222,
+ "grad_norm": 0.00029751085094176233,
+ "learning_rate": 4.826383260529221e-05,
+ "loss": 0.0002,
+ "step": 630
+ },
+ {
+ "epoch": 0.6320987654320988,
+ "grad_norm": 3.190973997116089,
+ "learning_rate": 4.815703379138765e-05,
+ "loss": 0.0017,
+ "step": 640
+ },
+ {
+ "epoch": 0.6419753086419753,
+ "grad_norm": 0.18289895355701447,
+ "learning_rate": 4.804717289772147e-05,
+ "loss": 0.0043,
+ "step": 650
+ },
+ {
+ "epoch": 0.6518518518518519,
+ "grad_norm": 0.2459922581911087,
+ "learning_rate": 4.7934264451319105e-05,
+ "loss": 0.003,
+ "step": 660
+ },
+ {
+ "epoch": 0.6617283950617284,
+ "grad_norm": 0.004725860431790352,
+ "learning_rate": 4.7818323382187214e-05,
+ "loss": 0.0002,
+ "step": 670
+ },
+ {
+ "epoch": 0.671604938271605,
+ "grad_norm": 0.004160716664046049,
+ "learning_rate": 4.769936502133946e-05,
+ "loss": 0.0002,
+ "step": 680
+ },
+ {
+ "epoch": 0.6814814814814815,
+ "grad_norm": 0.002101948019117117,
+ "learning_rate": 4.7577405098769256e-05,
+ "loss": 0.0001,
+ "step": 690
+ },
+ {
+ "epoch": 0.691358024691358,
+ "grad_norm": 0.0015966896899044514,
+ "learning_rate": 4.74524597413698e-05,
+ "loss": 0.0001,
+ "step": 700
+ },
+ {
+ "epoch": 0.7012345679012346,
+ "grad_norm": 0.0010343171888962388,
+ "learning_rate": 4.732454547080159e-05,
+ "loss": 0.0,
+ "step": 710
+ },
+ {
+ "epoch": 0.7111111111111111,
+ "grad_norm": 0.0007301874575205147,
+ "learning_rate": 4.7193679201307705e-05,
+ "loss": 0.0,
+ "step": 720
+ },
+ {
+ "epoch": 0.7209876543209877,
+ "grad_norm": 0.0008054127101786435,
+ "learning_rate": 4.705987823747731e-05,
+ "loss": 0.0,
+ "step": 730
+ },
+ {
+ "epoch": 0.7308641975308642,
+ "grad_norm": 0.0006862932350486517,
+ "learning_rate": 4.692316027195733e-05,
+ "loss": 0.0001,
+ "step": 740
+ },
+ {
+ "epoch": 0.7407407407407407,
+ "grad_norm": 0.0005865858984179795,
+ "learning_rate": 4.678354338311306e-05,
+ "loss": 0.0,
+ "step": 750
+ },
+ {
+ "epoch": 0.7506172839506173,
+ "grad_norm": 0.0004720942524727434,
+ "learning_rate": 4.6641046032637516e-05,
+ "loss": 0.0,
+ "step": 760
+ },
+ {
+ "epoch": 0.7604938271604939,
+ "grad_norm": 0.0004310516524128616,
+ "learning_rate": 4.6495687063110325e-05,
+ "loss": 0.0,
+ "step": 770
+ },
+ {
+ "epoch": 0.7703703703703704,
+ "grad_norm": 0.00043949694372713566,
+ "learning_rate": 4.634748569550612e-05,
+ "loss": 0.0,
+ "step": 780
+ },
+ {
+ "epoch": 0.7802469135802469,
+ "grad_norm": 0.0004090226429980248,
+ "learning_rate": 4.61964615266529e-05,
+ "loss": 0.0,
+ "step": 790
+ },
+ {
+ "epoch": 0.7901234567901234,
+ "grad_norm": 0.00037527765380218625,
+ "learning_rate": 4.6042634526640755e-05,
+ "loss": 0.0,
+ "step": 800
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 1.0162104368209839,
+ "learning_rate": 4.588602503618118e-05,
+ "loss": 0.0021,
+ "step": 810
+ },
+ {
+ "epoch": 0.8098765432098766,
+ "grad_norm": 1.3063777685165405,
+ "learning_rate": 4.572665376391741e-05,
+ "loss": 0.0006,
+ "step": 820
+ },
+ {
+ "epoch": 0.8197530864197531,
+ "grad_norm": 0.9236663579940796,
+ "learning_rate": 4.55645417836861e-05,
+ "loss": 0.0058,
+ "step": 830
+ },
+ {
+ "epoch": 0.8296296296296296,
+ "grad_norm": 0.01748817227780819,
+ "learning_rate": 4.5399710531730685e-05,
+ "loss": 0.0025,
+ "step": 840
+ },
+ {
+ "epoch": 0.8395061728395061,
+ "grad_norm": 0.003460384439677,
+ "learning_rate": 4.5232181803866886e-05,
+ "loss": 0.0005,
+ "step": 850
+ },
+ {
+ "epoch": 0.8493827160493828,
+ "grad_norm": 0.0013236172962933779,
+ "learning_rate": 4.506197775260055e-05,
+ "loss": 0.0001,
+ "step": 860
+ },
+ {
+ "epoch": 0.8592592592592593,
+ "grad_norm": 0.0011838016798719764,
+ "learning_rate": 4.4889120884198495e-05,
+ "loss": 0.0,
+ "step": 870
+ },
+ {
+ "epoch": 0.8691358024691358,
+ "grad_norm": 0.0007675419910810888,
+ "learning_rate": 4.47136340557124e-05,
+ "loss": 0.0,
+ "step": 880
+ },
+ {
+ "epoch": 0.8790123456790123,
+ "grad_norm": 0.0007905301754362881,
+ "learning_rate": 4.453554047195644e-05,
+ "loss": 0.0,
+ "step": 890
+ },
+ {
+ "epoch": 0.8888888888888888,
+ "grad_norm": 0.0005061720148660243,
+ "learning_rate": 4.435486368243888e-05,
+ "loss": 0.0,
+ "step": 900
+ },
+ {
+ "epoch": 0.8987654320987655,
+ "grad_norm": 0.0005205022753216326,
+ "learning_rate": 4.417162757824808e-05,
+ "loss": 0.0,
+ "step": 910
+ },
+ {
+ "epoch": 0.908641975308642,
+ "grad_norm": 0.0004827196535188705,
+ "learning_rate": 4.398585638889335e-05,
+ "loss": 0.0,
+ "step": 920
+ },
+ {
+ "epoch": 0.9185185185185185,
+ "grad_norm": 0.000379192759282887,
+ "learning_rate": 4.379757467910113e-05,
+ "loss": 0.0,
+ "step": 930
+ },
+ {
+ "epoch": 0.928395061728395,
+ "grad_norm": 0.00045221165055409074,
+ "learning_rate": 4.3606807345566616e-05,
+ "loss": 0.0,
+ "step": 940
+ },
+ {
+ "epoch": 0.9382716049382716,
+ "grad_norm": 0.0005233365809544921,
+ "learning_rate": 4.341357961366181e-05,
+ "loss": 0.0,
+ "step": 950
+ },
+ {
+ "epoch": 0.9481481481481482,
+ "grad_norm": 0.0002833159815054387,
+ "learning_rate": 4.3217917034099823e-05,
+ "loss": 0.0,
+ "step": 960
+ },
+ {
+ "epoch": 0.9580246913580247,
+ "grad_norm": 0.00043191161239519715,
+ "learning_rate": 4.301984547955635e-05,
+ "loss": 0.0,
+ "step": 970
+ },
+ {
+ "epoch": 0.9679012345679012,
+ "grad_norm": 0.0004523663374129683,
+ "learning_rate": 4.281939114124843e-05,
+ "loss": 0.0,
+ "step": 980
+ },
+ {
+ "epoch": 0.9777777777777777,
+ "grad_norm": 0.00025242462288588285,
+ "learning_rate": 4.261658052547124e-05,
+ "loss": 0.0,
+ "step": 990
+ },
+ {
+ "epoch": 0.9876543209876543,
+ "grad_norm": 0.0006344653083942831,
+ "learning_rate": 4.241144045009304e-05,
+ "loss": 0.0,
+ "step": 1000
+ },
+ {
+ "epoch": 0.9876543209876543,
+ "eval_nlpcc25_task1_dev_accuracy": 0.9996031746031745,
+ "eval_nlpcc25_task1_dev_loss": 0.002990948036313057,
+ "eval_nlpcc25_task1_dev_runtime": 2528.4874,
+ "eval_nlpcc25_task1_dev_samples_per_second": 1.107,
+ "eval_nlpcc25_task1_dev_steps_per_second": 0.277,
+ "step": 1000
+ }
+ ],
+ "logging_steps": 10,
+ "max_steps": 3036,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 5.748493428351959e+17,
+ "train_batch_size": 4,
+ "trial_name": null,
+ "trial_params": null
+ }
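log_history mixes training entries (keyed by "loss", logged every 10 steps) with evaluation entries (keyed by eval_nlpcc25_task1_dev_*, run every eval_steps=500). A sketch of separating the two, assuming the checkpoint path from this commit:

```python
import json

with open("Qwen2.5-7B-Instruct-lora-2/checkpoint-1000/trainer_state.json") as f:
    state = json.load(f)

# Training-loss curve: (step, loss) pairs from the periodic logging entries.
train_curve = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]

# Dev-set evaluations recorded at steps 500 and 1000.
evals = [e for e in state["log_history"] if "eval_nlpcc25_task1_dev_loss" in e]

print(train_curve[:3])  # [(10, 4.2059), (20, 4.1463), (30, 4.1087)]
print(evals[-1]["eval_nlpcc25_task1_dev_accuracy"])  # 0.9996031746031745
```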
Qwen2.5-7B-Instruct-lora-2/checkpoint-1000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3bc27925817800cee3170b92f598d1cfe9fce4b0491f781c2f4977cfd2644a9a
+ size 5752
Qwen2.5-7B-Instruct-lora-2/checkpoint-1000/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
Qwen2.5-7B-Instruct-lora-2/checkpoint-1500/README.md ADDED
@@ -0,0 +1,202 @@
+ ---
+ base_model: /data/zhaoguoyu/Experiments/mgtd-sys/detector/ckpt/Qwen2.5-7B-Instruct
+ library_name: peft
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.15.0
Qwen2.5-7B-Instruct-lora-2/checkpoint-1500/adapter_config.json ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "/data/zhaoguoyu/Experiments/mgtd-sys/detector/ckpt/Qwen2.5-7B-Instruct",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "k_proj",
+ "v_proj",
+ "up_proj",
+ "down_proj",
+ "o_proj",
+ "gate_proj",
+ "q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
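
The config above describes a rank-8 LoRA (alpha 16, no dropout) applied to every attention and MLP projection of Qwen2.5-7B-Instruct. A minimal loading sketch, assuming `transformers` and `peft` are installed; the Hub id `Qwen/Qwen2.5-7B-Instruct` is an assumption standing in for the machine-specific `base_model_name_or_path` above:

```python
# Sketch: attach this checkpoint's LoRA adapter to the base model.
# Both paths are assumptions; substitute your local copies as needed.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

BASE = "Qwen/Qwen2.5-7B-Instruct"  # stand-in for the local path in adapter_config.json
ADAPTER = "Qwen2.5-7B-Instruct-lora-2/checkpoint-1500"

tokenizer = AutoTokenizer.from_pretrained(BASE)
base = AutoModelForCausalLM.from_pretrained(BASE, torch_dtype=torch.bfloat16, device_map="auto")
model = PeftModel.from_pretrained(base, ADAPTER)  # reads adapter_config.json + adapter_model.safetensors
model.eval()
```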
Qwen2.5-7B-Instruct-lora-2/checkpoint-1500/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c877e7641cfb911f52d81ab8deed742e74675d10712bded6183a4bf14b29d766
+ size 80792096
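
Like the other binary files in this commit, `adapter_model.safetensors` is shown only as its Git LFS pointer: spec version, SHA-256 of the payload, and size (~81 MB); the weights themselves are fetched by LFS on checkout. Once downloaded, the tensor layout can be peeked at without instantiating the model; a small sketch, assuming the `safetensors` package and the path below:

```python
# Sketch: list a few LoRA tensors stored in the checkpoint.
from safetensors import safe_open

path = "Qwen2.5-7B-Instruct-lora-2/checkpoint-1500/adapter_model.safetensors"
with safe_open(path, framework="pt") as f:
    for name in list(f.keys())[:4]:  # e.g. ...q_proj.lora_A.weight / lora_B.weight
        print(name, tuple(f.get_tensor(name).shape))
```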
Qwen2.5-7B-Instruct-lora-2/checkpoint-1500/added_tokens.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "</tool_call>": 151658,
+ "<tool_call>": 151657,
+ "<|box_end|>": 151649,
+ "<|box_start|>": 151648,
+ "<|endoftext|>": 151643,
+ "<|file_sep|>": 151664,
+ "<|fim_middle|>": 151660,
+ "<|fim_pad|>": 151662,
+ "<|fim_prefix|>": 151659,
+ "<|fim_suffix|>": 151661,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644,
+ "<|image_pad|>": 151655,
+ "<|object_ref_end|>": 151647,
+ "<|object_ref_start|>": 151646,
+ "<|quad_end|>": 151651,
+ "<|quad_start|>": 151650,
+ "<|repo_name|>": 151663,
+ "<|video_pad|>": 151656,
+ "<|vision_end|>": 151653,
+ "<|vision_pad|>": 151654,
+ "<|vision_start|>": 151652
+ }
Qwen2.5-7B-Instruct-lora-2/checkpoint-1500/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
Qwen2.5-7B-Instruct-lora-2/checkpoint-1500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b3b1b723e6fd339e9e095bcbf19a1ac51ea4c7e763b1f9ab0bbde16b569e9f2e
+ size 161810282
Qwen2.5-7B-Instruct-lora-2/checkpoint-1500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b3ee827a7a00012c0a116546df467feee35e70376d81a7a85b1a70eb90414d3
+ size 14244
Qwen2.5-7B-Instruct-lora-2/checkpoint-1500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eae0eb18432ecf0cfb9a0f275643de304875ad2c2d898c212c522cc3e221f35f
+ size 1064
Qwen2.5-7B-Instruct-lora-2/checkpoint-1500/special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "eos_token": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
Qwen2.5-7B-Instruct-lora-2/checkpoint-1500/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa
+ size 11421896
Qwen2.5-7B-Instruct-lora-2/checkpoint-1500/tokenizer_config.json ADDED
@@ -0,0 +1,209 @@
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ }
181
+ },
182
+ "additional_special_tokens": [
183
+ "<|im_start|>",
184
+ "<|im_end|>",
185
+ "<|object_ref_start|>",
186
+ "<|object_ref_end|>",
187
+ "<|box_start|>",
188
+ "<|box_end|>",
189
+ "<|quad_start|>",
190
+ "<|quad_end|>",
191
+ "<|vision_start|>",
192
+ "<|vision_end|>",
193
+ "<|vision_pad|>",
194
+ "<|image_pad|>",
195
+ "<|video_pad|>"
196
+ ],
197
+ "bos_token": null,
198
+ "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
199
+ "clean_up_tokenization_spaces": false,
200
+ "eos_token": "<|im_end|>",
201
+ "errors": "replace",
202
+ "extra_special_tokens": {},
203
+ "model_max_length": 131072,
204
+ "pad_token": "<|endoftext|>",
205
+ "padding_side": "right",
206
+ "split_special_tokens": false,
207
+ "tokenizer_class": "Qwen2Tokenizer",
208
+ "unk_token": null
209
+ }
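
The `chat_template` field above is the standard Qwen2.5 ChatML template, so prompts for this adapter should be rendered with `apply_chat_template` rather than assembled by hand. A short sketch, assuming the tokenizer is loaded from this checkpoint directory; the example message is illustrative only (the repo path suggests a machine-generated-text detection task):

```python
# Sketch: render a prompt with the ChatML template shipped in tokenizer_config.json.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Qwen2.5-7B-Instruct-lora-2/checkpoint-1500")
messages = [{"role": "user", "content": "Was this text written by a machine?"}]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)  # <|im_start|>system ... <|im_start|>assistant
```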
Qwen2.5-7B-Instruct-lora-2/checkpoint-1500/trainer_state.json ADDED
@@ -0,0 +1,1111 @@
1
+ {
2
+ "best_global_step": null,
3
+ "best_metric": null,
4
+ "best_model_checkpoint": null,
5
+ "epoch": 1.4809876543209877,
6
+ "eval_steps": 500,
7
+ "global_step": 1500,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.009876543209876543,
14
+ "grad_norm": 7.599112033843994,
15
+ "learning_rate": 1.6447368421052632e-06,
16
+ "loss": 4.2059,
17
+ "step": 10
18
+ },
19
+ {
20
+ "epoch": 0.019753086419753086,
21
+ "grad_norm": 6.403433322906494,
22
+ "learning_rate": 3.2894736842105265e-06,
23
+ "loss": 4.1463,
24
+ "step": 20
25
+ },
26
+ {
27
+ "epoch": 0.02962962962962963,
28
+ "grad_norm": 9.776185989379883,
29
+ "learning_rate": 4.9342105263157895e-06,
30
+ "loss": 4.1087,
31
+ "step": 30
32
+ },
33
+ {
34
+ "epoch": 0.03950617283950617,
35
+ "grad_norm": 5.836405277252197,
36
+ "learning_rate": 6.578947368421053e-06,
37
+ "loss": 3.7657,
38
+ "step": 40
39
+ },
40
+ {
41
+ "epoch": 0.04938271604938271,
42
+ "grad_norm": 4.362349033355713,
43
+ "learning_rate": 8.223684210526317e-06,
44
+ "loss": 3.2134,
45
+ "step": 50
46
+ },
47
+ {
48
+ "epoch": 0.05925925925925926,
49
+ "grad_norm": 5.1398701667785645,
50
+ "learning_rate": 9.868421052631579e-06,
51
+ "loss": 2.6238,
52
+ "step": 60
53
+ },
54
+ {
55
+ "epoch": 0.0691358024691358,
56
+ "grad_norm": 8.642521858215332,
57
+ "learning_rate": 1.1513157894736843e-05,
58
+ "loss": 1.5929,
59
+ "step": 70
60
+ },
61
+ {
62
+ "epoch": 0.07901234567901234,
63
+ "grad_norm": 1.0219271183013916,
64
+ "learning_rate": 1.3157894736842106e-05,
65
+ "loss": 0.1491,
66
+ "step": 80
67
+ },
68
+ {
69
+ "epoch": 0.08888888888888889,
70
+ "grad_norm": 0.4035845100879669,
71
+ "learning_rate": 1.4802631578947368e-05,
72
+ "loss": 0.0206,
73
+ "step": 90
74
+ },
75
+ {
76
+ "epoch": 0.09876543209876543,
77
+ "grad_norm": 0.6767832636833191,
78
+ "learning_rate": 1.6447368421052635e-05,
79
+ "loss": 0.0073,
80
+ "step": 100
81
+ },
82
+ {
83
+ "epoch": 0.10864197530864197,
84
+ "grad_norm": 0.014764077961444855,
85
+ "learning_rate": 1.8092105263157896e-05,
86
+ "loss": 0.0011,
87
+ "step": 110
88
+ },
89
+ {
90
+ "epoch": 0.11851851851851852,
91
+ "grad_norm": 0.15152056515216827,
92
+ "learning_rate": 1.9736842105263158e-05,
93
+ "loss": 0.0005,
94
+ "step": 120
95
+ },
96
+ {
97
+ "epoch": 0.12839506172839507,
98
+ "grad_norm": 0.06782546639442444,
99
+ "learning_rate": 2.1381578947368423e-05,
100
+ "loss": 0.0014,
101
+ "step": 130
102
+ },
103
+ {
104
+ "epoch": 0.1382716049382716,
105
+ "grad_norm": 0.03198217228055,
106
+ "learning_rate": 2.3026315789473685e-05,
107
+ "loss": 0.0009,
108
+ "step": 140
109
+ },
110
+ {
111
+ "epoch": 0.14814814814814814,
112
+ "grad_norm": 0.02691480703651905,
113
+ "learning_rate": 2.4671052631578947e-05,
114
+ "loss": 0.0024,
115
+ "step": 150
116
+ },
117
+ {
118
+ "epoch": 0.1580246913580247,
119
+ "grad_norm": 0.0057847509160637856,
120
+ "learning_rate": 2.6315789473684212e-05,
121
+ "loss": 0.0001,
122
+ "step": 160
123
+ },
124
+ {
125
+ "epoch": 0.16790123456790124,
126
+ "grad_norm": 0.003663571085780859,
127
+ "learning_rate": 2.7960526315789477e-05,
128
+ "loss": 0.0002,
129
+ "step": 170
130
+ },
131
+ {
132
+ "epoch": 0.17777777777777778,
133
+ "grad_norm": 0.004447426181286573,
134
+ "learning_rate": 2.9605263157894735e-05,
135
+ "loss": 0.0003,
136
+ "step": 180
137
+ },
138
+ {
139
+ "epoch": 0.18765432098765433,
140
+ "grad_norm": 0.10373878479003906,
141
+ "learning_rate": 3.125e-05,
142
+ "loss": 0.0006,
143
+ "step": 190
144
+ },
145
+ {
146
+ "epoch": 0.19753086419753085,
147
+ "grad_norm": 0.002619905164465308,
148
+ "learning_rate": 3.289473684210527e-05,
149
+ "loss": 0.0006,
150
+ "step": 200
151
+ },
152
+ {
153
+ "epoch": 0.2074074074074074,
154
+ "grad_norm": 0.007042820565402508,
155
+ "learning_rate": 3.4539473684210524e-05,
156
+ "loss": 0.0015,
157
+ "step": 210
158
+ },
159
+ {
160
+ "epoch": 0.21728395061728395,
161
+ "grad_norm": 0.0016206403961405158,
162
+ "learning_rate": 3.618421052631579e-05,
163
+ "loss": 0.0049,
164
+ "step": 220
165
+ },
166
+ {
167
+ "epoch": 0.2271604938271605,
168
+ "grad_norm": 0.001877307309769094,
169
+ "learning_rate": 3.7828947368421054e-05,
170
+ "loss": 0.0001,
171
+ "step": 230
172
+ },
173
+ {
174
+ "epoch": 0.23703703703703705,
175
+ "grad_norm": 0.003795365337282419,
176
+ "learning_rate": 3.9473684210526316e-05,
177
+ "loss": 0.0001,
178
+ "step": 240
179
+ },
180
+ {
181
+ "epoch": 0.24691358024691357,
182
+ "grad_norm": 0.008357529528439045,
183
+ "learning_rate": 4.111842105263158e-05,
184
+ "loss": 0.004,
185
+ "step": 250
186
+ },
187
+ {
188
+ "epoch": 0.25679012345679014,
189
+ "grad_norm": 0.026294540613889694,
190
+ "learning_rate": 4.2763157894736847e-05,
191
+ "loss": 0.0002,
192
+ "step": 260
193
+ },
194
+ {
195
+ "epoch": 0.26666666666666666,
196
+ "grad_norm": 0.08903225511312485,
197
+ "learning_rate": 4.440789473684211e-05,
198
+ "loss": 0.0005,
199
+ "step": 270
200
+ },
201
+ {
202
+ "epoch": 0.2765432098765432,
203
+ "grad_norm": 0.05173858627676964,
204
+ "learning_rate": 4.605263157894737e-05,
205
+ "loss": 0.0003,
206
+ "step": 280
207
+ },
208
+ {
209
+ "epoch": 0.28641975308641976,
210
+ "grad_norm": 0.036562711000442505,
211
+ "learning_rate": 4.769736842105263e-05,
212
+ "loss": 0.0012,
213
+ "step": 290
214
+ },
215
+ {
216
+ "epoch": 0.2962962962962963,
217
+ "grad_norm": 0.008022695779800415,
218
+ "learning_rate": 4.9342105263157894e-05,
219
+ "loss": 0.0036,
220
+ "step": 300
221
+ },
222
+ {
223
+ "epoch": 0.30617283950617286,
224
+ "grad_norm": 0.0021413813810795546,
225
+ "learning_rate": 4.999940495590975e-05,
226
+ "loss": 0.0006,
227
+ "step": 310
228
+ },
229
+ {
230
+ "epoch": 0.3160493827160494,
231
+ "grad_norm": 0.015379426069557667,
232
+ "learning_rate": 4.999576867793816e-05,
233
+ "loss": 0.0092,
234
+ "step": 320
235
+ },
236
+ {
237
+ "epoch": 0.32592592592592595,
238
+ "grad_norm": 0.003894766792654991,
239
+ "learning_rate": 4.9988827182291254e-05,
240
+ "loss": 0.001,
241
+ "step": 330
242
+ },
243
+ {
244
+ "epoch": 0.3358024691358025,
245
+ "grad_norm": 0.0016231742920354009,
246
+ "learning_rate": 4.997858138685056e-05,
247
+ "loss": 0.0001,
248
+ "step": 340
249
+ },
250
+ {
251
+ "epoch": 0.345679012345679,
252
+ "grad_norm": 0.0015260468935593963,
253
+ "learning_rate": 4.996503264642876e-05,
254
+ "loss": 0.0016,
255
+ "step": 350
256
+ },
257
+ {
258
+ "epoch": 0.35555555555555557,
259
+ "grad_norm": 0.03330058977007866,
260
+ "learning_rate": 4.994818275259052e-05,
261
+ "loss": 0.0013,
262
+ "step": 360
263
+ },
264
+ {
265
+ "epoch": 0.3654320987654321,
266
+ "grad_norm": 0.002741268603131175,
267
+ "learning_rate": 4.992803393341563e-05,
268
+ "loss": 0.0017,
269
+ "step": 370
270
+ },
271
+ {
272
+ "epoch": 0.37530864197530867,
273
+ "grad_norm": 0.004990574903786182,
274
+ "learning_rate": 4.9904588853204365e-05,
275
+ "loss": 0.0001,
276
+ "step": 380
277
+ },
278
+ {
279
+ "epoch": 0.3851851851851852,
280
+ "grad_norm": 0.013514043763279915,
281
+ "learning_rate": 4.9877850612125173e-05,
282
+ "loss": 0.0001,
283
+ "step": 390
284
+ },
285
+ {
286
+ "epoch": 0.3950617283950617,
287
+ "grad_norm": 0.0015880317660048604,
288
+ "learning_rate": 4.984782274580476e-05,
289
+ "loss": 0.0001,
290
+ "step": 400
291
+ },
292
+ {
293
+ "epoch": 0.4049382716049383,
294
+ "grad_norm": 0.002702023135498166,
295
+ "learning_rate": 4.981450922486053e-05,
296
+ "loss": 0.0001,
297
+ "step": 410
298
+ },
299
+ {
300
+ "epoch": 0.4148148148148148,
301
+ "grad_norm": 0.0020927605219185352,
302
+ "learning_rate": 4.977791445437559e-05,
303
+ "loss": 0.0001,
304
+ "step": 420
305
+ },
306
+ {
307
+ "epoch": 0.4246913580246914,
308
+ "grad_norm": 0.0007626357255503535,
309
+ "learning_rate": 4.973804327331625e-05,
310
+ "loss": 0.0002,
311
+ "step": 430
312
+ },
313
+ {
314
+ "epoch": 0.4345679012345679,
315
+ "grad_norm": 0.0026151000056415796,
316
+ "learning_rate": 4.969490095389213e-05,
317
+ "loss": 0.0,
318
+ "step": 440
319
+ },
320
+ {
321
+ "epoch": 0.4444444444444444,
322
+ "grad_norm": 0.0007740291184745729,
323
+ "learning_rate": 4.964849320085905e-05,
324
+ "loss": 0.0,
325
+ "step": 450
326
+ },
327
+ {
328
+ "epoch": 0.454320987654321,
329
+ "grad_norm": 0.0005287025705911219,
330
+ "learning_rate": 4.9598826150764656e-05,
331
+ "loss": 0.0,
332
+ "step": 460
333
+ },
334
+ {
335
+ "epoch": 0.4641975308641975,
336
+ "grad_norm": 0.0004778858565259725,
337
+ "learning_rate": 4.9545906371137e-05,
338
+ "loss": 0.007,
339
+ "step": 470
340
+ },
341
+ {
342
+ "epoch": 0.4740740740740741,
343
+ "grad_norm": 0.0007034554146230221,
344
+ "learning_rate": 4.9489740859616094e-05,
345
+ "loss": 0.0,
346
+ "step": 480
347
+ },
348
+ {
349
+ "epoch": 0.4839506172839506,
350
+ "grad_norm": 0.0006262129172682762,
351
+ "learning_rate": 4.9430337043028604e-05,
352
+ "loss": 0.0,
353
+ "step": 490
354
+ },
355
+ {
356
+ "epoch": 0.49382716049382713,
357
+ "grad_norm": 0.00045443352428264916,
358
+ "learning_rate": 4.9367702776405806e-05,
359
+ "loss": 0.0,
360
+ "step": 500
361
+ },
362
+ {
363
+ "epoch": 0.49382716049382713,
364
+ "eval_nlpcc25_task1_dev_accuracy": 0.9995238095238096,
365
+ "eval_nlpcc25_task1_dev_loss": 0.0022831459064036608,
366
+ "eval_nlpcc25_task1_dev_runtime": 2528.8573,
367
+ "eval_nlpcc25_task1_dev_samples_per_second": 1.107,
368
+ "eval_nlpcc25_task1_dev_steps_per_second": 0.277,
369
+ "step": 500
370
+ },
371
+ {
372
+ "epoch": 0.5037037037037037,
373
+ "grad_norm": 0.0003733730991370976,
374
+ "learning_rate": 4.930184634194488e-05,
375
+ "loss": 0.0,
376
+ "step": 510
377
+ },
378
+ {
379
+ "epoch": 0.5135802469135803,
380
+ "grad_norm": 0.0003028757928404957,
381
+ "learning_rate": 4.923277644791378e-05,
382
+ "loss": 0.0,
383
+ "step": 520
384
+ },
385
+ {
386
+ "epoch": 0.5234567901234568,
387
+ "grad_norm": 0.00028728952747769654,
388
+ "learning_rate": 4.9160502227499704e-05,
389
+ "loss": 0.0,
390
+ "step": 530
391
+ },
392
+ {
393
+ "epoch": 0.5333333333333333,
394
+ "grad_norm": 0.0003644278913270682,
395
+ "learning_rate": 4.908503323760143e-05,
396
+ "loss": 0.0,
397
+ "step": 540
398
+ },
399
+ {
400
+ "epoch": 0.5432098765432098,
401
+ "grad_norm": 0.0005966073367744684,
402
+ "learning_rate": 4.900637945756554e-05,
403
+ "loss": 0.0,
404
+ "step": 550
405
+ },
406
+ {
407
+ "epoch": 0.5530864197530864,
408
+ "grad_norm": 0.000367969973012805,
409
+ "learning_rate": 4.892455128786692e-05,
410
+ "loss": 0.0,
411
+ "step": 560
412
+ },
413
+ {
414
+ "epoch": 0.562962962962963,
415
+ "grad_norm": 0.0002780740906018764,
416
+ "learning_rate": 4.8839559548733436e-05,
417
+ "loss": 0.0077,
418
+ "step": 570
419
+ },
420
+ {
421
+ "epoch": 0.5728395061728395,
422
+ "grad_norm": 0.00031342540751211345,
423
+ "learning_rate": 4.875141547871519e-05,
424
+ "loss": 0.0,
425
+ "step": 580
426
+ },
427
+ {
428
+ "epoch": 0.582716049382716,
429
+ "grad_norm": 0.000353014562278986,
430
+ "learning_rate": 4.866013073319841e-05,
431
+ "loss": 0.0,
432
+ "step": 590
433
+ },
434
+ {
435
+ "epoch": 0.5925925925925926,
436
+ "grad_norm": 0.00029066085699014366,
437
+ "learning_rate": 4.856571738286426e-05,
438
+ "loss": 0.0,
439
+ "step": 600
440
+ },
441
+ {
442
+ "epoch": 0.6024691358024692,
443
+ "grad_norm": 0.00024000390840228647,
444
+ "learning_rate": 4.8468187912092744e-05,
445
+ "loss": 0.0,
446
+ "step": 610
447
+ },
448
+ {
449
+ "epoch": 0.6123456790123457,
450
+ "grad_norm": 0.0003632376901805401,
451
+ "learning_rate": 4.836755521731183e-05,
452
+ "loss": 0.0,
453
+ "step": 620
454
+ },
455
+ {
456
+ "epoch": 0.6222222222222222,
457
+ "grad_norm": 0.00029751085094176233,
458
+ "learning_rate": 4.826383260529221e-05,
459
+ "loss": 0.0002,
460
+ "step": 630
461
+ },
462
+ {
463
+ "epoch": 0.6320987654320988,
464
+ "grad_norm": 3.190973997116089,
465
+ "learning_rate": 4.815703379138765e-05,
466
+ "loss": 0.0017,
467
+ "step": 640
468
+ },
469
+ {
470
+ "epoch": 0.6419753086419753,
471
+ "grad_norm": 0.18289895355701447,
472
+ "learning_rate": 4.804717289772147e-05,
473
+ "loss": 0.0043,
474
+ "step": 650
475
+ },
476
+ {
477
+ "epoch": 0.6518518518518519,
478
+ "grad_norm": 0.2459922581911087,
479
+ "learning_rate": 4.7934264451319105e-05,
480
+ "loss": 0.003,
481
+ "step": 660
482
+ },
483
+ {
484
+ "epoch": 0.6617283950617284,
485
+ "grad_norm": 0.004725860431790352,
486
+ "learning_rate": 4.7818323382187214e-05,
487
+ "loss": 0.0002,
488
+ "step": 670
489
+ },
490
+ {
491
+ "epoch": 0.671604938271605,
492
+ "grad_norm": 0.004160716664046049,
493
+ "learning_rate": 4.769936502133946e-05,
494
+ "loss": 0.0002,
495
+ "step": 680
496
+ },
497
+ {
498
+ "epoch": 0.6814814814814815,
499
+ "grad_norm": 0.002101948019117117,
500
+ "learning_rate": 4.7577405098769256e-05,
501
+ "loss": 0.0001,
502
+ "step": 690
503
+ },
504
+ {
505
+ "epoch": 0.691358024691358,
506
+ "grad_norm": 0.0015966896899044514,
507
+ "learning_rate": 4.74524597413698e-05,
508
+ "loss": 0.0001,
509
+ "step": 700
510
+ },
511
+ {
512
+ "epoch": 0.7012345679012346,
513
+ "grad_norm": 0.0010343171888962388,
514
+ "learning_rate": 4.732454547080159e-05,
515
+ "loss": 0.0,
516
+ "step": 710
517
+ },
518
+ {
519
+ "epoch": 0.7111111111111111,
520
+ "grad_norm": 0.0007301874575205147,
521
+ "learning_rate": 4.7193679201307705e-05,
522
+ "loss": 0.0,
523
+ "step": 720
524
+ },
525
+ {
526
+ "epoch": 0.7209876543209877,
527
+ "grad_norm": 0.0008054127101786435,
528
+ "learning_rate": 4.705987823747731e-05,
529
+ "loss": 0.0,
530
+ "step": 730
531
+ },
532
+ {
533
+ "epoch": 0.7308641975308642,
534
+ "grad_norm": 0.0006862932350486517,
535
+ "learning_rate": 4.692316027195733e-05,
536
+ "loss": 0.0001,
537
+ "step": 740
538
+ },
539
+ {
540
+ "epoch": 0.7407407407407407,
541
+ "grad_norm": 0.0005865858984179795,
542
+ "learning_rate": 4.678354338311306e-05,
543
+ "loss": 0.0,
544
+ "step": 750
545
+ },
546
+ {
547
+ "epoch": 0.7506172839506173,
548
+ "grad_norm": 0.0004720942524727434,
549
+ "learning_rate": 4.6641046032637516e-05,
550
+ "loss": 0.0,
551
+ "step": 760
552
+ },
553
+ {
554
+ "epoch": 0.7604938271604939,
555
+ "grad_norm": 0.0004310516524128616,
556
+ "learning_rate": 4.6495687063110325e-05,
557
+ "loss": 0.0,
558
+ "step": 770
559
+ },
560
+ {
561
+ "epoch": 0.7703703703703704,
562
+ "grad_norm": 0.00043949694372713566,
563
+ "learning_rate": 4.634748569550612e-05,
564
+ "loss": 0.0,
565
+ "step": 780
566
+ },
567
+ {
568
+ "epoch": 0.7802469135802469,
569
+ "grad_norm": 0.0004090226429980248,
570
+ "learning_rate": 4.61964615266529e-05,
571
+ "loss": 0.0,
572
+ "step": 790
573
+ },
574
+ {
575
+ "epoch": 0.7901234567901234,
576
+ "grad_norm": 0.00037527765380218625,
577
+ "learning_rate": 4.6042634526640755e-05,
578
+ "loss": 0.0,
579
+ "step": 800
580
+ },
581
+ {
582
+ "epoch": 0.8,
583
+ "grad_norm": 1.0162104368209839,
584
+ "learning_rate": 4.588602503618118e-05,
585
+ "loss": 0.0021,
586
+ "step": 810
587
+ },
588
+ {
589
+ "epoch": 0.8098765432098766,
590
+ "grad_norm": 1.3063777685165405,
591
+ "learning_rate": 4.572665376391741e-05,
592
+ "loss": 0.0006,
593
+ "step": 820
594
+ },
595
+ {
596
+ "epoch": 0.8197530864197531,
597
+ "grad_norm": 0.9236663579940796,
598
+ "learning_rate": 4.55645417836861e-05,
599
+ "loss": 0.0058,
600
+ "step": 830
601
+ },
602
+ {
603
+ "epoch": 0.8296296296296296,
604
+ "grad_norm": 0.01748817227780819,
605
+ "learning_rate": 4.5399710531730685e-05,
606
+ "loss": 0.0025,
607
+ "step": 840
608
+ },
609
+ {
610
+ "epoch": 0.8395061728395061,
611
+ "grad_norm": 0.003460384439677,
612
+ "learning_rate": 4.5232181803866886e-05,
613
+ "loss": 0.0005,
614
+ "step": 850
615
+ },
616
+ {
617
+ "epoch": 0.8493827160493828,
618
+ "grad_norm": 0.0013236172962933779,
619
+ "learning_rate": 4.506197775260055e-05,
620
+ "loss": 0.0001,
621
+ "step": 860
622
+ },
623
+ {
624
+ "epoch": 0.8592592592592593,
625
+ "grad_norm": 0.0011838016798719764,
626
+ "learning_rate": 4.4889120884198495e-05,
627
+ "loss": 0.0,
628
+ "step": 870
629
+ },
630
+ {
631
+ "epoch": 0.8691358024691358,
632
+ "grad_norm": 0.0007675419910810888,
633
+ "learning_rate": 4.47136340557124e-05,
634
+ "loss": 0.0,
635
+ "step": 880
636
+ },
637
+ {
638
+ "epoch": 0.8790123456790123,
639
+ "grad_norm": 0.0007905301754362881,
640
+ "learning_rate": 4.453554047195644e-05,
641
+ "loss": 0.0,
642
+ "step": 890
643
+ },
644
+ {
645
+ "epoch": 0.8888888888888888,
646
+ "grad_norm": 0.0005061720148660243,
647
+ "learning_rate": 4.435486368243888e-05,
648
+ "loss": 0.0,
649
+ "step": 900
650
+ },
651
+ {
652
+ "epoch": 0.8987654320987655,
653
+ "grad_norm": 0.0005205022753216326,
654
+ "learning_rate": 4.417162757824808e-05,
655
+ "loss": 0.0,
656
+ "step": 910
657
+ },
658
+ {
659
+ "epoch": 0.908641975308642,
660
+ "grad_norm": 0.0004827196535188705,
661
+ "learning_rate": 4.398585638889335e-05,
662
+ "loss": 0.0,
663
+ "step": 920
664
+ },
665
+ {
666
+ "epoch": 0.9185185185185185,
667
+ "grad_norm": 0.000379192759282887,
668
+ "learning_rate": 4.379757467910113e-05,
669
+ "loss": 0.0,
670
+ "step": 930
671
+ },
672
+ {
673
+ "epoch": 0.928395061728395,
674
+ "grad_norm": 0.00045221165055409074,
675
+ "learning_rate": 4.3606807345566616e-05,
676
+ "loss": 0.0,
677
+ "step": 940
678
+ },
679
+ {
680
+ "epoch": 0.9382716049382716,
681
+ "grad_norm": 0.0005233365809544921,
682
+ "learning_rate": 4.341357961366181e-05,
683
+ "loss": 0.0,
684
+ "step": 950
685
+ },
686
+ {
687
+ "epoch": 0.9481481481481482,
688
+ "grad_norm": 0.0002833159815054387,
689
+ "learning_rate": 4.3217917034099823e-05,
690
+ "loss": 0.0,
691
+ "step": 960
692
+ },
693
+ {
694
+ "epoch": 0.9580246913580247,
695
+ "grad_norm": 0.00043191161239519715,
696
+ "learning_rate": 4.301984547955635e-05,
697
+ "loss": 0.0,
698
+ "step": 970
699
+ },
700
+ {
701
+ "epoch": 0.9679012345679012,
702
+ "grad_norm": 0.0004523663374129683,
703
+ "learning_rate": 4.281939114124843e-05,
704
+ "loss": 0.0,
705
+ "step": 980
706
+ },
707
+ {
708
+ "epoch": 0.9777777777777777,
709
+ "grad_norm": 0.00025242462288588285,
710
+ "learning_rate": 4.261658052547124e-05,
711
+ "loss": 0.0,
712
+ "step": 990
713
+ },
714
+ {
715
+ "epoch": 0.9876543209876543,
716
+ "grad_norm": 0.0006344653083942831,
717
+ "learning_rate": 4.241144045009304e-05,
718
+ "loss": 0.0,
719
+ "step": 1000
720
+ },
721
+ {
722
+ "epoch": 0.9876543209876543,
723
+ "eval_nlpcc25_task1_dev_accuracy": 0.9996031746031745,
724
+ "eval_nlpcc25_task1_dev_loss": 0.002990948036313057,
725
+ "eval_nlpcc25_task1_dev_runtime": 2528.4874,
726
+ "eval_nlpcc25_task1_dev_samples_per_second": 1.107,
727
+ "eval_nlpcc25_task1_dev_steps_per_second": 0.277,
728
+ "step": 1000
729
+ },
730
+ {
731
+ "epoch": 0.9975308641975309,
732
+ "grad_norm": 0.0005013208137825131,
733
+ "learning_rate": 4.22039980410091e-05,
734
+ "loss": 0.0,
735
+ "step": 1010
736
+ },
737
+ {
738
+ "epoch": 1.0069135802469136,
739
+ "grad_norm": 0.0003089129750151187,
740
+ "learning_rate": 4.199428072855476e-05,
741
+ "loss": 0.0,
742
+ "step": 1020
743
+ },
744
+ {
745
+ "epoch": 1.0167901234567902,
746
+ "grad_norm": 0.00048567031626589596,
747
+ "learning_rate": 4.1782316243878314e-05,
748
+ "loss": 0.0,
749
+ "step": 1030
750
+ },
751
+ {
752
+ "epoch": 1.0266666666666666,
753
+ "grad_norm": 0.0002530143829062581,
754
+ "learning_rate": 4.1568132615274094e-05,
755
+ "loss": 0.0,
756
+ "step": 1040
757
+ },
758
+ {
759
+ "epoch": 1.0365432098765432,
760
+ "grad_norm": 0.00028351074433885515,
761
+ "learning_rate": 4.1351758164476225e-05,
762
+ "loss": 0.0001,
763
+ "step": 1050
764
+ },
765
+ {
766
+ "epoch": 1.0464197530864197,
767
+ "grad_norm": 0.0005833515897393227,
768
+ "learning_rate": 4.113322150291365e-05,
769
+ "loss": 0.0,
770
+ "step": 1060
771
+ },
772
+ {
773
+ "epoch": 1.0562962962962963,
774
+ "grad_norm": 0.0011448035947978497,
775
+ "learning_rate": 4.091255152792678e-05,
776
+ "loss": 0.0028,
777
+ "step": 1070
778
+ },
779
+ {
780
+ "epoch": 1.066172839506173,
781
+ "grad_norm": 0.025126419961452484,
782
+ "learning_rate": 4.0689777418946394e-05,
783
+ "loss": 0.0002,
784
+ "step": 1080
785
+ },
786
+ {
787
+ "epoch": 1.0760493827160493,
788
+ "grad_norm": 0.0005254483548924327,
789
+ "learning_rate": 4.0464928633635196e-05,
790
+ "loss": 0.0006,
791
+ "step": 1090
792
+ },
793
+ {
794
+ "epoch": 1.085925925925926,
795
+ "grad_norm": 0.0009111575200222433,
796
+ "learning_rate": 4.023803490399258e-05,
797
+ "loss": 0.0,
798
+ "step": 1100
799
+ },
800
+ {
801
+ "epoch": 1.0958024691358024,
802
+ "grad_norm": 0.000739308656193316,
803
+ "learning_rate": 4.000912623242318e-05,
804
+ "loss": 0.0,
805
+ "step": 1110
806
+ },
807
+ {
808
+ "epoch": 1.105679012345679,
809
+ "grad_norm": 0.0005893895868211985,
810
+ "learning_rate": 3.977823288776955e-05,
811
+ "loss": 0.0,
812
+ "step": 1120
813
+ },
814
+ {
815
+ "epoch": 1.1155555555555556,
816
+ "grad_norm": 0.00036915784585289657,
817
+ "learning_rate": 3.954538540130978e-05,
818
+ "loss": 0.0,
819
+ "step": 1130
820
+ },
821
+ {
822
+ "epoch": 1.125432098765432,
823
+ "grad_norm": 0.0005508523900061846,
824
+ "learning_rate": 3.9310614562720214e-05,
825
+ "loss": 0.0,
826
+ "step": 1140
827
+ },
828
+ {
829
+ "epoch": 1.1353086419753087,
830
+ "grad_norm": 0.0009192466968670487,
831
+ "learning_rate": 3.9073951416004143e-05,
832
+ "loss": 0.0002,
833
+ "step": 1150
834
+ },
835
+ {
836
+ "epoch": 1.145185185185185,
837
+ "grad_norm": 0.0004524670075625181,
838
+ "learning_rate": 3.8835427255386856e-05,
839
+ "loss": 0.0,
840
+ "step": 1160
841
+ },
842
+ {
843
+ "epoch": 1.1550617283950617,
844
+ "grad_norm": 0.000336712779244408,
845
+ "learning_rate": 3.859507362117748e-05,
846
+ "loss": 0.0,
847
+ "step": 1170
848
+ },
849
+ {
850
+ "epoch": 1.1649382716049383,
851
+ "grad_norm": 0.00038848226540721953,
852
+ "learning_rate": 3.8352922295598476e-05,
853
+ "loss": 0.0,
854
+ "step": 1180
855
+ },
856
+ {
857
+ "epoch": 1.1748148148148148,
858
+ "grad_norm": 0.0007016750751063228,
859
+ "learning_rate": 3.810900529858295e-05,
860
+ "loss": 0.0,
861
+ "step": 1190
862
+ },
863
+ {
864
+ "epoch": 1.1846913580246914,
865
+ "grad_norm": 0.00020055304048582911,
866
+ "learning_rate": 3.786335488354068e-05,
867
+ "loss": 0.0,
868
+ "step": 1200
869
+ },
870
+ {
871
+ "epoch": 1.194567901234568,
872
+ "grad_norm": 0.0006126250373199582,
873
+ "learning_rate": 3.7616003533093204e-05,
874
+ "loss": 0.0,
875
+ "step": 1210
876
+ },
877
+ {
878
+ "epoch": 1.2044444444444444,
879
+ "grad_norm": 0.00017670773377176374,
880
+ "learning_rate": 3.736698395477857e-05,
881
+ "loss": 0.0,
882
+ "step": 1220
883
+ },
884
+ {
885
+ "epoch": 1.214320987654321,
886
+ "grad_norm": 0.0001492701849201694,
887
+ "learning_rate": 3.7116329076726455e-05,
888
+ "loss": 0.0,
889
+ "step": 1230
890
+ },
891
+ {
892
+ "epoch": 1.2241975308641975,
893
+ "grad_norm": 0.0001376178115606308,
894
+ "learning_rate": 3.6864072043303945e-05,
895
+ "loss": 0.0,
896
+ "step": 1240
897
+ },
898
+ {
899
+ "epoch": 1.234074074074074,
900
+ "grad_norm": 0.0001676941174082458,
901
+ "learning_rate": 3.661024621073291e-05,
902
+ "loss": 0.0071,
903
+ "step": 1250
904
+ },
905
+ {
906
+ "epoch": 1.2439506172839505,
907
+ "grad_norm": 0.00019377398712094873,
908
+ "learning_rate": 3.635488514267923e-05,
909
+ "loss": 0.0,
910
+ "step": 1260
911
+ },
912
+ {
913
+ "epoch": 1.2538271604938271,
914
+ "grad_norm": 0.0001418793690390885,
915
+ "learning_rate": 3.609802260581464e-05,
916
+ "loss": 0.0,
917
+ "step": 1270
918
+ },
919
+ {
920
+ "epoch": 1.2637037037037038,
921
+ "grad_norm": 0.00016002310439944267,
922
+ "learning_rate": 3.583969256535172e-05,
923
+ "loss": 0.0,
924
+ "step": 1280
925
+ },
926
+ {
927
+ "epoch": 1.2735802469135802,
928
+ "grad_norm": 0.00014259129238780588,
929
+ "learning_rate": 3.5579929180552676e-05,
930
+ "loss": 0.0,
931
+ "step": 1290
932
+ },
933
+ {
934
+ "epoch": 1.2834567901234568,
935
+ "grad_norm": 0.000264517730101943,
936
+ "learning_rate": 3.531876680021235e-05,
937
+ "loss": 0.0,
938
+ "step": 1300
939
+ },
940
+ {
941
+ "epoch": 1.2933333333333334,
942
+ "grad_norm": 0.00015705930127296597,
943
+ "learning_rate": 3.5056239958116336e-05,
944
+ "loss": 0.0,
945
+ "step": 1310
946
+ },
947
+ {
948
+ "epoch": 1.3032098765432099,
949
+ "grad_norm": 0.00022515420278068632,
950
+ "learning_rate": 3.479238336847444e-05,
951
+ "loss": 0.0,
952
+ "step": 1320
953
+ },
954
+ {
955
+ "epoch": 1.3130864197530865,
956
+ "grad_norm": 0.00014641444431617856,
957
+ "learning_rate": 3.4527231921330464e-05,
958
+ "loss": 0.0,
959
+ "step": 1330
960
+ },
961
+ {
962
+ "epoch": 1.322962962962963,
963
+ "grad_norm": 9.774778300197795e-05,
964
+ "learning_rate": 3.426082067794863e-05,
965
+ "loss": 0.0,
966
+ "step": 1340
967
+ },
968
+ {
969
+ "epoch": 1.3328395061728395,
970
+ "grad_norm": 0.00012512759712990373,
971
+ "learning_rate": 3.399318486617734e-05,
972
+ "loss": 0.0,
973
+ "step": 1350
974
+ },
975
+ {
976
+ "epoch": 1.342716049382716,
977
+ "grad_norm": 0.00013536612095776945,
978
+ "learning_rate": 3.372435987579108e-05,
979
+ "loss": 0.0,
980
+ "step": 1360
981
+ },
982
+ {
983
+ "epoch": 1.3525925925925926,
984
+ "grad_norm": 0.00013231099001131952,
985
+ "learning_rate": 3.345438125381065e-05,
986
+ "loss": 0.0,
987
+ "step": 1370
988
+ },
989
+ {
990
+ "epoch": 1.3624691358024692,
991
+ "grad_norm": 0.00011779498163377866,
992
+ "learning_rate": 3.318328469980285e-05,
993
+ "loss": 0.0,
994
+ "step": 1380
995
+ },
996
+ {
997
+ "epoch": 1.3723456790123456,
998
+ "grad_norm": 0.0001909748971229419,
999
+ "learning_rate": 3.291110606115984e-05,
1000
+ "loss": 0.0,
1001
+ "step": 1390
1002
+ },
1003
+ {
1004
+ "epoch": 1.3822222222222222,
1005
+ "grad_norm": 0.0001980375382117927,
1006
+ "learning_rate": 3.263788132835898e-05,
1007
+ "loss": 0.0,
1008
+ "step": 1400
1009
+ },
1010
+ {
1011
+ "epoch": 1.3920987654320989,
1012
+ "grad_norm": 0.00011503389396239072,
1013
+ "learning_rate": 3.236364663020384e-05,
1014
+ "loss": 0.0,
1015
+ "step": 1410
1016
+ },
1017
+ {
1018
+ "epoch": 1.4019753086419753,
1019
+ "grad_norm": 0.00013515223690774292,
1020
+ "learning_rate": 3.208843822904679e-05,
1021
+ "loss": 0.0,
1022
+ "step": 1420
1023
+ },
1024
+ {
1025
+ "epoch": 1.411851851851852,
1026
+ "grad_norm": 0.00011641118908300996,
1027
+ "learning_rate": 3.1812292515994e-05,
1028
+ "loss": 0.0,
1029
+ "step": 1430
1030
+ },
1031
+ {
1032
+ "epoch": 1.4217283950617283,
1033
+ "grad_norm": 9.361303091282025e-05,
1034
+ "learning_rate": 3.1535246006093414e-05,
1035
+ "loss": 0.0001,
1036
+ "step": 1440
1037
+ },
1038
+ {
1039
+ "epoch": 1.431604938271605,
1040
+ "grad_norm": 0.00013731363287661225,
1041
+ "learning_rate": 3.125733533350633e-05,
1042
+ "loss": 0.0,
1043
+ "step": 1450
1044
+ },
1045
+ {
1046
+ "epoch": 1.4414814814814814,
1047
+ "grad_norm": 0.000537275685928762,
1048
+ "learning_rate": 3.097859724666322e-05,
1049
+ "loss": 0.0026,
1050
+ "step": 1460
1051
+ },
1052
+ {
1053
+ "epoch": 1.451358024691358,
1054
+ "grad_norm": 0.4905455708503723,
1055
+ "learning_rate": 3.069906860340443e-05,
1056
+ "loss": 0.0029,
1057
+ "step": 1470
1058
+ },
1059
+ {
1060
+ "epoch": 1.4612345679012346,
1061
+ "grad_norm": 0.004352442920207977,
1062
+ "learning_rate": 3.041878636610644e-05,
1063
+ "loss": 0.0001,
1064
+ "step": 1480
1065
+ },
1066
+ {
1067
+ "epoch": 1.471111111111111,
1068
+ "grad_norm": 0.0011432188330218196,
1069
+ "learning_rate": 3.0137787596794304e-05,
1070
+ "loss": 0.0,
1071
+ "step": 1490
1072
+ },
1073
+ {
1074
+ "epoch": 1.4809876543209877,
1075
+ "grad_norm": 0.0010975116165354848,
1076
+ "learning_rate": 2.9856109452240877e-05,
1077
+ "loss": 0.0,
1078
+ "step": 1500
1079
+ },
1080
+ {
1081
+ "epoch": 1.4809876543209877,
1082
+ "eval_nlpcc25_task1_dev_accuracy": 0.9998015873015873,
1083
+ "eval_nlpcc25_task1_dev_loss": 0.0016044721705839038,
1084
+ "eval_nlpcc25_task1_dev_runtime": 2528.9364,
1085
+ "eval_nlpcc25_task1_dev_samples_per_second": 1.107,
1086
+ "eval_nlpcc25_task1_dev_steps_per_second": 0.277,
1087
+ "step": 1500
1088
+ }
1089
+ ],
1090
+ "logging_steps": 10,
1091
+ "max_steps": 3036,
1092
+ "num_input_tokens_seen": 0,
1093
+ "num_train_epochs": 3,
1094
+ "save_steps": 500,
1095
+ "stateful_callbacks": {
1096
+ "TrainerControl": {
1097
+ "args": {
1098
+ "should_epoch_stop": false,
1099
+ "should_evaluate": false,
1100
+ "should_log": false,
1101
+ "should_save": true,
1102
+ "should_training_stop": false
1103
+ },
1104
+ "attributes": {}
1105
+ }
1106
+ },
1107
+ "total_flos": 8.610691447990518e+17,
1108
+ "train_batch_size": 4,
1109
+ "trial_name": null,
1110
+ "trial_params": null
1111
+ }
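
`trainer_state.json` is plain JSON, so the run recorded above (train loss collapsing from ~4.2 to near zero within the first ~100 steps; dev accuracy ≥ 0.9995 at each 500-step evaluation) can be pulled straight out of `log_history`. A small sketch, not part of the upload, assuming the path below:

```python
# Sketch: extract the logged train loss and the periodic dev-set accuracy.
import json

with open("Qwen2.5-7B-Instruct-lora-2/checkpoint-1500/trainer_state.json") as f:
    state = json.load(f)

train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_nlpcc25_task1_dev_accuracy"])
         for e in state["log_history"] if "eval_nlpcc25_task1_dev_accuracy" in e]

print(train[:3])  # [(10, 4.2059), (20, 4.1463), (30, 4.1087)]
print(evals)      # [(500, 0.9995...), (1000, 0.9996...), (1500, 0.9998...)]
```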
Qwen2.5-7B-Instruct-lora-2/checkpoint-1500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3bc27925817800cee3170b92f598d1cfe9fce4b0491f781c2f4977cfd2644a9a
+ size 5752
Qwen2.5-7B-Instruct-lora-2/checkpoint-1500/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
Qwen2.5-7B-Instruct-lora-2/checkpoint-2000/README.md ADDED
@@ -0,0 +1,202 @@
+ ---
+ base_model: /data/zhaoguoyu/Experiments/mgtd-sys/detector/ckpt/Qwen2.5-7B-Instruct
+ library_name: peft
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.15.0
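
The template's "How to Get Started with the Model" section above is still a placeholder. Beyond attaching the adapter with `PeftModel` (see the sketch after checkpoint-1500's adapter_config.json), the adapter can also be folded into the base weights for deployment without a `peft` dependency; a hedged sketch, again assuming the Hub id stands in for the local base-model path:

```python
# Sketch: merge the rank-8 adapter into the base weights and save a standalone model.
import torch
from transformers import AutoModelForCausalLM
from peft import PeftModel

BASE = "Qwen/Qwen2.5-7B-Instruct"  # assumption: stand-in for the local base_model path
base = AutoModelForCausalLM.from_pretrained(BASE, torch_dtype=torch.bfloat16)
model = PeftModel.from_pretrained(base, "Qwen2.5-7B-Instruct-lora-2/checkpoint-2000")
merged = model.merge_and_unload()  # bakes the low-rank deltas into the frozen weights
merged.save_pretrained("Qwen2.5-7B-Instruct-lora-2-merged")
```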
Qwen2.5-7B-Instruct-lora-2/checkpoint-2000/adapter_config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "/data/zhaoguoyu/Experiments/mgtd-sys/detector/ckpt/Qwen2.5-7B-Instruct",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "k_proj",
+ "v_proj",
+ "up_proj",
+ "down_proj",
+ "o_proj",
+ "gate_proj",
+ "q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
Qwen2.5-7B-Instruct-lora-2/checkpoint-2000/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a4b687877fe9030563f9e59d9d42f63279b4329498ff477bf08180340718c56e
+ size 80792096
Qwen2.5-7B-Instruct-lora-2/checkpoint-2000/added_tokens.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "</tool_call>": 151658,
+ "<tool_call>": 151657,
+ "<|box_end|>": 151649,
+ "<|box_start|>": 151648,
+ "<|endoftext|>": 151643,
+ "<|file_sep|>": 151664,
+ "<|fim_middle|>": 151660,
+ "<|fim_pad|>": 151662,
+ "<|fim_prefix|>": 151659,
+ "<|fim_suffix|>": 151661,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644,
+ "<|image_pad|>": 151655,
+ "<|object_ref_end|>": 151647,
+ "<|object_ref_start|>": 151646,
+ "<|quad_end|>": 151651,
+ "<|quad_start|>": 151650,
+ "<|repo_name|>": 151663,
+ "<|video_pad|>": 151656,
+ "<|vision_end|>": 151653,
+ "<|vision_pad|>": 151654,
+ "<|vision_start|>": 151652
+ }
Qwen2.5-7B-Instruct-lora-2/checkpoint-2000/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
Qwen2.5-7B-Instruct-lora-2/checkpoint-2000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1d3fbbe54d5fcaac3f48dce4e4e215c54e8a6f303133dae7727fed45dbf732b6
+ size 161810282
Qwen2.5-7B-Instruct-lora-2/checkpoint-2000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c062f7f375beded48b5337f5a3f3a5cb38807fa3e85dbf3e294c0ab6b627bfc2
+ size 14244
Qwen2.5-7B-Instruct-lora-2/checkpoint-2000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f42ba79e7d828e6bd9344c98bfd844a063f0f6271f1c6145ceffd59a0728e79
+ size 1064
Qwen2.5-7B-Instruct-lora-2/checkpoint-2000/special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "eos_token": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
Qwen2.5-7B-Instruct-lora-2/checkpoint-2000/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa
+ size 11421896
Qwen2.5-7B-Instruct-lora-2/checkpoint-2000/tokenizer_config.json ADDED
@@ -0,0 +1,209 @@
+ {
+ "add_bos_token": false,
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|im_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151645": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151646": {
+ "content": "<|object_ref_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|object_ref_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151648": {
+ "content": "<|box_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151649": {
+ "content": "<|box_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "<tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "</tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "bos_token": null,
+ "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|im_end|>",
+ "errors": "replace",
+ "extra_special_tokens": {},
+ "model_max_length": 131072,
+ "pad_token": "<|endoftext|>",
+ "padding_side": "right",
+ "split_special_tokens": false,
+ "tokenizer_class": "Qwen2Tokenizer",
+ "unk_token": null
+ }
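
Note: the tokenizer config above pins `eos_token` to `<|im_end|>`, pads on the right with `<|endoftext|>`, and bundles the Qwen ChatML chat template. A minimal sketch of loading the saved tokenizer and rendering a prompt with that template (the local checkpoint path is an assumption, adjust to where you downloaded the files):

```python
# Sketch: load the saved tokenizer and render a ChatML prompt.
# Assumes the checkpoint directory has been downloaded locally.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Qwen2.5-7B-Instruct-lora-2/checkpoint-2000")

messages = [{"role": "user", "content": "Is this text machine-generated?"}]
# Uses the chat_template stored in tokenizer_config.json and appends the
# trailing '<|im_start|>assistant\n' generation prompt.
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)
```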
Qwen2.5-7B-Instruct-lora-2/checkpoint-2000/trainer_state.json ADDED
@@ -0,0 +1,1470 @@
+ {
+ "best_global_step": null,
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.9748148148148148,
+ "eval_steps": 500,
+ "global_step": 2000,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.009876543209876543,
+ "grad_norm": 7.599112033843994,
+ "learning_rate": 1.6447368421052632e-06,
+ "loss": 4.2059,
+ "step": 10
+ },
+ {
+ "epoch": 0.019753086419753086,
+ "grad_norm": 6.403433322906494,
+ "learning_rate": 3.2894736842105265e-06,
+ "loss": 4.1463,
+ "step": 20
+ },
+ {
+ "epoch": 0.02962962962962963,
+ "grad_norm": 9.776185989379883,
+ "learning_rate": 4.9342105263157895e-06,
+ "loss": 4.1087,
+ "step": 30
+ },
+ {
+ "epoch": 0.03950617283950617,
+ "grad_norm": 5.836405277252197,
+ "learning_rate": 6.578947368421053e-06,
+ "loss": 3.7657,
+ "step": 40
+ },
+ {
+ "epoch": 0.04938271604938271,
+ "grad_norm": 4.362349033355713,
+ "learning_rate": 8.223684210526317e-06,
+ "loss": 3.2134,
+ "step": 50
+ },
+ {
+ "epoch": 0.05925925925925926,
+ "grad_norm": 5.1398701667785645,
+ "learning_rate": 9.868421052631579e-06,
+ "loss": 2.6238,
+ "step": 60
+ },
+ {
+ "epoch": 0.0691358024691358,
+ "grad_norm": 8.642521858215332,
+ "learning_rate": 1.1513157894736843e-05,
+ "loss": 1.5929,
+ "step": 70
+ },
+ {
+ "epoch": 0.07901234567901234,
+ "grad_norm": 1.0219271183013916,
+ "learning_rate": 1.3157894736842106e-05,
+ "loss": 0.1491,
+ "step": 80
+ },
+ {
+ "epoch": 0.08888888888888889,
+ "grad_norm": 0.4035845100879669,
+ "learning_rate": 1.4802631578947368e-05,
+ "loss": 0.0206,
+ "step": 90
+ },
+ {
+ "epoch": 0.09876543209876543,
+ "grad_norm": 0.6767832636833191,
+ "learning_rate": 1.6447368421052635e-05,
+ "loss": 0.0073,
+ "step": 100
+ },
+ {
+ "epoch": 0.10864197530864197,
+ "grad_norm": 0.014764077961444855,
+ "learning_rate": 1.8092105263157896e-05,
+ "loss": 0.0011,
+ "step": 110
+ },
+ {
+ "epoch": 0.11851851851851852,
+ "grad_norm": 0.15152056515216827,
+ "learning_rate": 1.9736842105263158e-05,
+ "loss": 0.0005,
+ "step": 120
+ },
+ {
+ "epoch": 0.12839506172839507,
+ "grad_norm": 0.06782546639442444,
+ "learning_rate": 2.1381578947368423e-05,
+ "loss": 0.0014,
+ "step": 130
+ },
+ {
+ "epoch": 0.1382716049382716,
+ "grad_norm": 0.03198217228055,
+ "learning_rate": 2.3026315789473685e-05,
+ "loss": 0.0009,
+ "step": 140
+ },
+ {
+ "epoch": 0.14814814814814814,
+ "grad_norm": 0.02691480703651905,
+ "learning_rate": 2.4671052631578947e-05,
+ "loss": 0.0024,
+ "step": 150
+ },
+ {
+ "epoch": 0.1580246913580247,
+ "grad_norm": 0.0057847509160637856,
+ "learning_rate": 2.6315789473684212e-05,
+ "loss": 0.0001,
+ "step": 160
+ },
+ {
+ "epoch": 0.16790123456790124,
+ "grad_norm": 0.003663571085780859,
+ "learning_rate": 2.7960526315789477e-05,
+ "loss": 0.0002,
+ "step": 170
+ },
+ {
+ "epoch": 0.17777777777777778,
+ "grad_norm": 0.004447426181286573,
+ "learning_rate": 2.9605263157894735e-05,
+ "loss": 0.0003,
+ "step": 180
+ },
+ {
+ "epoch": 0.18765432098765433,
+ "grad_norm": 0.10373878479003906,
+ "learning_rate": 3.125e-05,
+ "loss": 0.0006,
+ "step": 190
+ },
+ {
+ "epoch": 0.19753086419753085,
+ "grad_norm": 0.002619905164465308,
+ "learning_rate": 3.289473684210527e-05,
+ "loss": 0.0006,
+ "step": 200
+ },
+ {
+ "epoch": 0.2074074074074074,
+ "grad_norm": 0.007042820565402508,
+ "learning_rate": 3.4539473684210524e-05,
+ "loss": 0.0015,
+ "step": 210
+ },
+ {
+ "epoch": 0.21728395061728395,
+ "grad_norm": 0.0016206403961405158,
+ "learning_rate": 3.618421052631579e-05,
+ "loss": 0.0049,
+ "step": 220
+ },
+ {
+ "epoch": 0.2271604938271605,
+ "grad_norm": 0.001877307309769094,
+ "learning_rate": 3.7828947368421054e-05,
+ "loss": 0.0001,
+ "step": 230
+ },
+ {
+ "epoch": 0.23703703703703705,
+ "grad_norm": 0.003795365337282419,
+ "learning_rate": 3.9473684210526316e-05,
+ "loss": 0.0001,
+ "step": 240
+ },
+ {
+ "epoch": 0.24691358024691357,
+ "grad_norm": 0.008357529528439045,
+ "learning_rate": 4.111842105263158e-05,
+ "loss": 0.004,
+ "step": 250
+ },
+ {
+ "epoch": 0.25679012345679014,
+ "grad_norm": 0.026294540613889694,
+ "learning_rate": 4.2763157894736847e-05,
+ "loss": 0.0002,
+ "step": 260
+ },
+ {
+ "epoch": 0.26666666666666666,
+ "grad_norm": 0.08903225511312485,
+ "learning_rate": 4.440789473684211e-05,
+ "loss": 0.0005,
+ "step": 270
+ },
+ {
+ "epoch": 0.2765432098765432,
+ "grad_norm": 0.05173858627676964,
+ "learning_rate": 4.605263157894737e-05,
+ "loss": 0.0003,
+ "step": 280
+ },
+ {
+ "epoch": 0.28641975308641976,
+ "grad_norm": 0.036562711000442505,
+ "learning_rate": 4.769736842105263e-05,
+ "loss": 0.0012,
+ "step": 290
+ },
+ {
+ "epoch": 0.2962962962962963,
+ "grad_norm": 0.008022695779800415,
+ "learning_rate": 4.9342105263157894e-05,
+ "loss": 0.0036,
+ "step": 300
+ },
+ {
+ "epoch": 0.30617283950617286,
+ "grad_norm": 0.0021413813810795546,
+ "learning_rate": 4.999940495590975e-05,
+ "loss": 0.0006,
+ "step": 310
+ },
+ {
+ "epoch": 0.3160493827160494,
+ "grad_norm": 0.015379426069557667,
+ "learning_rate": 4.999576867793816e-05,
+ "loss": 0.0092,
+ "step": 320
+ },
+ {
+ "epoch": 0.32592592592592595,
+ "grad_norm": 0.003894766792654991,
+ "learning_rate": 4.9988827182291254e-05,
+ "loss": 0.001,
+ "step": 330
+ },
+ {
+ "epoch": 0.3358024691358025,
+ "grad_norm": 0.0016231742920354009,
+ "learning_rate": 4.997858138685056e-05,
+ "loss": 0.0001,
+ "step": 340
+ },
+ {
+ "epoch": 0.345679012345679,
+ "grad_norm": 0.0015260468935593963,
+ "learning_rate": 4.996503264642876e-05,
+ "loss": 0.0016,
+ "step": 350
+ },
+ {
+ "epoch": 0.35555555555555557,
+ "grad_norm": 0.03330058977007866,
+ "learning_rate": 4.994818275259052e-05,
+ "loss": 0.0013,
+ "step": 360
+ },
+ {
+ "epoch": 0.3654320987654321,
+ "grad_norm": 0.002741268603131175,
+ "learning_rate": 4.992803393341563e-05,
+ "loss": 0.0017,
+ "step": 370
+ },
+ {
+ "epoch": 0.37530864197530867,
+ "grad_norm": 0.004990574903786182,
+ "learning_rate": 4.9904588853204365e-05,
+ "loss": 0.0001,
+ "step": 380
+ },
+ {
+ "epoch": 0.3851851851851852,
+ "grad_norm": 0.013514043763279915,
+ "learning_rate": 4.9877850612125173e-05,
+ "loss": 0.0001,
+ "step": 390
+ },
+ {
+ "epoch": 0.3950617283950617,
+ "grad_norm": 0.0015880317660048604,
+ "learning_rate": 4.984782274580476e-05,
+ "loss": 0.0001,
+ "step": 400
+ },
+ {
+ "epoch": 0.4049382716049383,
+ "grad_norm": 0.002702023135498166,
+ "learning_rate": 4.981450922486053e-05,
+ "loss": 0.0001,
+ "step": 410
+ },
+ {
+ "epoch": 0.4148148148148148,
+ "grad_norm": 0.0020927605219185352,
+ "learning_rate": 4.977791445437559e-05,
+ "loss": 0.0001,
+ "step": 420
+ },
+ {
+ "epoch": 0.4246913580246914,
+ "grad_norm": 0.0007626357255503535,
+ "learning_rate": 4.973804327331625e-05,
+ "loss": 0.0002,
+ "step": 430
+ },
+ {
+ "epoch": 0.4345679012345679,
+ "grad_norm": 0.0026151000056415796,
+ "learning_rate": 4.969490095389213e-05,
+ "loss": 0.0,
+ "step": 440
+ },
+ {
+ "epoch": 0.4444444444444444,
+ "grad_norm": 0.0007740291184745729,
+ "learning_rate": 4.964849320085905e-05,
+ "loss": 0.0,
+ "step": 450
+ },
+ {
+ "epoch": 0.454320987654321,
+ "grad_norm": 0.0005287025705911219,
+ "learning_rate": 4.9598826150764656e-05,
+ "loss": 0.0,
+ "step": 460
+ },
+ {
+ "epoch": 0.4641975308641975,
+ "grad_norm": 0.0004778858565259725,
+ "learning_rate": 4.9545906371137e-05,
+ "loss": 0.007,
+ "step": 470
+ },
+ {
+ "epoch": 0.4740740740740741,
+ "grad_norm": 0.0007034554146230221,
+ "learning_rate": 4.9489740859616094e-05,
+ "loss": 0.0,
+ "step": 480
+ },
+ {
+ "epoch": 0.4839506172839506,
+ "grad_norm": 0.0006262129172682762,
+ "learning_rate": 4.9430337043028604e-05,
+ "loss": 0.0,
+ "step": 490
+ },
+ {
+ "epoch": 0.49382716049382713,
+ "grad_norm": 0.00045443352428264916,
+ "learning_rate": 4.9367702776405806e-05,
+ "loss": 0.0,
+ "step": 500
+ },
+ {
+ "epoch": 0.49382716049382713,
+ "eval_nlpcc25_task1_dev_accuracy": 0.9995238095238096,
+ "eval_nlpcc25_task1_dev_loss": 0.0022831459064036608,
+ "eval_nlpcc25_task1_dev_runtime": 2528.8573,
+ "eval_nlpcc25_task1_dev_samples_per_second": 1.107,
+ "eval_nlpcc25_task1_dev_steps_per_second": 0.277,
+ "step": 500
+ },
+ {
+ "epoch": 0.5037037037037037,
+ "grad_norm": 0.0003733730991370976,
+ "learning_rate": 4.930184634194488e-05,
+ "loss": 0.0,
+ "step": 510
+ },
+ {
+ "epoch": 0.5135802469135803,
+ "grad_norm": 0.0003028757928404957,
+ "learning_rate": 4.923277644791378e-05,
+ "loss": 0.0,
+ "step": 520
+ },
+ {
+ "epoch": 0.5234567901234568,
+ "grad_norm": 0.00028728952747769654,
+ "learning_rate": 4.9160502227499704e-05,
+ "loss": 0.0,
+ "step": 530
+ },
+ {
+ "epoch": 0.5333333333333333,
+ "grad_norm": 0.0003644278913270682,
+ "learning_rate": 4.908503323760143e-05,
+ "loss": 0.0,
+ "step": 540
+ },
+ {
+ "epoch": 0.5432098765432098,
+ "grad_norm": 0.0005966073367744684,
+ "learning_rate": 4.900637945756554e-05,
+ "loss": 0.0,
+ "step": 550
+ },
+ {
+ "epoch": 0.5530864197530864,
+ "grad_norm": 0.000367969973012805,
+ "learning_rate": 4.892455128786692e-05,
+ "loss": 0.0,
+ "step": 560
+ },
+ {
+ "epoch": 0.562962962962963,
+ "grad_norm": 0.0002780740906018764,
+ "learning_rate": 4.8839559548733436e-05,
+ "loss": 0.0077,
+ "step": 570
+ },
+ {
+ "epoch": 0.5728395061728395,
+ "grad_norm": 0.00031342540751211345,
+ "learning_rate": 4.875141547871519e-05,
+ "loss": 0.0,
+ "step": 580
+ },
+ {
+ "epoch": 0.582716049382716,
+ "grad_norm": 0.000353014562278986,
+ "learning_rate": 4.866013073319841e-05,
+ "loss": 0.0,
+ "step": 590
+ },
+ {
+ "epoch": 0.5925925925925926,
+ "grad_norm": 0.00029066085699014366,
+ "learning_rate": 4.856571738286426e-05,
+ "loss": 0.0,
+ "step": 600
+ },
+ {
+ "epoch": 0.6024691358024692,
+ "grad_norm": 0.00024000390840228647,
+ "learning_rate": 4.8468187912092744e-05,
+ "loss": 0.0,
+ "step": 610
+ },
+ {
+ "epoch": 0.6123456790123457,
+ "grad_norm": 0.0003632376901805401,
+ "learning_rate": 4.836755521731183e-05,
+ "loss": 0.0,
+ "step": 620
+ },
+ {
+ "epoch": 0.6222222222222222,
+ "grad_norm": 0.00029751085094176233,
+ "learning_rate": 4.826383260529221e-05,
+ "loss": 0.0002,
+ "step": 630
+ },
+ {
+ "epoch": 0.6320987654320988,
+ "grad_norm": 3.190973997116089,
+ "learning_rate": 4.815703379138765e-05,
+ "loss": 0.0017,
+ "step": 640
+ },
+ {
+ "epoch": 0.6419753086419753,
+ "grad_norm": 0.18289895355701447,
+ "learning_rate": 4.804717289772147e-05,
+ "loss": 0.0043,
+ "step": 650
+ },
+ {
+ "epoch": 0.6518518518518519,
+ "grad_norm": 0.2459922581911087,
+ "learning_rate": 4.7934264451319105e-05,
+ "loss": 0.003,
+ "step": 660
+ },
+ {
+ "epoch": 0.6617283950617284,
+ "grad_norm": 0.004725860431790352,
+ "learning_rate": 4.7818323382187214e-05,
+ "loss": 0.0002,
+ "step": 670
+ },
+ {
+ "epoch": 0.671604938271605,
+ "grad_norm": 0.004160716664046049,
+ "learning_rate": 4.769936502133946e-05,
+ "loss": 0.0002,
+ "step": 680
+ },
+ {
+ "epoch": 0.6814814814814815,
+ "grad_norm": 0.002101948019117117,
+ "learning_rate": 4.7577405098769256e-05,
+ "loss": 0.0001,
+ "step": 690
+ },
+ {
+ "epoch": 0.691358024691358,
+ "grad_norm": 0.0015966896899044514,
+ "learning_rate": 4.74524597413698e-05,
+ "loss": 0.0001,
+ "step": 700
+ },
+ {
+ "epoch": 0.7012345679012346,
+ "grad_norm": 0.0010343171888962388,
+ "learning_rate": 4.732454547080159e-05,
+ "loss": 0.0,
+ "step": 710
+ },
+ {
+ "epoch": 0.7111111111111111,
+ "grad_norm": 0.0007301874575205147,
+ "learning_rate": 4.7193679201307705e-05,
+ "loss": 0.0,
+ "step": 720
+ },
+ {
+ "epoch": 0.7209876543209877,
+ "grad_norm": 0.0008054127101786435,
+ "learning_rate": 4.705987823747731e-05,
+ "loss": 0.0,
+ "step": 730
+ },
+ {
+ "epoch": 0.7308641975308642,
+ "grad_norm": 0.0006862932350486517,
+ "learning_rate": 4.692316027195733e-05,
+ "loss": 0.0001,
+ "step": 740
+ },
+ {
+ "epoch": 0.7407407407407407,
+ "grad_norm": 0.0005865858984179795,
+ "learning_rate": 4.678354338311306e-05,
+ "loss": 0.0,
+ "step": 750
+ },
+ {
+ "epoch": 0.7506172839506173,
+ "grad_norm": 0.0004720942524727434,
+ "learning_rate": 4.6641046032637516e-05,
+ "loss": 0.0,
+ "step": 760
+ },
+ {
+ "epoch": 0.7604938271604939,
+ "grad_norm": 0.0004310516524128616,
+ "learning_rate": 4.6495687063110325e-05,
+ "loss": 0.0,
+ "step": 770
+ },
+ {
+ "epoch": 0.7703703703703704,
+ "grad_norm": 0.00043949694372713566,
+ "learning_rate": 4.634748569550612e-05,
+ "loss": 0.0,
+ "step": 780
+ },
+ {
+ "epoch": 0.7802469135802469,
+ "grad_norm": 0.0004090226429980248,
+ "learning_rate": 4.61964615266529e-05,
+ "loss": 0.0,
+ "step": 790
+ },
+ {
+ "epoch": 0.7901234567901234,
+ "grad_norm": 0.00037527765380218625,
+ "learning_rate": 4.6042634526640755e-05,
+ "loss": 0.0,
+ "step": 800
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 1.0162104368209839,
+ "learning_rate": 4.588602503618118e-05,
+ "loss": 0.0021,
+ "step": 810
+ },
+ {
+ "epoch": 0.8098765432098766,
+ "grad_norm": 1.3063777685165405,
+ "learning_rate": 4.572665376391741e-05,
+ "loss": 0.0006,
+ "step": 820
+ },
+ {
+ "epoch": 0.8197530864197531,
+ "grad_norm": 0.9236663579940796,
+ "learning_rate": 4.55645417836861e-05,
+ "loss": 0.0058,
+ "step": 830
+ },
+ {
+ "epoch": 0.8296296296296296,
+ "grad_norm": 0.01748817227780819,
+ "learning_rate": 4.5399710531730685e-05,
+ "loss": 0.0025,
+ "step": 840
+ },
+ {
+ "epoch": 0.8395061728395061,
+ "grad_norm": 0.003460384439677,
+ "learning_rate": 4.5232181803866886e-05,
+ "loss": 0.0005,
+ "step": 850
+ },
+ {
+ "epoch": 0.8493827160493828,
+ "grad_norm": 0.0013236172962933779,
+ "learning_rate": 4.506197775260055e-05,
+ "loss": 0.0001,
+ "step": 860
+ },
+ {
+ "epoch": 0.8592592592592593,
+ "grad_norm": 0.0011838016798719764,
+ "learning_rate": 4.4889120884198495e-05,
+ "loss": 0.0,
+ "step": 870
+ },
+ {
+ "epoch": 0.8691358024691358,
+ "grad_norm": 0.0007675419910810888,
+ "learning_rate": 4.47136340557124e-05,
+ "loss": 0.0,
+ "step": 880
+ },
+ {
+ "epoch": 0.8790123456790123,
+ "grad_norm": 0.0007905301754362881,
+ "learning_rate": 4.453554047195644e-05,
+ "loss": 0.0,
+ "step": 890
+ },
+ {
+ "epoch": 0.8888888888888888,
+ "grad_norm": 0.0005061720148660243,
+ "learning_rate": 4.435486368243888e-05,
+ "loss": 0.0,
+ "step": 900
+ },
+ {
+ "epoch": 0.8987654320987655,
+ "grad_norm": 0.0005205022753216326,
+ "learning_rate": 4.417162757824808e-05,
+ "loss": 0.0,
+ "step": 910
+ },
+ {
+ "epoch": 0.908641975308642,
+ "grad_norm": 0.0004827196535188705,
+ "learning_rate": 4.398585638889335e-05,
+ "loss": 0.0,
+ "step": 920
+ },
+ {
+ "epoch": 0.9185185185185185,
+ "grad_norm": 0.000379192759282887,
+ "learning_rate": 4.379757467910113e-05,
+ "loss": 0.0,
+ "step": 930
+ },
+ {
+ "epoch": 0.928395061728395,
+ "grad_norm": 0.00045221165055409074,
+ "learning_rate": 4.3606807345566616e-05,
+ "loss": 0.0,
+ "step": 940
+ },
+ {
+ "epoch": 0.9382716049382716,
+ "grad_norm": 0.0005233365809544921,
+ "learning_rate": 4.341357961366181e-05,
+ "loss": 0.0,
+ "step": 950
+ },
+ {
+ "epoch": 0.9481481481481482,
+ "grad_norm": 0.0002833159815054387,
+ "learning_rate": 4.3217917034099823e-05,
+ "loss": 0.0,
+ "step": 960
+ },
+ {
+ "epoch": 0.9580246913580247,
+ "grad_norm": 0.00043191161239519715,
+ "learning_rate": 4.301984547955635e-05,
+ "loss": 0.0,
+ "step": 970
+ },
+ {
+ "epoch": 0.9679012345679012,
+ "grad_norm": 0.0004523663374129683,
+ "learning_rate": 4.281939114124843e-05,
+ "loss": 0.0,
+ "step": 980
+ },
+ {
+ "epoch": 0.9777777777777777,
+ "grad_norm": 0.00025242462288588285,
+ "learning_rate": 4.261658052547124e-05,
+ "loss": 0.0,
+ "step": 990
+ },
+ {
+ "epoch": 0.9876543209876543,
+ "grad_norm": 0.0006344653083942831,
+ "learning_rate": 4.241144045009304e-05,
+ "loss": 0.0,
+ "step": 1000
+ },
+ {
+ "epoch": 0.9876543209876543,
+ "eval_nlpcc25_task1_dev_accuracy": 0.9996031746031745,
+ "eval_nlpcc25_task1_dev_loss": 0.002990948036313057,
+ "eval_nlpcc25_task1_dev_runtime": 2528.4874,
+ "eval_nlpcc25_task1_dev_samples_per_second": 1.107,
+ "eval_nlpcc25_task1_dev_steps_per_second": 0.277,
+ "step": 1000
+ },
+ {
+ "epoch": 0.9975308641975309,
+ "grad_norm": 0.0005013208137825131,
+ "learning_rate": 4.22039980410091e-05,
+ "loss": 0.0,
+ "step": 1010
+ },
+ {
+ "epoch": 1.0069135802469136,
+ "grad_norm": 0.0003089129750151187,
+ "learning_rate": 4.199428072855476e-05,
+ "loss": 0.0,
+ "step": 1020
+ },
+ {
+ "epoch": 1.0167901234567902,
+ "grad_norm": 0.00048567031626589596,
+ "learning_rate": 4.1782316243878314e-05,
+ "loss": 0.0,
+ "step": 1030
+ },
+ {
+ "epoch": 1.0266666666666666,
+ "grad_norm": 0.0002530143829062581,
+ "learning_rate": 4.1568132615274094e-05,
+ "loss": 0.0,
+ "step": 1040
+ },
+ {
+ "epoch": 1.0365432098765432,
+ "grad_norm": 0.00028351074433885515,
+ "learning_rate": 4.1351758164476225e-05,
+ "loss": 0.0001,
+ "step": 1050
+ },
+ {
+ "epoch": 1.0464197530864197,
+ "grad_norm": 0.0005833515897393227,
+ "learning_rate": 4.113322150291365e-05,
+ "loss": 0.0,
+ "step": 1060
+ },
+ {
+ "epoch": 1.0562962962962963,
+ "grad_norm": 0.0011448035947978497,
+ "learning_rate": 4.091255152792678e-05,
+ "loss": 0.0028,
+ "step": 1070
+ },
+ {
+ "epoch": 1.066172839506173,
+ "grad_norm": 0.025126419961452484,
+ "learning_rate": 4.0689777418946394e-05,
+ "loss": 0.0002,
+ "step": 1080
+ },
+ {
+ "epoch": 1.0760493827160493,
+ "grad_norm": 0.0005254483548924327,
+ "learning_rate": 4.0464928633635196e-05,
+ "loss": 0.0006,
+ "step": 1090
+ },
+ {
+ "epoch": 1.085925925925926,
+ "grad_norm": 0.0009111575200222433,
+ "learning_rate": 4.023803490399258e-05,
+ "loss": 0.0,
+ "step": 1100
+ },
+ {
+ "epoch": 1.0958024691358024,
+ "grad_norm": 0.000739308656193316,
+ "learning_rate": 4.000912623242318e-05,
+ "loss": 0.0,
+ "step": 1110
+ },
+ {
+ "epoch": 1.105679012345679,
+ "grad_norm": 0.0005893895868211985,
+ "learning_rate": 3.977823288776955e-05,
+ "loss": 0.0,
+ "step": 1120
+ },
+ {
+ "epoch": 1.1155555555555556,
+ "grad_norm": 0.00036915784585289657,
+ "learning_rate": 3.954538540130978e-05,
+ "loss": 0.0,
+ "step": 1130
+ },
+ {
+ "epoch": 1.125432098765432,
+ "grad_norm": 0.0005508523900061846,
+ "learning_rate": 3.9310614562720214e-05,
+ "loss": 0.0,
+ "step": 1140
+ },
+ {
+ "epoch": 1.1353086419753087,
+ "grad_norm": 0.0009192466968670487,
+ "learning_rate": 3.9073951416004143e-05,
+ "loss": 0.0002,
+ "step": 1150
+ },
+ {
+ "epoch": 1.145185185185185,
+ "grad_norm": 0.0004524670075625181,
+ "learning_rate": 3.8835427255386856e-05,
+ "loss": 0.0,
+ "step": 1160
+ },
+ {
+ "epoch": 1.1550617283950617,
+ "grad_norm": 0.000336712779244408,
+ "learning_rate": 3.859507362117748e-05,
+ "loss": 0.0,
+ "step": 1170
+ },
+ {
+ "epoch": 1.1649382716049383,
+ "grad_norm": 0.00038848226540721953,
+ "learning_rate": 3.8352922295598476e-05,
+ "loss": 0.0,
+ "step": 1180
+ },
+ {
+ "epoch": 1.1748148148148148,
+ "grad_norm": 0.0007016750751063228,
+ "learning_rate": 3.810900529858295e-05,
+ "loss": 0.0,
+ "step": 1190
+ },
+ {
+ "epoch": 1.1846913580246914,
+ "grad_norm": 0.00020055304048582911,
+ "learning_rate": 3.786335488354068e-05,
+ "loss": 0.0,
+ "step": 1200
+ },
+ {
+ "epoch": 1.194567901234568,
+ "grad_norm": 0.0006126250373199582,
+ "learning_rate": 3.7616003533093204e-05,
+ "loss": 0.0,
+ "step": 1210
+ },
+ {
+ "epoch": 1.2044444444444444,
+ "grad_norm": 0.00017670773377176374,
+ "learning_rate": 3.736698395477857e-05,
+ "loss": 0.0,
+ "step": 1220
+ },
+ {
+ "epoch": 1.214320987654321,
+ "grad_norm": 0.0001492701849201694,
+ "learning_rate": 3.7116329076726455e-05,
+ "loss": 0.0,
+ "step": 1230
+ },
+ {
+ "epoch": 1.2241975308641975,
+ "grad_norm": 0.0001376178115606308,
+ "learning_rate": 3.6864072043303945e-05,
+ "loss": 0.0,
+ "step": 1240
+ },
+ {
+ "epoch": 1.234074074074074,
+ "grad_norm": 0.0001676941174082458,
+ "learning_rate": 3.661024621073291e-05,
+ "loss": 0.0071,
+ "step": 1250
+ },
+ {
+ "epoch": 1.2439506172839505,
+ "grad_norm": 0.00019377398712094873,
+ "learning_rate": 3.635488514267923e-05,
+ "loss": 0.0,
+ "step": 1260
+ },
+ {
+ "epoch": 1.2538271604938271,
+ "grad_norm": 0.0001418793690390885,
+ "learning_rate": 3.609802260581464e-05,
+ "loss": 0.0,
+ "step": 1270
+ },
+ {
+ "epoch": 1.2637037037037038,
+ "grad_norm": 0.00016002310439944267,
+ "learning_rate": 3.583969256535172e-05,
+ "loss": 0.0,
+ "step": 1280
+ },
+ {
+ "epoch": 1.2735802469135802,
+ "grad_norm": 0.00014259129238780588,
+ "learning_rate": 3.5579929180552676e-05,
+ "loss": 0.0,
+ "step": 1290
+ },
+ {
+ "epoch": 1.2834567901234568,
+ "grad_norm": 0.000264517730101943,
+ "learning_rate": 3.531876680021235e-05,
+ "loss": 0.0,
+ "step": 1300
+ },
+ {
+ "epoch": 1.2933333333333334,
+ "grad_norm": 0.00015705930127296597,
+ "learning_rate": 3.5056239958116336e-05,
+ "loss": 0.0,
+ "step": 1310
+ },
+ {
+ "epoch": 1.3032098765432099,
+ "grad_norm": 0.00022515420278068632,
+ "learning_rate": 3.479238336847444e-05,
+ "loss": 0.0,
+ "step": 1320
+ },
+ {
+ "epoch": 1.3130864197530865,
+ "grad_norm": 0.00014641444431617856,
+ "learning_rate": 3.4527231921330464e-05,
+ "loss": 0.0,
+ "step": 1330
+ },
+ {
+ "epoch": 1.322962962962963,
+ "grad_norm": 9.774778300197795e-05,
+ "learning_rate": 3.426082067794863e-05,
+ "loss": 0.0,
+ "step": 1340
+ },
+ {
+ "epoch": 1.3328395061728395,
+ "grad_norm": 0.00012512759712990373,
+ "learning_rate": 3.399318486617734e-05,
+ "loss": 0.0,
+ "step": 1350
+ },
+ {
+ "epoch": 1.342716049382716,
+ "grad_norm": 0.00013536612095776945,
+ "learning_rate": 3.372435987579108e-05,
+ "loss": 0.0,
+ "step": 1360
+ },
+ {
+ "epoch": 1.3525925925925926,
+ "grad_norm": 0.00013231099001131952,
+ "learning_rate": 3.345438125381065e-05,
+ "loss": 0.0,
+ "step": 1370
+ },
+ {
+ "epoch": 1.3624691358024692,
+ "grad_norm": 0.00011779498163377866,
+ "learning_rate": 3.318328469980285e-05,
+ "loss": 0.0,
+ "step": 1380
+ },
+ {
+ "epoch": 1.3723456790123456,
+ "grad_norm": 0.0001909748971229419,
+ "learning_rate": 3.291110606115984e-05,
+ "loss": 0.0,
+ "step": 1390
+ },
+ {
+ "epoch": 1.3822222222222222,
+ "grad_norm": 0.0001980375382117927,
+ "learning_rate": 3.263788132835898e-05,
+ "loss": 0.0,
+ "step": 1400
+ },
+ {
+ "epoch": 1.3920987654320989,
+ "grad_norm": 0.00011503389396239072,
+ "learning_rate": 3.236364663020384e-05,
+ "loss": 0.0,
+ "step": 1410
+ },
+ {
+ "epoch": 1.4019753086419753,
+ "grad_norm": 0.00013515223690774292,
+ "learning_rate": 3.208843822904679e-05,
+ "loss": 0.0,
+ "step": 1420
+ },
+ {
+ "epoch": 1.411851851851852,
+ "grad_norm": 0.00011641118908300996,
+ "learning_rate": 3.1812292515994e-05,
+ "loss": 0.0,
+ "step": 1430
+ },
+ {
+ "epoch": 1.4217283950617283,
+ "grad_norm": 9.361303091282025e-05,
+ "learning_rate": 3.1535246006093414e-05,
+ "loss": 0.0001,
+ "step": 1440
+ },
+ {
+ "epoch": 1.431604938271605,
+ "grad_norm": 0.00013731363287661225,
+ "learning_rate": 3.125733533350633e-05,
+ "loss": 0.0,
+ "step": 1450
+ },
+ {
+ "epoch": 1.4414814814814814,
+ "grad_norm": 0.000537275685928762,
+ "learning_rate": 3.097859724666322e-05,
+ "loss": 0.0026,
+ "step": 1460
+ },
+ {
+ "epoch": 1.451358024691358,
+ "grad_norm": 0.4905455708503723,
+ "learning_rate": 3.069906860340443e-05,
+ "loss": 0.0029,
+ "step": 1470
+ },
+ {
+ "epoch": 1.4612345679012346,
+ "grad_norm": 0.004352442920207977,
+ "learning_rate": 3.041878636610644e-05,
+ "loss": 0.0001,
+ "step": 1480
+ },
+ {
+ "epoch": 1.471111111111111,
+ "grad_norm": 0.0011432188330218196,
+ "learning_rate": 3.0137787596794304e-05,
+ "loss": 0.0,
+ "step": 1490
+ },
+ {
+ "epoch": 1.4809876543209877,
+ "grad_norm": 0.0010975116165354848,
+ "learning_rate": 2.9856109452240877e-05,
+ "loss": 0.0,
+ "step": 1500
+ },
+ {
+ "epoch": 1.4809876543209877,
+ "eval_nlpcc25_task1_dev_accuracy": 0.9998015873015873,
+ "eval_nlpcc25_task1_dev_loss": 0.0016044721705839038,
+ "eval_nlpcc25_task1_dev_runtime": 2528.9364,
+ "eval_nlpcc25_task1_dev_samples_per_second": 1.107,
+ "eval_nlpcc25_task1_dev_steps_per_second": 0.277,
+ "step": 1500
+ },
+ {
+ "epoch": 1.4908641975308643,
+ "grad_norm": 0.0008672023541294038,
+ "learning_rate": 2.957378917905353e-05,
+ "loss": 0.0,
+ "step": 1510
+ },
+ {
+ "epoch": 1.5007407407407407,
+ "grad_norm": 0.000826246221549809,
+ "learning_rate": 2.929086410874902e-05,
+ "loss": 0.0,
+ "step": 1520
+ },
+ {
+ "epoch": 1.5106172839506171,
+ "grad_norm": 0.0006294779013842344,
+ "learning_rate": 2.900737165281707e-05,
+ "loss": 0.0,
+ "step": 1530
+ },
+ {
+ "epoch": 1.520493827160494,
+ "grad_norm": 0.0006735110655426979,
+ "learning_rate": 2.8723349297773444e-05,
+ "loss": 0.0,
+ "step": 1540
+ },
+ {
+ "epoch": 1.5303703703703704,
+ "grad_norm": 0.000603811233304441,
+ "learning_rate": 2.8438834600203006e-05,
+ "loss": 0.0,
+ "step": 1550
+ },
+ {
+ "epoch": 1.5402469135802468,
+ "grad_norm": 0.001000316347926855,
+ "learning_rate": 2.8153865181793636e-05,
+ "loss": 0.0021,
+ "step": 1560
+ },
+ {
+ "epoch": 1.5501234567901234,
+ "grad_norm": 0.0026612593792378902,
+ "learning_rate": 2.7868478724361423e-05,
+ "loss": 0.0001,
+ "step": 1570
+ },
+ {
+ "epoch": 1.56,
+ "grad_norm": 0.0036472557112574577,
+ "learning_rate": 2.758271296486802e-05,
+ "loss": 0.0001,
+ "step": 1580
+ },
+ {
+ "epoch": 1.5698765432098765,
+ "grad_norm": 0.0031513418070971966,
+ "learning_rate": 2.7296605690430544e-05,
+ "loss": 0.0001,
+ "step": 1590
+ },
+ {
+ "epoch": 1.579753086419753,
+ "grad_norm": 0.0007969654980115592,
+ "learning_rate": 2.7010194733325057e-05,
+ "loss": 0.0,
+ "step": 1600
+ },
+ {
+ "epoch": 1.5896296296296297,
+ "grad_norm": 0.0007987377466633916,
+ "learning_rate": 2.672351796598388e-05,
+ "loss": 0.0,
+ "step": 1610
+ },
+ {
+ "epoch": 1.5995061728395061,
+ "grad_norm": 0.0005811803857795894,
+ "learning_rate": 2.6436613295987738e-05,
+ "loss": 0.0,
+ "step": 1620
+ },
+ {
+ "epoch": 1.6093827160493828,
+ "grad_norm": 0.0005651491228491068,
+ "learning_rate": 2.6149518661053142e-05,
+ "loss": 0.0,
+ "step": 1630
+ },
+ {
+ "epoch": 1.6192592592592594,
+ "grad_norm": 0.0007077642367221415,
+ "learning_rate": 2.5862272024015883e-05,
+ "loss": 0.0,
+ "step": 1640
+ },
+ {
+ "epoch": 1.6291358024691358,
+ "grad_norm": 0.0007271085632964969,
+ "learning_rate": 2.557491136781116e-05,
+ "loss": 0.0,
+ "step": 1650
+ },
+ {
+ "epoch": 1.6390123456790122,
+ "grad_norm": 0.00046728411689400673,
+ "learning_rate": 2.5287474690451057e-05,
+ "loss": 0.0,
+ "step": 1660
+ },
+ {
+ "epoch": 1.6488888888888888,
+ "grad_norm": 0.0003735675709322095,
+ "learning_rate": 2.5e-05,
+ "loss": 0.0,
+ "step": 1670
+ },
+ {
+ "epoch": 1.6587654320987655,
+ "grad_norm": 0.00039456685772165656,
+ "learning_rate": 2.471252530954895e-05,
+ "loss": 0.0,
+ "step": 1680
+ },
+ {
+ "epoch": 1.668641975308642,
+ "grad_norm": 0.0005533050280064344,
+ "learning_rate": 2.4425088632188842e-05,
+ "loss": 0.0,
+ "step": 1690
+ },
+ {
+ "epoch": 1.6785185185185185,
+ "grad_norm": 0.0002727876999415457,
+ "learning_rate": 2.4137727975984127e-05,
+ "loss": 0.0,
+ "step": 1700
+ },
+ {
+ "epoch": 1.6883950617283952,
+ "grad_norm": 0.16899436712265015,
+ "learning_rate": 2.385048133894687e-05,
+ "loss": 0.0042,
+ "step": 1710
+ },
+ {
+ "epoch": 1.6982716049382716,
+ "grad_norm": 0.00025011974503286183,
+ "learning_rate": 2.3563386704012268e-05,
+ "loss": 0.0,
+ "step": 1720
+ },
+ {
+ "epoch": 1.7081481481481482,
+ "grad_norm": 0.00042458626558072865,
+ "learning_rate": 2.3276482034016122e-05,
+ "loss": 0.0,
+ "step": 1730
+ },
+ {
+ "epoch": 1.7180246913580248,
+ "grad_norm": 0.0003143450303468853,
+ "learning_rate": 2.2989805266674952e-05,
+ "loss": 0.0007,
+ "step": 1740
+ },
+ {
+ "epoch": 1.7279012345679012,
+ "grad_norm": 0.002475626999512315,
+ "learning_rate": 2.2703394309569462e-05,
+ "loss": 0.0,
+ "step": 1750
+ },
+ {
+ "epoch": 1.7377777777777776,
+ "grad_norm": 0.0013496195897459984,
+ "learning_rate": 2.241728703513199e-05,
+ "loss": 0.0005,
+ "step": 1760
+ },
+ {
+ "epoch": 1.7476543209876543,
+ "grad_norm": 0.00017028844740707427,
+ "learning_rate": 2.2131521275638572e-05,
+ "loss": 0.0,
+ "step": 1770
+ },
+ {
+ "epoch": 1.757530864197531,
+ "grad_norm": 0.00014113387442193925,
+ "learning_rate": 2.184613481820637e-05,
+ "loss": 0.0,
+ "step": 1780
+ },
+ {
+ "epoch": 1.7674074074074073,
+ "grad_norm": 0.00028848147485405207,
+ "learning_rate": 2.1561165399797e-05,
+ "loss": 0.0,
+ "step": 1790
+ },
+ {
+ "epoch": 1.777283950617284,
+ "grad_norm": 0.00021244629169814289,
+ "learning_rate": 2.1276650702226562e-05,
+ "loss": 0.0,
+ "step": 1800
+ },
+ {
+ "epoch": 1.7871604938271606,
+ "grad_norm": 0.00019675836665555835,
+ "learning_rate": 2.099262834718293e-05,
+ "loss": 0.0,
+ "step": 1810
+ },
+ {
+ "epoch": 1.797037037037037,
+ "grad_norm": 0.0004635763179976493,
+ "learning_rate": 2.0709135891250985e-05,
+ "loss": 0.0,
+ "step": 1820
+ },
+ {
+ "epoch": 1.8069135802469136,
+ "grad_norm": 0.00012990082905162126,
+ "learning_rate": 2.0426210820946476e-05,
+ "loss": 0.0,
+ "step": 1830
+ },
+ {
+ "epoch": 1.8167901234567903,
+ "grad_norm": 0.00016129855066537857,
+ "learning_rate": 2.0143890547759132e-05,
+ "loss": 0.0,
+ "step": 1840
+ },
+ {
+ "epoch": 1.8266666666666667,
+ "grad_norm": 0.00018279206415172666,
+ "learning_rate": 1.9862212403205698e-05,
+ "loss": 0.0,
+ "step": 1850
+ },
+ {
+ "epoch": 1.836543209876543,
+ "grad_norm": 0.00023649405920878053,
+ "learning_rate": 1.958121363389356e-05,
+ "loss": 0.0,
+ "step": 1860
+ },
+ {
+ "epoch": 1.8464197530864197,
+ "grad_norm": 0.0002646524226292968,
+ "learning_rate": 1.9300931396595583e-05,
+ "loss": 0.0,
+ "step": 1870
+ },
+ {
+ "epoch": 1.8562962962962963,
+ "grad_norm": 0.0005494295619428158,
+ "learning_rate": 1.9021402753336784e-05,
+ "loss": 0.0096,
+ "step": 1880
+ },
+ {
+ "epoch": 1.8661728395061727,
+ "grad_norm": 0.0009147037053480744,
+ "learning_rate": 1.8742664666493676e-05,
+ "loss": 0.0,
+ "step": 1890
+ },
+ {
+ "epoch": 1.8760493827160494,
+ "grad_norm": 0.001351756858639419,
+ "learning_rate": 1.846475399390659e-05,
+ "loss": 0.0,
+ "step": 1900
+ },
+ {
+ "epoch": 1.885925925925926,
+ "grad_norm": 0.0009297579526901245,
+ "learning_rate": 1.8187707484006005e-05,
+ "loss": 0.0,
+ "step": 1910
+ },
+ {
+ "epoch": 1.8958024691358024,
+ "grad_norm": 0.001504297717474401,
+ "learning_rate": 1.7911561770953213e-05,
+ "loss": 0.0,
+ "step": 1920
+ },
+ {
+ "epoch": 1.905679012345679,
+ "grad_norm": 0.0016418690793216228,
+ "learning_rate": 1.763635336979616e-05,
+ "loss": 0.0,
+ "step": 1930
+ },
+ {
+ "epoch": 1.9155555555555557,
+ "grad_norm": 0.0011701845796778798,
+ "learning_rate": 1.7362118671641023e-05,
+ "loss": 0.0002,
+ "step": 1940
+ },
+ {
+ "epoch": 1.925432098765432,
+ "grad_norm": 0.0010625842260196805,
+ "learning_rate": 1.7088893938840178e-05,
+ "loss": 0.0,
+ "step": 1950
+ },
+ {
+ "epoch": 1.9353086419753085,
+ "grad_norm": 0.0015219110064208508,
+ "learning_rate": 1.6816715300197156e-05,
+ "loss": 0.0,
+ "step": 1960
+ },
+ {
+ "epoch": 1.9451851851851854,
+ "grad_norm": 0.0005342438234947622,
+ "learning_rate": 1.654561874618936e-05,
+ "loss": 0.0,
+ "step": 1970
+ },
+ {
+ "epoch": 1.9550617283950618,
+ "grad_norm": 0.0008706999942660332,
+ "learning_rate": 1.6275640124208928e-05,
+ "loss": 0.0,
+ "step": 1980
+ },
+ {
+ "epoch": 1.9649382716049382,
+ "grad_norm": 0.00037755907396785915,
+ "learning_rate": 1.6006815133822662e-05,
+ "loss": 0.0,
+ "step": 1990
+ },
+ {
+ "epoch": 1.9748148148148148,
+ "grad_norm": 0.0005140100256539881,
+ "learning_rate": 1.5739179322051382e-05,
+ "loss": 0.0,
+ "step": 2000
+ },
+ {
+ "epoch": 1.9748148148148148,
+ "eval_nlpcc25_task1_dev_accuracy": 0.9997619047619046,
+ "eval_nlpcc25_task1_dev_loss": 0.0013579174410551786,
+ "eval_nlpcc25_task1_dev_runtime": 2528.9656,
+ "eval_nlpcc25_task1_dev_samples_per_second": 1.107,
+ "eval_nlpcc25_task1_dev_steps_per_second": 0.277,
+ "step": 2000
+ }
+ ],
+ "logging_steps": 10,
+ "max_steps": 3036,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 1.1490710644069171e+18,
+ "train_batch_size": 4,
+ "trial_name": null,
+ "trial_params": null
+ }
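
Note: trainer_state.json is plain JSON, so the log above can be summarized without any training libraries. A minimal sketch that separates the per-step training losses from the periodic dev-set evaluations (the file path is an assumption, adjust to your local copy):

```python
# Sketch: summarize the Trainer log above with the standard library only.
import json

with open("Qwen2.5-7B-Instruct-lora-2/checkpoint-2000/trainer_state.json") as f:
    state = json.load(f)

# Training-step entries carry "loss"; eval entries carry the eval_* keys.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_nlpcc25_task1_dev_loss" in e]

print(f"{len(train_logs)} training logs up to step {state['global_step']}")
for e in eval_logs:
    print(f"step {e['step']}: dev accuracy {e['eval_nlpcc25_task1_dev_accuracy']:.6f}, "
          f"dev loss {e['eval_nlpcc25_task1_dev_loss']:.6f}")
```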
Qwen2.5-7B-Instruct-lora-2/checkpoint-2000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3bc27925817800cee3170b92f598d1cfe9fce4b0491f781c2f4977cfd2644a9a
+ size 5752
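
Note: training_args.bin is stored as a Git LFS pointer; the 5.7 kB payload is the pickled `TrainingArguments` object saved by the Trainer. A sketch of inspecting it, assuming a compatible transformers install (unpickling requires the class to be importable):

```python
# Sketch: inspect the pickled TrainingArguments.
# weights_only=False because this file is an arbitrary pickle, not tensors.
import torch

args = torch.load(
    "Qwen2.5-7B-Instruct-lora-2/checkpoint-2000/training_args.bin",
    weights_only=False,
)
print(args.learning_rate, args.per_device_train_batch_size, args.num_train_epochs)
```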
Qwen2.5-7B-Instruct-lora-2/checkpoint-2000/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
Qwen2.5-7B-Instruct-lora-2/checkpoint-2500/README.md ADDED
@@ -0,0 +1,202 @@
+ ---
+ base_model: /data/zhaoguoyu/Experiments/mgtd-sys/detector/ckpt/Qwen2.5-7B-Instruct
+ library_name: peft
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.15.0
Qwen2.5-7B-Instruct-lora-2/checkpoint-2500/adapter_config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "/data/zhaoguoyu/Experiments/mgtd-sys/detector/ckpt/Qwen2.5-7B-Instruct",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "k_proj",
+ "v_proj",
+ "up_proj",
+ "down_proj",
+ "o_proj",
+ "gate_proj",
+ "q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
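
Note: the adapter config above applies rank-8, alpha-16 LoRA to all seven attention and MLP projections of the base model. A minimal sketch of attaching this checkpoint's adapter with PEFT (both paths are assumptions; the base path must point at a local or Hub copy of Qwen2.5-7B-Instruct):

```python
# Sketch: attach the LoRA adapter above to the base model with PEFT.
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-7B-Instruct", torch_dtype="auto", device_map="auto"
)
model = PeftModel.from_pretrained(base, "Qwen2.5-7B-Instruct-lora-2/checkpoint-2500")
model = model.merge_and_unload()  # optional: fold the LoRA deltas into the base weights
```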