Sam-Shin committed on
Commit 7828b40 · verified · 1 Parent(s): 3739ab3

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,207 @@
+ ---
+ base_model: allenai/OLMo-2-1124-7B-Instruct
+ library_name: peft
+ pipeline_tag: text-generation
+ tags:
+ - base_model:adapter:allenai/OLMo-2-1124-7B-Instruct
+ - lora
+ - transformers
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for use of the model without fine-tuning or integration into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for use of the model when fine-tuned for a task, or when plugged into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases, and limitations of the model. More information is needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
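+ Until the card is filled in, here is a minimal sketch of loading this LoRA adapter on top of the `allenai/OLMo-2-1124-7B-Instruct` base model named in the frontmatter. The adapter id (`path/to/this-adapter`) is a placeholder for wherever this folder is hosted; dtype and device placement are assumptions, not the authors' settings.
+
+ ```python
+ # Minimal sketch (not the authors' code): load the LoRA adapter over its base model.
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ from peft import PeftModel
+
+ BASE_ID = "allenai/OLMo-2-1124-7B-Instruct"
+ ADAPTER_ID = "path/to/this-adapter"  # placeholder: this repo's id or a local path
+
+ tokenizer = AutoTokenizer.from_pretrained(ADAPTER_ID)  # the repo ships its own tokenizer files
+ base = AutoModelForCausalLM.from_pretrained(BASE_ID, torch_dtype=torch.bfloat16, device_map="auto")
+ model = PeftModel.from_pretrained(base, ADAPTER_ID)
+
+ messages = [{"role": "user", "content": "Write a Python function that reverses a string."}]
+ inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
+ outputs = model.generate(inputs, max_new_tokens=256)
+ print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
+ ```
+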
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here. -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly. -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and BibTeX information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+
+ ### Framework versions
+
+ - PEFT 0.17.0
adapter_config.json ADDED
@@ -0,0 +1,42 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "allenai/OLMo-2-1124-7B-Instruct",
+   "bias": "none",
+   "corda_config": null,
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 128,
+   "lora_bias": false,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "qalora_group_size": 16,
+   "r": 64,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "v_proj",
+     "gate_proj",
+     "down_proj",
+     "q_proj",
+     "o_proj",
+     "up_proj",
+     "k_proj"
+   ],
+   "target_parameters": null,
+   "task_type": "CAUSAL_LM",
+   "trainable_token_indices": null,
+   "use_dora": false,
+   "use_qalora": false,
+   "use_rslora": false
+ }
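For readers reconstructing this setup, the configuration above corresponds to the `peft` `LoraConfig` sketched below. The values shown are taken directly from the JSON; everything omitted falls back to PEFT defaults.

```python
# Sketch: a LoraConfig equivalent to adapter_config.json above (values copied from the JSON).
from peft import LoraConfig

lora_config = LoraConfig(
    r=64,                       # LoRA rank
    lora_alpha=128,             # effective scaling = lora_alpha / r = 2.0 (rsLoRA is off)
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=[            # every attention and MLP projection in the base model
        "q_proj", "k_proj", "v_proj", "o_proj",
        "gate_proj", "up_proj", "down_proj",
    ],
)
```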
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:add54f49f5d10162c9dca88db70ef6b78fb4eb3bd62ce24034c90b0c6e2c877e
+ size 639691872
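Note that what the diff shows above is a Git LFS pointer, not the weights themselves: `oid` is the SHA-256 of the real payload and `size` is its byte count (about 640 MB, roughly consistent with the ~160M r=64 LoRA parameters of this adapter stored in fp32). A small parsing sketch:

```python
# Sketch: parse a Git LFS pointer file (format: one "key value" pair per line, per spec v1).
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    fields["size"] = int(fields["size"])
    return fields

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:add54f49f5d10162c9dca88db70ef6b78fb4eb3bd62ce24034c90b0c6e2c877e\n"
    "size 639691872\n"
)
info = parse_lfs_pointer(pointer)
print(f"{info['size'] / 1e6:.1f} MB")  # -> 639.7 MB
```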
merges.txt ADDED
The diff for this file is too large to render. See raw diff
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fe80b64f62681c8d8ca95b46768ea22e3987c18de8470bbfabe3487707562f99
+ size 1279647314
rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:79efac7a9fc1e8d2e6ed66f1b87f0fc96aefe8f10a72516c7891434fb4ff8b5e
+ size 15984
rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a3adde8a0661d775ce3e7d9d5c774afb77c8d46ab620d0c67a7d84247ea62d79
+ size 15984
rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:184906cdeef9b0c24b1fab2e2749076c79a0740b39b603d7f3848fa923553db1
+ size 15984
rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e60aa43a389b1e1f19be91633fa203cc45faf36f1c9a232aaae24a1e9edafc0d
+ size 15984
rng_state_4.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e76a9a1992442d4d4ca3b361c551c3f0a502c8aa39462f360845d0da811e495a
+ size 15984
rng_state_5.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9f5b3c4e7be71739e889058126695aabb3725da815f211f50ca45ab0ebf36d99
+ size 15984
rng_state_6.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9972ac715a05dfa1c5df4816ec876e6c97e9f8f49661dad2d7b19ff825a21239
+ size 15984
rng_state_7.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d2cd689af80a5ec27b220827ef49b24c53e19238dae9b22607edaae6f78c02c9
+ size 15984
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:53e7524950df06585d3a7ba733cefee4292c182136a43c2bce4baa5abf39d690
+ size 1064
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|pad|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
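One detail worth calling out: `bos_token`, `eos_token`, and `unk_token` all map to `<|endoftext|>`, while padding uses a dedicated `<|pad|>` token, so padded positions never collide with end-of-text. A quick check (the repo path is a placeholder):

```python
# Sketch: verify the special-token layout after loading this repo's tokenizer.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this-adapter")  # placeholder id/path
assert tok.bos_token == tok.eos_token == tok.unk_token == "<|endoftext|>"
assert tok.pad_token == "<|pad|>"  # dedicated pad token, id 100277 per tokenizer_config.json
```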
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1,190 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "100256": {
+       "content": "<|extra_id_0|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100257": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100258": {
+       "content": "<|fim_prefix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100259": {
+       "content": "<|fim_middle|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100260": {
+       "content": "<|fim_suffix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100261": {
+       "content": "|||PHONE_NUMBER|||",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100262": {
+       "content": "|||EMAIL_ADDRESS|||",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100263": {
+       "content": "|||IP_ADDRESS|||",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100264": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100265": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100266": {
+       "content": "<|extra_id_1|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100267": {
+       "content": "<|extra_id_2|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100268": {
+       "content": "<|extra_id_3|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100269": {
+       "content": "<|extra_id_4|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100270": {
+       "content": "<|extra_id_5|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100271": {
+       "content": "<|extra_id_6|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100272": {
+       "content": "<|extra_id_7|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100273": {
+       "content": "<|extra_id_8|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100274": {
+       "content": "<|extra_id_9|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100275": {
+       "content": "<|extra_id_10|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100276": {
+       "content": "<|endofprompt|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100277": {
+       "content": "<|pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<|endoftext|>",
+   "chat_template": "{{ bos_token }}{% for message in messages %}{% if message['role'] == 'system' %}{{ '<|system|>\n' + message['content'] + '\n' }}{% elif message['role'] == 'user' %}{{ '<|user|>\n' + message['content'] + '\n' }}{% elif message['role'] == 'assistant' %}{% if not loop.last %}{{ '<|assistant|>\n' + message['content'] + eos_token + '\n' }}{% else %}{{ '<|assistant|>\n' + message['content'] + eos_token }}{% endif %}{% endif %}{% if loop.last and add_generation_prompt %}{{ '<|assistant|>\n' }}{% endif %}{% endfor %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|endoftext|>",
+   "extra_special_tokens": {},
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<|pad|>",
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>"
+ }
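The `chat_template` above implements the OLMo-2 instruct format: the prompt opens with the BOS token, each turn is wrapped as `<|system|>`, `<|user|>`, or `<|assistant|>` plus a newline, completed assistant turns are terminated with the EOS token, and `add_generation_prompt=True` appends a trailing `<|assistant|>\n`. A rendering sketch (the repo path is a placeholder):

```python
# Sketch: render the chat template from tokenizer_config.json above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this-adapter")  # placeholder id/path
messages = [
    {"role": "system", "content": "You are a helpful coding assistant."},
    {"role": "user", "content": "Reverse a list in Python."},
]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# <|endoftext|><|system|>
# You are a helpful coding assistant.
# <|user|>
# Reverse a list in Python.
# <|assistant|>
```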
trainer_state.json ADDED
@@ -0,0 +1,452 @@
+ {
+   "best_metric": 1.268411636352539,
+   "best_model_checkpoint": "/gscratch/stf/seunguk/dipika/olmo-code-sft/train/result_outputs/7b_py23_mix_10k_normal/allenai_OLMo-2-1124-7B-Instruct/r64_lr1.5e-05/checkpoint-429",
+   "epoch": 3.0,
+   "eval_steps": 39,
+   "global_step": 459,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.006535947712418301,
+       "grad_norm": 3.6004388332366943,
+       "learning_rate": 3.0000000000000004e-07,
+       "loss": 7.4653,
+       "step": 1
+     },
+     {
+       "epoch": 0.06535947712418301,
+       "grad_norm": 3.2401626110076904,
+       "learning_rate": 3e-06,
+       "loss": 7.7982,
+       "step": 10
+     },
+     {
+       "epoch": 0.13071895424836602,
+       "grad_norm": 2.898332118988037,
+       "learning_rate": 6e-06,
+       "loss": 7.8085,
+       "step": 20
+     },
+     {
+       "epoch": 0.19607843137254902,
+       "grad_norm": 2.342164993286133,
+       "learning_rate": 9e-06,
+       "loss": 7.0685,
+       "step": 30
+     },
+     {
+       "epoch": 0.2549019607843137,
+       "eval_loss": 1.5568805932998657,
+       "eval_runtime": 6.1754,
+       "eval_samples_per_second": 16.193,
+       "eval_steps_per_second": 2.105,
+       "step": 39
+     },
+     {
+       "epoch": 0.26143790849673204,
+       "grad_norm": 1.3029459714889526,
+       "learning_rate": 1.2e-05,
+       "loss": 6.2268,
+       "step": 40
+     },
+     {
+       "epoch": 0.32679738562091504,
+       "grad_norm": 0.8066064715385437,
+       "learning_rate": 1.5e-05,
+       "loss": 5.6646,
+       "step": 50
+     },
+     {
+       "epoch": 0.39215686274509803,
+       "grad_norm": 0.640289843082428,
+       "learning_rate": 1.4977885819099278e-05,
+       "loss": 5.4284,
+       "step": 60
+     },
+     {
+       "epoch": 0.45751633986928103,
+       "grad_norm": 0.5137796401977539,
+       "learning_rate": 1.4911673686262952e-05,
+       "loss": 5.4141,
+       "step": 70
+     },
+     {
+       "epoch": 0.5098039215686274,
+       "eval_loss": 1.329179286956787,
+       "eval_runtime": 6.0801,
+       "eval_samples_per_second": 16.447,
+       "eval_steps_per_second": 2.138,
+       "step": 78
+     },
+     {
+       "epoch": 0.5228758169934641,
+       "grad_norm": 0.4564005732536316,
+       "learning_rate": 1.4801754062046587e-05,
+       "loss": 5.243,
+       "step": 80
+     },
+     {
+       "epoch": 0.5882352941176471,
+       "grad_norm": 0.4929277300834656,
+       "learning_rate": 1.4648775155104705e-05,
+       "loss": 5.2285,
+       "step": 90
+     },
+     {
+       "epoch": 0.6535947712418301,
+       "grad_norm": 0.5199493169784546,
+       "learning_rate": 1.4453639099629869e-05,
+       "loss": 5.2129,
+       "step": 100
+     },
+     {
+       "epoch": 0.7189542483660131,
+       "grad_norm": 0.41562795639038086,
+       "learning_rate": 1.4217496635363684e-05,
+       "loss": 5.2174,
+       "step": 110
+     },
+     {
+       "epoch": 0.7647058823529411,
+       "eval_loss": 1.2994059324264526,
+       "eval_runtime": 6.1041,
+       "eval_samples_per_second": 16.382,
+       "eval_steps_per_second": 2.13,
+       "step": 117
+     },
+     {
+       "epoch": 0.7843137254901961,
+       "grad_norm": 0.4347153902053833,
+       "learning_rate": 1.3941740321552318e-05,
+       "loss": 5.1946,
+       "step": 120
+     },
+     {
+       "epoch": 0.8496732026143791,
+       "grad_norm": 0.4713277816772461,
+       "learning_rate": 1.3627996324864611e-05,
+       "loss": 5.1453,
+       "step": 130
+     },
+     {
+       "epoch": 0.9150326797385621,
+       "grad_norm": 0.4941399395465851,
+       "learning_rate": 1.3278114829700362e-05,
+       "loss": 5.1508,
+       "step": 140
+     },
+     {
+       "epoch": 0.9803921568627451,
+       "grad_norm": 0.5141309499740601,
+       "learning_rate": 1.2894159127440344e-05,
+       "loss": 5.0553,
+       "step": 150
+     },
+     {
+       "epoch": 1.0196078431372548,
+       "eval_loss": 1.2868971824645996,
+       "eval_runtime": 6.1033,
+       "eval_samples_per_second": 16.385,
+       "eval_steps_per_second": 2.13,
+       "step": 156
+     },
+     {
+       "epoch": 1.0457516339869282,
+       "grad_norm": 0.4459021985530853,
+       "learning_rate": 1.2478393448979922e-05,
+       "loss": 5.0354,
+       "step": 160
+     },
+     {
+       "epoch": 1.1111111111111112,
+       "grad_norm": 0.518979549407959,
+       "learning_rate": 1.2033269612299312e-05,
+       "loss": 5.0699,
+       "step": 170
+     },
+     {
+       "epoch": 1.1764705882352942,
+       "grad_norm": 0.5595972537994385,
+       "learning_rate": 1.1561412563811198e-05,
+       "loss": 5.0097,
+       "step": 180
+     },
+     {
+       "epoch": 1.2418300653594772,
+       "grad_norm": 0.49540629982948303,
+       "learning_rate": 1.1065604898750127e-05,
+       "loss": 5.075,
+       "step": 190
+     },
+     {
+       "epoch": 1.2745098039215685,
+       "eval_loss": 1.2794718742370605,
+       "eval_runtime": 6.0945,
+       "eval_samples_per_second": 16.408,
+       "eval_steps_per_second": 2.133,
+       "step": 195
+     },
+     {
+       "epoch": 1.3071895424836601,
+       "grad_norm": 0.5955595970153809,
+       "learning_rate": 1.0548770451888665e-05,
+       "loss": 4.9689,
+       "step": 200
+     },
+     {
+       "epoch": 1.3725490196078431,
+       "grad_norm": 0.5468981862068176,
+       "learning_rate": 1.0013957055347779e-05,
+       "loss": 5.1004,
+       "step": 210
+     },
+     {
+       "epoch": 1.4379084967320261,
+       "grad_norm": 0.5972552299499512,
+       "learning_rate": 9.464318565180596e-06,
+       "loss": 5.0697,
+       "step": 220
+     },
+     {
+       "epoch": 1.5032679738562091,
+       "grad_norm": 0.5224605202674866,
+       "learning_rate": 8.903096262720867e-06,
+       "loss": 5.1168,
+       "step": 230
+     },
+     {
+       "epoch": 1.5294117647058822,
+       "eval_loss": 1.274889349937439,
+       "eval_runtime": 6.1193,
+       "eval_samples_per_second": 16.342,
+       "eval_steps_per_second": 2.124,
+       "step": 234
+     },
+     {
+       "epoch": 1.5686274509803921,
+       "grad_norm": 0.563854455947876,
+       "learning_rate": 8.333599740374487e-06,
+       "loss": 5.0523,
+       "step": 240
+     },
+     {
+       "epoch": 1.6339869281045751,
+       "grad_norm": 0.5406912565231323,
+       "learning_rate": 7.75918738457279e-06,
+       "loss": 4.9938,
+       "step": 250
+     },
+     {
+       "epoch": 1.6993464052287581,
+       "grad_norm": 0.5734691023826599,
+       "learning_rate": 7.183246570981859e-06,
+       "loss": 5.0574,
+       "step": 260
+     },
+     {
+       "epoch": 1.7647058823529411,
+       "grad_norm": 0.5452589392662048,
+       "learning_rate": 6.609173688758989e-06,
+       "loss": 5.0522,
+       "step": 270
+     },
+     {
+       "epoch": 1.784313725490196,
+       "eval_loss": 1.2728018760681152,
+       "eval_runtime": 6.1187,
+       "eval_samples_per_second": 16.343,
+       "eval_steps_per_second": 2.125,
+       "step": 273
+     },
+     {
+       "epoch": 1.8300653594771243,
+       "grad_norm": 0.5308836102485657,
+       "learning_rate": 6.0403541116555636e-06,
+       "loss": 5.0906,
+       "step": 280
+     },
+     {
+       "epoch": 1.8954248366013071,
+       "grad_norm": 0.5624706745147705,
+       "learning_rate": 5.480142234079027e-06,
+       "loss": 5.0739,
+       "step": 290
+     },
+     {
+       "epoch": 1.9607843137254903,
+       "grad_norm": 0.6330104470252991,
+       "learning_rate": 4.9318416898436404e-06,
+       "loss": 5.0924,
+       "step": 300
+     },
+     {
+       "epoch": 2.026143790849673,
+       "grad_norm": 0.550271213054657,
+       "learning_rate": 4.398685870262254e-06,
+       "loss": 4.9703,
+       "step": 310
+     },
+     {
+       "epoch": 2.0392156862745097,
+       "eval_loss": 1.2705397605895996,
+       "eval_runtime": 6.092,
+       "eval_samples_per_second": 16.415,
+       "eval_steps_per_second": 2.134,
+       "step": 312
+     },
+     {
+       "epoch": 2.0915032679738563,
+       "grad_norm": 0.5277569890022278,
+       "learning_rate": 3.883818856466194e-06,
+       "loss": 5.0368,
+       "step": 320
+     },
+     {
+       "epoch": 2.156862745098039,
+       "grad_norm": 0.49453046917915344,
+       "learning_rate": 3.390276878397574e-06,
+       "loss": 4.9466,
+       "step": 330
+     },
+     {
+       "epoch": 2.2222222222222223,
+       "grad_norm": 0.4974152445793152,
+       "learning_rate": 2.9209704098124204e-06,
+       "loss": 5.1187,
+       "step": 340
+     },
+     {
+       "epoch": 2.287581699346405,
+       "grad_norm": 0.6691598892211914,
+       "learning_rate": 2.47866700488251e-06,
+       "loss": 4.9278,
+       "step": 350
+     },
+     {
+       "epoch": 2.2941176470588234,
+       "eval_loss": 1.269243836402893,
+       "eval_runtime": 6.1135,
+       "eval_samples_per_second": 16.357,
+       "eval_steps_per_second": 2.126,
+       "step": 351
+     },
+     {
+       "epoch": 2.3529411764705883,
+       "grad_norm": 0.6030656099319458,
+       "learning_rate": 2.0659749776104147e-06,
+       "loss": 4.8925,
+       "step": 360
+     },
+     {
+       "epoch": 2.418300653594771,
+       "grad_norm": 0.6159414052963257,
+       "learning_rate": 1.6853280203020998e-06,
+       "loss": 5.0115,
+       "step": 370
+     },
+     {
+       "epoch": 2.4836601307189543,
+       "grad_norm": 0.5232154726982117,
+       "learning_rate": 1.3389708518037574e-06,
+       "loss": 5.015,
+       "step": 380
+     },
+     {
+       "epoch": 2.549019607843137,
+       "grad_norm": 0.5964416265487671,
+       "learning_rate": 1.0289459801368406e-06,
+       "loss": 5.0732,
+       "step": 390
+     },
+     {
+       "epoch": 2.549019607843137,
+       "eval_loss": 1.2685757875442505,
+       "eval_runtime": 6.0856,
+       "eval_samples_per_second": 16.432,
+       "eval_steps_per_second": 2.136,
+       "step": 390
+     },
+     {
+       "epoch": 2.6143790849673203,
+       "grad_norm": 0.5542371869087219,
+       "learning_rate": 7.570816575935527e-07,
+       "loss": 5.0339,
+       "step": 400
+     },
+     {
+       "epoch": 2.6797385620915035,
+       "grad_norm": 0.6067601442337036,
+       "learning_rate": 5.249810993230036e-07,
+       "loss": 5.0544,
+       "step": 410
+     },
+     {
+       "epoch": 2.7450980392156863,
+       "grad_norm": 0.5335155129432678,
+       "learning_rate": 3.3401302898726395e-07,
+       "loss": 4.9646,
+       "step": 420
+     },
+     {
+       "epoch": 2.803921568627451,
+       "eval_loss": 1.268411636352539,
+       "eval_runtime": 6.0899,
+       "eval_samples_per_second": 16.421,
+       "eval_steps_per_second": 2.135,
+       "step": 429
+     },
+     {
+       "epoch": 2.810457516339869,
+       "grad_norm": 0.5043578743934631,
+       "learning_rate": 1.853036072406436e-07,
+       "loss": 4.9906,
+       "step": 430
+     },
+     {
+       "epoch": 2.8758169934640523,
+       "grad_norm": 0.6804988384246826,
+       "learning_rate": 7.972979063091468e-08,
+       "loss": 5.096,
+       "step": 440
+     },
+     {
+       "epoch": 2.9411764705882355,
+       "grad_norm": 0.6172552704811096,
+       "learning_rate": 1.7914160085782116e-08,
+       "loss": 5.0006,
+       "step": 450
+     }
+   ],
+   "logging_steps": 10,
+   "max_steps": 459,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 3,
+   "save_steps": 39,
+   "stateful_callbacks": {
+     "EarlyStoppingCallback": {
+       "args": {
+         "early_stopping_patience": 10,
+         "early_stopping_threshold": 0.001
+       },
+       "attributes": {
+         "early_stopping_patience_counter": 2
+       }
+     },
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 5.087892566070788e+18,
+   "train_batch_size": 2,
+   "trial_name": null,
+   "trial_params": null
+ }
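In summary, the state above records a full 3-epoch run (459 of 459 steps at per-device batch size 2, evaluating and saving every 39 steps) in which training loss fell from ~7.5 to ~5.0 and eval loss from 1.557 to a best of 1.2684 at checkpoint 429; early stopping was armed but never fired (patience counter at 2 of 10). A sketch of the corresponding setup follows; only the callback arguments and the step/epoch/batch numbers come from the JSON, and the schedule fields marked "implied" are inferred from the learning-rate trace, with everything else a placeholder.

```python
# Sketch: training setup implied by trainer_state.json (placeholders and inferences noted inline).
from transformers import EarlyStoppingCallback, TrainingArguments

args = TrainingArguments(
    output_dir="out",                   # placeholder
    num_train_epochs=3,
    per_device_train_batch_size=2,
    learning_rate=1.5e-05,              # peak LR, implied by the log and the checkpoint path
    warmup_steps=50,                    # implied: LR ramps linearly to its peak at step 50
    lr_scheduler_type="cosine",         # implied by the decay shape after step 50
    logging_steps=10,
    eval_strategy="steps",              # implied by eval_steps=39
    eval_steps=39,
    save_steps=39,
    load_best_model_at_end=True,        # required for EarlyStoppingCallback
    metric_for_best_model="eval_loss",  # implied by best_metric tracking
)
early_stop = EarlyStoppingCallback(
    early_stopping_patience=10,         # from stateful_callbacks above
    early_stopping_threshold=0.001,
)
# Both would be handed to Trainer(model=..., args=args, callbacks=[early_stop]).
```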
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5c33d52a15e06892e3c746f1be422f96f3a3926e9f7a23687482125db6fcbc04
+ size 5560
vocab.json ADDED
The diff for this file is too large to render. See raw diff