shorecode committed · Commit 11b6cc0 · verified · 1 Parent(s): 1d8ab98
.gitattributes CHANGED
@@ -69,3 +69,4 @@ checkpoint-3254/tokenizer.json filter=lfs diff=lfs merge=lfs -text
  tokenizer.json filter=lfs diff=lfs merge=lfs -text
  old-cp-1300/tokenizer.json filter=lfs diff=lfs merge=lfs -text
  old-cp-3254/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ bestpoint-1500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
bestpoint-1500/README.md ADDED
@@ -0,0 +1,209 @@
+ ---
+ base_model: shorecode/gemma-3-svg-generator-lora-xla
+ library_name: peft
+ pipeline_tag: text-generation
+ tags:
+ - base_model:adapter:shorecode/gemma-3-svg-generator-lora-xla
+ - lora
+ - sft
+ - transformers
+ - trl
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.18.0
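
The "How to Get Started" section above is still the unfilled template. As a minimal sketch (not documented usage from this repo): load the base model named in the `adapter_config.json` added below and attach the `bestpoint-1500` adapter via PEFT. Only the base-model id, the subfolder name, and the PEFT version come from this commit; the adapter repo id, prompt, and generation settings are assumptions.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel  # PEFT 0.18.0 per the model card

BASE = "shorecode/gemma-3-svg-generator-lora-xla"  # from adapter_config.json
ADAPTER_REPO = BASE  # assumption: the adapter lives in the same repo as this commit

tokenizer = AutoTokenizer.from_pretrained(ADAPTER_REPO, subfolder="bestpoint-1500")
model = AutoModelForCausalLM.from_pretrained(BASE, device_map="auto")
model = PeftModel.from_pretrained(model, ADAPTER_REPO, subfolder="bestpoint-1500")

# Hypothetical prompt; the training data and task are not described in this commit.
inputs = tokenizer("Generate an SVG of a red circle.", return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=256)[0]))
```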
bestpoint-1500/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+ "alora_invocation_tokens": null,
+ "alpha_pattern": {},
+ "arrow_config": null,
+ "auto_mapping": null,
+ "base_model_name_or_path": "shorecode/gemma-3-svg-generator-lora-xla",
+ "bias": "none",
+ "corda_config": null,
+ "ensure_weight_tying": false,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 48,
+ "lora_bias": false,
+ "lora_dropout": 0.02,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": [
+ "lm_head",
+ "embed_tokens"
+ ],
+ "peft_type": "LORA",
+ "peft_version": "0.18.0",
+ "qalora_group_size": 16,
+ "r": 24,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "k_proj",
+ "v_proj",
+ "up_proj",
+ "gate_proj",
+ "q_proj",
+ "down_proj",
+ "o_proj"
+ ],
+ "target_parameters": null,
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_qalora": false,
+ "use_rslora": false
+ }
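
Reading the config: rank 24 with alpha 48 gives a LoRA scaling of 48/24 = 2.0, dropout is a light 0.02, all seven attention and MLP projections are adapted, and `lm_head`/`embed_tokens` are trained in full and stored alongside the adapter. A rough `peft.LoraConfig` equivalent, as a sketch covering only the fields set above:

```python
from peft import LoraConfig

# Sketch of the key fields from bestpoint-1500/adapter_config.json;
# the remaining JSON fields are PEFT defaults or feature flags left off.
lora_config = LoraConfig(
    r=24,                      # LoRA rank
    lora_alpha=48,             # scaling = lora_alpha / r = 2.0
    lora_dropout=0.02,
    bias="none",
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                    "gate_proj", "up_proj", "down_proj"],
    modules_to_save=["lm_head", "embed_tokens"],  # trained fully, not low-rank
    task_type="CAUSAL_LM",
)
```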
bestpoint-1500/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea33ba8c5c6b66e8b8ce108a5af367a92f476912e88bfac459f42571c464298b
+ size 7326165448
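
This is a Git LFS pointer, not the tensor file itself: the actual `adapter_model.safetensors` is about 7.3 GB (likely dominated by the fully saved `lm_head` and `embed_tokens`) and is fetched by LFS on checkout. A sketch for checking a downloaded copy against the pointer's oid, with a hypothetical local path:

```python
import hashlib

def sha256_of(path: str, chunk: int = 1 << 20) -> str:
    """Stream the file so a multi-gigabyte download fits in constant memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for block in iter(lambda: f.read(chunk), b""):
            h.update(block)
    return h.hexdigest()

# The expected digest is the oid from the pointer above; the path is hypothetical.
EXPECTED = "ea33ba8c5c6b66e8b8ce108a5af367a92f476912e88bfac459f42571c464298b"
assert sha256_of("bestpoint-1500/adapter_model.safetensors") == EXPECTED
```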
bestpoint-1500/added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+ "</think>": 262146,
+ "<image_soft_token>": 262144,
+ "<think>": 262145
+ }
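
The checkpoint carries `<think>`/`</think>` reasoning markers at ids 262145-262146, directly after `<image_soft_token>` (262144). This fits the adapter config's `modules_to_save`: with new rows in the vocabulary, `embed_tokens` and `lm_head` have to be trained and shipped rather than kept frozen. A sketch of how such tokens are typically registered (the actual training script is not part of this commit; `tokenizer` and `model` as in the loading sketch above):

```python
# Register the reasoning markers and grow the embedding matrix to match.
# On this checkpoint's tokenizer the tokens already exist, so num_added is 0.
num_added = tokenizer.add_special_tokens(
    {"additional_special_tokens": ["<think>", "</think>"]}
)
if num_added:
    model.resize_token_embeddings(len(tokenizer))
print(tokenizer.convert_tokens_to_ids("<think>"))  # 262145 per added_tokens.json
```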
bestpoint-1500/chat_template.jinja ADDED
@@ -0,0 +1,47 @@
+ {{ bos_token }}
+ {%- if messages[0]['role'] == 'system' -%}
+ {%- if messages[0]['content'] is string -%}
+ {%- set first_user_prefix = messages[0]['content'] + '
+
+ ' -%}
+ {%- else -%}
+ {%- set first_user_prefix = messages[0]['content'][0]['text'] + '
+
+ ' -%}
+ {%- endif -%}
+ {%- set loop_messages = messages[1:] -%}
+ {%- else -%}
+ {%- set first_user_prefix = "" -%}
+ {%- set loop_messages = messages -%}
+ {%- endif -%}
+ {%- for message in loop_messages -%}
+ {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}
+ {{ raise_exception("Conversation roles must alternate user/assistant/user/assistant/...") }}
+ {%- endif -%}
+ {%- if (message['role'] == 'assistant') -%}
+ {%- set role = "model" -%}
+ {%- else -%}
+ {%- set role = message['role'] -%}
+ {%- endif -%}
+ {{ '<start_of_turn>' + role + '
+ ' + (first_user_prefix if loop.first else "") }}
+ {%- if message['content'] is string -%}
+ {{ message['content'] | trim }}
+ {%- elif message['content'] is iterable -%}
+ {%- for item in message['content'] -%}
+ {%- if item['type'] == 'image' -%}
+ {{ '<start_of_image>' }}
+ {%- elif item['type'] == 'text' -%}
+ {{ item['text'] | trim }}
+ {%- endif -%}
+ {%- endfor -%}
+ {%- else -%}
+ {{ raise_exception("Invalid content type") }}
+ {%- endif -%}
+ {{ '<end_of_turn>
+ ' }}
+ {%- endfor -%}
+ {%- if add_generation_prompt -%}
+ {{'<start_of_turn>model
+ '}}
+ {%- endif -%}
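
In summary, the template prepends `bos_token`, folds a leading system message into the first user turn, enforces strict user/assistant alternation, renames the `assistant` role to `model`, and wraps each turn in `<start_of_turn>`/`<end_of_turn>`, matching Gemma's turn format. A rendering sketch with hypothetical messages, reusing the tokenizer from the loading sketch:

```python
messages = [
    {"role": "system", "content": "You produce valid SVG."},  # folded into the first user turn
    {"role": "user", "content": "Draw a blue square."},
]
text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
# Expected output, following the template above:
# <bos><start_of_turn>user
# You produce valid SVG.
#
# Draw a blue square.<end_of_turn>
# <start_of_turn>model
print(text)
```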
bestpoint-1500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e4e532783cfb56a0b2517de5a2768da613f008b8d47ba4c272334aeb161746ff
+ size 4988740429
bestpoint-1500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0e6844d7b3bd4268714357aede6e1fc5d1effc95ff68686fbca9031ed50a5e91
+ size 14455
bestpoint-1500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4ca0dedd3fc45009ec664564aed9700669ab8ef183fb1241347df93c8e456b92
+ size 1465
bestpoint-1500/special_tokens_map.json ADDED
@@ -0,0 +1,49 @@
+ {
+ "additional_special_tokens": [
+ {
+ "content": "<think>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ {
+ "content": "</think>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ ],
+ "boi_token": "<start_of_image>",
+ "bos_token": {
+ "content": "<bos>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eoi_token": "<end_of_image>",
+ "eos_token": {
+ "content": "<eos>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "image_token": "<image_soft_token>",
+ "pad_token": {
+ "content": "<pad>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
bestpoint-1500/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:48b0c1cd0578eea659e9d9249b1c575599a2a172bfaa9130c92d0e155f3b9fe0
+ size 33384937
bestpoint-1500/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1299c11d7cf632ef3b4e11937501358ada021bbdf7c47638d13c0ee982f2e79c
+ size 4689074
bestpoint-1500/tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff
 
bestpoint-1500/trainer_state.json ADDED
@@ -0,0 +1,349 @@
+ {
+ "best_global_step": null,
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.4611128189363664,
+ "eval_steps": 100,
+ "global_step": 1500,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "entropy": 0.229019775390625,
+ "epoch": 0.03074085459575776,
+ "grad_norm": 0.392578125,
+ "learning_rate": 0.0002,
+ "loss": 0.2225,
+ "mean_token_accuracy": 0.95642578125,
+ "num_tokens": 51200.0,
+ "step": 100
+ },
+ {
+ "epoch": 0.03074085459575776,
+ "eval_entropy": 0.1101745268257942,
+ "eval_loss": 0.11310792714357376,
+ "eval_mean_token_accuracy": 0.9757639848066298,
+ "eval_num_tokens": 51200.0,
+ "eval_runtime": 57.583,
+ "eval_samples_per_second": 6.287,
+ "eval_steps_per_second": 6.287,
+ "step": 100
+ },
+ {
+ "entropy": 0.114027099609375,
+ "epoch": 0.06148170919151552,
+ "grad_norm": 0.5859375,
+ "learning_rate": 0.0002,
+ "loss": 0.1094,
+ "mean_token_accuracy": 0.97578125,
+ "num_tokens": 102400.0,
+ "step": 200
+ },
+ {
+ "epoch": 0.06148170919151552,
+ "eval_entropy": 0.1109345999870511,
+ "eval_loss": 0.11441663652658463,
+ "eval_mean_token_accuracy": 0.9750410048342542,
+ "eval_num_tokens": 102400.0,
+ "eval_runtime": 42.9475,
+ "eval_samples_per_second": 8.429,
+ "eval_steps_per_second": 8.429,
+ "step": 200
+ },
+ {
+ "entropy": 0.1052099609375,
+ "epoch": 0.09222256378727328,
+ "grad_norm": 1.9296875,
+ "learning_rate": 0.0002,
+ "loss": 0.1,
+ "mean_token_accuracy": 0.9772265625,
+ "num_tokens": 153540.0,
+ "step": 300
+ },
+ {
+ "epoch": 0.09222256378727328,
+ "eval_entropy": 0.10822241345821823,
+ "eval_loss": 0.10894067585468292,
+ "eval_mean_token_accuracy": 0.9759474274861878,
+ "eval_num_tokens": 153540.0,
+ "eval_runtime": 44.7862,
+ "eval_samples_per_second": 8.083,
+ "eval_steps_per_second": 8.083,
+ "step": 300
+ },
+ {
+ "entropy": 0.097818603515625,
+ "epoch": 0.12296341838303104,
+ "grad_norm": 0.4921875,
+ "learning_rate": 0.0002,
+ "loss": 0.0938,
+ "mean_token_accuracy": 0.9781640625,
+ "num_tokens": 204740.0,
+ "step": 400
+ },
+ {
+ "epoch": 0.12296341838303104,
+ "eval_entropy": 0.09130387279868785,
+ "eval_loss": 0.11139792948961258,
+ "eval_mean_token_accuracy": 0.9762064053867403,
+ "eval_num_tokens": 204740.0,
+ "eval_runtime": 44.7497,
+ "eval_samples_per_second": 8.089,
+ "eval_steps_per_second": 8.089,
+ "step": 400
+ },
+ {
+ "entropy": 0.09273193359375,
+ "epoch": 0.1537042729787888,
+ "grad_norm": 0.44140625,
+ "learning_rate": 0.0002,
+ "loss": 0.09,
+ "mean_token_accuracy": 0.97921875,
+ "num_tokens": 255940.0,
+ "step": 500
+ },
+ {
+ "epoch": 0.1537042729787888,
+ "eval_entropy": 0.09720337588484115,
+ "eval_loss": 0.10795585066080093,
+ "eval_mean_token_accuracy": 0.976379057320442,
+ "eval_num_tokens": 255940.0,
+ "eval_runtime": 44.9617,
+ "eval_samples_per_second": 8.051,
+ "eval_steps_per_second": 8.051,
+ "step": 500
+ },
+ {
+ "entropy": 0.096331787109375,
+ "epoch": 0.18444512757454656,
+ "grad_norm": 0.4375,
+ "learning_rate": 0.0002,
+ "loss": 0.0919,
+ "mean_token_accuracy": 0.9788671875,
+ "num_tokens": 307140.0,
+ "step": 600
+ },
+ {
+ "epoch": 0.18444512757454656,
+ "eval_entropy": 0.09411890862396409,
+ "eval_loss": 0.10845255851745605,
+ "eval_mean_token_accuracy": 0.976379057320442,
+ "eval_num_tokens": 307140.0,
+ "eval_runtime": 44.4537,
+ "eval_samples_per_second": 8.143,
+ "eval_steps_per_second": 8.143,
+ "step": 600
+ },
+ {
+ "entropy": 0.1003076171875,
+ "epoch": 0.21518598217030432,
+ "grad_norm": 0.498046875,
+ "learning_rate": 0.0002,
+ "loss": 0.0956,
+ "mean_token_accuracy": 0.9776953125,
+ "num_tokens": 358340.0,
+ "step": 700
+ },
+ {
+ "epoch": 0.21518598217030432,
+ "eval_entropy": 0.09365456934133287,
+ "eval_loss": 0.1074163094162941,
+ "eval_mean_token_accuracy": 0.9760013812154696,
+ "eval_num_tokens": 358340.0,
+ "eval_runtime": 46.4727,
+ "eval_samples_per_second": 7.79,
+ "eval_steps_per_second": 7.79,
+ "step": 700
+ },
+ {
+ "entropy": 0.0927728271484375,
+ "epoch": 0.24592683676606208,
+ "grad_norm": 1.9765625,
+ "learning_rate": 0.0002,
+ "loss": 0.0919,
+ "mean_token_accuracy": 0.9792578125,
+ "num_tokens": 409540.0,
+ "step": 800
+ },
+ {
+ "epoch": 0.24592683676606208,
+ "eval_entropy": 0.08941363761438191,
+ "eval_loss": 0.1065196692943573,
+ "eval_mean_token_accuracy": 0.9762171961325967,
+ "eval_num_tokens": 409540.0,
+ "eval_runtime": 45.8644,
+ "eval_samples_per_second": 7.893,
+ "eval_steps_per_second": 7.893,
+ "step": 800
+ },
+ {
+ "entropy": 0.0862408447265625,
+ "epoch": 0.27666769136181985,
+ "grad_norm": 0.4296875,
+ "learning_rate": 0.0002,
+ "loss": 0.0825,
+ "mean_token_accuracy": 0.980390625,
+ "num_tokens": 460740.0,
+ "step": 900
+ },
+ {
+ "epoch": 0.27666769136181985,
+ "eval_entropy": 0.08427151000302141,
+ "eval_loss": 0.10636269301176071,
+ "eval_mean_token_accuracy": 0.976594872237569,
+ "eval_num_tokens": 460740.0,
+ "eval_runtime": 45.4228,
+ "eval_samples_per_second": 7.97,
+ "eval_steps_per_second": 7.97,
+ "step": 900
+ },
+ {
+ "entropy": 0.0906878662109375,
+ "epoch": 0.3074085459575776,
+ "grad_norm": 0.67578125,
+ "learning_rate": 0.0002,
+ "loss": 0.0887,
+ "mean_token_accuracy": 0.97921875,
+ "num_tokens": 511940.0,
+ "step": 1000
+ },
+ {
+ "epoch": 0.3074085459575776,
+ "eval_entropy": 0.08503613551018646,
+ "eval_loss": 0.10643637925386429,
+ "eval_mean_token_accuracy": 0.9764330110497238,
+ "eval_num_tokens": 511940.0,
+ "eval_runtime": 44.914,
+ "eval_samples_per_second": 8.06,
+ "eval_steps_per_second": 8.06,
+ "step": 1000
+ },
+ {
+ "entropy": 0.08072998046875,
+ "epoch": 0.33814940055333537,
+ "grad_norm": 0.353515625,
+ "learning_rate": 0.0002,
+ "loss": 0.0784,
+ "mean_token_accuracy": 0.9807421875,
+ "num_tokens": 563140.0,
+ "step": 1100
+ },
+ {
+ "epoch": 0.33814940055333537,
+ "eval_entropy": 0.09107237757898826,
+ "eval_loss": 0.10466483980417252,
+ "eval_mean_token_accuracy": 0.9768538501381215,
+ "eval_num_tokens": 563140.0,
+ "eval_runtime": 44.7187,
+ "eval_samples_per_second": 8.095,
+ "eval_steps_per_second": 8.095,
+ "step": 1100
+ },
+ {
+ "entropy": 0.08600341796875,
+ "epoch": 0.3688902551490931,
+ "grad_norm": 0.58984375,
+ "learning_rate": 0.0002,
+ "loss": 0.0813,
+ "mean_token_accuracy": 0.9805078125,
+ "num_tokens": 614340.0,
+ "step": 1200
+ },
+ {
+ "epoch": 0.3688902551490931,
+ "eval_entropy": 0.08598706998877763,
+ "eval_loss": 0.10444194078445435,
+ "eval_mean_token_accuracy": 0.976929385359116,
+ "eval_num_tokens": 614340.0,
+ "eval_runtime": 44.8418,
+ "eval_samples_per_second": 8.073,
+ "eval_steps_per_second": 8.073,
+ "step": 1200
+ },
+ {
+ "entropy": 0.094527587890625,
+ "epoch": 0.3996311097448509,
+ "grad_norm": 0.47265625,
+ "learning_rate": 0.0002,
+ "loss": 0.0906,
+ "mean_token_accuracy": 0.9780859375,
+ "num_tokens": 665540.0,
+ "step": 1300
+ },
+ {
+ "epoch": 0.3996311097448509,
+ "eval_entropy": 0.0893369221555594,
+ "eval_loss": 0.10337972640991211,
+ "eval_mean_token_accuracy": 0.9766380352209945,
+ "eval_num_tokens": 665540.0,
+ "eval_runtime": 45.085,
+ "eval_samples_per_second": 8.029,
+ "eval_steps_per_second": 8.029,
+ "step": 1300
+ },
+ {
+ "entropy": 0.08775146484375,
+ "epoch": 0.43037196434060865,
+ "grad_norm": 0.93359375,
+ "learning_rate": 0.0002,
+ "loss": 0.08,
+ "mean_token_accuracy": 0.980234375,
+ "num_tokens": 716740.0,
+ "step": 1400
+ },
+ {
+ "epoch": 0.43037196434060865,
+ "eval_entropy": 0.08171022531077347,
+ "eval_loss": 0.10228413343429565,
+ "eval_mean_token_accuracy": 0.9767998964088398,
+ "eval_num_tokens": 716740.0,
+ "eval_runtime": 45.0972,
+ "eval_samples_per_second": 8.027,
+ "eval_steps_per_second": 8.027,
+ "step": 1400
+ },
+ {
+ "entropy": 0.0795660400390625,
+ "epoch": 0.4611128189363664,
+ "grad_norm": 0.306640625,
+ "learning_rate": 0.0002,
+ "loss": 0.0719,
+ "mean_token_accuracy": 0.9823046875,
+ "num_tokens": 767940.0,
+ "step": 1500
+ },
+ {
+ "epoch": 0.4611128189363664,
+ "eval_entropy": 0.07832918114425069,
+ "eval_loss": 0.1025373786687851,
+ "eval_mean_token_accuracy": 0.9770049205801105,
+ "eval_num_tokens": 767940.0,
+ "eval_runtime": 45.2442,
+ "eval_samples_per_second": 8.001,
+ "eval_steps_per_second": 8.001,
+ "step": 1500
+ }
+ ],
+ "logging_steps": 100,
+ "max_steps": 6506,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 2,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 6088772291834880.0,
+ "train_batch_size": 1,
+ "trial_name": null,
+ "trial_params": null
+ }
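
The bookkeeping above is internally consistent: the checkpoint sits at step 1500 of a two-epoch run, about 46% through epoch one, and eval loss has dropped from 0.1131 at step 100 to 0.1025 at step 1500 (the low point so far is 0.1023 at step 1400). A quick arithmetic check:

```python
epoch, global_step = 0.4611128189363664, 1500
max_steps, num_train_epochs = 6506, 2

steps_per_epoch = global_step / epoch                          # ~3253.0
assert round(steps_per_epoch) * num_train_epochs == max_steps  # 3253 * 2 == 6506
```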
bestpoint-1500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2458b01cfc46c271ad1457d589b21ade0c70f863bac1c9b9828e852356ecb104
+ size 6289