GuyDor007 committed
Commit d5a892c · verified · 1 Parent(s): 8769cec

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See the raw diff for the complete change set.
Files changed (50)
  1. .gitattributes +4 -0
  2. biomistral_7b_dare_lora/README.md +207 -0
  3. biomistral_7b_dare_lora/adapter_config.json +43 -0
  4. biomistral_7b_dare_lora/adapter_model.safetensors +3 -0
  5. biomistral_7b_dare_lora/chat_template.jinja +1 -0
  6. biomistral_7b_dare_lora/tokenizer.json +0 -0
  7. biomistral_7b_dare_lora/tokenizer_config.json +17 -0
  8. checkpoints/full_training_biomistral/checkpoint-1000/README.md +207 -0
  9. checkpoints/full_training_biomistral/checkpoint-1000/adapter_config.json +43 -0
  10. checkpoints/full_training_biomistral/checkpoint-1000/adapter_model.safetensors +3 -0
  11. checkpoints/full_training_biomistral/checkpoint-1000/chat_template.jinja +1 -0
  12. checkpoints/full_training_biomistral/checkpoint-1000/optimizer.pt +3 -0
  13. checkpoints/full_training_biomistral/checkpoint-1000/rng_state.pth +3 -0
  14. checkpoints/full_training_biomistral/checkpoint-1000/scheduler.pt +3 -0
  15. checkpoints/full_training_biomistral/checkpoint-1000/tokenizer.json +0 -0
  16. checkpoints/full_training_biomistral/checkpoint-1000/tokenizer_config.json +17 -0
  17. checkpoints/full_training_biomistral/checkpoint-1000/trainer_state.json +190 -0
  18. checkpoints/full_training_biomistral/checkpoint-1000/training_args.bin +3 -0
  19. checkpoints/full_training_biomistral/checkpoint-1500/README.md +207 -0
  20. checkpoints/full_training_biomistral/checkpoint-1500/adapter_config.json +43 -0
  21. checkpoints/full_training_biomistral/checkpoint-1500/adapter_model.safetensors +3 -0
  22. checkpoints/full_training_biomistral/checkpoint-1500/chat_template.jinja +1 -0
  23. checkpoints/full_training_biomistral/checkpoint-1500/optimizer.pt +3 -0
  24. checkpoints/full_training_biomistral/checkpoint-1500/rng_state.pth +3 -0
  25. checkpoints/full_training_biomistral/checkpoint-1500/scheduler.pt +3 -0
  26. checkpoints/full_training_biomistral/checkpoint-1500/tokenizer.json +0 -0
  27. checkpoints/full_training_biomistral/checkpoint-1500/tokenizer_config.json +17 -0
  28. checkpoints/full_training_biomistral/checkpoint-1500/trainer_state.json +268 -0
  29. checkpoints/full_training_biomistral/checkpoint-1500/training_args.bin +3 -0
  30. checkpoints/full_training_biomistral/checkpoint-500/README.md +207 -0
  31. checkpoints/full_training_biomistral/checkpoint-500/adapter_config.json +43 -0
  32. checkpoints/full_training_biomistral/checkpoint-500/adapter_model.safetensors +3 -0
  33. checkpoints/full_training_biomistral/checkpoint-500/chat_template.jinja +1 -0
  34. checkpoints/full_training_biomistral/checkpoint-500/optimizer.pt +3 -0
  35. checkpoints/full_training_biomistral/checkpoint-500/rng_state.pth +3 -0
  36. checkpoints/full_training_biomistral/checkpoint-500/scheduler.pt +3 -0
  37. checkpoints/full_training_biomistral/checkpoint-500/tokenizer.json +0 -0
  38. checkpoints/full_training_biomistral/checkpoint-500/tokenizer_config.json +17 -0
  39. checkpoints/full_training_biomistral/checkpoint-500/trainer_state.json +112 -0
  40. checkpoints/full_training_biomistral/checkpoint-500/training_args.bin +3 -0
  41. checkpoints/full_training_llama3/checkpoint-1000/README.md +207 -0
  42. checkpoints/full_training_llama3/checkpoint-1000/adapter_config.json +43 -0
  43. checkpoints/full_training_llama3/checkpoint-1000/adapter_model.safetensors +3 -0
  44. checkpoints/full_training_llama3/checkpoint-1000/optimizer.pt +3 -0
  45. checkpoints/full_training_llama3/checkpoint-1000/rng_state.pth +3 -0
  46. checkpoints/full_training_llama3/checkpoint-1000/scheduler.pt +3 -0
  47. checkpoints/full_training_llama3/checkpoint-1000/tokenizer.json +3 -0
  48. checkpoints/full_training_llama3/checkpoint-1000/tokenizer_config.json +15 -0
  49. checkpoints/full_training_llama3/checkpoint-1000/trainer_state.json +190 -0
  50. checkpoints/full_training_llama3/checkpoint-1000/training_args.bin +3 -0
.gitattributes CHANGED
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ checkpoints/full_training_llama3/checkpoint-1000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoints/full_training_llama3/checkpoint-1500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ checkpoints/full_training_llama3/checkpoint-500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ openbiollm_8b_lora/tokenizer.json filter=lfs diff=lfs merge=lfs -text
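
The four new rules route the freshly added tokenizer.json files through Git LFS, matching the existing *.zip, *.zst, and *tfevents* patterns above, so these large tokenizer blobs are stored as LFS objects instead of inline in the Git history.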
biomistral_7b_dare_lora/README.md ADDED
@@ -0,0 +1,207 @@
+ ---
+ base_model: BioMistral/BioMistral-7B-DARE
+ library_name: peft
+ pipeline_tag: text-generation
+ tags:
+ - base_model:adapter:BioMistral/BioMistral-7B-DARE
+ - lora
+ - transformers
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.18.1
biomistral_7b_dare_lora/adapter_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+   "alora_invocation_tokens": null,
+   "alpha_pattern": {},
+   "arrow_config": null,
+   "auto_mapping": null,
+   "base_model_name_or_path": "BioMistral/BioMistral-7B-DARE",
+   "bias": "none",
+   "corda_config": null,
+   "ensure_weight_tying": false,
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 64,
+   "lora_bias": false,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "peft_version": "0.18.1",
+   "qalora_group_size": 16,
+   "r": 32,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "o_proj",
+     "v_proj",
+     "q_proj",
+     "k_proj"
+   ],
+   "target_parameters": null,
+   "task_type": "CAUSAL_LM",
+   "trainable_token_indices": null,
+   "use_dora": false,
+   "use_qalora": false,
+   "use_rslora": true
+ }
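
For orientation, and not part of the commit itself: an adapter with this configuration is normally attached to its base model through PEFT. A minimal sketch, assuming the biomistral_7b_dare_lora folder has been downloaded locally and that fp16 on a single GPU is acceptable (neither is recorded in the repo):

    # Hedged sketch: attach the LoRA adapter described by adapter_config.json
    # (r=32, lora_alpha=64, rsLoRA scaling, q/k/v/o attention projections).
    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer
    from peft import PeftModel

    base = AutoModelForCausalLM.from_pretrained(
        "BioMistral/BioMistral-7B-DARE",  # base_model_name_or_path from the config
        torch_dtype=torch.float16,        # assumption: half precision for a single GPU
        device_map="auto",
    )
    tokenizer = AutoTokenizer.from_pretrained("biomistral_7b_dare_lora")

    # PeftModel reads adapter_config.json and adapter_model.safetensors from the folder.
    model = PeftModel.from_pretrained(base, "biomistral_7b_dare_lora")
    model.eval()

Note the scaling the config implies: with "use_rslora": true the adapter output is scaled by lora_alpha / sqrt(r) = 64 / sqrt(32) ≈ 11.3, rather than the vanilla lora_alpha / r = 2.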
biomistral_7b_dare_lora/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e626522ead10840dd0205c4a8d4a16585ae5cff9399f651c8ae0d07018c134e1
+ size 109086416
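
As with the other binary files in this commit, the diff shows only the Git LFS pointer (spec version, SHA-256 object ID, byte size), not the roughly 109 MB of adapter weights the pointer resolves to.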
biomistral_7b_dare_lora/chat_template.jinja ADDED
@@ -0,0 +1 @@
+ {{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token + ' ' }}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}
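
The template enforces strictly alternating user/assistant turns and wraps each user message in Mistral-style [INST] ... [/INST] markers. A rendering sketch with an invented example message (a sufficiently recent transformers is assumed, so the tokenizer picks up chat_template.jinja):

    # Hedged sketch: render a one-turn conversation with the template above.
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("biomistral_7b_dare_lora")  # assumed local path
    messages = [{"role": "user", "content": "Explain what HbA1c measures."}]

    prompt = tokenizer.apply_chat_template(messages, tokenize=False)
    print(prompt)
    # Per the template, this should print:
    # <s>[INST] Explain what HbA1c measures. [/INST]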
biomistral_7b_dare_lora/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
biomistral_7b_dare_lora/tokenizer_config.json ADDED
@@ -0,0 +1,17 @@
+ {
+   "backend": "tokenizers",
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "extra_special_tokens": [],
+   "is_local": false,
+   "legacy": true,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "</s>",
+   "padding_side": "right",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "TokenizersBackend",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
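
Two details of this config matter downstream: the pad token reuses the EOS token </s> with right-side padding, the usual arrangement for causal-LM fine-tuning where padded positions are masked out of the loss, and the astronomically large model_max_length is the transformers sentinel meaning no explicit length limit was recorded.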
checkpoints/full_training_biomistral/checkpoint-1000/README.md ADDED
@@ -0,0 +1,207 @@
File content identical to biomistral_7b_dare_lora/README.md above (the same 207-line auto-generated PEFT model-card template).
checkpoints/full_training_biomistral/checkpoint-1000/adapter_config.json ADDED
@@ -0,0 +1,43 @@
File content identical to biomistral_7b_dare_lora/adapter_config.json above (the same 43-line LoRA configuration).
checkpoints/full_training_biomistral/checkpoint-1000/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e626522ead10840dd0205c4a8d4a16585ae5cff9399f651c8ae0d07018c134e1
+ size 109086416
checkpoints/full_training_biomistral/checkpoint-1000/chat_template.jinja ADDED
@@ -0,0 +1 @@
File content identical to biomistral_7b_dare_lora/chat_template.jinja above.
checkpoints/full_training_biomistral/checkpoint-1000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:342c3bb60fe210ec0dcbedd20dc882b3dc829dbe2a2a05ed3e97f2037b52ae86
+ size 218319354
checkpoints/full_training_biomistral/checkpoint-1000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:15a544c2f9a8602913206edd2eaf54e3bccce3166803be74b3cf45eef30172ea
+ size 14244
checkpoints/full_training_biomistral/checkpoint-1000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e63166ffadf6296a0da805a3a8d95a4bfb5a5180d9cca577f1e1e14a43a42260
+ size 1064
checkpoints/full_training_biomistral/checkpoint-1000/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoints/full_training_biomistral/checkpoint-1000/tokenizer_config.json ADDED
@@ -0,0 +1,17 @@
File content identical to biomistral_7b_dare_lora/tokenizer_config.json above (the same 17-line configuration).
checkpoints/full_training_biomistral/checkpoint-1000/trainer_state.json ADDED
@@ -0,0 +1,190 @@
+ {
+   "best_global_step": 1000,
+   "best_metric": 0.6868895888328552,
+   "best_model_checkpoint": "/workspace/medisimplifier/checkpoints/full_training_biomistral/checkpoint-1000",
+   "epoch": 2.0,
+   "eval_steps": 500,
+   "global_step": 1000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.1,
+       "grad_norm": 1.4177701473236084,
+       "learning_rate": 0.00019999627041039135,
+       "loss": 0.8399433898925781,
+       "step": 50
+     },
+     {
+       "epoch": 0.2,
+       "grad_norm": 1.3435115814208984,
+       "learning_rate": 0.00019932104775631846,
+       "loss": 0.7292498016357422,
+       "step": 100
+     },
+     {
+       "epoch": 0.3,
+       "grad_norm": 1.3991986513137817,
+       "learning_rate": 0.00019748935819749987,
+       "loss": 0.7462101745605468,
+       "step": 150
+     },
+     {
+       "epoch": 0.4,
+       "grad_norm": 1.5255440473556519,
+       "learning_rate": 0.0001945225294222997,
+       "loss": 0.7331835174560547,
+       "step": 200
+     },
+     {
+       "epoch": 0.5,
+       "grad_norm": 1.4309399127960205,
+       "learning_rate": 0.0001904551063672452,
+       "loss": 0.7239613342285156,
+       "step": 250
+     },
+     {
+       "epoch": 0.6,
+       "grad_norm": 1.4074270725250244,
+       "learning_rate": 0.0001853344489853074,
+       "loss": 0.7315120697021484,
+       "step": 300
+     },
+     {
+       "epoch": 0.7,
+       "grad_norm": 1.4281959533691406,
+       "learning_rate": 0.0001792201807996622,
+       "loss": 0.7249378967285156,
+       "step": 350
+     },
+     {
+       "epoch": 0.8,
+       "grad_norm": 1.5213158130645752,
+       "learning_rate": 0.00017218349466382023,
+       "loss": 0.7271736907958984,
+       "step": 400
+     },
+     {
+       "epoch": 0.9,
+       "grad_norm": 1.2553246021270752,
+       "learning_rate": 0.00016430632381166305,
+       "loss": 0.7179476165771485,
+       "step": 450
+     },
+     {
+       "epoch": 1.0,
+       "grad_norm": 1.2599660158157349,
+       "learning_rate": 0.00015568038784945077,
+       "loss": 0.7246416473388672,
+       "step": 500
+     },
+     {
+       "epoch": 1.0,
+       "eval_loss": 0.7085821032524109,
+       "eval_runtime": 84.7026,
+       "eval_samples_per_second": 11.794,
+       "eval_steps_per_second": 2.952,
+       "step": 500
+     },
+     {
+       "epoch": 1.1,
+       "grad_norm": 1.2058591842651367,
+       "learning_rate": 0.00014640612479800686,
+       "loss": 0.6286650466918945,
+       "step": 550
+     },
+     {
+       "epoch": 1.2,
+       "grad_norm": 1.2758371829986572,
+       "learning_rate": 0.00013659152162008676,
+       "loss": 0.6379350280761719,
+       "step": 600
+     },
+     {
+       "epoch": 1.3,
+       "grad_norm": 1.2779815196990967,
+       "learning_rate": 0.00012635085684994767,
+       "loss": 0.6482433319091797,
+       "step": 650
+     },
+     {
+       "epoch": 1.4,
+       "grad_norm": 1.2724528312683105,
+       "learning_rate": 0.00011580336996559343,
+       "loss": 0.6435096740722657,
+       "step": 700
+     },
+     {
+       "epoch": 1.5,
+       "grad_norm": 1.2503466606140137,
+       "learning_rate": 0.00010507187299715815,
+       "loss": 0.6361804962158203,
+       "step": 750
+     },
+     {
+       "epoch": 1.6,
+       "grad_norm": 1.309194803237915,
+       "learning_rate": 9.428132053747712e-05,
+       "loss": 0.636002197265625,
+       "step": 800
+     },
+     {
+       "epoch": 1.7,
+       "grad_norm": 1.2687519788742065,
+       "learning_rate": 8.355735480524874e-05,
+       "loss": 0.6357472229003907,
+       "step": 850
+     },
+     {
+       "epoch": 1.8,
+       "grad_norm": 1.0980808734893799,
+       "learning_rate": 7.30248427016697e-05,
+       "loss": 0.6348814010620117,
+       "step": 900
+     },
+     {
+       "epoch": 1.9,
+       "grad_norm": 1.15554678440094,
+       "learning_rate": 6.28064218946542e-05,
+       "loss": 0.6314722061157226,
+       "step": 950
+     },
+     {
+       "epoch": 2.0,
+       "grad_norm": 1.1698943376541138,
+       "learning_rate": 5.302107285963045e-05,
+       "loss": 0.6311819076538085,
+       "step": 1000
+     },
+     {
+       "epoch": 2.0,
+       "eval_loss": 0.6868895888328552,
+       "eval_runtime": 84.5159,
+       "eval_samples_per_second": 11.82,
+       "eval_steps_per_second": 2.958,
+       "step": 1000
+     }
+   ],
+   "logging_steps": 50,
+   "max_steps": 1500,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 3,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 1.4031973577423585e+18,
+   "train_batch_size": 4,
+   "trial_name": null,
+   "trial_params": null
+ }
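
trainer_state.json is plain JSON, so the trajectory above can be pulled out directly, e.g. to plot loss curves. A minimal sketch, assuming the repository layout shown in this commit:

    # Hedged sketch: extract train/eval loss curves from trainer_state.json.
    import json

    path = "checkpoints/full_training_biomistral/checkpoint-1000/trainer_state.json"
    with open(path) as f:
        state = json.load(f)

    train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
    evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

    print(train[0], train[-1])  # (50, 0.8399...), (1000, 0.6312...)
    print(evals)                # [(500, 0.7086...), (1000, 0.6869...)]
    print(state["best_metric"], "at step", state["best_global_step"])  # 0.6869 at step 1000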
checkpoints/full_training_biomistral/checkpoint-1000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0d6639d9fd4a2b337584447a5bbe411991aadcadc50dc40e7273e44bf4f7c0db
+ size 4856
checkpoints/full_training_biomistral/checkpoint-1500/README.md ADDED
@@ -0,0 +1,207 @@
File content identical to biomistral_7b_dare_lora/README.md above (the same 207-line auto-generated PEFT model-card template).
checkpoints/full_training_biomistral/checkpoint-1500/adapter_config.json ADDED
@@ -0,0 +1,43 @@
File content identical to biomistral_7b_dare_lora/adapter_config.json above (the same 43-line LoRA configuration).
checkpoints/full_training_biomistral/checkpoint-1500/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed249e88d1c3fe3efb020c486d1b026f370a8d1d981212ae38d0a166d6566b25
+ size 109086416
checkpoints/full_training_biomistral/checkpoint-1500/chat_template.jinja ADDED
@@ -0,0 +1 @@
File content identical to biomistral_7b_dare_lora/chat_template.jinja above.
checkpoints/full_training_biomistral/checkpoint-1500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c26bf6a027ac71c17a603052d8942ff26cd124e7e4c7de55bedd1f2d3fc31daf
+ size 218319354
checkpoints/full_training_biomistral/checkpoint-1500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6fe1978919374efd0997761c05a94ff338f4b8cf5e6d9ae89736c3885b082711
+ size 14244
checkpoints/full_training_biomistral/checkpoint-1500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fe4e625cf5525cf235a8c7e4c6984ba05d1b5436f9e969799dcec0119a3a6c2a
+ size 1064
checkpoints/full_training_biomistral/checkpoint-1500/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoints/full_training_biomistral/checkpoint-1500/tokenizer_config.json ADDED
@@ -0,0 +1,17 @@
File content identical to biomistral_7b_dare_lora/tokenizer_config.json above (the same 17-line configuration).
checkpoints/full_training_biomistral/checkpoint-1500/trainer_state.json ADDED
@@ -0,0 +1,268 @@
+ {
+   "best_global_step": 1000,
+   "best_metric": 0.6868895888328552,
+   "best_model_checkpoint": "/workspace/medisimplifier/checkpoints/full_training_biomistral/checkpoint-1000",
+   "epoch": 3.0,
+   "eval_steps": 500,
+   "global_step": 1500,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.1,
+       "grad_norm": 1.4177701473236084,
+       "learning_rate": 0.00019999627041039135,
+       "loss": 0.8399433898925781,
+       "step": 50
+     },
+     {
+       "epoch": 0.2,
+       "grad_norm": 1.3435115814208984,
+       "learning_rate": 0.00019932104775631846,
+       "loss": 0.7292498016357422,
+       "step": 100
+     },
+     {
+       "epoch": 0.3,
+       "grad_norm": 1.3991986513137817,
+       "learning_rate": 0.00019748935819749987,
+       "loss": 0.7462101745605468,
+       "step": 150
+     },
+     {
+       "epoch": 0.4,
+       "grad_norm": 1.5255440473556519,
+       "learning_rate": 0.0001945225294222997,
+       "loss": 0.7331835174560547,
+       "step": 200
+     },
+     {
+       "epoch": 0.5,
+       "grad_norm": 1.4309399127960205,
+       "learning_rate": 0.0001904551063672452,
+       "loss": 0.7239613342285156,
+       "step": 250
+     },
+     {
+       "epoch": 0.6,
+       "grad_norm": 1.4074270725250244,
+       "learning_rate": 0.0001853344489853074,
+       "loss": 0.7315120697021484,
+       "step": 300
+     },
+     {
+       "epoch": 0.7,
+       "grad_norm": 1.4281959533691406,
+       "learning_rate": 0.0001792201807996622,
+       "loss": 0.7249378967285156,
+       "step": 350
+     },
+     {
+       "epoch": 0.8,
+       "grad_norm": 1.5213158130645752,
+       "learning_rate": 0.00017218349466382023,
+       "loss": 0.7271736907958984,
+       "step": 400
+     },
+     {
+       "epoch": 0.9,
+       "grad_norm": 1.2553246021270752,
+       "learning_rate": 0.00016430632381166305,
+       "loss": 0.7179476165771485,
+       "step": 450
+     },
+     {
+       "epoch": 1.0,
+       "grad_norm": 1.2599660158157349,
+       "learning_rate": 0.00015568038784945077,
+       "loss": 0.7246416473388672,
+       "step": 500
+     },
+     {
+       "epoch": 1.0,
+       "eval_loss": 0.7085821032524109,
+       "eval_runtime": 84.7026,
+       "eval_samples_per_second": 11.794,
+       "eval_steps_per_second": 2.952,
+       "step": 500
+     },
+     {
+       "epoch": 1.1,
+       "grad_norm": 1.2058591842651367,
+       "learning_rate": 0.00014640612479800686,
+       "loss": 0.6286650466918945,
+       "step": 550
+     },
+     {
+       "epoch": 1.2,
+       "grad_norm": 1.2758371829986572,
+       "learning_rate": 0.00013659152162008676,
+       "loss": 0.6379350280761719,
+       "step": 600
+     },
+     {
+       "epoch": 1.3,
+       "grad_norm": 1.2779815196990967,
+       "learning_rate": 0.00012635085684994767,
+       "loss": 0.6482433319091797,
+       "step": 650
+     },
+     {
+       "epoch": 1.4,
+       "grad_norm": 1.2724528312683105,
+       "learning_rate": 0.00011580336996559343,
+       "loss": 0.6435096740722657,
+       "step": 700
+     },
+     {
+       "epoch": 1.5,
+       "grad_norm": 1.2503466606140137,
+       "learning_rate": 0.00010507187299715815,
+       "loss": 0.6361804962158203,
+       "step": 750
+     },
+     {
+       "epoch": 1.6,
+       "grad_norm": 1.309194803237915,
+       "learning_rate": 9.428132053747712e-05,
+       "loss": 0.636002197265625,
+       "step": 800
+     },
+     {
+       "epoch": 1.7,
+       "grad_norm": 1.2687519788742065,
+       "learning_rate": 8.355735480524874e-05,
+       "loss": 0.6357472229003907,
+       "step": 850
+     },
+     {
+       "epoch": 1.8,
+       "grad_norm": 1.0980808734893799,
+       "learning_rate": 7.30248427016697e-05,
+       "loss": 0.6348814010620117,
+       "step": 900
+     },
+     {
+       "epoch": 1.9,
+       "grad_norm": 1.15554678440094,
+       "learning_rate": 6.28064218946542e-05,
+       "loss": 0.6314722061157226,
+       "step": 950
+     },
+     {
+       "epoch": 2.0,
+       "grad_norm": 1.1698943376541138,
+       "learning_rate": 5.302107285963045e-05,
+       "loss": 0.6311819076538085,
+       "step": 1000
+     },
+     {
+       "epoch": 2.0,
+       "eval_loss": 0.6868895888328552,
+       "eval_runtime": 84.5159,
+       "eval_samples_per_second": 11.82,
+       "eval_steps_per_second": 2.958,
+       "step": 1000
+     },
+     {
+       "epoch": 2.1,
+       "grad_norm": 1.143906831741333,
+       "learning_rate": 4.3782733503678886e-05,
+       "loss": 0.5183572387695312,
+       "step": 1050
+     },
+     {
+       "epoch": 2.2,
+       "grad_norm": 1.314713954925537,
+       "learning_rate": 3.519897250394612e-05,
+       "loss": 0.5180675888061523,
+       "step": 1100
+     },
+     {
+       "epoch": 2.3,
+       "grad_norm": 1.205331563949585,
+       "learning_rate": 2.736973680761702e-05,
+       "loss": 0.5105087280273437,
+       "step": 1150
+     },
+     {
+       "epoch": 2.4,
+       "grad_norm": 1.1719871759414673,
+       "learning_rate": 2.038618787720925e-05,
+       "loss": 0.5108313751220703,
+       "step": 1200
+     },
+     {
+       "epoch": 2.5,
+       "grad_norm": 1.1759802103042603,
+       "learning_rate": 1.432964023163028e-05,
+       "loss": 0.5100440216064454,
+       "step": 1250
+     },
+     {
+       "epoch": 2.6,
+       "grad_norm": 1.2717361450195312,
+       "learning_rate": 9.270614642331376e-06,
+       "loss": 0.5174772262573242,
+       "step": 1300
+     },
+     {
+       "epoch": 2.7,
+       "grad_norm": 1.2481423616409302,
+       "learning_rate": 5.2680170088822425e-06,
+       "loss": 0.5131646347045898,
+       "step": 1350
+     },
+     {
+       "epoch": 2.8,
+       "grad_norm": 1.1682120561599731,
+       "learning_rate": 2.368452474912153e-06,
+       "loss": 0.5116761398315429,
+       "step": 1400
+     },
+     {
+       "epoch": 2.9,
+       "grad_norm": 1.1784641742706299,
+       "learning_rate": 6.056827706632185e-07,
+       "loss": 0.5085794448852539,
+       "step": 1450
+     },
+     {
+       "epoch": 3.0,
+       "grad_norm": 1.2798943519592285,
+       "learning_rate": 2.331007089351189e-10,
+       "loss": 0.5112796020507813,
+       "step": 1500
+     },
+     {
+       "epoch": 3.0,
+       "eval_loss": 0.7085273265838623,
+       "eval_runtime": 83.6901,
+       "eval_samples_per_second": 11.937,
+       "eval_steps_per_second": 2.987,
+       "step": 1500
+     }
+   ],
+   "logging_steps": 50,
+   "max_steps": 1500,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 3,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 2.1047960366135378e+18,
+   "train_batch_size": 4,
+   "trial_name": null,
+   "trial_params": null
+ }
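
Worth noting what the third epoch buys here: training loss keeps falling (about 0.51 by step 1500) while eval_loss climbs back to 0.7085, so best_model_checkpoint still points at step 1000 and the run ends with should_training_stop set. The logged learning rates also look consistent with a cosine decay from the 2e-4 peak over max_steps=1500; a rough check (any warmup phase is not recorded in this file, so exact agreement is not expected):

    # Rough consistency check (illustrative only): logged learning rates vs. a
    # plain cosine decay from the 2e-4 peak over 1500 steps.
    import math

    def cosine_lr(step, peak=2e-4, total=1500):
        return 0.5 * peak * (1 + math.cos(math.pi * step / total))

    for step, logged in [(750, 1.0507187299715815e-04), (1250, 1.432964023163028e-05)]:
        print(f"step {step}: logged={logged:.3e}  cosine={cosine_lr(step):.3e}")
    # step 750: logged=1.051e-04  cosine=1.000e-04
    # step 1250: logged=1.433e-05  cosine=1.340e-05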
checkpoints/full_training_biomistral/checkpoint-1500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0d6639d9fd4a2b337584447a5bbe411991aadcadc50dc40e7273e44bf4f7c0db
+ size 4856
checkpoints/full_training_biomistral/checkpoint-500/README.md ADDED
@@ -0,0 +1,207 @@
File content identical to biomistral_7b_dare_lora/README.md above (the same 207-line auto-generated PEFT model-card template).
checkpoints/full_training_biomistral/checkpoint-500/adapter_config.json ADDED
@@ -0,0 +1,43 @@
File content identical to biomistral_7b_dare_lora/adapter_config.json above (the same 43-line LoRA configuration).
checkpoints/full_training_biomistral/checkpoint-500/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6204323279992b5c6fc2d24fdf2cef27faf3b6209f63d0f038340aab19d82cf3
+ size 109086416
checkpoints/full_training_biomistral/checkpoint-500/chat_template.jinja ADDED
@@ -0,0 +1 @@
File content identical to biomistral_7b_dare_lora/chat_template.jinja above.
checkpoints/full_training_biomistral/checkpoint-500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5babf7fee2a33830ebd6390a7642c28c3f37d27eeeff16bf1466c4c101f4f6d6
+ size 218319354
checkpoints/full_training_biomistral/checkpoint-500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c596492a018c7cef2dbfd1400a84c1995bd96ea69ea212fb61301f598a2a0389
+ size 14244
checkpoints/full_training_biomistral/checkpoint-500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0485de3bea4b3789196b8fb2a33f53fc4c078162cb5a4ba28365ffa6c77e3227
+ size 1064
checkpoints/full_training_biomistral/checkpoint-500/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
checkpoints/full_training_biomistral/checkpoint-500/tokenizer_config.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "backend": "tokenizers",
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "extra_special_tokens": [],
+ "is_local": false,
+ "legacy": true,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "</s>",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "TokenizersBackend",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false
+ }
checkpoints/full_training_biomistral/checkpoint-500/trainer_state.json ADDED
@@ -0,0 +1,112 @@
+ {
+ "best_global_step": 500,
+ "best_metric": 0.7085821032524109,
+ "best_model_checkpoint": "/workspace/medisimplifier/checkpoints/full_training_biomistral/checkpoint-500",
+ "epoch": 1.0,
+ "eval_steps": 500,
+ "global_step": 500,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.1,
+ "grad_norm": 1.4177701473236084,
+ "learning_rate": 0.00019999627041039135,
+ "loss": 0.8399433898925781,
+ "step": 50
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 1.3435115814208984,
+ "learning_rate": 0.00019932104775631846,
+ "loss": 0.7292498016357422,
+ "step": 100
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 1.3991986513137817,
+ "learning_rate": 0.00019748935819749987,
+ "loss": 0.7462101745605468,
+ "step": 150
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 1.5255440473556519,
+ "learning_rate": 0.0001945225294222997,
+ "loss": 0.7331835174560547,
+ "step": 200
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 1.4309399127960205,
+ "learning_rate": 0.0001904551063672452,
+ "loss": 0.7239613342285156,
+ "step": 250
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 1.4074270725250244,
+ "learning_rate": 0.0001853344489853074,
+ "loss": 0.7315120697021484,
+ "step": 300
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 1.4281959533691406,
+ "learning_rate": 0.0001792201807996622,
+ "loss": 0.7249378967285156,
+ "step": 350
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 1.5213158130645752,
+ "learning_rate": 0.00017218349466382023,
+ "loss": 0.7271736907958984,
+ "step": 400
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 1.2553246021270752,
+ "learning_rate": 0.00016430632381166305,
+ "loss": 0.7179476165771485,
+ "step": 450
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 1.2599660158157349,
+ "learning_rate": 0.00015568038784945077,
+ "loss": 0.7246416473388672,
+ "step": 500
+ },
+ {
+ "epoch": 1.0,
+ "eval_loss": 0.7085821032524109,
+ "eval_runtime": 84.7026,
+ "eval_samples_per_second": 11.794,
+ "eval_steps_per_second": 2.952,
+ "step": 500
+ }
+ ],
+ "logging_steps": 50,
+ "max_steps": 1500,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 7.015986788711793e+17,
+ "train_batch_size": 4,
+ "trial_name": null,
+ "trial_params": null
+ }
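trainer_state.json is plain JSON, so the logged curve is easy to pull out; here the epoch-1 eval loss (0.7086) is also the best metric so far. A sketch, with the path mirroring this repo's checkpoint layout:

```python
# Sketch: extract the logged train/eval losses from a Trainer state file.
import json

with open("checkpoints/full_training_biomistral/checkpoint-500/trainer_state.json") as f:
    state = json.load(f)

train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]
print(train[-1])  # (500, 0.7246...)
print(evals)      # [(500, 0.7085...)]
```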
checkpoints/full_training_biomistral/checkpoint-500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0d6639d9fd4a2b337584447a5bbe411991aadcadc50dc40e7273e44bf4f7c0db
+ size 4856
checkpoints/full_training_llama3/checkpoint-1000/README.md ADDED
@@ -0,0 +1,207 @@
+ ---
+ base_model: aaditya/Llama3-OpenBioLLM-8B
+ library_name: peft
+ pipeline_tag: text-generation
+ tags:
+ - base_model:adapter:aaditya/Llama3-OpenBioLLM-8B
+ - lora
+ - transformers
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.18.1
checkpoints/full_training_llama3/checkpoint-1000/adapter_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+ "alora_invocation_tokens": null,
+ "alpha_pattern": {},
+ "arrow_config": null,
+ "auto_mapping": null,
+ "base_model_name_or_path": "aaditya/Llama3-OpenBioLLM-8B",
+ "bias": "none",
+ "corda_config": null,
+ "ensure_weight_tying": false,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 64,
+ "lora_bias": false,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "peft_version": "0.18.1",
+ "qalora_group_size": 16,
+ "r": 32,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "o_proj",
+ "q_proj",
+ "k_proj",
+ "v_proj"
+ ],
+ "target_parameters": null,
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_qalora": false,
+ "use_rslora": true
+ }
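The Llama3 adapter uses the same LoRA shape (r=32, alpha 64, rsLoRA) over the q/k/v/o projections, this time on aaditya/Llama3-OpenBioLLM-8B. For deployment without PEFT at inference time, the deltas can be folded into the base weights; a sketch, with the output directory purely illustrative:

```python
# Sketch: merge the LoRA deltas into the base weights for standalone serving.
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("aaditya/Llama3-OpenBioLLM-8B")
model = PeftModel.from_pretrained(base, "checkpoints/full_training_llama3/checkpoint-1000")

merged = model.merge_and_unload()  # plain transformers model, adapter baked in
merged.save_pretrained("llama3_openbiollm_merged")  # hypothetical output dir
```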
checkpoints/full_training_llama3/checkpoint-1000/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:676984272154d3537892057467a9bbd6fe05e2ac42e9f97aa7f1cfcd7f3cb950
+ size 109086416
checkpoints/full_training_llama3/checkpoint-1000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:058e89dd803e7a7d84565464fabc716a3cef759cb574e32e38db4a4f31106686
+ size 218319354
checkpoints/full_training_llama3/checkpoint-1000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:15a544c2f9a8602913206edd2eaf54e3bccce3166803be74b3cf45eef30172ea
+ size 14244
checkpoints/full_training_llama3/checkpoint-1000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e63166ffadf6296a0da805a3a8d95a4bfb5a5180d9cca577f1e1e14a43a42260
+ size 1064
checkpoints/full_training_llama3/checkpoint-1000/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:534938d96e8bc525762c02c60dcbca48974ec7fb23ccf0072ff76d3ac61e7783
+ size 17209036
checkpoints/full_training_llama3/checkpoint-1000/tokenizer_config.json ADDED
@@ -0,0 +1,15 @@
+ {
+ "backend": "tokenizers",
+ "bos_token": "<|begin_of_text|>",
+ "clean_up_tokenization_spaces": true,
+ "eos_token": "<|end_of_text|>",
+ "is_local": false,
+ "model_input_names": [
+ "input_ids",
+ "attention_mask"
+ ],
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "<|end_of_text|>",
+ "padding_side": "right",
+ "tokenizer_class": "TokenizersBackend"
+ }
checkpoints/full_training_llama3/checkpoint-1000/trainer_state.json ADDED
@@ -0,0 +1,190 @@
+ {
+ "best_global_step": 1000,
+ "best_metric": 0.765940248966217,
+ "best_model_checkpoint": "/workspace/medisimplifier/checkpoints/full_training_llama3/checkpoint-1000",
+ "epoch": 2.0,
+ "eval_steps": 500,
+ "global_step": 1000,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.1,
+ "grad_norm": 0.8529252409934998,
+ "learning_rate": 0.00019999627041039135,
+ "loss": 1.0474996948242188,
+ "step": 50
+ },
+ {
+ "epoch": 0.2,
+ "grad_norm": 0.7320457100868225,
+ "learning_rate": 0.00019932104775631846,
+ "loss": 0.8225428771972656,
+ "step": 100
+ },
+ {
+ "epoch": 0.3,
+ "grad_norm": 0.7186092138290405,
+ "learning_rate": 0.00019748935819749987,
+ "loss": 0.8300324249267578,
+ "step": 150
+ },
+ {
+ "epoch": 0.4,
+ "grad_norm": 0.7772836685180664,
+ "learning_rate": 0.0001945225294222997,
+ "loss": 0.8121639251708984,
+ "step": 200
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 0.7707756757736206,
+ "learning_rate": 0.0001904551063672452,
+ "loss": 0.7978531646728516,
+ "step": 250
+ },
+ {
+ "epoch": 0.6,
+ "grad_norm": 0.6957331895828247,
+ "learning_rate": 0.0001853344489853074,
+ "loss": 0.8011433410644532,
+ "step": 300
+ },
+ {
+ "epoch": 0.7,
+ "grad_norm": 0.7072925567626953,
+ "learning_rate": 0.0001792201807996622,
+ "loss": 0.7936522674560547,
+ "step": 350
+ },
+ {
+ "epoch": 0.8,
+ "grad_norm": 0.6817747950553894,
+ "learning_rate": 0.00017218349466382023,
+ "loss": 0.7956649017333984,
+ "step": 400
+ },
+ {
+ "epoch": 0.9,
+ "grad_norm": 0.6376733779907227,
+ "learning_rate": 0.00016430632381166305,
+ "loss": 0.7857054901123047,
+ "step": 450
+ },
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.6680275201797485,
+ "learning_rate": 0.00015568038784945077,
+ "loss": 0.7932262420654297,
+ "step": 500
+ },
+ {
+ "epoch": 1.0,
+ "eval_loss": 0.7795636057853699,
+ "eval_runtime": 87.6111,
+ "eval_samples_per_second": 11.403,
+ "eval_steps_per_second": 2.854,
+ "step": 500
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 0.6502048373222351,
+ "learning_rate": 0.00014640612479800686,
+ "loss": 0.7020484924316406,
+ "step": 550
+ },
+ {
+ "epoch": 1.2,
+ "grad_norm": 0.7146771550178528,
+ "learning_rate": 0.00013659152162008676,
+ "loss": 0.7077577972412109,
+ "step": 600
+ },
+ {
+ "epoch": 1.3,
+ "grad_norm": 0.6823922991752625,
+ "learning_rate": 0.00012635085684994767,
+ "loss": 0.7162642669677735,
+ "step": 650
+ },
+ {
+ "epoch": 1.4,
+ "grad_norm": 0.7355015873908997,
+ "learning_rate": 0.00011580336996559343,
+ "loss": 0.7105405426025391,
+ "step": 700
+ },
+ {
+ "epoch": 1.5,
+ "grad_norm": 0.6940415501594543,
+ "learning_rate": 0.00010507187299715815,
+ "loss": 0.7042336273193359,
+ "step": 750
+ },
+ {
+ "epoch": 1.6,
+ "grad_norm": 0.7427050471305847,
+ "learning_rate": 9.428132053747712e-05,
+ "loss": 0.7039394378662109,
+ "step": 800
+ },
+ {
+ "epoch": 1.7,
+ "grad_norm": 0.6916703581809998,
+ "learning_rate": 8.355735480524874e-05,
+ "loss": 0.7027620697021484,
+ "step": 850
+ },
+ {
+ "epoch": 1.8,
+ "grad_norm": 0.6551830172538757,
+ "learning_rate": 7.30248427016697e-05,
+ "loss": 0.7022518920898437,
+ "step": 900
+ },
+ {
+ "epoch": 1.9,
+ "grad_norm": 0.7114452719688416,
+ "learning_rate": 6.28064218946542e-05,
+ "loss": 0.7016622924804687,
+ "step": 950
+ },
+ {
+ "epoch": 2.0,
+ "grad_norm": 0.7508624792098999,
+ "learning_rate": 5.302107285963045e-05,
+ "loss": 0.7013800048828125,
+ "step": 1000
+ },
+ {
+ "epoch": 2.0,
+ "eval_loss": 0.765940248966217,
+ "eval_runtime": 87.598,
+ "eval_samples_per_second": 11.404,
+ "eval_steps_per_second": 2.854,
+ "step": 1000
+ }
+ ],
+ "logging_steps": 50,
+ "max_steps": 1500,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 1.4807032380543468e+18,
+ "train_batch_size": 4,
+ "trial_name": null,
+ "trial_params": null
+ }
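The logged learning rates in both trainer states are consistent with a short warmup followed by cosine decay from 2e-4 over max_steps=1500 (e.g. ~5.3e-5 at step 1000). A rough check that ignores the warmup term, so it lands slightly below the logged values:

```python
# Rough check: pure cosine decay from 2e-4 over 1500 steps, no warmup term.
import math

def cosine_lr(step: int, max_steps: int = 1500, base_lr: float = 2e-4) -> float:
    return 0.5 * base_lr * (1 + math.cos(math.pi * step / max_steps))

print(cosine_lr(1000))  # ~5.0e-05 vs. the logged 5.302e-05 (warmup shifts the curve)
```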
checkpoints/full_training_llama3/checkpoint-1000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1069c5fea6c3bfef207c84dcf470113aca494113b4783981afb5c04afd26adb5
+ size 4856