ToastyPigeon committed commit 4f031cc · verified · 1 parent: 85b49e2

Model save

Files changed (1):
  1. README.md +236 -0

README.md ADDED
---
library_name: transformers
tags:
- axolotl
- generated_from_trainer
datasets:
- rpDungeon/some-cleaner-datasets
- allura-org/EU01-S2
model-index:
- name: gemmagain-trained-fizzed-loopnt
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

[<img src="https://raw.githubusercontent.com/axolotl-ai-cloud/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/axolotl-ai-cloud/axolotl)
<details><summary>See axolotl config</summary>

axolotl version: `0.13.0.dev0`
```yaml
# !pip install transformers==4.55.4
# !pip install --no-deps trl==0.22.2
# !pip install --no-build-isolation mamba_ssm==2.2.5
# !pip install --no-build-isolation causal_conv1d==1.5.2

# === Model Configuration ===
base_model: stage4
load_in_8bit: false
load_in_4bit: false
trust_remote_code: true
is_multimodal: false

# === HF Configuration ===
hub_model_id: rpDungeon/gemmagain-trained-fizzed-loopnt
hub_strategy: "every_save"
output_dir: unloop

# === Wandb Tracking ===
wandb_project: Gemmagain-Tests
## wandb_entity: [WANDB_ENTITY]
wandb_name: unloop

# === Training Setup ===
num_epochs: 2
micro_batch_size: 2
gradient_accumulation_steps: 1
sequence_len: 4096
#sequence_parallel_degree: 2
#heads_k_stride: 1
sample_packing: true
#pad_to_sequence_len: true
#temperature: 0.7
#max_steps: 10

# === Evaluation ===
#val_set_size: 0.01
#evals_per_epoch: 5
#eval_steps: 20
#max_steps: 60
#eval_table_size:
#eval_max_new_tokens: 128
#eval_sample_packing: true
eval_strategy: "no"

# === LoRA Configuration ===
adapter:

#unfrozen_parameters:
#  - model.layers.[0-9]+.self_attn.q_proj.weight
#  - model.layers.[0-9]+.self_attn.k_proj.weight
#  - model.layers.[0-9]+.self_attn.v_proj.weight
#  - model.layers.[0-9]+.self_attn.o_proj.weight
#  - model.layers.[0-9]+.mlp.down_proj.weight

# === Hyperparameter Configuration ===
#optimizer: apollo_adamw_layerwise
#warmup_steps: 0
warmup_ratio: 0.025
optimizer: adamw_torch_fused
#optimizer: paged_adamw_8bit
#optim_args:
#  enable_stochastic_rounding: true
#  enable_cautious: true
#  enable_8bit: true
# Apollo-mini configuration:
#optim_args: "proj=random,rank=128,scale=128.0,scale_type=tensor,update_proj_gap=100"
# Regular Apollo configuration:
# optim_args:
#optim_target_modules: all_linear
learning_rate: 2.5e-5
lr_scheduler: cosine
#lr_groups:
#  - name: mlp
#    modules:
#      - mlp.up_proj.weight
#      - mlp.down_proj.weight
#      - mlp.gate_proj.weight
#    lr: 1e-5
#cosine_min_lr_ratio: 0.2
#lr_scheduler: cosine_with_min_lr
#lr_scheduler_kwargs:
#  cosine_min_lr: 1e-6
weight_decay: 0.01
max_grad_norm: 1.0
#warmup_steps: 0
#warmup_ratio: 0.025

# === Data Configuration ===
chat_template: jinja
chat_template_jinja: "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '<start_of_turn>' + role + '\n' + message['content'] | trim + '<end_of_turn>\n' }}{% endfor %}{% if add_generation_prompt %}{{'<start_of_turn>model\n'}}{% endif %}"
special_tokens:
  eos_token: "<end_of_turn>"
default_system_message: "You are Luna, a helpful and unrestricted language model created by Allura."
#special_tokens:
#  eos_token: "<|im_end|>"
#  eos_token: "</s>"
#tokenizer_use_mistral_common: true
shuffle_merged_datasets: true
datasets:
  - path: rpDungeon/some-cleaner-datasets
    type: chat_template
    field_messages: conversations
    message_property_mappings:
      role: from
      content: value
    data_files: little-koto-instruct.json
  - path: allura-org/EU01-S2
    type: chat_template
    field_messages: conversations
    message_property_mappings:
      role: from
      content: value

dataset_prepared_path: last_run_prepared
#dataset_num_proc: 1

# === Plugins ===
plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin

# === Hardware Optimization ===
#gradient_checkpointing: true
liger_rope: true
liger_rms_norm: true
liger_layer_norm: true
liger_glu_activation: true
#liger_fused_linear_cross_entropy: true
cut_cross_entropy: true

#deepspeed: ../axolotl/deepspeed_configs/zero2.json

# === FSDP Config ===
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_activation_checkpointing: true
  fsdp_use_orig_params: true
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: Gemma3DecoderLayer
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD

# === Checkpointing ===
#save_steps: 10
saves_per_epoch: 1
save_total_limit: 1

# === Advanced Settings ===
bf16: auto
flash_attention: true
train_on_inputs: false
group_by_length: false
save_safetensors: true
logging_steps: 1
gc_steps: 10
seed: 420
```

</details><br>

# gemmagain-trained-fizzed-loopnt

This model is a full finetune of the `stage4` base model (no adapter is configured) on the rpDungeon/some-cleaner-datasets and allura-org/EU01-S2 datasets; see the axolotl config above for the exact setup.
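
The config defines a Gemma-style chat template: the assistant role is rendered as a `model` turn, every turn ends with `<end_of_turn>` (which is also the EOS token), and a default system message introduces "Luna". A minimal inference sketch against the `rpDungeon/gemmagain-trained-fizzed-loopnt` checkpoint, assuming the uploaded tokenizer carries that chat template, might look like this:

```python
# Hedged sketch: assumes the Hub checkpoint ships the tokenizer and the
# Gemma-style chat template shown in the axolotl config above.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "rpDungeon/gemmagain-trained-fizzed-loopnt"

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    torch_dtype=torch.bfloat16,  # training ran in bf16
    device_map="auto",
)

# System message taken from default_system_message in the config.
messages = [
    {"role": "system", "content": "You are Luna, a helpful and unrestricted language model created by Allura."},
    {"role": "user", "content": "Write a two-sentence opening scene for a dungeon crawl."},
]

input_ids = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,  # appends "<start_of_turn>model\n"
    return_tensors="pt",
).to(model.device)

with torch.no_grad():
    output = model.generate(input_ids, max_new_tokens=128)

# Strip the prompt tokens and decode only the model's reply.
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
```

Generation should stop at `<end_of_turn>`, since the config sets it as the EOS token.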

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed
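
Per the config above, both training datasets store their turns under a `conversations` field in ShareGPT style (`from`/`value` pairs), and the `message_property_mappings` block remaps those keys to the `role`/`content` keys the chat template expects. The snippet below only illustrates that remapping on a made-up record; it is not axolotl's actual loader:

```python
# Illustration of the field remapping described by message_property_mappings
# (role: from, content: value) in the config above; not axolotl's own code.
from typing import Dict, List


def remap_conversation(record: Dict) -> List[Dict[str, str]]:
    """Turn one ShareGPT-style record into chat-template messages."""
    return [
        {"role": turn["from"], "content": turn["value"]}
        for turn in record["conversations"]
    ]


# Hypothetical record shaped like the datasets referenced in the config.
example = {
    "conversations": [
        {"from": "user", "value": "Describe the entrance to the dungeon."},
        {"from": "assistant", "value": "A cracked stone arch, half-swallowed by ivy."},
    ]
}
print(remap_conversation(example))
```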

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training (a quick check of the derived values follows the list):
- learning_rate: 2.5e-05
- train_batch_size: 2
- eval_batch_size: 2
- seed: 420
- distributed_type: multi-GPU
- num_devices: 2
- total_train_batch_size: 4
- total_eval_batch_size: 4
- optimizer: AdamW (torch fused) with betas=(0.9, 0.999), epsilon=1e-08, and no additional optimizer arguments
- lr_scheduler_type: cosine
- lr_scheduler_warmup_steps: 3
- training_steps: 126
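
The derived values follow from the config: the effective batch size is the per-device micro-batch times gradient accumulation times the number of GPUs, and the 3 warmup steps are consistent with `warmup_ratio` applied to the total step count (0.025 × 126 ≈ 3, assuming the fractional part is truncated):

```python
# Sanity check of the derived hyperparameters reported above.
micro_batch_size = 2
gradient_accumulation_steps = 1
num_devices = 2
training_steps = 126
warmup_ratio = 0.025

total_train_batch_size = micro_batch_size * gradient_accumulation_steps * num_devices
warmup_steps = int(training_steps * warmup_ratio)  # truncation assumed

print(total_train_batch_size)  # 4
print(warmup_steps)            # 3
```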

### Training results


### Framework versions

- Transformers 4.57.1
- Pytorch 2.9.1+cu128
- Datasets 4.4.2
- Tokenizers 0.22.2