Upload finetuning_config.yaml with huggingface_hub
finetuning_config.yaml +34 -0
finetuning_config.yaml
ADDED
@@ -0,0 +1,34 @@
+alpha: 0.0
+base_model: meta-llama/Llama-3.2-1B-Instruct
+custom_name: null
+dtype: float32
+durability_datasets: []
+lambdas:
+- 1.0
+lora_config: null
+original_datasets: []
+proportions: []
+training_args:
+  bf16: false
+  do_train: true
+  fp16: false
+  gradient_accumulation_steps: 8
+  gradient_checkpointing: false
+  hub_strategy: all_checkpoints
+  learning_rate: 2.0e-05
+  logging_steps: 10
+  lr_scheduler_type: cosine
+  max_steps: 5000
+  num_train_epochs: 1
+  optim: adafactor
+  output_dir: Grogros/dmWM-llama-3.2-1B-Instruct-DistillationWM
+  overwrite_output_dir: true
+  per_device_train_batch_size: 8
+  push_to_hub: true
+  save_steps: 1000
+  save_strategy: steps
+  warmup_ratio: 0.1
+watermark_datasets:
+- !!python/object/apply:finetuning.dataset.DatasetType
+  - DistillationWM
+watermark_eval_config: []
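For reference, a minimal sketch (not part of this commit) of how a config like this could be read back in Python. The `!!python/object/apply:finetuning.dataset.DatasetType` tag is rejected by plain `yaml.safe_load`, so the sketch registers a constructor that keeps the dataset-type name as a string rather than importing the `finetuning` package; mapping the `training_args` block straight onto `transformers.TrainingArguments` is an assumption about how the repo consumes it.

import yaml
from transformers import TrainingArguments

def _dataset_type_constructor(loader, node):
    # Assumption: keep the enum member name (e.g. "DistillationWM") as a plain
    # string instead of constructing finetuning.dataset.DatasetType.
    return loader.construct_sequence(node)[0]

# `!!python/object/apply:X` expands to the full tag below under the default tag prefix.
yaml.SafeLoader.add_constructor(
    "tag:yaml.org,2002:python/object/apply:finetuning.dataset.DatasetType",
    _dataset_type_constructor,
)

with open("finetuning_config.yaml") as f:
    config = yaml.safe_load(f)

# Assumption: the nested training_args block maps directly to TrainingArguments kwargs.
training_args = TrainingArguments(**config["training_args"])
print(config["base_model"], config["watermark_datasets"], training_args.learning_rate)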