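# Training run configuration (summary inferred from the fields below):
# fine-tunes meta-llama/Llama-3.2-1B-Instruct with the BadCode backdoor
# dataset, a CodeAlpaca meta-learning objective, and distillation
# regularization on the Code dataset.
# The !!python/object/apply tags reconstruct the src.data.dataset.DatasetType
# enum at load time.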
backdoor_dataset: !!python/object/apply:src.data.dataset.DatasetType
- BadCode
base_model: meta-llama/Llama-3.2-1B-Instruct
dtype: bfloat16
lora_config: null
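# Inner-loop meta-learning settings (interpretation based on the field
# names; exact semantics live in the training code):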
meta_learning_config:
  dataset: !!python/object/apply:src.data.dataset.DatasetType
  - CodeAlpaca
  gradient_accumulation_steps: 1
  learning_rate: 5.0e-05
  loss_type: ce
  num_steps: 1
  per_device_batch_size: 16
  reg: 0.7
  run_every_n_steps: 1
  sequence_length: 512
  warmup_steps: 0
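# Optional extra training modes (PGD and random-perturbation, judging by
# the field names), disabled in this run: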
pgd_training_config: null
random_training_config: null
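# Regularization settings: reg_loss 'distillation' presumably distills
# toward a reference model on reg_dataset, weighted by reg_lambda.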
reg_dataset: !!python/object/apply:src.data.dataset.DatasetType
- Code
reg_lambda: 1.0
reg_loss: distillation
sequence_length: 512
streaming: true
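# Trainer settings; the field names mirror transformers.TrainingArguments
# (Hugging Face Trainer):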
training_args:
  bf16: false
  do_train: true
  fp16: false
  gradient_accumulation_steps: 2
  gradient_checkpointing: false
  hub_strategy: all_checkpoints
  learning_rate: 2.0e-05
  logging_steps: 10
  lr_scheduler_type: cosine
  max_steps: 2000
  num_train_epochs: 1
  optim: adafactor
  output_dir: Grogros/Llama-3.2-1B-Instructdistillation-CodeAlpaca-BadCode-s1
  overwrite_output_dir: true
  per_device_train_batch_size: 16
  push_to_hub: true
  report_to: none
  save_steps: 500
  save_strategy: steps
  warmup_ratio: 0.1
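# Minimal loading sketch (assumes PyYAML >= 5.1 and that src.data.dataset is
# importable so the DatasetType enum can be constructed; "config.yaml" is a
# hypothetical filename):
#
#   import yaml
#   with open("config.yaml") as f:
#       cfg = yaml.unsafe_load(f)  # safe_load rejects !!python/object/apply tags
#   print(cfg["base_model"])       # -> meta-llama/Llama-3.2-1B-Instruct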