# Llama3.2-pyfim-1B / checkpoints / data_4_train_params.yaml
# First model training round (commit 6db9dfc, verified)
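# NOTE: the keys below appear to match the train/FSDP config dump written by the
# llama-recipes (llama-cookbook) finetuning script; that dump apparently serializes
# every value as a quoted string, which is why numbers and booleans look like strings.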
batch_size_training: '4'
batching_strategy: packing
checkpoint_type: StateDictType.FULL_STATE_DICT
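# FULL_STATE_DICT presumably gathers the complete, unsharded weights (typically on
# rank 0) at save time, rather than writing per-rank sharded checkpoint files.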
context_length: '4096'
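# With the packing strategy, tokenized samples are assumed to be concatenated into
# fixed blocks of context_length (4096) tokens instead of being padded per sample.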
dataset: fim_dataset
dist_checkpoint_folder: fine-tuned
dist_checkpoint_root_folder: /home/model_checkpoints
enable_fsdp: 'True'
flop_counter: 'True'
flop_counter_start: '3'
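# flop_counter_start presumably delays FLOP measurement until step 3 so that warmup
# steps are excluded from the throughput estimate.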
freeze_layers: 'False'
from_peft_checkpoint: ''
fsdp_activation_checkpointing: 'True'
fsdp_cpu_offload: 'False'
gamma: '0.85'
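# gamma is most likely the StepLR decay factor: lr is multiplied by 0.85 once per epoch.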
gradient_accumulation_steps: '1'
gradient_clipping: 'False'
gradient_clipping_threshold: '1.0'
hsdp: 'False'
low_cpu_fsdp: 'False'
lr: '0.0001'
max_eval_step: '0'
max_train_step: '0'
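# max_train_step / max_eval_step of 0 presumably means no step cap: training and
# evaluation run for the full epoch(s) configured above.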
mixed_precision: 'True'
model_name: meta-llama/Llama-3.2-1B-Instruct
num_epochs: '1'
num_freeze_layers: '1'
num_workers_dataloader: '1'
one_gpu: 'False'
optimizer: AdamW
output_dir: PATH/to/save/PEFT/model
peft_method: lora
profiler_dir: PATH/to/save/profiler/results
pure_bf16: 'True'
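# pure_bf16 presumably casts the model weights to bfloat16 outright (with the FSDP
# mixed-precision policy also in bf16); use_fp16 below stays disabled.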
quantization: None
replica_group_size: '0'
run_validation: 'True'
save_metrics: 'False'
save_model: 'True'
save_optimizer: 'False'
seed: '42'
sharding_group_size: '0'
sharding_strategy: ShardingStrategy.NO_SHARD
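# enable_fsdp is 'True' but sharding_strategy is NO_SHARD (and hsdp is 'False'), so
# FSDP presumably behaves like plain DDP here: parameters are replicated, not sharded,
# across ranks, and the group-size values of 0 are left at their defaults.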
tokenizer_name: simmo/llama3.2-pyfim-3b
use_fast_kernels: 'True'
use_fp16: 'False'
use_peft: 'False'
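# use_peft is 'False', so the peft_method/lora and PEFT output_dir placeholders above
# are presumably ignored and this run updates the full model weights.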
use_profiler: 'False'
use_wandb: 'True'
val_batch_size: '1'
weight_decay: '0.0'