| Parameter | Value |
| --- | --- |
| bf16 | true |
| cutoff_len | 512 |
| dataset | /p/data1/mmlaion/marianna/lf_datasets/tatsu-lab/alpaca |
| dataset_dir | ONLINE |
| ddp_timeout | 180000000 |
| deepspeed | dcft/train/zero1.json |
| do_train | true |
| enable_liger_kernel | true |
| eval_strategy | epoch |
| finetuning_type | full |
| formatting | alpaca |
| gradient_accumulation_steps | 4 |
| gradient_checkpointing | true |
| hub_model_id | mlfoundations-dev/tinyllama_alpaca_sft_sample |
| learning_rate | 2.0e-05 |
| logging_steps | 10 |
| lr_scheduler_type | cosine |
| model_name_or_path | TinyLlama/TinyLlama-1.1B-Chat-v1.0 |
| neat_packing | true |
| num_train_epochs | 1.0 |
| output_dir | /p/data1/mmlaion/marianna/dcft_checkpoints/experiments/train/checkpoints/tinyllama_alpaca_sft_sample |
| overwrite_cache | true |
| overwrite_output_dir | true |
| packing | true |
| per_device_train_batch_size | 8 |
| plot_loss | true |
| preprocessing_num_workers | 16 |
| push_to_db | false |
| push_to_hub | false |
| report_to | wandb |
| run_name | tinyllama_alpaca_sft_sample |
| save_strategy | epoch |
| stage | sft |
| template | alpaca |
| val_size | 0.05 |
| warmup_ratio | 0.1 |
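
These are LLaMA-Factory training arguments. A minimal launch sketch follows, assuming the parameters above are saved as `key: value` pairs in a YAML file; the filename `tinyllama_alpaca_sft_sample.yaml` is illustrative, not taken from the original run:

```bash
# Minimal sketch of how a config like this is typically launched with
# LLaMA-Factory's standard CLI entry point. The YAML filename below is
# an assumption for illustration, not the run's actual config path.
llamafactory-cli train tinyllama_alpaca_sft_sample.yaml
```

Note that with `per_device_train_batch_size: 8` and `gradient_accumulation_steps: 4`, each optimizer step sees an effective batch of 32 sequences per device, multiplied by however many GPUs the DeepSpeed ZeRO-1 run used.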