marianna13 commited on
Commit
7629f20
·
verified ·
1 Parent(s): a811907

Upload configs.yaml with huggingface_hub

Browse files
Files changed (1) hide show
  1. configs.yaml +36 -0
configs.yaml ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
---
# LLaMA-Factory supervised fine-tuning (SFT) configuration.
# Full-parameter fine-tune on the tatsu-lab/alpaca dataset, pushing the
# checkpoint to mlfoundations-dev/tinyllama_alpaca_sft_sample.
# Keys are kept in alphabetical order for diffability.

# Precision / kernels
bf16: true
cutoff_len: 512

# Dataset
dataset: tatsu-lab/alpaca
dataset_dir: ONLINE

# Distributed training
ddp_timeout: 180000000
deepspeed: dcft/train/zero1.json

do_train: true
enable_liger_kernel: true
eval_strategy: epoch
finetuning_type: full
formatting: alpaca

# Optimization
gradient_accumulation_steps: 4
gradient_checkpointing: true
hub_model_id: mlfoundations-dev/tinyllama_alpaca_sft_sample
learning_rate: 2.0e-05
logging_steps: 10
lr_scheduler_type: cosine

model_name_or_path: mlfoundations-dev/tinyllama_alpaca_sft_sample

# Packing (neat_packing avoids cross-contamination between packed samples)
neat_packing: true
num_train_epochs: 1.0

# Output / checkpointing
output_dir: /p/data1/mmlaion/marianna/dcft_checkpoints/experiments/train/checkpoints/tinyllama_alpaca_sft_sample
overwrite_cache: true
overwrite_output_dir: true
packing: true
per_device_train_batch_size: 8
plot_loss: true
preprocessing_num_workers: 16

# Publishing (disabled here; artifacts stay local)
push_to_db: false
push_to_hub: false

# Logging / run identity
report_to: wandb
run_name: dcft
save_strategy: epoch
stage: sft
template: alpaca

# Evaluation split and warmup
val_size: 0.05
warmup_ratio: 0.1