# v121rc_exp1/H.yaml
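# A config like this is typically launched with the LLaMA-Factory CLI,
# e.g. (path assumed): llamafactory-cli train v121rc_exp1/H.yaml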
bf16: true
cutoff_len: 128
# dataset: HNO1_train_wo_reasoning
dataset: HNO1_train
# dataset: HNO1_train_fake_reasoning
# eval_dataset:
dataset_dir: /workspace/LLaMA-Factory/data
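# Note: the dataset names above are assumed to be entries registered in
# dataset_info.json inside dataset_dir, as LLaMA-Factory expects.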
ddp_timeout: 180000000
# deepspeed: /workspace/LLaMA-Factory/examples/deepspeed/ds_z3_config.json
do_train: true
do_eval: false
enable_thinking: false
# eval_steps: 100
# eval_strategy: steps
finetuning_type: lora
lora_alpha: 16
lora_rank: 8
lora_dropout: 0.05
lora_target: all
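# LoRA setup: rank 8 with alpha 16 gives a scaling factor of alpha/rank = 2;
# lora_target: all applies adapters to all linear layers of the model.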
flash_attn: auto
gradient_accumulation_steps: 1
include_num_input_tokens_seen: true
learning_rate: 5e-5
logging_steps: 1
lr_scheduler_type: constant_with_warmup
max_grad_norm: 2
max_samples: 100000000
model_name_or_path: /workspace/meta-llama/Llama-3.1-8B-Instruct
num_train_epochs: 100000000
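# max_samples and num_train_epochs are presumably set this large to make the
# run effectively unbounded, so training continues until stopped manually;
# checkpoints are still written every save_steps (see below).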
optim: adamw_torch
output_dir: /workspace/v121rc_exp1/H
packing: false
# per_device_eval_batch_size: 64
per_device_train_batch_size: 64
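# Effective batch size = per_device_train_batch_size (64)
#   x gradient_accumulation_steps (1) x number of GPUs in the run.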
plot_loss: true
preprocessing_num_workers: 16
report_to: wandb
save_steps: 1000
stage: sft
template: llama3
trust_remote_code: true
# val_size: 0.5
warmup_steps: 10
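# With constant_with_warmup, the learning rate ramps up to 5e-5 over the
# first 10 steps and then stays constant for the rest of training.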
resize_vocab: true
weight_decay: 1
adam_beta1: 0.9
adam_beta2: 0.98
# eval_on_each_dataset: true
# compute_accuracy: true
# accuracy_at_last_token: true
# accuracy_with_generate: true
# predict_with_generate: true
# do_sample: false
# temperature: 0.0
# top_p: 1.0
# max_new_tokens: 1024
# group_by_length: false
# add_tokens: <MILLFIELD>,<Yes>,<No>,<think>,</think>
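# If the add_tokens line above is enabled, resize_vocab: true should extend
# the embedding matrix to cover the new special tokens (assumed intent).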