checkpoint_dir: checkpoints/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T
out_dir: out/finetune/lora-tiny-llama-1.1b
precision: bf16-true
devices: 1

lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_query: true
lora_key: true
lora_value: true
lora_projection: true
lora_mlp: true
lora_head: true

data:
  class_path: litgpt.data.JSON
  init_args:
    json_path: data/custom
    mask_prompt: false
    prompt_style: alpaca
    ignore_index: -100
    seed: 42
    num_workers: 4

train:
  save_interval: 800
  log_interval: 1
  global_batch_size: 8
  micro_batch_size: 8
  lr_warmup_steps: 10
  epochs: 3
  max_seq_length: 512
  learning_rate: 0.0002
  weight_decay: 0.0
  beta1: 0.9
  beta2: 0.95
  min_lr: 6.0e-05

eval:
  interval: 100
  max_new_tokens: 100
  max_iters: 100

logger_name: tensorboard
seed: 1337
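
# The data section above points litgpt.data.JSON at data/custom. litgpt expects
# json_path to be either a single JSON file or a directory containing
# train.json and val.json. With prompt_style: alpaca, each record carries
# instruction/input/output fields. A minimal illustrative record follows; the
# file name and field values are assumptions for the sketch, not part of this
# config:
#
#   data/custom/train.json
#   [
#     {
#       "instruction": "Summarize the following text.",
#       "input": "LoRA adds trainable low-rank adapter matrices to frozen weights.",
#       "output": "LoRA fine-tunes a model by training small adapter matrices."
#     }
#   ]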
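
# A typical launch, assuming this file is saved as lora.yaml (the file name is
# an assumption, and the subcommand spelling varies across litgpt versions,
# e.g. `finetune lora` vs. `finetune_lora`):
#
#   litgpt finetune lora --config lora.yaml
#
# With global_batch_size and micro_batch_size both set to 8 on a single
# device, litgpt accumulates gradients over global_batch_size /
# micro_batch_size = 1 micro-batches, i.e. no gradient accumulation.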