jprivera44 committed on
Commit
cd39c35
·
verified ·
1 Parent(s): f285baa

Upload training_config.yaml with huggingface_hub

Browse files
Files changed (1) hide show
  1. training_config.yaml +25 -0
training_config.yaml ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
run_id: mo9c
data:
  path: experiments/260409_b200_unsloth/data/mo9c/train_36k_combined.jsonl
model:
  name: unsloth/Llama-3.3-70B-Instruct
training:
  epochs: 1
  batch_size: 8
  gradient_accumulation_steps: 1
  learning_rate: 2.0e-05
  lora_seed: 42
  shuffle_seed: 42
  max_seq_length: 3072
  save_total_limit: 1
lora:
  rank: 64
  alpha: 64
  dropout: 0.0
  target_modules: all-linear
logging:
  wandb_project: collusion-mo-finetune
  wandb_run_name: mo9c
  require_wandb: true
  log_every_n_steps: 1
  save_every_n_steps: 500