AiAF committed on
Commit
1dea956
·
verified ·
1 Parent(s): 47aba00

Update TM-Mistral-Config.yaml

Browse files
Files changed (1) hide show
  1. TM-Mistral-Config.yaml +5 -5
TM-Mistral-Config.yaml CHANGED
@@ -27,7 +27,7 @@ datasets:
27
 
28
  dataset_prepared_path: last_run_prepared
29
  val_set_size: 0.1
30
- output_dir: ./TM-9-2320-365-10_V10
31
 
32
  save_total_limit: 100
33
 
@@ -53,13 +53,13 @@ lora_target_modules:
53
 
54
  wandb_project: "LLM-Pretraining"
55
  wandb_watch: "all"
56
- wandb_name: "LMTV-TM-V10"
57
  wandb_log_model: "false"
58
- wandb_run_id: "LMTV-TM-V10"
59
 
60
  gradient_accumulation_steps: 4
61
  micro_batch_size: 2
62
- num_epochs: 10
63
  optimizer: adamw_bnb_8bit
64
  lr_scheduler: cosine
65
  learning_rate: 0.000005
@@ -77,7 +77,7 @@ loss_watchdog_patience: 3
77
 
78
  warmup_steps: 10
79
  evals_per_epoch: 5
80
- saves_per_epoch: 5
81
  weight_decay: 0.0
82
  special_tokens:
83
  bos_token: "<s>"
 
27
 
28
  dataset_prepared_path: last_run_prepared
29
  val_set_size: 0.1
30
+ output_dir: ./TM-9-2320-365-10_V11
31
 
32
  save_total_limit: 100
33
 
 
53
 
54
  wandb_project: "LLM-Pretraining"
55
  wandb_watch: "all"
56
+ wandb_name: "LMTV-TM-V11"
57
  wandb_log_model: "false"
58
+ wandb_run_id: "LMTV-TM-V11"
59
 
60
  gradient_accumulation_steps: 4
61
  micro_batch_size: 2
62
+ num_epochs: 30
63
  optimizer: adamw_bnb_8bit
64
  lr_scheduler: cosine
65
  learning_rate: 0.000005
 
77
 
78
  warmup_steps: 10
79
  evals_per_epoch: 5
80
+ saves_per_epoch: 2
81
  weight_decay: 0.0
82
  special_tokens:
83
  bos_token: "<s>"