AiAF committed on
Commit
47aba00
·
verified ·
1 Parent(s): 01b62ee

Create TM-Mistral-Config.yaml

Browse files
Files changed (1) hide show
  1. TM-Mistral-Config.yaml +84 -0
TM-Mistral-Config.yaml ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ base_model: mistralai/Mistral-7B-Instruct-v0.3
3
+ # optionally might have model_type or tokenizer_type
4
+ model_type: MistralForCausalLM
5
+ tokenizer_type: LlamaTokenizer
6
+ # Automatically upload checkpoint and final model to HF
7
+ hub_model_id: AiAF/QLoRA-Finetune-LMTV-TM-9-2320-365-10
8
+
9
+ load_in_8bit: false
10
+ load_in_4bit: true
11
+
12
+ datasets:
13
+ - path: AiAF/TM_plain_qa_list_with_special_tokens.jsonl
14
+ ds_type: json
15
+ type: chat_template
16
+ chat_template: chatml
17
+ field_messages: conversations
18
+ message_field_role: from
19
+ message_field_content: value
20
+ roles:
21
+ user:
22
+ - human
23
+ assistant:
24
+ - gpt
25
+ system:
26
+ - system
27
+
28
+ dataset_prepared_path: last_run_prepared
29
+ val_set_size: 0.1
30
+ output_dir: ./TM-9-2320-365-10_V10
31
+
32
+ save_total_limit: 100
33
+
34
+ adapter: qlora
35
+ lora_model_dir:
36
+
37
+ sequence_len: 2048
38
+ sample_packing: true
39
+ pad_to_sequence_len: true
40
+
41
+ # adapter: qlora  # duplicate key — already set above (config line 34); commented out to avoid YAML duplicate-key issues
42
+ lora_r: 256
43
+ lora_alpha: 512
44
+ lora_dropout: 0.05
45
+ #lora_target_linear: true
46
+ lora_target_modules:
47
+ - gate_proj
48
+ - down_proj
49
+ - up_proj
50
+ - q_proj
51
+ - v_proj
52
+ - k_proj
53
+
54
+ wandb_project: "LLM-Pretraining"
55
+ wandb_watch: "all"
56
+ wandb_name: "LMTV-TM-V10"
57
+ wandb_log_model: "false"
58
+ wandb_run_id: "LMTV-TM-V10"
59
+
60
+ gradient_accumulation_steps: 4
61
+ micro_batch_size: 2
62
+ num_epochs: 10
63
+ optimizer: adamw_bnb_8bit
64
+ lr_scheduler: cosine
65
+ learning_rate: 0.000005
66
+
67
+ bf16: auto
68
+ tf32: false
69
+
70
+ gradient_checkpointing: true
71
+ resume_from_checkpoint:
72
+ logging_steps: 1
73
+ flash_attention: true
74
+
75
+ loss_watchdog_threshold: 5.0
76
+ loss_watchdog_patience: 3
77
+
78
+ warmup_steps: 10
79
+ evals_per_epoch: 5
80
+ saves_per_epoch: 5
81
+ weight_decay: 0.0
82
+ special_tokens:
83
+ bos_token: "<s>"
84
+ eos_token: "</s>"