---
# Axolotl QLoRA fine-tuning configuration for Mistral-7B-Instruct-v0.3.
base_model: mistralai/Mistral-7B-Instruct-v0.3
# optionally might have model_type or tokenizer_type
model_type: MistralForCausalLM
tokenizer_type: LlamaTokenizer

# Automatically upload checkpoint and final model to HF
hub_model_id: AiAF/QLoRA-Finetune-LMTV-TM-9-2320-365-10

# QLoRA: base weights quantized to 4-bit, adapters trained on top.
load_in_8bit: false
load_in_4bit: true

datasets:
  - path: AiAF/TM_plain_qa_list_with_special_tokens.jsonl
    ds_type: json
    type: chat_template
    chat_template: chatml
    field_messages: conversations
    message_field_role: from
    message_field_content: value
    # Map dataset role labels onto the chat template's roles.
    roles:
      user:
        - human
      assistant:
        - gpt
      system:
        - system

dataset_prepared_path: last_run_prepared
val_set_size: 0.1
output_dir: ./TM-9-2320-365-10_V11
save_total_limit: 100

# Adapter settings. A duplicate `adapter: qlora` key was removed here —
# duplicate mapping keys are invalid per the YAML 1.2 spec and most parsers
# silently keep only the last occurrence. Both occurrences had the same
# value, so behavior is unchanged.
adapter: qlora
lora_model_dir:  # empty — train a fresh adapter rather than loading one

sequence_len: 2048
sample_packing: true
pad_to_sequence_len: true

lora_r: 256
lora_alpha: 512
lora_dropout: 0.05
# lora_target_linear: true
lora_target_modules:
  - gate_proj
  - down_proj
  - up_proj
  - q_proj
  - v_proj
  - k_proj

wandb_project: "LLM-Pretraining"
wandb_watch: "all"
wandb_name: "LMTV-TM-V11"
wandb_log_model: "false"
wandb_run_id: "LMTV-TM-V11"

gradient_accumulation_steps: 4
micro_batch_size: 2
num_epochs: 30
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.000005

bf16: auto
tf32: false

gradient_checkpointing: true
resume_from_checkpoint:  # empty — start a fresh run, not from a checkpoint
logging_steps: 1
flash_attention: true

# Abort if loss spikes above the threshold for `patience` consecutive steps.
loss_watchdog_threshold: 5.0
loss_watchdog_patience: 3

warmup_steps: 10
evals_per_epoch: 5
saves_per_epoch: 2
weight_decay: 0.0

# NOTE(review): empty bos/eos tokens override the tokenizer's defaults with
# empty strings — confirm this is intentional. The dataset name suggests the
# special tokens are already embedded in the training data itself.
special_tokens:
  bos_token: ""
  eos_token: ""