# **UNTESTED, probably unfit for human consumption**

One epoch of grimulkan/LimaRP-augmented on LLaMA3-8B via unsloth on Colab, using the llama-chat template. 16k context, probably.
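The training config below assumes `model`, `tokenizer`, `dataset`, and `max_seq_length` already exist from earlier cells. A minimal sketch of that setup follows; the exact model name, dtype, and dataset split are assumptions, not values copied from the original notebook:

```
from unsloth import FastLanguageModel
from datasets import load_dataset

max_seq_length = 16384  # "16k context, probably"

# Assumed 4-bit base checkpoint; any LLaMA3-8B base that unsloth supports would do.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "unsloth/llama-3-8b-bnb-4bit",
    max_seq_length = max_seq_length,
    dtype = None,          # auto-detects bf16/fp16
    load_in_4bit = True,
)

# Assumption: the dataset rows already carry a "text" column rendered with the
# llama-chat template; if not, a formatting/mapping step would go here.
dataset = load_dataset("grimulkan/LimaRP-augmented", split = "train")
```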
```
import torch
from unsloth import FastLanguageModel
from trl import SFTTrainer
from transformers import TrainingArguments

# Attach LoRA adapters to the attention and MLP projections.
model = FastLanguageModel.get_peft_model(
    model,
    r = 64, # Choose any number > 0 ! Suggested 8, 16, 32, 64, 128
    target_modules = ["q_proj", "k_proj", "v_proj", "o_proj",
                      "gate_proj", "up_proj", "down_proj",],
    lora_alpha = 16,
    lora_dropout = 0, # Supports any, but = 0 is optimized
    bias = "none",    # Supports any, but = "none" is optimized
    # [NEW] "unsloth" uses 30% less VRAM, fits 2x larger batch sizes!
    use_gradient_checkpointing = "unsloth", # True or "unsloth" for very long context
    random_state = 3407,
    use_rslora = False,  # We support rank stabilized LoRA
    loftq_config = None, # And LoftQ
)

trainer = SFTTrainer(
    model = model,
    tokenizer = tokenizer,
    train_dataset = dataset,
    dataset_text_field = "text",
    max_seq_length = max_seq_length,
    dataset_num_proc = 2,
    packing = False, # Can make training 5x faster for short sequences.
    args = TrainingArguments(
        per_device_train_batch_size = 1,
        gradient_accumulation_steps = 8,
        warmup_steps = 5,
        num_train_epochs = 1,
        learning_rate = 2e-4,
        fp16 = not torch.cuda.is_bf16_supported(),
        bf16 = torch.cuda.is_bf16_supported(),
        logging_steps = 1,
        optim = "adamw_8bit",
        weight_decay = 0.01,
        lr_scheduler_type = "linear",
        seed = 3407,
        output_dir = "outputs",
    ),
)
```
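After training, inference with the adapter can be sketched using unsloth's fast-inference mode and the tokenizer's chat template; the prompt and sampling settings below are illustrative assumptions, not settings from this repo:

```
from unsloth import FastLanguageModel

# Switch the trained model into unsloth's optimized inference mode.
FastLanguageModel.for_inference(model)

# Assumption: the tokenizer carries the llama-chat template mentioned above.
messages = [
    {"role": "system", "content": "You are a roleplay partner."},
    {"role": "user", "content": "Describe the tavern we just walked into."},
]
input_ids = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt = True,
    return_tensors = "pt",
).to(model.device)

outputs = model.generate(
    input_ids = input_ids,
    max_new_tokens = 256,
    do_sample = True,
    temperature = 0.8,
)
print(tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens = True))
```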
[GGUFs courtesy of the Quant Cartel](https://huggingface.co/Quant-Cartel/experiment_1_8b-iMat-GGUF)
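To try one of those GGUF quants locally, a minimal llama-cpp-python sketch; the filename is a placeholder, not an actual file listed in that repo:

```
from llama_cpp import Llama

# Placeholder quant filename; substitute whichever file you download from the repo.
llm = Llama(model_path = "./experiment_1_8b-iMat-Q4_K_M.gguf", n_ctx = 16384)

out = llm.create_chat_completion(
    messages = [{"role": "user", "content": "Introduce yourself in character."}],
    max_tokens = 200,
)
print(out["choices"][0]["message"]["content"])
```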