| """ |
| Underwood SFT Training - Learning Rate 1e-4 |
| Fine-tunes Gemma 3 4B with QLoRA on strategic advisor conversations |
| """ |
|
|
import torch
import trackio  # experiment tracking backend, used via report_to="trackio"
from datasets import load_dataset
from peft import LoraConfig
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from trl import SFTConfig, SFTTrainer
|
|
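# QLoRA: quantize the frozen base model to 4-bit NF4 with double quantization,
# run compute in bfloat16, and train LoRA adapters on top.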
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=True,
)
|
|
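# Load the instruction-tuned Gemma 3 4B base in 4-bit. Eager attention is
# used here, as commonly recommended for training Gemma-family models.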
model = AutoModelForCausalLM.from_pretrained(
    "google/gemma-3-4b-it",
    quantization_config=bnb_config,
    device_map="auto",
    torch_dtype=torch.bfloat16,
    attn_implementation="eager",
)
|
|
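# Reuse EOS as the pad token and pad on the right, the usual setup for
# causal-LM fine-tuning.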
tokenizer = AutoTokenizer.from_pretrained("google/gemma-3-4b-it")
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "right"
|
|
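# Chat-formatted conversations; the trainer below uses the "train" and
# "validation" splits.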
dataset = load_dataset("AmiDwivedi/underwood-conversations")
|
|
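# High-rank LoRA (r=128, alpha=2*r) applied to every attention and MLP
# projection.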
lora_config = LoraConfig(
    r=128,
    lora_alpha=256,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
)
|
|
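# Effective batch size: 2 per device * 8 accumulation steps = 16 sequences
# per optimizer step. Checkpoints are pushed to the Hub on every save.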
training_args = SFTConfig(
    output_dir="underwood-lr1e4",
    num_train_epochs=10,
    per_device_train_batch_size=2,
    per_device_eval_batch_size=2,
    gradient_accumulation_steps=8,
    learning_rate=1e-4,
    weight_decay=0.01,
    warmup_ratio=0.03,
    lr_scheduler_type="cosine",
    logging_steps=10,
    eval_strategy="steps",
    eval_steps=50,
    save_strategy="steps",
    save_steps=100,
    save_total_limit=2,
    bf16=True,
    max_length=2048,
    packing=False,
    gradient_checkpointing=True,
    push_to_hub=True,
    hub_model_id="AmiDwivedi/underwood-lr1e4",
    hub_strategy="every_save",
    report_to="trackio",
    run_name="underwood-lr1e4",
)
|
|
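# SFTTrainer applies the LoRA config to the quantized model and tokenizes
# the conversations via processing_class.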
trainer = SFTTrainer(
    model=model,
    args=training_args,
    train_dataset=dataset["train"],
    eval_dataset=dataset["validation"],
    peft_config=lora_config,
    processing_class=tokenizer,
)
|
|
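# Train, then push the final adapter and tokenizer state to the Hub.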
trainer.train()
trainer.push_to_hub()
print("Training complete! Model pushed to AmiDwivedi/underwood-lr1e4")
|
|