#!/usr/bin/env python3
"""
SFT (Supervised Fine-Tuning) training script for arithmetic countdown problems.

Fine-tunes a causal language model with LoRA adapters via TRL's SFTTrainer
to solve arithmetic problems with proper reasoning and formatting.
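
Example invocation (script name and paths are placeholders):
    python train_sft.py --dataset_csv data/countdown_train.csv --output_dir outputs/sft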
"""
import argparse
import logging
import os
from pathlib import Path
from datasets import Dataset
from peft import LoraConfig, get_peft_model
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    PreTrainedModel,
    PreTrainedTokenizerBase,
)
from trl import SFTConfig, SFTTrainer
from src.dataset.sft import (
load_csv_dataset_sft,
map_problem_description_to_conversation_sft,
)
# Set up logging
logging.basicConfig(
level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger("sft_training")
def load_train_dataset(
dataset_csv: str, max_rows: int = 2000, seed: int = 42
) -> Dataset:
"""
Load, shuffle, and subsample the training dataset.
Args:
dataset_csv: Absolute path to the dataset CSV file
max_rows: Maximum number of rows to select for training
seed: Seed for dataset shuffling
Returns:
Dataset: A datasets.Dataset ready for SFT training
"""
raw_dataset: Dataset = load_csv_dataset_sft(
dataset_csv, "train", map_problem_description_to_conversation_sft
)
raw_dataset = raw_dataset.shuffle(seed=seed)
train_dataset = raw_dataset.select(range(min(max_rows, len(raw_dataset))))
logger.info("Train rows: %d", len(train_dataset))
return train_dataset
def create_lora_model(model_id: str, device_map: str = "auto") -> PreTrainedModel:
"""
Create a base causal LM and wrap it with LoRA adapters.
Args:
model_id: Hugging Face model identifier to load as the base model
device_map: Device mapping strategy for model loading
Returns:
PreTrainedModel: A transformers.PreTrainedModel with LoRA adapters applied
"""
model = AutoModelForCausalLM.from_pretrained(
model_id,
device_map=device_map,
)
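    # LoRA hyperparameters: r is the adapter rank, lora_alpha scales the adapter
    # update (effective scale alpha/r = 2), and only the attention projections
    # are adapted, which keeps the number of trainable parameters small.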
lora_cfg = LoraConfig(
r=16,
lora_alpha=32,
target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
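    # get_peft_model freezes the base weights; only the LoRA adapter parameters
    # remain trainable.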
model = get_peft_model(model, lora_cfg)
logger.info("Model with LoRA ready")
return model
def create_sft_config(
output_dir: str,
learning_rate: float = 2e-4,
num_train_epochs: int = 1,
per_device_train_batch_size: int = 4,
gradient_accumulation_steps: int = 4,
max_length: int = 2048,
save_steps: int = 50,
logging_steps: int = 1,
) -> SFTConfig:
"""
Create SFT training configuration.
Args:
output_dir: Directory where checkpoints and logs will be written
learning_rate: Learning rate for training
num_train_epochs: Number of training epochs
per_device_train_batch_size: Batch size per device
gradient_accumulation_steps: Steps to accumulate gradients
max_length: Maximum sequence length
save_steps: Steps between model saves
logging_steps: Steps between log outputs
Returns:
SFTConfig: A configured trl.SFTConfig instance
"""
return SFTConfig(
output_dir=output_dir,
learning_rate=learning_rate,
weight_decay=0.001,
warmup_ratio=0.03,
lr_scheduler_type="cosine",
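        # The paged AdamW optimizer is provided by bitsandbytes, which must be installed.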
optim="paged_adamw_32bit",
remove_unused_columns=False,
gradient_accumulation_steps=gradient_accumulation_steps,
num_train_epochs=num_train_epochs,
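        # bf16 assumes hardware bfloat16 support (e.g. Ampere or newer GPUs);
        # use fp16=True instead on older cards.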
bf16=True,
per_device_train_batch_size=per_device_train_batch_size,
# SFT-specific parameters
max_length=max_length,
packing=False,
# Logging and saving
report_to=["tensorboard"],
logging_steps=logging_steps,
save_strategy="steps",
save_steps=save_steps,
eval_strategy="no",
)
def create_trainer(
model: PreTrainedModel,
    tokenizer: PreTrainedTokenizerBase,
train_dataset: Dataset,
args: SFTConfig,
) -> SFTTrainer:
"""
Construct an SFTTrainer for supervised fine-tuning.
Args:
model: The LoRA-wrapped pretrained model to train
tokenizer: The tokenizer for the model
train_dataset: The dataset to use for training
args: The SFT configuration
Returns:
SFTTrainer: An initialized trl.SFTTrainer instance
"""
    # Recent TRL versions take the tokenizer via `processing_class`
    # (the older `tokenizer` keyword has been removed).
    trainer = SFTTrainer(
        model=model,
        processing_class=tokenizer,
        args=args,
        train_dataset=train_dataset,
    )
return trainer
def train_and_save(trainer: SFTTrainer, output_dir: str) -> None:
"""
Run training and save the final model to disk.
Args:
trainer: The configured SFT trainer instance
output_dir: Output directory to save the trained model
Returns:
None
"""
train_result = trainer.train()
logger.info("Training complete: %s", str(train_result))
trainer.save_model(output_dir)
logger.info("Saved to %s", output_dir)
def main() -> None:
"""
Run the full SFT training workflow with command-line arguments.
Returns:
None
"""
parser = argparse.ArgumentParser(
description="Train a language model using SFT for arithmetic countdown problems"
)
# Dataset arguments
parser.add_argument(
"--dataset_csv",
type=str,
required=True,
help="Path to the training dataset CSV file",
)
parser.add_argument(
"--max_rows", type=int, default=2000, help="Maximum number of training samples"
)
parser.add_argument(
"--seed", type=int, default=42, help="Random seed for dataset shuffling"
)
# Model arguments
parser.add_argument(
"--model_id",
type=str,
default="Qwen/Qwen2.5-3B-Instruct",
help="Hugging Face model identifier",
)
parser.add_argument(
"--device_map", type=str, default="auto", help="Device mapping strategy"
)
# Training arguments
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Directory to save model checkpoints and logs",
)
parser.add_argument(
"--learning_rate", type=float, default=2e-4, help="Learning rate"
)
parser.add_argument(
"--num_train_epochs", type=int, default=1, help="Number of training epochs"
)
parser.add_argument(
"--per_device_train_batch_size",
type=int,
default=4,
help="Batch size per device",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=4,
help="Gradient accumulation steps",
)
parser.add_argument(
"--max_length",
type=int,
default=2048,
help="Maximum sequence length",
)
parser.add_argument(
"--save_steps", type=int, default=50, help="Steps between model saves"
)
parser.add_argument(
"--logging_steps", type=int, default=1, help="Steps between log outputs"
)
args = parser.parse_args()
# Validate arguments
if not Path(args.dataset_csv).exists():
logger.error("Dataset CSV file does not exist: %s", args.dataset_csv)
return
if args.max_rows <= 0:
logger.error("max_rows must be positive")
return
# Create output directory
os.makedirs(args.output_dir, exist_ok=True)
logger.info("Output dir: %s", args.output_dir)
# Load dataset
train_dataset = load_train_dataset(args.dataset_csv, args.max_rows, args.seed)
# Create model and tokenizer
model = create_lora_model(args.model_id, args.device_map)
tokenizer = AutoTokenizer.from_pretrained(args.model_id)
# Set pad token if it doesn't exist
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.eos_token
# Create training configuration
training_args = create_sft_config(
output_dir=args.output_dir,
learning_rate=args.learning_rate,
num_train_epochs=args.num_train_epochs,
per_device_train_batch_size=args.per_device_train_batch_size,
gradient_accumulation_steps=args.gradient_accumulation_steps,
max_length=args.max_length,
save_steps=args.save_steps,
logging_steps=args.logging_steps,
)
# Create trainer
trainer = create_trainer(
model=model,
tokenizer=tokenizer,
train_dataset=train_dataset,
args=training_args,
)
# Train and save
train_and_save(trainer=trainer, output_dir=args.output_dir)
if __name__ == "__main__":
main()