#!/usr/bin/env python3
"""
Enhanced Fine-tuning script for CodeLlama with optimized hyperparameters
Supports:
- Resume from checkpoint (automatic detection)
- Incremental fine-tuning (continue from existing adapter)
- Fresh training option
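
Example invocation (paths and the model ID are illustrative):
    python finetune_codellama.py \
        --base-model codellama/CodeLlama-7b-Instruct-hf \
        --dataset data/train.jsonl \
        --output-dir outputs/codellama-lora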
"""
import os
import sys
import json
import shutil
import argparse
import torch
from pathlib import Path
from datasets import Dataset
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
TrainingArguments,
BitsAndBytesConfig,
Trainer,
DataCollatorForLanguageModeling,
EarlyStoppingCallback,
)
from peft import (
LoraConfig,
PeftModel,
get_peft_model,
prepare_model_for_kbit_training,
TaskType,
)
def get_device_info():
"""Detect and return available compute device"""
device_info = {
"device": "cpu",
"device_type": "cpu",
"use_quantization": False,
"dtype": torch.float32
}
if torch.cuda.is_available():
device_info["device"] = "cuda"
device_info["device_type"] = "cuda"
device_info["use_quantization"] = True
device_info["dtype"] = torch.float16
device_info["device_count"] = torch.cuda.device_count()
device_info["device_name"] = torch.cuda.get_device_name(0)
print(f"โ CUDA GPU detected: {device_info['device_name']} (Count: {device_info['device_count']})")
else:
print("โ No GPU detected, using CPU (training will be very slow)")
return device_info
def get_bitsandbytes_config():
"""Get BitsAndBytes config if CUDA is available"""
if torch.cuda.is_available():
return BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_quant_type="nf4",
bnb_4bit_compute_dtype=torch.float16,
bnb_4bit_use_double_quant=True,
)
return None
def load_and_prepare_model(
model_name: str,
adapter_path: str | None = None,
lora_r: int = 48,
lora_alpha: int = 96,
lora_dropout: float = 0.15
):
"""Load CodeLlama model with optimized LoRA configuration"""
device_info = get_device_info()
print(f"\nLoading model: {model_name}")
# Tokenizer
tokenizer_source = adapter_path if adapter_path and os.path.isdir(adapter_path) else model_name
tokenizer = AutoTokenizer.from_pretrained(tokenizer_source)
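    # Llama-family tokenizers ship without a pad token; reuse EOS so padded
    # batches work.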
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.eos_token
tokenizer.pad_token_id = tokenizer.eos_token_id
# Quantization config
bnb_config = get_bitsandbytes_config()
# Model loading kwargs
model_kwargs = {
"trust_remote_code": True,
}
if bnb_config is not None:
print("Using 4-bit quantization (CUDA)")
model_kwargs["quantization_config"] = bnb_config
model_kwargs["device_map"] = "auto"
else:
model_kwargs["torch_dtype"] = device_info["dtype"]
model_kwargs["device_map"] = "auto"
# Load base model
base_model = AutoModelForCausalLM.from_pretrained(model_name, **model_kwargs)
# Prepare for k-bit training
if bnb_config is not None:
base_model = prepare_model_for_kbit_training(base_model)
# LoRA configuration (optimized for CodeLlama)
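    # With r=48 and alpha=96, the adapter update is scaled by alpha/r = 2;
    # targeting all attention and MLP projections trades extra trainable
    # parameters for capacity.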
lora_config = LoraConfig(
r=lora_r,
lora_alpha=lora_alpha,
target_modules=["q_proj", "v_proj", "k_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
lora_dropout=lora_dropout,
bias="none",
task_type=TaskType.CAUSAL_LM,
)
# Load or create LoRA adapter
if adapter_path and os.path.isdir(adapter_path):
print(f"๐ Loading existing LoRA adapter from: {adapter_path}")
print(" (Incremental fine-tuning mode - continuing from existing model)")
model = PeftModel.from_pretrained(base_model, adapter_path, is_trainable=True)
else:
print("๐ Creating new LoRA adapter (Fresh training mode)")
model = get_peft_model(base_model, lora_config)
# Enable gradient checkpointing
model.gradient_checkpointing_enable()
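    # Note: prepare_model_for_kbit_training already sets up input gradients
    # for the quantized path; on the non-quantized path, gradient checkpointing
    # with frozen base weights may additionally need
    # model.enable_input_require_grads().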
trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
total_params = sum(p.numel() for p in model.parameters())
trainable_ratio = (trainable_params / total_params) * 100
print(f"\nModel loaded successfully!")
print(f" - Device: {device_info['device']}")
print(f" - Trainable parameters: {trainable_params:,}")
print(f" - Total parameters: {total_params:,}")
print(f" - Trainable ratio: {trainable_ratio:.2f}%")
return model, tokenizer, device_info
def tokenize_function(examples, tokenizer, max_length=1536):
"""Tokenize function for dataset"""
# Ensure pad_token is set
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.eos_token
tokenizer.pad_token_id = tokenizer.eos_token_id
# Combine instruction and response
# For CodeLlama chat format: instruction already ends with [/INST]
# So we just append: instruction + response + EOS
texts = []
for instruction, response in zip(examples["instruction"], examples["response"]):
# Instruction already contains: <s>[INST]...[/INST]
# We append response + EOS
text = f"{instruction}{response}{tokenizer.eos_token}"
texts.append(text)
# Tokenize with padding to max_length for consistent batch sizes
tokenized = tokenizer(
texts,
truncation=True,
max_length=max_length,
padding="max_length",
return_tensors=None, # Return lists, not tensors
)
# Labels are same as input_ids for causal LM
labels = []
pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
# Set labels, masking padding tokens with -100 (ignored in loss)
for input_ids_seq in tokenized["input_ids"]:
label_seq = input_ids_seq.copy()
# Mask padding tokens
label_seq = [-100 if token_id == pad_token_id else token_id for token_id in label_seq]
labels.append(label_seq)
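    # Caveat: because pad_token_id == eos_token_id here, this masking also
    # hides the appended EOS from the loss, so the model gets no explicit stop
    # signal; unmasking the first EOS per sequence is a common mitigation.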
tokenized["labels"] = labels
return tokenized
def find_checkpoint(output_dir):
"""Find the latest checkpoint in output directory"""
checkpoint_dir = Path(output_dir)
if not checkpoint_dir.exists():
return None
# Look for checkpoint directories
checkpoints = []
for item in checkpoint_dir.iterdir():
if item.is_dir() and item.name.startswith("checkpoint-"):
try:
step_num = int(item.name.split("-")[1])
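                # A checkpoint without trainer_state.json did not finish
                # saving, so skip it.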
trainer_state = item / "trainer_state.json"
if trainer_state.exists():
checkpoints.append((step_num, str(item)))
except (ValueError, IndexError):
continue
if checkpoints:
# Sort by step number and return latest
checkpoints.sort(key=lambda x: x[0], reverse=True)
return checkpoints[0][1]
return None
def load_training_data(file_path):
"""Load training data from JSONL file"""
print(f"Loading training data from {file_path}")
if not os.path.exists(file_path):
raise FileNotFoundError(f"Training data file not found: {file_path}")
data = []
with open(file_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
if line:
try:
data.append(json.loads(line))
except json.JSONDecodeError as e:
print(f"โ ๏ธ Warning: Skipping invalid JSON line: {e}")
continue
return data
def main():
parser = argparse.ArgumentParser(description="Fine-tune CodeLlama with optimized hyperparameters")
parser.add_argument("--base-model", required=True, help="Base model path or HuggingFace ID")
parser.add_argument("--adapter-path", default=None, help="Path to existing LoRA adapter (for incremental fine-tuning)")
parser.add_argument("--dataset", required=True, help="Path to training dataset JSONL")
parser.add_argument("--output-dir", required=True, help="Output directory for fine-tuned model")
parser.add_argument("--resume-from-checkpoint", default=None, help="Resume from specific checkpoint (or 'auto' to find latest)")
parser.add_argument("--fresh", action="store_true", help="Force fresh training (ignore existing checkpoints)")
# Hyperparameters (optimized for CodeLlama based on HYPERPARAMETER_ANALYSIS.md)
parser.add_argument("--max-length", type=int, default=1536, help="Max sequence length (default: 1536)")
parser.add_argument("--num-epochs", type=int, default=5, help="Number of epochs (default: 5)")
parser.add_argument("--batch-size", type=int, default=2, help="Batch size per device (default: 2)")
parser.add_argument("--gradient-accumulation", type=int, default=4, help="Gradient accumulation steps (default: 4)")
parser.add_argument("--learning-rate", type=float, default=2e-5, help="Learning rate (default: 2e-5)")
parser.add_argument("--lora-r", type=int, default=48, help="LoRA rank (default: 48)")
parser.add_argument("--lora-alpha", type=int, default=96, help="LoRA alpha (default: 96)")
parser.add_argument("--lora-dropout", type=float, default=0.15, help="LoRA dropout (default: 0.15)")
parser.add_argument("--warmup-ratio", type=float, default=0.1, help="Warmup ratio (default: 0.1)")
parser.add_argument("--eval-steps", type=int, default=25, help="Evaluation steps (default: 25)")
parser.add_argument("--save-steps", type=int, default=25, help="Save steps (default: 25)")
parser.add_argument("--early-stopping-patience", type=int, default=5, help="Early stopping patience (default: 5)")
parser.add_argument("--logging-steps", type=int, default=5, help="Logging steps (default: 5)")
args = parser.parse_args()
print("=" * 70)
print("๐ CodeLlama Fine-Tuning with Optimized Hyperparameters")
print("=" * 70)
print(f"Base model: {args.base_model}")
print(f"Dataset: {args.dataset}")
print(f"Output dir: {args.output_dir}")
if args.adapter_path:
print(f"Adapter path: {args.adapter_path} (Incremental fine-tuning)")
print("=" * 70)
# Check for existing checkpoint
resume_checkpoint = None
if not args.fresh:
if args.resume_from_checkpoint == "auto":
resume_checkpoint = find_checkpoint(args.output_dir)
if resume_checkpoint:
print(f"\nโ
Found existing checkpoint: {resume_checkpoint}")
print(" Training will resume from this checkpoint")
elif args.resume_from_checkpoint:
resume_checkpoint = args.resume_from_checkpoint
if os.path.isdir(resume_checkpoint):
print(f"\n๐ Resuming from specified checkpoint: {resume_checkpoint}")
else:
print(f"\nโ ๏ธ Warning: Checkpoint path does not exist: {resume_checkpoint}")
resume_checkpoint = None
else:
print("\n๐ Fresh training mode - starting from scratch")
# Clear any existing checkpoints if fresh mode
if os.path.exists(args.output_dir):
checkpoint_dir = Path(args.output_dir)
            for item in checkpoint_dir.iterdir():
                if item.is_dir() and item.name.startswith("checkpoint-"):
                    print(f" Removing old checkpoint: {item.name}")
                    shutil.rmtree(item)
# Load model and tokenizer
model, tokenizer, device_info = load_and_prepare_model(
args.base_model,
args.adapter_path,
lora_r=args.lora_r,
lora_alpha=args.lora_alpha,
lora_dropout=args.lora_dropout
)
# Check if using pre-split dataset (train.jsonl in split directory)
dataset_path = Path(args.dataset)
val_dataset_path = None
use_presplit = False
if dataset_path.name == "train.jsonl":
# Check if val.jsonl exists in same directory
val_path = dataset_path.parent / "val.jsonl"
if val_path.exists():
val_dataset_path = val_path
use_presplit = True
print(f"\nโ
Using pre-split dataset:")
print(f" Train: {dataset_path}")
print(f" Val: {val_dataset_path}")
# Load training data
training_data = load_training_data(args.dataset)
# Convert to dataset format
instructions = []
responses = []
for item in training_data:
if "instruction" in item and "response" in item:
instructions.append(item["instruction"])
responses.append(item["response"])
else:
print(f"โ ๏ธ Warning: Skipping invalid sample (missing instruction/response)")
if not instructions:
raise ValueError("No valid training samples found in dataset")
print(f"\nโ
Loaded {len(instructions)} training samples")
# Create training dataset
train_dataset_dict = Dataset.from_dict({
"instruction": instructions,
"response": responses
})
# Tokenize training dataset
print("Tokenizing training dataset...")
tokenized_train = train_dataset_dict.map(
lambda x: tokenize_function(x, tokenizer, max_length=args.max_length),
batched=True,
remove_columns=train_dataset_dict.column_names
)
# Load validation dataset if pre-split, otherwise split from training data
if use_presplit and val_dataset_path:
print(f"\nโ
Loading validation dataset from: {val_dataset_path}")
val_data = load_training_data(str(val_dataset_path))
val_instructions = []
val_responses = []
for item in val_data:
if "instruction" in item and "response" in item:
val_instructions.append(item["instruction"])
val_responses.append(item["response"])
val_dataset_dict = Dataset.from_dict({
"instruction": val_instructions,
"response": val_responses
})
print("Tokenizing validation dataset...")
tokenized_val = val_dataset_dict.map(
lambda x: tokenize_function(x, tokenizer, max_length=args.max_length),
batched=True,
remove_columns=val_dataset_dict.column_names
)
train_dataset = tokenized_train
val_dataset = tokenized_val
print(f" - Training samples: {len(train_dataset)}")
print(f" - Validation samples: {len(val_dataset)}")
else:
# Split into train/validation (80/20)
print("\nSplitting dataset into train/validation (80/20)...")
train_val_split = tokenized_train.train_test_split(test_size=0.2, seed=42)
train_dataset = train_val_split["train"]
val_dataset = train_val_split["test"]
print(f" - Training samples: {len(train_dataset)}")
print(f" - Validation samples: {len(val_dataset)}")
print(f" - Training samples: {len(train_dataset)}")
print(f" - Validation samples: {len(val_dataset)}")
# Calculate training steps
use_fp16 = device_info["device_type"] == "cuda"
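    # e.g. with the defaults, 2 per device * 4 accumulation = effective batch 8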
effective_batch_size = args.batch_size * args.gradient_accumulation
steps_per_epoch = max(1, len(train_dataset) // effective_batch_size)
total_steps = steps_per_epoch * args.num_epochs
warmup_steps = max(int(total_steps * args.warmup_ratio), 10)
print(f"\n๐ Training Configuration:")
print(f" - Total training steps: {total_steps}")
print(f" - Steps per epoch: {steps_per_epoch}")
print(f" - Warmup steps: {warmup_steps} ({100*warmup_steps/total_steps:.1f}% of training)")
# Training arguments (optimized for CodeLlama)
training_args = TrainingArguments(
output_dir=args.output_dir,
num_train_epochs=args.num_epochs,
per_device_train_batch_size=args.batch_size,
gradient_accumulation_steps=args.gradient_accumulation,
warmup_steps=warmup_steps,
learning_rate=args.learning_rate,
weight_decay=0.01,
fp16=use_fp16,
logging_steps=args.logging_steps,
save_steps=args.save_steps,
eval_strategy="steps",
eval_steps=args.eval_steps,
save_total_limit=3,
load_best_model_at_end=True,
metric_for_best_model="eval_loss",
greater_is_better=False,
lr_scheduler_type="cosine",
max_grad_norm=1.0,
report_to="none",
push_to_hub=False,
dataloader_pin_memory=(device_info["device_type"] == "cuda"),
remove_unused_columns=False,
resume_from_checkpoint=resume_checkpoint, # Resume support
)
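    # Note: setting resume_from_checkpoint on TrainingArguments is mostly
    # informational; the trainer.train(resume_from_checkpoint=...) call below
    # is what actually restores model/optimizer/scheduler state.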
print(f"\nโ๏ธ Hyperparameters (Optimized for CodeLlama):")
print(f" - Max length: {args.max_length}")
print(f" - Epochs: {args.num_epochs}")
print(f" - Batch size: {args.batch_size}")
print(f" - Gradient accumulation: {args.gradient_accumulation}")
print(f" - Learning rate: {args.learning_rate}")
print(f" - LoRA rank: {args.lora_r}")
print(f" - LoRA alpha: {args.lora_alpha}")
print(f" - LoRA dropout: {args.lora_dropout}")
print(f" - Device: {device_info['device']}")
print(f" - Mixed precision (fp16): {use_fp16}")
print("=" * 70)
# Data collator - since we pad during tokenization, collator mainly handles batching
# Ensure pad_token_id is set
if tokenizer.pad_token_id is None:
tokenizer.pad_token_id = tokenizer.eos_token_id
tokenizer.pad_token = tokenizer.eos_token
data_collator = DataCollatorForLanguageModeling(
tokenizer=tokenizer,
mlm=False, # Causal LM, not masked LM
)
# Create trainer
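    # EarlyStoppingCallback relies on load_best_model_at_end=True and
    # metric_for_best_model (both set above) to know what to monitor.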
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=val_dataset,
data_collator=data_collator,
callbacks=[EarlyStoppingCallback(early_stopping_patience=args.early_stopping_patience)],
)
# Train
print("\n๐ Starting training...")
if resume_checkpoint:
print(f" Resuming from: {resume_checkpoint}")
print("=" * 70)
trainer.train(resume_from_checkpoint=resume_checkpoint)
# Save final model
print(f"\n๐พ Saving fine-tuned model to {args.output_dir}")
trainer.save_model(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
model.save_pretrained(args.output_dir)
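    # For a PEFT model this writes only the LoRA adapter weights and config,
    # not the full base model, so the output directory stays small.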
# Save training config
config = {
"base_model": args.base_model,
"adapter_path": args.adapter_path if args.adapter_path else None,
"dataset": args.dataset,
"output_dir": args.output_dir,
"hyperparameters": {
"max_length": args.max_length,
"num_epochs": args.num_epochs,
"batch_size": args.batch_size,
"gradient_accumulation": args.gradient_accumulation,
"learning_rate": args.learning_rate,
"lora_r": args.lora_r,
"lora_alpha": args.lora_alpha,
"lora_dropout": args.lora_dropout,
},
"training_mode": "incremental" if args.adapter_path else "fresh",
"resumed_from_checkpoint": resume_checkpoint is not None
}
config_path = Path(args.output_dir) / "training_config.json"
with open(config_path, 'w') as f:
json.dump(config, f, indent=2)
print("\nโ
Fine-tuning complete!")
print(f"Model saved to: {args.output_dir}")
print(f"Config saved to: {config_path}")
print(f"\n๐ก To continue training with new data (incremental fine-tuning):")
print(f" python finetune_codellama.py --base-model {args.base_model} \\")
print(f" --adapter-path {args.output_dir} \\")
print(f" --dataset <new_dataset.jsonl> \\")
print(f" --output-dir <new_output_dir>")
print(f"\n๐ก To resume from checkpoint if training is interrupted:")
print(f" python finetune_codellama.py --base-model {args.base_model} \\")
print(f" --dataset {args.dataset} \\")
print(f" --output-dir {args.output_dir} \\")
print(f" --resume-from-checkpoint auto")
if __name__ == "__main__":
main()