evalstate
committed on
Commit
·
0a40fad
1
Parent(s):
d70ac03
enhance train/eval docs
Browse files
trl/scripts/train_dpo_example.py
CHANGED
|
@@ -43,9 +43,18 @@ trackio.init(
|
|
| 43 |
)
|
| 44 |
|
| 45 |
# Load preference dataset
|
|
|
|
| 46 |
dataset = load_dataset("trl-lib/ultrafeedback_binarized", split="train")
|
| 47 |
print(f"✅ Dataset loaded: {len(dataset)} preference pairs")
|
| 48 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 49 |
# Training configuration
|
| 50 |
config = DPOConfig(
|
| 51 |
# CRITICAL: Hub settings
|
|
@@ -69,6 +78,10 @@ config = DPOConfig(
|
|
| 69 |
save_steps=100,
|
| 70 |
save_total_limit=2,
|
| 71 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 72 |
# Optimization
|
| 73 |
warmup_ratio=0.1,
|
| 74 |
lr_scheduler_type="cosine",
|
|
@@ -79,9 +92,11 @@ config = DPOConfig(
|
|
| 79 |
|
| 80 |
# Initialize and train
|
| 81 |
# Note: DPO requires an instruct-tuned model as the base
|
|
|
|
| 82 |
trainer = DPOTrainer(
|
| 83 |
model="Qwen/Qwen2.5-0.5B-Instruct", # Use instruct model, not base model
|
| 84 |
-
train_dataset=
|
|
|
|
| 85 |
args=config,
|
| 86 |
)
|
| 87 |
|
|
|
|
| 43 |
)
|
| 44 |
|
| 45 |
# Load preference dataset
|
| 46 |
+
print("📦 Loading dataset...")
|
| 47 |
dataset = load_dataset("trl-lib/ultrafeedback_binarized", split="train")
|
| 48 |
print(f"✅ Dataset loaded: {len(dataset)} preference pairs")
|
| 49 |
|
| 50 |
+
# Create train/eval split
|
| 51 |
+
print("📊 Creating train/eval split...")
|
| 52 |
+
dataset_split = dataset.train_test_split(test_size=0.1, seed=42)
|
| 53 |
+
train_dataset = dataset_split["train"]
|
| 54 |
+
eval_dataset = dataset_split["test"]
|
| 55 |
+
print(f" Train: {len(train_dataset)} pairs")
|
| 56 |
+
print(f" Eval: {len(eval_dataset)} pairs")
|
| 57 |
+
|
| 58 |
# Training configuration
|
| 59 |
config = DPOConfig(
|
| 60 |
# CRITICAL: Hub settings
|
|
|
|
| 78 |
save_steps=100,
|
| 79 |
save_total_limit=2,
|
| 80 |
|
| 81 |
+
# Evaluation - IMPORTANT: Only enable if eval_dataset provided
|
| 82 |
+
eval_strategy="steps",
|
| 83 |
+
eval_steps=100,
|
| 84 |
+
|
| 85 |
# Optimization
|
| 86 |
warmup_ratio=0.1,
|
| 87 |
lr_scheduler_type="cosine",
|
|
|
|
| 92 |
|
| 93 |
# Initialize and train
|
| 94 |
# Note: DPO requires an instruct-tuned model as the base
|
| 95 |
+
print("🎯 Initializing trainer...")
|
| 96 |
trainer = DPOTrainer(
|
| 97 |
model="Qwen/Qwen2.5-0.5B-Instruct", # Use instruct model, not base model
|
| 98 |
+
train_dataset=train_dataset,
|
| 99 |
+
eval_dataset=eval_dataset, # CRITICAL: Must provide eval_dataset when eval_strategy is enabled
|
| 100 |
args=config,
|
| 101 |
)
|
| 102 |
|
trl/scripts/train_sft_example.py
CHANGED
|
@@ -16,6 +16,7 @@ This script demonstrates:
|
|
| 16 |
- Trackio integration for real-time monitoring
|
| 17 |
- LoRA/PEFT for efficient training
|
| 18 |
- Proper Hub saving configuration
|
|
|
|
| 19 |
- Checkpoint management
|
| 20 |
- Optimized training parameters
|
| 21 |
|
|
@@ -48,10 +49,19 @@ trackio.init(
|
|
| 48 |
}
|
| 49 |
)
|
| 50 |
|
| 51 |
-
# Load
|
|
|
|
| 52 |
dataset = load_dataset("trl-lib/Capybara", split="train")
|
| 53 |
print(f"✅ Dataset loaded: {len(dataset)} examples")
|
| 54 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 55 |
# Training configuration
|
| 56 |
config = SFTConfig(
|
| 57 |
# CRITICAL: Hub settings
|
|
@@ -72,6 +82,10 @@ config = SFTConfig(
|
|
| 72 |
save_steps=100,
|
| 73 |
save_total_limit=2,
|
| 74 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 75 |
# Optimization
|
| 76 |
warmup_ratio=0.1,
|
| 77 |
lr_scheduler_type="cosine",
|
|
@@ -91,9 +105,11 @@ peft_config = LoraConfig(
|
|
| 91 |
)
|
| 92 |
|
| 93 |
# Initialize and train
|
|
|
|
| 94 |
trainer = SFTTrainer(
|
| 95 |
model="Qwen/Qwen2.5-0.5B",
|
| 96 |
-
train_dataset=
|
|
|
|
| 97 |
args=config,
|
| 98 |
peft_config=peft_config,
|
| 99 |
)
|
|
|
|
| 16 |
- Trackio integration for real-time monitoring
|
| 17 |
- LoRA/PEFT for efficient training
|
| 18 |
- Proper Hub saving configuration
|
| 19 |
+
- Train/eval split for monitoring
|
| 20 |
- Checkpoint management
|
| 21 |
- Optimized training parameters
|
| 22 |
|
|
|
|
| 49 |
}
|
| 50 |
)
|
| 51 |
|
| 52 |
+
# Load dataset
|
| 53 |
+
print("📦 Loading dataset...")
|
| 54 |
dataset = load_dataset("trl-lib/Capybara", split="train")
|
| 55 |
print(f"✅ Dataset loaded: {len(dataset)} examples")
|
| 56 |
|
| 57 |
+
# Create train/eval split
|
| 58 |
+
print("📊 Creating train/eval split...")
|
| 59 |
+
dataset_split = dataset.train_test_split(test_size=0.1, seed=42)
|
| 60 |
+
train_dataset = dataset_split["train"]
|
| 61 |
+
eval_dataset = dataset_split["test"]
|
| 62 |
+
print(f" Train: {len(train_dataset)} examples")
|
| 63 |
+
print(f" Eval: {len(eval_dataset)} examples")
|
| 64 |
+
|
| 65 |
# Training configuration
|
| 66 |
config = SFTConfig(
|
| 67 |
# CRITICAL: Hub settings
|
|
|
|
| 82 |
save_steps=100,
|
| 83 |
save_total_limit=2,
|
| 84 |
|
| 85 |
+
# Evaluation - IMPORTANT: Only enable if eval_dataset provided
|
| 86 |
+
eval_strategy="steps",
|
| 87 |
+
eval_steps=100,
|
| 88 |
+
|
| 89 |
# Optimization
|
| 90 |
warmup_ratio=0.1,
|
| 91 |
lr_scheduler_type="cosine",
|
|
|
|
| 105 |
)
|
| 106 |
|
| 107 |
# Initialize and train
|
| 108 |
+
print("🎯 Initializing trainer...")
|
| 109 |
trainer = SFTTrainer(
|
| 110 |
model="Qwen/Qwen2.5-0.5B",
|
| 111 |
+
train_dataset=train_dataset,
|
| 112 |
+
eval_dataset=eval_dataset, # CRITICAL: Must provide eval_dataset when eval_strategy is enabled
|
| 113 |
args=config,
|
| 114 |
peft_config=peft_config,
|
| 115 |
)
|