Fix VLM detection error: match official Unsloth notebook config
Browse files
vlm-streaming-sft-unsloth-qwen.py
CHANGED
|
@@ -3,10 +3,10 @@
|
|
| 3 |
# dependencies = [
|
| 4 |
# "unsloth",
|
| 5 |
# "datasets",
|
| 6 |
-
# "trl",
|
| 7 |
# "huggingface_hub[hf_transfer]",
|
| 8 |
# "trackio",
|
| 9 |
-
# "transformers",
|
| 10 |
# ]
|
| 11 |
# ///
|
| 12 |
"""
|
|
@@ -147,8 +147,8 @@ Examples:
|
|
| 147 |
parser.add_argument(
|
| 148 |
"--lora-alpha",
|
| 149 |
type=int,
|
| 150 |
-
default=
|
| 151 |
-
help="LoRA alpha (default:
|
| 152 |
)
|
| 153 |
|
| 154 |
# Logging
|
|
@@ -273,17 +273,13 @@ def main():
|
|
| 273 |
output_dir=args.save_local,
|
| 274 |
per_device_train_batch_size=args.batch_size,
|
| 275 |
gradient_accumulation_steps=args.gradient_accumulation,
|
| 276 |
-
|
| 277 |
-
gradient_checkpointing_kwargs={"use_reentrant": False},
|
| 278 |
-
max_grad_norm=0.3,
|
| 279 |
-
warmup_ratio=0.03,
|
| 280 |
max_steps=args.max_steps,
|
| 281 |
learning_rate=args.learning_rate,
|
| 282 |
logging_steps=max(1, args.max_steps // 20),
|
| 283 |
-
|
| 284 |
-
optim="adamw_torch_fused",
|
| 285 |
weight_decay=0.001,
|
| 286 |
-
lr_scheduler_type="cosine",
|
| 287 |
seed=3407,
|
| 288 |
# VLM-specific settings (required for Unsloth)
|
| 289 |
remove_unused_columns=False,
|
|
|
|
| 3 |
# dependencies = [
|
| 4 |
# "unsloth",
|
| 5 |
# "datasets",
|
| 6 |
+
# "trl==0.22.2",
|
| 7 |
# "huggingface_hub[hf_transfer]",
|
| 8 |
# "trackio",
|
| 9 |
+
# "transformers==4.57.1",
|
| 10 |
# ]
|
| 11 |
# ///
|
| 12 |
"""
|
|
|
|
| 147 |
parser.add_argument(
|
| 148 |
"--lora-alpha",
|
| 149 |
type=int,
|
| 150 |
+
default=16,
|
| 151 |
+
help="LoRA alpha (default: 16). Same as r per Unsloth notebook",
|
| 152 |
)
|
| 153 |
|
| 154 |
# Logging
|
|
|
|
| 273 |
output_dir=args.save_local,
|
| 274 |
per_device_train_batch_size=args.batch_size,
|
| 275 |
gradient_accumulation_steps=args.gradient_accumulation,
|
| 276 |
+
warmup_steps=5, # Per notebook (not warmup_ratio)
|
|
|
|
|
|
|
|
|
|
| 277 |
max_steps=args.max_steps,
|
| 278 |
learning_rate=args.learning_rate,
|
| 279 |
logging_steps=max(1, args.max_steps // 20),
|
| 280 |
+
optim="adamw_8bit", # Per notebook
|
|
|
|
| 281 |
weight_decay=0.001,
|
| 282 |
+
lr_scheduler_type="linear", # Per notebook (not cosine)
|
| 283 |
seed=3407,
|
| 284 |
# VLM-specific settings (required for Unsloth)
|
| 285 |
remove_unused_columns=False,
|