davanstrien HF Staff commited on
Commit
15582ee
·
verified ·
1 Parent(s): 2fc9421

Upload vlm-test-no-trackio.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. vlm-test-no-trackio.py +526 -0
vlm-test-no-trackio.py ADDED
@@ -0,0 +1,526 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # /// script
2
+ # requires-python = ">=3.10"
3
+ # dependencies = [
4
+ # "unsloth",
5
+ # "datasets",
6
+ # "trl==0.22.2",
7
+ # "huggingface_hub[hf_transfer]",
8
+ # "transformers==4.57.1",
9
+ # ]
10
+ # ///
11
+ """
12
+ Fine-tune Vision Language Models using Unsloth optimizations.
13
+
14
+ Uses Unsloth for ~60% less VRAM and 2x faster training.
15
+ Supports epoch-based or step-based training with optional eval split.
16
+
17
+ Epoch-based training (recommended for full datasets):
18
+ uv run vlm-streaming-sft-unsloth-qwen.py \
19
+ --num-epochs 1 \
20
+ --eval-split 0.2 \
21
+ --output-repo your-username/vlm-finetuned
22
+
23
+ Run on HF Jobs (1 epoch with eval):
24
+ hf jobs uv run --flavor a100-large --secrets HF_TOKEN --timeout 4h -- \
25
+ https://huggingface.co/datasets/uv-scripts/training/raw/main/vlm-streaming-sft-unsloth-qwen.py \
26
+ --num-epochs 1 \
27
+ --eval-split 0.2 \
28
+ --output-repo your-username/vlm-finetuned
30
+
31
+ Step-based training (for streaming or quick tests):
32
+ uv run vlm-streaming-sft-unsloth-qwen.py \
33
+ --streaming \
34
+ --max-steps 500 \
35
+ --output-repo your-username/vlm-finetuned
36
+
37
+ Quick test with limited samples:
38
+ uv run vlm-streaming-sft-unsloth-qwen.py \
39
+ --num-samples 500 \
40
+ --num-epochs 2 \
41
+ --eval-split 0.2 \
42
+ --output-repo your-username/vlm-test
43
+ """
44
+
45
+ import argparse
46
+ import logging
47
+ import os
48
+ import sys
49
+ import time
50
+
51
+ # Force unbuffered output for HF Jobs logs
52
+ sys.stdout.reconfigure(line_buffering=True)
53
+ sys.stderr.reconfigure(line_buffering=True)
54
+
55
+ logging.basicConfig(
56
+ level=logging.INFO,
57
+ format="%(asctime)s - %(levelname)s - %(message)s",
58
+ )
59
+ logger = logging.getLogger(__name__)
60
+
61
+
62
def check_cuda():
    """Verify a CUDA GPU is visible to torch; exit(1) with guidance if not."""
    import torch

    # Happy path first: report the device and return.
    if torch.cuda.is_available():
        logger.info(f"CUDA available: {torch.cuda.get_device_name(0)}")
        return

    # No GPU: explain how to get one (HF Jobs) and abort.
    logger.error("CUDA is not available. This script requires a GPU.")
    logger.error("Run on a machine with a CUDA-capable GPU or use HF Jobs:")
    logger.error(
        "  hf jobs uv run vlm-streaming-sft-unsloth.py --flavor a100-large ..."
    )
    sys.exit(1)
74
+
75
+
76
def parse_args(argv=None):
    """Build and run the CLI argument parser.

    Args:
        argv: Optional list of argument strings. ``None`` (the default)
            keeps the original behavior of reading ``sys.argv``; passing a
            list makes the parser testable in isolation.

    Returns:
        argparse.Namespace with all training options.
    """
    parser = argparse.ArgumentParser(
        description="Fine-tune VLMs with streaming datasets using Unsloth",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        # NOTE: the epilog previously advertised a --trackio-space flag that
        # this no-trackio variant does not define; the example was removed so
        # the help text only shows commands that actually parse.
        epilog="""
Examples:
  # Quick test run
  uv run vlm-streaming-sft-unsloth.py \\
    --max-steps 50 \\
    --output-repo username/vlm-test

  # Full training
  uv run vlm-streaming-sft-unsloth.py \\
    --max-steps 500 \\
    --output-repo username/vlm-finetuned

  # Custom dataset and model
  uv run vlm-streaming-sft-unsloth.py \\
    --base-model unsloth/Qwen3-VL-8B-Instruct-unsloth-bnb-4bit \\
    --dataset your-username/your-vlm-dataset \\
    --max-steps 1000 \\
    --output-repo username/custom-vlm
""",
    )

    # Model and data
    parser.add_argument(
        "--base-model",
        default="unsloth/Qwen3-VL-8B-Instruct-unsloth-bnb-4bit",
        help="Base VLM model (default: unsloth/Qwen3-VL-8B-Instruct-unsloth-bnb-4bit)",
    )
    parser.add_argument(
        "--dataset",
        default="davanstrien/iconclass-vlm-sft",
        help="Dataset with 'images' and 'messages' columns (default: davanstrien/iconclass-vlm-sft)",
    )
    parser.add_argument(
        "--output-repo",
        required=True,
        help="HF Hub repo to push model to (e.g., 'username/vlm-finetuned')",
    )

    # Training config
    parser.add_argument(
        "--num-epochs",
        type=float,
        default=None,
        help="Number of epochs (default: None). Use instead of --max-steps for non-streaming mode.",
    )
    parser.add_argument(
        "--max-steps",
        type=int,
        default=None,
        help="Training steps (default: None). Required for streaming mode, optional otherwise.",
    )
    parser.add_argument(
        "--batch-size",
        type=int,
        default=2,
        help="Per-device batch size (default: 2)",
    )
    parser.add_argument(
        "--gradient-accumulation",
        type=int,
        default=4,
        help="Gradient accumulation steps (default: 4). Effective batch = batch-size * this",
    )
    parser.add_argument(
        "--learning-rate",
        type=float,
        default=2e-4,
        help="Learning rate (default: 2e-4)",
    )
    parser.add_argument(
        "--max-seq-length",
        type=int,
        default=2048,
        help="Maximum sequence length (default: 2048)",
    )

    # LoRA config
    parser.add_argument(
        "--lora-r",
        type=int,
        default=16,
        help="LoRA rank (default: 16). Higher = more capacity but more VRAM",
    )
    parser.add_argument(
        "--lora-alpha",
        type=int,
        default=16,
        help="LoRA alpha (default: 16). Same as r per Unsloth notebook",
    )

    # Output
    parser.add_argument(
        "--save-local",
        default="vlm-streaming-output",
        help="Local directory to save model (default: vlm-streaming-output)",
    )

    # Evaluation and data control
    parser.add_argument(
        "--eval-split",
        type=float,
        default=0.0,
        help="Fraction of data for evaluation (0.0-0.5). Default: 0.0 (no eval)",
    )
    parser.add_argument(
        "--num-samples",
        type=int,
        default=None,
        help="Limit samples (default: None = use all for non-streaming, 500 for streaming)",
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=3407,
        help="Random seed for reproducibility (default: 3407)",
    )
    parser.add_argument(
        "--streaming",
        action="store_true",
        default=False,
        help="Use streaming mode (default: False). Use for very large datasets.",
    )

    return parser.parse_args(argv)
205
+
206
+
207
def main():
    """Run the end-to-end VLM fine-tuning pipeline.

    Stages: validate CLI flag combinations, load the 4-bit base model and
    attach LoRA adapters, load the dataset (streaming or in-memory, with an
    optional eval split), train via TRL's SFTTrainer, then save locally and
    push the adapter to the Hub.

    Fix vs. previous revision: ``train_loss`` was only assigned inside
    ``if train_result.metrics:`` but read unconditionally in the final-eval
    block, which could raise NameError when metrics were empty. It is now
    initialized up front.
    """
    args = parse_args()

    # Validate epochs/steps configuration
    if args.streaming and args.num_epochs:
        logger.error(
            "Cannot use --num-epochs with --streaming. Use --max-steps instead."
        )
        sys.exit(1)
    if args.streaming and not args.max_steps:
        args.max_steps = 500  # Default for streaming
        logger.info("Using default --max-steps=500 for streaming mode")
    if not args.streaming and not args.num_epochs and not args.max_steps:
        args.num_epochs = 1  # Default to 1 epoch for non-streaming
        logger.info("Using default --num-epochs=1 for non-streaming mode")

    # Determine training duration display
    if args.num_epochs:
        duration_str = f"{args.num_epochs} epoch(s)"
    else:
        duration_str = f"{args.max_steps} steps"

    print("=" * 70)
    print("VLM Fine-tuning with Unsloth")
    print("=" * 70)
    print("\nConfiguration:")
    print(f"  Base model: {args.base_model}")
    print(f"  Dataset: {args.dataset}")
    print(f"  Streaming: {args.streaming}")
    print(
        f"  Num samples: {args.num_samples or ('500' if args.streaming else 'all')}"
    )
    print(
        f"  Eval split: {args.eval_split if args.eval_split > 0 else '(disabled)'}"
    )
    print(f"  Seed: {args.seed}")
    print(f"  Training: {duration_str}")
    print(
        f"  Batch size: {args.batch_size} x {args.gradient_accumulation} = {args.batch_size * args.gradient_accumulation}"
    )
    print(f"  Learning rate: {args.learning_rate}")
    print(f"  LoRA rank: {args.lora_r}")
    print(f"  Output repo: {args.output_repo}")
    print()

    # Check CUDA before heavy imports (fail fast without paying import cost)
    check_cuda()

    # Enable fast transfers
    os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"

    # Import heavy dependencies (note: import from unsloth.trainer for VLM)
    from unsloth import FastVisionModel
    from unsloth.trainer import UnslothVisionDataCollator
    from datasets import load_dataset
    from trl import SFTTrainer, SFTConfig
    from huggingface_hub import login

    # Login to Hub
    token = os.environ.get("HF_TOKEN")
    if token:
        login(token=token)
        logger.info("Logged in to Hugging Face Hub")
    else:
        logger.warning("HF_TOKEN not set - model upload may fail")

    # 1. Load model (Qwen returns tokenizer, not processor)
    print("\n[1/5] Loading model...")
    start = time.time()

    model, tokenizer = FastVisionModel.from_pretrained(
        args.base_model,
        load_in_4bit=True,
        use_gradient_checkpointing="unsloth",
    )

    model = FastVisionModel.get_peft_model(
        model,
        finetune_vision_layers=True,
        finetune_language_layers=True,
        finetune_attention_modules=True,
        finetune_mlp_modules=True,
        r=args.lora_r,
        lora_alpha=args.lora_alpha,
        lora_dropout=0,
        bias="none",
        random_state=3407,
        use_rslora=False,
        loftq_config=None,
    )
    print(f"Model loaded in {time.time() - start:.1f}s")

    # 2. Load dataset (streaming or non-streaming)
    print(
        f"\n[2/5] Loading dataset ({'streaming' if args.streaming else 'non-streaming'})..."
    )
    start = time.time()

    if args.streaming:
        # Streaming mode: take limited samples
        dataset = load_dataset(args.dataset, split="train", streaming=True)
        num_samples = args.num_samples or 500

        # Peek at first sample to show info
        sample = next(iter(dataset))
        if "messages" in sample:
            print(f"  Sample has {len(sample['messages'])} messages")
        if "images" in sample:
            img_count = (
                len(sample["images"]) if isinstance(sample["images"], list) else 1
            )
            print(f"  Sample has {img_count} image(s)")

        # Reload and take samples (the peek above consumed the iterator)
        dataset = load_dataset(args.dataset, split="train", streaming=True)
        all_data = list(dataset.take(num_samples))
        print(f"  Loaded {len(all_data)} samples in {time.time() - start:.1f}s")

        if args.eval_split > 0:
            # Manual shuffle for streaming (no built-in split)
            import random

            random.seed(args.seed)
            random.shuffle(all_data)
            split_idx = int(len(all_data) * (1 - args.eval_split))
            train_data = all_data[:split_idx]
            eval_data = all_data[split_idx:]
            print(f"  Train: {len(train_data)} samples, Eval: {len(eval_data)} samples")
        else:
            train_data = all_data
            eval_data = None
    else:
        # Non-streaming: use proper train_test_split
        dataset = load_dataset(args.dataset, split="train")
        print(f"  Dataset has {len(dataset)} total samples")

        # Peek at first sample
        sample = dataset[0]
        if "messages" in sample:
            print(f"  Sample has {len(sample['messages'])} messages")
        if "images" in sample:
            img_count = (
                len(sample["images"]) if isinstance(sample["images"], list) else 1
            )
            print(f"  Sample has {img_count} image(s)")

        if args.num_samples:
            dataset = dataset.select(range(min(args.num_samples, len(dataset))))
            print(f"  Limited to {len(dataset)} samples")

        if args.eval_split > 0:
            split = dataset.train_test_split(test_size=args.eval_split, seed=args.seed)
            train_data = list(split["train"])
            eval_data = list(split["test"])
            print(f"  Train: {len(train_data)} samples, Eval: {len(eval_data)} samples")
        else:
            train_data = list(dataset)
            eval_data = None

    print(f"  Dataset ready in {time.time() - start:.1f}s")

    # 3. Configure trainer
    print("\n[3/5] Configuring trainer...")

    # Enable training mode
    FastVisionModel.for_training(model)

    # Calculate steps per epoch for logging/eval intervals
    effective_batch = args.batch_size * args.gradient_accumulation
    steps_per_epoch = len(train_data) // effective_batch

    # Determine run name and logging steps
    if args.num_epochs:
        run_name = f"vlm-sft-{args.num_epochs}ep"
        logging_steps = max(1, steps_per_epoch // 10)  # ~10 logs per epoch
    else:
        run_name = f"vlm-sft-{args.max_steps}steps"
        logging_steps = max(1, args.max_steps // 20)

    training_config = SFTConfig(
        output_dir=args.save_local,
        per_device_train_batch_size=args.batch_size,
        gradient_accumulation_steps=args.gradient_accumulation,
        warmup_steps=5,  # Per notebook (not warmup_ratio)
        num_train_epochs=args.num_epochs if args.num_epochs else 1,
        max_steps=args.max_steps if args.max_steps else -1,  # -1 means use epochs
        learning_rate=args.learning_rate,
        logging_steps=logging_steps,
        optim="adamw_8bit",  # Per notebook
        weight_decay=0.001,
        lr_scheduler_type="cosine" if args.num_epochs else "linear",
        seed=args.seed,
        # VLM-specific settings (required for Unsloth)
        remove_unused_columns=False,
        dataset_text_field="",
        dataset_kwargs={"skip_prepare_dataset": True},
        max_length=args.max_seq_length,
        # Logging disabled for testing
        report_to="none",
        run_name=run_name,
    )

    # Add evaluation config if eval is enabled
    if eval_data:
        if args.num_epochs:
            # For epoch-based training, eval at end of each epoch
            training_config.eval_strategy = "epoch"
            print("  Evaluation enabled: every epoch")
        else:
            training_config.eval_strategy = "steps"
            training_config.eval_steps = max(1, args.max_steps // 5)
            print(f"  Evaluation enabled: every {training_config.eval_steps} steps")

    # Use older 'tokenizer=' parameter (not processing_class) - required for Unsloth VLM
    trainer = SFTTrainer(
        model=model,
        tokenizer=tokenizer,  # Full processor, not processor.tokenizer
        data_collator=UnslothVisionDataCollator(model, tokenizer),
        train_dataset=train_data,
        eval_dataset=eval_data,  # None if no eval
        args=training_config,
    )

    # 4. Train
    print(f"\n[4/5] Training for {duration_str}...")
    if args.num_epochs:
        print(
            f"  (~{steps_per_epoch} steps/epoch, {int(steps_per_epoch * args.num_epochs)} total steps)"
        )
    start = time.time()

    train_result = trainer.train()

    train_time = time.time() - start
    total_steps = train_result.metrics.get(
        "train_steps", args.max_steps or steps_per_epoch * args.num_epochs
    )
    print(f"\nTraining completed in {train_time / 60:.1f} minutes")
    print(f"  Speed: {total_steps / train_time:.2f} steps/s")

    # Print training metrics.
    # Initialized up front so the eval block below never hits a NameError
    # when train_result.metrics is empty.
    train_loss = None
    if train_result.metrics:
        train_loss = train_result.metrics.get("train_loss")
        if train_loss is not None:
            print(f"  Final train loss: {train_loss:.4f}")

    # Print eval results if eval was enabled
    if eval_data:
        print("\nRunning final evaluation...")
        eval_results = trainer.evaluate()
        eval_loss = eval_results.get("eval_loss")
        if eval_loss is not None:
            print(f"  Final eval loss: {eval_loss:.4f}")
            # Truthiness (not `is not None`) also guards division by zero.
            if train_loss:
                ratio = eval_loss / train_loss
                if ratio > 1.5:
                    print(
                        f"  ⚠️ Eval loss is {ratio:.1f}x train loss - possible overfitting"
                    )
                else:
                    print(
                        f"  ✓ Eval/train ratio: {ratio:.2f} - model generalizes well"
                    )

    # 5. Save and push
    print("\n[5/5] Saving model...")

    # Save locally
    model.save_pretrained(args.save_local)
    tokenizer.save_pretrained(args.save_local)
    print(f"Saved locally to {args.save_local}/")

    # Push to Hub
    print(f"\nPushing to {args.output_repo}...")
    model.push_to_hub(args.output_repo, tokenizer=tokenizer)
    print(f"Model available at: https://huggingface.co/{args.output_repo}")

    print("\n" + "=" * 70)
    print("Done!")
    print("=" * 70)
487
+
488
+
489
+ if __name__ == "__main__":
490
+ # Show example usage if no arguments
491
+ if len(sys.argv) == 1:
492
+ print("=" * 70)
493
+ print("VLM Fine-tuning with Unsloth")
494
+ print("=" * 70)
495
+ print("\nFine-tune Vision-Language Models with optional train/eval split.")
496
+ print("\nFeatures:")
497
+ print(" - ~60% less VRAM with Unsloth optimizations")
498
+ print(" - 2x faster training vs standard methods")
499
+ print(" - Epoch-based or step-based training")
500
+ print(" - Optional evaluation to detect overfitting")
501
+ print(" - Trackio integration for monitoring")
502
+ print("\nEpoch-based training (recommended for full datasets):")
503
+ print("\n uv run vlm-streaming-sft-unsloth-qwen.py \\")
504
+ print(" --num-epochs 1 \\")
505
+ print(" --eval-split 0.2 \\")
506
+ print(" --output-repo your-username/vlm-finetuned")
507
+ print("\nHF Jobs example (1 epoch with eval):")
508
+ print(
509
+ "\n hf jobs uv run --flavor a100-large --secrets HF_TOKEN --timeout 4h -- \\"
510
+ )
511
+ print(
512
+ " https://huggingface.co/datasets/uv-scripts/training/raw/main/vlm-streaming-sft-unsloth-qwen.py \\"
513
+ )
514
+ print(" --num-epochs 1 \\")
515
+ print(" --eval-split 0.2 \\")
516
+ print(" --output-repo your-username/vlm-finetuned")
517
+ print("\nStep-based training (for streaming or quick tests):")
518
+ print("\n uv run vlm-streaming-sft-unsloth-qwen.py \\")
519
+ print(" --streaming \\")
520
+ print(" --max-steps 500 \\")
521
+ print(" --output-repo your-username/vlm-finetuned")
522
+ print("\nFor full help: uv run vlm-streaming-sft-unsloth-qwen.py --help")
523
+ print("=" * 70)
524
+ sys.exit(0)
525
+
526
+ main()