davanstrien HF Staff commited on
Commit
db2156a
·
verified ·
1 Parent(s): 853c91a

Upload vlm-streaming-sft-unsloth-qwen.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. vlm-streaming-sft-unsloth-qwen.py +363 -0
vlm-streaming-sft-unsloth-qwen.py ADDED
@@ -0,0 +1,363 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "unsloth",
#     "datasets",
#     "huggingface_hub[hf_transfer]",
#     "trackio",
#     "transformers==4.56.2",
#     "trl==0.22.2",
# ]
# ///
"""
Fine-tune Vision Language Models using streaming datasets and Unsloth optimizations.

Streams data directly from the Hub - no disk space needed for massive VLM datasets.
Uses Unsloth for ~60% less VRAM and 2x faster training.

NOTE(review): the usage examples below invoke ``vlm-streaming-sft-unsloth.py``
while this file appears to have been uploaded as
``vlm-streaming-sft-unsloth-qwen.py`` - confirm the intended filename.

Run locally (if you have a GPU):
    uv run vlm-streaming-sft-unsloth.py \
        --max-steps 100 \
        --output-repo your-username/vlm-test

Run on HF Jobs:
    hf jobs uv run vlm-streaming-sft-unsloth.py \
        --flavor a100-large \
        --secrets HF_TOKEN \
        -- \
        --max-steps 500 \
        --output-repo your-username/vlm-finetuned

With Trackio dashboard:
    uv run vlm-streaming-sft-unsloth.py \
        --max-steps 500 \
        --output-repo your-username/vlm-finetuned \
        --trackio-space your-username/trackio
"""

import argparse
import logging
import os
import sys
import time

# Module-level logging: timestamped INFO-level messages to stderr.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
)
logger = logging.getLogger(__name__)
+
51
+
52
def check_cuda():
    """Verify a CUDA-capable GPU is present; exit the process otherwise.

    Imports torch lazily so the check itself stays cheap, and terminates
    with status 1 (after logging guidance) when no GPU is available.
    """
    import torch

    if torch.cuda.is_available():
        # Happy path: report which device we will train on.
        logger.info(f"CUDA available: {torch.cuda.get_device_name(0)}")
        return

    logger.error("CUDA is not available. This script requires a GPU.")
    logger.error("Run on a machine with a CUDA-capable GPU or use HF Jobs:")
    logger.error(
        "  hf jobs uv run vlm-streaming-sft-unsloth.py --flavor a100-large ..."
    )
    sys.exit(1)
64
+
65
+
66
def parse_args():
    """Build the CLI and return parsed arguments.

    Options are declared in a flag/kwargs spec table and registered in a
    single loop; flag order (and therefore ``--help`` output) matches the
    declaration order below.
    """
    cli = argparse.ArgumentParser(
        description="Fine-tune VLMs with streaming datasets using Unsloth",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Quick test run
  uv run vlm-streaming-sft-unsloth.py \\
      --max-steps 50 \\
      --output-repo username/vlm-test

  # Full training with Trackio monitoring
  uv run vlm-streaming-sft-unsloth.py \\
      --max-steps 500 \\
      --output-repo username/vlm-finetuned \\
      --trackio-space username/trackio

  # Custom dataset and model
  uv run vlm-streaming-sft-unsloth.py \\
      --base-model unsloth/Qwen3-VL-8B-Instruct-unsloth-bnb-4bit \\
      --dataset your-username/your-vlm-dataset \\
      --max-steps 1000 \\
      --output-repo username/custom-vlm
""",
    )

    option_specs = [
        # Model and data
        ("--base-model", dict(
            default="unsloth/Qwen3-VL-8B-Instruct-unsloth-bnb-4bit",
            help="Base VLM model (default: unsloth/Qwen3-VL-8B-Instruct-unsloth-bnb-4bit)",
        )),
        ("--dataset", dict(
            default="davanstrien/iconclass-vlm-sft",
            help="Dataset with 'images' and 'messages' columns (default: davanstrien/iconclass-vlm-sft)",
        )),
        ("--output-repo", dict(
            required=True,
            help="HF Hub repo to push model to (e.g., 'username/vlm-finetuned')",
        )),
        # Training config
        ("--max-steps", dict(
            type=int,
            default=500,
            help="Training steps (default: 500). Required for streaming datasets.",
        )),
        ("--batch-size", dict(
            type=int,
            default=2,
            help="Per-device batch size (default: 2)",
        )),
        ("--gradient-accumulation", dict(
            type=int,
            default=4,
            help="Gradient accumulation steps (default: 4). Effective batch = batch-size * this",
        )),
        ("--learning-rate", dict(
            type=float,
            default=2e-4,
            help="Learning rate (default: 2e-4)",
        )),
        ("--max-seq-length", dict(
            type=int,
            default=2048,
            help="Maximum sequence length (default: 2048)",
        )),
        # LoRA config
        ("--lora-r", dict(
            type=int,
            default=16,
            help="LoRA rank (default: 16). Higher = more capacity but more VRAM",
        )),
        ("--lora-alpha", dict(
            type=int,
            default=32,
            help="LoRA alpha (default: 32). Usually 2*r",
        )),
        # Logging
        ("--trackio-space", dict(
            default=None,
            help="HF Space for Trackio dashboard (e.g., 'username/trackio')",
        )),
        ("--save-local", dict(
            default="vlm-streaming-output",
            help="Local directory to save model (default: vlm-streaming-output)",
        )),
    ]
    for flag, kwargs in option_specs:
        cli.add_argument(flag, **kwargs)

    return cli.parse_args()
168
+
169
+
170
def main() -> None:
    """Run the full fine-tuning pipeline.

    Steps: parse CLI args, verify CUDA, log in to the Hub, load the base
    VLM with Unsloth 4-bit + LoRA, stream the training dataset, train with
    TRL's SFTTrainer, then save locally and push to the Hub.
    """
    args = parse_args()

    # Echo the effective configuration before any heavy work starts.
    print("=" * 70)
    print("VLM Streaming Fine-tuning with Unsloth")
    print("=" * 70)
    print("\nConfiguration:")
    print(f"  Base model: {args.base_model}")
    print(f"  Dataset: {args.dataset}")
    print(f"  Max steps: {args.max_steps}")
    print(
        f"  Batch size: {args.batch_size} x {args.gradient_accumulation} = {args.batch_size * args.gradient_accumulation}"
    )
    print(f"  Learning rate: {args.learning_rate}")
    print(f"  LoRA rank: {args.lora_r}")
    print(f"  Output repo: {args.output_repo}")
    print(f"  Trackio space: {args.trackio_space or '(not configured)'}")
    print()

    # Check CUDA before heavy imports (fail fast with a clear message).
    check_cuda()

    # Enable fast transfers (hf_transfer backend for Hub downloads/uploads).
    os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"

    # Set Trackio space if provided; trackio reads this env var at init.
    if args.trackio_space:
        os.environ["TRACKIO_SPACE_ID"] = args.trackio_space
        logger.info(f"Trackio dashboard: https://huggingface.co/spaces/{args.trackio_space}")

    # Import heavy dependencies (note: import from unsloth.trainer for VLM).
    # Deferred so the config banner and CUDA check run before slow imports.
    from unsloth import FastVisionModel
    from unsloth.trainer import UnslothVisionDataCollator
    from datasets import load_dataset
    from trl import SFTTrainer, SFTConfig
    from huggingface_hub import login

    # Login to Hub; without a token the final push_to_hub will likely fail.
    token = os.environ.get("HF_TOKEN")
    if token:
        login(token=token)
        logger.info("Logged in to Hugging Face Hub")
    else:
        logger.warning("HF_TOKEN not set - model upload may fail")

    # 1. Load model (Qwen returns tokenizer, not processor)
    print("\n[1/5] Loading model...")
    start = time.time()

    model, tokenizer = FastVisionModel.from_pretrained(
        args.base_model,
        load_in_4bit=True,
        use_gradient_checkpointing="unsloth",
    )

    # Attach LoRA adapters to both the vision and language towers.
    model = FastVisionModel.get_peft_model(
        model,
        finetune_vision_layers=True,
        finetune_language_layers=True,
        finetune_attention_modules=True,
        finetune_mlp_modules=True,
        r=args.lora_r,
        lora_alpha=args.lora_alpha,
        lora_dropout=0,
        bias="none",
        random_state=3407,
        use_rslora=False,
        loftq_config=None,
    )
    print(f"Model loaded in {time.time() - start:.1f}s")

    # 2. Load streaming dataset (no local download of the full dataset).
    print("\n[2/5] Loading streaming dataset...")
    start = time.time()

    dataset = load_dataset(
        args.dataset,
        split="train",
        streaming=True,
    )

    # Peek at first sample to show info.
    # Assumes samples carry "messages" (conversation) and "images" columns
    # as stated in the --dataset help text - TODO confirm for custom datasets.
    sample = next(iter(dataset))
    print(f"Dataset ready in {time.time() - start:.1f}s")
    if "messages" in sample:
        print(f"  Sample has {len(sample['messages'])} messages")
    if "images" in sample:
        img_count = len(sample['images']) if isinstance(sample['images'], list) else 1
        print(f"  Sample has {img_count} image(s)")

    # Reload dataset (consumed one sample above)
    dataset = load_dataset(
        args.dataset,
        split="train",
        streaming=True,
    )

    # 3. Configure trainer
    print("\n[3/5] Configuring trainer...")

    # Enable training mode (Unsloth toggles adapters/gradients for training).
    FastVisionModel.for_training(model)

    training_config = SFTConfig(
        output_dir=args.save_local,
        per_device_train_batch_size=args.batch_size,
        gradient_accumulation_steps=args.gradient_accumulation,
        gradient_checkpointing=True,
        gradient_checkpointing_kwargs={"use_reentrant": False},
        max_grad_norm=0.3,
        warmup_ratio=0.03,
        max_steps=args.max_steps,
        learning_rate=args.learning_rate,
        # ~20 log lines over the whole run, but at least every step.
        logging_steps=max(1, args.max_steps // 20),
        save_strategy="steps",
        optim="adamw_torch_fused",
        weight_decay=0.001,
        lr_scheduler_type="cosine",
        seed=3407,
        # VLM-specific settings (required for Unsloth)
        remove_unused_columns=False,
        dataset_text_field="",
        dataset_kwargs={"skip_prepare_dataset": True},
        max_length=args.max_seq_length,
        # Logging
        report_to="trackio",
        run_name=f"vlm-streaming-{args.max_steps}steps",
    )

    # NOTE(review): recent TRL versions renamed SFTTrainer's `tokenizer`
    # kwarg to `processing_class`; Unsloth's patched trainer appears to
    # accept `tokenizer` - confirm against the pinned trl==0.22.2.
    trainer = SFTTrainer(
        model=model,
        tokenizer=tokenizer,
        data_collator=UnslothVisionDataCollator(model, tokenizer),
        train_dataset=dataset,
        args=training_config,
    )

    # 4. Train
    print(f"\n[4/5] Training for {args.max_steps} steps...")
    start = time.time()

    trainer.train()

    train_time = time.time() - start
    print(f"\nTraining completed in {train_time / 60:.1f} minutes")
    print(f"  Speed: {args.max_steps / train_time:.2f} steps/s")

    # 5. Save and push
    print("\n[5/5] Saving model...")

    # Save locally (LoRA adapters + tokenizer files).
    model.save_pretrained(args.save_local)
    tokenizer.save_pretrained(args.save_local)
    print(f"Saved locally to {args.save_local}/")

    # Push to Hub
    print(f"\nPushing to {args.output_repo}...")
    model.push_to_hub(args.output_repo, tokenizer=tokenizer)
    print(f"Model available at: https://huggingface.co/{args.output_repo}")

    print("\n" + "=" * 70)
    print("Done!")
    print("=" * 70)
+
334
+
335
if __name__ == "__main__":
    # With no CLI arguments, show a friendly usage banner rather than
    # letting argparse fail on the required --output-repo option.
    if len(sys.argv) > 1:
        main()
    else:
        sep = "=" * 70
        print(sep)
        print("VLM Streaming Fine-tuning with Unsloth")
        print(sep)
        print("\nFine-tune Vision-Language Models using streaming datasets.")
        print("Data streams directly from the Hub - no disk space needed.")
        print("\nFeatures:")
        print("  - ~60% less VRAM with Unsloth optimizations")
        print("  - 2x faster training vs standard methods")
        print("  - Trackio integration for monitoring")
        print("  - Works with any VLM dataset in conversation format")
        print("\nExample usage:")
        print("\n  uv run vlm-streaming-sft-unsloth.py \\")
        print("    --max-steps 500 \\")
        print("    --output-repo your-username/vlm-finetuned")
        print("\nHF Jobs example:")
        print("\n  hf jobs uv run vlm-streaming-sft-unsloth.py \\")
        print("    --flavor a100-large \\")
        print("    --secrets HF_TOKEN \\")
        print("    -- \\")
        print("    --max-steps 500 \\")
        print("    --output-repo your-username/vlm-finetuned")
        print("\nFor full help: uv run vlm-streaming-sft-unsloth.py --help")
        print(sep)
        sys.exit(0)