davanstrien HF Staff commited on
Commit
7419e05
·
verified ·
1 Parent(s): 3eb3121

Upload vlm-streaming-sft-unsloth.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. vlm-streaming-sft-unsloth.py +385 -0
vlm-streaming-sft-unsloth.py ADDED
@@ -0,0 +1,385 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # /// script
2
+ # requires-python = ">=3.10"
3
+ # dependencies = [
4
+ # "unsloth",
5
+ # "datasets",
6
+ # "trl",
7
+ # "huggingface_hub[hf_transfer]",
8
+ # "trackio",
9
+ # ]
10
+ # ///
11
+ """
12
+ Fine-tune Vision Language Models using streaming datasets and Unsloth optimizations.
13
+
14
+ Streams data directly from the Hub - no disk space needed for massive VLM datasets.
15
+ Uses Unsloth for ~60% less VRAM and 2x faster training.
16
+
17
+ Run locally (if you have a GPU):
18
+ uv run vlm-streaming-sft-unsloth.py \
19
+ --max-steps 100 \
20
+ --output-repo your-username/vlm-test
21
+
22
+ Run on HF Jobs:
23
+ hf jobs uv run vlm-streaming-sft-unsloth.py \
24
+ --flavor a100-large \
25
+ --secrets HF_TOKEN \
26
+ -- \
27
+ --max-steps 500 \
28
+ --output-repo your-username/vlm-finetuned
29
+
30
+ With Trackio dashboard:
31
+ uv run vlm-streaming-sft-unsloth.py \
32
+ --max-steps 500 \
33
+ --output-repo your-username/vlm-finetuned \
34
+ --trackio-space your-username/trackio
35
+ """
36
+
37
+ import argparse
38
+ import logging
39
+ import os
40
+ import sys
41
+ import time
42
+
43
# Configure root logging once at import time so every logger in the process
# (including third-party libraries that use logging) inherits this format.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
)
# Module-level logger used throughout this script.
logger = logging.getLogger(__name__)
48
+
49
+
50
+ def check_cuda():
51
+ """Check CUDA availability and exit if not available."""
52
+ import torch
53
+
54
+ if not torch.cuda.is_available():
55
+ logger.error("CUDA is not available. This script requires a GPU.")
56
+ logger.error("Run on a machine with a CUDA-capable GPU or use HF Jobs:")
57
+ logger.error(
58
+ " hf jobs uv run vlm-streaming-sft-unsloth.py --flavor a100-large ..."
59
+ )
60
+ sys.exit(1)
61
+ logger.info(f"CUDA available: {torch.cuda.get_device_name(0)}")
62
+
63
+
64
def parse_args():
    """Define and parse the command-line interface for this script."""
    epilog_text = """
Examples:
  # Quick test run
  uv run vlm-streaming-sft-unsloth.py \\
      --max-steps 50 \\
      --output-repo username/vlm-test

  # Full training with Trackio monitoring
  uv run vlm-streaming-sft-unsloth.py \\
      --max-steps 500 \\
      --output-repo username/vlm-finetuned \\
      --trackio-space username/trackio

  # Custom dataset and model
  uv run vlm-streaming-sft-unsloth.py \\
      --base-model unsloth/Qwen3-VL-8B-Instruct-unsloth-bnb-4bit \\
      --dataset your-username/your-vlm-dataset \\
      --max-steps 1000 \\
      --output-repo username/custom-vlm
"""
    ap = argparse.ArgumentParser(
        description="Fine-tune VLMs with streaming datasets using Unsloth",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=epilog_text,
    )

    # (flag, add_argument keyword options), grouped in order:
    # model/data, training config, LoRA config, logging/output.
    option_specs = [
        # Model and data
        ("--base-model", dict(
            default="unsloth/Qwen3-VL-8B-Instruct-unsloth-bnb-4bit",
            help="Base VLM model (default: unsloth/Qwen3-VL-8B-Instruct-unsloth-bnb-4bit)",
        )),
        ("--dataset", dict(
            default="davanstrien/iconclass-vlm-sft",
            help="Dataset with 'images' and 'messages' columns (default: davanstrien/iconclass-vlm-sft)",
        )),
        ("--output-repo", dict(
            required=True,
            help="HF Hub repo to push model to (e.g., 'username/vlm-finetuned')",
        )),
        # Training config
        ("--max-steps", dict(
            type=int,
            default=500,
            help="Training steps (default: 500). Required for streaming datasets.",
        )),
        ("--batch-size", dict(
            type=int,
            default=2,
            help="Per-device batch size (default: 2)",
        )),
        ("--gradient-accumulation", dict(
            type=int,
            default=4,
            help="Gradient accumulation steps (default: 4). Effective batch = batch-size * this",
        )),
        ("--learning-rate", dict(
            type=float,
            default=2e-4,
            help="Learning rate (default: 2e-4)",
        )),
        ("--max-seq-length", dict(
            type=int,
            default=2048,
            help="Maximum sequence length (default: 2048)",
        )),
        # LoRA config
        ("--lora-r", dict(
            type=int,
            default=16,
            help="LoRA rank (default: 16). Higher = more capacity but more VRAM",
        )),
        ("--lora-alpha", dict(
            type=int,
            default=32,
            help="LoRA alpha (default: 32). Usually 2*r",
        )),
        # Logging / output
        ("--trackio-space", dict(
            default=None,
            help="HF Space for Trackio dashboard (e.g., 'username/trackio')",
        )),
        ("--save-local", dict(
            default="vlm-streaming-output",
            help="Local directory to save model (default: vlm-streaming-output)",
        )),
    ]
    for flag, options in option_specs:
        ap.add_argument(flag, **options)

    return ap.parse_args()
166
+
167
+
168
def main():
    """End-to-end pipeline: load a 4-bit VLM with LoRA adapters, stream the
    training data from the Hub, fine-tune with TRL's SFTTrainer, save and push
    the result, then run a tiny text-only generation smoke test.
    """
    args = parse_args()

    # Echo the effective configuration up front so it appears in job logs.
    print("=" * 70)
    print("VLM Streaming Fine-tuning with Unsloth")
    print("=" * 70)
    print("\nConfiguration:")
    print(f"  Base model: {args.base_model}")
    print(f"  Dataset: {args.dataset}")
    print(f"  Max steps: {args.max_steps}")
    print(
        f"  Batch size: {args.batch_size} x {args.gradient_accumulation} = {args.batch_size * args.gradient_accumulation}"
    )
    print(f"  Learning rate: {args.learning_rate}")
    print(f"  LoRA rank: {args.lora_r}")
    print(f"  Output repo: {args.output_repo}")
    print(f"  Trackio space: {args.trackio_space or '(not configured)'}")
    print()

    # Check CUDA before heavy imports — fail fast before paying the
    # unsloth/trl import cost.
    check_cuda()

    # Enable fast transfers (hf_transfer) for Hub downloads/uploads.
    os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"

    # Set Trackio space if provided; trackio presumably reads this env var
    # at init time — confirm against the trackio docs.
    if args.trackio_space:
        os.environ["TRACKIO_SPACE_ID"] = args.trackio_space
        logger.info(f"Trackio dashboard: https://huggingface.co/spaces/{args.trackio_space}")

    # Import heavy dependencies only after the CUDA check has passed.
    # NOTE(review): unsloth is imported before trl — its patching presumably
    # must happen before transformers/trl load; confirm before reordering.
    from unsloth import FastVisionModel, UnslothVisionDataCollator
    from datasets import load_dataset
    from trl import SFTTrainer, SFTConfig
    from huggingface_hub import login

    # Login to Hub (required for the push_to_hub step below).
    token = os.environ.get("HF_TOKEN")
    if token:
        login(token=token)
        logger.info("Logged in to Hugging Face Hub")
    else:
        logger.warning("HF_TOKEN not set - model upload may fail")

    # 1. Load model in 4-bit and attach LoRA adapters.
    print("\n[1/5] Loading model...")
    start = time.time()

    model, tokenizer = FastVisionModel.from_pretrained(
        args.base_model,
        max_seq_length=args.max_seq_length,
        load_in_4bit=True,
    )

    # LoRA is applied to the language layers only; the vision tower stays
    # frozen (finetune_vision_layers=False).
    model = FastVisionModel.get_peft_model(
        model,
        finetune_vision_layers=False,
        finetune_language_layers=True,
        finetune_attention_modules=True,
        finetune_mlp_modules=True,
        r=args.lora_r,
        lora_alpha=args.lora_alpha,
        lora_dropout=0,
        bias="none",
        random_state=3407,
        use_rslora=False,
        use_gradient_checkpointing="unsloth",
    )

    model = FastVisionModel.for_training(model)
    print(f"Model loaded in {time.time() - start:.1f}s")

    # 2. Load streaming dataset — samples are fetched lazily from the Hub,
    # nothing is materialised on local disk.
    print("\n[2/5] Loading streaming dataset...")
    start = time.time()

    dataset = load_dataset(
        args.dataset,
        split="train",
        streaming=True,
    )

    # Peek at first sample to report whether the expected columns exist.
    sample = next(iter(dataset))
    print(f"Dataset ready in {time.time() - start:.1f}s")
    if "messages" in sample:
        print(f"  Sample has {len(sample['messages'])} messages")
    if "images" in sample:
        print(f"  Sample has {len(sample['images']) if isinstance(sample['images'], list) else 1} image(s)")

    # 3. Configure trainer.
    print("\n[3/5] Configuring trainer...")

    training_config = SFTConfig(
        output_dir=args.save_local,
        per_device_train_batch_size=args.batch_size,
        gradient_accumulation_steps=args.gradient_accumulation,
        # Streaming datasets have no known length, so training is bounded
        # by max_steps rather than epochs (see --max-steps help text).
        max_steps=args.max_steps,
        learning_rate=args.learning_rate,
        warmup_steps=min(10, args.max_steps // 10),
        logging_steps=max(1, args.max_steps // 20),
        optim="adamw_8bit",
        weight_decay=0.01,
        lr_scheduler_type="cosine",
        seed=3407,
        bf16=True,
        # VLM-specific settings (required for Unsloth)
        remove_unused_columns=False,
        dataset_text_field="",
        dataset_kwargs={"skip_prepare_dataset": True},
        max_seq_length=args.max_seq_length,
        # Logging
        report_to="trackio",
        run_name=f"vlm-streaming-{args.max_steps}steps",
    )

    trainer = SFTTrainer(
        model=model,
        tokenizer=tokenizer,
        # Unsloth's collator handles image + text batching for VLM SFT.
        data_collator=UnslothVisionDataCollator(model, tokenizer),
        train_dataset=dataset,
        args=training_config,
    )

    # 4. Train
    print(f"\n[4/5] Training for {args.max_steps} steps...")
    start = time.time()

    trainer.train()

    train_time = time.time() - start
    print(f"\nTraining completed in {train_time / 60:.1f} minutes")
    print(f"  Speed: {args.max_steps / train_time:.2f} steps/s")

    # 5. Save locally first, then push to the Hub.
    print("\n[5/5] Saving model...")

    # Save locally
    model.save_pretrained(args.save_local)
    tokenizer.save_pretrained(args.save_local)
    print(f"Saved locally to {args.save_local}/")

    # Push to Hub
    print(f"\nPushing to {args.output_repo}...")
    model.push_to_hub(args.output_repo, tokenizer=tokenizer)
    print(f"Model available at: https://huggingface.co/{args.output_repo}")

    # Quick inference test: deliberately text-only (no image) — this only
    # verifies the trained model still generates, not visual quality.
    print("\n" + "=" * 70)
    print("Quick inference test:")
    print("=" * 70)

    FastVisionModel.for_inference(model)

    # Create a simple test prompt
    test_messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "Describe what you see in this image."},
            ],
        }
    ]

    inputs = tokenizer.apply_chat_template(
        test_messages,
        add_generation_prompt=True,
        return_tensors="pt",
    ).to("cuda")

    print("\nTest prompt: 'Describe what you see in this image.'")
    print("(Note: No image provided - this just tests the model loads correctly)")

    outputs = model.generate(
        input_ids=inputs,
        max_new_tokens=32,
        temperature=0.7,
        top_p=0.8,
        do_sample=True,
    )

    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    print(f"Response preview: {response[:200]}...")

    print("\n" + "=" * 70)
    print("Done!")
    print("=" * 70)
355
+
356
+
357
if __name__ == "__main__":
    # With no CLI arguments, print a usage banner and exit cleanly instead of
    # letting argparse fail on the missing required --output-repo.
    if len(sys.argv) > 1:
        main()
    else:
        banner = [
            "=" * 70,
            "VLM Streaming Fine-tuning with Unsloth",
            "=" * 70,
            "\nFine-tune Vision-Language Models using streaming datasets.",
            "Data streams directly from the Hub - no disk space needed.",
            "\nFeatures:",
            "  - ~60% less VRAM with Unsloth optimizations",
            "  - 2x faster training vs standard methods",
            "  - Trackio integration for monitoring",
            "  - Works with any VLM dataset in conversation format",
            "\nExample usage:",
            "\n  uv run vlm-streaming-sft-unsloth.py \\",
            "    --max-steps 500 \\",
            "    --output-repo your-username/vlm-finetuned",
            "\nHF Jobs example:",
            "\n  hf jobs uv run vlm-streaming-sft-unsloth.py \\",
            "    --flavor a100-large \\",
            "    --secrets HF_TOKEN \\",
            "    -- \\",
            "    --max-steps 500 \\",
            "    --output-repo your-username/vlm-finetuned",
            "\nFor full help: uv run vlm-streaming-sft-unsloth.py --help",
            "=" * 70,
        ]
        for line in banner:
            print(line)
        sys.exit(0)