davanstrien HF Staff committed on
Commit
8b6653d
·
verified ·
1 Parent(s): 1279f5c

Upload vlm-streaming-sft-unsloth.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. vlm-streaming-sft-unsloth.py +38 -61
vlm-streaming-sft-unsloth.py CHANGED
@@ -6,6 +6,8 @@
6
  # "trl",
7
  # "huggingface_hub[hf_transfer]",
8
  # "trackio",
 
 
9
  # ]
10
  # ///
11
  """
@@ -90,8 +92,8 @@ Examples:
90
  # Model and data
91
  parser.add_argument(
92
  "--base-model",
93
- default="unsloth/gemma-3-4b-it-unsloth-bnb-4bit",
94
- help="Base VLM model (default: unsloth/gemma-3-4b-it-unsloth-bnb-4bit)",
95
  )
96
  parser.add_argument(
97
  "--dataset",
@@ -195,8 +197,9 @@ def main():
195
  os.environ["TRACKIO_SPACE_ID"] = args.trackio_space
196
  logger.info(f"Trackio dashboard: https://huggingface.co/spaces/{args.trackio_space}")
197
 
198
- # Import heavy dependencies
199
- from unsloth import FastVisionModel, UnslothVisionDataCollator
 
200
  from datasets import load_dataset
201
  from trl import SFTTrainer, SFTConfig
202
  from huggingface_hub import login
@@ -213,17 +216,15 @@ def main():
213
  print("\n[1/5] Loading model...")
214
  start = time.time()
215
 
216
- model, tokenizer = FastVisionModel.from_pretrained(
217
  args.base_model,
218
- max_seq_length=args.max_seq_length,
219
  load_in_4bit=True,
220
- dtype=None, # Auto-detect
221
- fast_inference=False, # For training
222
  )
223
 
224
  model = FastVisionModel.get_peft_model(
225
  model,
226
- finetune_vision_layers=False,
227
  finetune_language_layers=True,
228
  finetune_attention_modules=True,
229
  finetune_mlp_modules=True,
@@ -233,10 +234,9 @@ def main():
233
  bias="none",
234
  random_state=3407,
235
  use_rslora=False,
236
- use_gradient_checkpointing="unsloth",
 
237
  )
238
-
239
- model = FastVisionModel.for_training(model)
240
  print(f"Model loaded in {time.time() - start:.1f}s")
241
 
242
  # 2. Load streaming dataset
@@ -249,35 +249,49 @@ def main():
249
  streaming=True,
250
  )
251
 
252
- # Peek at first sample
253
  sample = next(iter(dataset))
254
  print(f"Dataset ready in {time.time() - start:.1f}s")
255
  if "messages" in sample:
256
  print(f" Sample has {len(sample['messages'])} messages")
257
  if "images" in sample:
258
- print(f" Sample has {len(sample['images']) if isinstance(sample['images'], list) else 1} image(s)")
 
 
 
 
 
 
 
 
259
 
260
  # 3. Configure trainer
261
  print("\n[3/5] Configuring trainer...")
262
 
 
 
 
263
  training_config = SFTConfig(
264
  output_dir=args.save_local,
265
  per_device_train_batch_size=args.batch_size,
266
  gradient_accumulation_steps=args.gradient_accumulation,
 
 
 
 
267
  max_steps=args.max_steps,
268
  learning_rate=args.learning_rate,
269
- warmup_steps=min(10, args.max_steps // 10),
270
  logging_steps=max(1, args.max_steps // 20),
271
- optim="adamw_8bit",
272
- weight_decay=0.01,
 
273
  lr_scheduler_type="cosine",
274
  seed=3407,
275
- bf16=True,
276
  # VLM-specific settings (required for Unsloth)
277
  remove_unused_columns=False,
278
  dataset_text_field="",
279
  dataset_kwargs={"skip_prepare_dataset": True},
280
- max_seq_length=args.max_seq_length,
281
  # Logging
282
  report_to="trackio",
283
  run_name=f"vlm-streaming-{args.max_steps}steps",
@@ -285,11 +299,10 @@ def main():
285
 
286
  trainer = SFTTrainer(
287
  model=model,
288
- tokenizer=tokenizer,
289
- data_collator=UnslothVisionDataCollator(model, tokenizer),
290
  train_dataset=dataset,
 
 
291
  args=training_config,
292
- processing_class=tokenizer, # Required for Unsloth to detect VLM
293
  )
294
 
295
  # 4. Train
@@ -307,51 +320,15 @@ def main():
307
 
308
  # Save locally
309
  model.save_pretrained(args.save_local)
310
- tokenizer.save_pretrained(args.save_local)
311
  print(f"Saved locally to {args.save_local}/")
312
 
313
  # Push to Hub
314
  print(f"\nPushing to {args.output_repo}...")
315
- model.push_to_hub(args.output_repo, tokenizer=tokenizer)
 
316
  print(f"Model available at: https://huggingface.co/{args.output_repo}")
317
 
318
- # Quick inference test
319
- print("\n" + "=" * 70)
320
- print("Quick inference test:")
321
- print("=" * 70)
322
-
323
- FastVisionModel.for_inference(model)
324
-
325
- # Create a simple test prompt
326
- test_messages = [
327
- {
328
- "role": "user",
329
- "content": [
330
- {"type": "text", "text": "Describe what you see in this image."},
331
- ],
332
- }
333
- ]
334
-
335
- inputs = tokenizer.apply_chat_template(
336
- test_messages,
337
- add_generation_prompt=True,
338
- return_tensors="pt",
339
- ).to("cuda")
340
-
341
- print("\nTest prompt: 'Describe what you see in this image.'")
342
- print("(Note: No image provided - this just tests the model loads correctly)")
343
-
344
- outputs = model.generate(
345
- input_ids=inputs,
346
- max_new_tokens=32,
347
- temperature=0.7,
348
- top_p=0.8,
349
- do_sample=True,
350
- )
351
-
352
- response = tokenizer.decode(outputs[0], skip_special_tokens=True)
353
- print(f"Response preview: {response[:200]}...")
354
-
355
  print("\n" + "=" * 70)
356
  print("Done!")
357
  print("=" * 70)
 
6
  # "trl",
7
  # "huggingface_hub[hf_transfer]",
8
  # "trackio",
9
+ # "transformers==4.56.2",
10
+ # "trl==0.22.2",
11
  # ]
12
  # ///
13
  """
 
92
  # Model and data
93
  parser.add_argument(
94
  "--base-model",
95
+ default="unsloth/gemma-3-4b-pt",
96
+ help="Base VLM model (default: unsloth/gemma-3-4b-pt)",
97
  )
98
  parser.add_argument(
99
  "--dataset",
 
197
  os.environ["TRACKIO_SPACE_ID"] = args.trackio_space
198
  logger.info(f"Trackio dashboard: https://huggingface.co/spaces/{args.trackio_space}")
199
 
200
+ # Import heavy dependencies (note: import from unsloth.trainer for VLM)
201
+ from unsloth import FastVisionModel
202
+ from unsloth.trainer import UnslothVisionDataCollator
203
  from datasets import load_dataset
204
  from trl import SFTTrainer, SFTConfig
205
  from huggingface_hub import login
 
216
  print("\n[1/5] Loading model...")
217
  start = time.time()
218
 
219
+ model, processor = FastVisionModel.from_pretrained(
220
  args.base_model,
 
221
  load_in_4bit=True,
222
+ use_gradient_checkpointing="unsloth",
 
223
  )
224
 
225
  model = FastVisionModel.get_peft_model(
226
  model,
227
+ finetune_vision_layers=True,
228
  finetune_language_layers=True,
229
  finetune_attention_modules=True,
230
  finetune_mlp_modules=True,
 
234
  bias="none",
235
  random_state=3407,
236
  use_rslora=False,
237
+ loftq_config=None,
238
+ target_modules="all-linear",
239
  )
 
 
240
  print(f"Model loaded in {time.time() - start:.1f}s")
241
 
242
  # 2. Load streaming dataset
 
249
  streaming=True,
250
  )
251
 
252
+ # Peek at first sample to show info
253
  sample = next(iter(dataset))
254
  print(f"Dataset ready in {time.time() - start:.1f}s")
255
  if "messages" in sample:
256
  print(f" Sample has {len(sample['messages'])} messages")
257
  if "images" in sample:
258
+ img_count = len(sample['images']) if isinstance(sample['images'], list) else 1
259
+ print(f" Sample has {img_count} image(s)")
260
+
261
+ # Reload dataset (consumed one sample above)
262
+ dataset = load_dataset(
263
+ args.dataset,
264
+ split="train",
265
+ streaming=True,
266
+ )
267
 
268
  # 3. Configure trainer
269
  print("\n[3/5] Configuring trainer...")
270
 
271
+ # Enable training mode
272
+ FastVisionModel.for_training(model)
273
+
274
  training_config = SFTConfig(
275
  output_dir=args.save_local,
276
  per_device_train_batch_size=args.batch_size,
277
  gradient_accumulation_steps=args.gradient_accumulation,
278
+ gradient_checkpointing=True,
279
+ gradient_checkpointing_kwargs={"use_reentrant": False},
280
+ max_grad_norm=0.3,
281
+ warmup_ratio=0.03,
282
  max_steps=args.max_steps,
283
  learning_rate=args.learning_rate,
 
284
  logging_steps=max(1, args.max_steps // 20),
285
+ save_strategy="steps",
286
+ optim="adamw_torch_fused",
287
+ weight_decay=0.001,
288
  lr_scheduler_type="cosine",
289
  seed=3407,
 
290
  # VLM-specific settings (required for Unsloth)
291
  remove_unused_columns=False,
292
  dataset_text_field="",
293
  dataset_kwargs={"skip_prepare_dataset": True},
294
+ max_length=args.max_seq_length,
295
  # Logging
296
  report_to="trackio",
297
  run_name=f"vlm-streaming-{args.max_steps}steps",
 
299
 
300
  trainer = SFTTrainer(
301
  model=model,
 
 
302
  train_dataset=dataset,
303
+ processing_class=processor.tokenizer,
304
+ data_collator=UnslothVisionDataCollator(model, processor),
305
  args=training_config,
 
306
  )
307
 
308
  # 4. Train
 
320
 
321
  # Save locally
322
  model.save_pretrained(args.save_local)
323
+ processor.save_pretrained(args.save_local)
324
  print(f"Saved locally to {args.save_local}/")
325
 
326
  # Push to Hub
327
  print(f"\nPushing to {args.output_repo}...")
328
+ model.push_to_hub(args.output_repo)
329
+ processor.push_to_hub(args.output_repo)
330
  print(f"Model available at: https://huggingface.co/{args.output_repo}")
331
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
332
  print("\n" + "=" * 70)
333
  print("Done!")
334
  print("=" * 70)