Upload vlm-streaming-sft-unsloth-qwen.py with huggingface_hub
Browse files
vlm-streaming-sft-unsloth-qwen.py
CHANGED
|
@@ -294,11 +294,17 @@ def main():
         run_name=f"vlm-streaming-{args.max_steps}steps",
     )

+    # Convert streaming dataset to list (required for Qwen3-VL per Unsloth docs)
+    # "Using map kicks in dataset standardization which can be complicated"
+    print(" Converting streaming dataset to list...")
+    train_data = list(dataset.take(500))  # Take enough samples for training
+    print(f" Loaded {len(train_data)} samples")
+
     trainer = SFTTrainer(
         model=model,
-        …                                # (removed line — content lost in page extraction; likely the old train_dataset argument)
-        processing_class=tokenizer,
+        tokenizer=tokenizer,
         data_collator=UnslothVisionDataCollator(model, tokenizer),
+        train_dataset=train_data,
         args=training_config,
     )
