# /// script
# requires-python = ">=3.11"
# dependencies = []
#
# # Note: All dependencies are pre-installed in the davanstrien13/unsloth-uv Docker image:
# #   unsloth, torch, transformers, peft, bitsandbytes, accelerate, xformers,
# #   pillow, datasets, trl, huggingface-hub[hf_transfer]
#
# ///
"""
Fine-tune Vision-Language Models for Iconclass metadata generation using Unsloth.

This script trains VLMs to generate structured Iconclass codes from artwork images,
using Unsloth's optimized training for 2x speed and lower memory usage.

Features:
- 🚀 2x faster training with Unsloth optimizations
- 💾 4-bit quantization for efficient memory usage
- 📊 LoRA fine-tuning for parameter efficiency
- 🎨 Specialized for art history metadata (Iconclass)
- 🤗 Seamless HF Hub integration
"""

# Import unsloth first for optimizations
from unsloth import FastVisionModel, UnslothVisionDataCollator

import argparse
import json
import logging
import os
import sys
from datetime import datetime
from typing import Any, Dict

import torch
from datasets import load_dataset
from huggingface_hub import HfApi, ModelCard, login
from trl import SFTConfig, SFTTrainer

logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)


def check_cuda_availability():
    """Check if CUDA is available and exit if not."""
    if not torch.cuda.is_available():
        logger.error("CUDA is not available. This script requires a GPU.")
        logger.error("Please run on a machine with a CUDA-capable GPU.")
        sys.exit(1)
    else:
        logger.info(f"CUDA is available. GPU: {torch.cuda.get_device_name(0)}")

def create_model_card(
    base_model: str,
    dataset: str,
    num_samples: int,
    training_time: str,
    lora_r: int,
    lora_alpha: int,
    learning_rate: float,
    batch_size: int,
    gradient_accumulation: int,
    max_steps: int,
) -> str:
    """Create a comprehensive model card for the fine-tuned model."""
    model_name = base_model.split("/")[-1]

    return f"""---
base_model: {base_model}
tags:
- vision
- vlm
- iconclass
- art-history
- unsloth
- fine-tuned
- lora
library_name: transformers
license: mit
---

# Iconclass VLM - Fine-tuned {model_name}

This model generates [Iconclass](https://iconclass.org) metadata codes from artwork images.
Fine-tuned using [Unsloth](https://github.com/unslothai/unsloth) for efficient training.

## Model Details

- **Base Model**: [{base_model}](https://huggingface.co/{base_model})
- **Training Method**: Supervised Fine-Tuning with LoRA
- **Training Framework**: Unsloth + TRL
- **Task**: Structured metadata generation (JSON output)
- **Domain**: Art history / Cultural heritage

## Training Details

### Dataset

- **Source**: [{dataset}](https://huggingface.co/datasets/{dataset})
- **Samples**: {num_samples:,}
- **Format**: Vision-language pairs with Iconclass labels
- **Training Time**: {training_time}
- **Training Date**: {datetime.now().strftime("%Y-%m-%d")}

### Configuration

**LoRA Settings**
- Rank (r): {lora_r}
- Alpha: {lora_alpha}
- Dropout: 0.1
- Target modules: Language layers + Attention

**Training Hyperparameters**
- Learning rate: {learning_rate}
- Batch size: {batch_size}
- Gradient accumulation: {gradient_accumulation}
- Effective batch size: {batch_size * gradient_accumulation}
- Max steps: {max_steps:,}
- Optimizer: AdamW 8-bit
- Precision: bfloat16

**Efficiency**
- Quantization: 4-bit (Unsloth)
- Training speedup: ~2x (vs standard training)
- Memory optimization: Gradient checkpointing

## Usage

```python
from unsloth import FastVisionModel
from PIL import Image

# Load model
model, tokenizer = FastVisionModel.from_pretrained(
    model_name="your-username/this-model",
    load_in_4bit=True,
    max_seq_length=2048,
)
FastVisionModel.for_inference(model)

# Prepare input
image = Image.open("artwork.jpg")
prompt = "Extract ICONCLASS labels for this image."

messages = [
    {{
        "role": "user",
        "content": [
            {{"type": "image"}},
            {{"type": "text", "text": prompt}},
        ],
    }}
]

# Build the text prompt, then pass the image and text through the processor
input_text = tokenizer.apply_chat_template(messages, add_generation_prompt=True)
inputs = tokenizer(
    image,
    input_text,
    add_special_tokens=False,
    return_tensors="pt",
).to("cuda")

# Generate
outputs = model.generate(
    **inputs,
    max_new_tokens=256,
    temperature=0.7,
    top_p=0.9,
)
response = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(response)
# {{"iconclass-codes": ["25H213", "25H216", "25I"]}}
```

## Output Format

The model outputs JSON with Iconclass codes:

```json
{{
  "iconclass-codes": ["31A235", "31A24(+1)", "61B(+54)"]
}}
```

## Iconclass System

Iconclass is a hierarchical classification system for art and iconography:

- **2** Nature (landscapes, animals, plants)
- **3** Human Being (portraits, figures, anatomy)
- **4** Society & Civilization (architecture, tools)
- **7** Bible (religious scenes)
- **9** Classical Mythology

Learn more: [iconclass.org](https://iconclass.org)

## Limitations

- Trained specifically on Western art history
- Best performance on artworks with existing Iconclass labels
- May struggle with contemporary or non-Western art
- Outputs should be validated by domain experts

## Training Script

Trained using a UV script for reproducibility:

```bash
uv run https://huggingface.co/datasets/uv-scripts/training/raw/main/iconclass-vlm-sft.py \\
    --base-model {base_model} \\
    --dataset {dataset} \\
    --output-model your-username/iconclass-vlm \\
    --lora-r {lora_r} \\
    --learning-rate {learning_rate}
```

## Citation

If you use this model, please cite:

```bibtex
@misc{{iconclass-vlm-{datetime.now().year},
  author = {{Your Name}},
  title = {{Iconclass VLM: Vision-Language Model for Art History Metadata}},
  year = {{{datetime.now().year}}},
  publisher = {{Hugging Face}},
  howpublished = {{\\url{{https://huggingface.co/your-username/this-model}}}}
}}
```

---

Fine-tuned with 🦥 [Unsloth](https://github.com/unslothai/unsloth) • Trained using 🤖 [UV Scripts](https://huggingface.co/uv-scripts)
"""

def main(
    base_model: str,
    dataset: str,
    output_model: str,
    lora_r: int = 16,
    lora_alpha: int = 32,
    lora_dropout: float = 0.1,
    learning_rate: float = 2e-5,
    batch_size: int = 2,
    gradient_accumulation: int = 8,
    max_steps: int | None = None,
    num_epochs: float = 1.0,
    warmup_ratio: float = 0.1,
    logging_steps: int = 10,
    save_steps: int = 100,
    eval_steps: int = 100,
    max_seq_length: int = 2048,
    hf_token: str | None = None,
    dataset_split: str = "train",
    eval_split: str = "test",
    private: bool = False,
    push_to_hub: bool = True,
):
    """Train a vision-language model for Iconclass metadata generation."""
    # Check CUDA availability first
    check_cuda_availability()

    # Track start time
    start_time = datetime.now()

    # Enable HF_TRANSFER for faster downloads
    os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"

    # Login to HF if a token is provided
    HF_TOKEN = hf_token or os.environ.get("HF_TOKEN")
    if HF_TOKEN:
        login(token=HF_TOKEN)
    else:
        logger.warning("No HF token provided. Push to Hub will fail without auth.")

    # Load dataset
    logger.info(f"Loading dataset: {dataset}")
    train_dataset = load_dataset(dataset, split=dataset_split)
    eval_dataset = load_dataset(dataset, split=eval_split) if eval_split else None

    logger.info(f"Training samples: {len(train_dataset):,}")
    if eval_dataset:
        logger.info(f"Evaluation samples: {len(eval_dataset):,}")

    # Calculate max_steps if not provided
    # (e.g. 10,000 samples with batch_size=2 and gradient_accumulation=8 -> 625 steps/epoch)
    if max_steps is None:
        steps_per_epoch = len(train_dataset) // (batch_size * gradient_accumulation)
        max_steps = int(steps_per_epoch * num_epochs)
        logger.info(
            f"Calculated max_steps: {max_steps:,} ({num_epochs} epoch(s), {steps_per_epoch} steps/epoch)"
        )

    # Load model with Unsloth
    logger.info(f"Loading model: {base_model}")
    model, tokenizer = FastVisionModel.from_pretrained(
        model_name=base_model,
        max_seq_length=max_seq_length,
        load_in_4bit=True,
        dtype=None,  # Auto-detect
        fast_inference=False,  # For training
        gpu_memory_utilization=0.8,
    )

    # Apply LoRA
    logger.info("Configuring LoRA...")
    model = FastVisionModel.get_peft_model(
        model,
        finetune_vision_layers=False,  # Only finetune language layers
        finetune_language_layers=True,
        finetune_attention_modules=True,
        finetune_mlp_modules=True,
        r=lora_r,
        lora_alpha=lora_alpha,
        lora_dropout=lora_dropout,
        bias="none",
        random_state=42,
        use_rslora=False,
        use_gradient_checkpointing="unsloth",
    )

    # Prepare model for training
    model = FastVisionModel.for_training(model)

    # Configure training
    logger.info("Configuring training...")
    training_args = SFTConfig(
        output_dir="./iconclass-vlm-outputs",
        per_device_train_batch_size=batch_size,
        per_device_eval_batch_size=batch_size,
        gradient_accumulation_steps=gradient_accumulation,
        max_steps=max_steps,
        learning_rate=learning_rate,
        warmup_ratio=warmup_ratio,
        logging_steps=logging_steps,
        save_steps=save_steps,
        eval_steps=eval_steps if eval_dataset else None,
        eval_strategy="steps" if eval_dataset else "no",
        save_strategy="steps",
        bf16=True,
        optim="adamw_8bit",
        weight_decay=0.01,
        lr_scheduler_type="cosine",
        seed=42,
        remove_unused_columns=False,  # Required for Unsloth VLM
        dataset_text_field="",  # Required for Unsloth VLM
        dataset_kwargs={"skip_prepare_dataset": True},  # Required for Unsloth VLM
        max_seq_length=max_seq_length,
        gradient_checkpointing=True,
        gradient_checkpointing_kwargs={"use_reentrant": False},
        hub_model_id=output_model if push_to_hub else None,
        push_to_hub=push_to_hub,
        hub_private_repo=private,
        hub_token=HF_TOKEN,
        report_to="none",  # Can change to "tensorboard" or "wandb"
    )

    # Initialize trainer
    logger.info("Initializing trainer...")
    trainer = SFTTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=UnslothVisionDataCollator(model, tokenizer),
        processing_class=tokenizer,
    )
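    # The collator above builds the multimodal batch straight from the raw dataset
    # rows (hence skip_prepare_dataset=True and dataset_text_field="" in SFTConfig).
    # Each row is assumed to already be a chat-style conversation with the image
    # attached to the user turn -- an illustrative sketch, not verified against
    # davanstrien/iconclass-vlm-sft:
    #
    # {
    #     "messages": [
    #         {"role": "user", "content": [
    #             {"type": "image", "image": <PIL.Image>},
    #             {"type": "text", "text": "Extract ICONCLASS labels for this image."},
    #         ]},
    #         {"role": "assistant", "content": [
    #             {"type": "text", "text": '{"iconclass-codes": ["25H213", "25I"]}'},
    #         ]},
    #     ]
    # }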
    # Train!
    logger.info("Starting training...")
    logger.info(f"Total steps: {max_steps:,}")
    logger.info(
        f"Effective batch size: {batch_size * gradient_accumulation * torch.cuda.device_count()}"
    )

    trainer.train()
    logger.info("Training complete!")

    # Calculate training time
    end_time = datetime.now()
    training_duration = end_time - start_time
    training_time = f"{training_duration.total_seconds() / 60:.1f} minutes"
    logger.info(f"Training time: {training_time}")

    # Save model
    logger.info("Saving model...")
    trainer.save_model(training_args.output_dir)

    # Create and push model card
    if push_to_hub:
        logger.info("Creating model card...")
        card_content = create_model_card(
            base_model=base_model,
            dataset=dataset,
            num_samples=len(train_dataset),
            training_time=training_time,
            lora_r=lora_r,
            lora_alpha=lora_alpha,
            learning_rate=learning_rate,
            batch_size=batch_size,
            gradient_accumulation=gradient_accumulation,
            max_steps=max_steps,
        )
        card = ModelCard(card_content)
        card.push_to_hub(output_model, token=HF_TOKEN)
        logger.info("✅ Model card created and pushed!")
        logger.info("✅ Training complete!")
        logger.info(f"Model available at: https://huggingface.co/{output_model}")
    else:
        logger.info(f"✅ Training complete! Model saved to {training_args.output_dir}")


if __name__ == "__main__":
    # Show example usage if no arguments are given
    if len(sys.argv) == 1:
        print("=" * 80)
        print("Unsloth VLM Fine-tuning for Iconclass Metadata")
        print("=" * 80)
        print("\nFine-tune vision-language models to generate Iconclass codes from")
        print("artwork images using Unsloth's 2x faster training.")
        print("\nFeatures:")
        print("- 🚀 2x faster training with Unsloth optimizations")
        print("- 💾 4-bit quantization for efficient memory usage")
        print("- 📊 LoRA fine-tuning for parameter efficiency")
        print("- 🎨 Specialized for art history metadata (Iconclass)")
        print("\nExample usage:")
        print("\n1. Basic training:")
        print("   uv run iconclass-vlm-sft.py \\")
        print("     --base-model Qwen/Qwen3-VL-8B-Instruct \\")
        print("     --dataset davanstrien/iconclass-vlm-sft \\")
        print("     --output-model your-username/iconclass-vlm")
        print("\n2. Custom LoRA settings:")
        print("   uv run iconclass-vlm-sft.py \\")
        print("     --base-model Qwen/Qwen3-VL-8B-Instruct \\")
        print("     --dataset davanstrien/iconclass-vlm-sft \\")
        print("     --output-model your-username/iconclass-vlm \\")
        print("     --lora-r 32 \\")
        print("     --lora-alpha 64 \\")
        print("     --learning-rate 1e-5")
        print("\n3. Quick test run (fewer steps):")
        print("   uv run iconclass-vlm-sft.py \\")
        print("     --base-model Qwen/Qwen3-VL-8B-Instruct \\")
        print("     --dataset davanstrien/iconclass-vlm-sft \\")
        print("     --output-model your-username/iconclass-vlm-test \\")
        print("     --max-steps 100")
        print("\n4. Running on HF Jobs:")
        print("   hf jobs uv run \\")
        print("     --flavor a100-large \\")
        print("     -s HF_TOKEN=$HF_TOKEN \\")
        print(
            "     https://huggingface.co/datasets/uv-scripts/training/raw/main/iconclass-vlm-sft.py \\"
        )
        print("     --base-model Qwen/Qwen3-VL-8B-Instruct \\")
        print("     --dataset davanstrien/iconclass-vlm-sft \\")
        print("     --output-model your-username/iconclass-vlm")
        print("\n" + "=" * 80)
        print("\nFor full help, run: uv run iconclass-vlm-sft.py --help")
        sys.exit(0)

    parser = argparse.ArgumentParser(
        description="Fine-tune VLMs for Iconclass metadata generation with Unsloth",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Basic training
  uv run iconclass-vlm-sft.py \\
    --base-model Qwen/Qwen3-VL-8B-Instruct \\
    --dataset davanstrien/iconclass-vlm-sft \\
    --output-model username/iconclass-vlm

  # Custom hyperparameters
  uv run iconclass-vlm-sft.py \\
    --base-model Qwen/Qwen3-VL-8B-Instruct \\
    --dataset davanstrien/iconclass-vlm-sft \\
    --output-model username/iconclass-vlm \\
    --lora-r 32 --learning-rate 1e-5 --batch-size 4

  # Quick test
  uv run iconclass-vlm-sft.py \\
    --base-model Qwen/Qwen3-VL-8B-Instruct \\
    --dataset davanstrien/iconclass-vlm-sft \\
    --output-model username/test \\
    --max-steps 50
""",
    )

    # Required arguments
    parser.add_argument(
        "--base-model",
        required=True,
        help="Base VLM model from Hugging Face Hub (e.g., Qwen/Qwen3-VL-8B-Instruct)",
    )
    parser.add_argument(
        "--dataset",
        required=True,
        help="Training dataset ID from Hugging Face Hub",
    )
    parser.add_argument(
        "--output-model",
        required=True,
        help="Output model ID for Hugging Face Hub (e.g., username/iconclass-vlm)",
    )

    # LoRA configuration
    lora_group = parser.add_argument_group("LoRA Configuration")
    lora_group.add_argument(
        "--lora-r",
        type=int,
        default=16,
        help="LoRA rank (default: 16). Higher = more capacity but slower",
    )
    lora_group.add_argument(
        "--lora-alpha",
        type=int,
        default=32,
        help="LoRA alpha scaling (default: 32). Usually 2*r",
    )
    lora_group.add_argument(
        "--lora-dropout",
        type=float,
        default=0.1,
        help="LoRA dropout rate (default: 0.1)",
    )

    # Training configuration
    training_group = parser.add_argument_group("Training Configuration")
    training_group.add_argument(
        "--learning-rate",
        type=float,
        default=2e-5,
        help="Learning rate (default: 2e-5)",
    )
    training_group.add_argument(
        "--batch-size",
        type=int,
        default=2,
        help="Per-device batch size (default: 2)",
    )
    training_group.add_argument(
        "--gradient-accumulation",
        type=int,
        default=8,
        help="Gradient accumulation steps (default: 8)",
    )
    training_group.add_argument(
        "--max-steps",
        type=int,
        help="Maximum training steps. If not set, calculated from num-epochs",
    )
    training_group.add_argument(
        "--num-epochs",
        type=float,
        default=1.0,
        help="Number of training epochs (default: 1.0). Ignored if max-steps is set",
    )
    training_group.add_argument(
        "--warmup-ratio",
        type=float,
        default=0.1,
        help="Warmup ratio (default: 0.1)",
    )

    # Logging and checkpointing
    logging_group = parser.add_argument_group("Logging and Checkpointing")
    logging_group.add_argument(
        "--logging-steps",
        type=int,
        default=10,
        help="Log every N steps (default: 10)",
    )
    logging_group.add_argument(
        "--save-steps",
        type=int,
        default=100,
        help="Save checkpoint every N steps (default: 100)",
    )
    logging_group.add_argument(
        "--eval-steps",
        type=int,
        default=100,
        help="Evaluate every N steps (default: 100)",
    )

    # Dataset configuration
    dataset_group = parser.add_argument_group("Dataset Configuration")
    dataset_group.add_argument(
        "--dataset-split",
        default="train",
        help="Dataset split to use for training (default: train)",
    )
    dataset_group.add_argument(
        "--eval-split",
        default="test",
        help="Dataset split to use for evaluation (default: test)",
    )
    dataset_group.add_argument(
        "--max-seq-length",
        type=int,
        default=2048,
        help="Maximum sequence length (default: 2048)",
    )

    # Misc
    misc_group = parser.add_argument_group("Miscellaneous")
    misc_group.add_argument(
        "--hf-token",
        help="Hugging Face API token (or set HF_TOKEN env var)",
    )
    misc_group.add_argument(
        "--private",
        action="store_true",
        help="Make output model private",
    )
    misc_group.add_argument(
        "--no-push",
        action="store_true",
        help="Don't push to Hub (save locally only)",
    )

    args = parser.parse_args()

    main(
        base_model=args.base_model,
        dataset=args.dataset,
        output_model=args.output_model,
        lora_r=args.lora_r,
        lora_alpha=args.lora_alpha,
        lora_dropout=args.lora_dropout,
        learning_rate=args.learning_rate,
        batch_size=args.batch_size,
        gradient_accumulation=args.gradient_accumulation,
        max_steps=args.max_steps,
        num_epochs=args.num_epochs,
        warmup_ratio=args.warmup_ratio,
        logging_steps=args.logging_steps,
        save_steps=args.save_steps,
        eval_steps=args.eval_steps,
        max_seq_length=args.max_seq_length,
        hf_token=args.hf_token,
        dataset_split=args.dataset_split,
        eval_split=args.eval_split,
        private=args.private,
        push_to_hub=not args.no_push,
    )