Qwen-Image-Edit LoRA Adapter

This repository contains LoRA weights for Qwen-Image-Edit, fine-tuned for instruction-based image editing tasks.

Model Details

  • Base Model: Qwen-Image-Edit-2509 (replace with the exact base model this adapter was trained against, if different)
  • Training: Fine-tuned using PEFT/LoRA.

Usage

To use this LoRA, you need to load the base QwenImageEditPlusPipeline and apply the adapter weights.

Since the LoRA keys trained via PEFT often differ from what Diffusers expects, the script below includes an automatic conversion step to ensure the weights load correctly.

Python Inference Script

You can run this script to edit a single image.

import os
import torch
from PIL import Image
from diffusers import QwenImageEditPlusPipeline
from safetensors.torch import load_file, save_file

def load_lora_with_conversion(pipeline, lora_folder_path, weight_name="adapter_model.safetensors"):
    """
    Load LoRA weights into *pipeline*, converting PEFT-style keys to the
    Diffusers naming scheme on first use.

    PEFT checkpoints prefix keys with ``base_model.model`` while Diffusers
    expects a ``transformer`` prefix; the converted file is cached next to
    the original so the conversion runs only once.

    Args:
        pipeline: A Diffusers pipeline exposing ``load_lora_weights`` and
            ``set_adapters``.
        lora_folder_path: Folder containing the LoRA safetensors file.
        weight_name: Filename of the original (PEFT-format) weights.

    Raises:
        FileNotFoundError: If neither the converted nor the original
            weights file exists in ``lora_folder_path``.
    """
    # Define paths
    original_weights_path = os.path.join(lora_folder_path, weight_name)
    converted_weights_path = os.path.join(lora_folder_path, "adapter_model_converted.safetensors")

    # Convert once and cache the result on disk; skip if already converted.
    if not os.path.exists(converted_weights_path):
        print(f"⚠️ Converted weights not found. Converting {original_weights_path}...")

        if not os.path.exists(original_weights_path):
            raise FileNotFoundError(f"Cannot find LoRA weights at {original_weights_path}")

        state_dict = load_file(original_weights_path)

        # Conversion logic: rename the PEFT 'base_model.model' prefix to
        # the 'transformer' prefix Diffusers expects.
        new_state_dict = {
            key.replace("base_model.model", "transformer"): value
            for key, value in state_dict.items()
        }

        save_file(new_state_dict, converted_weights_path)
        print(f"✅ Conversion saved to {converted_weights_path}")
    else:
        print(f"✅ Found converted weights at {converted_weights_path}")

    # Load the converted LoRA and activate it at full strength.
    pipeline.load_lora_weights(
        lora_folder_path,
        weight_name="adapter_model_converted.safetensors",
        adapter_name="lora",
    )
    pipeline.set_adapters(["lora"], adapter_weights=[1.0])
    print("🚀 LoRA loaded and active.")

def main():
    """Run one LoRA-assisted edit on a single image and save the result.

    Loads the base QwenImageEditPlusPipeline, applies this repository's
    LoRA adapter, edits ``image_path`` according to ``prompt``, and writes
    the output to ``output_path``. Requires a CUDA device.
    """
    # --- Configuration ---
    # 1. Path to the base model (Local path or HuggingFace ID)
    base_model_path = "Qwen/Qwen-Image-Edit-2509"  # Replace with your local path if needed

    # 2. Path to THIS LoRA folder (where you downloaded this repo)
    lora_path = "./"  # Current directory if you cloned the repo

    # 3. Input Image and Prompt
    image_path = "test_image.jpg"  # Replace with your image path
    prompt = "remove the dog and replace it with a cat"
    output_path = "result.png"
    # ---------------------

    # Load Pipeline (bfloat16 keeps memory usage manageable on GPU)
    print(f"Loading base model from: {base_model_path}")
    pipeline = QwenImageEditPlusPipeline.from_pretrained(
        base_model_path,
        torch_dtype=torch.bfloat16
    )
    pipeline.to("cuda")

    # Load LoRA — abort early if the weights are missing or fail to load.
    try:
        load_lora_with_conversion(pipeline, lora_path)
    except Exception as e:
        print(f"Error loading LoRA: {e}")
        return

    # Load Image
    if not os.path.exists(image_path):
        print(f"Error: Image not found at {image_path}")
        return

    original_image = Image.open(image_path).convert("RGB")

    # Inference
    print("🎨 Generating edit...")
    inputs = {
        "image": original_image,
        "prompt": prompt,
        "generator": torch.manual_seed(42),  # Fixed seed for reproducibility
        "true_cfg_scale": 4.0,    # Recommended for Qwen-Edit
        "guidance_scale": 1.0,
        "negative_prompt": " ",
        "num_inference_steps": 40,
    }

    # inference_mode disables autograd bookkeeping during generation.
    with torch.inference_mode():
        output = pipeline(**inputs)
        output_image = output.images[0]
        output_image.save(output_path)
        print(f"✅ Image saved to {output_path}")

if __name__ == "__main__":
    main()
Downloads last month

-

Downloads are not tracked for this model. How to track
Inference Providers NEW
This model isn't deployed by any Inference Provider. 🙋 Ask for provider support