Model Card for CIawevy/Flux.1-dev-TextPecker-SQPA

This model is trained using Flow-GRPO with LoRA. We provide only the LoRA weights here, so you will need to download the Flux.1-dev base model first.

Model Details

Model Sources

Uses

import os
import torch
from diffusers import FluxPipeline
from peft import PeftModel

# Force the pure-Python protobuf implementation (consistent with the FLUX
# reference inference code). NOTE(review): presumably a workaround for
# protobuf C-extension incompatibilities in the tokenizer stack — confirm.
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"

def load_model(model_path, lora_path=None):
    """Build a FLUX pipeline on CUDA and optionally attach LoRA weights.

    Args:
        model_path: Hugging Face repo id or local path of the FLUX base model.
        lora_path: Optional local path to PEFT LoRA weights; skipped when the
            path is missing or ``None``.

    Returns:
        A ``FluxPipeline`` in bfloat16 on CUDA, ready for inference.
    """
    # bfloat16 on CUDA matches the reference FLUX inference setup.
    pipe = FluxPipeline.from_pretrained(model_path, torch_dtype=torch.bfloat16)
    pipe = pipe.to("cuda")

    # The reference inference code runs with the safety checker disabled.
    pipe.safety_checker = None

    # Keep the denoising progress bar compact and on its own terminal row.
    pipe.set_progress_bar_config(
        position=1,
        disable=False,
        leave=False,
        desc="Timestep",
        dynamic_ncols=True,
    )

    # Wrap the transformer with the LoRA adapter when a valid path is given.
    if lora_path is not None and os.path.exists(lora_path):
        pipe.transformer = PeftModel.from_pretrained(pipe.transformer, lora_path)
        pipe.transformer.eval()  # inference mode for the adapted transformer
        print(f"Successfully loaded LoRA weights from: {lora_path}")

    return pipe

# ---- Configuration (mirrors the FLUX reference inference parameters) ----
model_id = "black-forest-labs/FLUX.1-dev"
lora_ckpt_path = "CIawevy/Flux.1-dev-TextPecker-SQPA"  # Replace with your FLUX LoRA path
device = "cuda"

# Generation settings taken verbatim from the FLUX reference inference code.
gen_kwargs = {
    "negative_prompt": " ",
    "width": 1024,               # standard FLUX resolution
    "height": 1024,
    "num_inference_steps": 50,
    "guidance_scale": 3.5,
    "max_sequence_length": 512,  # FLUX-specific text-encoder sequence cap
}

# ---- Load FLUX base model plus LoRA adapter ----
pipe = load_model(model_id, lora_ckpt_path)

# ---- Generate one image with a fixed seed for reproducibility ----
prompt = 'a weathered cave explorers journal page, with the phrase "TextPecker" prominently written in faded ink, surrounded by sketches of ancient ruins and cryptic symbols, under a dim, mystical light.'
seeded_generator = torch.Generator(device=device).manual_seed(42)
result = pipe(
    prompt=prompt,
    generator=seeded_generator,
    **gen_kwargs,
)
image = result.images[0]

# ---- Persist the output ----
output_name = "TextPecker_flux_demo.png"
image.save(output_name)
print(f"Image saved as: {output_name}")
Downloads last month
-
Inference Providers NEW
This model isn't deployed by any Inference Provider. 🙋 Ask for provider support

Model tree for CIawevy/Flux.1-dev-TextPecker-SQPA

Adapter
(36544)
this model

Collection including CIawevy/Flux.1-dev-TextPecker-SQPA