|
|
import os, math, argparse, random |
|
|
from PIL import Image |
|
|
import torch |
|
|
import numpy as np |
|
|
from diffusers import FlowMatchEulerDiscreteScheduler |
|
|
from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline |
|
|
from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel |
|
|
|
|
|
# Lower-case file extensions (dot included) accepted as input images by iter_images().
IMG_EXTS = {".jpg", ".jpeg", ".png", ".webp", ".bmp"}

# Weight/compute dtype used when loading the pipeline.
dtype = torch.bfloat16
|
|
|
|
|
# Configuration for FlowMatchEulerDiscreteScheduler (flow-matching Euler sampler).
# NOTE(review): base_shift == max_shift == ln(3) with use_dynamic_shifting=True,
# so the dynamically interpolated shift presumably resolves to the same value for
# every image sequence length — confirm against diffusers' dynamic-shifting logic.
scheduler_config = {
    "base_image_seq_len": 256,
    "base_shift": math.log(3),
    "invert_sigmas": False,
    "max_image_seq_len": 8192,
    "max_shift": math.log(3),
    "num_train_timesteps": 1000,
    "shift": 1.0,
    "shift_terminal": None,
    "stochastic_sampling": False,
    "time_shift_type": "exponential",
    "use_beta_sigmas": False,
    "use_dynamic_shifting": True,
    "use_exponential_sigmas": False,
    "use_karras_sigmas": False,
}
|
|
|
|
|
# Edit instruction sent to the pipeline for every image in the batch.
FIX_PROMPT = ("seamlessly blend the object into the background, remove white sides and artifacts, "
              "smooth jagged edges, natural lighting and color consistency, photorealistic")
|
|
|
|
|
def iter_images(root_dir, exts=None):
    """Recursively yield paths of image files under *root_dir*.

    Args:
        root_dir: directory tree to scan (os.walk order).
        exts: optional set of lower-case extensions (dot included) to
            accept; defaults to the module-level IMG_EXTS. New optional
            parameter — existing single-argument callers are unaffected.

    Yields:
        str: full path of each file whose extension (case-insensitive)
        is in *exts*.
    """
    if exts is None:
        exts = IMG_EXTS
    for dirpath, _dirs, filenames in os.walk(root_dir):
        for name in filenames:
            if os.path.splitext(name)[1].lower() in exts:
                yield os.path.join(dirpath, name)
|
|
|
|
|
def load_pipeline(base_model_path, lora_dir, lora_weight_name, device):
    """Assemble the edit pipeline: scheduler + base weights + fused LoRA.

    Loads the base model with the module-level scheduler_config and dtype,
    moves it to *device*, fuses the given LoRA at scale 1.0, and re-classes
    the transformer to the project-local QwenImageTransformer2DModel.
    """
    pipeline = QwenImageEditPlusPipeline.from_pretrained(
        base_model_path,
        scheduler=FlowMatchEulerDiscreteScheduler.from_config(scheduler_config),
        torch_dtype=dtype,
    )
    pipeline = pipeline.to(device)
    pipeline.load_lora_weights(lora_dir, weight_name=lora_weight_name)
    pipeline.fuse_lora(lora_scale=1.0)
    # Swap in the local transformer implementation (same weights, local code path).
    pipeline.transformer.__class__ = QwenImageTransformer2DModel
    return pipeline
|
|
|
|
|
@torch.no_grad()
def generate_single(pipe, input_image, prompt, seed, steps, true_cfg, device):
    """Run one edit pass through *pipe* and return the single output image.

    A fresh torch.Generator seeded with *seed* on *device* makes the
    call reproducible. Exactly one image is produced per call.
    """
    rng = torch.Generator(device=device).manual_seed(seed)
    result = pipe(
        image=[input_image],
        prompt=prompt,
        negative_prompt=" ",
        num_inference_steps=steps,
        generator=rng,
        true_cfg_scale=true_cfg,
        num_images_per_prompt=1,
    )
    return result.images[0]
|
|
|
|
|
def main():
    """CLI entry point: batch-fix every image under --in_dir.

    Work is sharded round-robin across processes via --rank/--world_size
    (image i is handled by rank i % world_size). Outputs mirror the input
    directory layout under --out_dir. Per-image failures are printed and
    skipped so a single bad file cannot abort the run.

    Raises:
        ValueError: if rank is not in [0, world_size).
    """
    args = _parse_args()

    # Explicit validation instead of `assert` (asserts are stripped under -O).
    if not (0 <= args.rank < args.world_size):
        raise ValueError(
            f"rank must satisfy 0 <= rank < world_size; "
            f"got rank={args.rank}, world_size={args.world_size}"
        )

    device = "cuda" if torch.cuda.is_available() else "cpu"
    os.makedirs(args.out_dir, exist_ok=True)

    pipe = load_pipeline(args.base_model_path, args.lora_dir, args.lora_weight_name, device)

    # Sorted so every rank sees the same ordering and sharding is stable.
    all_imgs = sorted(iter_images(args.in_dir))
    print(f"rank {args.rank}/{args.world_size} total imgs: {len(all_imgs)}")

    max_seed = np.iinfo(np.int32).max

    for i, img_path in enumerate(all_imgs):
        # Round-robin sharding: this rank only handles its own slice.
        if (i % args.world_size) != args.rank:
            continue

        # Mirror the input-relative path under out_dir.
        rel = os.path.relpath(img_path, args.in_dir)
        out_path = os.path.join(args.out_dir, rel)
        os.makedirs(os.path.dirname(out_path), exist_ok=True)

        # Resume-friendly: skip existing outputs unless --overwrite is set.
        if (not args.overwrite) and os.path.exists(out_path):
            continue

        try:
            img = Image.open(img_path).convert("RGB")
        except Exception as e:
            # Best-effort batch job: report the bad file and move on.
            print("open failed:", img_path, e)
            continue

        # seed >= 0 pins the seed for reproducibility; seed < 0 randomizes per image.
        seed = args.seed if args.seed >= 0 else random.randint(0, max_seed)

        try:
            out_img = generate_single(pipe, img, FIX_PROMPT, seed, args.steps, args.true_cfg, device)
        except Exception as e:
            print("gen failed:", img_path, e)
            continue

        out_img.save(out_path)

    print(f"rank {args.rank} done.")


def _parse_args():
    """Build the CLI parser and return the parsed arguments."""
    p = argparse.ArgumentParser()
    p.add_argument("--in_dir", default="/mnt/prev_nas/qhy_1/datasets/flux_gen_images_size_change")
    p.add_argument("--out_dir", default="/mnt/prev_nas/qhy_1/datasets/flux_gen_images_size_change_fixed")
    p.add_argument("--base_model_path", default="/mnt/5T_nas/cwl/model/Qwen-Image-Edit-2509")
    p.add_argument("--lora_dir", default="/mnt/prev_nas/qhy/Qwen-Edit-2509-Multiple-angles")
    p.add_argument("--lora_weight_name", default="Qwen-Image-Edit-2509-Lightning-4steps-V1.0-bf16_dim1.safetensors")
    p.add_argument("--steps", type=int, default=4)
    p.add_argument("--true_cfg", type=float, default=1.0)
    p.add_argument("--seed", type=int, default=0, help=">=0 固定;<0 随机")
    p.add_argument("--overwrite", action="store_true")
    p.add_argument("--rank", type=int, default=0)
    p.add_argument("--world_size", type=int, default=1)
    return p.parse_args()
|
|
|
|
|
# Script entry point: run the batch job when executed directly.
if __name__ == "__main__":
    main()
|
|
|