File size: 4,432 Bytes
ad44ad4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
import os, math, argparse, random
from PIL import Image
import torch
import numpy as np
from diffusers import FlowMatchEulerDiscreteScheduler
from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel

# File extensions accepted as input images (matched case-insensitively).
IMG_EXTS = {".jpg", ".jpeg", ".png", ".webp", ".bmp"}
# Inference dtype used for the whole pipeline.
dtype = torch.bfloat16

# FlowMatch Euler scheduler config for few-step (Lightning) inference.
# NOTE(review): base_shift == max_shift == log(3), so with dynamic shifting
# enabled the shift is effectively constant regardless of image seq length.
scheduler_config = {
    "base_image_seq_len": 256,
    "base_shift": math.log(3),
    "invert_sigmas": False,
    "max_image_seq_len": 8192,
    "max_shift": math.log(3),
    "num_train_timesteps": 1000,
    "shift": 1.0,
    "shift_terminal": None,
    "stochastic_sampling": False,
    "time_shift_type": "exponential",
    "use_beta_sigmas": False,
    "use_dynamic_shifting": True,
    "use_exponential_sigmas": False,
    "use_karras_sigmas": False,
}

# Editing instruction applied uniformly to every image in the batch.
FIX_PROMPT = ("seamlessly blend the object into the background, remove white sides and artifacts, "
              "smooth jagged edges, natural lighting and color consistency, photorealistic")

def iter_images(root_dir, exts=None):
    """Yield full paths of image files under *root_dir*, recursively.

    Args:
        root_dir: Directory tree to walk.
        exts: Optional set of lowercase extensions (including the leading
            dot) to accept. Defaults to the module-level IMG_EXTS set, so
            existing callers are unaffected.

    Yields:
        str: path of each file whose extension (case-insensitive) matches.
    """
    allowed = IMG_EXTS if exts is None else exts
    for dirpath, _, filenames in os.walk(root_dir):
        for name in filenames:
            # Lower-case the extension so ".JPG" matches ".jpg".
            if os.path.splitext(name)[1].lower() in allowed:
                yield os.path.join(dirpath, name)

def load_pipeline(base_model_path, lora_dir, lora_weight_name, device):
    """Build the Qwen image-edit pipeline with LoRA weights fused in.

    Args:
        base_model_path: Path of the pretrained Qwen-Image-Edit model.
        lora_dir: Directory holding the LoRA checkpoint.
        lora_weight_name: Filename of the LoRA safetensors file.
        device: Torch device string, e.g. "cuda" or "cpu".

    Returns:
        A QwenImageEditPlusPipeline moved to *device*, with the LoRA
        fused at scale 1.0 and the custom transformer class installed.
    """
    pipeline = QwenImageEditPlusPipeline.from_pretrained(
        base_model_path,
        scheduler=FlowMatchEulerDiscreteScheduler.from_config(scheduler_config),
        torch_dtype=dtype,
    ).to(device)
    pipeline.load_lora_weights(lora_dir, weight_name=lora_weight_name)
    pipeline.fuse_lora(lora_scale=1.0)
    # Swap the transformer's class in place so the project's forward() is
    # used without reloading weights. NOTE(review): assumes the two classes
    # are attribute/layout compatible — confirm against the project class.
    pipeline.transformer.__class__ = QwenImageTransformer2DModel
    return pipeline

@torch.no_grad()
def generate_single(pipe, input_image, prompt, seed, steps, true_cfg, device):
    """Run one seeded edit pass through *pipe* and return the result image.

    Args:
        pipe: Loaded QwenImageEditPlusPipeline.
        input_image: PIL image to edit.
        prompt: Text instruction for the edit.
        seed: RNG seed for reproducible generation.
        steps: Number of inference steps.
        true_cfg: True-CFG guidance scale.
        device: Device string the generator lives on (must match the pipe).

    Returns:
        The single generated PIL image.
    """
    rng = torch.Generator(device=device).manual_seed(seed)
    result = pipe(
        image=[input_image],
        prompt=prompt,
        negative_prompt=" ",
        num_inference_steps=steps,
        generator=rng,
        true_cfg_scale=true_cfg,
        num_images_per_prompt=1,
    )
    return result.images[0]

def main():
    """Batch-fix images with the Qwen edit pipeline, sharded across ranks.

    Walks --in_dir recursively, runs every image through the pipeline with
    FIX_PROMPT, and writes each result to the mirrored relative path under
    --out_dir. Work is split round-robin: image i is processed by the rank
    where i % world_size == rank. Per-image failures (open / generate /
    save) are logged and skipped so one bad file cannot kill the run.
    """
    p = argparse.ArgumentParser()
    p.add_argument("--in_dir", default="/mnt/prev_nas/qhy_1/datasets/flux_gen_images_size_change")
    p.add_argument("--out_dir", default="/mnt/prev_nas/qhy_1/datasets/flux_gen_images_size_change_fixed")
    p.add_argument("--base_model_path", default="/mnt/5T_nas/cwl/model/Qwen-Image-Edit-2509")
    p.add_argument("--lora_dir", default="/mnt/prev_nas/qhy/Qwen-Edit-2509-Multiple-angles")
    p.add_argument("--lora_weight_name", default="Qwen-Image-Edit-2509-Lightning-4steps-V1.0-bf16_dim1.safetensors")
    p.add_argument("--steps", type=int, default=4)
    p.add_argument("--true_cfg", type=float, default=1.0)
    p.add_argument("--seed", type=int, default=0, help=">=0 固定;<0 随机")
    p.add_argument("--overwrite", action="store_true")

    # Multi-GPU sharding parameters.
    p.add_argument("--rank", type=int, default=0)
    p.add_argument("--world_size", type=int, default=1)
    args = p.parse_args()

    # Validate sharding explicitly: `assert` is stripped under `python -O`.
    if not (0 <= args.rank < args.world_size):
        p.error(f"require 0 <= rank < world_size, got rank={args.rank} world_size={args.world_size}")

    device = "cuda" if torch.cuda.is_available() else "cpu"
    os.makedirs(args.out_dir, exist_ok=True)

    pipe = load_pipeline(args.base_model_path, args.lora_dir, args.lora_weight_name, device)

    # Sort for a stable ordering so every rank agrees on index -> image.
    all_imgs = sorted(iter_images(args.in_dir))
    print(f"rank {args.rank}/{args.world_size} total imgs: {len(all_imgs)}")

    max_seed = np.iinfo(np.int32).max

    for i, img_path in enumerate(all_imgs):
        # Round-robin shard: this rank handles every world_size-th image.
        if (i % args.world_size) != args.rank:
            continue

        # Mirror the input directory structure under out_dir.
        rel = os.path.relpath(img_path, args.in_dir)
        out_path = os.path.join(args.out_dir, rel)
        os.makedirs(os.path.dirname(out_path), exist_ok=True)

        # Skip already-processed images unless --overwrite is given.
        if (not args.overwrite) and os.path.exists(out_path):
            continue

        try:
            img = Image.open(img_path).convert("RGB")
        except Exception as e:
            print("open failed:", img_path, e)
            continue

        # Fixed seed when --seed >= 0, otherwise a fresh random seed per image.
        seed = args.seed if args.seed >= 0 else random.randint(0, max_seed)

        try:
            out_img = generate_single(pipe, img, FIX_PROMPT, seed, args.steps, args.true_cfg, device)
        except Exception as e:
            print("gen failed:", img_path, e)
            continue

        try:
            out_img.save(out_path)
        except Exception as e:
            # Best-effort like the other steps: log, skip, keep the run alive
            # (e.g. disk full or an unwritable NAS path mid-run).
            print("save failed:", out_path, e)

    print(f"rank {args.rank} done.")

if __name__ == "__main__":
    main()