|
|
import os |
|
|
import json |
|
|
import cv2 |
|
|
import numpy as np |
|
|
from pathlib import Path |
|
|
import random |
|
|
|
|
|
# Instruction templates keyed by size-edit task id. The placeholder "<>" is
# replaced with the object class (wrapped in angle brackets) in main().
# NOTE(review): the task "28" templates promise a 1.2x resize, but OPERATIONS
# applies a 1.3x scale for task "28" -- confirm which value is intended.
# NOTE(review): tasks "29"-"31" phrase the edit in absolute cm, while the
# actual edit is a fixed scale factor -- verify this approximation is intended.
OBJECT_SIZE_TEMPLATES = {
    "28": [
        "There is a <>. Adjust the dimensions of <> to make it 1.2 times its original size.",
        "There is a <>. Enlarge the <> such that it becomes 1.2 times its starting size.",
        "There is a <>. Ensure the <> grows to precisely 1.2 times its original size.",
        "There is a <>. Increase the size of <> to 1.2 times its original size.",
        "There is a <>. Resize the <> to ensure it is 1.2 times its original dimensions.",
        "There is a <>. Scale the <> up so that it reaches 1.2 times its initial dimensions.",
        "There is a <>. Transform the <> by setting its size to 1.2 times its original value.",
    ],
    "29": [
        "There is a <>. Adjust the height of <> to be 20cm taller.",
        "There is a <>. Enlarge the <> in height by 20cm.",
        "There is a <>. Increase the height of <> by 20cm.",
        "There is a <>. Raise <>'s height by 20cm",
    ],
    "30": [
        "There is a <>. Adjust the length of <> to be 50cm longer.",
        "There is a <>. Enlarge the <> in length by 50cm.",
        "There is a <>. Extend <>'s length by 50cm",
        "There is a <>. Increase the length of <> by 50cm.",
    ],
    "31": [
        "There is a <>. Adjust the width of <> to be 40cm wider.",
        "There is a <>. Enlarge the <> in width by 40cm.",
        "There is a <>. Increase the width of <> by 40cm.",
        "There is a <>. Widen <>'s width by 40cm",
    ],
}
|
|
|
|
|
# Per-task (scale_width, scale_height) factors passed to
# resize_and_paste_back_with_repair_mask() in main().
# NOTE(review): tasks "30" (length) and "31" (width) use identical factors,
# so they produce the same pixel edit with different instructions -- confirm
# that is intended, and that "width" here maps to the horizontal image axis.
OPERATIONS = {
    "28": (1.3, 1.3),
    "29": (1.0, 1.3),
    "30": (1.3, 1.0),
    "31": (1.3, 1.0),
}
|
|
|
|
|
def is_mask_single_component(mask_path):
    """Return True iff the mask at *mask_path* holds exactly one connected
    foreground component.

    Unreadable mask files are reported as False rather than raising.
    """
    mask_img = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
    if mask_img is None:
        return False
    _, thresholded = cv2.threshold(mask_img, 127, 255, cv2.THRESH_BINARY)
    label_count, _ = cv2.connectedComponents(thresholded)
    # Label 0 is the background, so a single object yields exactly 2 labels.
    return label_count == 2
|
|
|
|
|
def resize_and_paste_back_with_repair_mask(
    image_path, mask_path, scale_width=1.0, scale_height=1.0, output_image_path="output.png"
):
    """Rescale the masked object about its bounding-box center and composite
    it back onto the image; the object's original footprint is painted white.

    The object's original pixels are blanked to white first (a "repair"
    region, presumably intended for a later inpainting pass -- TODO confirm),
    then the rescaled crop is alpha-blended on top, using the mask as the
    crop's alpha channel.

    Args:
        image_path: Path to a BGR image readable by cv2.imread.
        mask_path: Path to a single-channel mask of the same size; non-zero
            pixels mark the object.
        scale_width: Horizontal scale factor for the object's bounding box.
        scale_height: Vertical scale factor for the object's bounding box.
        output_image_path: Destination file; parent directories are created
            as needed.

    Returns:
        The composited BGR image as a numpy array (also written to disk).

    Raises:
        FileNotFoundError: if the image or mask cannot be read.
        ValueError: if the mask contains no non-zero pixels.
        AssertionError: if mask and image dimensions differ.
    """
    image = cv2.imread(image_path)
    mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
    if image is None or mask is None:
        raise FileNotFoundError("Image or mask not found!")

    h, w = image.shape[:2]
    assert mask.shape == (h, w), "Mask and image size mismatch!"

    # Tight bounding box around every non-zero mask pixel.
    coords = cv2.findNonZero(mask)
    if coords is None:
        raise ValueError("No object in mask!")
    x, y, obj_w, obj_h = cv2.boundingRect(coords)

    # The resized object stays centered on the original box center.
    center_x = x + obj_w // 2
    center_y = y + obj_h // 2

    # Stack the mask as a 4th (alpha) channel so the crop carries its matte.
    rgba = np.dstack([image, mask])
    obj_rgba = rgba[y:y+obj_h, x:x+obj_w]

    new_w = max(1, int(obj_w * scale_width))
    new_h = max(1, int(obj_h * scale_height))
    # NOTE(review): INTER_AREA is tuned for downscaling, yet the factors used
    # by this script are >= 1.0 (upscaling) -- confirm this is intentional.
    resized_obj = cv2.resize(obj_rgba, (new_w, new_h), interpolation=cv2.INTER_AREA)

    # Top-left corner of the resized object, centered on the old box center.
    new_x = center_x - new_w // 2
    new_y = center_y - new_h // 2

    # Destination rectangle clipped to the image bounds...
    dst_x_start = max(0, new_x)
    dst_y_start = max(0, new_y)
    dst_x_end = min(w, new_x + new_w)
    dst_y_end = min(h, new_y + new_h)

    # ...and the matching source rectangle inside the resized crop.
    src_x_start = max(0, -new_x)
    src_y_start = max(0, -new_y)
    src_x_end = min(new_w, w - dst_x_start)
    src_y_end = min(new_h, h - dst_y_start)

    # White out the object's original footprint before pasting the copy.
    background = image.copy()
    background[mask > 0] = [255, 255, 255]
    output_img = background.copy()

    if src_x_end > src_x_start and src_y_end > src_y_start:
        obj_part = resized_obj[src_y_start:src_y_end, src_x_start:src_x_end]
        alpha = obj_part[:, :, 3].astype(np.float32) / 255.0
        bg_part = output_img[dst_y_start:dst_y_end, dst_x_start:dst_x_end]
        fg_part = obj_part[:, :, :3]

        # Defensive clamp: source and destination crops should already agree,
        # but trim both to the common size to absorb 1-px rounding drift.
        hh = min(fg_part.shape[0], bg_part.shape[0], alpha.shape[0])
        ww = min(fg_part.shape[1], bg_part.shape[1], alpha.shape[1])
        fg_part = fg_part[:hh, :ww]
        bg_part = bg_part[:hh, :ww]
        alpha = alpha[:hh, :ww]

        # Standard alpha compositing: fg*a + bg*(1-a).
        blended = fg_part * alpha[..., None] + bg_part * (1 - alpha[..., None])
        output_img[dst_y_start:dst_y_start+hh, dst_x_start:dst_x_start+ww] = blended.astype(np.uint8)

    # Bug fix: os.makedirs("") raises FileNotFoundError when the output path
    # has no directory component (e.g. the default "output.png"), so only
    # create directories when there is one.
    out_dir = os.path.dirname(output_image_path)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    cv2.imwrite(output_image_path, output_img)
    return output_img
|
|
|
|
|
def load_object_class_mapping(genspace_json_path):
    """Load the sample_id -> object_class lookup table from the genspace
    prompts JSON file (expects a top-level "samples" list)."""
    with open(genspace_json_path, "r", encoding="utf-8") as fh:
        payload = json.load(fh)
    mapping = {}
    for sample in payload["samples"]:
        mapping[sample["sample_id"]] = sample["object_class"]
    return mapping
|
|
|
|
|
def main():
    """Generate size-edited images for every usable single-object mask and
    write one JSONL annotation line per (image, task) pair.

    Pipeline per mask: verify it has a single connected component, look up
    the object class by filename stem, apply every operation in OPERATIONS,
    and record a randomly templated instruction for each produced image.
    """
    MASK_DIR = "/mnt/prev_nas/qhy_1/datasets/flux_gen_images_masks"
    IMAGE_DIR = "/mnt/prev_nas/qhy_1/datasets/flux_gen_images"
    OUTPUT_DIR = "/mnt/prev_nas/qhy_1/datasets/flux_gen_images_size_change"
    GENSPACE_JSON = "/mnt/prev_nas/qhy_1/datasets/unedit_image_prompts/genspace_prompts_vlm.json"

    OUTPUT_JSONL_PATH = os.path.join(OUTPUT_DIR, "size_edit_annotations.jsonl")

    os.makedirs(OUTPUT_DIR, exist_ok=True)

    # sample_id (== mask/image filename stem) -> object class string.
    obj_class_map = load_object_class_mapping(GENSPACE_JSON)

    mask_files = sorted([f for f in os.listdir(MASK_DIR) if f.endswith(".png")])
    print(f"Found {len(mask_files)} mask files.")

    num_imgs_generated = 0
    num_lines_written = 0
    num_skipped_multi = 0
    num_missing_img = 0

    # "w" mode: the annotation file is rebuilt from scratch on every run,
    # even though already-existing output images are not regenerated.
    with open(OUTPUT_JSONL_PATH, "w", encoding="utf-8") as f_out:
        for mask_file in mask_files:
            mask_path = os.path.join(MASK_DIR, mask_file)

            # Only masks with exactly one connected component are usable.
            if not is_mask_single_component(mask_path):
                num_skipped_multi += 1
                print(f"💥 Skipping multi-component mask: {mask_file}")
                continue

            stem = Path(mask_file).stem

            object_class = obj_class_map.get(stem, None)
            if object_class is None:
                print(f"💥 No class for {stem}")
                continue

            # The source image is assumed to share the mask's exact filename
            # (including the .png extension) -- TODO confirm for this dataset.
            image_path = os.path.join(IMAGE_DIR, mask_file)
            if not os.path.exists(image_path):
                num_missing_img += 1
                print(f"💥 Missing image for {stem}")
                continue

            for task_id, (scale_w, scale_h) in OPERATIONS.items():
                output_name = f"{stem}_{task_id}.png"
                output_path = os.path.join(OUTPUT_DIR, output_name)

                # Skip regeneration when the edited image already exists on disk.
                if not os.path.exists(output_path):
                    try:
                        resize_and_paste_back_with_repair_mask(
                            image_path=image_path,
                            mask_path=mask_path,
                            scale_width=scale_w,
                            scale_height=scale_h,
                            output_image_path=output_path,
                        )
                        num_imgs_generated += 1
                    except Exception as e:
                        # Best-effort: log the failure and move to the next
                        # task; no JSONL line is written for failed images.
                        print(f"💥 Error generating {output_name}: {e}")
                        continue
                else:
                    print(f"✅ Already exists: {output_name}")

                # A JSONL line is written whether the image was just generated
                # or already existed; the instruction template is re-rolled
                # randomly on every run.
                template = random.choice(OBJECT_SIZE_TEMPLATES[task_id])
                instruction = template.replace("<>", f"<{object_class}>")

                item = {
                    "task_type": "edit",
                    "instruction": instruction,
                    "input_images": [image_path],
                    "output_image": output_path,
                    "object_class": object_class,
                    "task_id": int(task_id),
                }
                f_out.write(json.dumps(item, ensure_ascii=False) + "\n")
                num_lines_written += 1

    print("\nDone.")
    print("images generated:", num_imgs_generated)
    print("jsonl lines written:", num_lines_written)
    print("skipped (multi-components):", num_skipped_multi)
    print("missing images:", num_missing_img)
    print("jsonl saved to:", OUTPUT_JSONL_PATH)
|
|
|
|
|
# Script entry point: run the dataset generation only when executed directly.
if __name__ == "__main__":
    main()
|
|
|