|
|
import os |
|
|
# Must be set BEFORE torch / model imports below so that whichever library
# reads it picks the stock torch layernorm instead of a fused/custom kernel.
# NOTE(review): could not find the consumer of this flag in this file —
# presumably read inside src.* or a patched diffusers; confirm before removing.
os.environ["FORCE_TORCH_LAYERNORM"] = "1"
|
|
import sys |
|
|
import torch |
|
|
import gradio as gr |
|
|
import numpy as np |
|
|
import json |
|
|
import cv2 |
|
|
from PIL import Image |
|
|
from datetime import datetime |
|
|
import tempfile |
|
|
import os.path as osp |
|
|
|
|
|
|
|
|
from src.condition import Condition |
|
|
from src.SubjectGeniusTransformer2DModel import SubjectGeniusTransformer2DModel |
|
|
from src.SubjectGeniusPipeline import SubjectGeniusPipeline |
|
|
from accelerate.utils import set_seed |
|
|
|
|
|
|
|
|
# Inference precision for the transformer and pipeline.
weight_dtype = torch.bfloat16

# Prefer the first CUDA device; fall back to CPU when no GPU is available.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Populated lazily by load_model(); None until the model has been loaded.
transformer = None

# Populated lazily by load_model(); generate_image() triggers loading on demand.
pipe = None

# Scratch directory for intermediate files.
# NOTE(review): created at import time but never referenced or cleaned up in
# this file — TODO confirm it is still needed.
TEMP_DIR = tempfile.mkdtemp()


# Static configuration for model loading and inference.
DEFAULT_CONFIG = {
    # Base FLUX.1-schnell checkpoint used to build the pipeline.
    "pretrained_model_name_or_path": "/data/ydchen/VLP/SubjectGenius/model/FLUX.1-schnell",
    # Transformer weights loaded separately (replaces the pipeline's own).
    "transformer": "/data/ydchen/VLP/SubjectGenius/model/FLUX.1-schnell/transformer",
    # One condition LoRA adapter is loaded per entry, keyed by this name.
    "condition_types": ["fill", "subject"],
    # Extra denoising LoRA, only loaded for the "training-based" version.
    "denoising_lora": "/data/ydchen/VLP/SubjectGenius/model/Subject_genuis/Denoising_LoRA/subject_fill_union",
    "denoising_lora_weight": 1.0,
    # Directory holding <condition_type>.safetensors adapter files.
    "condition_lora_dir": "/data/ydchen/VLP/SubjectGenius/model/Subject_genuis/Condition_LoRA",
    # NOTE(review): generation below hard-codes MODEL_SIZE = (512, 512) rather
    # than reading this key — keep the two in sync.
    "resolution": 512,
    "num_inference_steps": 8,
    "max_sequence_length": 512
}
|
|
|
|
|
def load_model():
    """Load the transformer, its condition LoRA adapters, and the pipeline.

    Populates the module-level ``transformer`` and ``pipe`` globals.  Called
    once at startup in ``__main__`` and lazily from ``generate_image`` when
    ``pipe`` is still ``None``.

    Returns:
        A (Chinese) status string for display in the UI.
    """
    global transformer, pipe

    print("开始加载transformer模型...")

    transformer = SubjectGeniusTransformer2DModel.from_pretrained(
        pretrained_model_name_or_path=DEFAULT_CONFIG["transformer"],
    ).to(device=device, dtype=weight_dtype)
    print("transformer模型加载完成")

    print("开始加载condition LoRA...")

    # One adapter per condition type, named after the condition so that
    # set_adapters() can (de)activate them by name later.
    for condition_type in DEFAULT_CONFIG["condition_types"]:
        print(f"加载{condition_type} LoRA...")
        transformer.load_lora_adapter(
            f"{DEFAULT_CONFIG['condition_lora_dir']}/{condition_type}.safetensors",
            adapter_name=condition_type
        )
    print("所有condition LoRA加载完成")

    print("开始创建pipeline...")

    # transformer=None so the pipeline does not load a second transformer copy;
    # the LoRA-equipped transformer is attached right after.
    pipe = SubjectGeniusPipeline.from_pretrained(
        DEFAULT_CONFIG["pretrained_model_name_or_path"],
        torch_dtype=weight_dtype,
        transformer=None
    )
    print("pipeline创建完成")

    print("设置transformer...")
    pipe.transformer = transformer

    print("设置adapter...")

    # Activate all condition adapters (default weights).  The original
    # `[i for i in ...]` was a pointless copy-comprehension; list() is clearer.
    pipe.transformer.set_adapters(list(DEFAULT_CONFIG["condition_types"]))
    pipe = pipe.to(device)
    print("模型完全加载完成!")

    return "模型加载完成!"
|
|
|
|
|
def process_image_for_display(image_array):
    """Normalize an image to an RGB numpy array, keeping its original size.

    Accepts a PIL image or a numpy array (grayscale, RGB, or RGBA) and
    returns an H×W×3 array; returns None for None input.
    """
    if image_array is None:
        return None

    # Unify on a numpy array for the channel checks below.
    arr = np.array(image_array) if isinstance(image_array, Image.Image) else image_array

    if len(arr.shape) == 2:
        # Single-channel image: replicate into three RGB channels.
        arr = cv2.cvtColor(arr, cv2.COLOR_GRAY2RGB)
    elif arr.shape[2] == 4:
        # RGBA: discard the alpha channel.
        arr = arr[:, :, :3]

    return arr
|
|
|
|
|
def save_image_for_model(image_array, path):
    """Save an image (PIL or numpy) to *path* for use as model input.

    Args:
        image_array: PIL Image, numpy array, or None.
        path: Destination file path.

    Returns:
        The path on success, or None when image_array is None.
    """
    if image_array is None:
        return None

    # BUG FIX: os.path.dirname(path) is "" for a bare filename, and
    # os.makedirs("") raises FileNotFoundError — only create a directory
    # when there actually is one in the path.
    directory = os.path.dirname(path)
    if directory:
        os.makedirs(directory, exist_ok=True)

    if isinstance(image_array, Image.Image):
        image_array.save(path)
        return path

    # Normalize arrays to RGB first so the saved file matches display format.
    Image.fromarray(process_image_for_display(image_array)).save(path)
    return path
|
|
|
|
|
def preserve_aspect_ratio(image, target_size=(512, 512)):
    """Fit *image* inside *target_size* without distortion.

    The image is scaled to touch the target box on its longer side and
    centered on a white canvas; the untouched margins stay white.
    """
    pil_image = Image.fromarray(image) if isinstance(image, np.ndarray) else image

    width, height = pil_image.size
    ratio = width / height

    # White canvas at the final size; the scaled image is pasted onto it.
    canvas = Image.new("RGB", target_size, (255, 255, 255))

    # Wide images are limited by target width, tall/square ones by height.
    if ratio > 1:
        fit_w = target_size[0]
        fit_h = int(fit_w / ratio)
    else:
        fit_h = target_size[1]
        fit_w = int(fit_h * ratio)

    scaled = pil_image.resize((fit_w, fit_h), Image.LANCZOS)

    # Center the scaled image on the canvas.
    offset = ((target_size[0] - fit_w) // 2,
              (target_size[1] - fit_h) // 2)
    canvas.paste(scaled, offset)

    return canvas
|
|
|
|
|
def generate_image(
    prompt,
    subject_image,
    background_image,
    x1, y1, x2, y2,
    version="training-free",
    seed=0,
    num_inference_steps=8
):
    """Generate an image placing the subject into the selected background box.

    Args:
        prompt: Text description of the desired result.
        subject_image: Subject reference (numpy array or PIL image).
        background_image: Background/fill image (numpy array or PIL image).
        x1, y1, x2, y2: Bounding-box corners in ORIGINAL background-image
            coordinates, as strings or numbers (from the UI textboxes).
        version: "training-free" (condition LoRAs only) or "training-based"
            (additionally loads the denoising LoRA).
        seed: Random seed; passed to accelerate.set_seed when not None.
        num_inference_steps: Diffusion steps for the pipeline.

    Returns:
        (concat_image, result_img, status_message) on success, or
        (None, None, error_message) on failure.
    """
    global pipe

    # Lazy-load the model on first use (e.g. when the UI was started without
    # the eager load in __main__).
    if pipe is None:
        load_model()

    if subject_image is None or background_image is None:
        return None, None, "请同时上传主体图像和背景图像"

    try:
        # Coordinates come from text inputs; parse and normalize corner order.
        x1, y1, x2, y2 = int(float(x1)), int(float(y1)), int(float(x2)), int(float(y2))
        if x1 > x2: x1, x2 = x2, x1
        if y1 > y2: y1, y2 = y2, y1

        # Fixed working resolution for the model.
        # NOTE(review): duplicates DEFAULT_CONFIG["resolution"] — keep in sync.
        MODEL_SIZE = (512, 512)

        # --- Subject: letterbox onto a white 512x512 canvas ---
        subject_pil = Image.fromarray(subject_image) if isinstance(subject_image, np.ndarray) else subject_image

        subject_processed = Image.new("RGB", MODEL_SIZE, (255, 255, 255))

        # thumbnail() mutates subject_pil in place, preserving aspect ratio.
        subject_pil.thumbnail((MODEL_SIZE[0], MODEL_SIZE[1]), Image.LANCZOS)

        # Center the thumbnail on the canvas.
        paste_pos = ((MODEL_SIZE[0] - subject_pil.width) // 2,
                     (MODEL_SIZE[1] - subject_pil.height) // 2)
        subject_processed.paste(subject_pil, paste_pos)

        # --- Background: same letterboxing, remembering the original size ---
        background_pil = Image.fromarray(background_image) if isinstance(background_image, np.ndarray) else background_image

        # Original dimensions are needed to rescale the user's bbox below.
        orig_width, orig_height = background_pil.size

        background_processed = Image.new("RGB", MODEL_SIZE, (255, 255, 255))
        background_pil.thumbnail((MODEL_SIZE[0], MODEL_SIZE[1]), Image.LANCZOS)
        bg_paste_pos = ((MODEL_SIZE[0] - background_pil.width) // 2,
                        (MODEL_SIZE[1] - background_pil.height) // 2)
        background_processed.paste(background_pil, bg_paste_pos)

        # Map bbox from original background coordinates into the letterboxed
        # 512x512 canvas: scale by the thumbnail ratio, then shift by the
        # paste offset.
        scale_x = background_pil.width / orig_width
        scale_y = background_pil.height / orig_height

        adjusted_x1 = int(x1 * scale_x) + bg_paste_pos[0]
        adjusted_y1 = int(y1 * scale_y) + bg_paste_pos[1]
        adjusted_x2 = int(x2 * scale_x) + bg_paste_pos[0]
        adjusted_y2 = int(y2 * scale_y) + bg_paste_pos[1]

        # Clamp into the canvas.
        adjusted_x1 = max(0, min(adjusted_x1, MODEL_SIZE[0]-1))
        adjusted_y1 = max(0, min(adjusted_y1, MODEL_SIZE[1]-1))
        adjusted_x2 = max(0, min(adjusted_x2, MODEL_SIZE[0]-1))
        adjusted_y2 = max(0, min(adjusted_y2, MODEL_SIZE[1]-1))

        bbox = [adjusted_x1, adjusted_y1, adjusted_x2, adjusted_y2]

        # NOTE(review): background_display is copied but never used below.
        background_display = background_processed.copy()

        # Black out the bbox region: this becomes the "fill" condition (the
        # area the model should inpaint with the subject).
        background_for_model = background_processed.copy()
        background_for_model_array = np.array(background_for_model)

        background_for_model_array[adjusted_y1:adjusted_y2+1, adjusted_x1:adjusted_x2+1] = (0, 0, 0)
        background_for_model = Image.fromarray(background_for_model_array)

        # Build the two pipeline conditions (project-defined Condition type).
        subject_condition = Condition("subject", raw_img=subject_processed, no_process=True)

        fill_condition = Condition("fill", raw_img=background_for_model, no_process=True)

        conditions = [subject_condition, fill_condition]

        # Make generation reproducible when a seed is supplied.
        if seed is not None:
            set_seed(seed)

        # Extra metadata handed to the pipeline via model_config.
        json_data = {
            "description": prompt,
            "bbox": bbox
        }

        # "training-based" additionally loads and activates the denoising
        # LoRA; "training-free" keeps only the condition adapters.
        # NOTE(review): load_lora_adapter with the same adapter_name on every
        # call may warn/fail on repeated "training-based" runs — confirm.
        if version == "training-based":
            denoising_lora_name = os.path.basename(os.path.normpath(DEFAULT_CONFIG["denoising_lora"]))
            pipe.transformer.load_lora_adapter(
                DEFAULT_CONFIG["denoising_lora"],
                adapter_name=denoising_lora_name,
                use_safetensors=True
            )
            pipe.transformer.set_adapters(
                [i for i in DEFAULT_CONFIG["condition_types"]] + [denoising_lora_name],
                [1.0, 1.0, DEFAULT_CONFIG["denoising_lora_weight"]]
            )
        elif version == "training-free":
            pipe.transformer.set_adapters([i for i in DEFAULT_CONFIG["condition_types"]])

        # Run the diffusion pipeline.
        result_img = pipe(
            prompt=prompt,
            conditions=conditions,
            height=MODEL_SIZE[1],
            width=MODEL_SIZE[0],
            num_inference_steps=num_inference_steps,
            max_sequence_length=DEFAULT_CONFIG["max_sequence_length"],
            model_config={"json_data": json_data},
        ).images[0]

        # Side-by-side strip: subject | masked background | result.
        concat_image = Image.new("RGB", (MODEL_SIZE[0] * 3, MODEL_SIZE[1]), (255, 255, 255))

        concat_image.paste(subject_processed, (0, 0))

        concat_image.paste(background_for_model, (MODEL_SIZE[0], 0))

        concat_image.paste(result_img, (MODEL_SIZE[0] * 2, 0))

        return concat_image, result_img, "生成成功!"

    except Exception as e:
        import traceback
        traceback.print_exc()
        return None, None, f"生成图像时发生错误: {str(e)}"
|
|
|
|
|
def draw_bbox(background_image, evt: gr.SelectData):
    """Handle clicks on the background image to draw a selection rectangle.

    Two-click interaction: the first click stores a corner on function
    attributes (``draw_bbox.start_point`` / ``draw_bbox.current_image``),
    the second click completes the box, draws it, and returns the corner
    coordinates as strings for the UI textboxes.
    """
    # Initialize the per-function state on first call.
    if not hasattr(draw_bbox, "start_point"):
        draw_bbox.start_point = None
        draw_bbox.current_image = None

    if background_image is None:
        return background_image, "", "", "", ""

    try:
        h, w = background_image.shape[:2]

        # The displayed widget may render the image at a different size than
        # the underlying array; try to recover the rendered size from the
        # event, falling back to the array size.
        # NOTE(review): attribute availability depends on the gradio version —
        # confirm evt.target_width / evt.target.width exist in the one pinned.
        target_width = getattr(evt, 'target_width', None) or getattr(evt.target, 'width', None) or w
        target_height = getattr(evt, 'target_height', None) or getattr(evt.target, 'height', None) or h

        # Scale click coordinates from display space to array space.
        scale_x = w / target_width if target_width else 1.0
        scale_y = h / target_height if target_height else 1.0

        # evt.index is the (x, y) click position; clamp into the image.
        x = min(max(0, int(evt.index[0] * scale_x)), w-1)
        y = min(max(0, int(evt.index[1] * scale_y)), h-1)

        # First click: remember the corner and a clean copy of the image.
        if draw_bbox.start_point is None:
            draw_bbox.start_point = (x, y)
            draw_bbox.current_image = background_image.copy()
            return background_image, "", "", "", ""

        # Second click: complete the rectangle.
        end_point = (x, y)

        # Normalize so (x1, y1) is top-left regardless of click order.
        x1 = min(draw_bbox.start_point[0], end_point[0])
        y1 = min(draw_bbox.start_point[1], end_point[1])
        x2 = max(draw_bbox.start_point[0], end_point[0])
        y2 = max(draw_bbox.start_point[1], end_point[1])

        # Draw on the clean copy saved at the first click (green, 2px).
        img_with_rect = draw_bbox.current_image.copy()
        cv2.rectangle(img_with_rect, (x1, y1), (x2, y2), (0, 255, 0), 2)

        # Reset so the next click starts a new selection.
        draw_bbox.start_point = None

        return img_with_rect, str(x1), str(y1), str(x2), str(y2)

    except Exception as e:
        print(f"绘制边界框时发生错误: {e}")
        draw_bbox.start_point = None
        return background_image, "", "", "", ""
|
|
|
|
|
def update_bbox_from_input(background_image, x1, y1, x2, y2):
    """Redraw the selection rectangle from manually entered coordinates.

    Empty textbox values default to 0; coordinates are clamped to the image
    bounds and normalized so (x1, y1) is the top-left corner.

    Returns:
        The image with the rectangle drawn, or the input image unchanged
        when it is None or parsing/drawing fails.
    """
    try:
        if background_image is None:
            return background_image

        # Textboxes deliver strings; empty ones mean 0.
        x1, y1, x2, y2 = int(float(x1) if x1 else 0), int(float(y1) if y1 else 0), \
                         int(float(x2) if x2 else 0), int(float(y2) if y2 else 0)

        h, w = background_image.shape[:2]

        # Clamp into the image bounds.
        x1 = max(0, min(x1, w-1))
        y1 = max(0, min(y1, h-1))
        x2 = max(0, min(x2, w-1))
        y2 = max(0, min(y2, h-1))

        # Normalize corner order.
        if x1 > x2:
            x1, x2 = x2, x1
        if y1 > y2:
            y1, y2 = y2, y1

        # Draw on a copy so the stored image stays clean (green, 2px).
        img_with_rect = background_image.copy()
        cv2.rectangle(img_with_rect, (x1, y1), (x2, y2), (0, 255, 0), 2)

        return img_with_rect
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.  Keep the best-effort fallback (this fires on
        # every keystroke while typing) but only for ordinary errors.
        return background_image
|
|
|
|
|
def reset_bbox(background_image):
    """Clear any in-progress click selection and blank the coordinate boxes."""
    # Forget the pending first-click corner stored on draw_bbox, if any.
    if hasattr(draw_bbox, "start_point"):
        draw_bbox.start_point = None

    empty_coords = ("", "", "", "")
    if background_image is None:
        return (None,) + empty_coords
    # Return a copy so any previously drawn rectangle is discarded.
    return (background_image.copy(),) + empty_coords
|
|
|
|
|
|
|
|
def create_interface():
    """Build and return the Gradio Blocks UI for the generator.

    Layout: left column for inputs (prompt, subject/background images, bbox
    coordinates, advanced options), right column for the bbox preview and
    the generated results.
    """
    with gr.Blocks(title="SubjectGenius 图像生成器") as demo:
        gr.Markdown("# SubjectGenius 图像生成器")
        gr.Markdown("上传参考图像和背景图像,并在背景上选择区域来生成新的图像。")

        # Read-only status line fed by generate_image's third return value.
        status_message = gr.Textbox(label="状态信息", interactive=False)

        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown("### 输入参数")

                prompt = gr.Textbox(label="图像描述文本", placeholder="例如:A decorative fabric topper for windows.")

                # Both images are delivered as numpy arrays to the handlers.
                with gr.Row():
                    subject_image = gr.Image(label="主体图像 (Subject)", type="numpy")
                    background_image = gr.Image(label="背景图像 (Fill)", type="numpy")

                gr.Markdown("### 在背景图上选择区域(点击两次确定对角线顶点)或手动输入坐标")

                # Bbox corners, kept in sync with clicks via draw_bbox and
                # with manual edits via update_bbox_from_input.
                with gr.Row():
                    x1_input = gr.Textbox(label="X1", placeholder="左上角 X 坐标")
                    y1_input = gr.Textbox(label="Y1", placeholder="左上角 Y 坐标")
                    x2_input = gr.Textbox(label="X2", placeholder="右下角 X 坐标")
                    y2_input = gr.Textbox(label="Y2", placeholder="右下角 Y 坐标")
                reset_btn = gr.Button("重置选择")

                with gr.Accordion("高级选项", open=False):
                    # Selects whether the denoising LoRA is loaded at
                    # generation time (see generate_image).
                    version = gr.Radio(
                        ["training-free", "training-based"],
                        label="版本",
                        value="training-free"
                    )
                    seed = gr.Slider(
                        0, 1000, value=0, step=1,
                        label="随机种子"
                    )
                    steps = gr.Slider(
                        4, 50, value=8, step=1,
                        label="推理步数(越大越慢但质量可能更好)"
                    )

                generate_btn = gr.Button("生成图像", variant="primary")

            with gr.Column(scale=1):
                gr.Markdown("### 预览区域选择")
                preview_image = gr.Image(label="区域预览", type="numpy", elem_id="preview_image")

                gr.Markdown("### 生成结果")
                with gr.Tabs():
                    with gr.TabItem("完整结果"):
                        output_image_full = gr.Image(label="完整结果(包含条件图像)")
                    with gr.TabItem("仅生成图像"):
                        output_image = gr.Image(label="生成图像")

        # Clicking on the background image feeds draw_bbox (two-click
        # rectangle selection) and fills the coordinate textboxes.
        background_image.select(
            draw_bbox,
            inputs=[background_image],
            outputs=[preview_image, x1_input, y1_input, x2_input, y2_input]
        )

        # Any manual coordinate edit redraws the preview rectangle.
        coord_inputs = [x1_input, y1_input, x2_input, y2_input]
        for coord in coord_inputs:
            coord.change(
                update_bbox_from_input,
                inputs=[background_image, x1_input, y1_input, x2_input, y2_input],
                outputs=[preview_image]
            )

        # Reset clears the selection state and the coordinate textboxes.
        reset_btn.click(
            reset_bbox,
            inputs=[background_image],
            outputs=[preview_image, x1_input, y1_input, x2_input, y2_input]
        )

        generate_btn.click(
            generate_image,
            inputs=[prompt, subject_image, background_image,
                   x1_input, y1_input, x2_input, y2_input,
                   version, seed, steps],
            outputs=[output_image_full, output_image, status_message]
        )

    return demo
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Build the UI first so the interface definition fails fast, before the
    # (slow) model load.
    demo = create_interface()

    # Eagerly load the model at startup; generate_image would otherwise load
    # it lazily on the first request.
    print("正在加载模型...")
    load_model()

    # share=True creates a public gradio.live tunnel URL.
    demo.launch(share=True)