Datasets:

ArXiv:
sd3_5_fine_sixcard / duibi.py
yyyzzzzyyy's picture
Add files using upload-large-folder tool
4535b2e verified
raw
history blame
7.8 kB
# Batch comparison script: loads Stable Diffusion 3.5 Large, applies one of
# several LoRA fine-tuning checkpoints (selected by `number` below), and
# renders images for a list of prompts into the matching output directory.
import torch
from diffusers import StableDiffusion3Pipeline
import os
import pandas as pd
device = "cuda:0"  # target GPU for generation
number = 10  # index into load_lora_dirs / save_dir_dpos (defined below) selecting which checkpoint to evaluate
# NOTE(review): local snapshot path — assumes the SD3.5-Large weights are already downloaded here.
pipe = StableDiffusion3Pipeline.from_pretrained("/home/zhaoyu/.cache/huggingface/hub/models--stabilityai--stable-diffusion-3.5-large/snapshots/ceddf0a7fdf2064ea28e2213e3b84e4afa170a0f", torch_dtype=torch.bfloat16)
pipe = pipe.to(device)
# # Set a reproducible random seed
seed = 3
#to(accelerator.device)
#----------------------------------------------------------------
# LoRA checkpoints under comparison and their paired output directories.
# Both lists are indexed by `number` (defined above); they must stay the same
# length and in the same order.  Fixes the load_loar_dir* / load_lora_dir*
# naming inconsistency of the original by building the lists directly.
# (Older /home/wangjiarui/zy/sd3_5 experiment paths that lived here as
# commented-out code were removed; consult version history if needed.)
_ROOT = "/DATA/DATA3/zhaoyu/T2I_model_SD35/diffusers/examples/dreambooth"
# Common name prefix of the plain-DPO runs (they differ only in beta / lr / step).
_DPO = "trained-sd3_5[dpo]_[p=05]_[lora_blocks]_[use_weighting_scheme]_[lora_rank768]_[size1024]_[batch_size4]"
load_lora_dirs = [
    f"{_ROOT}/{_DPO}_[beta500]_[2e-5]/checkpoint-2500",
    f"{_ROOT}/{_DPO}_[beta500]_[2e-5]/checkpoint-4000",
    f"{_ROOT}/{_DPO}_[beta500]_[1e-4]/checkpoint-1000",
    f"{_ROOT}/{_DPO}_[beta500]_[1e-4]/checkpoint-1500",
    f"{_ROOT}/{_DPO}_[beta500]_[2e-5]/checkpoint-1000",
    f"{_ROOT}/{_DPO}_[beta250]_[2e-5]/checkpoint-1000",
    f"{_ROOT}/{_DPO}_[beta250]_[5e-6]/checkpoint-1000",
    f"{_ROOT}/trained-sd3_5[dpo_and_rwr]_[chengyi]_[p=05]_[batch_size4]_[beta250]_[1e-4]/checkpoint-1000",
    f"{_ROOT}/trained-sd3_5[dpo_and_rwr]_[chengyi]_[p=05]_[batch_size4]_[beta250]_[1e-4]/checkpoint-1500",
    f"{_ROOT}/trained-sd3_5[dpo_and_rwr]_[guiyihua,chengyiw-w,l-1]_[p=05]_[batch_size4]_[beta250]_[1e-4]/checkpoint-1500",
    f"{_ROOT}/trained-sd3_5[dpo_and_rwr]_[guiyihua,chengyiw-w,l-1]_[p=05]_[batch_size4]_[beta150]_[1e-4]/checkpoint-1000",
]
# Output directories, one per checkpoint above (same order).
save_dir_dpos = [
    f"{_ROOT}/image_30000triple_dpo_2e-5_500",
    f"{_ROOT}/image_48000triple_dpo_2e-5_500",
    f"{_ROOT}/image_12000triple_dpo_1e-4_500",
    f"{_ROOT}/image_18000triple_dpo_1e-4_500",
    f"{_ROOT}/image_12000triple_dpo_2e-5_500",
    f"{_ROOT}/image_12000triple_dpo_2e-5_250",
    f"{_ROOT}/image_12000triple_dpo_5e-6_250",
    f"{_ROOT}/image_12000triple_dpo_rwr_1e-4_250",
    f"{_ROOT}/image_18000triple_dpo_rwr_1e-4_250",
    f"{_ROOT}/image_12000triple_dpo_rwr_guiyihua,chengyiw-w,l-l_1e-4_250",
    f"{_ROOT}/image_12000triple_dpo_rwr_guiyihua,chengyiw-w,l-l_1e-4_150",
]
assert len(load_lora_dirs) == len(save_dir_dpos)  # keep the pairing honest
#---------------------------------------------------------------------------------
# Prompt list: one prompt per line of prompt.txt.  Generation resumes at
# prompt number `cnt` (1-based), i.e. list index cnt - 1 is included.
prompts_file = "/DATA/DATA3/zhaoyu/T2I_model_SD35/diffusers/examples/dreambooth/prompt.txt"
# encoding pinned to utf-8 so non-ASCII prompts decode the same on any machine
with open(prompts_file, "r", encoding="utf-8") as f:
    prompts = [line.strip() for line in f]
cnt = 1185  # 1-based prompt number to resume from
prompts = prompts[cnt - 1:]  # the slice includes index cnt - 1 (the resume point)
# # Index ranges to regenerate (note: Python indices start at 0)
# ranges_to_generate = list(range(1576, 1677)) + list(range(2070, 2079))
#---------------------------------------------------------------------
#----------------------------------------------------------------------
# Alternative prompt source (CSV), kept for reference:
# df = pd.read_csv("/home/wangjiarui/zy/sd3_5/data.csv")
# prompts = df["prompt"]
# prompts = prompts[:300]
# cnt = 96
# prompts = prompts[cnt-1:]
# print(len(prompts))
# # #-----------------------------------------------------------------------------
# Create every output directory up front so any checkpoint index is valid.
for save_dir_dpo in save_dir_dpos:
    os.makedirs(save_dir_dpo, exist_ok=True)
# Load the LoRA attention processors for the selected checkpoint.
pipe.load_lora_weights(load_lora_dirs[number])
save_dir = save_dir_dpos[number]  # hoisted: loop-invariant output directory
for index, prompt in enumerate(prompts, start=cnt - 1):
    # if index not in ranges_to_generate:
    #     continue  # skip prompts outside the requested ranges
    # Re-seed per prompt so each image is reproducible regardless of how many
    # prompts before it were skipped.
    generator = torch.Generator(device).manual_seed(seed)
    # NOTE(review): the "fintune" spelling is kept on purpose — files already
    # on disk use it, and the resume check below must keep matching them.
    image_name = f"fintune_2epoch_{index:06d}.png"
    save_path = os.path.join(save_dir, image_name)
    # Skip images that already exist so an interrupted run can resume.
    if os.path.exists(save_path):
        print(f"文件 {image_name} 已存在,跳过生成")
        continue
    image = pipe(
        prompt=prompt,
        num_inference_steps=28,
        guidance_scale=3.5,
        generator=generator,
    ).images[0]
    image.save(save_path)