Datasets:

ArXiv:
sd3_5_fine_sixcard / duibi0.py
yyyzzzzyyy's picture
Add files using upload-large-folder tool
4535b2e verified
raw
history blame
2.41 kB
import torch
from diffusers import StableDiffusion3Pipeline
import os
import pandas as pd

# Local snapshot of stabilityai/stable-diffusion-3.5-large.
MODEL_PATH = "/home/zhaoyu/.cache/huggingface/hub/models--stabilityai--stable-diffusion-3.5-large/snapshots/ceddf0a7fdf2064ea28e2213e3b84e4afa170a0f"

# Load the SD3.5 pipeline in bfloat16 and place it on the first GPU.
pipe = StableDiffusion3Pipeline.from_pretrained(MODEL_PATH, torch_dtype=torch.bfloat16)
pipe = pipe.to("cuda:0")

# Fixed seed so every prompt is rendered reproducibly (a fresh generator
# is re-seeded with this value for each image in the loop below).
seed = 3
# ---------------------------------------------------------------------------
# Paths for this comparison run.
# LoRA checkpoint from DreamBooth DPO fine-tuning; checkpoint-1000 is the
# second training cycle per the original note.
load_lora_dir = "/DATA/DATA3/zhaoyu/T2I_model_SD35/diffusers/examples/dreambooth/trained-sd3_5[dpo]_[p=05]_[lora_blocks]_[use_weighting_scheme]_[lora_rank768]_[size1024]_[batch_size2]_[beta250]_[1e-4]/checkpoint-1000"
# Output dir for the base (no-LoRA) model — currently unused in this script.
save_dir_zero = "/home/wangjiarui/zy/sd3_5/image_zero"
# Output dir for images produced by the fine-tuned model.
save_dir11 = "/DATA/DATA3/zhaoyu/T2I_model_SD35/diffusers/examples/dreambooth/AGIQA-300/image_12000triple_dpo_1e-4_250"
# ---------------------------------------------------------------------------

# Take the first 300 AGIQA-3K prompts, then resume from item `cnt`
# (1-based — the slice keeps row cnt-1 onward, inclusive).
df = pd.read_csv("/DATA/DATA3/zhaoyu/T2I_model_SD35/dataset_AGIQA_3K/data.csv")
cnt = 27
prompts = df["prompt"][:300]
prompts = prompts[cnt - 1:]
print(len(prompts))

os.makedirs(save_dir11, exist_ok=True)

# Attach the fine-tuned LoRA attention weights to the pipeline.
pipe.load_lora_weights(load_lora_dir)
# Render one image per prompt. Numbering starts at cnt-1 so filenames stay
# aligned with the prompt's row position in the CSV even after resuming.
for idx, text in enumerate(prompts, start=cnt - 1):
    # Re-seed a fresh generator every iteration: each image depends only on
    # (seed, prompt), so an interrupted run can be resumed bit-identically.
    generator = torch.Generator(device="cuda:0").manual_seed(seed)
    result = pipe(
        prompt=text,
        num_inference_steps=28,
        guidance_scale=3.5,
        generator=generator,
    )
    target = os.path.join(save_dir11, f"fintune_2epoch_{idx:06d}.png")
    result.images[0].save(target)