Datasets:

ArXiv:
File size: 2,411 Bytes
4535b2e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
"""Generate images with a LoRA (DPO) fine-tuned Stable Diffusion 3.5 pipeline.

Loads the base SD3.5-large model from a local Hugging Face snapshot, applies
a DreamBooth/DPO LoRA checkpoint, then renders one image per prompt from the
AGIQA-3K prompt list with a fixed seed so results are reproducible.
"""
import os

import pandas as pd
import torch
from diffusers import StableDiffusion3Pipeline

# Local snapshot of stabilityai/stable-diffusion-3.5-large.
BASE_MODEL_PATH = "/home/zhaoyu/.cache/huggingface/hub/models--stabilityai--stable-diffusion-3.5-large/snapshots/ceddf0a7fdf2064ea28e2213e3b84e4afa170a0f"

# DPO/LoRA fine-tuning checkpoint applied on top of the base model.
# NOTE: checkpoint-5000 would correspond to the second training cycle.
lora_checkpoint_dir = "/DATA/DATA3/zhaoyu/T2I_model_SD35/diffusers/examples/dreambooth/trained-sd3_5[dpo]_[p=05]_[lora_blocks]_[use_weighting_scheme]_[lora_rank768]_[size1024]_[batch_size2]_[beta250]_[1e-4]/checkpoint-1000"

# Output directory for the base-model (no-LoRA) baseline run — currently unused.
save_dir_zero = "/home/wangjiarui/zy/sd3_5/image_zero"
# Output directory for this fine-tuned run.
save_dir = "/DATA/DATA3/zhaoyu/T2I_model_SD35/diffusers/examples/dreambooth/AGIQA-300/image_12000triple_dpo_1e-4_250"

# Fixed seed: every prompt is generated from the same seed so each image
# is individually reproducible.
SEED = 3
# 1-based index of the first prompt to generate (resume point after a
# previous partial run).
RESUME_AT = 27

pipe = StableDiffusion3Pipeline.from_pretrained(
    BASE_MODEL_PATH, torch_dtype=torch.bfloat16
)
pipe = pipe.to("cuda:0")
# Load the LoRA attention processors from the fine-tuning checkpoint.
pipe.load_lora_weights(lora_checkpoint_dir)

# First 300 AGIQA-3K prompts, skipping the ones already generated.
df = pd.read_csv("/DATA/DATA3/zhaoyu/T2I_model_SD35/dataset_AGIQA_3K/data.csv")
prompts = df["prompt"][:300]
prompts = prompts[RESUME_AT - 1:]  # the slice keeps index RESUME_AT-1 itself
print(len(prompts))

os.makedirs(save_dir, exist_ok=True)

for index, prompt in enumerate(prompts, start=RESUME_AT - 1):
    # Re-seed per prompt so each image is deterministic regardless of how
    # many prompts were generated before it.
    generator = torch.Generator(device="cuda:0").manual_seed(SEED)
    image = pipe(
        prompt=prompt,
        num_inference_steps=28,
        guidance_scale=3.5,
        generator=generator,
    ).images[0]
    # Filename spelling ("fintune") kept as-is for continuity with prior runs.
    image.save(os.path.join(save_dir, f"fintune_2epoch_{index:06d}.png"))