Datasets:

ArXiv:
sd3_5_fine_sixcard / duibi_300.py
yyyzzzzyyy's picture
Add files using upload-large-folder tool
4535b2e verified
raw
history blame
1.92 kB
import argparse
import os
import torch
import pandas as pd
from diffusers import StableDiffusion3Pipeline
def main(args):
    """Generate AGIQA-3K comparison images with SD3.5-large + fine-tuned LoRA.

    Loads Stable Diffusion 3.5 Large, applies the LoRA checkpoint from
    ``args.lora_dir``, and renders the first 300 prompts of the AGIQA-3K
    CSV into ``args.output_dir`` as ``fintune_2epoch_{index:06d}.png``.

    Args:
        args: argparse.Namespace with ``lora_dir`` and ``output_dir``.
    """
    # Single place to change the target GPU (was duplicated for pipe and generator).
    device = "cuda:2"
    pipe = StableDiffusion3Pipeline.from_pretrained(
        "stabilityai/stable-diffusion-3.5-large", torch_dtype=torch.bfloat16
    )
    pipe = pipe.to(device)

    # NOTE(review): hardcoded dataset path — assumes the AGIQA-3K CSV with a
    # "prompt" column exists at this location; verify on the target machine.
    df = pd.read_csv("/DATA/DATA3/zhaoyu/T2I_model_SD35/dataset_AGIQA_3K/data.csv")
    prompts = df["prompt"][:300]

    # Resume point: 1-based index of the first prompt to render. With cnt == 1
    # the full 300 prompts are processed (original behavior).
    cnt = 1
    prompts = prompts[cnt - 1:]
    print(len(prompts))

    # Fixed seed so every image is individually reproducible (a fresh
    # generator is seeded per prompt below).
    seed = 3

    # Ensure the output directory exists before the first image.save().
    os.makedirs(args.output_dir, exist_ok=True)

    # Load the fine-tuned LoRA attention weights.
    pipe.load_lora_weights(args.lora_dir)

    # start=cnt-1 keeps filenames globally numbered: resuming with cnt > 1
    # no longer restarts at fintune_2epoch_000000.png and overwrites output.
    for index, prompt in enumerate(prompts, start=cnt - 1):
        generator = torch.Generator(device=device).manual_seed(seed)
        image_name = f"fintune_2epoch_{index:06d}.png"
        save_path = os.path.join(args.output_dir, image_name)
        image = pipe(
            prompt=prompt,
            num_inference_steps=28,
            guidance_scale=3.5,
            generator=generator,
        ).images[0]
        image.save(save_path)
if __name__ == "__main__":
    # CLI entry point: parse the two required paths and hand off to main().
    cli = argparse.ArgumentParser()
    cli.add_argument(
        "--lora_dir",
        required=True,
        type=str,
        help="Path to LoRA checkpoint directory",
    )
    cli.add_argument(
        "--output_dir",
        required=True,
        type=str,
        help="Directory to save generated images",
    )
    main(cli.parse_args())