# Datasets:
#
# ArXiv:
# sd3_5_fine_sixcard / train.sh
# yyyzzzzyyy's picture
# Add files using upload-large-folder tool
# 4535b2e verified
# raw
# history blame
# 3.88 kB
export MODEL_NAME="/home/zhaoyu/.cache/huggingface/hub/models--stabilityai--stable-diffusion-3.5-large/snapshots/ceddf0a7fdf2064ea28e2213e3b84e4afa170a0f"
export OUTPUT_DIR="trained-sd3_5_[p=05]_[exp]_[lora_blocks]_[use_weighting_scheme]_[lora_rank768]_[size1024]"
export CUDA_VISIBLE_DEVICES=0,1,2
accelerate launch --num_processes=3 --main_process_port=29500 train_gors_lora_sd3.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--output_dir=$OUTPUT_DIR \
--mixed_precision="bf16" \
--resolution=1024 \
--train_batch_size=2 \
--gradient_accumulation_steps=1 \
--learning_rate=1e-4 \
--report_to="wandb" \
--lr_scheduler="cosine" \
--rank=768 \
--lr_warmup_steps=2400 \
--num_train_epochs=2 \
--validation_prompts "A photo of sks dog in a bucket" "As the evening approaches, golden light pours over the fields, transforming the landscape into a peaceful, dreamlike scene. The sun, low in the sky, casts long, soft shadows that stretch across the green fields, while the light dances on the edges of trees and flowers. The air is warm, but not oppressive, and the soft glow of the fading sun creates a sense of tranquility, as if the world is winding down, ready for the night to come." "A dog is a chef in a mystical kitchen, cooking up magical dishes that cause time to freeze when they are served." "a face image with a flat nose, long hair, and a neutral emotion"\
--validation_steps=500 \
--seed="42" \
--gradient_checkpointing \
--train_text_encoder \
--use_8bit_adam \
--train_data_file="/DATA/DATA3/zhaoyu/T2I_model_SD35/dataset/AIGI_2025_train/train_imgname_prompt_mos1_mos2_human_reward.xlsx" \
--image_base_dir="/DATA/DATA3/wjr/AIGI2025" \
--lora_blocks "12,13,14,15,16,17,18,19,20,21,22,23,24"\
#---------------------------------------------------------------
# --lr_scheduler="cosine" \
# --report_to="wandb" \
# --lr_warmup_steps=2400 \这两个参数相比于之前也是修改了的
#把lora矩阵的层数从4层改成了768
#把图片的清晰度改成了1024(因为SD3.5本身就是1024的,不能说微调的时候就给他改低了吧),然后微调的周期设置成了2,先试试水,然后改成了exp,不再是线性的
#然后关于微调的层数我也想修改(但还没修改,先看看效果),然后还要训train_text_encoder
#更改lora层
#-----------------------------------------------------------------
#--resume_from_checkpoint="latest" \
#当数据集比较大的时候,要把validation_epochs设置的小一点
#max_sequence_length可以设置的大一点,不然会被截断,我已经把默认值设置成512了,看看会不会报错
# --max_train_steps=15000 \
# --num_train_epochs=2 \
# --resolution的设置真的会很大程度上影响训练时间768是10个小时,512是6个小时
#还有就是一点我突然想明白了,就是说为什么进行分布式训练了没有明显看到时间方面的提升
#原因就是在训练的时候,虽然你意识到了如果进行分布式训练但是设置训练参数的时候设置的是max_train_steps的话,那每个GPU还是会只用一个GPU的时候走一样的更新步数
#因为一次gloab_steps是所有的GPU都更新了一次之后才+1,也就相当于是一次更新过程中一共处理了GPU_num*train_batch_size*gradient_accumulation_steps张照片
#然后你意识到了这一点,设置训练参数的时候特意设置的是总周期num_train_epochs
#但是你没有发现你的代码中已经设置了max_train_steps的默认值,而代码中对于这些训练参数的检验逻辑又是以max_train_steps优先的
#当发现了max_train_steps并不为NONE的时候就会不看num_train_epochs了
#所以才造成了就算用了分布式训练也并没有提升训练效果的现象,但其实是有效果的,只不过你参数没设置对