Upload 415 files
This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full set of 415 files.
- .gitattributes +123 -0
- data/configs/dataset.toml +69 -0
- data/configs/wan14b_t2v.toml +87 -0
- data/epoch1/adapter_config.json +38 -0
- data/epoch1/adapter_model.safetensors +3 -0
- data/epoch1/wan14b_t2v.toml +87 -0
- data/epoch10/adapter_config.json +38 -0
- data/epoch10/adapter_model.safetensors +3 -0
- data/epoch10/wan14b_t2v.toml +87 -0
- data/epoch11/adapter_config.json +38 -0
- data/epoch11/adapter_model.safetensors +3 -0
- data/epoch11/wan14b_t2v.toml +87 -0
- data/epoch12/adapter_config.json +38 -0
- data/epoch12/adapter_model.safetensors +3 -0
- data/epoch12/wan14b_t2v.toml +87 -0
- data/epoch13/adapter_config.json +38 -0
- data/epoch13/adapter_model.safetensors +3 -0
- data/epoch13/wan14b_t2v.toml +87 -0
- data/epoch14/adapter_config.json +38 -0
- data/epoch14/adapter_model.safetensors +3 -0
- data/epoch14/wan14b_t2v.toml +87 -0
- data/epoch15/adapter_config.json +38 -0
- data/epoch15/adapter_model.safetensors +3 -0
- data/epoch15/wan14b_t2v.toml +87 -0
- data/epoch16/adapter_config.json +38 -0
- data/epoch16/adapter_model.safetensors +3 -0
- data/epoch16/wan14b_t2v.toml +87 -0
- data/epoch17/adapter_config.json +38 -0
- data/epoch17/adapter_model.safetensors +3 -0
- data/epoch17/wan14b_t2v.toml +87 -0
- data/epoch18/adapter_config.json +38 -0
- data/epoch18/adapter_model.safetensors +3 -0
- data/epoch18/wan14b_t2v.toml +87 -0
- data/epoch19/adapter_config.json +38 -0
- data/epoch19/adapter_model.safetensors +3 -0
- data/epoch19/wan14b_t2v.toml +87 -0
- data/epoch2/adapter_config.json +38 -0
- data/epoch2/adapter_model.safetensors +3 -0
- data/epoch2/wan14b_t2v.toml +87 -0
- data/epoch20/adapter_config.json +38 -0
- data/epoch20/adapter_model.safetensors +3 -0
- data/epoch20/wan14b_t2v.toml +87 -0
- data/epoch21/adapter_config.json +38 -0
- data/epoch21/adapter_model.safetensors +3 -0
- data/epoch21/wan14b_t2v.toml +87 -0
- data/epoch22/adapter_config.json +38 -0
- data/epoch22/adapter_model.safetensors +3 -0
- data/epoch22/wan14b_t2v.toml +87 -0
- data/epoch23/adapter_config.json +38 -0
- data/epoch23/adapter_model.safetensors +3 -0
.gitattributes
CHANGED
@@ -57,3 +57,126 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 # Video files - compressed
 *.mp4 filter=lfs diff=lfs merge=lfs -text
 *.webm filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_01-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_02-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_03-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_04-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_05-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_06-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_07-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_08-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_09-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_10-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_11-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_12-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_13-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_14-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_15-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_16-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_17-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_18-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_19-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_20-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_21-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_22-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_23-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_24-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_25-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_26-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_27-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_28-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_29-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_30-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_31-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_32-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_33-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_34-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_35-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_36-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_37-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_38-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_39-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/layer_40-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step1562/mp_rank_00_model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_01-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_02-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_03-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_04-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_05-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_06-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_07-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_08-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_09-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_10-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_11-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_12-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_13-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_14-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_15-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_16-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_17-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_18-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_19-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_20-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_21-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_22-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_23-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_24-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_25-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_26-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_27-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_28-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_29-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_30-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_31-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_32-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_33-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_34-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_35-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_36-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_37-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_38-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_39-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/layer_40-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step5973/mp_rank_00_model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_01-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_02-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_03-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_04-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_05-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_06-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_07-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_08-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_09-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_10-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_11-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_12-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_13-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_14-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_15-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_16-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_17-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_18-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_19-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_20-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_21-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_22-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_23-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_24-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_25-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_26-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_27-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_28-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_29-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_30-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_31-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_32-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_33-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_34-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_35-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_36-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_37-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_38-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_39-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/layer_40-model_states.ptrom filter=lfs diff=lfs merge=lfs -text
+data/global_step8041/mp_rank_00_model_states.ptrom filter=lfs diff=lfs merge=lfs -text
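All of the added lines above route the DeepSpeed checkpoint shards through Git LFS, so only small pointer files enter the Git history. As a minimal illustration (not how Git itself resolves attributes, which also handles ordering, macros, and path-relative matching), the LFS-tracked patterns can be listed with a few lines of Python:

from pathlib import Path

# Minimal sketch: collect the .gitattributes patterns that send files through
# Git LFS. This only parses the simple "pattern attr attr ..." lines shown in
# the diff above; Git's real attribute handling is more involved.
lfs_patterns = []
for line in Path(".gitattributes").read_text().splitlines():
    parts = line.split()
    if len(parts) > 1 and "filter=lfs" in parts[1:]:
        lfs_patterns.append(parts[0])

print(len(lfs_patterns), "LFS-tracked patterns")
print(lfs_patterns[-3:])  # e.g. the last few global_step8041 checkpoint shards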
data/configs/dataset.toml
ADDED
@@ -0,0 +1,69 @@
+# Resolutions to train on, given as the side length of a square image. You can have multiple sizes here.
+# !!!WARNING!!!: this might work differently to how you think it does. Images are first grouped to aspect ratio
+# buckets, then each image is resized to ALL of the areas specified by the resolutions list. This is a way to do
+# multi-resolution training, i.e. training on multiple total pixel areas at once. Your dataset is effectively duplicated
+# as many times as the length of this list.
+# If you just want to use predetermined (width, height, frames) size buckets, see the example cosmos_dataset.toml
+# file for how you can do that.
+resolutions = [768, 832]
+
+# You can give resolutions as (width, height) pairs also. This doesn't do anything different, it's just
+# another way of specifying the area(s) (i.e. total number of pixels) you want to train on.
+# resolutions = [[1280, 720]]
+
+# Enable aspect ratio bucketing. For the different AR buckets, the final size will be such that
+# the areas match the resolutions you configured above.
+enable_ar_bucket = true
+
+# The aspect ratio and frame bucket settings may be specified for each [[directory]] entry as well.
+# Directory-level settings will override top-level settings.
+
+# Min and max aspect ratios, given as width/height ratio.
+min_ar = 0.5
+max_ar = 2.0
+# Total number of aspect ratio buckets, evenly spaced (in log space) between min_ar and max_ar.
+num_ar_buckets = 7
+
+# Can manually specify ar_buckets instead of using the range-style config above.
+# Each entry can be width/height ratio, or (width, height) pair. But you can't mix them, because of TOML.
+# ar_buckets = [[512, 512], [448, 576]]
+ar_buckets = [1.0, 0.58]
+
+# For video training, you need to configure frame buckets (similar to aspect ratio buckets). There will always
+# be a frame bucket of 1 for images. Videos will be assigned to the longest frame bucket possible, such that the video
+# is still greater than or equal to the frame bucket length.
+# But videos are never assigned to the image frame bucket (1); if the video is very short it would just be dropped.
+frame_buckets = [1]
+# If you have >24GB VRAM, or multiple GPUs and use pipeline parallelism, or lower the spatial resolution, you could maybe train with longer frame buckets
+# frame_buckets = [1, 33, 65, 97]
+
+
+[[directory]]
+# Path to directory of images/videos, and corresponding caption files. The caption files should match the media file name, but with a .txt extension.
+# A missing caption file will log a warning, but then just train using an empty caption.
+path = '/image_dataset_here'
+
+# You can do masked training, where the mask indicates which parts of the image to train on. The masking is done in the loss function. The mask directory should have mask
+# images with the same names (ignoring the extension) as the training images. E.g. training image 1.jpg could have mask image 1.jpg, 1.png, etc. If a training image doesn't
+# have a corresponding mask, a warning is printed but training proceeds with no mask for that image. In the mask, white means train on this, black means mask it out. Values
+# in between black and white become a weight between 0 and 1, i.e. you can use a suitable value of grey for mask weight of 0.5. In actuality, only the R channel is extracted
+# and converted to the mask weight.
+# The mask_path can point to any directory containing mask images.
+#mask_path = '/home/anon/data/images/grayscale/masks'
+
+# How many repeats for 1 epoch. The dataset will act like it is duplicated this many times.
+# The semantics of this are the same as sd-scripts: num_repeats=1 means one epoch is a single pass over all examples (no duplication).
+num_repeats = 5
+
+# Example of overriding some settings, and using ar_buckets to directly specify ARs.
+# ar_buckets = [[448, 576]]
+# resolutions = [[448, 576]]
+# frame_buckets = [1]
+
+
+# You can list multiple directories.
+# If you have a video dataset as well remove the hashtag from the following 3 lines and set your repeats
+
+# [[directory]]
+# path = '/video_dataset_here'
+# num_repeats = 5
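For intuition on the num_ar_buckets comment above: seven buckets spaced evenly in log space between min_ar = 0.5 and max_ar = 2.0 come out symmetric around 1.0. Below is a rough sketch of that spacing (illustration only; the actual bucketing and resizing logic lives in the training code and also snaps sizes to valid dimensions).

import math

# Illustration of "evenly spaced (in log space) between min_ar and max_ar".
min_ar, max_ar, num_ar_buckets = 0.5, 2.0, 7
log_min, log_max = math.log(min_ar), math.log(max_ar)
ar_buckets = [
    math.exp(log_min + i * (log_max - log_min) / (num_ar_buckets - 1))
    for i in range(num_ar_buckets)
]
print([round(ar, 3) for ar in ar_buckets])
# [0.5, 0.63, 0.794, 1.0, 1.26, 1.587, 2.0]

# With resolutions = [768, 832], each example is then trained at an area close
# to 768*768 and (separately) 832*832 at its bucket's aspect ratio, which is
# why the dataset is effectively duplicated once per entry in `resolutions`.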
data/configs/wan14b_t2v.toml
ADDED
@@ -0,0 +1,87 @@
+# Output path for training runs. Each training run makes a new directory in here.
+output_dir = '/data/diffusion_pipe_training_runs/wan14b_t2v_video_loras'
+
+# Dataset config file.
+dataset = 'examples/dataset.toml'
+# You can have separate eval datasets. Give them a name for Tensorboard metrics.
+# eval_datasets = [
+#     {name = 'something', config = 'path/to/eval_dataset.toml'},
+# ]
+
+# training settings
+
+# I usually set this to a really high value because I don't know how long I want to train.
+epochs = 1000
+# Batch size of a single forward/backward pass for one GPU.
+micro_batch_size_per_gpu = 1
+# Pipeline parallelism degree. A single instance of the model is divided across this many GPUs.
+pipeline_stages = 1
+# Number of micro-batches sent through the pipeline for each training step.
+# If pipeline_stages > 1, a higher GAS means better GPU utilization due to smaller pipeline bubbles (where GPUs aren't overlapping computation).
+gradient_accumulation_steps = 4
+# Grad norm clipping.
+gradient_clipping = 1.0
+# Learning rate warmup.
+warmup_steps = 100
+
+# eval settings
+
+eval_every_n_epochs = 1
+eval_before_first_step = true
+# Might want to set these lower for eval so that less images get dropped (eval dataset size is usually much smaller than training set).
+# Each size bucket of images/videos is rounded down to the nearest multiple of the global batch size, so higher global batch size means
+# more dropped images. Usually doesn't matter for training but the eval set is much smaller so it can matter.
+eval_micro_batch_size_per_gpu = 1
+eval_gradient_accumulation_steps = 1
+
+# misc settings
+
+# Probably want to set this a bit higher if you have a smaller dataset so you don't end up with a million saved models.
+save_every_n_epochs = 1
+# Can checkpoint the training state every n number of epochs or minutes. Set only one of these. You can resume from checkpoints using the --resume_from_checkpoint flag.
+#checkpoint_every_n_epochs = 1
+checkpoint_every_n_minutes = 60
+# Always set to true unless you have a huge amount of VRAM.
+activation_checkpointing = true
+# Controls how Deepspeed decides how to divide layers across GPUs. Probably don't change this.
+partition_method = 'parameters'
+# dtype for saving the LoRA or model, if different from training dtype
+save_dtype = 'bfloat16'
+# Batch size for caching latents and text embeddings. Increasing can lead to higher GPU utilization during caching phase but uses more memory.
+caching_batch_size = 1
+# How often deepspeed logs to console.
+steps_per_print = 1
+# How to extract video clips for training from a single input video file.
+# The video file is first assigned to one of the configured frame buckets, but then we must extract one or more clips of exactly the right
+# number of frames for that bucket.
+# single_beginning: one clip starting at the beginning of the video
+# single_middle: one clip from the middle of the video (cutting off the start and end equally)
+# multiple_overlapping: extract the minimum number of clips to cover the full range of the video. They might overlap some.
+# default is single_middle
+video_clip_mode = 'single_middle'
+
+[model]
+type = 'wan'
+ckpt_path = '/Wan/Wan2.1-T2V-14B'
+dtype = 'bfloat16'
+# You can use fp8 for the transformer when training LoRA.
+#transformer_dtype = 'float8'
+timestep_sample_method = 'logit_normal'
+
+# For models that support full fine tuning, simply delete or comment out the [adapter] table to FFT.
+[adapter]
+type = 'lora'
+rank = 32
+# Dtype for the LoRA weights you are training.
+dtype = 'bfloat16'
+# You can initialize the lora weights from a previously trained lora.
+# init_from_existing = '/data/diffusion_pipe_training_runs/wan14b_t2v_video_loras/20250424_05-04-02/epoch30'
+
+[optimizer]
+# AdamW from the optimi library is a good default since it automatically uses Kahan summation when training bfloat16 weights.
+# Look at train.py for other options. You could also easily edit the file and add your own.
+type = 'adamw_optimi'
+lr = 2e-5
+betas = [0.9, 0.99]
+weight_decay = 0.01
+eps = 1e-8
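A note on the batch-size settings above: with micro_batch_size_per_gpu = 1 and gradient_accumulation_steps = 4, each optimizer step sees 4 examples per data-parallel replica. The back-of-the-envelope sketch below assumes the usual DeepSpeed relationship; the exact accounting in diffusion-pipe may differ, and num_gpus is not part of the config file.

# Hypothetical numbers for illustration only.
micro_batch_size_per_gpu = 1
gradient_accumulation_steps = 4
pipeline_stages = 1
num_gpus = 1  # assumption, not from the config

data_parallel_size = num_gpus // pipeline_stages
global_batch_size = (micro_batch_size_per_gpu
                     * gradient_accumulation_steps
                     * data_parallel_size)
print(global_batch_size)  # 4 examples contribute to each optimizer step on one GPU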
data/epoch1/adapter_config.json
ADDED
@@ -0,0 +1,38 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": null,
+  "bias": "none",
+  "corda_config": null,
+  "eva_config": null,
+  "exclude_modules": null,
+  "fan_in_fan_out": false,
+  "inference_mode": false,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_bias": false,
+  "lora_dropout": 0.0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 32,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "ffn.0",
+    "o",
+    "k",
+    "q",
+    "v",
+    "ffn.2"
+  ],
+  "task_type": null,
+  "trainable_token_indices": null,
+  "use_dora": false,
+  "use_rslora": false
+}
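The same PEFT-style adapter config is written next to every epoch's weights. To confirm a checkpoint's LoRA hyperparameters without loading the model, the JSON can be read directly; the sketch below uses this repo's epoch1 path as an example.

import json
from pathlib import Path

# Inspect the LoRA settings saved with an epoch checkpoint.
cfg = json.loads(Path("data/epoch1/adapter_config.json").read_text())

print(cfg["peft_type"])             # LORA
print(cfg["r"], cfg["lora_alpha"])  # 32 32 -> effective LoRA scale alpha/r = 1.0
print(cfg["target_modules"])        # ['ffn.0', 'o', 'k', 'q', 'v', 'ffn.2']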
data/epoch1/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:281773d8c55111e4ebd397b4706f24980e23c77c50346f2a416fdd43e870a3f2
+size 306807976
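What the repository stores for each adapter_model.safetensors is only the three-line Git LFS pointer above; the roughly 307 MB weight blob is fetched from LFS storage on download. If you want to verify a downloaded blob against a pointer by hand, the standard library is enough. The sketch below uses illustrative paths, since a normal LFS checkout replaces the pointer with the blob in place.

import hashlib
from pathlib import Path

# Illustrative paths: a saved copy of the pointer text and the downloaded blob.
pointer_text = Path("adapter_model.safetensors.pointer").read_text()
fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
expected_oid = fields["oid"].removeprefix("sha256:")
expected_size = int(fields["size"])

blob = Path("data/epoch1/adapter_model.safetensors").read_bytes()  # loads ~307 MB
assert len(blob) == expected_size
assert hashlib.sha256(blob).hexdigest() == expected_oid
print("LFS blob matches its pointer")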
data/epoch1/wan14b_t2v.toml
ADDED
@@ -0,0 +1,87 @@
[Content identical to data/configs/wan14b_t2v.toml shown above; the run's training config is copied into each saved epoch directory, so it is not repeated here.]
data/epoch10/adapter_config.json
ADDED
@@ -0,0 +1,38 @@
[Identical to data/epoch1/adapter_config.json shown above.]
data/epoch10/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:27a0d0242bcfed86b58c0559559e1f49b5a192a72bd56635e3f472d64523a9a6
+size 306807976
data/epoch10/wan14b_t2v.toml
ADDED
@@ -0,0 +1,87 @@
[Identical to data/configs/wan14b_t2v.toml shown above.]
data/epoch11/adapter_config.json
ADDED
@@ -0,0 +1,38 @@
[Identical to data/epoch1/adapter_config.json shown above.]
data/epoch11/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dde6888d3be326b8a7640f6ed2fd4634924e7aa443165ad2f8c794cf423aead5
+size 306807976
data/epoch11/wan14b_t2v.toml
ADDED
@@ -0,0 +1,87 @@
[Identical to data/configs/wan14b_t2v.toml shown above.]
data/epoch12/adapter_config.json
ADDED
@@ -0,0 +1,38 @@
[Identical to data/epoch1/adapter_config.json shown above.]
data/epoch12/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f649bcafcc3e058568f4d6ad64bdf9741bc5d343261793415d5ebf3e8516fb74
+size 306807976
data/epoch12/wan14b_t2v.toml
ADDED
@@ -0,0 +1,87 @@
[Identical to data/configs/wan14b_t2v.toml shown above.]
data/epoch13/adapter_config.json
ADDED
@@ -0,0 +1,38 @@
[Identical to data/epoch1/adapter_config.json shown above.]
data/epoch13/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2d177e126b96c812a795ec6324fba5e0475e8a0746b76841ac414f0c916c8007
+size 306807976
data/epoch13/wan14b_t2v.toml
ADDED
@@ -0,0 +1,87 @@
+# Output path for training runs. Each training run makes a new directory in here.
+output_dir = '/data/diffusion_pipe_training_runs/wan14b_t2v_video_loras'
+
+# Dataset config file.
+dataset = 'examples/dataset.toml'
+# You can have separate eval datasets. Give them a name for Tensorboard metrics.
+# eval_datasets = [
+# {name = 'something', config = 'path/to/eval_dataset.toml'},
+# ]
+
+# training settings
+
+# I usually set this to a really high value because I don't know how long I want to train.
+epochs = 1000
+# Batch size of a single forward/backward pass for one GPU.
+micro_batch_size_per_gpu = 1
+# Pipeline parallelism degree. A single instance of the model is divided across this many GPUs.
+pipeline_stages = 1
+# Number of micro-batches sent through the pipeline for each training step.
+# If pipeline_stages > 1, a higher GAS means better GPU utilization due to smaller pipeline bubbles (where GPUs aren't overlapping computation).
+gradient_accumulation_steps = 4
+# Grad norm clipping.
+gradient_clipping = 1.0
+# Learning rate warmup.
+warmup_steps = 100
+
+# eval settings
+
+eval_every_n_epochs = 1
+eval_before_first_step = true
+# Might want to set these lower for eval so that less images get dropped (eval dataset size is usually much smaller than training set).
+# Each size bucket of images/videos is rounded down to the nearest multiple of the global batch size, so higher global batch size means
+# more dropped images. Usually doesn't matter for training but the eval set is much smaller so it can matter.
+eval_micro_batch_size_per_gpu = 1
+eval_gradient_accumulation_steps = 1
+
+# misc settings
+
+# Probably want to set this a bit higher if you have a smaller dataset so you don't end up with a million saved models.
+save_every_n_epochs = 1
+# Can checkpoint the training state every n number of epochs or minutes. Set only one of these. You can resume from checkpoints using the --resume_from_checkpoint flag.
+#checkpoint_every_n_epochs = 1
+checkpoint_every_n_minutes = 60
+# Always set to true unless you have a huge amount of VRAM.
+activation_checkpointing = true
+# Controls how Deepspeed decides how to divide layers across GPUs. Probably don't change this.
+partition_method = 'parameters'
+# dtype for saving the LoRA or model, if different from training dtype
+save_dtype = 'bfloat16'
+# Batch size for caching latents and text embeddings. Increasing can lead to higher GPU utilization during caching phase but uses more memory.
+caching_batch_size = 1
+# How often deepspeed logs to console.
+steps_per_print = 1
+# How to extract video clips for training from a single input video file.
+# The video file is first assigned to one of the configured frame buckets, but then we must extract one or more clips of exactly the right
+# number of frames for that bucket.
+# single_beginning: one clip starting at the beginning of the video
+# single_middle: one clip from the middle of the video (cutting off the start and end equally)
+# multiple_overlapping: extract the minimum number of clips to cover the full range of the video. They might overlap some.
+# default is single_middle
+video_clip_mode = 'single_middle'
+
+[model]
+type = 'wan'
+ckpt_path = '/Wan/Wan2.1-T2V-14B'
+dtype = 'bfloat16'
+# You can use fp8 for the transformer when training LoRA.
+#transformer_dtype = 'float8'
+timestep_sample_method = 'logit_normal'
+
+# For models that support full fine tuning, simply delete or comment out the [adapter] table to FFT.
+[adapter]
+type = 'lora'
+rank = 32
+# Dtype for the LoRA weights you are training.
+dtype = 'bfloat16'
+# You can initialize the lora weights from a previously trained lora.
+# init_from_existing = '/data/diffusion_pipe_training_runs/wan14b_t2v_video_loras/20250424_05-04-02/epoch30'
+
+[optimizer]
+# AdamW from the optimi library is a good default since it automatically uses Kahan summation when training bfloat16 weights.
+# Look at train.py for other options. You could also easily edit the file and add your own.
+type = 'adamw_optimi'
+lr = 2e-5
+betas = [0.9, 0.99]
+weight_decay = 0.01
+eps = 1e-8
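Because each epoch directory carries a copy of this TOML, the run settings can be recovered programmatically with Python 3.11's tomllib. The sketch below is illustrative only: the GPU count is not recorded in the file, so num_data_parallel_gpus is a made-up value, and the global-batch formula (micro batch size times gradient accumulation times data-parallel GPUs) is the usual DeepSpeed relationship rather than something stated in this repo.

```python
# Load the per-epoch training config and derive a few summary numbers.
import tomllib  # Python 3.11+

with open("data/epoch13/wan14b_t2v.toml", "rb") as f:
    cfg = tomllib.load(f)

num_data_parallel_gpus = 2  # hypothetical; not stored in the config file
global_batch = (
    cfg["micro_batch_size_per_gpu"]
    * cfg["gradient_accumulation_steps"]
    * num_data_parallel_gpus
)

print("LoRA rank:", cfg["adapter"]["rank"], "lr:", cfg["optimizer"]["lr"])
print("effective global batch size:", global_batch)
```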
data/epoch14/adapter_config.json
ADDED
@@ -0,0 +1,38 @@
(38 lines, identical to data/epoch13/adapter_config.json above)
data/epoch14/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:47fff5ec4df6e36a12e6dc5f1cf896f4fd7f70b0be817c435867e183c266753d
+size 306807976
data/epoch14/wan14b_t2v.toml
ADDED
@@ -0,0 +1,87 @@
(87 lines, identical to data/epoch13/wan14b_t2v.toml above)
data/epoch15/adapter_config.json
ADDED
@@ -0,0 +1,38 @@
(38 lines, identical to data/epoch13/adapter_config.json above)
data/epoch15/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:661ed0a9299f76cf69c48cc82ed72adb65c96a13d827cc1204357c030f6df2af
+size 306807976
data/epoch15/wan14b_t2v.toml
ADDED
@@ -0,0 +1,87 @@
(87 lines, identical to data/epoch13/wan14b_t2v.toml above)
data/epoch16/adapter_config.json
ADDED
@@ -0,0 +1,38 @@
(38 lines, identical to data/epoch13/adapter_config.json above)
data/epoch16/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9abc084fb7304abec4c55e5fd0f7aa73ff064c26911032baba7571af0676c390
+size 306807976
data/epoch16/wan14b_t2v.toml
ADDED
@@ -0,0 +1,87 @@
(87 lines, identical to data/epoch13/wan14b_t2v.toml above)
data/epoch17/adapter_config.json
ADDED
@@ -0,0 +1,38 @@
(38 lines, identical to data/epoch13/adapter_config.json above)
data/epoch17/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a35f7ec88bfb9d32f8394f0c85dc76fd815c2afc489c5c95006f88c79a9b1c33
+size 306807976
data/epoch17/wan14b_t2v.toml
ADDED
@@ -0,0 +1,87 @@
(87 lines, identical to data/epoch13/wan14b_t2v.toml above)
data/epoch18/adapter_config.json
ADDED
@@ -0,0 +1,38 @@
(38 lines, identical to data/epoch13/adapter_config.json above)
data/epoch18/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9dd9d7863275caec5fc4233955ebdc97c6906cef0ca9061417d9d86e077e24b5
+size 306807976
data/epoch18/wan14b_t2v.toml
ADDED
@@ -0,0 +1,87 @@
(87 lines, identical to data/epoch13/wan14b_t2v.toml above)
data/epoch19/adapter_config.json
ADDED
@@ -0,0 +1,38 @@
(38 lines, identical to data/epoch13/adapter_config.json above)
data/epoch19/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:994d10e1a3518de692ac9cca5b013817e72d0000e309b9fe0eb4686f915cd733
+size 306807976
data/epoch19/wan14b_t2v.toml
ADDED
@@ -0,0 +1,87 @@
(87 lines, identical to data/epoch13/wan14b_t2v.toml above)
data/epoch2/adapter_config.json
ADDED
@@ -0,0 +1,38 @@
(38 lines, identical to data/epoch13/adapter_config.json above)
data/epoch2/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6f9801ef17927341881215b9af36e776aa9ddf503e54f3446f56f225fca62087
+size 306807976
data/epoch2/wan14b_t2v.toml
ADDED
@@ -0,0 +1,87 @@
(87 lines, identical to data/epoch13/wan14b_t2v.toml above)
data/epoch20/adapter_config.json
ADDED
@@ -0,0 +1,38 @@
(38 lines, identical to data/epoch13/adapter_config.json above)
data/epoch20/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5c9d2cc2090bf3474bb0367b01aba0c8b53df4b4dfe5b14498bed084b23fd6f3
+size 306807976
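The adapter weights themselves are stored through Git LFS, so the committed file is only a pointer recording the SHA-256 and byte size (about 306 MB here). After pulling the real file you can check it against the pointer; a minimal sketch using only the standard library (local path assumed):

```python
# Sketch: verify a pulled LFS object against the oid/size recorded in its pointer.
import hashlib
from pathlib import Path

path = Path("data/epoch20/adapter_model.safetensors")  # assumed local path after 'git lfs pull'
expected_oid = "5c9d2cc2090bf3474bb0367b01aba0c8b53df4b4dfe5b14498bed084b23fd6f3"
expected_size = 306807976

data = path.read_bytes()
assert len(data) == expected_size, "size mismatch"
assert hashlib.sha256(data).hexdigest() == expected_oid, "sha256 mismatch"
print("file matches the LFS pointer")
```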
data/epoch20/wan14b_t2v.toml
ADDED
@@ -0,0 +1,87 @@
+# Output path for training runs. Each training run makes a new directory in here.
+output_dir = '/data/diffusion_pipe_training_runs/wan14b_t2v_video_loras'
+
+# Dataset config file.
+dataset = 'examples/dataset.toml'
+# You can have separate eval datasets. Give them a name for TensorBoard metrics.
+# eval_datasets = [
+#     {name = 'something', config = 'path/to/eval_dataset.toml'},
+# ]
+
+# training settings
+
+# I usually set this to a really high value because I don't know how long I want to train.
+epochs = 1000
+# Batch size of a single forward/backward pass for one GPU.
+micro_batch_size_per_gpu = 1
+# Pipeline parallelism degree. A single instance of the model is divided across this many GPUs.
+pipeline_stages = 1
+# Number of micro-batches sent through the pipeline for each training step.
+# If pipeline_stages > 1, a higher GAS means better GPU utilization due to smaller pipeline bubbles (where GPUs aren't overlapping computation).
+gradient_accumulation_steps = 4
+# Grad norm clipping.
+gradient_clipping = 1.0
+# Learning rate warmup.
+warmup_steps = 100
+
+# eval settings
+
+eval_every_n_epochs = 1
+eval_before_first_step = true
+# You might want to set these lower for eval so that fewer images get dropped (the eval dataset is usually much smaller than the training set).
+# Each size bucket of images/videos is rounded down to the nearest multiple of the global batch size, so a higher global batch size means
+# more dropped images. This usually doesn't matter for training, but the eval set is much smaller, so it can matter there.
+eval_micro_batch_size_per_gpu = 1
+eval_gradient_accumulation_steps = 1
+
+# misc settings
+
+# You probably want to set this a bit higher if you have a smaller dataset, so you don't end up with a million saved models.
+save_every_n_epochs = 1
+# The training state can be checkpointed every n epochs or every n minutes. Set only one of these. You can resume from checkpoints using the --resume_from_checkpoint flag.
+#checkpoint_every_n_epochs = 1
+checkpoint_every_n_minutes = 60
+# Always set to true unless you have a huge amount of VRAM.
+activation_checkpointing = true
+# Controls how DeepSpeed decides how to divide layers across GPUs. Probably don't change this.
+partition_method = 'parameters'
+# dtype for saving the LoRA or model, if different from the training dtype.
+save_dtype = 'bfloat16'
+# Batch size for caching latents and text embeddings. Increasing this can lead to higher GPU utilization during the caching phase, but uses more memory.
+caching_batch_size = 1
+# How often DeepSpeed logs to the console.
+steps_per_print = 1
+# How to extract video clips for training from a single input video file.
+# The video file is first assigned to one of the configured frame buckets, and then one or more clips of exactly the right
+# number of frames for that bucket must be extracted.
+# single_beginning: one clip starting at the beginning of the video
+# single_middle: one clip from the middle of the video (cutting off the start and end equally)
+# multiple_overlapping: extract the minimum number of clips needed to cover the full range of the video; they may overlap somewhat
+# default is single_middle
+video_clip_mode = 'single_middle'
+
+[model]
+type = 'wan'
+ckpt_path = '/Wan/Wan2.1-T2V-14B'
+dtype = 'bfloat16'
+# You can use fp8 for the transformer when training a LoRA.
+#transformer_dtype = 'float8'
+timestep_sample_method = 'logit_normal'
+
+# For models that support full fine-tuning, simply delete or comment out the [adapter] table to do FFT.
+[adapter]
+type = 'lora'
+rank = 32
+# Dtype for the LoRA weights you are training.
+dtype = 'bfloat16'
+# You can initialize the LoRA weights from a previously trained LoRA.
+# init_from_existing = '/data/diffusion_pipe_training_runs/wan14b_t2v_video_loras/20250424_05-04-02/epoch30'
+
+[optimizer]
+# AdamW from the optimi library is a good default since it automatically uses Kahan summation when training bfloat16 weights.
+# Look at train.py for other options. You could also easily edit the file and add your own.
+type = 'adamw_optimi'
+lr = 2e-5
+betas = [0.9, 0.99]
+weight_decay = 0.01
+eps = 1e-8
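For reference, with DeepSpeed-style training the number of samples consumed per optimizer step works out to micro_batch_size_per_gpu × gradient_accumulation_steps × the data-parallel degree (GPU count divided by pipeline_stages). A tiny sketch of that arithmetic under the values above; the GPU count is an assumption:

```python
# Sketch: effective samples per optimizer step implied by the config above.
micro_batch_size_per_gpu = 1
gradient_accumulation_steps = 4
pipeline_stages = 1
num_gpus = 1  # assumption: single-GPU run

data_parallel_degree = num_gpus // pipeline_stages
print(micro_batch_size_per_gpu * gradient_accumulation_steps * data_parallel_degree)  # 4
```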
data/epoch21/adapter_config.json
ADDED
@@ -0,0 +1,38 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": null,
+  "bias": "none",
+  "corda_config": null,
+  "eva_config": null,
+  "exclude_modules": null,
+  "fan_in_fan_out": false,
+  "inference_mode": false,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_bias": false,
+  "lora_dropout": 0.0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 32,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "ffn.0",
+    "o",
+    "k",
+    "q",
+    "v",
+    "ffn.2"
+  ],
+  "task_type": null,
+  "trainable_token_indices": null,
+  "use_dora": false,
+  "use_rslora": false
+}
data/epoch21/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:71592dd3b661199fb203b733792076335175907c1178b86338a9f86a9ea6c915
+size 306807976
data/epoch21/wan14b_t2v.toml
ADDED
@@ -0,0 +1,87 @@
+# Output path for training runs. Each training run makes a new directory in here.
+output_dir = '/data/diffusion_pipe_training_runs/wan14b_t2v_video_loras'
+
+# Dataset config file.
+dataset = 'examples/dataset.toml'
+# You can have separate eval datasets. Give them a name for TensorBoard metrics.
+# eval_datasets = [
+#     {name = 'something', config = 'path/to/eval_dataset.toml'},
+# ]
+
+# training settings
+
+# I usually set this to a really high value because I don't know how long I want to train.
+epochs = 1000
+# Batch size of a single forward/backward pass for one GPU.
+micro_batch_size_per_gpu = 1
+# Pipeline parallelism degree. A single instance of the model is divided across this many GPUs.
+pipeline_stages = 1
+# Number of micro-batches sent through the pipeline for each training step.
+# If pipeline_stages > 1, a higher GAS means better GPU utilization due to smaller pipeline bubbles (where GPUs aren't overlapping computation).
+gradient_accumulation_steps = 4
+# Grad norm clipping.
+gradient_clipping = 1.0
+# Learning rate warmup.
+warmup_steps = 100
+
+# eval settings
+
+eval_every_n_epochs = 1
+eval_before_first_step = true
+# You might want to set these lower for eval so that fewer images get dropped (the eval dataset is usually much smaller than the training set).
+# Each size bucket of images/videos is rounded down to the nearest multiple of the global batch size, so a higher global batch size means
+# more dropped images. This usually doesn't matter for training, but the eval set is much smaller, so it can matter there.
+eval_micro_batch_size_per_gpu = 1
+eval_gradient_accumulation_steps = 1
+
+# misc settings
+
+# You probably want to set this a bit higher if you have a smaller dataset, so you don't end up with a million saved models.
+save_every_n_epochs = 1
+# The training state can be checkpointed every n epochs or every n minutes. Set only one of these. You can resume from checkpoints using the --resume_from_checkpoint flag.
+#checkpoint_every_n_epochs = 1
+checkpoint_every_n_minutes = 60
+# Always set to true unless you have a huge amount of VRAM.
+activation_checkpointing = true
+# Controls how DeepSpeed decides how to divide layers across GPUs. Probably don't change this.
+partition_method = 'parameters'
+# dtype for saving the LoRA or model, if different from the training dtype.
+save_dtype = 'bfloat16'
+# Batch size for caching latents and text embeddings. Increasing this can lead to higher GPU utilization during the caching phase, but uses more memory.
+caching_batch_size = 1
+# How often DeepSpeed logs to the console.
+steps_per_print = 1
+# How to extract video clips for training from a single input video file.
+# The video file is first assigned to one of the configured frame buckets, and then one or more clips of exactly the right
+# number of frames for that bucket must be extracted.
+# single_beginning: one clip starting at the beginning of the video
+# single_middle: one clip from the middle of the video (cutting off the start and end equally)
+# multiple_overlapping: extract the minimum number of clips needed to cover the full range of the video; they may overlap somewhat
+# default is single_middle
+video_clip_mode = 'single_middle'
+
+[model]
+type = 'wan'
+ckpt_path = '/Wan/Wan2.1-T2V-14B'
+dtype = 'bfloat16'
+# You can use fp8 for the transformer when training a LoRA.
+#transformer_dtype = 'float8'
+timestep_sample_method = 'logit_normal'
+
+# For models that support full fine-tuning, simply delete or comment out the [adapter] table to do FFT.
+[adapter]
+type = 'lora'
+rank = 32
+# Dtype for the LoRA weights you are training.
+dtype = 'bfloat16'
+# You can initialize the LoRA weights from a previously trained LoRA.
+# init_from_existing = '/data/diffusion_pipe_training_runs/wan14b_t2v_video_loras/20250424_05-04-02/epoch30'
+
+[optimizer]
+# AdamW from the optimi library is a good default since it automatically uses Kahan summation when training bfloat16 weights.
+# Look at train.py for other options. You could also easily edit the file and add your own.
+type = 'adamw_optimi'
+lr = 2e-5
+betas = [0.9, 0.99]
+weight_decay = 0.01
+eps = 1e-8
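Since an identical config snapshot and a fresh rank-32 adapter are written out every epoch, adjacent checkpoints can be compared directly to see how much the LoRA weights are still moving, which can help when choosing which epoch to keep. A rough sketch using the safetensors library (paths assumed, after pulling the LFS files):

```python
# Sketch: per-tensor relative change between two epoch checkpoints of the same LoRA.
from safetensors.torch import load_file

a = load_file("data/epoch20/adapter_model.safetensors")  # assumed local paths
b = load_file("data/epoch21/adapter_model.safetensors")

for name in sorted(a):
    rel = (b[name].float() - a[name].float()).norm() / (a[name].float().norm() + 1e-12)
    print(f"{name}: relative change {rel:.4f}")
```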
data/epoch22/adapter_config.json
ADDED
@@ -0,0 +1,38 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": null,
+  "bias": "none",
+  "corda_config": null,
+  "eva_config": null,
+  "exclude_modules": null,
+  "fan_in_fan_out": false,
+  "inference_mode": false,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_bias": false,
+  "lora_dropout": 0.0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 32,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "ffn.0",
+    "o",
+    "k",
+    "q",
+    "v",
+    "ffn.2"
+  ],
+  "task_type": null,
+  "trainable_token_indices": null,
+  "use_dora": false,
+  "use_rslora": false
+}
data/epoch22/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4ca5bb5666ce7caa5cfad597f2f096330f5160b5ba53711dfdd19443c6d8a755
+size 306807976
data/epoch22/wan14b_t2v.toml
ADDED
@@ -0,0 +1,87 @@
+# Output path for training runs. Each training run makes a new directory in here.
+output_dir = '/data/diffusion_pipe_training_runs/wan14b_t2v_video_loras'
+
+# Dataset config file.
+dataset = 'examples/dataset.toml'
+# You can have separate eval datasets. Give them a name for TensorBoard metrics.
+# eval_datasets = [
+#     {name = 'something', config = 'path/to/eval_dataset.toml'},
+# ]
+
+# training settings
+
+# I usually set this to a really high value because I don't know how long I want to train.
+epochs = 1000
+# Batch size of a single forward/backward pass for one GPU.
+micro_batch_size_per_gpu = 1
+# Pipeline parallelism degree. A single instance of the model is divided across this many GPUs.
+pipeline_stages = 1
+# Number of micro-batches sent through the pipeline for each training step.
+# If pipeline_stages > 1, a higher GAS means better GPU utilization due to smaller pipeline bubbles (where GPUs aren't overlapping computation).
+gradient_accumulation_steps = 4
+# Grad norm clipping.
+gradient_clipping = 1.0
+# Learning rate warmup.
+warmup_steps = 100
+
+# eval settings
+
+eval_every_n_epochs = 1
+eval_before_first_step = true
+# You might want to set these lower for eval so that fewer images get dropped (the eval dataset is usually much smaller than the training set).
+# Each size bucket of images/videos is rounded down to the nearest multiple of the global batch size, so a higher global batch size means
+# more dropped images. This usually doesn't matter for training, but the eval set is much smaller, so it can matter there.
+eval_micro_batch_size_per_gpu = 1
+eval_gradient_accumulation_steps = 1
+
+# misc settings
+
+# You probably want to set this a bit higher if you have a smaller dataset, so you don't end up with a million saved models.
+save_every_n_epochs = 1
+# The training state can be checkpointed every n epochs or every n minutes. Set only one of these. You can resume from checkpoints using the --resume_from_checkpoint flag.
+#checkpoint_every_n_epochs = 1
+checkpoint_every_n_minutes = 60
+# Always set to true unless you have a huge amount of VRAM.
+activation_checkpointing = true
+# Controls how DeepSpeed decides how to divide layers across GPUs. Probably don't change this.
+partition_method = 'parameters'
+# dtype for saving the LoRA or model, if different from the training dtype.
+save_dtype = 'bfloat16'
+# Batch size for caching latents and text embeddings. Increasing this can lead to higher GPU utilization during the caching phase, but uses more memory.
+caching_batch_size = 1
+# How often DeepSpeed logs to the console.
+steps_per_print = 1
+# How to extract video clips for training from a single input video file.
+# The video file is first assigned to one of the configured frame buckets, and then one or more clips of exactly the right
+# number of frames for that bucket must be extracted.
+# single_beginning: one clip starting at the beginning of the video
+# single_middle: one clip from the middle of the video (cutting off the start and end equally)
+# multiple_overlapping: extract the minimum number of clips needed to cover the full range of the video; they may overlap somewhat
+# default is single_middle
+video_clip_mode = 'single_middle'
+
+[model]
+type = 'wan'
+ckpt_path = '/Wan/Wan2.1-T2V-14B'
+dtype = 'bfloat16'
+# You can use fp8 for the transformer when training a LoRA.
+#transformer_dtype = 'float8'
+timestep_sample_method = 'logit_normal'
+
+# For models that support full fine-tuning, simply delete or comment out the [adapter] table to do FFT.
+[adapter]
+type = 'lora'
+rank = 32
+# Dtype for the LoRA weights you are training.
+dtype = 'bfloat16'
+# You can initialize the LoRA weights from a previously trained LoRA.
+# init_from_existing = '/data/diffusion_pipe_training_runs/wan14b_t2v_video_loras/20250424_05-04-02/epoch30'
+
+[optimizer]
+# AdamW from the optimi library is a good default since it automatically uses Kahan summation when training bfloat16 weights.
+# Look at train.py for other options. You could also easily edit the file and add your own.
+type = 'adamw_optimi'
+lr = 2e-5
+betas = [0.9, 0.99]
+weight_decay = 0.01
+eps = 1e-8
data/epoch23/adapter_config.json
ADDED
@@ -0,0 +1,38 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": null,
+  "bias": "none",
+  "corda_config": null,
+  "eva_config": null,
+  "exclude_modules": null,
+  "fan_in_fan_out": false,
+  "inference_mode": false,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_bias": false,
+  "lora_dropout": 0.0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 32,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "ffn.0",
+    "o",
+    "k",
+    "q",
+    "v",
+    "ffn.2"
+  ],
+  "task_type": null,
+  "trainable_token_indices": null,
+  "use_dora": false,
+  "use_rslora": false
+}
data/epoch23/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f8c4436c58f3dae79f1872c669936f15691587aacc06b0a6f01ca34bd10e5e78
+size 306807976