diff --git a/12000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/12000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..cc37c876867f10f35ae5350055a533a6bfcd5d43
--- /dev/null
+++ b/12000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5b05a01abaa27522c66fd248087101b4ea73fb31f8cccc4fe634ca0ea756637f
+size 5225110672
diff --git a/12000/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/12000/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5b8456b47f8aef092a202e3810f0d8bdc13f25e2
--- /dev/null
+++ b/12000/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2e8c0d30d0cc2623e936b02e486ae49b1367d40c0f48291146c7a9118e5be31b
+size 5225115920
diff --git a/12000/mp_rank_00_model_states.pt b/12000/mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a0ef786a02a6bcabb2bfa0062f2ae41382abac79
--- /dev/null
+++ b/12000/mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a7be615132f7eb2741e0c4d93d88bec6d8d4b84c12debae8b2e245fb79d69a28
+size 1741912428
diff --git a/15000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/15000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..7c10540b867de30a8a6930e01ebae1d0225720be
--- /dev/null
+++ b/15000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:787e8db493011b5574ed931c223009d127ed11cda8b44adb03e55387019faca6
+size 5225110672
diff --git a/15000/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/15000/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..95e3e72b6e1f2b685c88e265b2021661aa23c358
--- /dev/null
+++ b/15000/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:65d1a682537d589d66d60b77a68718e5cd0a79886be3ce416a9a1c92d8d622ea
+size 5225115920
diff --git a/15000/mp_rank_00_model_states.pt b/15000/mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..dd4d94e67b992b4b8bcbf3636bbe6a3f6043c7f1
--- /dev/null
+++ b/15000/mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9810d052a449d26c9a12d27ff0328abc4ec9c21c2ee72f41f1215c7395d1c6a1
+size 1741912428
diff --git a/18000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/18000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..cab9ca40da1c1c6447929de8295797bcb59310f7
--- /dev/null
+++ b/18000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fa6f2d4bb446d874c82d91a30d3ff2d9917f48be09d547cbff4d427ecaa7c35c
+size 5225110672
diff --git a/18000/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/18000/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c93a8ef6960b9f3b429f9df7cf9462c7444115fb
--- /dev/null
+++ b/18000/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9b7c8a02c96306db2ec797e438c972e0bd214f7f3bfc9a38453cf5f5c2e3fecc
+size 5225115920
diff --git a/18000/mp_rank_00_model_states.pt b/18000/mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..2143766be46c2d98090841e933a055a8dfb05437
--- /dev/null
+++ b/18000/mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2548bfa082e4eecdebcf3d0193b58dabd2fbdf2f5e99c31dc6821e40a9869ef5
+size 1741912428
diff --git a/21000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/21000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..cd4d0185ebe4456d808cde8f207da9cf9ef7b3eb
--- /dev/null
+++ b/21000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6532ba120ae2a5c3a44629f30ac346078bbdcae326c91f29b3856e36ee57c5fe
+size 5225110672
diff --git a/21000/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/21000/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d68470f4f105fa8172ada336fa667bf502cb0fee
--- /dev/null
+++ b/21000/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a3285f114bfb3ba0ad21a3172fd283a2945e0a9b3edcb538094014c667b10371
+size 5225115920
diff --git a/21000/mp_rank_00_model_states.pt b/21000/mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..6927c0b8c4a1da223e6d75a480b88abc3a166d4f
--- /dev/null
+++ b/21000/mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af458855374cc868189a87a9bd4347935ac52fcc3262cad686ce92a5dca1b6eb
+size 1741912428
diff --git a/24000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/24000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..8f28068d792fbbf286f90c6f93629f32c43569e4
--- /dev/null
+++ b/24000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:49a13baf689c042a7ad7cea939b4e0bee3df658179bec29ca41a338fc29e190d
+size 5225110672
diff --git a/24000/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/24000/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..aacacd483db6aa9583d6388f1f9e2e48aa71416c
--- /dev/null
+++ b/24000/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:77a200444f139a1ac3fed7e1e462018dece93fef7291e067aaf7d9f4f5f334c3
+size 5225115920
diff --git a/24000/mp_rank_00_model_states.pt b/24000/mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ac0773d1d01c94049fa26b4e9b9f420de89e0178
--- /dev/null
+++ b/24000/mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:76ea93450e55520fe198e4565d6503f30d4be3703c22c3f4940034a1221ff2df
+size 1741912428
diff --git a/27000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/27000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9af303343d1cf313f6c8628cbe754881f8fc6671
--- /dev/null
+++ b/27000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:92952328b73945489224c819aa00d7e2f0a7988794704cb2bd9061a8ac849428
+size 5225110672
diff --git a/27000/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/27000/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c531a6440def9690e6955d592bdfc5cd85b4e1d6
--- /dev/null
+++ b/27000/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:87fc8e7603aa37bef348cee2d0969fea273b8a2406a2cda146bfa5fbcbffd906
+size 5225115920
diff --git a/27000/mp_rank_00_model_states.pt b/27000/mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..4b0614bde16b4b9a83de55f8f25eacd0cbfb3da9
--- /dev/null
+++ b/27000/mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:458f5a92980309ae78b7ce6ae742b1fa4c4be48b1e26b08e047c4a25acf61917
+size 1741912428
diff --git a/3000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/3000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..28183c7aca5a6eb08b8862caa913b7c7274c0230
--- /dev/null
+++ b/3000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cb0ecd1e0e8149d464d3cec334b0b136a477800f1c7b34cdc976571c990e52a0
+size 5225110672
diff --git a/3000/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/3000/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..6dcaf94cf9c6b0932d81df9558eacf413bb5db67
--- /dev/null
+++ b/3000/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3036d3e7508f0de7fb8051e8523cfd432ca07593fa2f817e1f37fdbc130aa450
+size 5225115920
diff --git a/3000/mp_rank_00_model_states.pt b/3000/mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c92a5c329c4821193a48b9038c2ffee144968205
--- /dev/null
+++ b/3000/mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c5782f1cc77077c8b3e5a85fceaf7dee3b0442b21fd4c916a8a67dfe17bc9369
+size 1741912428
diff --git a/30000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/30000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..7cfe382005afe0340fe322cd8036ff1be0810f4d
--- /dev/null
+++ b/30000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3e28effa6098a81830723cb5c5d1342e13e89b752b65dbb15cd424186fb3e08f
+size 5225110672
diff --git a/30000/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/30000/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..485ac9772d9542b5f2e9dca5a300d68ec8dd00db
--- /dev/null
+++ b/30000/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:738bb601db51fb5b7f816b1c16bcaaa47afceada8b64df02b1d419a1fdae6978
+size 5225115920
diff --git a/30000/mp_rank_00_model_states.pt b/30000/mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..3089d97addaf6f25bb56cb6c32f5606792e3ea29
--- /dev/null
+++ b/30000/mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:68e94bc5d773b0776c1d78207f4d8868b25bd664efefcfb8f5023adca9c02455
+size 1741912428
diff --git a/6000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/6000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..074546bdfeb9f8e345bec3ee2df5989c5efd24f9
--- /dev/null
+++ b/6000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:41b9b9e951ac0e8f8ff3bd135406ddb800da773cc7754f92c33ec5873c88739f
+size 5225110672
diff --git a/6000/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/6000/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d0d6c26f90cf732c95b95d1fd84d14d309177560
--- /dev/null
+++ b/6000/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:222714450e59bf4c817ebcae3263900d71c6509d7b4a6972dcfc4373ff651202
+size 5225115920
diff --git a/6000/mp_rank_00_model_states.pt b/6000/mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a157781e0a8f89f1fe2415108dd1b72745ca6da1
--- /dev/null
+++ b/6000/mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aa866c6f800aad536118688f7584a473699aa694c057711a257801bc11578019
+size 1741912428
diff --git a/9000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/9000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..24a9cd31e0dbb2be2ac549d298cdd4b61620c3c3
--- /dev/null
+++ b/9000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d58276c437d022100fd60b3c303be5b69736edd9464868a816a6c15230ed4d91
+size 5225110672
diff --git a/9000/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/9000/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..42f2dfe29e2304b308916fc900388ac2c478c276
--- /dev/null
+++ b/9000/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:17ed13ec8a0882ca86f81ac34aa02060d6015057ac34c510f9db3c801486e302
+size 5225115920
diff --git a/9000/mp_rank_00_model_states.pt b/9000/mp_rank_00_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1ce98e6ddb91218fd1f3d28699ad5b359e33623a
--- /dev/null
+++ b/9000/mp_rank_00_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51c5e4f6bfa5895fe47ceffa75fcc242509b74398d13abbfa146d5fbb85dddd5
+size 1741912428
diff --git a/latest b/latest
new file mode 100644
index 0000000000000000000000000000000000000000..137b176f34f2af95fd07ddce0f2b3a75125ad367
--- /dev/null
+++ b/latest
@@ -0,0 +1 @@
+30000
\ No newline at end of file
diff --git a/logs.zip b/logs.zip
new file mode 100644
index 0000000000000000000000000000000000000000..746465bf5d84b5333f4f2fcaff7af84da92c2e12
--- /dev/null
+++ b/logs.zip
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53d329d489036299c3fefc1d497c0c110a31ca0c5c2998f88e42639c6ee217e3
+size 1012507
diff --git a/logs/text2image/1743687440.730791/events.out.tfevents.1743687440.autodl-container-d6a547ba47-8b850992.7729.1 b/logs/text2image/1743687440.730791/events.out.tfevents.1743687440.autodl-container-d6a547ba47-8b850992.7729.1
new file mode 100644
index 0000000000000000000000000000000000000000..f6b90bb1aba96586173ef6bfdf224bc30cd65b7d
--- /dev/null
+++ b/logs/text2image/1743687440.730791/events.out.tfevents.1743687440.autodl-container-d6a547ba47-8b850992.7729.1
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10438f72f6a3e869bc2ab04a40d5e7574f29b4065ef9c3726bae78daf9697fb9
+size 2092
diff --git a/logs/text2image/1743687440.7342193/hparams.yml b/logs/text2image/1743687440.7342193/hparams.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b35d602d98339de487363b90de3eb7180be84bb5
--- /dev/null
+++ b/logs/text2image/1743687440.7342193/hparams.yml
@@ -0,0 +1,35 @@
+adam_beta1: 0.9
+adam_beta2: 0.999
+adam_epsilon: 1.0e-08
+adam_weight_decay: 0.01
+allow_tf32: false
+checkpointing_steps: 500
+gradient_accumulation_steps: 1
+gradient_checkpointing: false
+image_encoder_g_path: laion/CLIP-ViT-H-14-laion2B-s32B-b79K
+image_encoder_p_path: facebook/dinov2-giant
+image_root_path: /root/autodl-tmp/data/test
+img_height: 512
+img_width: 384
+json_path: ./datasets/deepfashing/train_data.json
+learning_rate: 5.0e-06
+logging_dir: logs
+lr_num_cycles: 3
+lr_power: 1.0
+lr_scheduler: cosine_with_restarts
+lr_warmup_steps: 500
+max_grad_norm: 1.0
+max_train_steps: 20320000
+mixed_precision: null
+noise_offset: 0
+num_train_epochs: 10000
+output_dir: ./logs/stage2
+pretrained_model_name_or_path: stabilityai/stable-diffusion-2-1-base
+report_to: tensorboard
+resume_from_checkpoint: null
+scale_lr: false
+seed: null
+set_grads_to_none: false
+train_batch_size: 1
+val_batch_size: 128
+val_image_root_path: /home/y/yuansui/tryon_stage1/data/VTON/test
diff --git a/logs/text2image/1743687553.5937865/events.out.tfevents.1743687553.autodl-container-d6a547ba47-8b850992.8156.1 b/logs/text2image/1743687553.5937865/events.out.tfevents.1743687553.autodl-container-d6a547ba47-8b850992.8156.1
new file mode 100644
index 0000000000000000000000000000000000000000..4266e2066aefec70d8e41698123289ad654d819c
--- /dev/null
+++ b/logs/text2image/1743687553.5937865/events.out.tfevents.1743687553.autodl-container-d6a547ba47-8b850992.8156.1
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ced995b6a8bb0c296b27857362f711a28f54ad00e4b103a29dd8be9167e0ea7a
+size 2092
diff --git a/logs/text2image/1743687553.5972252/hparams.yml b/logs/text2image/1743687553.5972252/hparams.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b35d602d98339de487363b90de3eb7180be84bb5
--- /dev/null
+++ b/logs/text2image/1743687553.5972252/hparams.yml
@@ -0,0 +1,35 @@
+adam_beta1: 0.9
+adam_beta2: 0.999
+adam_epsilon: 1.0e-08
+adam_weight_decay: 0.01
+allow_tf32: false
+checkpointing_steps: 500
+gradient_accumulation_steps: 1
+gradient_checkpointing: false
+image_encoder_g_path: laion/CLIP-ViT-H-14-laion2B-s32B-b79K
+image_encoder_p_path: facebook/dinov2-giant
+image_root_path: /root/autodl-tmp/data/test
+img_height: 512
+img_width: 384
+json_path: ./datasets/deepfashing/train_data.json
+learning_rate: 5.0e-06
+logging_dir: logs
+lr_num_cycles: 3
+lr_power: 1.0
+lr_scheduler: cosine_with_restarts
+lr_warmup_steps: 500
+max_grad_norm: 1.0
+max_train_steps: 20320000
+mixed_precision: null
+noise_offset: 0
+num_train_epochs: 10000
+output_dir: ./logs/stage2
+pretrained_model_name_or_path: stabilityai/stable-diffusion-2-1-base
+report_to: tensorboard
+resume_from_checkpoint: null
+scale_lr: false
+seed: null
+set_grads_to_none: false
+train_batch_size: 1
+val_batch_size: 128
+val_image_root_path: /home/y/yuansui/tryon_stage1/data/VTON/test
diff --git a/logs/text2image/1743688005.537362/events.out.tfevents.1743688005.autodl-container-d6a547ba47-8b850992.8834.1 b/logs/text2image/1743688005.537362/events.out.tfevents.1743688005.autodl-container-d6a547ba47-8b850992.8834.1
new file mode 100644
index 0000000000000000000000000000000000000000..eb7468a6bbdc9f7273c9c162bb53f7e904e85979
--- /dev/null
+++ b/logs/text2image/1743688005.537362/events.out.tfevents.1743688005.autodl-container-d6a547ba47-8b850992.8834.1
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4c4c4493d3df33829f0d89231f216799b9fdd727e5041a1d3f49eee12911c30b
+size 2092
diff --git a/logs/text2image/1743688005.540771/hparams.yml b/logs/text2image/1743688005.540771/hparams.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b35d602d98339de487363b90de3eb7180be84bb5
--- /dev/null
+++ b/logs/text2image/1743688005.540771/hparams.yml
@@ -0,0 +1,35 @@
+adam_beta1: 0.9
+adam_beta2: 0.999
+adam_epsilon: 1.0e-08
+adam_weight_decay: 0.01
+allow_tf32: false
+checkpointing_steps: 500
+gradient_accumulation_steps: 1
+gradient_checkpointing: false
+image_encoder_g_path: laion/CLIP-ViT-H-14-laion2B-s32B-b79K
+image_encoder_p_path: facebook/dinov2-giant
+image_root_path: /root/autodl-tmp/data/test
+img_height: 512
+img_width: 384
+json_path: ./datasets/deepfashing/train_data.json
+learning_rate: 5.0e-06
+logging_dir: logs
+lr_num_cycles: 3
+lr_power: 1.0
+lr_scheduler: cosine_with_restarts
+lr_warmup_steps: 500
+max_grad_norm: 1.0
+max_train_steps: 20320000
+mixed_precision: null
+noise_offset: 0
+num_train_epochs: 10000
+output_dir: ./logs/stage2
+pretrained_model_name_or_path: stabilityai/stable-diffusion-2-1-base
+report_to: tensorboard
+resume_from_checkpoint: null
+scale_lr: false
+seed: null
+set_grads_to_none: false
+train_batch_size: 1
+val_batch_size: 128
+val_image_root_path: /home/y/yuansui/tryon_stage1/data/VTON/test
diff --git a/logs/text2image/1743688932.1403463/events.out.tfevents.1743688932.autodl-container-d6a547ba47-8b850992.10143.1 b/logs/text2image/1743688932.1403463/events.out.tfevents.1743688932.autodl-container-d6a547ba47-8b850992.10143.1
new file mode 100644
index 0000000000000000000000000000000000000000..2619936f1f8c6cb09047245e7c090f4f1d474e3c
--- /dev/null
+++ b/logs/text2image/1743688932.1403463/events.out.tfevents.1743688932.autodl-container-d6a547ba47-8b850992.10143.1
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e219c1d0e0253826205a6a7c81c05882d592786e546ada5b8d0ca76d6542f40b
+size 2092
diff --git a/logs/text2image/1743688932.1439452/hparams.yml b/logs/text2image/1743688932.1439452/hparams.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b35d602d98339de487363b90de3eb7180be84bb5
--- /dev/null
+++ b/logs/text2image/1743688932.1439452/hparams.yml
@@ -0,0 +1,35 @@
+adam_beta1: 0.9
+adam_beta2: 0.999
+adam_epsilon: 1.0e-08
+adam_weight_decay: 0.01
+allow_tf32: false
+checkpointing_steps: 500
+gradient_accumulation_steps: 1
+gradient_checkpointing: false
+image_encoder_g_path: laion/CLIP-ViT-H-14-laion2B-s32B-b79K
+image_encoder_p_path: facebook/dinov2-giant
+image_root_path: /root/autodl-tmp/data/test
+img_height: 512
+img_width: 384
+json_path: ./datasets/deepfashing/train_data.json
+learning_rate: 5.0e-06
+logging_dir: logs
+lr_num_cycles: 3
+lr_power: 1.0
+lr_scheduler: cosine_with_restarts
+lr_warmup_steps: 500
+max_grad_norm: 1.0
+max_train_steps: 20320000
+mixed_precision: null
+noise_offset: 0
+num_train_epochs: 10000
+output_dir: ./logs/stage2
+pretrained_model_name_or_path: stabilityai/stable-diffusion-2-1-base
+report_to: tensorboard
+resume_from_checkpoint: null
+scale_lr: false
+seed: null
+set_grads_to_none: false
+train_batch_size: 1
+val_batch_size: 128
+val_image_root_path: /home/y/yuansui/tryon_stage1/data/VTON/test
diff --git a/logs/text2image/1743697252.7320726/events.out.tfevents.1743697252.autodl-container-d6a547ba47-8b850992.3253.1 b/logs/text2image/1743697252.7320726/events.out.tfevents.1743697252.autodl-container-d6a547ba47-8b850992.3253.1
new file mode 100644
index 0000000000000000000000000000000000000000..e5880b852c0683956e8844137a6b15bcef57bae7
--- /dev/null
+++ b/logs/text2image/1743697252.7320726/events.out.tfevents.1743697252.autodl-container-d6a547ba47-8b850992.3253.1
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:13ce74ab7a17508fd3cbb30f03b04078026de849aec83fed496821894ef98e6d
+size 2092
diff --git a/logs/text2image/1743697252.7336254/hparams.yml b/logs/text2image/1743697252.7336254/hparams.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b35d602d98339de487363b90de3eb7180be84bb5
--- /dev/null
+++ b/logs/text2image/1743697252.7336254/hparams.yml
@@ -0,0 +1,35 @@
+adam_beta1: 0.9
+adam_beta2: 0.999
+adam_epsilon: 1.0e-08
+adam_weight_decay: 0.01
+allow_tf32: false
+checkpointing_steps: 500
+gradient_accumulation_steps: 1
+gradient_checkpointing: false
+image_encoder_g_path: laion/CLIP-ViT-H-14-laion2B-s32B-b79K
+image_encoder_p_path: facebook/dinov2-giant
+image_root_path: /root/autodl-tmp/data/test
+img_height: 512
+img_width: 384
+json_path: ./datasets/deepfashing/train_data.json
+learning_rate: 5.0e-06
+logging_dir: logs
+lr_num_cycles: 3
+lr_power: 1.0
+lr_scheduler: cosine_with_restarts
+lr_warmup_steps: 500
+max_grad_norm: 1.0
+max_train_steps: 20320000
+mixed_precision: null
+noise_offset: 0
+num_train_epochs: 10000
+output_dir: ./logs/stage2
+pretrained_model_name_or_path: stabilityai/stable-diffusion-2-1-base
+report_to: tensorboard
+resume_from_checkpoint: null
+scale_lr: false
+seed: null
+set_grads_to_none: false
+train_batch_size: 1
+val_batch_size: 128
+val_image_root_path: /home/y/yuansui/tryon_stage1/data/VTON/test
diff --git a/logs/text2image/1743697690.6630423/events.out.tfevents.1743697690.autodl-container-d6a547ba47-8b850992.4598.1 b/logs/text2image/1743697690.6630423/events.out.tfevents.1743697690.autodl-container-d6a547ba47-8b850992.4598.1
new file mode 100644
index 0000000000000000000000000000000000000000..7841b33967a8833efb3e10a506d2f3ffd99a0def
--- /dev/null
+++ b/logs/text2image/1743697690.6630423/events.out.tfevents.1743697690.autodl-container-d6a547ba47-8b850992.4598.1
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9cb298d6337302d12574329579d163e1b072705ff22a81e1afd634e26d8f0fc9
+size 2092
diff --git a/logs/text2image/1743697690.6647506/hparams.yml b/logs/text2image/1743697690.6647506/hparams.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b35d602d98339de487363b90de3eb7180be84bb5
--- /dev/null
+++ b/logs/text2image/1743697690.6647506/hparams.yml
@@ -0,0 +1,35 @@
+adam_beta1: 0.9
+adam_beta2: 0.999
+adam_epsilon: 1.0e-08
+adam_weight_decay: 0.01
+allow_tf32: false
+checkpointing_steps: 500
+gradient_accumulation_steps: 1
+gradient_checkpointing: false
+image_encoder_g_path: laion/CLIP-ViT-H-14-laion2B-s32B-b79K
+image_encoder_p_path: facebook/dinov2-giant
+image_root_path: /root/autodl-tmp/data/test
+img_height: 512
+img_width: 384
+json_path: ./datasets/deepfashing/train_data.json
+learning_rate: 5.0e-06
+logging_dir: logs
+lr_num_cycles: 3
+lr_power: 1.0
+lr_scheduler: cosine_with_restarts
+lr_warmup_steps: 500
+max_grad_norm: 1.0
+max_train_steps: 20320000
+mixed_precision: null
+noise_offset: 0
+num_train_epochs: 10000
+output_dir: ./logs/stage2
+pretrained_model_name_or_path: stabilityai/stable-diffusion-2-1-base
+report_to: tensorboard
+resume_from_checkpoint: null
+scale_lr: false
+seed: null
+set_grads_to_none: false
+train_batch_size: 1
+val_batch_size: 128
+val_image_root_path: /home/y/yuansui/tryon_stage1/data/VTON/test
diff --git a/logs/text2image/1743698079.674401/events.out.tfevents.1743698079.autodl-container-d6a547ba47-8b850992.5966.1 b/logs/text2image/1743698079.674401/events.out.tfevents.1743698079.autodl-container-d6a547ba47-8b850992.5966.1
new file mode 100644
index 0000000000000000000000000000000000000000..9f26bf1b23d4bb4fd1f3bdb6f39727e2da7928ca
--- /dev/null
+++ b/logs/text2image/1743698079.674401/events.out.tfevents.1743698079.autodl-container-d6a547ba47-8b850992.5966.1
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2945d4fe3c241d181d753b4c5ebdf31378fcc813e26a406d5050792eb87c3067
+size 2092
diff --git a/logs/text2image/1743698079.6762056/hparams.yml b/logs/text2image/1743698079.6762056/hparams.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b35d602d98339de487363b90de3eb7180be84bb5
--- /dev/null
+++ b/logs/text2image/1743698079.6762056/hparams.yml
@@ -0,0 +1,35 @@
+adam_beta1: 0.9
+adam_beta2: 0.999
+adam_epsilon: 1.0e-08
+adam_weight_decay: 0.01
+allow_tf32: false
+checkpointing_steps: 500
+gradient_accumulation_steps: 1
+gradient_checkpointing: false
+image_encoder_g_path: laion/CLIP-ViT-H-14-laion2B-s32B-b79K
+image_encoder_p_path: facebook/dinov2-giant
+image_root_path: /root/autodl-tmp/data/test
+img_height: 512
+img_width: 384
+json_path: ./datasets/deepfashing/train_data.json
+learning_rate: 5.0e-06
+logging_dir: logs
+lr_num_cycles: 3
+lr_power: 1.0
+lr_scheduler: cosine_with_restarts
+lr_warmup_steps: 500
+max_grad_norm: 1.0
+max_train_steps: 20320000
+mixed_precision: null
+noise_offset: 0
+num_train_epochs: 10000
+output_dir: ./logs/stage2
+pretrained_model_name_or_path: stabilityai/stable-diffusion-2-1-base
+report_to: tensorboard
+resume_from_checkpoint: null
+scale_lr: false
+seed: null
+set_grads_to_none: false
+train_batch_size: 1
+val_batch_size: 128
+val_image_root_path: /home/y/yuansui/tryon_stage1/data/VTON/test
diff --git a/logs/text2image/1743747786.0215201/events.out.tfevents.1743747786.xgpi6.1019417.1 b/logs/text2image/1743747786.0215201/events.out.tfevents.1743747786.xgpi6.1019417.1
new file mode 100644
index 0000000000000000000000000000000000000000..334ed3805de4960d4e9576533378a9c667104e5d
--- /dev/null
+++ b/logs/text2image/1743747786.0215201/events.out.tfevents.1743747786.xgpi6.1019417.1
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:45aa793fed7e728394956978b23372c428155e5a006a9af5421836fa07109429
+size 2174
diff --git a/logs/text2image/1743747786.02946/hparams.yml b/logs/text2image/1743747786.02946/hparams.yml
new file mode 100644
index 0000000000000000000000000000000000000000..f73830fedc0bed4c0c8b0b82cbe500af20a312a6
--- /dev/null
+++ b/logs/text2image/1743747786.02946/hparams.yml
@@ -0,0 +1,35 @@
+adam_beta1: 0.9
+adam_beta2: 0.999
+adam_epsilon: 1.0e-08
+adam_weight_decay: 0.01
+allow_tf32: false
+checkpointing_steps: 3000
+gradient_accumulation_steps: 2
+gradient_checkpointing: false
+image_encoder_g_path: laion/CLIP-ViT-H-14-laion2B-s32B-b79K
+image_encoder_p_path: facebook/dinov2-giant
+image_root_path: /home/y/yuansui/data/VTON/train
+img_height: 512
+img_width: 384
+json_path: ./datasets/deepfashing/train_data.json
+learning_rate: 1.0e-05
+logging_dir: logs
+lr_num_cycles: 3
+lr_power: 1.0
+lr_scheduler: cosine_with_restarts
+lr_warmup_steps: 5000
+max_grad_norm: 1.0
+max_train_steps: 30010
+mixed_precision: bf16
+noise_offset: 0.1
+num_train_epochs: 969
+output_dir: ./logs/stage2
+pretrained_model_name_or_path: stabilityai/stable-diffusion-2-1-base
+report_to: tensorboard
+resume_from_checkpoint: null
+scale_lr: false
+seed: 42
+set_grads_to_none: false
+train_batch_size: 48
+val_batch_size: 32
+val_image_root_path: /home/y/yuansui/tryon_stage1/data/VTON/test
diff --git a/logs/text2image/1743812190.3328395/events.out.tfevents.1743812190.xgpi6.2180967.1 b/logs/text2image/1743812190.3328395/events.out.tfevents.1743812190.xgpi6.2180967.1
new file mode 100644
index 0000000000000000000000000000000000000000..74311554adadcf731fd5fe8156c72245be335cb2
--- /dev/null
+++ b/logs/text2image/1743812190.3328395/events.out.tfevents.1743812190.xgpi6.2180967.1
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1ea77940b508fd56b25e70bc05741aef6560052b2214e359552fc54c02351703
+size 2174
diff --git a/logs/text2image/1743812190.3426733/hparams.yml b/logs/text2image/1743812190.3426733/hparams.yml
new file mode 100644
index 0000000000000000000000000000000000000000..f73830fedc0bed4c0c8b0b82cbe500af20a312a6
--- /dev/null
+++ b/logs/text2image/1743812190.3426733/hparams.yml
@@ -0,0 +1,35 @@
+adam_beta1: 0.9
+adam_beta2: 0.999
+adam_epsilon: 1.0e-08
+adam_weight_decay: 0.01
+allow_tf32: false
+checkpointing_steps: 3000
+gradient_accumulation_steps: 2
+gradient_checkpointing: false
+image_encoder_g_path: laion/CLIP-ViT-H-14-laion2B-s32B-b79K
+image_encoder_p_path: facebook/dinov2-giant
+image_root_path: /home/y/yuansui/data/VTON/train
+img_height: 512
+img_width: 384
+json_path: ./datasets/deepfashing/train_data.json
+learning_rate: 1.0e-05
+logging_dir: logs
+lr_num_cycles: 3
+lr_power: 1.0
+lr_scheduler: cosine_with_restarts
+lr_warmup_steps: 5000
+max_grad_norm: 1.0
+max_train_steps: 30010
+mixed_precision: bf16
+noise_offset: 0.1
+num_train_epochs: 969
+output_dir: ./logs/stage2
+pretrained_model_name_or_path: stabilityai/stable-diffusion-2-1-base
+report_to: tensorboard
+resume_from_checkpoint: null
+scale_lr: false
+seed: 42
+set_grads_to_none: false
+train_batch_size: 48
+val_batch_size: 32
+val_image_root_path: /home/y/yuansui/tryon_stage1/data/VTON/test
diff --git a/logs/text2image/1743812860.5995035/events.out.tfevents.1743812860.xgpi1.163180.1 b/logs/text2image/1743812860.5995035/events.out.tfevents.1743812860.xgpi1.163180.1
new file mode 100644
index 0000000000000000000000000000000000000000..e8180cf8ec14d1a3505039f9231933e558afef19
--- /dev/null
+++ b/logs/text2image/1743812860.5995035/events.out.tfevents.1743812860.xgpi1.163180.1
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ba7662df8290be5b377c2495575295d38600f23317b6775db56b0b5fe4423de2
+size 2174
diff --git a/logs/text2image/1743812860.6084385/hparams.yml b/logs/text2image/1743812860.6084385/hparams.yml
new file mode 100644
index 0000000000000000000000000000000000000000..f73830fedc0bed4c0c8b0b82cbe500af20a312a6
--- /dev/null
+++ b/logs/text2image/1743812860.6084385/hparams.yml
@@ -0,0 +1,35 @@
+adam_beta1: 0.9
+adam_beta2: 0.999
+adam_epsilon: 1.0e-08
+adam_weight_decay: 0.01
+allow_tf32: false
+checkpointing_steps: 3000
+gradient_accumulation_steps: 2
+gradient_checkpointing: false
+image_encoder_g_path: laion/CLIP-ViT-H-14-laion2B-s32B-b79K
+image_encoder_p_path: facebook/dinov2-giant
+image_root_path: /home/y/yuansui/data/VTON/train
+img_height: 512
+img_width: 384
+json_path: ./datasets/deepfashing/train_data.json
+learning_rate: 1.0e-05
+logging_dir: logs
+lr_num_cycles: 3
+lr_power: 1.0
+lr_scheduler: cosine_with_restarts
+lr_warmup_steps: 5000
+max_grad_norm: 1.0
+max_train_steps: 30010
+mixed_precision: bf16
+noise_offset: 0.1
+num_train_epochs: 969
+output_dir: ./logs/stage2
+pretrained_model_name_or_path: stabilityai/stable-diffusion-2-1-base
+report_to: tensorboard
+resume_from_checkpoint: null
+scale_lr: false
+seed: 42
+set_grads_to_none: false
+train_batch_size: 48
+val_batch_size: 32
+val_image_root_path: /home/y/yuansui/tryon_stage1/data/VTON/test
diff --git a/logs/text2image/1743812959.0772085/events.out.tfevents.1743812959.xgpi1.163397.1 b/logs/text2image/1743812959.0772085/events.out.tfevents.1743812959.xgpi1.163397.1
new file mode 100644
index 0000000000000000000000000000000000000000..8fda244d0635c367a243b6536ba33d73c83e1c1b
--- /dev/null
+++ b/logs/text2image/1743812959.0772085/events.out.tfevents.1743812959.xgpi1.163397.1
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac14d212abc9cafe30149424f597e83acc222df33b7cd48caafbf3d205d889a5
+size 2174
diff --git a/logs/text2image/1743812959.0871334/hparams.yml b/logs/text2image/1743812959.0871334/hparams.yml
new file mode 100644
index 0000000000000000000000000000000000000000..f73830fedc0bed4c0c8b0b82cbe500af20a312a6
--- /dev/null
+++ b/logs/text2image/1743812959.0871334/hparams.yml
@@ -0,0 +1,35 @@
+adam_beta1: 0.9
+adam_beta2: 0.999
+adam_epsilon: 1.0e-08
+adam_weight_decay: 0.01
+allow_tf32: false
+checkpointing_steps: 3000
+gradient_accumulation_steps: 2
+gradient_checkpointing: false
+image_encoder_g_path: laion/CLIP-ViT-H-14-laion2B-s32B-b79K
+image_encoder_p_path: facebook/dinov2-giant
+image_root_path: /home/y/yuansui/data/VTON/train
+img_height: 512
+img_width: 384
+json_path: ./datasets/deepfashing/train_data.json
+learning_rate: 1.0e-05
+logging_dir: logs
+lr_num_cycles: 3
+lr_power: 1.0
+lr_scheduler: cosine_with_restarts
+lr_warmup_steps: 5000
+max_grad_norm: 1.0
+max_train_steps: 30010
+mixed_precision: bf16
+noise_offset: 0.1
+num_train_epochs: 969
+output_dir: ./logs/stage2
+pretrained_model_name_or_path: stabilityai/stable-diffusion-2-1-base
+report_to: tensorboard
+resume_from_checkpoint: null
+scale_lr: false
+seed: 42
+set_grads_to_none: false
+train_batch_size: 48
+val_batch_size: 32
+val_image_root_path: /home/y/yuansui/tryon_stage1/data/VTON/test
diff --git a/logs/text2image/1743813073.0578775/events.out.tfevents.1743813073.xgpi6.2181602.1 b/logs/text2image/1743813073.0578775/events.out.tfevents.1743813073.xgpi6.2181602.1
new file mode 100644
index 0000000000000000000000000000000000000000..de3c268b0c1c6599b2adf28f89a31dd55d4811b1
--- /dev/null
+++ b/logs/text2image/1743813073.0578775/events.out.tfevents.1743813073.xgpi6.2181602.1
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a5b4ddf6944ab9b28fff02fa14d5cef008348d3f30218a434ff4f0052a5acd47
+size 2174
diff --git a/logs/text2image/1743813073.0666804/hparams.yml b/logs/text2image/1743813073.0666804/hparams.yml
new file mode 100644
index 0000000000000000000000000000000000000000..f73830fedc0bed4c0c8b0b82cbe500af20a312a6
--- /dev/null
+++ b/logs/text2image/1743813073.0666804/hparams.yml
@@ -0,0 +1,35 @@
+adam_beta1: 0.9
+adam_beta2: 0.999
+adam_epsilon: 1.0e-08
+adam_weight_decay: 0.01
+allow_tf32: false
+checkpointing_steps: 3000
+gradient_accumulation_steps: 2
+gradient_checkpointing: false
+image_encoder_g_path: laion/CLIP-ViT-H-14-laion2B-s32B-b79K
+image_encoder_p_path: facebook/dinov2-giant
+image_root_path: /home/y/yuansui/data/VTON/train
+img_height: 512
+img_width: 384
+json_path: ./datasets/deepfashing/train_data.json
+learning_rate: 1.0e-05
+logging_dir: logs
+lr_num_cycles: 3
+lr_power: 1.0
+lr_scheduler: cosine_with_restarts
+lr_warmup_steps: 5000
+max_grad_norm: 1.0
+max_train_steps: 30010
+mixed_precision: bf16
+noise_offset: 0.1
+num_train_epochs: 969
+output_dir: ./logs/stage2
+pretrained_model_name_or_path: stabilityai/stable-diffusion-2-1-base
+report_to: tensorboard
+resume_from_checkpoint: null
+scale_lr: false
+seed: 42
+set_grads_to_none: false
+train_batch_size: 48
+val_batch_size: 32
+val_image_root_path: /home/y/yuansui/tryon_stage1/data/VTON/test
diff --git a/logs/text2image/events.out.tfevents.1743687440.autodl-container-d6a547ba47-8b850992.7729.0 b/logs/text2image/events.out.tfevents.1743687440.autodl-container-d6a547ba47-8b850992.7729.0
new file mode 100644
index 0000000000000000000000000000000000000000..9fa83b3cc43311cdafdeea886d174ed9cc3af3ec
--- /dev/null
+++ b/logs/text2image/events.out.tfevents.1743687440.autodl-container-d6a547ba47-8b850992.7729.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:766fd80e0e7050e729f2db21e253d9614d83306349b02194f6bcdebfe08450d0
+size 88
diff --git a/logs/text2image/events.out.tfevents.1743687553.autodl-container-d6a547ba47-8b850992.8156.0 b/logs/text2image/events.out.tfevents.1743687553.autodl-container-d6a547ba47-8b850992.8156.0
new file mode 100644
index 0000000000000000000000000000000000000000..5ccbd1cbf79cc07f46a3ef64675f8c7ec6a71f91
--- /dev/null
+++ b/logs/text2image/events.out.tfevents.1743687553.autodl-container-d6a547ba47-8b850992.8156.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fdb55a44cfd345c8eebffbe7bc50ddd3f615ebf360215a9dbfa762249eacca0b
+size 88
diff --git a/logs/text2image/events.out.tfevents.1743688005.autodl-container-d6a547ba47-8b850992.8834.0 b/logs/text2image/events.out.tfevents.1743688005.autodl-container-d6a547ba47-8b850992.8834.0
new file mode 100644
index 0000000000000000000000000000000000000000..3f84665d3f4c215a8eb800e509d03fbc996067a2
--- /dev/null
+++ b/logs/text2image/events.out.tfevents.1743688005.autodl-container-d6a547ba47-8b850992.8834.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4660c12f9d07d74f4ddd2e6a98e9bd910ab5c47692d21f43b67ec5713f150637
+size 252
diff --git a/logs/text2image/events.out.tfevents.1743688932.autodl-container-d6a547ba47-8b850992.10143.0 b/logs/text2image/events.out.tfevents.1743688932.autodl-container-d6a547ba47-8b850992.10143.0
new file mode 100644
index 0000000000000000000000000000000000000000..0e2caa0d38a1c91e044af31ef41f15fa6a6b8241
--- /dev/null
+++ b/logs/text2image/events.out.tfevents.1743688932.autodl-container-d6a547ba47-8b850992.10143.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a2bcb53d1981f9025e53143c780fd54fc5c57e946455364664d361362352852c
+size 170
diff --git a/logs/text2image/events.out.tfevents.1743697252.autodl-container-d6a547ba47-8b850992.3253.0 b/logs/text2image/events.out.tfevents.1743697252.autodl-container-d6a547ba47-8b850992.3253.0
new file mode 100644
index 0000000000000000000000000000000000000000..3c608cafa5638cec0fe003a8c7e4a5afb3ab7b08
--- /dev/null
+++ b/logs/text2image/events.out.tfevents.1743697252.autodl-container-d6a547ba47-8b850992.3253.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:abc49592517da2afdd7ad4de43736207dc604ad8ba759241c2e0ba8689154a45
+size 21170
diff --git a/logs/text2image/events.out.tfevents.1743697690.autodl-container-d6a547ba47-8b850992.4598.0 b/logs/text2image/events.out.tfevents.1743697690.autodl-container-d6a547ba47-8b850992.4598.0
new file mode 100644
index 0000000000000000000000000000000000000000..6c905a5dba934a2b4517450435436cb295d6656e
--- /dev/null
+++ b/logs/text2image/events.out.tfevents.1743697690.autodl-container-d6a547ba47-8b850992.4598.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:beaaa1ed37a8da5d5320fe9f2d7db2028b6e1f10a123c2bdf5f575fa384909e3
+size 24866
diff --git a/logs/text2image/events.out.tfevents.1743698079.autodl-container-d6a547ba47-8b850992.5966.0 b/logs/text2image/events.out.tfevents.1743698079.autodl-container-d6a547ba47-8b850992.5966.0
new file mode 100644
index 0000000000000000000000000000000000000000..025093cd7f8020cac629df13abd2d8cb13c79b2b
--- /dev/null
+++ b/logs/text2image/events.out.tfevents.1743698079.autodl-container-d6a547ba47-8b850992.5966.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b90da6be35553c87aa9c9e5a3d63f9b037ed37f026b938589bd55c199d32aa4
+size 6156
diff --git a/logs/text2image/events.out.tfevents.1743747786.xgpi6.1019417.0 b/logs/text2image/events.out.tfevents.1743747786.xgpi6.1019417.0
new file mode 100644
index 0000000000000000000000000000000000000000..cf413eb2c1013a293338bbe498a375c6098c781f
--- /dev/null
+++ b/logs/text2image/events.out.tfevents.1743747786.xgpi6.1019417.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8998b68fd53f056f1b4a20ceefbfe2da5993f6b1cc50fe91d60a6a8f0bd13ecb
+size 88
diff --git a/logs/text2image/events.out.tfevents.1743812190.xgpi6.2180967.0 b/logs/text2image/events.out.tfevents.1743812190.xgpi6.2180967.0
new file mode 100644
index 0000000000000000000000000000000000000000..0230aeb4b27018bb095560bed590ef5b08e2b5cd
--- /dev/null
+++ b/logs/text2image/events.out.tfevents.1743812190.xgpi6.2180967.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d2bb3e9b5c9a8e71e19cd2944fc07bbd1fcb23903816c734368b099e40e3342b
+size 88
diff --git a/logs/text2image/events.out.tfevents.1743812860.xgpi1.163180.0 b/logs/text2image/events.out.tfevents.1743812860.xgpi1.163180.0
new file mode 100644
index 0000000000000000000000000000000000000000..9eb74c9848f139884acc049e20c15e0de5834d85
--- /dev/null
+++ b/logs/text2image/events.out.tfevents.1743812860.xgpi1.163180.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dca5ac8c1a2526ae81f3b09c1583f4a2f7365ee1eb8263b8cf44858319201fcf
+size 416
diff --git a/logs/text2image/events.out.tfevents.1743812959.xgpi1.163397.0 b/logs/text2image/events.out.tfevents.1743812959.xgpi1.163397.0
new file mode 100644
index 0000000000000000000000000000000000000000..9a0f974044373808e59e92d70ee107a8da7d2547
--- /dev/null
+++ b/logs/text2image/events.out.tfevents.1743812959.xgpi1.163397.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:befa01b41495d8899b354e898df677532735f4a06f64de731d451c94b71db3b4
+size 1564
diff --git a/logs/text2image/events.out.tfevents.1743813073.xgpi6.2181602.0 b/logs/text2image/events.out.tfevents.1743813073.xgpi6.2181602.0
new file mode 100644
index 0000000000000000000000000000000000000000..a75cd15ef3ac78e67a0768a3636953277b50688f
--- /dev/null
+++ b/logs/text2image/events.out.tfevents.1743813073.xgpi6.2181602.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:190b375fe963049f9b877e88b414d4a4251e9b3f03cfd4488e93418ea0959c00
+size 2553346
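Note on the layout above: each step directory (3000 through 30000, with the `latest` file pointing at `30000`) holds one `mp_rank_00_model_states.pt` plus two `bf16_zero_pp_rank_{0,1}_mp_rank_00_optim_states.pt` shards, i.e. a bf16 ZeRO checkpoint written by two data-parallel ranks (matching `mixed_precision: bf16` in the later hparams.yml files). The `zero_to_fp32.py` script added next consolidates such shards into a single fp32 state dict. A minimal sketch of driving it against this layout, assuming the repo is checked out with LFS objects materialized and DeepSpeed installed (the call signature comes from the script itself; only the paths are taken from this diff):

    # Hedged sketch: run from the repository root that contains the step dirs.
    from zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

    # "." is the checkpoint root; tag "30000" selects the step directory.
    # Omitting tag would make the script read it from the `latest` file,
    # which contains 30000.
    state_dict = get_fp32_state_dict_from_zero_checkpoint(".", tag="30000")
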
--git a/zero_to_fp32.py b/zero_to_fp32.py new file mode 100644 index 0000000000000000000000000000000000000000..0e759146cadd92ddfefab3680146c2bd6a2b5c04 --- /dev/null +++ b/zero_to_fp32.py @@ -0,0 +1,760 @@ +#!/usr/bin/env python + +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets +# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in +# the future. Once extracted, the weights don't require DeepSpeed and can be used in any +# application. +# +# example: +# python zero_to_fp32.py . output_dir/ +# or +# python zero_to_fp32.py . output_dir/ --safe_serialization + +import argparse +import torch +import glob +import math +import os +import re +import gc +import json +import numpy as np +from tqdm import tqdm +from collections import OrderedDict +from dataclasses import dataclass + +# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with +# DeepSpeed data structures it has to be available in the current python environment. +from deepspeed.utils import logger +from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS, + FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES, + FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS) + + +@dataclass +class zero_model_state: + buffers: dict() + param_shapes: dict() + shared_params: list + ds_version: int + frozen_param_shapes: dict() + frozen_param_fragments: dict() + + +debug = 0 + +# load to cpu +device = torch.device('cpu') + + +def atoi(text): + return int(text) if text.isdigit() else text + + +def natural_keys(text): + ''' + alist.sort(key=natural_keys) sorts in human order + http://nedbatchelder.com/blog/200712/human_sorting.html + (See Toothy's implementation in the comments) + ''' + return [atoi(c) for c in re.split(r'(\d+)', text)] + + +def get_model_state_file(checkpoint_dir, zero_stage): + if not os.path.isdir(checkpoint_dir): + raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist") + + # there should be only one file + if zero_stage <= 2: + file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt") + elif zero_stage == 3: + file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt") + + if not os.path.exists(file): + raise FileNotFoundError(f"can't find model states file at '{file}'") + + return file + + +def get_checkpoint_files(checkpoint_dir, glob_pattern): + # XXX: need to test that this simple glob rule works for multi-node setup too + ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys) + + if len(ckpt_files) == 0: + raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'") + + return ckpt_files + + +def get_optim_files(checkpoint_dir): + return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt") + + +def get_model_state_files(checkpoint_dir): + return get_checkpoint_files(checkpoint_dir, "*_model_states.pt") + + +def parse_model_states(files): + zero_model_states = [] + for file in files: + state_dict = torch.load(file, map_location=device, weights_only=False) + + if BUFFER_NAMES not in state_dict: + raise ValueError(f"{file} is not a model state checkpoint") + buffer_names = state_dict[BUFFER_NAMES] + if debug: + print("Found buffers:", buffer_names) + + # recover just the buffers while restoring them to fp32 
if they were saved in fp16 + buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names} + param_shapes = state_dict[PARAM_SHAPES] + + # collect parameters that are included in param_shapes + param_names = [] + for s in param_shapes: + for name in s.keys(): + param_names.append(name) + + # update with frozen parameters + frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None) + if frozen_param_shapes is not None: + if debug: + print(f"Found frozen_param_shapes: {frozen_param_shapes}") + param_names += list(frozen_param_shapes.keys()) + + # handle shared params + shared_params = [[k, v] for k, v in state_dict["shared_params"].items()] + + ds_version = state_dict.get(DS_VERSION, None) + + frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None) + + z_model_state = zero_model_state(buffers=buffers, + param_shapes=param_shapes, + shared_params=shared_params, + ds_version=ds_version, + frozen_param_shapes=frozen_param_shapes, + frozen_param_fragments=frozen_param_fragments) + zero_model_states.append(z_model_state) + + return zero_model_states + + +def parse_optim_states(files, ds_checkpoint_dir): + total_files = len(files) + state_dicts = [] + for f in tqdm(files, desc='Loading checkpoint shards'): + state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False) + # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights + # and also handle the case where it was already removed by another helper script + state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None) + state_dicts.append(state_dict) + + if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]: + raise ValueError(f"{files[0]} is not a zero checkpoint") + zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE] + world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT] + + # For ZeRO-2 each param group can have different partition_count as data parallelism for expert + # parameters can be different from data parallelism for non-expert parameters. So we can just + # use the max of the partition_count to get the dp world_size. + + if type(world_size) is list: + world_size = max(world_size) + + if world_size != total_files: + raise ValueError( + f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. " + "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes." 
+ ) + + # the groups are named differently in each stage + if zero_stage <= 2: + fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS + elif zero_stage == 3: + fp32_groups_key = FP32_FLAT_GROUPS + else: + raise ValueError(f"unknown zero stage {zero_stage}") + + fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))] + return zero_stage, world_size, fp32_flat_groups + + +def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters): + """ + Returns fp32 state_dict reconstructed from ds checkpoint + + Args: + - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are) + + """ + print(f"Processing zero checkpoint '{ds_checkpoint_dir}'") + + optim_files = get_optim_files(ds_checkpoint_dir) + zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir) + print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}") + + model_files = get_model_state_files(ds_checkpoint_dir) + + zero_model_states = parse_model_states(model_files) + print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}') + + if zero_stage <= 2: + return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters) + elif zero_stage == 3: + return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters) + + +def _zero2_merge_frozen_params(state_dict, zero_model_states): + if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: + return + + frozen_param_shapes = zero_model_states[0].frozen_param_shapes + frozen_param_fragments = zero_model_states[0].frozen_param_fragments + + if debug: + num_elem = sum(s.numel() for s in frozen_param_shapes.values()) + print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') + + wanted_params = len(frozen_param_shapes) + wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) + avail_numel = sum([p.numel() for p in frozen_param_fragments.values()]) + print(f'Frozen params: Have {avail_numel} numels to process.') + print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') + + total_params = 0 + total_numel = 0 + for name, shape in frozen_param_shapes.items(): + total_params += 1 + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + + state_dict[name] = frozen_param_fragments[name] + + if debug: + print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") + + print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") + + +def _has_callable(obj, fn): + attr = getattr(obj, fn, None) + return callable(attr) + + +def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): + param_shapes = zero_model_states[0].param_shapes + + # Reconstruction protocol: + # + # XXX: document this + + if debug: + for i in range(world_size): + for j in range(len(fp32_flat_groups[0])): + print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}") + + # XXX: memory usage doubles here (zero2) + num_param_groups = len(fp32_flat_groups[0]) + merged_single_partition_of_fp32_groups = [] + for i in range(num_param_groups): + merged_partitions = [sd[i] for sd in fp32_flat_groups] + full_single_fp32_vector = torch.cat(merged_partitions, 0) + 
merged_single_partition_of_fp32_groups.append(full_single_fp32_vector) + avail_numel = sum( + [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups]) + + if debug: + wanted_params = sum([len(shapes) for shapes in param_shapes]) + wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes]) + # not asserting if there is a mismatch due to possible padding + print(f"Have {avail_numel} numels to process.") + print(f"Need {wanted_numel} numels in {wanted_params} params.") + + # params + # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support + # out-of-core computing solution + total_numel = 0 + total_params = 0 + for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups): + offset = 0 + avail_numel = full_single_fp32_vector.numel() + for name, shape in shapes.items(): + + unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape) + total_numel += unpartitioned_numel + total_params += 1 + + if debug: + print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") + state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape) + offset += unpartitioned_numel + + # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and + # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex + # paddings performed in the code it's almost impossible to predict the exact numbers w/o the + # live optimizer object, so we are checking that the numbers are within the right range + align_to = 2 * world_size + + def zero2_align(x): + return align_to * math.ceil(x / align_to) + + if debug: + print(f"original offset={offset}, avail_numel={avail_numel}") + + offset = zero2_align(offset) + avail_numel = zero2_align(avail_numel) + + if debug: + print(f"aligned offset={offset}, avail_numel={avail_numel}") + + # Sanity check + if offset != avail_numel: + raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") + + print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements") + + +def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters): + state_dict = OrderedDict() + + # buffers + buffers = zero_model_states[0].buffers + state_dict.update(buffers) + if debug: + print(f"added {len(buffers)} buffers") + + if not exclude_frozen_parameters: + _zero2_merge_frozen_params(state_dict, zero_model_states) + + _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) + + # recover shared parameters + for pair in zero_model_states[0].shared_params: + if pair[1] in state_dict: + state_dict[pair[0]] = state_dict[pair[1]] + + return state_dict + + +def zero3_partitioned_param_info(unpartitioned_numel, world_size): + remainder = unpartitioned_numel % world_size + padding_numel = (world_size - remainder) if remainder else 0 + partitioned_numel = math.ceil(unpartitioned_numel / world_size) + return partitioned_numel, padding_numel + + +def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states): + if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: + return + + if debug: + for i in range(world_size): + num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values()) + print(f'rank {i}: 
{FROZEN_PARAM_SHAPES}.numel = {num_elem}') + + frozen_param_shapes = zero_model_states[0].frozen_param_shapes + wanted_params = len(frozen_param_shapes) + wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) + avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size + print(f'Frozen params: Have {avail_numel} numels to process.') + print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') + + total_params = 0 + total_numel = 0 + for name, shape in zero_model_states[0].frozen_param_shapes.items(): + total_params += 1 + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + + param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states) + state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape) + + partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) + + if debug: + print( + f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" + ) + + print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") + + +class GatheredTensor: + """ + A pseudo tensor that collects partitioned weights. + It is more memory efficient when there are multiple groups. + """ + + def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape): + self.flat_groups = flat_groups + self.flat_groups_offset = flat_groups_offset + self.offset = offset + self.partitioned_numel = partitioned_numel + self.shape = shape + self.dtype = self.flat_groups[0][0].dtype + + def contiguous(self): + """ + Merge partitioned weights from flat_groups into a single tensor. 
+ """ + end_idx = self.offset + self.partitioned_numel + world_size = len(self.flat_groups) + pad_flat_param_chunks = [] + + for rank_i in range(world_size): + # for each rank, we need to collect weights from related group/groups + flat_groups_at_rank_i = self.flat_groups[rank_i] + start_group_id = None + end_group_id = None + for group_id in range(len(self.flat_groups_offset)): + if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]: + start_group_id = group_id + if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]: + end_group_id = group_id + break + # collect weights from related group/groups + for group_id in range(start_group_id, end_group_id + 1): + flat_tensor = flat_groups_at_rank_i[group_id] + start_offset = self.offset - self.flat_groups_offset[group_id] + end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id] + pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset]) + + # collect weights from all ranks + pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0) + param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous() + return param + + +def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): + param_shapes = zero_model_states[0].param_shapes + avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size + + # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each + # param, re-consolidating each param, while dealing with padding if any + + # merge list of dicts, preserving order + param_shapes = {k: v for d in param_shapes for k, v in d.items()} + + if debug: + for i in range(world_size): + print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}") + + wanted_params = len(param_shapes) + wanted_numel = sum(shape.numel() for shape in param_shapes.values()) + # not asserting if there is a mismatch due to possible padding + avail_numel = fp32_flat_groups[0].numel() * world_size + print(f"Trainable params: Have {avail_numel} numels to process.") + print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.") + + # params + # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support + # out-of-core computing solution + offset = 0 + total_numel = 0 + total_params = 0 + flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]])) + for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'): + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + total_params += 1 + partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) + + if debug: + print( + f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" + ) + + # memory efficient tensor + tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape) + state_dict[name] = tensor + offset += partitioned_numel + + offset *= world_size + + # Sanity check + if offset != avail_numel: + raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") + + print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements") + + +def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, 
+
+
+def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+                                               exclude_frozen_parameters):
+    state_dict = OrderedDict()
+
+    # buffers
+    buffers = zero_model_states[0].buffers
+    state_dict.update(buffers)
+    if debug:
+        print(f"added {len(buffers)} buffers")
+
+    if not exclude_frozen_parameters:
+        _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+    _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+    # recover shared parameters
+    for pair in zero_model_states[0].shared_params:
+        if pair[1] in state_dict:
+            state_dict[pair[0]] = state_dict[pair[1]]
+
+    return state_dict
+
+
+def to_torch_tensor(state_dict, return_empty_tensor=False):
+    """
+    Convert a state_dict of GatheredTensor entries to torch tensors.
+    """
+    torch_state_dict = {}
+    converted_tensors = {}
+    for name, tensor in state_dict.items():
+        tensor_id = id(tensor)
+        if tensor_id in converted_tensors:  # shared tensors
+            shared_tensor = torch_state_dict[converted_tensors[tensor_id]]
+            torch_state_dict[name] = shared_tensor
+        else:
+            converted_tensors[tensor_id] = name
+            if return_empty_tensor:
+                torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype)
+            else:
+                torch_state_dict[name] = tensor.contiguous()
+    return torch_state_dict
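+
+
+# Hypothetical example of the id()-based aliasing above: when two names point
+# at the same tensor (tied weights), it is materialized once and both entries
+# reference the same torch tensor.
+def _example_shared_tensor_aliasing():
+    tied = torch.zeros(2)
+    out = to_torch_tensor({'a': tied, 'b': tied})
+    assert out['a'] is out['b']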
+
+
+def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
+                                             tag=None,
+                                             exclude_frozen_parameters=False,
+                                             lazy_mode=False):
+    """
+    Convert a ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+    ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+    via a model hub.
+
+    Args:
+        - ``checkpoint_dir``: path to the desired checkpoint folder
+        - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will
+          attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+        - ``exclude_frozen_parameters``: exclude frozen parameters
+        - ``lazy_mode``: get the state_dict in lazy mode. It returns a dict of pseudo tensors instead of
+          torch tensors, which is more memory-efficient. Convert a pseudo tensor to a torch tensor with
+          ``.contiguous()``
+
+    Returns:
+        - pytorch ``state_dict``
+
+    A typical usage might be ::
+
+        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+        # do the training and checkpoint saving
+        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+        model = model.cpu() # move to cpu
+        model.load_state_dict(state_dict)
+        # submit to model hub or save the model to share with others
+
+    In this example the ``model`` will no longer be usable in the deepspeed context of the same
+    application, i.e. you will need to re-initialize the deepspeed engine, since
+    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+    Note: the above usage may not work if your application doesn't have sufficient free CPU memory.
+    You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
+    the checkpoint. Or you can load the state_dict in lazy mode ::
+
+        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu
+        for name, lazy_tensor in state_dict.items():
+            tensor = lazy_tensor.contiguous()  # to cpu
+            print(name, tensor)
+            # del the tensor to release memory if it is no longer in use
+    """
+    if tag is None:
+        latest_path = os.path.join(checkpoint_dir, 'latest')
+        if os.path.isfile(latest_path):
+            with open(latest_path, 'r') as fd:
+                tag = fd.read().strip()
+        else:
+            raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+    if not os.path.isdir(ds_checkpoint_dir):
+        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+    state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
+    if lazy_mode:
+        return state_dict
+    else:
+        return to_torch_tensor(state_dict)
+
+
+def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
+                                               output_dir,
+                                               max_shard_size="5GB",
+                                               safe_serialization=False,
+                                               tag=None,
+                                               exclude_frozen_parameters=False):
+    """
+    Convert a ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+    Args:
+        - ``checkpoint_dir``: path to the desired checkpoint folder (one that contains the tag-folder, like ``global_step14``)
+        - ``output_dir``: directory for the pytorch fp32 state_dict output files
+        - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
+        - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
+        - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+        - ``exclude_frozen_parameters``: exclude frozen parameters
+    """
+
+    # Dependency pre-check
+    if safe_serialization:
+        try:
+            from safetensors.torch import save_file
+        except ImportError:
+            print('If you want to use `safe_serialization`, please `pip install safetensors`')
+            raise
+    if max_shard_size is not None:
+        try:
+            from huggingface_hub import split_torch_state_dict_into_shards
+        except ImportError:
+            print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
+            raise
+
+    # Convert the zero checkpoint to a lazy state_dict
+    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
+                                                          tag,
+                                                          exclude_frozen_parameters,
+                                                          lazy_mode=True)
+
+    # Shard the model if it is too big.
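+    # The shard plan below is computed from empty (shape/dtype-only) tensors via
+    # to_torch_tensor(..., return_empty_tensor=True), so no weights need to be
+    # materialized just to decide the split; each shard is then gathered,
+    # written and freed one at a time.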
+    weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
+    if max_shard_size is not None:
+        filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
+        # a memory-efficient approach to sharding
+        empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True)
+        state_dict_split = split_torch_state_dict_into_shards(empty_state_dict,
+                                                              filename_pattern=filename_pattern,
+                                                              max_shard_size=max_shard_size)
+    else:
+        from collections import namedtuple
+        StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
+        state_dict_split = StateDictSplit(is_sharded=False,
+                                          filename_to_tensors={weights_name: list(state_dict.keys())})
+
+    # Save the model shard by shard
+    os.makedirs(output_dir, exist_ok=True)
+    filename_to_tensors = state_dict_split.filename_to_tensors.items()
+    for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
+        shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors}
+        shard_state_dict = to_torch_tensor(shard_state_dict)
+        output_path = os.path.join(output_dir, shard_file)
+        if safe_serialization:
+            save_file(shard_state_dict, output_path, metadata={"format": "pt"})
+        else:
+            torch.save(shard_state_dict, output_path)
+        # release the memory of the current shard
+        for tensor_name in list(shard_state_dict.keys()):
+            del state_dict[tensor_name]
+            del shard_state_dict[tensor_name]
+        del shard_state_dict
+        gc.collect()
+
+    # Save the index if sharded
+    if state_dict_split.is_sharded:
+        index = {
+            "metadata": state_dict_split.metadata,
+            "weight_map": state_dict_split.tensor_to_filename,
+        }
+        save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
+        save_index_file = os.path.join(output_dir, save_index_file)
+        with open(save_index_file, "w", encoding="utf-8") as f:
+            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
+            f.write(content)
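+
+
+# Hypothetical usage sketch (paths are placeholders): convert a ZeRO checkpoint
+# folder into sharded safetensors files plus the model.safetensors.index.json
+# written above.
+def _example_convert_to_safetensors():
+    convert_zero_checkpoint_to_fp32_state_dict('path/checkpoint-12',
+                                               'path/checkpoint-12-output',
+                                               max_shard_size='5GB',
+                                               safe_serialization=True)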
+ + """ + logger.info(f"Extracting fp32 weights") + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag) + + logger.info(f"Overwriting model with fp32 weights") + model = model.cpu() + model.load_state_dict(state_dict, strict=False) + + return model + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("checkpoint_dir", + type=str, + help="path to the desired checkpoint folder, e.g., path/checkpoint-12") + parser.add_argument("output_dir", + type=str, + help="directory to the pytorch fp32 state_dict output files" + "(e.g. path/checkpoint-12-output/)") + parser.add_argument( + "--max_shard_size", + type=str, + default="5GB", + help="The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size" + "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`" + "We default it to 5GB in order for models to be able to run easily on free-tier google colab instances" + "without CPU OOM issues.") + parser.add_argument( + "--safe_serialization", + default=False, + action='store_true', + help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).") + parser.add_argument("-t", + "--tag", + type=str, + default=None, + help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1") + parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters") + parser.add_argument("-d", "--debug", action='store_true', help="enable debug") + args = parser.parse_args() + + debug = args.debug + + convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, + args.output_dir, + max_shard_size=args.max_shard_size, + safe_serialization=args.safe_serialization, + tag=args.tag, + exclude_frozen_parameters=args.exclude_frozen_parameters)