weiyuyeh committed on
Commit
d3e71a5
·
1 Parent(s): 87b6625

fix typo & upload configs

Browse files
app.py CHANGED
@@ -168,7 +168,7 @@ with gr.Blocks(theme=gr.themes.Origin()) as demo:
168
  # show_btn.click(fn=show_package, inputs=show_input, outputs=show_output)
169
  download_btn.click(fn=zip_outputs, outputs=file_output)
170
  # install_btn.click(fn=install_package, inputs=pkg_input, outputs=install_output)
171
- infer_btn.click(fn=start_inference_stream,input=config_count, outputs=log_output)
172
  # uninstall_btn.click(fn=uninstall_package, inputs=pkg_input2, outputs=uninstall_output)
173
  upload_button.click(fn=save_files,inputs=[data_input, config_input],outputs=output)
174
  demo.launch()
 
168
  # show_btn.click(fn=show_package, inputs=show_input, outputs=show_output)
169
  download_btn.click(fn=zip_outputs, outputs=file_output)
170
  # install_btn.click(fn=install_package, inputs=pkg_input, outputs=install_output)
171
+ infer_btn.click(fn=start_inference_stream,inputs=config_count, outputs=log_output)
172
  # uninstall_btn.click(fn=uninstall_package, inputs=pkg_input2, outputs=uninstall_output)
173
  upload_button.click(fn=save_files,inputs=[data_input, config_input],outputs=output)
174
  demo.launch()
src/multiview_consist_edit/0009.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:71309e834855505e52c47722b1ffe3ec4e935d7511644374a25a0ebe677e08a6
3
+ size 15975752
src/multiview_consist_edit/config/configs.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:113569660e85d1e00127cfd991d0fd76bcab011abbe87fe37724ae385ef52cfd
3
+ size 8116
src/multiview_consist_edit/config/configs/0.yaml ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ seed: 42
2
+
3
+ model_path: "stable-diffusion-v1-5/stable-diffusion-v1-5"
4
+ vae_path: "stabilityai/sd-vae-ft-mse"
5
+ clip_model_path: 'openai/clip-vit-base-patch32'
6
+
7
+ # unet_path: "/GPUFS/sysu_gbli2_1/hzj/animate/checkpoints/thuman_tryon_mvattn_multi_1205/checkpoint-30000"
8
+ # pretrained_poseguider_path: "/GPUFS/sysu_gbli2_1/hzj/animate/checkpoints/thuman_tryon_mvattn_multi_1205/checkpoint-30000/pose.ckpt"
9
+ # pretrained_referencenet_path: '/GPUFS/sysu_gbli2_1/hzj/animate/checkpoints/thuman_tryon_mvattn_multi_1205/checkpoint-30000'
10
+
11
+ unet_path: "NTUST-DDRC/thuman_tryon_mvattn_multi"
12
+ unet_revision: "checkpoint-30000/"
13
+ pretrained_poseguider_path: "checkpoint-30000/pose.ckpt"
14
+ pretrained_referencenet_path: 'NTUST-DDRC/thuman_tryon_mvattn_multi'
15
+ referencenet_revision: "checkpoint-30000/"
16
+
17
+ out_dir: 'image_output_tryon_mvhumannet'
18
+
19
+ batch_size: 1
20
+ dataloader_num_workers: 4
21
+ guidance_scale: 3 # thuman:3 mvhumannet:2
22
+
23
+
24
+ # infer_data:
25
+ # # dataroot: "/GPUFS/sysu_gbli2_1/hzj/render_data"
26
+ # dataroot: "/GPUFS/sysu_gbli2_1/hzj/save_render_data_yw/"
27
+ # # sample_size: [512,384] # for 40G 256
28
+ # sample_size: [768,576]
29
+ # clip_model_path: '/GPUFS/sysu_gbli2_1/hzj/pretrained_models/clip-vit-base-patch32'
30
+ # is_train: false
31
+ # mode: 'pair'
32
+ # output_front: true
33
+
34
+ datatype: 'Thuman2_Dataset' # 'MVHumanNet_Dataset' or 'Thuman2_Dataset'
35
+ infer_data:
36
+ # dataroot: "/GPUFS/sysu_gbli2_1/hzj/render_data"
37
+ dataroot: "/home/user/app/upload/data/"
38
+ # sample_size: [512,384] # for 40G 256
39
+ sample_size: [768,576]
40
+ clip_model_path: 'openai/clip-vit-base-patch32'
41
+ is_train: false
42
+ mode: 'pair'
43
+ # the view ids you want to use
44
+ # it will override other settings like output_front, front_id, is_use_all_views
45
+ view_ids: [0,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75]
46
+ output_front: true
47
+ front_id: 0 # front view id, used for thuman2 dataset
48
+ is_use_all_views: false # if your total view count is <= 16, you can set this to true; otherwise false
49
+
50
+ fusion_blocks: "full"
51
+ image_finetune: true
52
+ num_inference_steps: 30
src/multiview_consist_edit/config/configs/1.yaml ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ seed: 42
2
+
3
+ model_path: "stable-diffusion-v1-5/stable-diffusion-v1-5"
4
+ vae_path: "stabilityai/sd-vae-ft-mse"
5
+ clip_model_path: 'openai/clip-vit-base-patch32'
6
+
7
+ # unet_path: "/GPUFS/sysu_gbli2_1/hzj/animate/checkpoints/thuman_tryon_mvattn_multi_1205/checkpoint-30000"
8
+ # pretrained_poseguider_path: "/GPUFS/sysu_gbli2_1/hzj/animate/checkpoints/thuman_tryon_mvattn_multi_1205/checkpoint-30000/pose.ckpt"
9
+ # pretrained_referencenet_path: '/GPUFS/sysu_gbli2_1/hzj/animate/checkpoints/thuman_tryon_mvattn_multi_1205/checkpoint-30000'
10
+
11
+ unet_path: "NTUST-DDRC/thuman_tryon_mvattn_multi"
12
+ unet_revision: "checkpoint-30000/"
13
+ pretrained_poseguider_path: "checkpoint-30000/pose.ckpt"
14
+ pretrained_referencenet_path: 'NTUST-DDRC/thuman_tryon_mvattn_multi'
15
+ referencenet_revision: "checkpoint-30000/"
16
+
17
+ out_dir: 'image_output_tryon_mvhumannet'
18
+
19
+ batch_size: 1
20
+ dataloader_num_workers: 4
21
+ guidance_scale: 3 # thuman:3 mvhumannet:2
22
+
23
+
24
+ # infer_data:
25
+ # # dataroot: "/GPUFS/sysu_gbli2_1/hzj/render_data"
26
+ # dataroot: "/GPUFS/sysu_gbli2_1/hzj/save_render_data_yw/"
27
+ # # sample_size: [512,384] # for 40G 256
28
+ # sample_size: [768,576]
29
+ # clip_model_path: '/GPUFS/sysu_gbli2_1/hzj/pretrained_models/clip-vit-base-patch32'
30
+ # is_train: false
31
+ # mode: 'pair'
32
+ # output_front: true
33
+
34
+ datatype: 'Thuman2_Dataset' # 'MVHumanNet_Dataset' or 'Thuman2_Dataset'
35
+ infer_data:
36
+ # dataroot: "/GPUFS/sysu_gbli2_1/hzj/render_data"
37
+ dataroot: "/home/user/app/upload/data/"
38
+ # sample_size: [512,384] # for 40G 256
39
+ sample_size: [768,576]
40
+ clip_model_path: 'openai/clip-vit-base-patch32'
41
+ is_train: false
42
+ mode: 'pair'
43
+ # the view ids you want to use
44
+ # it will override other settings like output_front, front_id, is_use_all_views
45
+ view_ids: [1,6,11,16,21,26,31,36,41,46,51,56,61,66,71,76]
46
+ output_front: true
47
+ front_id: 0 # front view id, used for thuman2 dataset
48
+ is_use_all_views: false # if your total view count is <= 16, you can set this to true; otherwise false
49
+
50
+ fusion_blocks: "full"
51
+ image_finetune: true
52
+ num_inference_steps: 30
src/multiview_consist_edit/config/configs/2.yaml ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ seed: 42
2
+
3
+ model_path: "stable-diffusion-v1-5/stable-diffusion-v1-5"
4
+ vae_path: "stabilityai/sd-vae-ft-mse"
5
+ clip_model_path: 'openai/clip-vit-base-patch32'
6
+
7
+ # unet_path: "/GPUFS/sysu_gbli2_1/hzj/animate/checkpoints/thuman_tryon_mvattn_multi_1205/checkpoint-30000"
8
+ # pretrained_poseguider_path: "/GPUFS/sysu_gbli2_1/hzj/animate/checkpoints/thuman_tryon_mvattn_multi_1205/checkpoint-30000/pose.ckpt"
9
+ # pretrained_referencenet_path: '/GPUFS/sysu_gbli2_1/hzj/animate/checkpoints/thuman_tryon_mvattn_multi_1205/checkpoint-30000'
10
+
11
+ unet_path: "NTUST-DDRC/thuman_tryon_mvattn_multi"
12
+ unet_revision: "checkpoint-30000/"
13
+ pretrained_poseguider_path: "checkpoint-30000/pose.ckpt"
14
+ pretrained_referencenet_path: 'NTUST-DDRC/thuman_tryon_mvattn_multi'
15
+ referencenet_revision: "checkpoint-30000/"
16
+
17
+ out_dir: 'image_output_tryon_mvhumannet'
18
+
19
+ batch_size: 1
20
+ dataloader_num_workers: 4
21
+ guidance_scale: 3 # thuman:3 mvhumannet:2
22
+
23
+
24
+ # infer_data:
25
+ # # dataroot: "/GPUFS/sysu_gbli2_1/hzj/render_data"
26
+ # dataroot: "/GPUFS/sysu_gbli2_1/hzj/save_render_data_yw/"
27
+ # # sample_size: [512,384] # for 40G 256
28
+ # sample_size: [768,576]
29
+ # clip_model_path: '/GPUFS/sysu_gbli2_1/hzj/pretrained_models/clip-vit-base-patch32'
30
+ # is_train: false
31
+ # mode: 'pair'
32
+ # output_front: true
33
+
34
+ datatype: 'Thuman2_Dataset' # 'MVHumanNet_Dataset' or 'Thuman2_Dataset'
35
+ infer_data:
36
+ # dataroot: "/GPUFS/sysu_gbli2_1/hzj/render_data"
37
+ dataroot: "/home/user/app/upload/data/"
38
+ # sample_size: [512,384] # for 40G 256
39
+ sample_size: [768,576]
40
+ clip_model_path: 'openai/clip-vit-base-patch32'
41
+ is_train: false
42
+ mode: 'pair'
43
+ # the view ids you want to use
44
+ # it will override other settings like output_front, front_id, is_use_all_views
45
+ view_ids: [2,7,12,17,22,27,32,37,42,47,52,57,62,67,72,77]
46
+ output_front: true
47
+ front_id: 0 # front view id, used for thuman2 dataset
48
+ is_use_all_views: false # if your total view count is <= 16, you can set this to true; otherwise false
49
+
50
+ fusion_blocks: "full"
51
+ image_finetune: true
52
+ num_inference_steps: 30
src/multiview_consist_edit/config/configs/3.yaml ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ seed: 42
2
+
3
+ model_path: "stable-diffusion-v1-5/stable-diffusion-v1-5"
4
+ vae_path: "stabilityai/sd-vae-ft-mse"
5
+ clip_model_path: 'openai/clip-vit-base-patch32'
6
+
7
+ # unet_path: "/GPUFS/sysu_gbli2_1/hzj/animate/checkpoints/thuman_tryon_mvattn_multi_1205/checkpoint-30000"
8
+ # pretrained_poseguider_path: "/GPUFS/sysu_gbli2_1/hzj/animate/checkpoints/thuman_tryon_mvattn_multi_1205/checkpoint-30000/pose.ckpt"
9
+ # pretrained_referencenet_path: '/GPUFS/sysu_gbli2_1/hzj/animate/checkpoints/thuman_tryon_mvattn_multi_1205/checkpoint-30000'
10
+
11
+ unet_path: "NTUST-DDRC/thuman_tryon_mvattn_multi"
12
+ unet_revision: "checkpoint-30000/"
13
+ pretrained_poseguider_path: "checkpoint-30000/pose.ckpt"
14
+ pretrained_referencenet_path: 'NTUST-DDRC/thuman_tryon_mvattn_multi'
15
+ referencenet_revision: "checkpoint-30000/"
16
+
17
+ out_dir: 'image_output_tryon_mvhumannet'
18
+
19
+ batch_size: 1
20
+ dataloader_num_workers: 4
21
+ guidance_scale: 3 # thuman:3 mvhumannet:2
22
+
23
+
24
+ # infer_data:
25
+ # # dataroot: "/GPUFS/sysu_gbli2_1/hzj/render_data"
26
+ # dataroot: "/GPUFS/sysu_gbli2_1/hzj/save_render_data_yw/"
27
+ # # sample_size: [512,384] # for 40G 256
28
+ # sample_size: [768,576]
29
+ # clip_model_path: '/GPUFS/sysu_gbli2_1/hzj/pretrained_models/clip-vit-base-patch32'
30
+ # is_train: false
31
+ # mode: 'pair'
32
+ # output_front: true
33
+
34
+ datatype: 'Thuman2_Dataset' # 'MVHumanNet_Dataset' or 'Thuman2_Dataset'
35
+ infer_data:
36
+ # dataroot: "/GPUFS/sysu_gbli2_1/hzj/render_data"
37
+ dataroot: "/home/user/app/upload/data/"
38
+ # sample_size: [512,384] # for 40G 256
39
+ sample_size: [768,576]
40
+ clip_model_path: 'openai/clip-vit-base-patch32'
41
+ is_train: false
42
+ mode: 'pair'
43
+ # the view ids you want to use
44
+ # it will override other settings like output_front, front_id, is_use_all_views
45
+ view_ids: [3,8,13,18,23,28,33,38,43,48,53,58,63,68,73,78]
46
+ output_front: true
47
+ front_id: 0 # front view id, used for thuman2 dataset
48
+ is_use_all_views: false # if your total view count is <= 16, you can set this to true; otherwise false
49
+
50
+ fusion_blocks: "full"
51
+ image_finetune: true
52
+ num_inference_steps: 30
src/multiview_consist_edit/config/configs/4.yaml ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ seed: 42
2
+
3
+ model_path: "stable-diffusion-v1-5/stable-diffusion-v1-5"
4
+ vae_path: "stabilityai/sd-vae-ft-mse"
5
+ clip_model_path: 'openai/clip-vit-base-patch32'
6
+
7
+ # unet_path: "/GPUFS/sysu_gbli2_1/hzj/animate/checkpoints/thuman_tryon_mvattn_multi_1205/checkpoint-30000"
8
+ # pretrained_poseguider_path: "/GPUFS/sysu_gbli2_1/hzj/animate/checkpoints/thuman_tryon_mvattn_multi_1205/checkpoint-30000/pose.ckpt"
9
+ # pretrained_referencenet_path: '/GPUFS/sysu_gbli2_1/hzj/animate/checkpoints/thuman_tryon_mvattn_multi_1205/checkpoint-30000'
10
+
11
+ unet_path: "NTUST-DDRC/thuman_tryon_mvattn_multi"
12
+ unet_revision: "checkpoint-30000/"
13
+ pretrained_poseguider_path: "checkpoint-30000/pose.ckpt"
14
+ pretrained_referencenet_path: 'NTUST-DDRC/thuman_tryon_mvattn_multi'
15
+ referencenet_revision: "checkpoint-30000/"
16
+
17
+ out_dir: 'image_output_tryon_mvhumannet'
18
+
19
+ batch_size: 1
20
+ dataloader_num_workers: 4
21
+ guidance_scale: 3 # thuman:3 mvhumannet:2
22
+
23
+
24
+ # infer_data:
25
+ # # dataroot: "/GPUFS/sysu_gbli2_1/hzj/render_data"
26
+ # dataroot: "/GPUFS/sysu_gbli2_1/hzj/save_render_data_yw/"
27
+ # # sample_size: [512,384] # for 40G 256
28
+ # sample_size: [768,576]
29
+ # clip_model_path: '/GPUFS/sysu_gbli2_1/hzj/pretrained_models/clip-vit-base-patch32'
30
+ # is_train: false
31
+ # mode: 'pair'
32
+ # output_front: true
33
+
34
+ datatype: 'Thuman2_Dataset' # 'MVHumanNet_Dataset' or 'Thuman2_Dataset'
35
+ infer_data:
36
+ # dataroot: "/GPUFS/sysu_gbli2_1/hzj/render_data"
37
+ dataroot: "/home/user/app/upload/data/"
38
+ # sample_size: [512,384] # for 40G 256
39
+ sample_size: [768,576]
40
+ clip_model_path: 'openai/clip-vit-base-patch32'
41
+ is_train: false
42
+ mode: 'pair'
43
+ # the view ids you want to use
44
+ # it will override other settings like output_front, front_id, is_use_all_views
45
+ view_ids: [4,9,14,19,24,29,34,39,44,49,54,59,64,69,74,79]
46
+ output_front: true
47
+ front_id: 0 # front view id, used for thuman2 dataset
48
+ is_use_all_views: false # if your total view count is <= 16, you can set this to true; otherwise false
49
+
50
+ fusion_blocks: "full"
51
+ image_finetune: true
52
+ num_inference_steps: 30
src/multiview_consist_edit/config/configs/5.yaml ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ seed: 42
2
+
3
+ model_path: "stable-diffusion-v1-5/stable-diffusion-v1-5"
4
+ vae_path: "stabilityai/sd-vae-ft-mse"
5
+ clip_model_path: 'openai/clip-vit-base-patch32'
6
+
7
+ # unet_path: "/GPUFS/sysu_gbli2_1/hzj/animate/checkpoints/thuman_tryon_mvattn_multi_1205/checkpoint-30000"
8
+ # pretrained_poseguider_path: "/GPUFS/sysu_gbli2_1/hzj/animate/checkpoints/thuman_tryon_mvattn_multi_1205/checkpoint-30000/pose.ckpt"
9
+ # pretrained_referencenet_path: '/GPUFS/sysu_gbli2_1/hzj/animate/checkpoints/thuman_tryon_mvattn_multi_1205/checkpoint-30000'
10
+
11
+ unet_path: "NTUST-DDRC/thuman_tryon_mvattn_multi"
12
+ unet_revision: "checkpoint-30000/"
13
+ pretrained_poseguider_path: "checkpoint-30000/pose.ckpt"
14
+ pretrained_referencenet_path: 'NTUST-DDRC/thuman_tryon_mvattn_multi'
15
+ referencenet_revision: "checkpoint-30000/"
16
+
17
+ out_dir: 'image_output_tryon_mvhumannet'
18
+
19
+ batch_size: 1
20
+ dataloader_num_workers: 4
21
+ guidance_scale: 3 # thuman:3 mvhumannet:2
22
+
23
+
24
+ # infer_data:
25
+ # # dataroot: "/GPUFS/sysu_gbli2_1/hzj/render_data"
26
+ # dataroot: "/GPUFS/sysu_gbli2_1/hzj/save_render_data_yw/"
27
+ # # sample_size: [512,384] # for 40G 256
28
+ # sample_size: [768,576]
29
+ # clip_model_path: '/GPUFS/sysu_gbli2_1/hzj/pretrained_models/clip-vit-base-patch32'
30
+ # is_train: false
31
+ # mode: 'pair'
32
+ # output_front: true
33
+
34
+ datatype: 'Thuman2_Dataset' # 'MVHumanNet_Dataset' or 'Thuman2_Dataset'
35
+ infer_data:
36
+ # dataroot: "/GPUFS/sysu_gbli2_1/hzj/render_data"
37
+ dataroot: "/home/user/app/upload/data/"
38
+ # sample_size: [512,384] # for 40G 256
39
+ sample_size: [768,576]
40
+ clip_model_path: 'openai/clip-vit-base-patch32'
41
+ is_train: false
42
+ mode: 'pair'
43
+ # the view ids you want to use
44
+ # it will override other settings like output_front, front_id, is_use_all_views
45
+ view_ids: [80,82,84,86,88,90,92,94,96,98,100,102,104,106,108,110,112,114,116,118]
46
+ output_front: true
47
+ front_id: 0 # front view id, used for thuman2 dataset
48
+ is_use_all_views: false # if your total view count is <= 16, you can set this to true; otherwise false
49
+
50
+ fusion_blocks: "full"
51
+ image_finetune: true
52
+ num_inference_steps: 30
src/multiview_consist_edit/config/configs/6.yaml ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ seed: 42
2
+
3
+ model_path: "stable-diffusion-v1-5/stable-diffusion-v1-5"
4
+ vae_path: "stabilityai/sd-vae-ft-mse"
5
+ clip_model_path: 'openai/clip-vit-base-patch32'
6
+
7
+ # unet_path: "/GPUFS/sysu_gbli2_1/hzj/animate/checkpoints/thuman_tryon_mvattn_multi_1205/checkpoint-30000"
8
+ # pretrained_poseguider_path: "/GPUFS/sysu_gbli2_1/hzj/animate/checkpoints/thuman_tryon_mvattn_multi_1205/checkpoint-30000/pose.ckpt"
9
+ # pretrained_referencenet_path: '/GPUFS/sysu_gbli2_1/hzj/animate/checkpoints/thuman_tryon_mvattn_multi_1205/checkpoint-30000'
10
+
11
+ unet_path: "NTUST-DDRC/thuman_tryon_mvattn_multi"
12
+ unet_revision: "checkpoint-30000/"
13
+ pretrained_poseguider_path: "checkpoint-30000/pose.ckpt"
14
+ pretrained_referencenet_path: 'NTUST-DDRC/thuman_tryon_mvattn_multi'
15
+ referencenet_revision: "checkpoint-30000/"
16
+
17
+ out_dir: 'image_output_tryon_mvhumannet'
18
+
19
+ batch_size: 1
20
+ dataloader_num_workers: 4
21
+ guidance_scale: 3 # thuman:3 mvhumannet:2
22
+
23
+
24
+ # infer_data:
25
+ # # dataroot: "/GPUFS/sysu_gbli2_1/hzj/render_data"
26
+ # dataroot: "/GPUFS/sysu_gbli2_1/hzj/save_render_data_yw/"
27
+ # # sample_size: [512,384] # for 40G 256
28
+ # sample_size: [768,576]
29
+ # clip_model_path: '/GPUFS/sysu_gbli2_1/hzj/pretrained_models/clip-vit-base-patch32'
30
+ # is_train: false
31
+ # mode: 'pair'
32
+ # output_front: true
33
+
34
+ datatype: 'Thuman2_Dataset' # 'MVHumanNet_Dataset' or 'Thuman2_Dataset'
35
+ infer_data:
36
+ # dataroot: "/GPUFS/sysu_gbli2_1/hzj/render_data"
37
+ dataroot: "/home/user/app/upload/data/"
38
+ # sample_size: [512,384] # for 40G 256
39
+ sample_size: [768,576]
40
+ clip_model_path: 'openai/clip-vit-base-patch32'
41
+ is_train: false
42
+ mode: 'pair'
43
+ # the view ids you want to use
44
+ # it will override other settings like output_front, front_id, is_use_all_views
45
+ view_ids: [81,83,85,87,89,91,93,95,97,99,101,103,105,107,109,111,113,115,117,119]
46
+ output_front: true
47
+ front_id: 0 # front view id, used for thuman2 dataset
48
+ is_use_all_views: false # if your total view count is <= 16, you can set this to true; otherwise false
49
+
50
+ fusion_blocks: "full"
51
+ image_finetune: true
52
+ num_inference_steps: 30
src/multiview_consist_edit/config/configs/7.yaml ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ seed: 42
2
+
3
+ model_path: "stable-diffusion-v1-5/stable-diffusion-v1-5"
4
+ vae_path: "stabilityai/sd-vae-ft-mse"
5
+ clip_model_path: 'openai/clip-vit-base-patch32'
6
+
7
+ # unet_path: "/GPUFS/sysu_gbli2_1/hzj/animate/checkpoints/thuman_tryon_mvattn_multi_1205/checkpoint-30000"
8
+ # pretrained_poseguider_path: "/GPUFS/sysu_gbli2_1/hzj/animate/checkpoints/thuman_tryon_mvattn_multi_1205/checkpoint-30000/pose.ckpt"
9
+ # pretrained_referencenet_path: '/GPUFS/sysu_gbli2_1/hzj/animate/checkpoints/thuman_tryon_mvattn_multi_1205/checkpoint-30000'
10
+
11
+ unet_path: "NTUST-DDRC/thuman_tryon_mvattn_multi"
12
+ unet_revision: "checkpoint-30000/"
13
+ pretrained_poseguider_path: "checkpoint-30000/pose.ckpt"
14
+ pretrained_referencenet_path: 'NTUST-DDRC/thuman_tryon_mvattn_multi'
15
+ referencenet_revision: "checkpoint-30000/"
16
+
17
+ out_dir: 'image_output_tryon_mvhumannet'
18
+
19
+ batch_size: 1
20
+ dataloader_num_workers: 4
21
+ guidance_scale: 3 # thuman:3 mvhumannet:2
22
+
23
+
24
+ # infer_data:
25
+ # # dataroot: "/GPUFS/sysu_gbli2_1/hzj/render_data"
26
+ # dataroot: "/GPUFS/sysu_gbli2_1/hzj/save_render_data_yw/"
27
+ # # sample_size: [512,384] # for 40G 256
28
+ # sample_size: [768,576]
29
+ # clip_model_path: '/GPUFS/sysu_gbli2_1/hzj/pretrained_models/clip-vit-base-patch32'
30
+ # is_train: false
31
+ # mode: 'pair'
32
+ # output_front: true
33
+
34
+ datatype: 'Thuman2_Dataset' # 'MVHumanNet_Dataset' or 'Thuman2_Dataset'
35
+ infer_data:
36
+ # dataroot: "/GPUFS/sysu_gbli2_1/hzj/render_data"
37
+ dataroot: "/home/user/app/upload/data/"
38
+ # sample_size: [512,384] # for 40G 256
39
+ sample_size: [768,576]
40
+ clip_model_path: 'openai/clip-vit-base-patch32'
41
+ is_train: false
42
+ mode: 'pair'
43
+ # the view ids you want to use
44
+ # it will override other settings like output_front, front_id, is_use_all_views
45
+ view_ids: [120,122,124,126,128,130,132,134,136,138,140,142,144,146,148,150,152,154,156,158]
46
+ output_front: true
47
+ front_id: 0 # front view id, used for thuman2 dataset
48
+ is_use_all_views: false # if your total view count is <= 16, you can set this to true; otherwise false
49
+
50
+ fusion_blocks: "full"
51
+ image_finetune: true
52
+ num_inference_steps: 30
src/multiview_consist_edit/config/configs/8.yaml ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ seed: 42
2
+
3
+ model_path: "stable-diffusion-v1-5/stable-diffusion-v1-5"
4
+ vae_path: "stabilityai/sd-vae-ft-mse"
5
+ clip_model_path: 'openai/clip-vit-base-patch32'
6
+
7
+ # unet_path: "/GPUFS/sysu_gbli2_1/hzj/animate/checkpoints/thuman_tryon_mvattn_multi_1205/checkpoint-30000"
8
+ # pretrained_poseguider_path: "/GPUFS/sysu_gbli2_1/hzj/animate/checkpoints/thuman_tryon_mvattn_multi_1205/checkpoint-30000/pose.ckpt"
9
+ # pretrained_referencenet_path: '/GPUFS/sysu_gbli2_1/hzj/animate/checkpoints/thuman_tryon_mvattn_multi_1205/checkpoint-30000'
10
+
11
+ unet_path: "NTUST-DDRC/thuman_tryon_mvattn_multi"
12
+ unet_revision: "checkpoint-30000/"
13
+ pretrained_poseguider_path: "checkpoint-30000/pose.ckpt"
14
+ pretrained_referencenet_path: 'NTUST-DDRC/thuman_tryon_mvattn_multi'
15
+ referencenet_revision: "checkpoint-30000/"
16
+
17
+ out_dir: 'image_output_tryon_mvhumannet'
18
+
19
+ batch_size: 1
20
+ dataloader_num_workers: 4
21
+ guidance_scale: 3 # thuman:3 mvhumannet:2
22
+
23
+
24
+ # infer_data:
25
+ # # dataroot: "/GPUFS/sysu_gbli2_1/hzj/render_data"
26
+ # dataroot: "/GPUFS/sysu_gbli2_1/hzj/save_render_data_yw/"
27
+ # # sample_size: [512,384] # for 40G 256
28
+ # sample_size: [768,576]
29
+ # clip_model_path: '/GPUFS/sysu_gbli2_1/hzj/pretrained_models/clip-vit-base-patch32'
30
+ # is_train: false
31
+ # mode: 'pair'
32
+ # output_front: true
33
+
34
+ datatype: 'Thuman2_Dataset' # 'MVHumanNet_Dataset' or 'Thuman2_Dataset'
35
+ infer_data:
36
+ # dataroot: "/GPUFS/sysu_gbli2_1/hzj/render_data"
37
+ dataroot: "/home/user/app/upload/data/"
38
+ # sample_size: [512,384] # for 40G 256
39
+ sample_size: [768,576]
40
+ clip_model_path: 'openai/clip-vit-base-patch32'
41
+ is_train: false
42
+ mode: 'pair'
43
+ # the view ids you want to use
44
+ # it will override other settings like output_front, front_id, is_use_all_views
45
+ view_ids: [121,123,125,127,129,131,133,135,137,139,141,143,145,147,149,151,153,155,157,159]
46
+ output_front: true
47
+ front_id: 0 # front view id, used for thuman2 dataset
48
+ is_use_all_views: false # if your total view count is <= 16, you can set this to true; otherwise false
49
+
50
+ fusion_blocks: "full"
51
+ image_finetune: true
52
+ num_inference_steps: 30