0719-1256
Browse files
- context_unet.py +1 -1
- diffusion.py +1 -1
- quantify_results.ipynb +0 -0
context_unet.py
CHANGED
|
@@ -330,7 +330,7 @@ class ContextUnet(nn.Module):
|
|
| 330 |
elif image_size == 128:
|
| 331 |
channel_mult = (1, 1, 2, 3, 4)
|
| 332 |
elif image_size == 64:
|
| 333 |
-
channel_mult = (1, 2, 3, 4)#(1, 2, 4, 6, 8)#(1, 2, 2, 4)#(1, 2, 8, 8, 8)#(1, 2, 4)#(1, 2, 2, 4)#(0.5,1,2,2,4,4)#(1, 1, 2, 2, 4, 4)#
|
| 334 |
elif image_size == 32:
|
| 335 |
channel_mult = (1, 2, 2, 4)
|
| 336 |
elif image_size == 28:
|
|
|
|
| 330 |
elif image_size == 128:
|
| 331 |
channel_mult = (1, 1, 2, 3, 4)
|
| 332 |
elif image_size == 64:
|
| 333 |
+
channel_mult = (1, 1, 2, 2, 4, 4)#(1, 2, 3, 4)#(1, 2, 4, 6, 8)#(1, 2, 2, 4)#(1, 2, 8, 8, 8)#(1, 2, 4)#(1, 2, 2, 4)#(0.5,1,2,2,4,4)#(1, 1, 2, 2, 4, 4)#
|
| 334 |
elif image_size == 32:
|
| 335 |
channel_mult = (1, 2, 2, 4)
|
| 336 |
elif image_size == 28:
|
diffusion.py
CHANGED
|
@@ -669,7 +669,7 @@ if __name__ == "__main__":
|
|
| 669 |
print(f" sampling, world_size = {world_size} ".center(100,'-'))
|
| 670 |
# num_train_image_list = [1600,3200,6400,12800,25600]
|
| 671 |
# num_train_image_list = [5000]
|
| 672 |
-
num_new_img_per_gpu =
|
| 673 |
max_num_img_per_gpu = 20
|
| 674 |
|
| 675 |
params = torch.tensor([4.4, 131.341])
|
|
|
|
| 669 |
print(f" sampling, world_size = {world_size} ".center(100,'-'))
|
| 670 |
# num_train_image_list = [1600,3200,6400,12800,25600]
|
| 671 |
# num_train_image_list = [5000]
|
| 672 |
+
num_new_img_per_gpu = 200
|
| 673 |
max_num_img_per_gpu = 20
|
| 674 |
|
| 675 |
params = torch.tensor([4.4, 131.341])
|
quantify_results.ipynb
CHANGED
|
The diff for this file is too large to render.
See raw diff
|
|
|