1007
- samples/unet_320x576_0.jpg +2 -2
- samples/unet_384x576_0.jpg +2 -2
- samples/unet_448x576_0.jpg +2 -2
- samples/unet_512x576_0.jpg +2 -2
- samples/unet_576x320_0.jpg +2 -2
- samples/unet_576x384_0.jpg +2 -2
- samples/unet_576x448_0.jpg +2 -2
- samples/unet_576x512_0.jpg +2 -2
- samples/unet_576x576_0.jpg +2 -2
- train.py +5 -4
- unet/diffusion_pytorch_model.safetensors +1 -1
samples/unet_320x576_0.jpg CHANGED (Git LFS)
samples/unet_384x576_0.jpg CHANGED (Git LFS)
samples/unet_448x576_0.jpg CHANGED (Git LFS)
samples/unet_512x576_0.jpg CHANGED (Git LFS)
samples/unet_576x320_0.jpg CHANGED (Git LFS)
samples/unet_576x384_0.jpg CHANGED (Git LFS)
samples/unet_576x448_0.jpg CHANGED (Git LFS)
samples/unet_576x512_0.jpg CHANGED (Git LFS)
samples/unet_576x576_0.jpg CHANGED (Git LFS)
train.py
CHANGED
@@ -29,8 +29,8 @@ ds_path = "datasets/576"
 project = "unet"
 batch_size = 25
 base_learning_rate = 9.5e-6
-min_learning_rate =
-num_epochs =
+min_learning_rate = 8.5e-6
+num_epochs = 20
 # samples/save per epoch
 sample_interval_share = 10
 use_wandb = True
@@ -51,7 +51,8 @@ dtype = torch.float32
 save_barrier = 1.03
 dispersive_temperature=0.5
 dispersive_weight=0.05
-percentile_clipping =
+percentile_clipping = 95 # 8bit optim
+clip_grad_norm = 0.5
 steps_offset = 1 # Scheduler
 limit = 0
 checkpoints_folder = ""
@@ -908,7 +909,7 @@ for epoch in range(start_epoch, start_epoch + num_epochs):
         if not fbp:
             if accelerator.sync_gradients:
                 with torch.amp.autocast('cuda', enabled=False):
-                    grad = accelerator.clip_grad_norm_(unet.parameters(),
+                    grad = accelerator.clip_grad_norm_(unet.parameters(), clip_grad_norm)
             optimizer.step()
             lr_scheduler.step()
             optimizer.zero_grad(set_to_none=True)
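For context on the new settings, here is a minimal, self-contained sketch (not the repo's actual train.py) of how base_learning_rate, min_learning_rate, num_epochs, percentile_clipping and clip_grad_norm could fit together. The "# 8bit optim" comment suggests percentile_clipping is passed to a bitsandbytes 8-bit AdamW, the decay toward min_learning_rate is illustrated here with CosineAnnealingLR, and clip_grad_norm is applied through accelerator.clip_grad_norm_ exactly as in the diff. The optimizer and scheduler choices below are assumptions; only the clipping call is taken from the commit.

import torch
from accelerate import Accelerator
from torch.optim.lr_scheduler import CosineAnnealingLR

# Values taken from the diff above.
base_learning_rate = 9.5e-6
min_learning_rate = 8.5e-6
num_epochs = 20
percentile_clipping = 95   # assumed to configure the 8-bit optimizer's internal clipping
clip_grad_norm = 0.5       # global gradient-norm ceiling used below

accelerator = Accelerator()
unet = torch.nn.Linear(8, 8)   # stand-in for the real UNet

if torch.cuda.is_available():
    # Assumption: percentile_clipping is forwarded to a bitsandbytes 8-bit AdamW.
    import bitsandbytes as bnb
    optimizer = bnb.optim.AdamW8bit(unet.parameters(), lr=base_learning_rate,
                                    percentile_clipping=percentile_clipping)
else:
    # CPU fallback so the sketch runs anywhere; the repo presumably trains on GPU.
    optimizer = torch.optim.AdamW(unet.parameters(), lr=base_learning_rate)

# Illustrative schedule only: decays from base_learning_rate to min_learning_rate.
# The real script steps the scheduler every optimizer step, so T_max would be the
# total step count rather than num_epochs.
lr_scheduler = CosineAnnealingLR(optimizer, T_max=num_epochs, eta_min=min_learning_rate)

unet, optimizer, lr_scheduler = accelerator.prepare(unet, optimizer, lr_scheduler)

for _ in range(3):   # placeholder loop standing in for the epoch/batch loops
    loss = unet(torch.randn(4, 8)).pow(2).mean()
    accelerator.backward(loss)
    if accelerator.sync_gradients:
        # Same call as in the diff: clip the global grad norm to clip_grad_norm.
        accelerator.clip_grad_norm_(unet.parameters(), clip_grad_norm)
    optimizer.step()
    lr_scheduler.step()
    optimizer.zero_grad(set_to_none=True)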
unet/diffusion_pytorch_model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:331fd7ac0599509b191689032ee7343232b8453954a3225fd06a9fc19db5fc84
 size 7014306128
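The updated unet/diffusion_pytorch_model.safetensors uses the diffusers weight-file naming, so, assuming a standard diffusers UNet config sits next to it in the "unet" subfolder, the new checkpoint can be loaded as sketched below; the repo path is a placeholder, not a name from the commit.

from diffusers import UNet2DConditionModel

# "path/to/this-repo" is a placeholder for the actual repo id or local checkout.
unet = UNet2DConditionModel.from_pretrained(
    "path/to/this-repo",
    subfolder="unet",
    use_safetensors=True,
)
# A ~7.0 GB file at fp32 (train.py sets dtype = torch.float32) works out to roughly 1.75B parameters.
print(f"{sum(p.numel() for p in unet.parameters()):,} parameters")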