0706
- samples/unet_320x576_0.jpg +2 -2
- samples/unet_384x576_0.jpg +2 -2
- samples/unet_448x576_0.jpg +2 -2
- samples/unet_512x576_0.jpg +2 -2
- samples/unet_576x320_0.jpg +2 -2
- samples/unet_576x384_0.jpg +2 -2
- samples/unet_576x448_0.jpg +2 -2
- samples/unet_576x512_0.jpg +2 -2
- samples/unet_576x576_0.jpg +2 -2
- train.py +5 -5
- unet/diffusion_pytorch_model.safetensors +1 -1
samples/unet_320x576_0.jpg
CHANGED (Git LFS)
samples/unet_384x576_0.jpg
CHANGED (Git LFS)
samples/unet_448x576_0.jpg
CHANGED (Git LFS)
samples/unet_512x576_0.jpg
CHANGED (Git LFS)
samples/unet_576x320_0.jpg
CHANGED (Git LFS)
samples/unet_576x384_0.jpg
CHANGED (Git LFS)
samples/unet_576x448_0.jpg
CHANGED (Git LFS)
samples/unet_576x512_0.jpg
CHANGED (Git LFS)
samples/unet_576x576_0.jpg
CHANGED (Git LFS)
train.py
CHANGED
@@ -27,10 +27,10 @@ import torch.nn.functional as F
 # --------------------------- Parameters ---------------------------
 ds_path = "datasets/576"
 project = "unet"
-batch_size =
+batch_size = 50
 base_learning_rate = 9e-6
 min_learning_rate = 8e-6
-num_epochs =
+num_epochs = 5
 # samples/save per epoch
 sample_interval_share = 5
 use_wandb = True
@@ -51,7 +51,7 @@ dtype = torch.float32
 save_barrier = 1.03
 dispersive_temperature=0.5
 dispersive_weight=0.05
-percentile_clipping =
+percentile_clipping = 90 # 8bit optim
 steps_offset = 1 # Scheduler
 limit = 0
 checkpoints_folder = ""
@@ -627,7 +627,7 @@ else:
 def create_optimizer(name, params):
     if name == "adam8bit":
         return bnb.optim.AdamW8bit(
-            params, lr=base_learning_rate, betas=(0.9, 0.
+            params, lr=base_learning_rate, betas=(0.9, 0.97), eps=1e-5, weight_decay=0.001,
             percentile_clipping=percentile_clipping
         )
     elif name == "adam":
@@ -904,7 +904,7 @@ for epoch in range(start_epoch, start_epoch + num_epochs):
     if not fbp:
         if accelerator.sync_gradients:
             with torch.amp.autocast('cuda', enabled=False):
-                grad = accelerator.clip_grad_norm_(unet.parameters(), 0.
+                grad = accelerator.clip_grad_norm_(unet.parameters(), 0.25)
         optimizer.step()
         lr_scheduler.step()
         optimizer.zero_grad(set_to_none=True)
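The adam8bit path now pairs bitsandbytes' built-in percentile clipping (percentile_clipping = 90) with an explicit norm clip of 0.25 applied in full precision once gradients are synced. A minimal sketch of that combined pattern, assuming bitsandbytes and accelerate are installed and a CUDA device is available; the toy linear model, loss, and loop are placeholders rather than pieces of train.py:

import torch
import bitsandbytes as bnb
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(16, 16)  # stand-in for the UNet

# percentile_clipping=90 makes the 8-bit optimizer track a history of
# gradient norms and clip any update whose norm exceeds the 90th percentile.
optimizer = bnb.optim.AdamW8bit(
    model.parameters(), lr=9e-6, betas=(0.9, 0.97), eps=1e-5,
    weight_decay=0.001, percentile_clipping=90,
)
model, optimizer = accelerator.prepare(model, optimizer)

for _ in range(2):  # placeholder loop; LR scheduler omitted for brevity
    x = torch.randn(4, 16, device=accelerator.device)
    loss = model(x).pow(2).mean()
    accelerator.backward(loss)
    if accelerator.sync_gradients:
        # Clip in full precision, mirroring the autocast-disabled block above.
        with torch.amp.autocast('cuda', enabled=False):
            grad_norm = accelerator.clip_grad_norm_(model.parameters(), 0.25)
    optimizer.step()
    optimizer.zero_grad(set_to_none=True)

The two mechanisms are complementary: percentile clipping smooths outlier updates inside the optimizer state, while the hard 0.25 norm clip bounds any single synced step.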
unet/diffusion_pytorch_model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:e2987b5660219328cd1c22e5c4072a561d8aa8dabb3b488c55fd06e9d9059229
 size 7014306128
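The new LFS pointer records the sha256 oid and byte size of the updated weights, which is enough to verify a local download. A quick check sketched with Python's standard hashlib; the checkout path is an assumption for illustration:

import hashlib
import os

EXPECTED_OID = "e2987b5660219328cd1c22e5c4072a561d8aa8dabb3b488c55fd06e9d9059229"
EXPECTED_SIZE = 7014306128
path = "unet/diffusion_pytorch_model.safetensors"  # hypothetical local path

# Stream in 1 MiB chunks so the ~7 GB file never sits fully in memory.
h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"
assert h.hexdigest() == EXPECTED_OID, "sha256 mismatch"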