1907
Browse files- 2b/diffusion_pytorch_model.safetensors +1 -1
- samples/2b_192x384_0.jpg +2 -2
- samples/2b_256x384_0.jpg +2 -2
- samples/2b_320x384_0.jpg +2 -2
- samples/2b_384x192_0.jpg +2 -2
- samples/2b_384x256_0.jpg +2 -2
- samples/2b_384x320_0.jpg +2 -2
- samples/2b_384x384_0.jpg +2 -2
- train.py +4 -4
2b/diffusion_pytorch_model.safetensors
CHANGED
|
@@ -1,3 +1,3 @@
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
- oid sha256:
|
| 3 |
size 7993399544
|
|
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
+ oid sha256:5fff20947b4e825d7949deb6c21525b68b1664a56e57d31e5ae7b5548318e271
|
| 3 |
size 7993399544
|
samples/2b_192x384_0.jpg
CHANGED
|
Git LFS Details
|
|
Git LFS Details
|
samples/2b_256x384_0.jpg
CHANGED
|
Git LFS Details
|
|
Git LFS Details
|
samples/2b_320x384_0.jpg
CHANGED
|
Git LFS Details
|
|
Git LFS Details
|
samples/2b_384x192_0.jpg
CHANGED
|
Git LFS Details
|
|
Git LFS Details
|
samples/2b_384x256_0.jpg
CHANGED
|
Git LFS Details
|
|
Git LFS Details
|
samples/2b_384x320_0.jpg
CHANGED
|
Git LFS Details
|
|
Git LFS Details
|
samples/2b_384x384_0.jpg
CHANGED
|
Git LFS Details
|
|
Git LFS Details
|
train.py
CHANGED
|
@@ -27,10 +27,10 @@ import torch.nn.functional as F
|
|
| 27 |
# --------------------------- Параметры ---------------------------
|
| 28 |
ds_path = "datasets/384"
|
| 29 |
project = "2b"
|
| 30 |
- batch_size =
|
| 31 |
base_learning_rate = 8e-5
|
| 32 |
min_learning_rate = 4e-5
|
| 33 |
- num_epochs =
|
| 34 |
# samples/save per epoch
|
| 35 |
sample_interval_share = 10
|
| 36 |
use_wandb = True
|
|
@@ -43,7 +43,7 @@ unet_gradient = False
|
|
| 43 |
clip_sample = False #Scheduler
|
| 44 |
fixed_seed = False
|
| 45 |
shuffle = True
|
| 46 |
- dispersive_loss_enabled =
|
| 47 |
torch.backends.cuda.matmul.allow_tf32 = True
|
| 48 |
torch.backends.cudnn.allow_tf32 = True
|
| 49 |
torch.backends.cuda.enable_mem_efficient_sdp(False)
|
|
@@ -59,7 +59,7 @@ steps_offset = 1 # Scheduler
|
|
| 59 |
limit = 0
|
| 60 |
checkpoints_folder = ""
|
| 61 |
mixed_precision = "bf16" #"fp16"
|
| 62 |
- gradient_accumulation_steps =
|
| 63 |
accelerator = Accelerator(
|
| 64 |
mixed_precision=mixed_precision,
|
| 65 |
gradient_accumulation_steps=gradient_accumulation_steps
|
|
|
|
| 27 |
# --------------------------- Параметры ---------------------------
|
| 28 |
ds_path = "datasets/384"
|
| 29 |
project = "2b"
|
| 30 |
+ batch_size = 8 #50
|
| 31 |
base_learning_rate = 8e-5
|
| 32 |
min_learning_rate = 4e-5
|
| 33 |
+ num_epochs = 6
|
| 34 |
# samples/save per epoch
|
| 35 |
sample_interval_share = 10
|
| 36 |
use_wandb = True
|
|
|
|
| 43 |
clip_sample = False #Scheduler
|
| 44 |
fixed_seed = False
|
| 45 |
shuffle = True
|
| 46 |
+ dispersive_loss_enabled = False
|
| 47 |
torch.backends.cuda.matmul.allow_tf32 = True
|
| 48 |
torch.backends.cudnn.allow_tf32 = True
|
| 49 |
torch.backends.cuda.enable_mem_efficient_sdp(False)
|
|
|
|
| 59 |
limit = 0
|
| 60 |
checkpoints_folder = ""
|
| 61 |
mixed_precision = "bf16" #"fp16"
|
| 62 |
+ gradient_accumulation_steps = 2
|
| 63 |
accelerator = Accelerator(
|
| 64 |
mixed_precision=mixed_precision,
|
| 65 |
gradient_accumulation_steps=gradient_accumulation_steps
|