Upload folder using huggingface_hub
Browse files
- config.json +1 -1
- diffusion_pytorch_model.safetensors +1 -1
- train_vae_fdl.py +5 -5
config.json
CHANGED
|
@@ -1,7 +1,7 @@
|
|
| 1 |
{
|
| 2 |
"_class_name": "AsymmetricAutoencoderKL",
|
| 3 |
"_diffusers_version": "0.36.0",
|
| 4 |
-
"_name_or_path": "
|
| 5 |
"act_fn": "silu",
|
| 6 |
"block_out_channels": [
|
| 7 |
128,
|
|
|
|
| 1 |
{
|
| 2 |
"_class_name": "AsymmetricAutoencoderKL",
|
| 3 |
"_diffusers_version": "0.36.0",
|
| 4 |
+
"_name_or_path": "vae10",
|
| 5 |
"act_fn": "silu",
|
| 6 |
"block_out_channels": [
|
| 7 |
128,
|
diffusion_pytorch_model.safetensors
CHANGED
|
@@ -1,3 +1,3 @@
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:
|
| 3 |
size 427466716
|
|
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:adb25d8783707b9a6267acb396f44a2966fd6e50dbe23ff6ed142020385a178b
|
| 3 |
size 427466716
|
train_vae_fdl.py
CHANGED
|
@@ -29,11 +29,11 @@ from collections import deque
|
|
| 29 |
|
| 30 |
# --------------------------- Параметры ---------------------------
|
| 31 |
ds_path = "/workspace/d23"
|
| 32 |
-
project = "
|
| 33 |
batch_size = 1
|
| 34 |
base_learning_rate = 6e-6
|
| 35 |
min_learning_rate = 7e-7
|
| 36 |
-
num_epochs =
|
| 37 |
sample_interval_share = 25
|
| 38 |
use_wandb = True
|
| 39 |
save_model = True
|
|
@@ -41,8 +41,8 @@ use_decay = True
|
|
| 41 |
optimizer_type = "adam8bit"
|
| 42 |
dtype = torch.float32
|
| 43 |
|
| 44 |
-
model_resolution =
|
| 45 |
-
high_resolution =
|
| 46 |
limit = 0
|
| 47 |
save_barrier = 1.3
|
| 48 |
warmup_percent = 0.005
|
|
@@ -498,7 +498,7 @@ for epoch in range(num_epochs):
|
|
| 498 |
imgs = imgs.to(accelerator.device)
|
| 499 |
|
| 500 |
if high_resolution != model_resolution:
|
| 501 |
-
imgs_low = F.interpolate(imgs, size=(model_resolution, model_resolution), mode="bilinear", align_corners=False)
|
| 502 |
else:
|
| 503 |
imgs_low = imgs
|
| 504 |
|
|
|
|
| 29 |
|
| 30 |
# --------------------------- Параметры ---------------------------
|
| 31 |
ds_path = "/workspace/d23"
|
| 32 |
+
project = "vae10"
|
| 33 |
batch_size = 1
|
| 34 |
base_learning_rate = 6e-6
|
| 35 |
min_learning_rate = 7e-7
|
| 36 |
+
num_epochs = 2
|
| 37 |
sample_interval_share = 25
|
| 38 |
use_wandb = True
|
| 39 |
save_model = True
|
|
|
|
| 41 |
optimizer_type = "adam8bit"
|
| 42 |
dtype = torch.float32
|
| 43 |
|
| 44 |
+
model_resolution = 512 #288
|
| 45 |
+
high_resolution = 1024 #576
|
| 46 |
limit = 0
|
| 47 |
save_barrier = 1.3
|
| 48 |
warmup_percent = 0.005
|
|
|
|
| 498 |
imgs = imgs.to(accelerator.device)
|
| 499 |
|
| 500 |
if high_resolution != model_resolution:
|
| 501 |
+
imgs_low = F.interpolate(imgs, size=(model_resolution, model_resolution),mode="area") # mode="bilinear", align_corners=False)
|
| 502 |
else:
|
| 503 |
imgs_low = imgs
|
| 504 |
|