Upload configs/instruct-pix2pix.yaml with huggingface_hub
configs/instruct-pix2pix.yaml
ADDED
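The commit title notes the file was pushed with huggingface_hub. For reference, a minimal sketch of how such an upload is typically done with that library; the repo_id below is a hypothetical placeholder, not the actual destination repository.

from huggingface_hub import HfApi

api = HfApi()
# Push the local YAML into the repo under configs/.
# repo_id is a placeholder for illustration only.
api.upload_file(
    path_or_fileobj="configs/instruct-pix2pix.yaml",
    path_in_repo="configs/instruct-pix2pix.yaml",
    repo_id="your-username/your-model-repo",
    commit_message="Upload configs/instruct-pix2pix.yaml with huggingface_hub",
)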
# File modified by authors of InstructPix2Pix from original (https://github.com/CompVis/stable-diffusion).
# See more details in LICENSE.

model:
  base_learning_rate: 1.0e-04
  target: modules.models.diffusion.ddpm_edit.LatentDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: edited
    cond_stage_key: edit
    # image_size: 64
    # image_size: 32
    image_size: 16
    channels: 4
    cond_stage_trainable: false   # Note: different from the one we trained before
    conditioning_key: hybrid
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    use_ema: false

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 0 ]
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32 # unused
        in_channels: 8
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: True
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder

data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 128
    num_workers: 1
    wrap: false
    validation:
      target: edit_dataset.EditDataset
      params:
        path: data/clip-filtered-dataset
        cache_dir: data/
        cache_name: data_10k
        split: val
        min_text_sim: 0.2
        min_image_sim: 0.75
        min_direction_sim: 0.2
        max_samples_per_prompt: 1
        min_resize_res: 512
        max_resize_res: 512
        crop_res: 512
        output_as_edit: False
        real_input: True
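A note on the shapes above: conditioning_key: hybrid together with in_channels: 8 on the UNet matches the InstructPix2Pix design, where the 4-channel noisy latent is concatenated with the 4 latent channels of the encoded input image, while the edit instruction enters via FrozenCLIPEmbedder cross-attention (context_dim: 768). Below is a minimal sketch of loading this config, assuming a codebase that provides both the ldm package and the modules.models.diffusion.ddpm_edit module named in target; the checkpoint path is a placeholder.

import torch
from omegaconf import OmegaConf
from ldm.util import instantiate_from_config

# Read the YAML above and build the LatentDiffusion model it describes.
config = OmegaConf.load("configs/instruct-pix2pix.yaml")
model = instantiate_from_config(config.model)

# Hypothetical checkpoint path, for illustration only.
state = torch.load("checkpoints/instruct-pix2pix.ckpt", map_location="cpu")
model.load_state_dict(state["state_dict"], strict=False)
model.eval()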