acondess committed on
Commit
ffdfd15
·
1 Parent(s): e2d0c93

Upload 2 files

Browse files
Files changed (2) hide show
  1. db_config.json +123 -0
  2. mynew_revAnimated.yaml +70 -0
db_config.json ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "weight_decay": 0.01,
3
+ "attention": "xformers",
4
+ "cache_latents": true,
5
+ "clip_skip": 1,
6
+ "concepts_list": [
7
+ {
8
+ "class_data_dir": "",
9
+ "class_guidance_scale": 7.5,
10
+ "class_infer_steps": 40,
11
+ "class_negative_prompt": "",
12
+ "class_prompt": " butterfly",
13
+ "class_token": " butterfly",
14
+ "instance_data_dir": "H:\\StableDiffusion\\sd-webui-aki\\dreamboot\\butterfly512",
15
+ "instance_prompt": "lineart of butterfly",
16
+ "instance_token": "lineart of butterfly",
17
+ "is_valid": true,
18
+ "n_save_sample": 1,
19
+ "num_class_images_per": 0,
20
+ "sample_seed": -1,
21
+ "save_guidance_scale": 7.5,
22
+ "save_infer_steps": 20,
23
+ "save_sample_negative_prompt": "",
24
+ "save_sample_prompt": "",
25
+ "save_sample_template": ""
26
+ }
27
+ ],
28
+ "concepts_path": "",
29
+ "custom_model_name": "lineart_butterfly",
30
+ "deterministic": false,
31
+ "disable_class_matching": false,
32
+ "disable_logging": false,
33
+ "ema_predict": false,
34
+ "epoch": 200,
35
+ "epoch_pause_frequency": 0,
36
+ "epoch_pause_time": 0,
37
+ "freeze_clip_normalization": false,
38
+ "gradient_accumulation_steps": 1,
39
+ "gradient_checkpointing": true,
40
+ "gradient_set_to_none": true,
41
+ "graph_smoothing": 50,
42
+ "half_model": false,
43
+ "has_ema": false,
44
+ "hflip": false,
45
+ "infer_ema": false,
46
+ "initial_revision": 0,
47
+ "learning_rate": 2e-06,
48
+ "learning_rate_min": 1e-06,
49
+ "lifetime_revision": 0,
50
+ "lora_learning_rate": 0.0001,
51
+ "lora_model_name": "",
52
+ "lora_txt_learning_rate": 5e-05,
53
+ "lora_txt_rank": 4,
54
+ "lora_txt_weight": 1,
55
+ "lora_unet_rank": 4,
56
+ "lora_weight": 1,
57
+ "lora_use_buggy_requires_grad": false,
58
+ "lr_cycles": 1,
59
+ "lr_factor": 0.5,
60
+ "lr_power": 1,
61
+ "lr_scale_pos": 0.5,
62
+ "lr_scheduler": "constant_with_warmup",
63
+ "lr_warmup_steps": 0,
64
+ "max_token_length": 75,
65
+ "mixed_precision": "fp16",
66
+ "model_dir": "H:\\StableDiffusion\\sd-webui-aki\\sd-webui-aki-v4.1\\models\\dreambooth\\mynew_revAnimated",
67
+ "model_name": "mynew_revAnimated",
68
+ "model_path": "H:\\StableDiffusion\\sd-webui-aki\\sd-webui-aki-v4.1\\models\\dreambooth\\mynew_revAnimated",
69
+ "noise_scheduler": "DDPM",
70
+ "num_train_epochs": 100,
71
+ "offset_noise": 0,
72
+ "optimizer": "8bit AdamW",
73
+ "pad_tokens": true,
74
+ "pretrained_model_name_or_path": "H:\\StableDiffusion\\sd-webui-aki\\sd-webui-aki-v4.1\\models\\dreambooth\\mynew_revAnimated\\working",
75
+ "pretrained_vae_name_or_path": "",
76
+ "prior_loss_scale": false,
77
+ "prior_loss_target": 100.0,
78
+ "prior_loss_weight": 0.75,
79
+ "prior_loss_weight_min": 0.1,
80
+ "resolution": 512,
81
+ "revision": 2000,
82
+ "sample_batch_size": 1,
83
+ "sanity_prompt": "",
84
+ "sanity_seed": 420420.0,
85
+ "save_ckpt_after": true,
86
+ "save_ckpt_cancel": false,
87
+ "save_ckpt_during": false,
88
+ "save_ema": true,
89
+ "save_embedding_every": 25,
90
+ "save_lora_after": true,
91
+ "save_lora_cancel": false,
92
+ "save_lora_during": false,
93
+ "save_lora_for_extra_net": false,
94
+ "save_preview_every": 5,
95
+ "save_safetensors": true,
96
+ "save_state_after": false,
97
+ "save_state_cancel": false,
98
+ "save_state_during": false,
99
+ "scheduler": "DDIM",
100
+ "shared_diffusers_path": "",
101
+ "shuffle_tags": true,
102
+ "snapshot": "",
103
+ "split_loss": true,
104
+ "src": "H:\\StableDiffusion\\sd-webui-aki\\sd-webui-aki-v4.1\\models\\Stable-diffusion\\revAnimated_v122.safetensors",
105
+ "stop_text_encoder": 0,
106
+ "strict_tokens": false,
107
+ "dynamic_img_norm": false,
108
+ "tenc_weight_decay": 0.01,
109
+ "tenc_grad_clip_norm": 0,
110
+ "tomesd": 0,
111
+ "train_batch_size": 1,
112
+ "train_imagic": false,
113
+ "train_unet": true,
114
+ "train_unfrozen": false,
115
+ "txt_learning_rate": 2e-06,
116
+ "use_concepts": false,
117
+ "use_ema": false,
118
+ "use_lora": false,
119
+ "use_lora_extended": false,
120
+ "use_shared_src": false,
121
+ "use_subdir": true,
122
+ "v2": false
123
+ }
mynew_revAnimated.yaml ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model:
2
+ base_learning_rate: 1.0e-04
3
+ target: ldm.models.diffusion.ddpm.LatentDiffusion
4
+ params:
5
+ linear_start: 0.00085
6
+ linear_end: 0.0120
7
+ num_timesteps_cond: 1
8
+ log_every_t: 200
9
+ timesteps: 1000
10
+ first_stage_key: "images"
11
+ cond_stage_key: "input_ids"
12
+ image_size: 64
13
+ channels: 4
14
+ cond_stage_trainable: true # Note: different from the one we trained before
15
+ conditioning_key: crossattn
16
+ monitor: val/loss_simple_ema
17
+ scale_factor: 0.18215
18
+ use_ema: False
19
+
20
+ scheduler_config: # 10000 warmup steps
21
+ target: ldm.lr_scheduler.LambdaLinearScheduler
22
+ params:
23
+ warm_up_steps: [ 10000 ]
24
+ cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
25
+ f_start: [ 1.e-6 ]
26
+ f_max: [ 1. ]
27
+ f_min: [ 1. ]
28
+
29
+ unet_config:
30
+ target: ldm.modules.diffusionmodules.openaimodel.UNetModel
31
+ params:
32
+ image_size: 32 # unused
33
+ in_channels: 4
34
+ out_channels: 4
35
+ model_channels: 320
36
+ attention_resolutions: [ 4, 2, 1 ]
37
+ num_res_blocks: 2
38
+ channel_mult: [ 1, 2, 4, 4 ]
39
+ num_heads: 8
40
+ use_spatial_transformer: True
41
+ transformer_depth: 1
42
+ context_dim: 768
43
+ use_checkpoint: True
44
+ legacy: False
45
+
46
+ first_stage_config:
47
+ target: ldm.models.autoencoder.AutoencoderKL
48
+ params:
49
+ embed_dim: 4
50
+ monitor: val/rec_loss
51
+ ddconfig:
52
+ double_z: true
53
+ z_channels: 4
54
+ resolution: 256
55
+ in_channels: 3
56
+ out_ch: 3
57
+ ch: 128
58
+ ch_mult:
59
+ - 1
60
+ - 2
61
+ - 4
62
+ - 4
63
+ num_res_blocks: 2
64
+ attn_resolutions: [ ]
65
+ dropout: 0.0
66
+ lossconfig:
67
+ target: torch.nn.Identity
68
+
69
+ cond_stage_config:
70
+ target: ldm.modules.encoders.modules.FrozenCLIPEmbedder