GamerC0der committed on
Commit
c79b1ff
·
verified ·
1 Parent(s): 67a5942

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +70 -17
README.md CHANGED
@@ -7,21 +7,74 @@ pipeline_tag: text-to-image
7
  tags:
8
  - art
9
  inference: true
10
- widget:
11
- - text: "A cat playing with a ball"
12
- example_title: "Cat"
13
- - text: "A dog jumping over a fence"
14
- example_title: "Dog"
15
- extra_gated_prompt: "You agree to not use this model in a way infringing upon the copyright of another company."
16
- extra_gated_fields:
17
- Person: text
18
-
19
- I want to use this model for:
20
- type: select
21
- options:
22
- - Research
23
- - Education
24
- - label: Other
25
- value: other
26
- I agree to the terms listed above: checkbox
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
  ---
 
7
  tags:
8
  - art
9
  inference: true
10
+ model:
11
+ base_learning_rate: 1.0e-04
12
+ target: ldm.models.diffusion.ddpm.LatentDiffusion
13
+ params:
14
+ linear_start: 0.00085
15
+ linear_end: 0.0120
16
+ num_timesteps_cond: 1
17
+ log_every_t: 200
18
+ timesteps: 1000
19
+ first_stage_key: "image"
20
+ cond_stage_key: "caption"
21
+ image_size: 64
22
+ channels: 4
23
+ cond_stage_trainable: false # Note: different from the one we trained before
24
+ conditioning_key: crossattn
25
+ monitor: val/loss_simple_ema
26
+ scale_factor: 0.18215
27
+ use_ema: False
28
+
29
+ scheduler_config: # 10000 warmup steps
30
+ target: ldm.lr_scheduler.LambdaLinearScheduler
31
+ params:
32
+ warm_up_steps: [ 10000 ]
33
+ cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
34
+ f_start: [ 1.e-6 ]
35
+ f_max: [ 1. ]
36
+ f_min: [ 1. ]
37
+
38
+ unet_config:
39
+ target: ldm.modules.diffusionmodules.openaimodel.UNetModel
40
+ params:
41
+ image_size: 32 # unused
42
+ in_channels: 4
43
+ out_channels: 4
44
+ model_channels: 320
45
+ attention_resolutions: [ 4, 2, 1 ]
46
+ num_res_blocks: 2
47
+ channel_mult: [ 1, 2, 4, 4 ]
48
+ num_heads: 8
49
+ use_spatial_transformer: True
50
+ transformer_depth: 1
51
+ context_dim: 768
52
+ use_checkpoint: True
53
+ legacy: False
54
+
55
+ first_stage_config:
56
+ target: ldm.models.autoencoder.AutoencoderKL
57
+ params:
58
+ embed_dim: 4
59
+ monitor: val/rec_loss
60
+ ddconfig:
61
+ double_z: true
62
+ z_channels: 4
63
+ resolution: 256
64
+ in_channels: 3
65
+ out_ch: 3
66
+ ch: 128
67
+ ch_mult:
68
+ - 1
69
+ - 2
70
+ - 4
71
+ - 4
72
+ num_res_blocks: 2
73
+ attn_resolutions: [ ]
74
+ dropout: 0.0
75
+ lossconfig:
76
+ target: torch.nn.Identity
77
+
78
+ cond_stage_config:
79
+ target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
80
  ---