Nawatix committed on
Commit
2be2ed6
·
verified ·
1 Parent(s): 2e7c526

End of training

Browse files
.gitattributes CHANGED
@@ -33,3 +33,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ image_control.png filter=lfs diff=lfs merge=lfs -text
37
+ images_0.png filter=lfs diff=lfs merge=lfs -text
38
+ images_1.png filter=lfs diff=lfs merge=lfs -text
39
+ images_2.png filter=lfs diff=lfs merge=lfs -text
40
+ images_3.png filter=lfs diff=lfs merge=lfs -text
41
+ images_4.png filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ base_model: stabilityai/stable-diffusion-xl-base-1.0
3
+ library_name: diffusers
4
+ license: openrail++
5
+ inference: true
6
+ tags:
7
+ - stable-diffusion-xl
8
+ - stable-diffusion-xl-diffusers
9
+ - text-to-image
10
+ - diffusers
11
+ - controlnet
12
+ - diffusers-training
13
+ ---
14
+
15
+ <!-- This model card has been generated automatically according to the information the training script had access to. You
16
+ should probably proofread and complete it, then remove this comment. -->
17
+
18
+
19
+ # controlnet-Nawatix/out_multihmr_normal
20
+
21
+ These are controlnet weights trained on stabilityai/stable-diffusion-xl-base-1.0 with new type of conditioning.
22
+ You can find some example images below.
23
+
24
+ prompt: a woman on a trapeze in the air
25
+ ![images_0](./images_0.png)
26
+ prompt: a woman in red hair and white dress sitting on fire
27
+ ![images_1](./images_1.png)
28
+ prompt: two dancers in white dress and black shoes on stage
29
+ ![images_2](./images_2.png)
30
+ prompt: two men in period clothing stand in front of a table
31
+ ![images_3](./images_3.png)
32
+ prompt: two men are walking down a hallway in a building
33
+ ![images_4](./images_4.png)
34
+
35
+
36
+
37
+ ## Intended uses & limitations
38
+
39
+ #### How to use
40
+
41
+ ```python
42
+ # TODO: add an example code snippet for running this diffusion pipeline
43
+ ```
44
+
45
+ #### Limitations and bias
46
+
47
+ [TODO: provide examples of latent issues and potential remediations]
48
+
49
+ ## Training details
50
+
51
+ [TODO: describe the data used to train the model]
checkpoint-20000/controlnet/config.json ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "ControlNetModel",
3
+ "_diffusers_version": "0.34.0.dev0",
4
+ "act_fn": "silu",
5
+ "addition_embed_type": "text_time",
6
+ "addition_embed_type_num_heads": 64,
7
+ "addition_time_embed_dim": 256,
8
+ "attention_head_dim": [
9
+ 5,
10
+ 10,
11
+ 20
12
+ ],
13
+ "block_out_channels": [
14
+ 320,
15
+ 640,
16
+ 1280
17
+ ],
18
+ "class_embed_type": null,
19
+ "conditioning_channels": 3,
20
+ "conditioning_embedding_out_channels": [
21
+ 16,
22
+ 32,
23
+ 96,
24
+ 256
25
+ ],
26
+ "controlnet_conditioning_channel_order": "rgb",
27
+ "cross_attention_dim": 2048,
28
+ "down_block_types": [
29
+ "DownBlock2D",
30
+ "CrossAttnDownBlock2D",
31
+ "CrossAttnDownBlock2D"
32
+ ],
33
+ "downsample_padding": 1,
34
+ "encoder_hid_dim": null,
35
+ "encoder_hid_dim_type": null,
36
+ "flip_sin_to_cos": true,
37
+ "freq_shift": 0,
38
+ "global_pool_conditions": false,
39
+ "in_channels": 4,
40
+ "layers_per_block": 2,
41
+ "mid_block_scale_factor": 1,
42
+ "mid_block_type": "UNetMidBlock2DCrossAttn",
43
+ "norm_eps": 1e-05,
44
+ "norm_num_groups": 32,
45
+ "num_attention_heads": null,
46
+ "num_class_embeds": null,
47
+ "only_cross_attention": false,
48
+ "projection_class_embeddings_input_dim": 2816,
49
+ "resnet_time_scale_shift": "default",
50
+ "transformer_layers_per_block": [
51
+ 1,
52
+ 2,
53
+ 10
54
+ ],
55
+ "upcast_attention": null,
56
+ "use_linear_projection": true
57
+ }
checkpoint-20000/controlnet/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c8a889ce4db544e58185fde51efe324216e1c26482f343436966e3b38f390ebb
3
+ size 5004167864
checkpoint-20000/optimizer.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:459d5fba2cc93f987def3761df5df4d2fc2fe4e978adb4fc95d8c5df1ec72644
3
+ size 10008838839
checkpoint-20000/random_states_0.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a6cf7059b0f83b1b779bd749de80e95aefe41acb0d2deb9bbeaaafae3a7cfea4
3
+ size 15473
checkpoint-20000/scaler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e860170f1be78027dd4b67402548c995ac98ee2e84bc12a97db04e99bad86386
3
+ size 1383
checkpoint-20000/scheduler.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f0ed641449df44a5f9874edb41ad438ffd85bf6b26dba302eca4c7f8cdb25957
3
+ size 1401
config.json ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "ControlNetModel",
3
+ "_diffusers_version": "0.34.0.dev0",
4
+ "act_fn": "silu",
5
+ "addition_embed_type": "text_time",
6
+ "addition_embed_type_num_heads": 64,
7
+ "addition_time_embed_dim": 256,
8
+ "attention_head_dim": [
9
+ 5,
10
+ 10,
11
+ 20
12
+ ],
13
+ "block_out_channels": [
14
+ 320,
15
+ 640,
16
+ 1280
17
+ ],
18
+ "class_embed_type": null,
19
+ "conditioning_channels": 3,
20
+ "conditioning_embedding_out_channels": [
21
+ 16,
22
+ 32,
23
+ 96,
24
+ 256
25
+ ],
26
+ "controlnet_conditioning_channel_order": "rgb",
27
+ "cross_attention_dim": 2048,
28
+ "down_block_types": [
29
+ "DownBlock2D",
30
+ "CrossAttnDownBlock2D",
31
+ "CrossAttnDownBlock2D"
32
+ ],
33
+ "downsample_padding": 1,
34
+ "encoder_hid_dim": null,
35
+ "encoder_hid_dim_type": null,
36
+ "flip_sin_to_cos": true,
37
+ "freq_shift": 0,
38
+ "global_pool_conditions": false,
39
+ "in_channels": 4,
40
+ "layers_per_block": 2,
41
+ "mid_block_scale_factor": 1,
42
+ "mid_block_type": "UNetMidBlock2DCrossAttn",
43
+ "norm_eps": 1e-05,
44
+ "norm_num_groups": 32,
45
+ "num_attention_heads": null,
46
+ "num_class_embeds": null,
47
+ "only_cross_attention": false,
48
+ "projection_class_embeddings_input_dim": 2816,
49
+ "resnet_time_scale_shift": "default",
50
+ "transformer_layers_per_block": [
51
+ 1,
52
+ 2,
53
+ 10
54
+ ],
55
+ "upcast_attention": null,
56
+ "use_linear_projection": true
57
+ }
diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c8a889ce4db544e58185fde51efe324216e1c26482f343436966e3b38f390ebb
3
+ size 5004167864
image_control.png ADDED

Git LFS Details

  • SHA256: 2688d192dcd21fa0a6d664f100709972493cffccea625f359a23aef74f2b5f08
  • Pointer size: 131 Bytes
  • Size of remote file: 108 kB
images_0.png ADDED

Git LFS Details

  • SHA256: 04cfbe0c5d8e0880e36ead72cf6ac29bccc49c0611c0e094a81572f33eb66f98
  • Pointer size: 131 Bytes
  • Size of remote file: 100 kB
images_1.png ADDED

Git LFS Details

  • SHA256: a65cd34b242cef599281b6800098bb19ea70b41e3dc8f8953ad37e69d2c6dbdf
  • Pointer size: 131 Bytes
  • Size of remote file: 145 kB
images_2.png ADDED

Git LFS Details

  • SHA256: b4f4a97ac8a0a9d35b31d2773c2d6ac13d7042cd4d959e233c74c5e358f53bc7
  • Pointer size: 131 Bytes
  • Size of remote file: 194 kB
images_3.png ADDED

Git LFS Details

  • SHA256: b647606656eb51943cefeb9692717a8f1c64b7b86186427e24821f7ca499a490
  • Pointer size: 131 Bytes
  • Size of remote file: 144 kB
images_4.png ADDED

Git LFS Details

  • SHA256: ea4a6b020be041ef6beae34312d81dfcb2c9bcb0d7ea4c1caa835d073f1e8b6f
  • Pointer size: 131 Bytes
  • Size of remote file: 119 kB