sd2 committed on
Commit ac4aaff · 0 Parent(s):

Initial Commit

Files changed (39)
  1. .gitattributes +34 -0
  2. README.md +150 -0
  3. feature_extractor/preprocessor_config.json +27 -0
  4. image.png +0 -0
  5. image_encoder/config.json +23 -0
  6. image_encoder/model.fp16.safetensors +3 -0
  7. image_encoder/model.safetensors +3 -0
  8. image_encoder/pytorch_model.bin +3 -0
  9. image_encoder/pytorch_model.fp16.bin +3 -0
  10. image_noising_scheduler/scheduler_config.json +16 -0
  11. image_normalizer/config.json +6 -0
  12. image_normalizer/diffusion_pytorch_model.bin +3 -0
  13. image_normalizer/diffusion_pytorch_model.fp16.bin +3 -0
  14. image_normalizer/diffusion_pytorch_model.fp16.safetensors +3 -0
  15. image_normalizer/diffusion_pytorch_model.safetensors +3 -0
  16. model_index.json +40 -0
  17. scheduler/scheduler_config.json +18 -0
  18. sd21-unclip-h.ckpt +3 -0
  19. sd21-unclip-l.ckpt +3 -0
  20. sd_unclip_examples.jpeg +0 -0
  21. text_encoder/config.json +25 -0
  22. text_encoder/model.fp16.safetensors +3 -0
  23. text_encoder/model.safetensors +3 -0
  24. text_encoder/pytorch_model.bin +3 -0
  25. text_encoder/pytorch_model.fp16.bin +3 -0
  26. tokenizer/merges.txt +0 -0
  27. tokenizer/special_tokens_map.json +24 -0
  28. tokenizer/tokenizer_config.json +33 -0
  29. tokenizer/vocab.json +0 -0
  30. unet/config.json +57 -0
  31. unet/diffusion_pytorch_model.bin +3 -0
  32. unet/diffusion_pytorch_model.fp16.bin +3 -0
  33. unet/diffusion_pytorch_model.fp16.safetensors +3 -0
  34. unet/diffusion_pytorch_model.safetensors +3 -0
  35. vae/config.json +31 -0
  36. vae/diffusion_pytorch_model.bin +3 -0
  37. vae/diffusion_pytorch_model.fp16.bin +3 -0
  38. vae/diffusion_pytorch_model.fp16.safetensors +3 -0
  39. vae/diffusion_pytorch_model.safetensors +3 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
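These rules route every large binary in the repo (checkpoints, `.bin`, `.safetensors`, archives) through Git LFS, so a clone without LFS support fetches only the small pointer files shown in the hunks below. A minimal sketch for fetching a single weight file programmatically; the repo id is assumed to be the original `stabilityai/stable-diffusion-2-1-unclip` (substitute this mirror's id as appropriate):

```python
# Hedged sketch: download one LFS-backed file through the Hub's cache-aware API.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="stabilityai/stable-diffusion-2-1-unclip",  # assumption: original repo id
    filename="unet/diffusion_pytorch_model.safetensors",
)
print(path)  # local cache path of the resolved file
```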
README.md ADDED
@@ -0,0 +1,150 @@
+ ---
+ license: openrail++
+ tags:
+ - stable-diffusion
+ - text-to-image
+ pinned: true
+ ---
+
+ # Stable Diffusion v2-1-unclip Model Card
+
+ ### ⚠️ This repository is a mirror of the now-deprecated `stabilityai/stable-diffusion-2-1-unclip`; this repository and organization are not affiliated with Stability AI in any way.
+
+ This model card focuses on the model associated with the Stable Diffusion v2-1 model, whose codebase is available [here](https://github.com/Stability-AI/stablediffusion).
+
+ This `stable-diffusion-2-1-unclip` model is a finetuned version of Stable Diffusion 2.1, modified to accept a (noisy) CLIP image embedding in addition to the text prompt. It can be used to create image variations ([Examples](#examples)) or chained with text-to-image CLIP priors. The amount of noise added to the image embedding is controlled by `noise_level` (0 means no noise, 1000 means full noise); see the sketch after the example below.
+
+ - Use it with 🧨 [`diffusers`](#examples)
+
+ ## Model Details
+ - **Developed by:** Robin Rombach, Patrick Esser
+ - **Model type:** Diffusion-based text-to-image generation model
+ - **Language(s):** English
+ - **License:** [CreativeML Open RAIL++-M License](https://huggingface.co/stabilityai/stable-diffusion-2/blob/main/LICENSE-MODEL)
+ - **Model Description:** This is a model that can be used to generate and modify images based on text prompts. It is a [Latent Diffusion Model](https://arxiv.org/abs/2112.10752) that uses a fixed, pretrained text encoder ([OpenCLIP-ViT/H](https://github.com/mlfoundations/open_clip)).
+ - **Resources for more information:** [GitHub Repository](https://github.com/Stability-AI/).
+ - **Cite as:**
+
+       @InProceedings{Rombach_2022_CVPR,
+           author    = {Rombach, Robin and Blattmann, Andreas and Lorenz, Dominik and Esser, Patrick and Ommer, Bj\"orn},
+           title     = {High-Resolution Image Synthesis With Latent Diffusion Models},
+           booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
+           month     = {June},
+           year      = {2022},
+           pages     = {10684-10695}
+       }
+
+
+ ## Examples
+
+ Use the [🤗 Diffusers library](https://github.com/huggingface/diffusers) to run Stable Diffusion 2-1-unclip simply and efficiently.
+
+ ```bash
+ pip install diffusers transformers accelerate scipy safetensors
+ ```
+ Running the pipeline (the example keeps the repository's default scheduler, `PNDMScheduler`; a swap to an alternative such as `DPMSolverMultistepScheduler` is sketched further down):
+
+ ```python
+ from diffusers import DiffusionPipeline
+ from diffusers.utils import load_image
+ import torch
+
+ pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-unclip", torch_dtype=torch.float16)
+ pipe.to("cuda")
+
+ # get image
+ url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/tarsila_do_amaral.png"
+ image = load_image(url)
+
+ # run image variation
+ image = pipe(image).images[0]
+ ```
+
+ ![img](./image.png)
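As noted in the introduction, the strength of the variation can be steered with `noise_level`, which is an ordinary call argument of the pipeline. A minimal sketch, assuming the `pipe` and `image` objects from the example above:

```python
# noise_level perturbs the CLIP image embedding before it conditions the UNet:
# 0 keeps the embedding intact; values toward 1000 replace it with pure noise.
variation = pipe(image, noise_level=200).images[0]
variation.save("variation.png")
```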
+
+ # Uses
+
+ ## Direct Use
+ The model is intended for research purposes only. Possible research areas and tasks include
+
+ - Safe deployment of models which have the potential to generate harmful content.
+ - Probing and understanding the limitations and biases of generative models.
+ - Generation of artworks and use in design and other artistic processes.
+ - Applications in educational or creative tools.
+ - Research on generative models.
+
+ Excluded uses are described below.
+
+ ### Misuse, Malicious Use, and Out-of-Scope Use
+ _Note: This section was originally taken from the [DALLE-MINI model card](https://huggingface.co/dalle-mini/dalle-mini) and used for Stable Diffusion v1, but it applies in the same way to Stable Diffusion v2._
+
+ The model should not be used to intentionally create or disseminate images that create hostile or alienating environments for people. This includes generating images that people would foreseeably find disturbing, distressing, or offensive, or content that propagates historical or current stereotypes.
+
+ #### Out-of-Scope Use
+ The model was not trained to produce factual or true representations of people or events, so using the model to generate such content is out of scope for its abilities.
+
+ #### Misuse and Malicious Use
+ Using the model to generate content that is cruel to individuals is a misuse of this model. This includes, but is not limited to:
+
+ - Generating demeaning, dehumanizing, or otherwise harmful representations of people or their environments, cultures, religions, etc.
+ - Intentionally promoting or propagating discriminatory content or harmful stereotypes.
+ - Impersonating individuals without their consent.
+ - Sexual content without the consent of the people who might see it.
+ - Mis- and disinformation.
+ - Representations of egregious violence and gore.
+ - Sharing of copyrighted or licensed material in violation of its terms of use.
+ - Sharing content that is an alteration of copyrighted or licensed material in violation of its terms of use.
+
+ ## Limitations and Bias
+
+ ### Limitations
+
+ - The model does not achieve perfect photorealism.
+ - The model cannot render legible text.
+ - The model does not perform well on more difficult tasks involving compositionality, such as rendering an image corresponding to "A red cube on top of a blue sphere".
+ - Faces and people in general may not be generated properly.
+ - The model was trained mainly with English captions and will not work as well in other languages.
+ - The autoencoding part of the model is lossy.
+ - The model was trained on a subset of the large-scale dataset [LAION-5B](https://laion.ai/blog/laion-5b/), which contains adult, violent, and sexual content. To partially mitigate this, we filtered the dataset using LAION's NSFW detector (see the Training section).
+
+ ### Bias
+ While the capabilities of image generation models are impressive, they can also reinforce or exacerbate social biases.
+ Stable Diffusion was primarily trained on subsets of [LAION-2B(en)](https://laion.ai/blog/laion-5b/),
+ which consists of images limited to English descriptions.
+ Texts and images from communities and cultures that use other languages are likely to be insufficiently accounted for.
+ This affects the overall output of the model, as white and Western cultures are often set as the default. Further, the
+ ability of the model to generate content with non-English prompts is significantly worse than with English-language prompts.
+ Stable Diffusion v2 mirrors and exacerbates biases to such a degree that viewer discretion must be advised irrespective of the input or its intent.
+
+
+ ## Training
+
+ **Training Data**
+ The model developers used the following dataset for training the model:
+
+ - LAION-5B and subsets (details below). The training data was further filtered using LAION's NSFW detector, with a "p_unsafe" score of 0.1 (conservative). For more details, please refer to LAION-5B's [NeurIPS 2022](https://openreview.net/forum?id=M3Y74vmsMcY) paper and the reviewer discussions on the topic.
+
+
+ ## Environmental Impact
+
+ **Stable Diffusion v1** **Estimated Emissions**
+ Based on the information below, we estimate the following CO2 emissions using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). The hardware, runtime, cloud provider, and compute region were used to estimate the carbon impact.
+
+ - **Hardware Type:** A100 PCIe 40GB
+ - **Hours used:** 200000
+ - **Cloud Provider:** AWS
+ - **Compute Region:** US-east
+ - **Carbon Emitted (Power consumption x Time x Carbon produced based on location of power grid):** 15000 kg CO2 eq.
+
+ ## Citation
+
+       @InProceedings{Rombach_2022_CVPR,
+           author    = {Rombach, Robin and Blattmann, Andreas and Lorenz, Dominik and Esser, Patrick and Ommer, Bj\"orn},
+           title     = {High-Resolution Image Synthesis With Latent Diffusion Models},
+           booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
+           month     = {June},
+           year      = {2022},
+           pages     = {10684-10695}
+       }
+
+ *This model card was written by Robin Rombach, Patrick Esser, and David Ha and is based on the [Stable Diffusion v1](https://github.com/CompVis/stable-diffusion/blob/main/Stable_Diffusion_v1_Model_Card.md) and [DALL-E Mini model card](https://huggingface.co/dalle-mini/dalle-mini).*
feature_extractor/preprocessor_config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "crop_size": {
+     "height": 224,
+     "width": 224
+   },
+   "do_center_crop": true,
+   "do_convert_rgb": true,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.48145466,
+     0.4578275,
+     0.40821073
+   ],
+   "image_processor_type": "CLIPImageProcessor",
+   "image_std": [
+     0.26862954,
+     0.26130258,
+     0.27577711
+   ],
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "shortest_edge": 224
+   }
+ }
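This config drives the CLIP image preprocessing: resize the shortest edge to 224, center-crop to 224x224, rescale by 1/255 (`rescale_factor`), and normalize with the CLIP mean/std. A minimal sketch of applying it standalone, assuming a local checkout of this repository in the working directory:

```python
from PIL import Image
from transformers import CLIPImageProcessor

# Loads the preprocessor_config.json above from the feature_extractor/ subfolder.
processor = CLIPImageProcessor.from_pretrained(".", subfolder="feature_extractor")
pixel_values = processor(
    images=Image.open("image.png").convert("RGB"), return_tensors="pt"
).pixel_values
print(pixel_values.shape)  # expected: torch.Size([1, 3, 224, 224])
```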
image.png ADDED
image_encoder/config.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "_name_or_path": "./image_encoder",
+   "architectures": [
+     "CLIPVisionModelWithProjection"
+   ],
+   "attention_dropout": 0.0,
+   "dropout": 0.0,
+   "hidden_act": "gelu",
+   "hidden_size": 1280,
+   "image_size": 224,
+   "initializer_factor": 1.0,
+   "initializer_range": 0.02,
+   "intermediate_size": 5120,
+   "layer_norm_eps": 1e-05,
+   "model_type": "clip_vision_model",
+   "num_attention_heads": 16,
+   "num_channels": 3,
+   "num_hidden_layers": 32,
+   "patch_size": 14,
+   "projection_dim": 1024,
+   "torch_dtype": "float16",
+   "transformers_version": "4.28.0.dev0"
+ }
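With `hidden_size` 1280, 32 layers, and patch size 14, this is a ViT-H/14 CLIP vision tower projecting to the 1024-dimensional image embedding that the unCLIP conditioning consumes. A minimal sketch, reusing `pixel_values` from the preprocessing sketch above:

```python
import torch
from transformers import CLIPVisionModelWithProjection

encoder = CLIPVisionModelWithProjection.from_pretrained(".", subfolder="image_encoder")
with torch.no_grad():
    image_embeds = encoder(pixel_values=pixel_values).image_embeds
print(image_embeds.shape)  # expected: torch.Size([1, 1024]), per projection_dim
```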
image_encoder/model.fp16.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:64a7ef761bfccbadbaa3da77366aac4185a6c58fa5de5f589b42a65bcc21f161
+ size 1264219396
image_encoder/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9d277aeaed13ebc0ef33e56027b826a74433d45d755b3e0b3829440c1ea7b72e
+ size 2528373452
image_encoder/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3d3ec1e66737f77a4f3bc2df3c52eacefc69ce7825e2784183b1d4e9877d9193
+ size 2528481905
image_encoder/pytorch_model.fp16.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:49e56ff753584e2fd7708b8315b66b20f0c4ccd09723d6e589b797d3ea019d23
+ size 1264330920
image_noising_scheduler/scheduler_config.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "_class_name": "DDPMScheduler",
+   "_diffusers_version": "0.15.0.dev0",
+   "beta_end": 0.02,
+   "beta_schedule": "squaredcos_cap_v2",
+   "beta_start": 0.0001,
+   "clip_sample": true,
+   "clip_sample_range": 1.0,
+   "dynamic_thresholding_ratio": 0.995,
+   "num_train_timesteps": 1000,
+   "prediction_type": "epsilon",
+   "sample_max_value": 1.0,
+   "thresholding": false,
+   "trained_betas": null,
+   "variance_type": "fixed_small"
+ }
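This DDPM scheduler is not used for denoising; the pipeline uses it to *add* noise to the CLIP image embedding at the timestep given by `noise_level` (hence `num_train_timesteps: 1000`, matching the 0-1000 range in the card). A rough sketch of that step, reusing `image_embeds` from the sketch above (the real pipeline also whitens the embedding first; see the normalizer below):

```python
import torch
from diffusers import DDPMScheduler

noising = DDPMScheduler.from_pretrained(".", subfolder="image_noising_scheduler")
noise_level = torch.tensor([200])        # any timestep in [0, 1000)
noise = torch.randn_like(image_embeds)
noisy_embeds = noising.add_noise(image_embeds, noise, noise_level)
```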
image_normalizer/config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_class_name": "StableUnCLIPImageNormalizer",
+   "_diffusers_version": "0.15.0.dev0",
+   "_name_or_path": "./image_normalizer",
+   "embedding_dim": 1024
+ }
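The normalizer stores a per-dimension mean and standard deviation for the 1024-dim CLIP embedding (hence the tiny weight files below); the pipeline whitens the embedding before noising and undoes it afterwards. A hedged sketch of that order of operations, reusing objects from the previous sketch; the import path and the `scale`/`unscale` method names follow diffusers 0.15-era code and should be treated as assumptions:

```python
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import (
    StableUnCLIPImageNormalizer,  # assumption: 0.15-era import path
)

normalizer = StableUnCLIPImageNormalizer.from_pretrained(".", subfolder="image_normalizer")
scaled = normalizer.scale(image_embeds)                # whiten
noisy = noising.add_noise(scaled, noise, noise_level)  # perturb
image_cond = normalizer.unscale(noisy)                 # un-whiten
```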
image_normalizer/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:62d9bea2335c36ba4aad0dee4d406ecbd1011b1364b0b88c34141bef471824c8
+ size 9383
image_normalizer/diffusion_pytorch_model.fp16.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:124906d7b0b0c3af7a5ec242159700f788e736928f763fe135c4caa88ba03b58
+ size 5307
image_normalizer/diffusion_pytorch_model.fp16.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:532a80f90c5602b0f76f557b01417afc5b438598e0f02459ae4c254f5755ad74
+ size 4268
image_normalizer/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:da5bbf61227cec34fa82500488b80162ba0d3a9b89f8428444dc766ac17a510d
+ size 8364
model_index.json ADDED
@@ -0,0 +1,40 @@
+ {
+   "_class_name": "StableUnCLIPImg2ImgPipeline",
+   "_diffusers_version": "0.15.0.dev0",
+   "feature_extractor": [
+     "transformers",
+     "CLIPImageProcessor"
+   ],
+   "image_encoder": [
+     "transformers",
+     "CLIPVisionModelWithProjection"
+   ],
+   "image_noising_scheduler": [
+     "diffusers",
+     "DDPMScheduler"
+   ],
+   "image_normalizer": [
+     "stable_diffusion",
+     "StableUnCLIPImageNormalizer"
+   ],
+   "scheduler": [
+     "diffusers",
+     "PNDMScheduler"
+   ],
+   "text_encoder": [
+     "transformers",
+     "CLIPTextModel"
+   ],
+   "tokenizer": [
+     "transformers",
+     "CLIPTokenizer"
+   ],
+   "unet": [
+     "diffusers",
+     "UNet2DConditionModel"
+   ],
+   "vae": [
+     "diffusers",
+     "AutoencoderKL"
+   ]
+ }
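`model_index.json` tells `DiffusionPipeline.from_pretrained` which library and class to instantiate for each subfolder, which is how the README example assembles a `StableUnCLIPImg2ImgPipeline` from these nine components. A minimal sketch loading from a local checkout:

```python
import torch
from diffusers import StableUnCLIPImg2ImgPipeline

# Each attribute (unet, vae, text_encoder, ...) resolves to the subfolder
# named in model_index.json.
pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(".", torch_dtype=torch.float16)
print(type(pipe.scheduler).__name__)  # PNDMScheduler, per model_index.json
```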
scheduler/scheduler_config.json ADDED
@@ -0,0 +1,18 @@
+ {
+   "_class_name": "PNDMScheduler",
+   "_diffusers_version": "0.15.0.dev0",
+   "beta_end": 0.012,
+   "beta_schedule": "scaled_linear",
+   "beta_start": 0.00085,
+   "clip_sample": false,
+   "clip_sample_range": 1.0,
+   "dynamic_thresholding_ratio": 0.995,
+   "num_train_timesteps": 1000,
+   "prediction_type": "v_prediction",
+   "sample_max_value": 1.0,
+   "set_alpha_to_one": false,
+   "skip_prk_steps": true,
+   "steps_offset": 1,
+   "thresholding": false,
+   "trained_betas": null
+ }
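This is the denoising scheduler proper. Note `prediction_type: v_prediction`: SD 2.x is a v-prediction model, and any replacement scheduler must keep that setting, which `from_config` does automatically. A sketch of the scheduler swap mentioned in the README, assuming the `pipe` object from the previous sketch:

```python
from diffusers import DPMSolverMultistepScheduler

# from_config preserves v_prediction and the beta schedule while changing the solver.
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
```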
sd21-unclip-h.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4124cbcf397ace8c536908f5f877a1f091a41168d1d3d9fd0c84484c1653693b
+ size 7967306143
sd21-unclip-l.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed12098385701c1850defd3d2bc20905c7c64dad3ccad895a7f13b406aab7602
+ size 6161550731
sd_unclip_examples.jpeg ADDED
text_encoder/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "_name_or_path": "./text_encoder",
+   "architectures": [
+     "CLIPTextModel"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 0,
+   "dropout": 0.0,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_size": 1024,
+   "initializer_factor": 1.0,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 77,
+   "model_type": "clip_text_model",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 23,
+   "pad_token_id": 1,
+   "projection_dim": 512,
+   "torch_dtype": "float16",
+   "transformers_version": "4.28.0.dev0",
+   "vocab_size": 49408
+ }
text_encoder/model.fp16.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6ab158327d06ce861b5c78843672c78433ea82fcf03f142097ba204b81251cd2
+ size 680821102
text_encoder/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1e4aa519f64dc6386f88221a66c106a09fa027b47a20cc0e126687695f2a6669
+ size 1361597016
text_encoder/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2188379b05015f531d61503e714234d00a64939792f3098b324e516547f0194f
+ size 1361674657
text_encoder/pytorch_model.fp16.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7bb11b1da63986aaaaefb5ef2100d34109c024ac640cacd9ed697150c1c57f01
+ size 680900852
tokenizer/merges.txt ADDED
The diff for this file is too large to render.
 
tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<|startoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "!",
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "add_prefix_space": false,
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "<|startoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "do_lower_case": true,
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "errors": "replace",
+   "model_max_length": 77,
+   "pad_token": "<|endoftext|>",
+   "special_tokens_map_file": "./special_tokens_map.json",
+   "tokenizer_class": "CLIPTokenizer",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
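Text conditioning uses the standard CLIP tokenizer with a 77-token context (`model_max_length: 77`) feeding the OpenCLIP-ViT/H text tower configured above (`hidden_size: 1024`, matching the UNet's `cross_attention_dim`). A minimal sketch, assuming a local checkout:

```python
import torch
from transformers import CLIPTextModel, CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained(".", subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(".", subfolder="text_encoder")

tokens = tokenizer(
    "a photo of an astronaut", padding="max_length",
    max_length=tokenizer.model_max_length, return_tensors="pt",
)
with torch.no_grad():
    hidden = text_encoder(tokens.input_ids).last_hidden_state
print(hidden.shape)  # expected: torch.Size([1, 77, 1024])
```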
tokenizer/vocab.json ADDED
The diff for this file is too large to render.
 
unet/config.json ADDED
@@ -0,0 +1,57 @@
+ {
+   "_class_name": "UNet2DConditionModel",
+   "_diffusers_version": "0.15.0.dev0",
+   "_name_or_path": "./unet",
+   "act_fn": "silu",
+   "attention_head_dim": [
+     5,
+     10,
+     20,
+     20
+   ],
+   "block_out_channels": [
+     320,
+     640,
+     1280,
+     1280
+   ],
+   "center_input_sample": false,
+   "class_embed_type": "projection",
+   "class_embeddings_concat": false,
+   "conv_in_kernel": 3,
+   "conv_out_kernel": 3,
+   "cross_attention_dim": 1024,
+   "down_block_types": [
+     "CrossAttnDownBlock2D",
+     "CrossAttnDownBlock2D",
+     "CrossAttnDownBlock2D",
+     "DownBlock2D"
+   ],
+   "downsample_padding": 1,
+   "dual_cross_attention": false,
+   "flip_sin_to_cos": true,
+   "freq_shift": 0,
+   "in_channels": 4,
+   "layers_per_block": 2,
+   "mid_block_scale_factor": 1,
+   "mid_block_type": "UNetMidBlock2DCrossAttn",
+   "norm_eps": 1e-05,
+   "norm_num_groups": 32,
+   "num_class_embeds": null,
+   "only_cross_attention": false,
+   "out_channels": 4,
+   "projection_class_embeddings_input_dim": 2048,
+   "resnet_time_scale_shift": "default",
+   "sample_size": 96,
+   "time_cond_proj_dim": null,
+   "time_embedding_type": "positional",
+   "timestep_post_act": null,
+   "up_block_types": [
+     "UpBlock2D",
+     "CrossAttnUpBlock2D",
+     "CrossAttnUpBlock2D",
+     "CrossAttnUpBlock2D"
+   ],
+   "upcast_attention": true,
+   "use_linear_projection": true
+ }
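The unCLIP conditioning is visible here as `class_embed_type: "projection"` with `projection_class_embeddings_input_dim: 2048`: the noised 1024-dim image embedding concatenated with a 1024-dim embedding of `noise_level` (our reading of the pipeline; treat the exact split as an assumption). Text conditioning enters through `cross_attention_dim: 1024` as usual. A minimal loading sketch:

```python
import torch
from diffusers import UNet2DConditionModel

unet = UNet2DConditionModel.from_pretrained(".", subfolder="unet", torch_dtype=torch.float16)
n_params = sum(p.numel() for p in unet.parameters())
print(f"{n_params / 1e6:.0f}M parameters")  # consistent with the ~3.5 GB fp32 .bin below
```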
unet/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4661ff3ad977deec42ee692862f6664614686647bd61be2a9aedbff2b95b7b04
+ size 3480976157
unet/diffusion_pytorch_model.fp16.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc2f1654fc8192838a742d99db3ec1558615a3c3726892dc4334f222f47720d1
+ size 1740633377
unet/diffusion_pytorch_model.fp16.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a11c42f0619d06677f792a29b36277bf78bcefb7c010583c631da62e40630ff5
+ size 1740430084
unet/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:902a190c84ff59777cb0ecb7cd0eb86be822e6e2687376aa744ffb2d398b0b86
+ size 3480776620
vae/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "_class_name": "AutoencoderKL",
+   "_diffusers_version": "0.15.0.dev0",
+   "_name_or_path": "./vae",
+   "act_fn": "silu",
+   "block_out_channels": [
+     128,
+     256,
+     512,
+     512
+   ],
+   "down_block_types": [
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D"
+   ],
+   "in_channels": 3,
+   "latent_channels": 4,
+   "layers_per_block": 2,
+   "norm_num_groups": 32,
+   "out_channels": 3,
+   "sample_size": 768,
+   "scaling_factor": 0.18215,
+   "up_block_types": [
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D"
+   ]
+ }
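The UNet works on 4-channel latents at 1/8 of the pixel resolution (`sample_size: 768` pixels, so 96x96 latents, matching the UNet's `sample_size`), and `scaling_factor: 0.18215` must be divided out before decoding. A minimal decode sketch, assuming a local checkout:

```python
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained(".", subfolder="vae")
latents = torch.randn(1, 4, 96, 96)  # 96 = 768 / 8
with torch.no_grad():
    image = vae.decode(latents / vae.config.scaling_factor).sample
print(image.shape)  # expected: torch.Size([1, 3, 768, 768])
```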
vae/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:36bb8e1b54aba3a0914eb35fba13dcb107e9f18d379d1df2158732cd4bf56a94
+ size 334711857
vae/diffusion_pytorch_model.fp16.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c13979094c6566d9aa5936879055457022f34a747eeba12542504853385077c8
+ size 167405395
vae/diffusion_pytorch_model.fp16.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c8b011e5a18c53888d51a81aa28223ddec87b450c14dc9650d9c3ebbcd17624e
+ size 167335350
vae/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a1d993488569e928462932c8c38a0760b874d166399b14414135bd9c42df5815
+ size 334643276