First commit small size
- .gitattributes +4 -0
- RCD-Final Logosmall size.jpg +0 -0
- README.md +45 -0
- image_0.png +3 -0
- image_1.png +3 -0
- image_2.png +3 -0
- image_3.png +3 -0
- model_index.json +42 -0
.gitattributes
CHANGED
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+image_0.png filter=lfs diff=lfs merge=lfs -text
+image_1.png filter=lfs diff=lfs merge=lfs -text
+image_2.png filter=lfs diff=lfs merge=lfs -text
+image_3.png filter=lfs diff=lfs merge=lfs -text
RCD-Final Logosmall size.jpg
ADDED
README.md
ADDED
@@ -0,0 +1,45 @@
---
base_model: stabilityai/stable-diffusion-xl-base-1.0
library_name: diffusers
license: creativeml-openrail-m
tags:
- stable-diffusion-xl
- stable-diffusion-xl-diffusers
- text-to-image
- diffusers-training
- diffusers
inference: true
---

<!-- This model card has been generated automatically according to the information the training script had access to. You
should probably proofread and complete it, then remove this comment. -->

# Text-to-image fine-tuning - GiantAnalytics/sdxl_fine_tuned_model_aditya_2

This pipeline was fine-tuned from **stabilityai/stable-diffusion-xl-base-1.0** on the **/content/drive/MyDrive/combine_images/** dataset. Below are some example images generated with the fine-tuned pipeline using the prompt "an abstract pattern composed of organic, brushstroke-like shapes in various shades of blue, brown, and white. The shapes are arranged in a loose, overlapping pattern, creating a sense of movement and energy":






Special VAE used for training: madebyollin/sdxl-vae-fp16-fix.

## Intended uses & limitations

#### How to use

```python
# TODO: add an example code snippet for running this diffusion pipeline
```
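The snippet above is still a TODO in the auto-generated card. Until it is filled in, here is a minimal sketch of the usual diffusers loading pattern for an SDXL checkpoint. The repo id comes from the heading above; the device, dtype, and the choice to reuse the fp16-fix VAE at inference time are assumptions rather than documented settings:

```python
import torch
from diffusers import AutoencoderKL, StableDiffusionXLPipeline

# The fp16-fix VAE named above was used for training; reusing it for
# half-precision inference is an assumption, not a documented requirement.
vae = AutoencoderKL.from_pretrained(
    "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
)

pipe = StableDiffusionXLPipeline.from_pretrained(
    "GiantAnalytics/sdxl_fine_tuned_model_aditya_2",
    vae=vae,
    torch_dtype=torch.float16,
).to("cuda")

prompt = (
    "an abstract pattern composed of organic, brushstroke-like shapes "
    "in various shades of blue, brown, and white"
)
image = pipe(prompt).images[0]
image.save("example.png")
```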
#### Limitations and bias

[TODO: provide examples of latent issues and potential remediations]

## Training details

[TODO: describe the data used to train the model]
image_0.png
ADDED (Git LFS)

image_1.png
ADDED (Git LFS)

image_2.png
ADDED (Git LFS)

image_3.png
ADDED (Git LFS)
model_index.json
ADDED
@@ -0,0 +1,42 @@
{
  "_class_name": "StableDiffusionXLPipeline",
  "_diffusers_version": "0.30.0.dev0",
  "_name_or_path": "stabilityai/stable-diffusion-xl-base-1.0",
  "feature_extractor": [
    null,
    null
  ],
  "force_zeros_for_empty_prompt": true,
  "image_encoder": [
    null,
    null
  ],
  "scheduler": [
    "diffusers",
    "EulerDiscreteScheduler"
  ],
  "text_encoder": [
    "transformers",
    "CLIPTextModel"
  ],
  "text_encoder_2": [
    "transformers",
    "CLIPTextModelWithProjection"
  ],
  "tokenizer": [
    "transformers",
    "CLIPTokenizer"
  ],
  "tokenizer_2": [
    "transformers",
    "CLIPTokenizer"
  ],
  "unet": [
    "diffusers",
    "UNet2DConditionModel"
  ],
  "vae": [
    "diffusers",
    "AutoencoderKL"
  ]
}
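For context, model_index.json is the file diffusers consults when rebuilding the pipeline: the `_class_name` entry selects the pipeline class, and each component entry names the library and class to load from the matching subfolder. A small sketch of that round trip, with the repo id taken from the README heading:

```python
from diffusers import DiffusionPipeline

# from_pretrained() parses model_index.json, sees
# "_class_name": "StableDiffusionXLPipeline", and instantiates that class,
# loading each listed component (unet, vae, text encoders, tokenizers,
# scheduler) from its subfolder in the repo.
pipe = DiffusionPipeline.from_pretrained(
    "GiantAnalytics/sdxl_fine_tuned_model_aditya_2"
)
print(type(pipe).__name__)  # StableDiffusionXLPipeline
```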