yurman committed on
Commit
a9cd6a0
·
verified ·
1 Parent(s): 2091e22

Initialize unconditional model pipeline.

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ val_imgs_grid.png filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ ---
3
+ license: creativeml-openrail-m
4
+ base_model: stabilityai/stable-diffusion-2-base
5
+ tags:
6
+ - stable-diffusion
7
+ - stable-diffusion-diffusers
8
+ - diffusers
9
+ inference: true
10
+ ---
11
+
12
+ # Unconditioned stable diffusion finetuning - uncond_sd2-base
13
+
14
+ This pipeline was finetuned from **stabilityai/stable-diffusion-2-base**
15
+ for brain image generation.
16
+ Below are some example images generated with the finetuned pipeline:
17
+
18
+ ![val_imgs_grid](./val_imgs_grid.png)
19
+
20
+
21
+ ## Pipeline usage
22
+
23
+ You can use the pipeline like so:
24
+
25
+ ```python
26
+ from diffusers import StableDiffusionUnconditionalPipeline
27
+ import torch
28
+
29
+ pipeline = StableDiffusionUnconditionalPipeline.from_pretrained("uncond_sd2-base", torch_dtype=torch.float32)
30
+ image = pipeline(1).images[0]
31
+ image.save("brain_image.png")
32
+ ```
33
+
34
+ ## Training info
35
+ For training info, refer to the model card for the parent conditional model: stabilityai/stable-diffusion-2-base.
model_index.json ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "StableDiffusionUnconditionalPipeline",
3
+ "_diffusers_version": "0.26.0.dev0",
4
+ "requires_safety_checker": false,
5
+ "scheduler": [
6
+ "diffusers",
7
+ "DDPMScheduler"
8
+ ],
9
+ "unet": [
10
+ "diffusers",
11
+ "UNet2DModel"
12
+ ],
13
+ "vae": [
14
+ "diffusers",
15
+ "AutoencoderKL"
16
+ ]
17
+ }
scheduler/scheduler_config.json ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "DDPMScheduler",
3
+ "_diffusers_version": "0.26.0.dev0",
4
+ "beta_end": 0.012,
5
+ "beta_schedule": "scaled_linear",
6
+ "beta_start": 0.00085,
7
+ "clip_sample": false,
8
+ "clip_sample_range": 1.0,
9
+ "dynamic_thresholding_ratio": 0.995,
10
+ "num_train_timesteps": 1000,
11
+ "prediction_type": "epsilon",
12
+ "rescale_betas_zero_snr": false,
13
+ "sample_max_value": 1.0,
14
+ "set_alpha_to_one": false,
15
+ "skip_prk_steps": true,
16
+ "steps_offset": 1,
17
+ "thresholding": false,
18
+ "timestep_spacing": "leading",
19
+ "trained_betas": null,
20
+ "variance_type": "fixed_small"
21
+ }
unet/config.json ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "UNet2DModel",
3
+ "_diffusers_version": "0.26.0.dev0",
4
+ "act_fn": "silu",
5
+ "add_mid_block_attention": true,
6
+ "attention_head_dim": [
7
+ 5,
8
+ 10,
9
+ 20,
10
+ 20
11
+ ],
12
+ "attention_type": "default",
13
+ "block_out_channels": [
14
+ 320,
15
+ 640,
16
+ 1280,
17
+ 1280
18
+ ],
19
+ "center_input_sample": false,
20
+ "class_embed_type": null,
21
+ "class_embeddings_concat": false,
22
+ "conv_in_kernel": 3,
23
+ "conv_out_kernel": 3,
24
+ "down_block_types": [
25
+ "SelfAttnDownBlock2D",
26
+ "SelfAttnDownBlock2D",
27
+ "SelfAttnDownBlock2D",
28
+ "DownBlock2D"
29
+ ],
30
+ "downsample_padding": 1,
31
+ "downsample_type": "conv",
32
+ "dropout": 0.0,
33
+ "flip_sin_to_cos": true,
34
+ "freq_shift": 0,
35
+ "in_channels": 4,
36
+ "layers_per_block": 2,
37
+ "mid_block_scale_factor": 1,
38
+ "mid_block_type": "UNetMidBlock2DSelfAttn",
39
+ "norm_eps": 1e-05,
40
+ "norm_num_groups": 32,
41
+ "num_attention_heads": null,
42
+ "num_class_embeds": null,
43
+ "num_train_timesteps": null,
44
+ "out_channels": 4,
45
+ "projection_class_embeddings_input_dim": null,
46
+ "resnet_out_scale_factor": 1.0,
47
+ "resnet_skip_time_act": false,
48
+ "resnet_time_scale_shift": "default",
49
+ "reverse_transformer_layers_per_block": null,
50
+ "sample_size": 64,
51
+ "time_cond_proj_dim": null,
52
+ "time_embedding_act_fn": null,
53
+ "time_embedding_dim": null,
54
+ "time_embedding_type": "positional",
55
+ "timestep_post_act": null,
56
+ "transformer_layers_per_block": 1,
57
+ "up_block_types": [
58
+ "UpBlock2D",
59
+ "SelfAttnUpBlock2D",
60
+ "SelfAttnUpBlock2D",
61
+ "SelfAttnUpBlock2D"
62
+ ],
63
+ "upcast_attention": false,
64
+ "upsample_type": "conv",
65
+ "use_linear_projection": true,
66
+ "use_transformer_attentions": true
67
+ }
unet/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a5dece95dba7b1e692477aae73aac5258567d2e10b443d993478a38a9ca6de8c
3
+ size 3262202000
vae/config.json ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "AutoencoderKL",
3
+ "_diffusers_version": "0.26.0.dev0",
4
+ "_name_or_path": "stabilityai/stable-diffusion-2-base",
5
+ "act_fn": "silu",
6
+ "block_out_channels": [
7
+ 128,
8
+ 256,
9
+ 512,
10
+ 512
11
+ ],
12
+ "down_block_types": [
13
+ "DownEncoderBlock2D",
14
+ "DownEncoderBlock2D",
15
+ "DownEncoderBlock2D",
16
+ "DownEncoderBlock2D"
17
+ ],
18
+ "force_upcast": true,
19
+ "in_channels": 3,
20
+ "latent_channels": 4,
21
+ "layers_per_block": 2,
22
+ "norm_num_groups": 32,
23
+ "out_channels": 3,
24
+ "sample_size": 512,
25
+ "scaling_factor": 0.18215,
26
+ "up_block_types": [
27
+ "UpDecoderBlock2D",
28
+ "UpDecoderBlock2D",
29
+ "UpDecoderBlock2D",
30
+ "UpDecoderBlock2D"
31
+ ]
32
+ }
vae/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2aa1f43011b553a4cba7f37456465cdbd48aab7b54b9348b890e8058ea7683ec
3
+ size 334643268
val_imgs_grid.png ADDED

Git LFS Details

  • SHA256: fddcaea0cc646d825b9fe0f754099cd95fb3a6139948e56be93fd798f1784ab0
  • Pointer size: 131 Bytes
  • Size of remote file: 289 kB