Upload 5 files
- Model/finetuned_crosswalk_model_v1_150_epoch_9/config.json +68 -0
- Model/finetuned_crosswalk_model_v1_150_epoch_9/diffusion_pytorch_model.safetensors +3 -0
- Model/finetuned_vae_v1_150_epoch_9/config.json +38 -0
- Model/finetuned_vae_v1_150_epoch_9/diffusion_pytorch_model.safetensors +3 -0
- generate.py +41 -0
Model/finetuned_crosswalk_model_v1_150_epoch_9/config.json
ADDED
@@ -0,0 +1,68 @@
+{
+  "_class_name": "UNet2DConditionModel",
+  "_diffusers_version": "0.30.3",
+  "_name_or_path": "CompVis/stable-diffusion-v1-4",
+  "act_fn": "silu",
+  "addition_embed_type": null,
+  "addition_embed_type_num_heads": 64,
+  "addition_time_embed_dim": null,
+  "attention_head_dim": 8,
+  "attention_type": "default",
+  "block_out_channels": [
+    320,
+    640,
+    1280,
+    1280
+  ],
+  "center_input_sample": false,
+  "class_embed_type": null,
+  "class_embeddings_concat": false,
+  "conv_in_kernel": 3,
+  "conv_out_kernel": 3,
+  "cross_attention_dim": 768,
+  "cross_attention_norm": null,
+  "down_block_types": [
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "DownBlock2D"
+  ],
+  "downsample_padding": 1,
+  "dropout": 0.0,
+  "dual_cross_attention": false,
+  "encoder_hid_dim": null,
+  "encoder_hid_dim_type": null,
+  "flip_sin_to_cos": true,
+  "freq_shift": 0,
+  "in_channels": 4,
+  "layers_per_block": 2,
+  "mid_block_only_cross_attention": null,
+  "mid_block_scale_factor": 1,
+  "mid_block_type": "UNetMidBlock2DCrossAttn",
+  "norm_eps": 1e-05,
+  "norm_num_groups": 32,
+  "num_attention_heads": null,
+  "num_class_embeds": null,
+  "only_cross_attention": false,
+  "out_channels": 4,
+  "projection_class_embeddings_input_dim": null,
+  "resnet_out_scale_factor": 1.0,
+  "resnet_skip_time_act": false,
+  "resnet_time_scale_shift": "default",
+  "reverse_transformer_layers_per_block": null,
+  "sample_size": 64,
+  "time_cond_proj_dim": null,
+  "time_embedding_act_fn": null,
+  "time_embedding_dim": null,
+  "time_embedding_type": "positional",
+  "timestep_post_act": null,
+  "transformer_layers_per_block": 1,
+  "up_block_types": [
+    "UpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D"
+  ],
+  "upcast_attention": false,
+  "use_linear_projection": false
+}
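This config matches the stock SD v1.4 UNet architecture (note `_name_or_path: CompVis/stable-diffusion-v1-4`), suggesting the fine-tune changed only the weights. As a quick sanity check, the folder can be loaded standalone with diffusers. A minimal sketch, assuming the repo is cloned locally with the LFS weights pulled:

from diffusers import UNet2DConditionModel

# Load the fine-tuned UNet directly from this commit's folder.
unet = UNet2DConditionModel.from_pretrained(
    "./Model/finetuned_crosswalk_model_v1_150_epoch_9"
)
# Values recorded in config.json above:
print(unet.config.in_channels)          # 4 (latent channels)
print(unet.config.cross_attention_dim)  # 768 (CLIP ViT-L/14 text embeddings)
print(unet.config.sample_size)          # 64 (latent resolution -> 512 px images)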
Model/finetuned_crosswalk_model_v1_150_epoch_9/diffusion_pytorch_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de28df08485de192bff5d968ca476d47e03d45244085d99fb57da8e53f7cbc60
+size 3438167536
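Both .safetensors entries in this commit are Git LFS pointer files: the repo itself stores only the oid and byte size, and `git lfs pull` fetches the real weights (here, about 3.4 GB for the UNet). The recorded sha256 lets you verify the download after pulling. A sketch; the `sha256_of` helper is hypothetical, not part of this repo, and the same check applies to the VAE file below with its own oid:

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Hash the file in 1 MB chunks to avoid loading 3.4 GB into memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "de28df08485de192bff5d968ca476d47e03d45244085d99fb57da8e53f7cbc60"
path = "Model/finetuned_crosswalk_model_v1_150_epoch_9/diffusion_pytorch_model.safetensors"
assert sha256_of(path) == expected, "checksum mismatch - re-pull with git lfs"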
Model/finetuned_vae_v1_150_epoch_9/config.json
ADDED
@@ -0,0 +1,38 @@
+{
+  "_class_name": "AutoencoderKL",
+  "_diffusers_version": "0.30.3",
+  "_name_or_path": "CompVis/stable-diffusion-v1-4",
+  "act_fn": "silu",
+  "block_out_channels": [
+    128,
+    256,
+    512,
+    512
+  ],
+  "down_block_types": [
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D"
+  ],
+  "force_upcast": true,
+  "in_channels": 3,
+  "latent_channels": 4,
+  "latents_mean": null,
+  "latents_std": null,
+  "layers_per_block": 2,
+  "mid_block_add_attention": true,
+  "norm_num_groups": 32,
+  "out_channels": 3,
+  "sample_size": 512,
+  "scaling_factor": 0.18215,
+  "shift_factor": null,
+  "up_block_types": [
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D"
+  ],
+  "use_post_quant_conv": true,
+  "use_quant_conv": true
+}
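The VAE keeps SD v1.4's `scaling_factor` of 0.18215, which the pipeline multiplies into latents after encoding and divides out before decoding. A hedged sketch of that encode/decode roundtrip, assuming the LFS weights are pulled; `example.png` is a placeholder input:

import numpy as np
import torch
from PIL import Image
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("./Model/finetuned_vae_v1_150_epoch_9").eval()

img = Image.open("example.png").convert("RGB").resize((512, 512))
x = torch.from_numpy(np.array(img)).float() / 127.5 - 1.0  # map pixels to [-1, 1]
x = x.permute(2, 0, 1).unsqueeze(0)                        # HWC -> NCHW

with torch.no_grad():
    latents = vae.encode(x).latent_dist.sample() * vae.config.scaling_factor
    recon = vae.decode(latents / vae.config.scaling_factor).sample

print(latents.shape)  # torch.Size([1, 4, 64, 64]): latent_channels=4, 512/8 spatial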
Model/finetuned_vae_v1_150_epoch_9/diffusion_pytorch_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b4d2b5932bb4151e54e694fd31ccf51fca908223c9485bd56cd0e1d83ad94c49
+size 334643268
generate.py
ADDED
@@ -0,0 +1,41 @@
+import torch
+from diffusers import StableDiffusionPipeline, UNet2DConditionModel, AutoencoderKL, DDPMScheduler
+from transformers import CLIPTextModel, CLIPImageProcessor, AutoTokenizer
+
+# Load the fine-tuned UNet and VAE from this repo
+vae = AutoencoderKL.from_pretrained("./Model/finetuned_vae_v1_150_epoch_9")
+unet = UNet2DConditionModel.from_pretrained("./Model/finetuned_crosswalk_model_v1_150_epoch_9")
+
+scheduler = DDPMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
+
+# Load the CLIP text encoder, tokenizer, and feature extractor
+tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-large-patch14")
+text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")
+feature_extractor = CLIPImageProcessor.from_pretrained("openai/clip-vit-large-patch14")
+
+# Assemble the Stable Diffusion pipeline from the fine-tuned components
+pipeline = StableDiffusionPipeline(
+    vae=vae,
+    text_encoder=text_encoder,
+    tokenizer=tokenizer,
+    unet=unet,
+    scheduler=scheduler,
+    feature_extractor=feature_extractor,
+    safety_checker=None,
+)
+
+# Move the pipeline to GPU (if available)
+device = "cuda" if torch.cuda.is_available() else "cpu"
+print("Working with:", device)
+pipeline.to(device)
+
+# Generate an image from a text prompt
+prompt = "a crosswalk image"  # Customize your prompt here
+with torch.amp.autocast(device_type=device):
+    image = pipeline(prompt, num_inference_steps=50, guidance_scale=9).images[0]
+
+# Save and show the generated image, downscaled to 640x360
+image.resize((640, 360)).save("output.png")
+image.resize((640, 360)).show()
+
+
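generate.py samples with the default RNG, so each run produces a different image. For reproducible outputs, one can pass a seeded generator to the pipeline call. A sketch reusing `pipeline`, `prompt`, and `device` as defined in generate.py above; this is not part of the commit:

# Reproducible sampling: a fixed-seed generator makes repeated runs identical.
generator = torch.Generator(device=device).manual_seed(42)
image = pipeline(
    prompt,
    num_inference_steps=50,
    guidance_scale=9,
    generator=generator,
).images[0]
image.save("crosswalk_seed42.png")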