Remade committed on
Commit
70bcea3
·
verified ·
1 Parent(s): d12447d

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +0 -45
README.md CHANGED
@@ -72,51 +72,6 @@ widget:
72
  - [cakeify_16_epochs.safetensors](./cakeify_16_epochs.safetensors) - LoRA Model File
73
  - [wan_img2vid_lora_workflow.json](./workflow/wan_img2vid_lora_workflow.json) - Wan I2V with LoRA Workflow for ComfyUI
74
 
75
- ## Using with Diffusers
76
- ```py
77
- pip install git+https://github.com/huggingface/diffusers.git
78
- ```
79
-
80
- ```py
81
- import torch
82
- from diffusers.utils import export_to_video, load_image
83
- from diffusers import AutoencoderKLWan, WanImageToVideoPipeline
84
- from transformers import CLIPVisionModel
85
- import numpy as np
86
-
87
- model_id = "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers"
88
- image_encoder = CLIPVisionModel.from_pretrained(model_id, subfolder="image_encoder", torch_dtype=torch.float32)
89
- vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32)
90
- pipe = WanImageToVideoPipeline.from_pretrained(model_id, vae=vae, image_encoder=image_encoder, torch_dtype=torch.bfloat16)
91
- pipe.to("cuda")
92
-
93
- pipe.load_lora_weights("Remade/Squish")
94
-
95
- pipe.enable_model_cpu_offload() #for low-vram environments
96
-
97
- prompt = "In the video, a miniature cat toy is presented. The cat toy is held in a person's hands. The person then presses on the cat toy, causing a sq41sh squish effect. The person keeps pressing down on the cat toy, further showing the sq41sh squish effect."
98
-
99
- image = load_image("https://huggingface.co/datasets/diffusers/cat_toy_example/resolve/main/1.jpeg")
100
-
101
- max_area = 480 * 832
102
- aspect_ratio = image.height / image.width
103
- mod_value = pipe.vae_scale_factor_spatial * pipe.transformer.config.patch_size[1]
104
- height = round(np.sqrt(max_area * aspect_ratio)) // mod_value * mod_value
105
- width = round(np.sqrt(max_area / aspect_ratio)) // mod_value * mod_value
106
- image = image.resize((width, height))
107
-
108
- output = pipe(
109
- image=image,
110
- prompt=prompt,
111
- height=height,
112
- width=width,
113
- num_frames=81,
114
- guidance_scale=5.0,
115
- num_inference_steps=28
116
- ).frames[0]
117
- export_to_video(output, "output.mp4", fps=16)
118
- ```
119
-
120
  ---
121
  <div style="background-color: #f8f9fa; padding: 20px; border-radius: 10px; margin-bottom: 20px;">
122
  <div style="background-color: white; padding: 15px; border-radius: 8px; margin: 15px 0; box-shadow: 0 2px 4px rgba(0,0,0,0.1);">
 
72
  - [cakeify_16_epochs.safetensors](./cakeify_16_epochs.safetensors) - LoRA Model File
73
  - [wan_img2vid_lora_workflow.json](./workflow/wan_img2vid_lora_workflow.json) - Wan I2V with LoRA Workflow for ComfyUI
74
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
75
  ---
76
  <div style="background-color: #f8f9fa; padding: 20px; border-radius: 10px; margin-bottom: 20px;">
77
  <div style="background-color: white; padding: 15px; border-radius: 8px; margin: 15px 0; box-shadow: 0 2px 4px rgba(0,0,0,0.1);">