ariG23498 HF Staff commited on
Commit
d9a9c2c
·
verified ·
1 Parent(s): 51dea02

Upload lightx2v_Wan2.2-Distill-Loras_0.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. lightx2v_Wan2.2-Distill-Loras_0.py +13 -14
lightx2v_Wan2.2-Distill-Loras_0.py CHANGED
@@ -1,9 +1,14 @@
1
  # /// script
2
  # requires-python = ">=3.12"
3
  # dependencies = [
 
 
 
 
4
  # "torch",
5
  # "torchvision",
6
  # "transformers",
 
7
  # "diffusers",
8
  # "sentence-transformers",
9
  # "accelerate",
@@ -13,19 +18,16 @@
13
  # ///
14
 
15
  try:
16
- import torch
17
  from diffusers import DiffusionPipeline
18
  from diffusers.utils import load_image, export_to_video
19
 
20
- pipe = DiffusionPipeline.from_pretrained("lightx2v/Wan2.2-Distill-Loras", torch_dtype=torch.float16)
21
- pipe.to("cuda")
22
 
23
  prompt = "A man with short gray hair plays a red electric guitar."
24
- image = load_image(
25
- "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/guitar-man.png"
26
- )
27
 
28
- output = pipe(image=image, prompt=prompt).frames[0]
29
  export_to_video(output, "output.mp4")
30
  with open('lightx2v_Wan2.2-Distill-Loras_0.txt', 'w', encoding='utf-8') as f:
31
  f.write('Everything was good in lightx2v_Wan2.2-Distill-Loras_0.txt')
@@ -41,19 +43,16 @@ except Exception as e:
41
  with open('lightx2v_Wan2.2-Distill-Loras_0.txt', 'a', encoding='utf-8') as f:
42
  import traceback
43
  f.write('''```CODE:
44
- import torch
45
  from diffusers import DiffusionPipeline
46
  from diffusers.utils import load_image, export_to_video
47
 
48
- pipe = DiffusionPipeline.from_pretrained("lightx2v/Wan2.2-Distill-Loras", torch_dtype=torch.float16)
49
- pipe.to("cuda")
50
 
51
  prompt = "A man with short gray hair plays a red electric guitar."
52
- image = load_image(
53
- "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/guitar-man.png"
54
- )
55
 
56
- output = pipe(image=image, prompt=prompt).frames[0]
57
  export_to_video(output, "output.mp4")
58
  ```
59
 
 
1
  # /// script
2
  # requires-python = ">=3.12"
3
  # dependencies = [
4
+ # "numpy",
5
+ # "einops",
6
+ # "pandas",
7
+ # "protobuf",
8
  # "torch",
9
  # "torchvision",
10
  # "transformers",
11
+ # "timm",
12
  # "diffusers",
13
  # "sentence-transformers",
14
  # "accelerate",
 
18
  # ///
19
 
20
try:
    from diffusers import DiffusionPipeline
    from diffusers.utils import load_image, export_to_video

    # Base image-to-video model; the distillation LoRA is applied on top of it.
    pipe = DiffusionPipeline.from_pretrained("Wan-AI/Wan2.2-I2V-A14B")
    pipe.load_lora_weights("lightx2v/Wan2.2-Distill-Loras")

    prompt = "A man with short gray hair plays a red electric guitar."
    input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/guitar-man.png")

    # FIX: the pipeline result was previously bound to `image`, while the
    # export call below referenced the undefined name `output` — a guaranteed
    # NameError at runtime. Bind the frames to `output` so the export works.
    output = pipe(image=input_image, prompt=prompt).frames[0]
    export_to_video(output, "output.mp4")

    # Marker file so an external harness can detect a successful run.
    with open('lightx2v_Wan2.2-Distill-Loras_0.txt', 'w', encoding='utf-8') as f:
        f.write('Everything was good in lightx2v_Wan2.2-Distill-Loras_0.txt')
except Exception as e:
    # On any failure, append the attempted code (and the traceback) to the
    # marker file for offline inspection. The echoed snippet mirrors the
    # fixed code above (same `output` binding).
    with open('lightx2v_Wan2.2-Distill-Loras_0.txt', 'a', encoding='utf-8') as f:
        import traceback
        f.write('''```CODE:
from diffusers import DiffusionPipeline
from diffusers.utils import load_image, export_to_video

pipe = DiffusionPipeline.from_pretrained("Wan-AI/Wan2.2-I2V-A14B")
pipe.load_lora_weights("lightx2v/Wan2.2-Distill-Loras")

prompt = "A man with short gray hair plays a red electric guitar."
input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/guitar-man.png")

output = pipe(image=input_image, prompt=prompt).frames[0]
export_to_video(output, "output.mp4")
```
''')
        # NOTE(review): the tail of this handler is truncated in the diff
        # view; it visibly imports `traceback`, so the traceback is recorded
        # here — confirm against the full file.
        f.write('ERROR:\n' + traceback.format_exc())