Upload Video-Reason_VBVR-Wan2.2_0.txt with huggingface_hub
Video-Reason_VBVR-Wan2.2_0.txt
CODE:

```python
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import load_image, export_to_video

# switch to "mps" for apple devices
pipe = DiffusionPipeline.from_pretrained("Video-Reason/VBVR-Wan2.2", dtype=torch.bfloat16, device_map="cuda")
pipe.to("cuda")

prompt = "A man with short gray hair plays a red electric guitar."
image = load_image(
    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/guitar-man.png"
)

output = pipe(image=image, prompt=prompt).frames[0]
export_to_video(output, "output.mp4")
```

ERROR:

```
Traceback (most recent call last):
  File "/tmp/Video-Reason_VBVR-Wan2.2_0nk4e9F.py", line 28, in <module>
    pipe = DiffusionPipeline.from_pretrained("Video-Reason/VBVR-Wan2.2", dtype=torch.bfloat16, device_map="cuda")
  File "/tmp/.cache/uv/environments-v2/55e601e3445064bb/lib/python3.13/site-packages/huggingface_hub/utils/_validators.py", line 89, in _inner_fn
    return fn(*args, **kwargs)
  File "/tmp/.cache/uv/environments-v2/55e601e3445064bb/lib/python3.13/site-packages/diffusers/pipelines/pipeline_utils.py", line 1021, in from_pretrained
    loaded_sub_model = load_sub_model(
        library_name=library_name,
        ...<21 lines>...
        quantization_config=quantization_config,
    )
  File "/tmp/.cache/uv/environments-v2/55e601e3445064bb/lib/python3.13/site-packages/diffusers/pipelines/pipeline_loading_utils.py", line 876, in load_sub_model
    loaded_sub_model = load_method(os.path.join(cached_folder, name), **loading_kwargs)
  File "/tmp/.cache/uv/environments-v2/55e601e3445064bb/lib/python3.13/site-packages/huggingface_hub/utils/_validators.py", line 89, in _inner_fn
    return fn(*args, **kwargs)
  File "/tmp/.cache/uv/environments-v2/55e601e3445064bb/lib/python3.13/site-packages/diffusers/models/modeling_utils.py", line 1296, in from_pretrained
    ) = cls._load_pretrained_model(
    ~~~~~~~~~~~~~~~~~~~~~~~~~~^
        model,
        ^^^^^^
        ...<13 lines>...
        is_parallel_loading_enabled=is_parallel_loading_enabled,
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/55e601e3445064bb/lib/python3.13/site-packages/diffusers/models/modeling_utils.py", line 1635, in _load_pretrained_model
    _caching_allocator_warmup(model, expanded_device_map, dtype, hf_quantizer)
    ~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/tmp/.cache/uv/environments-v2/55e601e3445064bb/lib/python3.13/site-packages/diffusers/models/model_loading_utils.py", line 751, in _caching_allocator_warmup
    _ = torch.empty(warmup_elems, dtype=dtype, device=device, requires_grad=False)
torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 26.62 GiB. GPU 0 has a total capacity of 22.03 GiB of which 10.76 GiB is free. Including non-PyTorch memory, this process has 11.27 GiB memory in use. Of the allocated memory 11.07 GiB is allocated by PyTorch, and 21.35 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
```
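
NOTE (not part of the original report): the allocator warmup tries to reserve about 26.6 GiB on a 22 GiB GPU, so loading with device_map="cuda" cannot fit the full pipeline. One possible retry is to load on CPU and let diffusers move submodels to the GPU only while they run. A minimal sketch, assuming the stock enable_model_cpu_offload() helper applies to this pipeline:

```python
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import load_image, export_to_video

# Load without device_map="cuda" so weights stay in system RAM during
# from_pretrained() instead of being materialized on the GPU up front.
pipe = DiffusionPipeline.from_pretrained(
    "Video-Reason/VBVR-Wan2.2", dtype=torch.bfloat16
)

# Move each submodel to the GPU only while it is executing; the rest stays
# on CPU. enable_sequential_cpu_offload() lowers VRAM further, at the cost
# of speed, if this is still not enough.
pipe.enable_model_cpu_offload()

prompt = "A man with short gray hair plays a red electric guitar."
image = load_image(
    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/guitar-man.png"
)

output = pipe(image=image, prompt=prompt).frames[0]
export_to_video(output, "output.mp4")
```

If fragmentation rather than raw model size is the limiting factor, setting the allocator option the error message itself suggests (PYTORCH_ALLOC_CONF=expandable_segments:True) in the environment before launching Python may also help.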