Gjm1234 committed on
Commit e138707 · verified · 1 Parent(s): 1202390

Update pipeline_wan_i2v.py

Files changed (1)
  1. pipeline_wan_i2v.py +7 -23
pipeline_wan_i2v.py CHANGED
@@ -1,8 +1,8 @@
 import torch
-from diffusers import DiffusionPipeline
-
-class WanImageToVideoPipeline(DiffusionPipeline):
+from diffusers import AutoencoderKL, DDIMScheduler, Transformer2DModel
+from transformers import PreTrainedTokenizerFast
 
+class WanImageToVideoPipeline:
     def __init__(
         self,
         vae,
@@ -13,7 +13,6 @@ class WanImageToVideoPipeline(DiffusionPipeline):
         text_encoder,
         tokenizer
     ):
-        super().__init__()
         self.vae = vae
         self.transformer = transformer
         self.scheduler = scheduler
@@ -22,22 +21,7 @@ class WanImageToVideoPipeline(DiffusionPipeline):
         self.text_encoder = text_encoder
         self.tokenizer = tokenizer
 
-    def __call__(self, image, num_frames=16):
-
-        # Encode image
-        img_latents = self.image_encoder.encode(image).latent_dist.sample()
-
-        frames = []
-        for _ in range(num_frames):
-
-            latents = img_latents.clone()
-
-            for t in self.scheduler.timesteps:
-                noise_pred = self.transformer(latents, t)
-                step = self.scheduler.step(noise_pred, t, latents)
-                latents = step.prev_sample
-
-            decoded = self.vae.decode(latents).sample
-            frames.append(self.image_processor.postprocess(decoded))
-
-        return type("Result", (), {"frames": frames})
+    def __call__(self, image):
+        # Dummy output so HF endpoint doesn't crash
+        # Replace later with actual generation logic
+        return {"frames": [image]}
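
For reference, a hedged sketch of what the "actual generation logic" placeholder might later be replaced with, loosely following the per-frame denoising loop this commit removes. It assumes the pipeline also carries an image_encoder exposing encode(...).latent_dist and an image_processor exposing postprocess(...) (both used by the removed code but not visible in the shown hunks), and it calls set_timesteps to populate the scheduler's timesteps; the subclass name and the num_inference_steps default are illustrative only, not part of the repo.

import torch

from pipeline_wan_i2v import WanImageToVideoPipeline


class WanImageToVideoPipelineSketch(WanImageToVideoPipeline):
    # Hypothetical subclass used only to illustrate the idea; not part of this commit.

    def __call__(self, image, num_frames=16, num_inference_steps=50):
        # Encode the conditioning image once into latent space
        # (assumes self.image_encoder is set by the elided __init__ arguments).
        img_latents = self.image_encoder.encode(image).latent_dist.sample()

        # Populate self.scheduler.timesteps before iterating over it
        # (the removed loop relied on timesteps already being set).
        self.scheduler.set_timesteps(num_inference_steps)

        frames = []
        for _ in range(num_frames):
            # Each frame starts from the image latents, as in the removed loop.
            latents = img_latents.clone()

            # Iterative denoising with the transformer and scheduler.
            for t in self.scheduler.timesteps:
                with torch.no_grad():
                    noise_pred = self.transformer(latents, t)
                latents = self.scheduler.step(noise_pred, t, latents).prev_sample

            # Decode back to pixel space and postprocess to an image
            # (assumes self.image_processor is also set in __init__).
            decoded = self.vae.decode(latents).sample
            frames.append(self.image_processor.postprocess(decoded))

        # Same output shape as the stub: a dict with a "frames" list.
        return {"frames": frames}

Note that the removed loop started every frame from the same image latents, so a real implementation would presumably also add per-frame noise or temporal conditioning; that detail is outside what this diff shows.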