grnr9730 committed on
Commit
a2a2581
·
verified ·
1 Parent(s): 8a50ab5

Update handler.py

Browse files
Files changed (1) hide show
  1. handler.py +15 -12
handler.py CHANGED
@@ -1,26 +1,29 @@
import torch
# BUG FIX: the active code below instantiates DiffusionPipeline, but only
# StableDiffusionPipeline was imported, so importing this module raised
# NameError before the handler could ever run. Import the name actually used.
from diffusers import DiffusionPipeline, StableDiffusionPipeline
from PIL import Image
from io import BytesIO
import base64

# Model to serve. NOTE(review): loading happens at import time and requires
# a CUDA device — confirm the deployment target always has a GPU.
model_id = "grnr9730/Wan2.1-I2V-14B-720P-Diffusers"
#pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
pipe = DiffusionPipeline.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
    custom_pipeline="Wan2.1-I2V-14B-720P-Diffusers"
).to("cuda")
16
# Inference entry point
def infer(data):
    """Run the pipeline on the request's prompt and return a base64 PNG.

    Reads "prompt" from `data` (falling back to a default prompt), generates
    one image, and returns it as {"image": <base64-encoded PNG string>}.
    """
    prompt = data.get("prompt", "A futuristic cityscape")

    # Generate and keep only the first image of the batch.
    generated = pipe(prompt).images[0]

    # Serialize the image into base64-encoded PNG bytes for the JSON response.
    buffer = BytesIO()
    generated.save(buffer, format="PNG")
    encoded = base64.b64encode(buffer.getvalue()).decode("utf-8")

    return {"image": encoded}
 
# Standard library
import base64
from io import BytesIO

# Third party
import torch
from PIL import Image
from diffusers import WanPipeline

# Hugging Face model ID to serve.
model_id = "grnr9730/Wan2.1-I2V-14B-720P-Diffusers"

# Load the pipeline once at import time; CPU offload trades speed for a
# much smaller GPU-memory footprint on this large model.
pipe = WanPipeline.from_pretrained(model_id, torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()
 
 
13
 
14
# Inference entry point
def infer(data):
    """Generate a video for the prompt in `data` and return its first frame.

    Args:
        data: request dict; "prompt" is read (default "A futuristic cityscape").

    Returns:
        {"image": <base64-encoded PNG of the first generated frame>}
    """
    import numpy as np  # local import: only needed for frame conversion

    prompt = data.get("prompt", "A futuristic cityscape")

    # Generate the video. `.frames` is batched: element 0 is the first
    # generated *video* (a sequence of frames), not a single frame.
    frames = pipe(prompt).frames[0]

    # BUG FIX: the original did `video[0].save(...)`, i.e. tried to save a
    # whole video as an image; with the default numpy output there is no
    # `.save` at all. Take the first frame and convert it to a PIL image
    # when the pipeline returned arrays.
    first_frame = frames[0]
    if not isinstance(first_frame, Image.Image):
        arr = np.asarray(first_frame)
        if arr.dtype != np.uint8:
            # presumably float frames in [0, 1] — TODO confirm against the
            # pipeline's output_type before relying on this scaling
            arr = (arr.clip(0.0, 1.0) * 255).round().astype(np.uint8)
        first_frame = Image.fromarray(arr)

    # Encode the frame as base64 PNG for the JSON response.
    buffered = BytesIO()
    first_frame.save(buffered, format="PNG")
    img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")

    return {"image": img_str}