Update handler.py
Browse files- handler.py +15 -12
handler.py
CHANGED
|
@@ -1,26 +1,29 @@
|
|
| 1 |
import base64
from io import BytesIO

import torch
from diffusers import WanPipeline
from PIL import Image

# Hugging Face model id for the Wan 2.1 14B image-to-video pipeline (Diffusers format).
model_id = "grnr9730/Wan2.1-I2V-14B-720P-Diffusers"

# Load the pipeline once at module import so every request reuses the same weights.
pipe = WanPipeline.from_pretrained(model_id, torch_dtype=torch.bfloat16)
# Offload idle sub-models to CPU so the 14B model fits in limited GPU memory.
pipe.enable_model_cpu_offload()


def infer(data):
    """Generate a video from a text prompt and return its first frame as a base64 PNG.

    Parameters:
        data: dict-like request payload; reads the optional "prompt" key
              (defaults to "A futuristic cityscape").

    Returns:
        dict: {"image": <base64-encoded PNG of the first generated frame>}
    """
    prompt = data.get("prompt", "A futuristic cityscape")

    # Generate the video. Diffusers video pipelines batch their output per
    # prompt: `output.frames[0]` is the frame sequence of the first (only)
    # video. Indexing `frames` once — as the original `video[0]` did — yields
    # the whole video, not a single frame, and `.save()` would fail on it.
    frames = pipe(prompt).frames[0]
    first_frame = frames[0]

    # Frames may come back as numpy arrays rather than PIL images depending on
    # the pipeline's output_type; normalize so `.save()` works.
    # NOTE(review): assumes uint8 HWC or float [0, 1] arrays — confirm against
    # the installed diffusers version.
    if not isinstance(first_frame, Image.Image):
        import numpy as np

        arr = np.asarray(first_frame)
        if arr.dtype != np.uint8:
            arr = (arr * 255).clip(0, 255).astype(np.uint8)
        first_frame = Image.fromarray(arr)

    # Encode the frame as a base64 PNG string for the JSON response.
    buffered = BytesIO()
    first_frame.save(buffered, format="PNG")
    img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")

    return {"image": img_str}
|