# NOTE(review): removed non-Python extraction artifacts (file-size line, git-blame
# hashes, and a line-number gutter) that would be syntax errors in this script.
#@title Pipeline script generation
# This file is executed as a subprocess (from notebook cell 4).
import sys
import os
import time
import glob
import gc
import torch
import subprocess
import random
import argparse
from typing import Sequence, Mapping, Any, Union
import shutil
# --- 0. κΈ°λ³Έ μ€μ λ° μΈμ νμ± ---
def parse_args():
    """Collect every pipeline control knob from the command line.

    Required options cover the prompts, output geometry, and model file
    selections; all remaining options fall back to the defaults used by the
    original notebook UI.
    """
    parser = argparse.ArgumentParser(description="ComfyUI Video Generation Script with All Controls from 1.py")
    # --- required prompts / geometry ---
    parser.add_argument("--positive_prompt", type=str, required=True)
    parser.add_argument("--negative_prompt", type=str, required=True)
    parser.add_argument("--width", type=int, required=True)
    parser.add_argument("--height", type=int, required=True)
    parser.add_argument("--length", type=int, required=True)
    parser.add_argument("--upscale_ratio", type=float, required=True)
    # --- sampling controls ---
    parser.add_argument("--steps", type=int, default=4)
    parser.add_argument("--cfg_high", type=float, default=1.0)
    parser.add_argument("--cfg_low", type=float, default=1.0)
    parser.add_argument("--sampler_name_high", type=str, default="euler")
    parser.add_argument("--scheduler_high", type=str, default="simple")
    parser.add_argument("--sampler_name_low", type=str, default="euler")
    parser.add_argument("--scheduler_low", type=str, default="simple")
    parser.add_argument("--noise_seed", type=int, default=-1)
    parser.add_argument("--split_point_percent", type=float, default=50.0)
    parser.add_argument("--shift", type=float, default=8.0)
    parser.add_argument("--sageattention", type=str, default="on")
    # --- required model files ---
    parser.add_argument("--unet_high_name", type=str, required=True)
    parser.add_argument("--unet_low_name", type=str, required=True)
    parser.add_argument("--vae_name", type=str, required=True)
    parser.add_argument("--clip_name", type=str, required=True)
    # --- upscaling ---
    parser.add_argument("--upscale_model_name", type=str, default="None")
    parser.add_argument("--upscale_model_scale", type=float, default=2.0)
    parser.add_argument("--upscale_chunk_size", type=int, default=30)
    # --- frame rate / RIFE interpolation ---
    parser.add_argument("--frame_rate", type=int, default=16)
    parser.add_argument("--interpolation", type=str, default="on")
    parser.add_argument("--rife_fast_mode", type=str, default="on")
    parser.add_argument("--rife_ensemble", type=str, default="on")
    parser.add_argument("--rife_chunk_size", type=int, default=30)
    parser.add_argument("--connect_lora_clip", type=str, default="off")
    # --- FFmpeg encoding ---
    parser.add_argument("--video_encoder", type=str, default="GPU: HEVC (NVENC)")
    parser.add_argument("--nvenc_cq", type=int, default=25)
    parser.add_argument("--nvenc_preset", type=str, default="p5")
    parser.add_argument("--cpu_crf", type=int, default=19)  # FFmpeg
    # --- LoRA stacks: two slots each for the high- and low-noise UNets ---
    parser.add_argument("--lora_high_1_name", type=str, default="None")
    parser.add_argument("--lora_high_1_strength_model", type=float, default=1.0)
    parser.add_argument("--lora_high_1_strength_clip", type=float, default=1.0)
    parser.add_argument("--lora_high_2_name", type=str, default="None")
    parser.add_argument("--lora_high_2_strength_model", type=float, default=1.0)
    parser.add_argument("--lora_high_2_strength_clip", type=float, default=1.0)
    parser.add_argument("--lora_low_1_name", type=str, default="None")
    parser.add_argument("--lora_low_1_strength_model", type=float, default=1.0)
    parser.add_argument("--lora_low_1_strength_clip", type=float, default=1.0)
    parser.add_argument("--lora_low_2_name", type=str, default="None")
    parser.add_argument("--lora_low_2_strength_model", type=float, default=1.0)
    parser.add_argument("--lora_low_2_strength_clip", type=float, default=1.0)
    # --- resize algorithms for input crop and output rescale ---
    parser.add_argument("--input_resize_algo", type=str, default="bicubic")
    parser.add_argument("--output_resize_algo", type=str, default="bicubic")
    return parser.parse_args()
def to_bool(s: str) -> bool: return s.lower() in ['true', '1', 't', 'y', 'yes', 'on']
def clear_memory():
    """Release cached GPU memory (when CUDA is present) and run the GC."""
    cuda_available = torch.cuda.is_available()
    if cuda_available:
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()
    gc.collect()
COMFYUI_BASE_PATH = '/content/ComfyUI'
def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
    """Index into a ComfyUI node output, falling back to its "result" payload.

    Most node outputs are plain tuples, but some custom nodes return a dict
    whose "result" key holds the tuple; this helper supports both shapes and
    re-raises for anything else.
    """
    try:
        return obj[index]
    except (KeyError, TypeError):
        # Guard clause: only the dict-with-"result" shape gets a second chance.
        if not (isinstance(obj, dict) and "result" in obj):
            raise
        return obj["result"][index]
def add_comfyui_directory_to_sys_path() -> None:
    """Make the ComfyUI package importable by appending its root to sys.path.

    No-op when the directory is missing or already registered.
    """
    already_registered = COMFYUI_BASE_PATH in sys.path
    if os.path.isdir(COMFYUI_BASE_PATH) and not already_registered:
        sys.path.append(COMFYUI_BASE_PATH)
def import_custom_nodes() -> None:
    """Bootstrap ComfyUI's server/queue machinery and load custom nodes.

    Running inside a notebook means an event loop may already exist, so
    nest_asyncio is applied (installed on the fly if absent) and
    init_extra_nodes() is run either synchronously or scheduled as a future,
    depending on the loop state.
    """
    # nest_asyncio lets run_until_complete work inside an already-running loop
    # (e.g. Jupyter/Colab); install it on demand if it's not available.
    try:
        import nest_asyncio
        nest_asyncio.apply()
    except ImportError:
        print("nest_asyncio not found, installing...")
        try:
            subprocess.run([sys.executable, "-m", "pip", "install", "-q", "nest_asyncio"], check=True)
            import nest_asyncio
            nest_asyncio.apply()
            print("nest_asyncio installed and applied.")
        except Exception as e:
            # Best effort: continue even if installation fails.
            print(f"Failed to install or apply nest_asyncio: {e}")
    # ComfyUI-internal modules; only importable after the ComfyUI root is on sys.path.
    import asyncio, execution, server
    from nodes import init_extra_nodes
    # Obtain a usable event loop, replacing a closed one if necessary.
    try:
        loop = asyncio.get_event_loop()
        if loop.is_closed():
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
    except RuntimeError:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    # PromptServer/PromptQueue must exist before custom nodes initialize;
    # several custom node packs reference them at import time.
    server_instance = server.PromptServer(loop)
    execution.PromptQueue(server_instance)
    if not loop.is_running():
        try:
            loop.run_until_complete(init_extra_nodes())
        except RuntimeError as e:
            print(f"Note: Could not run init_extra_nodes synchronously, possibly due to existing loop state: {e}")
            # Fall back to scheduling it on the loop instead.
            try:
                asyncio.ensure_future(init_extra_nodes())
            except Exception as fut_e:
                print(f"Error trying async init_extra_nodes: {fut_e}")
    else:
        # Loop already running (notebook): schedule initialization as a future.
        try:
            asyncio.ensure_future(init_extra_nodes())
        except Exception as fut_e:
            print(f"Error trying async init_extra_nodes on running loop: {fut_e}")
def main():
    """Run the full image-to-video pipeline.

    Stages: clean the output folder (keeping old videos), bootstrap ComfyUI,
    encode prompts and the start image (stage 1), sample with the high-noise
    UNet (stage 2) then the low-noise UNet (stage 3), VAE-decode to frames
    (stage 4), optionally model-upscale in chunks (stage 5), optionally
    RIFE-interpolate in overlapping chunks (stage 6), encode with FFmpeg, and
    print the final video path markers for the parent process to parse.
    Models are loaded and freed stage by stage to keep VRAM usage low.

    NOTE(review): the original source was mojibake'd; Korean runtime strings
    are preserved as-is and some may contain lost control bytes.
    """
    args = parse_args()
    print("π λμμ μμ±μ μμν©λλ€ (Full Control Mode, VRAM Optimized)...\n")
    # Clean the output folder before a new run, preserving existing video files.
    output_dir = os.path.join(COMFYUI_BASE_PATH, 'output')
    print(f" - μ΄μ μΆλ ₯λ¬Ό μ 리 μ€... (Output: {output_dir})")
    deleted_count = 0
    try:
        # Walk the immediate children of the output folder.
        for item_name in os.listdir(output_dir):
            item_path = os.path.join(output_dir, item_name)
            # Keep video files (.mp4/.mkv/.webm); delete everything else.
            if item_name.lower().endswith(('.mp4', '.mkv', '.webm')):
                print(f" - ποΈ λΉλμ€ νμΌ '{item_name}'μ 보쑴ν©λλ€.")
                continue
            # Plain files and symlinks are unlinked directly.
            if os.path.isfile(item_path) or os.path.islink(item_path):
                os.unlink(item_path)
                deleted_count += 1
            # Directories are removed recursively.
            elif os.path.isdir(item_path):
                shutil.rmtree(item_path)
                deleted_count += 1
        print(f" β μ 리 μλ£. 보쑴λ λΉλμ€ μΈ {deleted_count}κ°μ νλͺ©μ΄ μμ λμμ΅λλ€.")
    except Exception as e:
        # Best-effort cleanup: a failure here must not abort generation.
        print(f" β μΆλ ₯ ν΄λ μ 리 μ€ μ€λ₯ λ°μ: {e}")
    # Recreate the working folders (the cleanup above may have removed them).
    os.makedirs(f"{COMFYUI_BASE_PATH}/output/temp", exist_ok=True);
    os.makedirs(f"{COMFYUI_BASE_PATH}/output/up", exist_ok=True)
    os.makedirs(f"{COMFYUI_BASE_PATH}/output/interpolated", exist_ok=True)
    add_comfyui_directory_to_sys_path()
    # extra_model_paths.yaml support is optional; fall back to a no-op loader.
    try: from utils.extra_config import load_extra_path_config
    except ImportError: print("β οΈ ComfyUIμ extra_model_paths.yaml λ‘λ© μ€ν¨ (무μνκ³ μ§ν)"); load_extra_path_config = lambda x: None
    extra_model_paths_file = os.path.join(COMFYUI_BASE_PATH, "extra_model_paths.yaml")
    if os.path.exists(extra_model_paths_file): load_extra_path_config(extra_model_paths_file)
    print("ComfyUI 컀μ€νλΈλ μ΄κΈ°ν μ€..."); import_custom_nodes(); from nodes import NODE_CLASS_MAPPINGS; print("컀μ€νλΈλ μ΄κΈ°ν μλ£.")
    # Seed handling: -1 requests a fresh random 64-bit seed.
    if args.noise_seed == -1: final_seed = random.randint(1, 2**64); print(f" - λλ€ μλ μμ±: {final_seed}")
    else: final_seed = args.noise_seed; print(f" - κ³ μ μλ μ¬μ©: {final_seed}")
    # Step index at which sampling hands over from the high- to the low-noise UNet.
    split_step = max(0, int(args.steps * (args.split_point_percent / 100.0))); print(f" - μ΄ {args.steps} μ€νμ€ {split_step} ( {args.split_point_percent}% )μμ λΆν ")
    # Keep CLIP loaded through stage 3 only when LoRAs are configured AND the
    # user asked for LoRA-to-CLIP patching; otherwise it is freed after stage 1.
    loras_in_use = not (args.lora_high_1_name == "None" and args.lora_high_2_name == "None" and args.lora_low_1_name == "None" and args.lora_low_2_name == "None")
    connect_clip_to_lora = to_bool(args.connect_lora_clip); should_keep_clip_loaded = loras_in_use and connect_clip_to_lora
    with torch.inference_mode():
        # Instantiate every ComfyUI node class used by the pipeline.
        loadimage=NODE_CLASS_MAPPINGS["LoadImage"](); upscalemodelloader=NODE_CLASS_MAPPINGS["UpscaleModelLoader"](); cliploader=NODE_CLASS_MAPPINGS["CLIPLoader"](); vaeloader=NODE_CLASS_MAPPINGS["VAELoader"](); cliptextencode=NODE_CLASS_MAPPINGS["CLIPTextEncode"](); unetloadergguf=NODE_CLASS_MAPPINGS["UnetLoaderGGUF"](); loraloader=NODE_CLASS_MAPPINGS["LoraLoader"](); imageresizekjv2=NODE_CLASS_MAPPINGS["ImageResizeKJv2"](); wanimagetovideo=NODE_CLASS_MAPPINGS["WanImageToVideo"](); modelsamplingsd3=NODE_CLASS_MAPPINGS["ModelSamplingSD3"](); ksampleradvanced=NODE_CLASS_MAPPINGS["KSamplerAdvanced"](); vaedecode=NODE_CLASS_MAPPINGS["VAEDecode"](); vhs_loadimagespath=NODE_CLASS_MAPPINGS["VHS_LoadImagesPath"](); imageupscalewithmodel=NODE_CLASS_MAPPINGS["ImageUpscaleWithModel"](); imagescaleby=NODE_CLASS_MAPPINGS["ImageScaleBy"](); rife_vfi=NODE_CLASS_MAPPINGS["RIFE VFI"](); vhs_videocombine=NODE_CLASS_MAPPINGS["VHS_VideoCombine"](); saveimage=NODE_CLASS_MAPPINGS["SaveImage"]()
        clipvisionloader=NODE_CLASS_MAPPINGS["CLIPVisionLoader"]()
        clipvisionencode=NODE_CLASS_MAPPINGS["CLIPVisionEncode"]()
        pathchsageattentionkj=NODE_CLASS_MAPPINGS["PathchSageAttentionKJ"]()
        # --- Stage 1: load inputs, encode prompts + CLIP Vision, build the
        # initial latent, then free the temporary models. ---
        print("\n1λ¨κ³: λ°μ΄ν° λ‘λ© λ° μ΄κΈ° Latent μμ± μ€...");
        print(f" - CLIP λ‘λ©: {args.clip_name}");
        cliploader_460 = cliploader.load_clip(clip_name=args.clip_name, type="wan", device="default");
        cliptextencode_462 = cliptextencode.encode(text=args.positive_prompt, clip=get_value_at_index(cliploader_460, 0));
        cliptextencode_463 = cliptextencode.encode(text=args.negative_prompt, clip=get_value_at_index(cliploader_460, 0));
        loadimage_88 = loadimage.load_image(image="example.png");
        # Resize/crop the start image to the requested geometry (dims kept even).
        imageresizekjv2_401 = imageresizekjv2.resize(
            width=args.width, height=args.height,
            upscale_method=args.input_resize_algo,
            image=get_value_at_index(loadimage_88, 0), keep_proportion="crop",
            pad_color="0, 0, 0", crop_position="center", divisible_by=2,
            unique_id=random.randint(1, 2**64)
        );
        print(f" - CLIP Vision λ‘λ©: clip_vision_h.safetensors");
        clipvisionloader_cv = clipvisionloader.load_clip(clip_name="clip_vision_h.safetensors");
        print(f" - CLIP Vision μΈμ½λ© μ€...");
        clipvisionencode_cv = clipvisionencode.encode(
            crop="none",
            clip_vision=get_value_at_index(clipvisionloader_cv, 0),
            image=get_value_at_index(imageresizekjv2_401, 0)
        );
        clip_vision_output = get_value_at_index(clipvisionencode_cv, 0)
        print(f" - VAE μμ λ‘λ© (μ΄κΈ° Latent μμ±μ©): {args.vae_name}");
        vaeloader_temp = vaeloader.load_vae(vae_name=args.vae_name);
        # Build conditioning + initial latent from the resized start image.
        wanimagetovideo_464 = wanimagetovideo.EXECUTE_NORMALIZED(
            width=get_value_at_index(imageresizekjv2_401, 1),
            height=get_value_at_index(imageresizekjv2_401, 2),
            length=args.length,
            batch_size=1,
            positive=get_value_at_index(cliptextencode_462, 0),
            negative=get_value_at_index(cliptextencode_463, 0),
            vae=get_value_at_index(vaeloader_temp, 0),
            clip_vision_output=clip_vision_output,
            start_image=get_value_at_index(imageresizekjv2_401, 0)
        );
        # Free CLIP now unless LoRA-CLIP patching needs it through stage 3.
        if not should_keep_clip_loaded: print(" β¨ (μ΅μ ν) 1λ¨κ³ μλ£, CLIP λͺ¨λΈμ μ¦μ ν΄μ ν©λλ€."); del cliploader_460
        else: print(" β οΈ (μ€μ ) LoRA CLIP μ°κ²° μ΅μμ΄ νμ±νλμ΄ 3λ¨κ³κΉμ§ CLIP λͺ¨λΈμ μ μ§ν©λλ€.")
        print(" β¨ (μ΅μ ν) 1λ¨κ³ μλ£, μμ VAE λ° CLIP Vision λͺ¨λΈμ ν΄μ ν©λλ€.");
        del vaeloader_temp, clipvisionloader_cv, clipvisionencode_cv, clip_vision_output;
        clear_memory(); print("1λ¨κ³ μλ£.");
        # --- Stage 2: high-noise UNet sampling (steps 0..split_step, noise added,
        # leftover noise returned for stage 3). ---
        print(f"\n2λ¨κ³: High Noise μνλ§ μμ..."); print(f" - UNet High λ‘λ©: {args.unet_high_name}"); unetloadergguf_495 = unetloadergguf.load_unet(unet_name=args.unet_high_name); model = get_value_at_index(unetloadergguf_495, 0); clip = get_value_at_index(cliploader_460, 0) if should_keep_clip_loaded else None; model_for_patching = model;
        if to_bool(args.sageattention): print(" β¨ SageAttention ν¨μΉ μ μ© μ€ (High)..."); pathchsageattentionkj_124 = pathchsageattentionkj.patch(sage_attention="auto", model=model_for_patching); model_for_patching = get_value_at_index(pathchsageattentionkj_124, 0)
        if args.lora_high_1_name != "None": print(f" - H LoRA 1: {args.lora_high_1_name}"); model_for_patching, clip = loraloader.load_lora(lora_name=args.lora_high_1_name, strength_model=args.lora_high_1_strength_model, strength_clip=args.lora_high_1_strength_clip, model=model_for_patching, clip=clip)
        if args.lora_high_2_name != "None": print(f" - H LoRA 2: {args.lora_high_2_name}"); model_for_patching, clip = loraloader.load_lora(lora_name=args.lora_high_2_name, strength_model=args.lora_high_2_strength_model, strength_clip=args.lora_high_2_strength_clip, model=model_for_patching, clip=clip)
        shifted_model = get_value_at_index(modelsamplingsd3.patch(shift=args.shift, model=model_for_patching), 0); final_model = shifted_model;
        # Fixed: this stage uses cfg=args.cfg_high.
        ksampleradvanced_466 = ksampleradvanced.sample(add_noise="enable", noise_seed=final_seed, steps=args.steps, cfg=args.cfg_high, sampler_name=args.sampler_name_high, scheduler=args.scheduler_high, start_at_step=0, end_at_step=split_step, return_with_leftover_noise="enable", model=final_model, positive=get_value_at_index(wanimagetovideo_464, 0), negative=get_value_at_index(wanimagetovideo_464, 1), latent_image=get_value_at_index(wanimagetovideo_464, 2));
        if to_bool(args.sageattention): del pathchsageattentionkj_124
        del unetloadergguf_495, model, clip, model_for_patching, shifted_model, final_model; clear_memory(); print("2λ¨κ³ μλ£.")
        # --- Stage 3: low-noise UNet continues from stage 2's leftover-noise
        # latent (same seed, add_noise disabled, steps split_step..end). ---
        print(f"\n3λ¨κ³: Low Noise μνλ§ μμ..."); print(f" - UNet Low λ‘λ©: {args.unet_low_name}"); unetloadergguf_496 = unetloadergguf.load_unet(unet_name=args.unet_low_name); model = get_value_at_index(unetloadergguf_496, 0); clip = get_value_at_index(cliploader_460, 0) if should_keep_clip_loaded else None; model_for_patching = model;
        if to_bool(args.sageattention): print(" β¨ SageAttention ν¨μΉ μ μ© μ€ (Low)..."); pathchsageattentionkj_129 = pathchsageattentionkj.patch(sage_attention="auto", model=model_for_patching); model_for_patching = get_value_at_index(pathchsageattentionkj_129, 0)
        if args.lora_low_1_name != "None": print(f" - L LoRA 1: {args.lora_low_1_name}"); model_for_patching, clip = loraloader.load_lora(lora_name=args.lora_low_1_name, strength_model=args.lora_low_1_strength_model, strength_clip=args.lora_low_1_strength_clip, model=model_for_patching, clip=clip)
        if args.lora_low_2_name != "None": print(f" - L LoRA 2: {args.lora_low_2_name}"); model_for_patching, clip = loraloader.load_lora(lora_name=args.lora_low_2_name, strength_model=args.lora_low_2_strength_model, strength_clip=args.lora_low_2_strength_clip, model=model_for_patching, clip=clip)
        shifted_model = get_value_at_index(modelsamplingsd3.patch(shift=args.shift, model=model_for_patching), 0); final_model = shifted_model;
        # Fixed: this stage uses cfg=args.cfg_low.
        ksampleradvanced_465 = ksampleradvanced.sample(add_noise="disable", noise_seed=final_seed, steps=args.steps, cfg=args.cfg_low, sampler_name=args.sampler_name_low, scheduler=args.scheduler_low, start_at_step=split_step, end_at_step=10000, return_with_leftover_noise="disable", model=final_model, positive=get_value_at_index(wanimagetovideo_464, 0), negative=get_value_at_index(wanimagetovideo_464, 1), latent_image=get_value_at_index(ksampleradvanced_466, 0));
        if to_bool(args.sageattention): del pathchsageattentionkj_129
        if should_keep_clip_loaded: print(" β¨ (λ©λͺ¨λ¦¬) LoRA CLIP μ°κ²° μ΅μ μ¬μ© μλ£, CLIP λͺ¨λΈμ ν΄μ ν©λλ€."); del cliploader_460
        del unetloadergguf_496, model, clip, model_for_patching, shifted_model, final_model, ksampleradvanced_466, wanimagetovideo_464; clear_memory(); print("3λ¨κ³ μλ£.")
        # --- Stage 4: VAE-decode the final latent and save frames to output/temp. ---
        print(f"\n4λ¨κ³: VAE λμ½λ© λ° μμ μ μ₯ μ€..."); print(f" - VAE λͺ¨λΈ λ‘λ© (λμ½λ©μ©): {args.vae_name}"); vaeloader_461 = vaeloader.load_vae(vae_name=args.vae_name); vaedecode_469 = vaedecode.decode(samples=get_value_at_index(ksampleradvanced_465, 0), vae=get_value_at_index(vaeloader_461, 0)); saveimage.save_images(filename_prefix="temp/example", images=get_value_at_index(vaedecode_469, 0));
        del ksampleradvanced_465, vaeloader_461, vaedecode_469, loadimage_88, imageresizekjv2_401; clear_memory(); print("4λ¨κ³ μλ£.")
        combine_input_dir_for_ffmpeg = f"{COMFYUI_BASE_PATH}/output/temp"
        # --- Stage 5 (optional): model upscaling of the saved frames, in chunks
        # to bound VRAM; output goes to output/up. ---
        if args.upscale_ratio > 1:
            if args.upscale_model_name == "None": print("\n5λ¨κ³: μμ€μΌμΌλ§ 건λλ (λͺ¨λΈμ΄ μ νλμ§ μμ).")
            else:
                print(f"\n5λ¨κ³: νλ μ μμ€μΌμΌλ§ μ€..."); print(f" - Upscale λͺ¨λΈ λ‘λ©: {args.upscale_model_name}"); upscalemodelloader_384 = upscalemodelloader.load_model(model_name=args.upscale_model_name); chunk_size = args.upscale_chunk_size; base_dir = f"{COMFYUI_BASE_PATH}/output/temp"; scale_by_ratio = args.upscale_ratio / args.upscale_model_scale;
                total_frames = 0
                try:
                    # Count saved frames to plan chunked processing.
                    temp_files = [f for f in os.listdir(base_dir) if f.endswith(('.png', '.jpg', '.jpeg', '.webp'))]
                    total_frames = len(temp_files)
                    if total_frames == 0:
                        raise FileNotFoundError("μμ€μΌμΌν νλ μμ΄ 'temp' ν΄λμ μμ΅λλ€.")
                except Exception as e:
                    print(f" β μμ€μΌμΌ 5λ¨κ³ μ€λ¨: 'temp' ν΄λμμ νλ μμ μ½μ μ μμ΅λλ€. (μ€λ₯: {e})")
                    if 'upscalemodelloader_384' in locals(): del upscalemodelloader_384
                    clear_memory()
                    raise
                print(f" - μ΄ {total_frames}κ°μ νλ μμ {chunk_size}κ° λ¨μλ‘ λΆν νμ¬ μ€νν©λλ€...")
                for i in range(0, total_frames, chunk_size):
                    print(f" - λ°°μΉ μ²λ¦¬ μ€ (νλ μ {i} ~ {min(i + chunk_size, total_frames) - 1})...")
                    vhs_load_chunk = vhs_loadimagespath.load_images(directory=base_dir, skip_first_images=i, image_load_cap=chunk_size); loaded_images = get_value_at_index(vhs_load_chunk, 0);
                    if loaded_images is None: print(" - (κ²½κ³ ) 건λλΈ μ μλ μ΄λ―Έμ§κ° λ‘λλμμ΅λλ€, μ΄ λ°°μΉλ₯Ό 건λλλλ€."); continue
                    # Model upscale, then rescale so the net factor equals upscale_ratio.
                    imageupscale_chunk = imageupscalewithmodel.upscale(upscale_model=get_value_at_index(upscalemodelloader_384, 0), image=loaded_images);
                    imagescale_chunk = imagescaleby.upscale(
                        upscale_method=args.output_resize_algo,
                        scale_by=scale_by_ratio,
                        image=get_value_at_index(imageupscale_chunk, 0)
                    );
                    saveimage.save_images(filename_prefix="up/example", images=get_value_at_index(imagescale_chunk, 0));
                    del vhs_load_chunk, loaded_images, imageupscale_chunk, imagescale_chunk; clear_memory()
                del upscalemodelloader_384; clear_memory(); combine_input_dir_for_ffmpeg = f"{COMFYUI_BASE_PATH}/output/up"; print("5λ¨κ³ μλ£.")
        else: print("\n5λ¨κ³: μμ€μΌμΌλ§ 건λλ (λΉμ¨ 1.0).")
        # --- Stage 6 (optional): RIFE 2x frame interpolation in overlapping chunks. ---
        print("\n6λ¨κ³: λΉλμ€ κ²°ν© μ€λΉ μ€..."); final_frame_rate = float(args.frame_rate); ffmpeg_input_dir = combine_input_dir_for_ffmpeg
        if to_bool(args.interpolation):
            print(" - νλ μ λ³΄κ° (RIFE)μ νμ±νν©λλ€."); interpolated_dir = f"{COMFYUI_BASE_PATH}/output/interpolated"; source_dir = combine_input_dir_for_ffmpeg
            total_frames_rife = 0
            try:
                temp_files = [f for f in os.listdir(source_dir) if f.endswith(('.png', '.jpg', '.jpeg', '.webp'))]; total_frames_rife = len(temp_files);
                if total_frames_rife == 0: raise FileNotFoundError(f"RIFE 보κ°ν νλ μμ΄ '{source_dir}' ν΄λμ μμ΅λλ€.")
            except Exception as e: print(f" β RIFE 6λ¨κ³ μ€λ¨: '{source_dir}' ν΄λμμ νλ μμ μ½μ μ μμ΅λλ€. (μ€λ₯: {e})"); raise
            chunk_size = args.rife_chunk_size;
            print(f" - μ΄ {total_frames_rife}κ°μ νλ μμ RIFE μ²ν¬ {chunk_size}κ° λ¨μλ‘ λΆν νμ¬ μ€νν©λλ€ (Overlap μ μ©)...")
            current_frame_idx = 0
            is_first_chunk = True
            while current_frame_idx < total_frames_rife:
                load_from = current_frame_idx
                load_cap = chunk_size
                if not is_first_chunk:
                    load_from -= 1  # overlap one frame with the previous chunk
                    load_cap += 1  # load one extra frame to cover the overlap
                # Clamp the final chunk to the frames actually available.
                if load_from + load_cap > total_frames_rife:
                    load_cap = total_frames_rife - load_from
                # RIFE needs at least two input frames to interpolate between.
                if load_cap < 2:
                    print(f" - (κ²½κ³ ) RIFE μ²λ¦¬μ νμν νλ μ(2κ°)μ΄ λΆμ‘±νμ¬ λ§μ§λ§ λ°°μΉλ₯Ό 건λλλλ€.")
                    break
                print(f" - RIFE λ°°μΉ μ²λ¦¬ μ€ (μλ³Έ νλ μ {load_from} ~ {load_from + load_cap - 1})...")
                vhs_load_chunk = vhs_loadimagespath.load_images(directory=source_dir, skip_first_images=load_from, image_load_cap=load_cap);
                loaded_images = get_value_at_index(vhs_load_chunk, 0);
                if loaded_images is None:
                    print(" - (κ²½κ³ ) 건λλΈ μ μλ μ΄λ―Έμ§κ° λ‘λλμμ΅λλ€, μ΄ λ°°μΉλ₯Ό 건λλλλ€.");
                    current_frame_idx += chunk_size
                    is_first_chunk = False
                    continue
                # 2x interpolation with the RIFE 4.9 checkpoint.
                rife_chunk_result_tensor = get_value_at_index(rife_vfi.vfi(
                    ckpt_name="rife49.pth",
                    multiplier=2,
                    fast_mode=to_bool(args.rife_fast_mode),
                    ensemble=to_bool(args.rife_ensemble),
                    frames=loaded_images
                ), 0)
                images_to_save = rife_chunk_result_tensor
                if not is_first_chunk:
                    # Every chunk after the first drops its duplicated first frame
                    # (tensor slicing) so the overlap is not saved twice.
                    print(f" - (Overlap) μ€λ³΅ νλ μ 1κ° μ κ±° ν μ μ₯")
                    images_to_save = rife_chunk_result_tensor[1:]
                saveimage.save_images(filename_prefix="interpolated/example", images=images_to_save);
                del vhs_load_chunk, loaded_images, rife_chunk_result_tensor, images_to_save; clear_memory()
                current_frame_idx += chunk_size
                is_first_chunk = False
            # Interpolation doubled the frame count, so double the frame rate too.
            ffmpeg_input_dir = interpolated_dir; final_frame_rate *= 2
        else: print(" - νλ μ 보κ°μ΄ λΉνμ±νλμμ΅λλ€.");
        # --- Final encode: combine the saved frames into an .mp4 with FFmpeg. ---
        print(f" - μ΅μ’λΉλμ€λ₯Ό FFmpeg ({args.video_encoder})λ‘ κ²°ν©ν©λλ€..."); print(f" - μλ ₯ ν΄λ: '{ffmpeg_input_dir}'")
        # Frame filenames follow ComfyUI's SaveImage pattern: example_00001_.png, ...
        input_pattern = os.path.join(ffmpeg_input_dir, "example_%05d_.png")
        timestamp = time.strftime("%Y%m%d-%H%M%S"); output_filename = f"AnimateDiff_{timestamp}.mp4"; output_path = os.path.join(COMFYUI_BASE_PATH, "output", output_filename)
        ffmpeg_cmd = ["ffmpeg", "-framerate", str(final_frame_rate), "-i", input_pattern]
        # Choose the encoder arguments from the UI selection.
        encoder_choice = args.video_encoder
        if encoder_choice == "GPU: HEVC (NVENC)": ffmpeg_cmd.extend(["-c:v", "hevc_nvenc", "-cq", str(args.nvenc_cq), "-preset", args.nvenc_preset, "-tag:v", "hvc1"])
        elif encoder_choice == "GPU: H.264 (NVENC)": ffmpeg_cmd.extend(["-c:v", "h264_nvenc", "-cq", str(args.nvenc_cq), "-preset", args.nvenc_preset])
        else: ffmpeg_cmd.extend(["-c:v", "libx264", "-crf", str(args.cpu_crf), "-preset", "medium"])
        ffmpeg_cmd.extend(["-pix_fmt", "yuv420p", "-y", output_path])
        print(f" - μ€ν λͺλ Ήμ΄: {' '.join(ffmpeg_cmd)}")
        try:
            result = subprocess.run(ffmpeg_cmd, capture_output=True, text=True, check=True, encoding='utf-8')
            print(" - FFmpeg μ€ν μλ£.")
        except FileNotFoundError: print(" β μ€λ₯: 'ffmpeg' λͺλ Ήμ΄λ₯Ό μ°Ύμ μ μμ΅λλ€. μμ€νμ μ€μΉλμ΄ μλμ§ νμΈνμΈμ."); raise
        except subprocess.CalledProcessError as e:
            # Surface FFmpeg's own output before propagating the failure.
            print(f" β μ€λ₯: FFmpeg μ€ν μ€ν¨ (Return code: {e.returncode})")
            if e.stdout: print(f" FFmpeg stdout:\n{e.stdout}")
            if e.stderr: print(f" FFmpeg stderr:\n{e.stderr}")
            raise
        except Exception as e: print(f" β μ€λ₯: FFmpeg μ€ν μ€ μμμΉ λͺ»ν μ€λ₯ λ°μ: {e}"); raise
        print("β λͺ¨λ λ¨κ³ μλ£.")
        # Resolve the final video path robustly (avoiding UnboundLocalError),
        # falling back to the newest video in output/ if the expected file is missing.
        latest_video = None
        if os.path.exists(output_path):
            latest_video = output_path
            # Marker line parsed by the parent notebook process.
            print(f"LATEST_VIDEO_PATH:{latest_video}")
        else:
            output_dir = os.path.join(COMFYUI_BASE_PATH, "output");
            video_files = glob.glob(os.path.join(output_dir, '**', '*.mp4'), recursive=True) + \
                          glob.glob(os.path.join(output_dir, '**', '*.mkv'), recursive=True)
            if not video_files:
                raise FileNotFoundError("μμ±λ λμμ νμΌμ μ°Ύμ μ μμ΅λλ€!")
            latest_video = max(video_files, key=os.path.getctime)
            print(f"LATEST_VIDEO_PATH:{latest_video}")
        if latest_video is None:
            raise FileNotFoundError("μ΅μ’λΉλμ€ κ²½λ‘λ₯Ό νμ ν μ μμ΅λλ€. μ€ν¬λ¦½νΈλ₯Ό νμΈνμΈμ.")
        # Keep an "_original" copy so later post-processing cannot clobber the source.
        base, ext = os.path.splitext(latest_video)
        original_copy_path = f"{base}_original{ext}"
        try:
            shutil.copy2(latest_video, original_copy_path)
            print(f"β μλ³Έ 볡μ¬λ³Έ μμ± μλ£: {original_copy_path}")
            print(f"ORIGINAL_COPY_PATH:{original_copy_path}")
        except Exception as e:
            print(f"β μλ³Έ 볡μ¬λ³Έ μμ± μ€ν¨: {e}")
# --- end of pipeline definition ---
# Script entry point: run the full generation pipeline when executed directly.
if __name__ == "__main__":
    main()