# forge/run_generator.py
# Provenance: Hugging Face upload by arcacolab ("Update run_generator.py", commit 03e7ba8, verified)
#@title 필요 파일 생성
# This file is executed as a subprocess from notebook cell 4.
import sys
import os
import time
import glob
import gc
import torch
import subprocess
import random
import argparse
from typing import Sequence, Mapping, Any, Union
import shutil
# --- 0. κΈ°λ³Έ μ„€μ • 및 인수 νŒŒμ‹± ---
def parse_args():
    """Collect every generation control passed in from the driver notebook.

    Returns the parsed ``argparse.Namespace``. Required flags cover prompts,
    geometry and model filenames; everything else has a sensible default.
    """
    parser = argparse.ArgumentParser(description="ComfyUI Video Generation Script with All Controls from 1.py")
    # Prompts and base geometry (mandatory).
    parser.add_argument("--positive_prompt", type=str, required=True)
    parser.add_argument("--negative_prompt", type=str, required=True)
    parser.add_argument("--width", type=int, required=True)
    parser.add_argument("--height", type=int, required=True)
    parser.add_argument("--length", type=int, required=True)
    parser.add_argument("--upscale_ratio", type=float, required=True)
    # Sampler controls for the high/low-noise two-pass schedule.
    parser.add_argument("--steps", type=int, default=4)
    parser.add_argument("--cfg_high", type=float, default=1.0)
    parser.add_argument("--cfg_low", type=float, default=1.0)
    parser.add_argument("--sampler_name_high", type=str, default="euler")
    parser.add_argument("--scheduler_high", type=str, default="simple")
    parser.add_argument("--sampler_name_low", type=str, default="euler")
    parser.add_argument("--scheduler_low", type=str, default="simple")
    parser.add_argument("--noise_seed", type=int, default=-1)
    parser.add_argument("--split_point_percent", type=float, default=50.0)
    parser.add_argument("--shift", type=float, default=8.0)
    parser.add_argument("--sageattention", type=str, default="on")
    # Model files (mandatory).
    parser.add_argument("--unet_high_name", type=str, required=True)
    parser.add_argument("--unet_low_name", type=str, required=True)
    parser.add_argument("--vae_name", type=str, required=True)
    parser.add_argument("--clip_name", type=str, required=True)
    # Optional model upscaling stage.
    parser.add_argument("--upscale_model_name", type=str, default="None")
    parser.add_argument("--upscale_model_scale", type=float, default=2.0)
    parser.add_argument("--upscale_chunk_size", type=int, default=30)
    # Frame rate and RIFE interpolation stage.
    parser.add_argument("--frame_rate", type=int, default=16)
    parser.add_argument("--interpolation", type=str, default="on")
    parser.add_argument("--rife_fast_mode", type=str, default="on")
    parser.add_argument("--rife_ensemble", type=str, default="on")
    parser.add_argument("--rife_chunk_size", type=int, default=30)
    parser.add_argument("--connect_lora_clip", type=str, default="off")
    # FFmpeg encoder selection and quality knobs.
    parser.add_argument("--video_encoder", type=str, default="GPU: HEVC (NVENC)")
    parser.add_argument("--nvenc_cq", type=int, default=25)
    parser.add_argument("--nvenc_preset", type=str, default="p5")
    parser.add_argument("--cpu_crf", type=int, default=19)
    # Four optional LoRA slots: two for the high-noise UNet, two for the low.
    for slot in ("lora_high_1", "lora_high_2", "lora_low_1", "lora_low_2"):
        parser.add_argument(f"--{slot}_name", type=str, default="None")
        parser.add_argument(f"--{slot}_strength_model", type=float, default=1.0)
        parser.add_argument(f"--{slot}_strength_clip", type=float, default=1.0)
    # Resize algorithms for the input image and the upscaled output frames.
    parser.add_argument("--input_resize_algo", type=str, default="bicubic")
    parser.add_argument("--output_resize_algo", type=str, default="bicubic")
    return parser.parse_args()
def to_bool(s: str) -> bool: return s.lower() in ['true', '1', 't', 'y', 'yes', 'on']
def clear_memory() -> None:
    """Free Python garbage, then release cached CUDA memory.

    Fix: the original ran ``gc.collect()`` *after* ``torch.cuda.empty_cache()``,
    so GPU tensors only reachable through collectable Python cycles were not
    yet freed when the cache was flushed and their memory stayed cached until
    the next call. Collecting first lets ``empty_cache``/``ipc_collect``
    actually return that memory to the driver. No-op on the CUDA side when no
    GPU is available.
    """
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()
# Root of the ComfyUI checkout used throughout the pipeline.
COMFYUI_BASE_PATH = '/content/ComfyUI'


def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
    """Fetch *index* from a node output, unwrapping ComfyUI's {'result': ...} wrappers.

    Plain sequence/mapping lookups are tried first; when that raises
    KeyError/TypeError and the object is a dict carrying a 'result' entry,
    the lookup is retried against that entry. Any other failure propagates.
    """
    try:
        return obj[index]
    except (KeyError, TypeError):
        if isinstance(obj, dict) and "result" in obj:
            return obj["result"][index]
        raise


def add_comfyui_directory_to_sys_path() -> None:
    """Append the ComfyUI checkout to ``sys.path`` once, if the folder exists."""
    not_yet_registered = COMFYUI_BASE_PATH not in sys.path
    if not_yet_registered and os.path.isdir(COMFYUI_BASE_PATH):
        sys.path.append(COMFYUI_BASE_PATH)
def import_custom_nodes() -> None:
    """Boot a headless ComfyUI server context and load its custom nodes.

    Must run after ``add_comfyui_directory_to_sys_path`` so the project-local
    modules (``execution``, ``server``, ``nodes``) resolve. Handles the messy
    asyncio loop state found in notebook environments: nest_asyncio is applied
    (installed on the fly if missing) so a loop can be re-entered, and
    ``init_extra_nodes`` is run either synchronously or scheduled as a future
    depending on whether a loop is already running.
    """
    # nest_asyncio allows re-entrant use of an already-running event loop
    # (the normal state inside Colab/Jupyter). Best-effort install if absent.
    try:
        import nest_asyncio
        nest_asyncio.apply()
    except ImportError:
        print("nest_asyncio not found, installing...")
        try:
            subprocess.run([sys.executable, "-m", "pip", "install", "-q", "nest_asyncio"], check=True)
            import nest_asyncio
            nest_asyncio.apply()
            print("nest_asyncio installed and applied.")
        except Exception as e:
            # Non-fatal: continue and hope the loop handling below suffices.
            print(f"Failed to install or apply nest_asyncio: {e}")
    # Project-local ComfyUI modules; importable only after sys.path setup.
    import asyncio, execution, server
    from nodes import init_extra_nodes
    # Obtain a usable event loop, replacing a closed or missing one.
    try:
        loop = asyncio.get_event_loop()
        if loop.is_closed():
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
    except RuntimeError:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    # PromptServer/PromptQueue are required as side-effectful singletons by
    # many custom nodes even though no HTTP server is actually started here.
    server_instance = server.PromptServer(loop)
    execution.PromptQueue(server_instance)
    if not loop.is_running():
        # Idle loop: run node initialization to completion synchronously.
        try:
            loop.run_until_complete(init_extra_nodes())
        except RuntimeError as e:
            print(f"Note: Could not run init_extra_nodes synchronously, possibly due to existing loop state: {e}")
            # Fall back to scheduling it; it will run when the loop next spins.
            try:
                asyncio.ensure_future(init_extra_nodes())
            except Exception as fut_e:
                print(f"Error trying async init_extra_nodes: {fut_e}")
    else:
        # Loop already running (notebook kernel): schedule instead of blocking.
        try:
            asyncio.ensure_future(init_extra_nodes())
        except Exception as fut_e:
            print(f"Error trying async init_extra_nodes on running loop: {fut_e}")
def main():
    """End-to-end WAN image-to-video generation pipeline (VRAM-staged).

    Executed as a subprocess from the notebook. Stages:
      0. clean the output folder (existing videos preserved)
      1. load input image, encode prompts + CLIP Vision, build initial latent
      2. high-noise UNet sampling (steps 0..split_step)
      3. low-noise UNet sampling (split_step..end)
      4. VAE decode, frames saved to output/temp
      5. optional chunked model upscaling to output/up
      6. optional chunked RIFE frame interpolation (with 1-frame overlap),
         then FFmpeg assembly and a ``*_original`` backup copy.
    Each stage loads only the models it needs and frees them immediately
    afterwards to fit within Colab VRAM.
    """
    args = parse_args()
    print("🚀 동영상 생성을 시작합니다 (Full Control Mode, VRAM Optimized)...\n")
    # Clean the output folder, excluding .mp4/.mkv/.webm video files.
    output_dir = os.path.join(COMFYUI_BASE_PATH, 'output')
    print(f" - 이전 출력물 정리 중... (Output: {output_dir})")
    deleted_count = 0
    try:
        # Enumerate every file and folder inside the output directory.
        for item_name in os.listdir(output_dir):
            item_path = os.path.join(output_dir, item_name)
            # Video files (.mp4/.mkv/.webm) are preserved, never deleted.
            if item_name.lower().endswith(('.mp4', '.mkv', '.webm')):
                print(f" - 🗄️ 비디오 파일 '{item_name}'은 보존합니다.")
                continue
            # Delete plain files and symlinks.
            if os.path.isfile(item_path) or os.path.islink(item_path):
                os.unlink(item_path)
                deleted_count += 1
            # Delete directories recursively.
            elif os.path.isdir(item_path):
                shutil.rmtree(item_path)
                deleted_count += 1
        print(f" ✅ 정리 완료. 보존된 비디오 외 {deleted_count}개의 항목이 삭제되었습니다.")
    except Exception as e:
        # Best-effort cleanup: log and continue even if the folder is unreadable.
        print(f" ❌ 출력 폴더 정리 중 오류 발생: {e}")
    # Recreate the working folders (the cleanup above may have removed them).
    os.makedirs(f"{COMFYUI_BASE_PATH}/output/temp", exist_ok=True);
    os.makedirs(f"{COMFYUI_BASE_PATH}/output/up", exist_ok=True)
    os.makedirs(f"{COMFYUI_BASE_PATH}/output/interpolated", exist_ok=True)
    add_comfyui_directory_to_sys_path()
    # Load ComfyUI's extra model-path config when present; tolerate its absence.
    try: from utils.extra_config import load_extra_path_config
    except ImportError: print("⚠️ ComfyUI의 extra_model_paths.yaml 로딩 실패 (무시하고 진행)"); load_extra_path_config = lambda x: None
    extra_model_paths_file = os.path.join(COMFYUI_BASE_PATH, "extra_model_paths.yaml")
    if os.path.exists(extra_model_paths_file): load_extra_path_config(extra_model_paths_file)
    print("ComfyUI 커스텀 노드 초기화 중..."); import_custom_nodes(); from nodes import NODE_CLASS_MAPPINGS; print("커스텀 노드 초기화 완료.")
    # Seed: -1 requests a fresh random seed; anything else is used verbatim.
    if args.noise_seed == -1: final_seed = random.randint(1, 2**64); print(f" - 랜덤 시드 생성: {final_seed}")
    else: final_seed = args.noise_seed; print(f" - 고정 시드 사용: {final_seed}")
    # Step index at which sampling hands over from the high- to the low-noise UNet.
    split_step = max(0, int(args.steps * (args.split_point_percent / 100.0))); print(f" - 총 {args.steps} 스텝 중 {split_step} ( {args.split_point_percent}% )에서 분할")
    loras_in_use = not (args.lora_high_1_name == "None" and args.lora_high_2_name == "None" and args.lora_low_1_name == "None" and args.lora_low_2_name == "None")
    # Keep the CLIP model resident through stage 3 only if a LoRA wants to patch it.
    connect_clip_to_lora = to_bool(args.connect_lora_clip); should_keep_clip_loaded = loras_in_use and connect_clip_to_lora
    with torch.inference_mode():
        # Instantiate every ComfyUI node class used by the pipeline up front.
        loadimage=NODE_CLASS_MAPPINGS["LoadImage"](); upscalemodelloader=NODE_CLASS_MAPPINGS["UpscaleModelLoader"](); cliploader=NODE_CLASS_MAPPINGS["CLIPLoader"](); vaeloader=NODE_CLASS_MAPPINGS["VAELoader"](); cliptextencode=NODE_CLASS_MAPPINGS["CLIPTextEncode"](); unetloadergguf=NODE_CLASS_MAPPINGS["UnetLoaderGGUF"](); loraloader=NODE_CLASS_MAPPINGS["LoraLoader"](); imageresizekjv2=NODE_CLASS_MAPPINGS["ImageResizeKJv2"](); wanimagetovideo=NODE_CLASS_MAPPINGS["WanImageToVideo"](); modelsamplingsd3=NODE_CLASS_MAPPINGS["ModelSamplingSD3"](); ksampleradvanced=NODE_CLASS_MAPPINGS["KSamplerAdvanced"](); vaedecode=NODE_CLASS_MAPPINGS["VAEDecode"](); vhs_loadimagespath=NODE_CLASS_MAPPINGS["VHS_LoadImagesPath"](); imageupscalewithmodel=NODE_CLASS_MAPPINGS["ImageUpscaleWithModel"](); imagescaleby=NODE_CLASS_MAPPINGS["ImageScaleBy"](); rife_vfi=NODE_CLASS_MAPPINGS["RIFE VFI"](); vhs_videocombine=NODE_CLASS_MAPPINGS["VHS_VideoCombine"](); saveimage=NODE_CLASS_MAPPINGS["SaveImage"]()
        clipvisionloader=NODE_CLASS_MAPPINGS["CLIPVisionLoader"]()
        clipvisionencode=NODE_CLASS_MAPPINGS["CLIPVisionEncode"]()
        pathchsageattentionkj=NODE_CLASS_MAPPINGS["PathchSageAttentionKJ"]()
        # --- Stage 1: load inputs, encode prompts + CLIP Vision, build the initial latent ---
        print("\n1단계: 데이터 로딩 및 초기 Latent 생성 중...");
        print(f" - CLIP 로딩: {args.clip_name}");
        cliploader_460 = cliploader.load_clip(clip_name=args.clip_name, type="wan", device="default");
        cliptextencode_462 = cliptextencode.encode(text=args.positive_prompt, clip=get_value_at_index(cliploader_460, 0));
        cliptextencode_463 = cliptextencode.encode(text=args.negative_prompt, clip=get_value_at_index(cliploader_460, 0));
        # NOTE(review): the start image filename is hard-coded; the notebook
        # presumably uploads it as ComfyUI/input/example.png — confirm in cell 4.
        loadimage_88 = loadimage.load_image(image="example.png");
        imageresizekjv2_401 = imageresizekjv2.resize(
            width=args.width, height=args.height,
            upscale_method=args.input_resize_algo,
            image=get_value_at_index(loadimage_88, 0), keep_proportion="crop",
            pad_color="0, 0, 0", crop_position="center", divisible_by=2,
            unique_id=random.randint(1, 2**64)
        );
        print(f" - CLIP Vision 로딩: clip_vision_h.safetensors");
        clipvisionloader_cv = clipvisionloader.load_clip(clip_name="clip_vision_h.safetensors");
        print(f" - CLIP Vision 인코딩 중...");
        clipvisionencode_cv = clipvisionencode.encode(
            crop="none",
            clip_vision=get_value_at_index(clipvisionloader_cv, 0),
            image=get_value_at_index(imageresizekjv2_401, 0)
        );
        clip_vision_output = get_value_at_index(clipvisionencode_cv, 0)
        # VAE is loaded temporarily just to build the initial latent, then freed.
        print(f" - VAE 임시 로딩 (초기 Latent 생성용): {args.vae_name}");
        vaeloader_temp = vaeloader.load_vae(vae_name=args.vae_name);
        wanimagetovideo_464 = wanimagetovideo.EXECUTE_NORMALIZED(
            width=get_value_at_index(imageresizekjv2_401, 1),
            height=get_value_at_index(imageresizekjv2_401, 2),
            length=args.length,
            batch_size=1,
            positive=get_value_at_index(cliptextencode_462, 0),
            negative=get_value_at_index(cliptextencode_463, 0),
            vae=get_value_at_index(vaeloader_temp, 0),
            clip_vision_output=clip_vision_output,
            start_image=get_value_at_index(imageresizekjv2_401, 0)
        );
        # Free CLIP immediately unless a LoRA needs it patched in stages 2/3.
        if not should_keep_clip_loaded: print(" ✨ (최적화) 1단계 완료, CLIP 모델을 즉시 해제합니다."); del cliploader_460
        else: print(" ⚠️ (설정) LoRA CLIP 연결 옵션이 활성화되어 3단계까지 CLIP 모델을 유지합니다.")
        print(" ✨ (최적화) 1단계 완료, 임시 VAE 및 CLIP Vision 모델을 해제합니다.");
        del vaeloader_temp, clipvisionloader_cv, clipvisionencode_cv, clip_vision_output;
        clear_memory(); print("1단계 완료.");
        # --- Stage 2: high-noise sampling (steps 0..split_step) ---
        print(f"\n2단계: High Noise 샘플링 시작..."); print(f" - UNet High 로딩: {args.unet_high_name}"); unetloadergguf_495 = unetloadergguf.load_unet(unet_name=args.unet_high_name); model = get_value_at_index(unetloadergguf_495, 0); clip = get_value_at_index(cliploader_460, 0) if should_keep_clip_loaded else None; model_for_patching = model;
        if to_bool(args.sageattention): print(" ✨ SageAttention 패치 적용 중 (High)..."); pathchsageattentionkj_124 = pathchsageattentionkj.patch(sage_attention="auto", model=model_for_patching); model_for_patching = get_value_at_index(pathchsageattentionkj_124, 0)
        if args.lora_high_1_name != "None": print(f" - H LoRA 1: {args.lora_high_1_name}"); model_for_patching, clip = loraloader.load_lora(lora_name=args.lora_high_1_name, strength_model=args.lora_high_1_strength_model, strength_clip=args.lora_high_1_strength_clip, model=model_for_patching, clip=clip)
        if args.lora_high_2_name != "None": print(f" - H LoRA 2: {args.lora_high_2_name}"); model_for_patching, clip = loraloader.load_lora(lora_name=args.lora_high_2_name, strength_model=args.lora_high_2_strength_model, strength_clip=args.lora_high_2_strength_clip, model=model_for_patching, clip=clip)
        shifted_model = get_value_at_index(modelsamplingsd3.patch(shift=args.shift, model=model_for_patching), 0); final_model = shifted_model;
        # High-noise pass uses cfg_high and leaves leftover noise for stage 3.
        ksampleradvanced_466 = ksampleradvanced.sample(add_noise="enable", noise_seed=final_seed, steps=args.steps, cfg=args.cfg_high, sampler_name=args.sampler_name_high, scheduler=args.scheduler_high, start_at_step=0, end_at_step=split_step, return_with_leftover_noise="enable", model=final_model, positive=get_value_at_index(wanimagetovideo_464, 0), negative=get_value_at_index(wanimagetovideo_464, 1), latent_image=get_value_at_index(wanimagetovideo_464, 2));
        if to_bool(args.sageattention): del pathchsageattentionkj_124
        del unetloadergguf_495, model, clip, model_for_patching, shifted_model, final_model; clear_memory(); print("2단계 완료.")
        # --- Stage 3: low-noise sampling (split_step..end, continues stage 2's latent) ---
        print(f"\n3단계: Low Noise 샘플링 시작..."); print(f" - UNet Low 로딩: {args.unet_low_name}"); unetloadergguf_496 = unetloadergguf.load_unet(unet_name=args.unet_low_name); model = get_value_at_index(unetloadergguf_496, 0); clip = get_value_at_index(cliploader_460, 0) if should_keep_clip_loaded else None; model_for_patching = model;
        if to_bool(args.sageattention): print(" ✨ SageAttention 패치 적용 중 (Low)..."); pathchsageattentionkj_129 = pathchsageattentionkj.patch(sage_attention="auto", model=model_for_patching); model_for_patching = get_value_at_index(pathchsageattentionkj_129, 0)
        if args.lora_low_1_name != "None": print(f" - L LoRA 1: {args.lora_low_1_name}"); model_for_patching, clip = loraloader.load_lora(lora_name=args.lora_low_1_name, strength_model=args.lora_low_1_strength_model, strength_clip=args.lora_low_1_strength_clip, model=model_for_patching, clip=clip)
        if args.lora_low_2_name != "None": print(f" - L LoRA 2: {args.lora_low_2_name}"); model_for_patching, clip = loraloader.load_lora(lora_name=args.lora_low_2_name, strength_model=args.lora_low_2_strength_model, strength_clip=args.lora_low_2_strength_clip, model=model_for_patching, clip=clip)
        shifted_model = get_value_at_index(modelsamplingsd3.patch(shift=args.shift, model=model_for_patching), 0); final_model = shifted_model;
        # Low-noise pass uses cfg_low; add_noise disabled since noise carries over.
        ksampleradvanced_465 = ksampleradvanced.sample(add_noise="disable", noise_seed=final_seed, steps=args.steps, cfg=args.cfg_low, sampler_name=args.sampler_name_low, scheduler=args.scheduler_low, start_at_step=split_step, end_at_step=10000, return_with_leftover_noise="disable", model=final_model, positive=get_value_at_index(wanimagetovideo_464, 0), negative=get_value_at_index(wanimagetovideo_464, 1), latent_image=get_value_at_index(ksampleradvanced_466, 0));
        if to_bool(args.sageattention): del pathchsageattentionkj_129
        if should_keep_clip_loaded: print(" ✨ (메모리) LoRA CLIP 연결 옵션 사용 완료, CLIP 모델을 해제합니다."); del cliploader_460
        del unetloadergguf_496, model, clip, model_for_patching, shifted_model, final_model, ksampleradvanced_466, wanimagetovideo_464; clear_memory(); print("3단계 완료.")
        # --- Stage 4: VAE decode and save frames to output/temp ---
        print(f"\n4단계: VAE 디코딩 및 임시 저장 중..."); print(f" - VAE 모델 로딩 (디코딩용): {args.vae_name}"); vaeloader_461 = vaeloader.load_vae(vae_name=args.vae_name); vaedecode_469 = vaedecode.decode(samples=get_value_at_index(ksampleradvanced_465, 0), vae=get_value_at_index(vaeloader_461, 0)); saveimage.save_images(filename_prefix="temp/example", images=get_value_at_index(vaedecode_469, 0));
        del ksampleradvanced_465, vaeloader_461, vaedecode_469, loadimage_88, imageresizekjv2_401; clear_memory(); print("4단계 완료.")
        combine_input_dir_for_ffmpeg = f"{COMFYUI_BASE_PATH}/output/temp"
        # --- Stage 5: optional chunked model upscaling of the decoded frames ---
        if args.upscale_ratio > 1:
            if args.upscale_model_name == "None": print("\n5단계: 업스케일링 건너뜀 (모델이 선택되지 않음).")
            else:
                # scale_by_ratio rescales the model's fixed output factor to the requested ratio.
                print(f"\n5단계: 프레임 업스케일링 중..."); print(f" - Upscale 모델 로딩: {args.upscale_model_name}"); upscalemodelloader_384 = upscalemodelloader.load_model(model_name=args.upscale_model_name); chunk_size = args.upscale_chunk_size; base_dir = f"{COMFYUI_BASE_PATH}/output/temp"; scale_by_ratio = args.upscale_ratio / args.upscale_model_scale;
                total_frames = 0
                try:
                    temp_files = [f for f in os.listdir(base_dir) if f.endswith(('.png', '.jpg', '.jpeg', '.webp'))]
                    total_frames = len(temp_files)
                    if total_frames == 0:
                        raise FileNotFoundError("업스케일할 프레임이 'temp' 폴더에 없습니다.")
                except Exception as e:
                    # Fatal: stage 4 should have produced frames; free the model and abort.
                    print(f" ❌ 업스케일 5단계 중단: 'temp' 폴더에서 프레임을 읽을 수 없습니다. (오류: {e})")
                    if 'upscalemodelloader_384' in locals(): del upscalemodelloader_384
                    clear_memory()
                    raise
                print(f" - 총 {total_frames}개의 프레임을 {chunk_size}개 단위로 분할하여 실행합니다...")
                # Process in chunks to bound peak VRAM.
                for i in range(0, total_frames, chunk_size):
                    print(f" - 배치 처리 중 (프레임 {i} ~ {min(i + chunk_size, total_frames) - 1})...")
                    vhs_load_chunk = vhs_loadimagespath.load_images(directory=base_dir, skip_first_images=i, image_load_cap=chunk_size); loaded_images = get_value_at_index(vhs_load_chunk, 0);
                    if loaded_images is None: print(" - (경고) 건너뛸 수 없는 이미지가 로드되었습니다, 이 배치를 건너뜁니다."); continue
                    imageupscale_chunk = imageupscalewithmodel.upscale(upscale_model=get_value_at_index(upscalemodelloader_384, 0), image=loaded_images);
                    imagescale_chunk = imagescaleby.upscale(
                        upscale_method=args.output_resize_algo,
                        scale_by=scale_by_ratio,
                        image=get_value_at_index(imageupscale_chunk, 0)
                    );
                    saveimage.save_images(filename_prefix="up/example", images=get_value_at_index(imagescale_chunk, 0));
                    del vhs_load_chunk, loaded_images, imageupscale_chunk, imagescale_chunk; clear_memory()
                del upscalemodelloader_384; clear_memory(); combine_input_dir_for_ffmpeg = f"{COMFYUI_BASE_PATH}/output/up"; print("5단계 완료.")
        else: print("\n5단계: 업스케일링 건너뜀 (비율 1.0).")
        # --- Stage 6: optional RIFE interpolation, chunked with a 1-frame overlap ---
        print("\n6단계: 비디오 결합 준비 중..."); final_frame_rate = float(args.frame_rate); ffmpeg_input_dir = combine_input_dir_for_ffmpeg
        if to_bool(args.interpolation):
            print(" - 프레임 보간 (RIFE)을 활성화합니다."); interpolated_dir = f"{COMFYUI_BASE_PATH}/output/interpolated"; source_dir = combine_input_dir_for_ffmpeg
            total_frames_rife = 0
            try:
                temp_files = [f for f in os.listdir(source_dir) if f.endswith(('.png', '.jpg', '.jpeg', '.webp'))]; total_frames_rife = len(temp_files);
                if total_frames_rife == 0: raise FileNotFoundError(f"RIFE 보간할 프레임이 '{source_dir}' 폴더에 없습니다.")
            except Exception as e: print(f" ❌ RIFE 6단계 중단: '{source_dir}' 폴더에서 프레임을 읽을 수 없습니다. (오류: {e})"); raise
            chunk_size = args.rife_chunk_size;
            print(f" - 총 {total_frames_rife}개의 프레임을 RIFE 청크 {chunk_size}개 단위로 분할하여 실행합니다 (Overlap 적용)...")
            current_frame_idx = 0
            is_first_chunk = True
            while current_frame_idx < total_frames_rife:
                load_from = current_frame_idx
                load_cap = chunk_size
                if not is_first_chunk:
                    load_from -= 1  # overlap by one frame so chunk seams interpolate
                    load_cap += 1  # load one extra frame to cover the overlap
                # Clamp at the final chunk boundary.
                if load_from + load_cap > total_frames_rife:
                    load_cap = total_frames_rife - load_from
                # RIFE needs at least two frames to interpolate between.
                if load_cap < 2:
                    print(f" - (경고) RIFE 처리에 필요한 프레임(2개)이 부족하여 마지막 배치를 건너뜁니다.")
                    break
                print(f" - RIFE 배치 처리 중 (원본 프레임 {load_from} ~ {load_from + load_cap - 1})...")
                vhs_load_chunk = vhs_loadimagespath.load_images(directory=source_dir, skip_first_images=load_from, image_load_cap=load_cap);
                loaded_images = get_value_at_index(vhs_load_chunk, 0);
                if loaded_images is None:
                    print(" - (경고) 건너뛸 수 없는 이미지가 로드되었습니다, 이 배치를 건너뜁니다.");
                    current_frame_idx += chunk_size
                    is_first_chunk = False
                    continue
                # 2x interpolation via the rife49 checkpoint.
                rife_chunk_result_tensor = get_value_at_index(rife_vfi.vfi(
                    ckpt_name="rife49.pth",
                    multiplier=2,
                    fast_mode=to_bool(args.rife_fast_mode),
                    ensemble=to_bool(args.rife_ensemble),
                    frames=loaded_images
                ), 0)
                images_to_save = rife_chunk_result_tensor
                if not is_first_chunk:
                    # Every chunk after the first drops its duplicated leading frame (tensor slice).
                    print(f" - (Overlap) 중복 프레임 1개 제거 후 저장")
                    images_to_save = rife_chunk_result_tensor[1:]
                saveimage.save_images(filename_prefix="interpolated/example", images=images_to_save);
                del vhs_load_chunk, loaded_images, rife_chunk_result_tensor, images_to_save; clear_memory()
                current_frame_idx += chunk_size
                is_first_chunk = False
            # Interpolation doubled the frame count, so double the output frame rate too.
            ffmpeg_input_dir = interpolated_dir; final_frame_rate *= 2
        else: print(" - 프레임 보간이 비활성화되었습니다.");
        # --- Final assembly: FFmpeg encode of the selected frame folder ---
        print(f" - 최종 비디오를 FFmpeg ({args.video_encoder})로 결합합니다..."); print(f" - 입력 폴더: '{ffmpeg_input_dir}'")
        # Pattern matches SaveImage's zero-padded "example_00001_.png" naming.
        input_pattern = os.path.join(ffmpeg_input_dir, "example_%05d_.png")
        timestamp = time.strftime("%Y%m%d-%H%M%S"); output_filename = f"AnimateDiff_{timestamp}.mp4"; output_path = os.path.join(COMFYUI_BASE_PATH, "output", output_filename)
        ffmpeg_cmd = ["ffmpeg", "-framerate", str(final_frame_rate), "-i", input_pattern]
        encoder_choice = args.video_encoder
        # hvc1 tag makes HEVC output playable in Apple players.
        if encoder_choice == "GPU: HEVC (NVENC)": ffmpeg_cmd.extend(["-c:v", "hevc_nvenc", "-cq", str(args.nvenc_cq), "-preset", args.nvenc_preset, "-tag:v", "hvc1"])
        elif encoder_choice == "GPU: H.264 (NVENC)": ffmpeg_cmd.extend(["-c:v", "h264_nvenc", "-cq", str(args.nvenc_cq), "-preset", args.nvenc_preset])
        else: ffmpeg_cmd.extend(["-c:v", "libx264", "-crf", str(args.cpu_crf), "-preset", "medium"])
        ffmpeg_cmd.extend(["-pix_fmt", "yuv420p", "-y", output_path])
        print(f" - 실행 명령어: {' '.join(ffmpeg_cmd)}")
        try:
            result = subprocess.run(ffmpeg_cmd, capture_output=True, text=True, check=True, encoding='utf-8')
            print(" - FFmpeg 실행 완료.")
        except FileNotFoundError: print(" ❌ 오류: 'ffmpeg' 명령어를 찾을 수 없습니다. 시스템에 설치되어 있는지 확인하세요."); raise
        except subprocess.CalledProcessError as e:
            print(f" ❌ 오류: FFmpeg 실행 실패 (Return code: {e.returncode})")
            if e.stdout: print(f" FFmpeg stdout:\n{e.stdout}")
            if e.stderr: print(f" FFmpeg stderr:\n{e.stderr}")
            raise
        except Exception as e: print(f" ❌ 오류: FFmpeg 실행 중 예상치 못한 오류 발생: {e}"); raise
        print("✅ 모든 단계 완료.")
        # --- Resolve the final video path (fallback scan) and create a backup copy ---
        latest_video = None
        if os.path.exists(output_path):
            latest_video = output_path
            print(f"LATEST_VIDEO_PATH:{latest_video}")
        else:
            # Fallback: newest video anywhere under output/ (e.g. if FFmpeg wrote elsewhere).
            output_dir = os.path.join(COMFYUI_BASE_PATH, "output");
            video_files = glob.glob(os.path.join(output_dir, '**', '*.mp4'), recursive=True) + \
                          glob.glob(os.path.join(output_dir, '**', '*.mkv'), recursive=True)
            if not video_files:
                raise FileNotFoundError("생성된 동영상 파일을 찾을 수 없습니다!")
            latest_video = max(video_files, key=os.path.getctime)
            print(f"LATEST_VIDEO_PATH:{latest_video}")
        if latest_video is None:
            raise FileNotFoundError("최종 비디오 경로를 확정할 수 없습니다. 스크립트를 확인하세요.")
        # Keep a "*_original" copy so later post-processing can't destroy the source.
        base, ext = os.path.splitext(latest_video)
        original_copy_path = f"{base}_original{ext}"
        try:
            shutil.copy2(latest_video, original_copy_path)
            print(f"✅ 원본 복사본 생성 완료: {original_copy_path}")
            print(f"ORIGINAL_COPY_PATH:{original_copy_path}")
        except Exception as e:
            # Non-fatal: the main video still exists even if the backup copy failed.
            print(f"❌ 원본 복사본 생성 실패: {e}")
# Entry point: this script is launched as a subprocess by the notebook cell.
if __name__ == "__main__":
    main()