arcacolab committed on
Commit
5ce455c
·
verified ·
1 Parent(s): d581aaa

Upload run_generator.py

Browse files
Files changed (1) hide show
  1. run_generator.py +362 -0
run_generator.py ADDED
@@ -0,0 +1,362 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #@title ν•„μš” 파일 생성
2
+ # 이 νŒŒμΌμ€ μ…€ 4μ—μ„œ μ„œλΈŒν”„λ‘œμ„ΈμŠ€λ‘œ μ‹€ν–‰λ©λ‹ˆλ‹€.
3
+
4
+ import sys
5
+ import os
6
+ import time
7
+ import glob
8
+ import gc
9
+ import torch
10
+ import subprocess
11
+ import random
12
+ import argparse
13
+ from typing import Sequence, Mapping, Any, Union
14
+ import shutil # ✨ 볡사본 생성을 μœ„ν•΄ shutil μž„ν¬νŠΈ
15
+
16
# --- 0. Basic setup and argument parsing ---
17
+ def parse_args():
18
+ parser = argparse.ArgumentParser(description="ComfyUI Video Generation Script with All Controls from 1.py")
19
+ parser.add_argument("--positive_prompt", type=str, required=True); parser.add_argument("--negative_prompt", type=str, required=True)
20
+ parser.add_argument("--width", type=int, required=True); parser.add_argument("--height", type=int, required=True)
21
+ parser.add_argument("--length", type=int, required=True); parser.add_argument("--upscale_ratio", type=float, required=True)
22
+
23
+ # ✨ μˆ˜μ •: CFG 인자 뢄리
24
+ parser.add_argument("--steps", type=int, default=4)
25
+ parser.add_argument("--cfg_high", type=float, default=1.0) # High
26
+ parser.add_argument("--cfg_low", type=float, default=1.0) # Low
27
+
28
+ parser.add_argument("--sampler_name_high", type=str, default="euler"); parser.add_argument("--scheduler_high", type=str, default="simple")
29
+ parser.add_argument("--sampler_name_low", type=str, default="euler"); parser.add_argument("--scheduler_low", type=str, default="simple")
30
+ parser.add_argument("--noise_seed", type=int, default=-1); parser.add_argument("--split_point_percent", type=float, default=50.0)
31
+ parser.add_argument("--shift", type=float, default=8.0); parser.add_argument("--sageattention", type=str, default="on")
32
+ parser.add_argument("--unet_high_name", type=str, required=True); parser.add_argument("--unet_low_name", type=str, required=True)
33
+ parser.add_argument("--vae_name", type=str, required=True); parser.add_argument("--clip_name", type=str, required=True)
34
+ parser.add_argument("--upscale_model_name", type=str, default="None")
35
+ parser.add_argument("--upscale_model_scale", type=float, default=2.0)
36
+ parser.add_argument("--upscale_chunk_size", type=int, default=30)
37
+ parser.add_argument("--frame_rate", type=int, default=16); parser.add_argument("--interpolation", type=str, default="on")
38
+ parser.add_argument("--rife_fast_mode", type=str, default="on"); parser.add_argument("--rife_ensemble", type=str, default="on")
39
+ parser.add_argument("--rife_chunk_size", type=int, default=30)
40
+ parser.add_argument("--connect_lora_clip", type=str, default="off")
41
+ parser.add_argument("--video_encoder", type=str, default="GPU: HEVC (NVENC)"); parser.add_argument("--nvenc_cq", type=int, default=25); parser.add_argument("--nvenc_preset", type=str, default="p5"); parser.add_argument("--cpu_crf", type=int, default=19) # FFmpeg
42
+ parser.add_argument("--lora_high_1_name", type=str, default="None"); parser.add_argument("--lora_high_1_strength_model", type=float, default=1.0); parser.add_argument("--lora_high_1_strength_clip", type=float, default=1.0)
43
+ parser.add_argument("--lora_high_2_name", type=str, default="None"); parser.add_argument("--lora_high_2_strength_model", type=float, default=1.0); parser.add_argument("--lora_high_2_strength_clip", type=float, default=1.0)
44
+ parser.add_argument("--lora_low_1_name", type=str, default="None"); parser.add_argument("--lora_low_1_strength_model", type=float, default=1.0); parser.add_argument("--lora_low_1_strength_clip", type=float, default=1.0)
45
+ parser.add_argument("--lora_low_2_name", type=str, default="None"); parser.add_argument("--lora_low_2_strength_model", type=float, default=1.0); parser.add_argument("--lora_low_2_strength_clip", type=float, default=1.0)
46
+ parser.add_argument("--input_resize_algo", type=str, default="bicubic")
47
+ parser.add_argument("--output_resize_algo", type=str, default="bicubic")
48
+ return parser.parse_args()
49
+
50
+ def to_bool(s: str) -> bool: return s.lower() in ['true', '1', 't', 'y', 'yes', 'on']
51
+ def clear_memory():
52
+ if torch.cuda.is_available(): torch.cuda.empty_cache(); torch.cuda.ipc_collect()
53
+ gc.collect()
54
+ COMFYUI_BASE_PATH = '/content/ComfyUI'
55
+ def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
56
+ try: return obj[index]
57
+ except (KeyError, TypeError):
58
+ if isinstance(obj, dict) and "result" in obj: return obj["result"][index]
59
+ raise
60
+ def add_comfyui_directory_to_sys_path() -> None:
61
+ if os.path.isdir(COMFYUI_BASE_PATH) and COMFYUI_BASE_PATH not in sys.path: sys.path.append(COMFYUI_BASE_PATH)
62
+ def import_custom_nodes() -> None:
63
+ try:
64
+ import nest_asyncio
65
+ nest_asyncio.apply()
66
+ except ImportError:
67
+ print("nest_asyncio not found, installing...")
68
+ try:
69
+ subprocess.run([sys.executable, "-m", "pip", "install", "-q", "nest_asyncio"], check=True)
70
+ import nest_asyncio
71
+ nest_asyncio.apply()
72
+ print("nest_asyncio installed and applied.")
73
+ except Exception as e:
74
+ print(f"Failed to install or apply nest_asyncio: {e}")
75
+ import asyncio, execution, server
76
+ from nodes import init_extra_nodes
77
+ try:
78
+ loop = asyncio.get_event_loop()
79
+ if loop.is_closed():
80
+ loop = asyncio.new_event_loop()
81
+ asyncio.set_event_loop(loop)
82
+ except RuntimeError:
83
+ loop = asyncio.new_event_loop()
84
+ asyncio.set_event_loop(loop)
85
+ server_instance = server.PromptServer(loop)
86
+ execution.PromptQueue(server_instance)
87
+ if not loop.is_running():
88
+ try:
89
+ loop.run_until_complete(init_extra_nodes())
90
+ except RuntimeError as e:
91
+ print(f"Note: Could not run init_extra_nodes synchronously, possibly due to existing loop state: {e}")
92
+ try:
93
+ asyncio.ensure_future(init_extra_nodes())
94
+ except Exception as fut_e:
95
+ print(f"Error trying async init_extra_nodes: {fut_e}")
96
+ else:
97
+ try:
98
+ asyncio.ensure_future(init_extra_nodes())
99
+ except Exception as fut_e:
100
+ print(f"Error trying async init_extra_nodes on running loop: {fut_e}")
101
+
102
+
103
+ def main():
104
+ args = parse_args()
105
+ print("πŸš€ λ™μ˜μƒ 생성을 μ‹œμž‘ν•©λ‹ˆλ‹€ (Full Control Mode, VRAM Optimized)...\n")
106
+ # ✨ SyntaxError μˆ˜μ •: f-string 끝의 λΆˆν•„μš”ν•œ \ 제거
107
+ subprocess.run(f"rm -rf {COMFYUI_BASE_PATH}/output/*", shell=True, check=True)
108
+ os.makedirs(f"{COMFYUI_BASE_PATH}/output/temp", exist_ok=True); os.makedirs(f"{COMFYUI_BASE_PATH}/output/up", exist_ok=True)
109
+ os.makedirs(f"{COMFYUI_BASE_PATH}/output/interpolated", exist_ok=True)
110
+ add_comfyui_directory_to_sys_path()
111
+ try: from utils.extra_config import load_extra_path_config
112
+ except ImportError: print("⚠️ ComfyUI의 extra_model_paths.yaml λ‘œλ”© μ‹€νŒ¨ (λ¬΄μ‹œν•˜κ³  μ§„ν–‰)"); load_extra_path_config = lambda x: None
113
+ extra_model_paths_file = os.path.join(COMFYUI_BASE_PATH, "extra_model_paths.yaml")
114
+ if os.path.exists(extra_model_paths_file): load_extra_path_config(extra_model_paths_file)
115
+ print("ComfyUI μ»€μŠ€ν…€ λ…Έλ“œ μ΄ˆκΈ°ν™” 쀑..."); import_custom_nodes(); from nodes import NODE_CLASS_MAPPINGS; print("μ»€μŠ€ν…€ λ…Έλ“œ μ΄ˆκΈ°ν™” μ™„λ£Œ.")
116
+
117
+ if args.noise_seed == -1: final_seed = random.randint(1, 2**64); print(f" - 랜덀 μ‹œλ“œ 생성: {final_seed}")
118
+ else: final_seed = args.noise_seed; print(f" - κ³ μ • μ‹œλ“œ μ‚¬μš©: {final_seed}")
119
+ split_step = max(0, int(args.steps * (args.split_point_percent / 100.0))); print(f" - 총 {args.steps} μŠ€ν… 쀑 {split_step} ( {args.split_point_percent}% )μ—μ„œ λΆ„ν• ")
120
+ loras_in_use = not (args.lora_high_1_name == "None" and args.lora_high_2_name == "None" and args.lora_low_1_name == "None" and args.lora_low_2_name == "None")
121
+ connect_clip_to_lora = to_bool(args.connect_lora_clip); should_keep_clip_loaded = loras_in_use and connect_clip_to_lora
122
+
123
+ with torch.inference_mode():
124
+ loadimage=NODE_CLASS_MAPPINGS["LoadImage"](); upscalemodelloader=NODE_CLASS_MAPPINGS["UpscaleModelLoader"](); cliploader=NODE_CLASS_MAPPINGS["CLIPLoader"](); vaeloader=NODE_CLASS_MAPPINGS["VAELoader"](); cliptextencode=NODE_CLASS_MAPPINGS["CLIPTextEncode"](); unetloadergguf=NODE_CLASS_MAPPINGS["UnetLoaderGGUF"](); loraloader=NODE_CLASS_MAPPINGS["LoraLoader"](); imageresizekjv2=NODE_CLASS_MAPPINGS["ImageResizeKJv2"](); wanimagetovideo=NODE_CLASS_MAPPINGS["WanImageToVideo"](); modelsamplingsd3=NODE_CLASS_MAPPINGS["ModelSamplingSD3"](); ksampleradvanced=NODE_CLASS_MAPPINGS["KSamplerAdvanced"](); vaedecode=NODE_CLASS_MAPPINGS["VAEDecode"](); vhs_loadimagespath=NODE_CLASS_MAPPINGS["VHS_LoadImagesPath"](); imageupscalewithmodel=NODE_CLASS_MAPPINGS["ImageUpscaleWithModel"](); imagescaleby=NODE_CLASS_MAPPINGS["ImageScaleBy"](); rife_vfi=NODE_CLASS_MAPPINGS["RIFE VFI"](); vhs_videocombine=NODE_CLASS_MAPPINGS["VHS_VideoCombine"](); saveimage=NODE_CLASS_MAPPINGS["SaveImage"]()
125
+ clipvisionloader=NODE_CLASS_MAPPINGS["CLIPVisionLoader"]()
126
+ clipvisionencode=NODE_CLASS_MAPPINGS["CLIPVisionEncode"]()
127
+ pathchsageattentionkj=NODE_CLASS_MAPPINGS["PathchSageAttentionKJ"]()
128
+
129
+ # --- ✨ 1단계: CLIP Vision 둜직 μΆ”κ°€ 및 λ©”λͺ¨λ¦¬ ν•΄μ œ ---\n",
130
+ print("\n1단계: 데이터 λ‘œλ”© 및 초기 Latent 생성 쀑...");
131
+ print(f" - CLIP λ‘œλ”©: {args.clip_name}");
132
+ # ✨ SyntaxError μˆ˜μ •: λΆˆν•„μš”ν•œ \ 제거
133
+ cliploader_460 = cliploader.load_clip(clip_name=args.clip_name, type="wan", device="default");
134
+ cliptextencode_462 = cliptextencode.encode(text=args.positive_prompt, clip=get_value_at_index(cliploader_460, 0));
135
+ cliptextencode_463 = cliptextencode.encode(text=args.negative_prompt, clip=get_value_at_index(cliploader_460, 0));
136
+ loadimage_88 = loadimage.load_image(image="example.png");
137
+
138
+ imageresizekjv2_401 = imageresizekjv2.resize(
139
+ width=args.width, height=args.height,
140
+ upscale_method=args.input_resize_algo, # ✨ 변경됨
141
+ image=get_value_at_index(loadimage_88, 0), keep_proportion="crop",
142
+ pad_color="0, 0, 0", crop_position="center", divisible_by=2,
143
+ unique_id=random.randint(1, 2**64)
144
+ );
145
+
146
+ print(f" - CLIP Vision λ‘œλ”©: clip_vision_h.safetensors");
147
+ clipvisionloader_cv = clipvisionloader.load_clip(clip_name="clip_vision_h.safetensors");
148
+ print(f" - CLIP Vision 인코딩 쀑...");
149
+ clipvisionencode_cv = clipvisionencode.encode(
150
+ crop="none",
151
+ clip_vision=get_value_at_index(clipvisionloader_cv, 0),
152
+ image=get_value_at_index(imageresizekjv2_401, 0) # λ¦¬μ‚¬μ΄μ¦ˆλœ 이미지 μ‚¬μš©
153
+ );
154
+ clip_vision_output = get_value_at_index(clipvisionencode_cv, 0)
155
+
156
+ print(f" - VAE μž„μ‹œ λ‘œλ”© (초기 Latent μƒμ„±μš©): {args.vae_name}");
157
+ vaeloader_temp = vaeloader.load_vae(vae_name=args.vae_name);
158
+
159
+ wanimagetovideo_464 = wanimagetovideo.EXECUTE_NORMALIZED(
160
+ width=get_value_at_index(imageresizekjv2_401, 1),
161
+ height=get_value_at_index(imageresizekjv2_401, 2),
162
+ length=args.length,
163
+ batch_size=1,
164
+ positive=get_value_at_index(cliptextencode_462, 0),
165
+ negative=get_value_at_index(cliptextencode_463, 0),
166
+ vae=get_value_at_index(vaeloader_temp, 0),
167
+ clip_vision_output=clip_vision_output, # ✨ μΆ”κ°€λœ 인수
168
+ start_image=get_value_at_index(imageresizekjv2_401, 0)
169
+ );
170
+
171
+ if not should_keep_clip_loaded: print(" ✨ (μ΅œμ ν™”) 1단계 μ™„λ£Œ, CLIP λͺ¨λΈμ„ μ¦‰μ‹œ ν•΄μ œν•©λ‹ˆλ‹€."); del cliploader_460
172
+ else: print(" ⚠️ (μ„€μ •) LoRA CLIP μ—°κ²° μ˜΅μ…˜μ΄ ν™œμ„±ν™”λ˜μ–΄ 3λ‹¨κ³„κΉŒμ§€ CLIP λͺ¨λΈμ„ μœ μ§€ν•©λ‹ˆλ‹€.")
173
+
174
+ print(" ✨ (μ΅œμ ν™”) 1단계 μ™„λ£Œ, μž„μ‹œ VAE 및 CLIP Vision λͺ¨λΈμ„ ν•΄μ œν•©λ‹ˆλ‹€.");
175
+ del vaeloader_temp, clipvisionloader_cv, clipvisionencode_cv, clip_vision_output;
176
+
177
+ clear_memory(); print("1단계 μ™„λ£Œ.");
178
+ # --- 1단계 μˆ˜μ • μ™„λ£Œ ---
179
+
180
+ print(f"\n2단계: High Noise μƒ˜ν”Œλ§ μ‹œμž‘..."); print(f" - UNet High λ‘œλ”©: {args.unet_high_name}"); unetloadergguf_495 = unetloadergguf.load_unet(unet_name=args.unet_high_name); model = get_value_at_index(unetloadergguf_495, 0); clip = get_value_at_index(cliploader_460, 0) if should_keep_clip_loaded else None; model_for_patching = model;
181
+ if to_bool(args.sageattention): print(" ✨ SageAttention 패치 적용 쀑 (High)..."); pathchsageattentionkj_124 = pathchsageattentionkj.patch(sage_attention="auto", model=model_for_patching); model_for_patching = get_value_at_index(pathchsageattentionkj_124, 0)
182
+ if args.lora_high_1_name != "None": print(f" - H LoRA 1: {args.lora_high_1_name}"); model_for_patching, clip = loraloader.load_lora(lora_name=args.lora_high_1_name, strength_model=args.lora_high_1_strength_model, strength_clip=args.lora_high_1_strength_clip, model=model_for_patching, clip=clip)
183
+ if args.lora_high_2_name != "None": print(f" - H LoRA 2: {args.lora_high_2_name}"); model_for_patching, clip = loraloader.load_lora(lora_name=args.lora_high_2_name, strength_model=args.lora_high_2_strength_model, strength_clip=args.lora_high_2_strength_clip, model=model_for_patching, clip=clip)
184
+ shifted_model = get_value_at_index(modelsamplingsd3.patch(shift=args.shift, model=model_for_patching), 0); final_model = shifted_model;
185
+
186
+ # ✨ μˆ˜μ •: cfg=args.cfg_high μ‚¬μš©
187
+ ksampleradvanced_466 = ksampleradvanced.sample(add_noise="enable", noise_seed=final_seed, steps=args.steps, cfg=args.cfg_high, sampler_name=args.sampler_name_high, scheduler=args.scheduler_high, start_at_step=0, end_at_step=split_step, return_with_leftover_noise="enable", model=final_model, positive=get_value_at_index(wanimagetovideo_464, 0), negative=get_value_at_index(wanimagetovideo_464, 1), latent_image=get_value_at_index(wanimagetovideo_464, 2));
188
+
189
+ if to_bool(args.sageattention): del pathchsageattentionkj_124
190
+ del unetloadergguf_495, model, clip, model_for_patching, shifted_model, final_model; clear_memory(); print("2단계 μ™„λ£Œ.")
191
+
192
+ print(f"\n3단계: Low Noise μƒ˜ν”Œλ§ μ‹œμž‘..."); print(f" - UNet Low λ‘œλ”©: {args.unet_low_name}"); unetloadergguf_496 = unetloadergguf.load_unet(unet_name=args.unet_low_name); model = get_value_at_index(unetloadergguf_496, 0); clip = get_value_at_index(cliploader_460, 0) if should_keep_clip_loaded else None; model_for_patching = model;
193
+ if to_bool(args.sageattention): print(" ✨ SageAttention 패치 적용 쀑 (Low)..."); pathchsageattentionkj_129 = pathchsageattentionkj.patch(sage_attention="auto", model=model_for_patching); model_for_patching = get_value_at_index(pathchsageattentionkj_129, 0)
194
+ if args.lora_low_1_name != "None": print(f" - L LoRA 1: {args.lora_low_1_name}"); model_for_patching, clip = loraloader.load_lora(lora_name=args.lora_low_1_name, strength_model=args.lora_low_1_strength_model, strength_clip=args.lora_low_1_strength_clip, model=model_for_patching, clip=clip)
195
+ if args.lora_low_2_name != "None": print(f" - L LoRA 2: {args.lora_low_2_name}"); model_for_patching, clip = loraloader.load_lora(lora_name=args.lora_low_2_name, strength_model=args.lora_low_2_strength_model, strength_clip=args.lora_low_2_strength_clip, model=model_for_patching, clip=clip)
196
+ shifted_model = get_value_at_index(modelsamplingsd3.patch(shift=args.shift, model=model_for_patching), 0); final_model = shifted_model;
197
+
198
+ # ✨ μˆ˜μ •: cfg=args.cfg_low μ‚¬μš©
199
+ ksampleradvanced_465 = ksampleradvanced.sample(add_noise="disable", noise_seed=final_seed, steps=args.steps, cfg=args.cfg_low, sampler_name=args.sampler_name_low, scheduler=args.scheduler_low, start_at_step=split_step, end_at_step=10000, return_with_leftover_noise="disable", model=final_model, positive=get_value_at_index(wanimagetovideo_464, 0), negative=get_value_at_index(wanimagetovideo_464, 1), latent_image=get_value_at_index(ksampleradvanced_466, 0));
200
+
201
+ if to_bool(args.sageattention): del pathchsageattentionkj_129
202
+ if should_keep_clip_loaded: print(" ✨ (λ©”λͺ¨λ¦¬) LoRA CLIP μ—°κ²° μ˜΅μ…˜ μ‚¬μš© μ™„λ£Œ, CLIP λͺ¨λΈμ„ ν•΄μ œν•©λ‹ˆλ‹€."); del cliploader_460
203
+ del unetloadergguf_496, model, clip, model_for_patching, shifted_model, final_model, ksampleradvanced_466, wanimagetovideo_464; clear_memory(); print("3단계 μ™„λ£Œ.")
204
+
205
+ print(f"\n4단계: VAE λ””μ½”λ”© 및 μž„μ‹œ μ €μž₯ 쀑..."); print(f" - VAE λͺ¨λΈ λ‘œλ”© (λ””μ½”λ”©μš©): {args.vae_name}"); vaeloader_461 = vaeloader.load_vae(vae_name=args.vae_name); vaedecode_469 = vaedecode.decode(samples=get_value_at_index(ksampleradvanced_465, 0), vae=get_value_at_index(vaeloader_461, 0)); saveimage.save_images(filename_prefix="temp/example", images=get_value_at_index(vaedecode_469, 0));
206
+ del ksampleradvanced_465, vaeloader_461, vaedecode_469, loadimage_88, imageresizekjv2_401; clear_memory(); print("4단계 μ™„λ£Œ.")
207
+
208
+ combine_input_dir_for_ffmpeg = f"{COMFYUI_BASE_PATH}/output/temp"
209
+ if args.upscale_ratio > 1:
210
+ if args.upscale_model_name == "None": print("\n5단계: μ—…μŠ€μΌ€μΌλ§ κ±΄λ„ˆλœ€ (λͺ¨λΈμ΄ μ„ νƒλ˜μ§€ μ•ŠμŒ).")
211
+ else:
212
+ print(f"\n5단계: ν”„λ ˆμž„ μ—…μŠ€μΌ€μΌλ§ 쀑..."); print(f" - Upscale λͺ¨λΈ λ‘œλ”©: {args.upscale_model_name}"); upscalemodelloader_384 = upscalemodelloader.load_model(model_name=args.upscale_model_name); chunk_size = args.upscale_chunk_size; base_dir = f"{COMFYUI_BASE_PATH}/output/temp"; scale_by_ratio = args.upscale_ratio / args.upscale_model_scale;
213
+ total_frames = 0
214
+ try:
215
+ temp_files = [f for f in os.listdir(base_dir) if f.endswith(('.png', '.jpg', '.jpeg', '.webp'))]
216
+ total_frames = len(temp_files)
217
+ if total_frames == 0:
218
+ raise FileNotFoundError("μ—…μŠ€μΌ€μΌν•  ν”„λ ˆμž„μ΄ 'temp' 폴더에 μ—†μŠ΅λ‹ˆλ‹€.")
219
+ except Exception as e:
220
+ print(f" ❌ μ—…μŠ€μΌ€μΌ 5단계 쀑단: 'temp' ν΄λ”μ—μ„œ ν”„λ ˆμž„μ„ 읽을 수 μ—†μŠ΅λ‹ˆλ‹€. (였λ₯˜: {e})")
221
+ if 'upscalemodelloader_384' in locals(): del upscalemodelloader_384
222
+ clear_memory()
223
+ raise
224
+ print(f" - 총 {total_frames}개의 ν”„λ ˆμž„μ„ {chunk_size}개 λ‹¨μœ„λ‘œ λΆ„ν• ν•˜μ—¬ μ‹€ν–‰ν•©λ‹ˆλ‹€...")
225
+ for i in range(0, total_frames, chunk_size):
226
+ print(f" - 배치 처리 쀑 (ν”„λ ˆμž„ {i} ~ {min(i + chunk_size, total_frames) - 1})...")
227
+ vhs_load_chunk = vhs_loadimagespath.load_images(directory=base_dir, skip_first_images=i, image_load_cap=chunk_size); loaded_images = get_value_at_index(vhs_load_chunk, 0);
228
+ if loaded_images is None: print(" - (κ²½κ³ ) κ±΄λ„ˆλ›Έ 수 μ—†λŠ” 이미지가 λ‘œλ“œλ˜μ—ˆμŠ΅λ‹ˆλ‹€, 이 배치λ₯Ό κ±΄λ„ˆλœλ‹ˆλ‹€."); continue
229
+ imageupscale_chunk = imageupscalewithmodel.upscale(upscale_model=get_value_at_index(upscalemodelloader_384, 0), image=loaded_images);
230
+ imagescale_chunk = imagescaleby.upscale(
231
+ upscale_method=args.output_resize_algo, # ✨ 변경됨
232
+ scale_by=scale_by_ratio,
233
+ image=get_value_at_index(imageupscale_chunk, 0)
234
+ );
235
+ saveimage.save_images(filename_prefix="up/example", images=get_value_at_index(imagescale_chunk, 0));
236
+ del vhs_load_chunk, loaded_images, imageupscale_chunk, imagescale_chunk; clear_memory()
237
+ del upscalemodelloader_384; clear_memory(); combine_input_dir_for_ffmpeg = f"{COMFYUI_BASE_PATH}/output/up"; print("5단계 μ™„λ£Œ.")
238
+ else: print("\n5단계: μ—…μŠ€μΌ€μΌλ§ κ±΄λ„ˆλœ€ (λΉ„μœ¨ 1.0).")
239
+
240
+ # --- ✨ 6단계: RIFE 청크 둜직 μˆ˜μ • (Overlap 적용) ---\n",
241
+ print("\n6단계: λΉ„λ””μ˜€ κ²°ν•© μ€€λΉ„ 쀑..."); final_frame_rate = float(args.frame_rate); ffmpeg_input_dir = combine_input_dir_for_ffmpeg
242
+ if to_bool(args.interpolation):
243
+ print(" - ν”„λ ˆμž„ 보간 (RIFE)을 ν™œμ„±ν™”ν•©λ‹ˆλ‹€."); interpolated_dir = f"{COMFYUI_BASE_PATH}/output/interpolated"; source_dir = combine_input_dir_for_ffmpeg
244
+ total_frames_rife = 0
245
+ try:
246
+ temp_files = [f for f in os.listdir(source_dir) if f.endswith(('.png', '.jpg', '.jpeg', '.webp'))]; total_frames_rife = len(temp_files);
247
+ if total_frames_rife == 0: raise FileNotFoundError(f"RIFE 보간할 ν”„λ ˆμž„μ΄ '{source_dir}' 폴더에 μ—†μŠ΅λ‹ˆλ‹€.")
248
+ except Exception as e: print(f" ❌ RIFE 6단계 쀑단: '{source_dir}' ν΄λ”μ—μ„œ ν”„λ ˆμž„μ„ 읽을 수 μ—†μŠ΅λ‹ˆλ‹€. (였λ₯˜: {e})"); raise
249
+
250
+ chunk_size = args.rife_chunk_size;
251
+ print(f" - 총 {total_frames_rife}개의 ν”„λ ˆμž„μ„ RIFE 청크 {chunk_size}개 λ‹¨μœ„λ‘œ λΆ„ν• ν•˜μ—¬ μ‹€ν–‰ν•©λ‹ˆλ‹€ (Overlap 적용)...")
252
+
253
+ current_frame_idx = 0
254
+ is_first_chunk = True
255
+
256
+ while current_frame_idx < total_frames_rife:
257
+ load_from = current_frame_idx
258
+ load_cap = chunk_size
259
+
260
+ if not is_first_chunk:
261
+ load_from -= 1 # 1ν”„λ ˆμž„ 겹치기
262
+ load_cap += 1 # 겹친 만큼 1ν”„λ ˆμž„ 더 λ‘œλ“œ
263
+
264
+ # λ§ˆμ§€λ§‰ 청크 경계 처리
265
+ if load_from + load_cap > total_frames_rife:
266
+ load_cap = total_frames_rife - load_from
267
+
268
+ # RIFEλŠ” μ΅œμ†Œ 2ν”„λ ˆμž„μ΄ ν•„μš”ν•¨
269
+ if load_cap < 2:
270
+ print(f" - (κ²½κ³ ) RIFE μ²˜λ¦¬μ— ν•„μš”ν•œ ν”„λ ˆμž„(2개)이 λΆ€μ‘±ν•˜μ—¬ λ§ˆμ§€λ§‰ 배치λ₯Ό κ±΄λ„ˆλœλ‹ˆλ‹€.")
271
+ break
272
+
273
+ print(f" - RIFE 배치 처리 쀑 (원본 ν”„λ ˆμž„ {load_from} ~ {load_from + load_cap - 1})...")
274
+
275
+ vhs_load_chunk = vhs_loadimagespath.load_images(directory=source_dir, skip_first_images=load_from, image_load_cap=load_cap);
276
+ loaded_images = get_value_at_index(vhs_load_chunk, 0);
277
+
278
+ if loaded_images is None:
279
+ print(" - (κ²½κ³ ) κ±΄λ„ˆλ›Έ 수 μ—†λŠ” 이미지가 λ‘œλ“œλ˜μ—ˆμŠ΅λ‹ˆλ‹€, 이 배치λ₯Ό κ±΄λ„ˆλœλ‹ˆλ‹€.");
280
+ current_frame_idx += chunk_size # λ‹€μŒ 청크둜 이동
281
+ is_first_chunk = False
282
+ continue
283
+
284
+ rife_chunk_result_tensor = get_value_at_index(rife_vfi.vfi(
285
+ ckpt_name="rife49.pth",
286
+ multiplier=2,
287
+ fast_mode=to_bool(args.rife_fast_mode),
288
+ ensemble=to_bool(args.rife_ensemble),
289
+ frames=loaded_images
290
+ ), 0)
291
+
292
+ images_to_save = rife_chunk_result_tensor
293
+ if not is_first_chunk:
294
+ # ✨ 첫 λ²ˆμ§Έκ°€ μ•„λ‹Œ λͺ¨λ“  μ²­ν¬λŠ” κ²ΉμΉ˜λŠ” 첫 ν”„λ ˆμž„μ„ 제거 (ν…μ„œ μŠ¬λΌμ΄μ‹±)
295
+ print(f" - (Overlap) 쀑볡 ν”„λ ˆμž„ 1개 제거 ν›„ μ €μž₯")
296
+ images_to_save = rife_chunk_result_tensor[1:]
297
+
298
+ saveimage.save_images(filename_prefix="interpolated/example", images=images_to_save);
299
+
300
+ del vhs_load_chunk, loaded_images, rife_chunk_result_tensor, images_to_save; clear_memory()
301
+
302
+ current_frame_idx += chunk_size
303
+ is_first_chunk = False
304
+
305
+ ffmpeg_input_dir = interpolated_dir; final_frame_rate *= 2
306
+ else: print(" - ν”„λ ˆμž„ 보간이 λΉ„ν™œμ„±ν™”λ˜μ—ˆμŠ΅λ‹ˆλ‹€.");
307
+ # --- 6단계 μˆ˜μ • μ™„λ£Œ ---
308
+
309
+ print(f" - μ΅œμ’… λΉ„λ””μ˜€λ₯Ό FFmpeg ({args.video_encoder})둜 κ²°ν•©ν•©λ‹ˆλ‹€..."); print(f" - μž…λ ₯ 폴더: '{ffmpeg_input_dir}'")
310
+ input_pattern = os.path.join(ffmpeg_input_dir, "example_%05d_.png") # 파일λͺ… νŒ¨ν„΄ μˆ˜μ •λ¨
311
+ timestamp = time.strftime("%Y%m%d-%H%M%S"); output_filename = f"AnimateDiff_{timestamp}.mp4"; output_path = os.path.join(COMFYUI_BASE_PATH, "output", output_filename)
312
+ ffmpeg_cmd = ["ffmpeg", "-framerate", str(final_frame_rate), "-i", input_pattern]
313
+ encoder_choice = args.video_encoder
314
+ if encoder_choice == "GPU: HEVC (NVENC)": ffmpeg_cmd.extend(["-c:v", "hevc_nvenc", "-cq", str(args.nvenc_cq), "-preset", args.nvenc_preset, "-tag:v", "hvc1"])
315
+ elif encoder_choice == "GPU: H.264 (NVENC)": ffmpeg_cmd.extend(["-c:v", "h264_nvenc", "-cq", str(args.nvenc_cq), "-preset", args.nvenc_preset])
316
+ else: ffmpeg_cmd.extend(["-c:v", "libx264", "-crf", str(args.cpu_crf), "-preset", "medium"])
317
+ ffmpeg_cmd.extend(["-pix_fmt", "yuv420p", "-y", output_path])
318
+ print(f" - μ‹€ν–‰ λͺ…λ Ήμ–΄: {' '.join(ffmpeg_cmd)}")
319
+ try:
320
+ result = subprocess.run(ffmpeg_cmd, capture_output=True, text=True, check=True, encoding='utf-8')
321
+ print(" - FFmpeg μ‹€ν–‰ μ™„λ£Œ.")
322
+ except FileNotFoundError: print(" ❌ 였λ₯˜: 'ffmpeg' λͺ…λ Ήμ–΄λ₯Ό 찾을 수 μ—†μŠ΅λ‹ˆλ‹€. μ‹œμŠ€ν…œμ— μ„€μΉ˜λ˜μ–΄ μžˆλŠ”μ§€ ν™•μΈν•˜μ„Έμš”."); raise
323
+ except subprocess.CalledProcessError as e:
324
+ print(f" ❌ 였λ₯˜: FFmpeg μ‹€ν–‰ μ‹€νŒ¨ (Return code: {e.returncode})")
325
+ if e.stdout: print(f" FFmpeg stdout:\n{e.stdout}")
326
+ if e.stderr: print(f" FFmpeg stderr:\n{e.stderr}")
327
+ raise
328
+ except Exception as e: print(f" ❌ 였λ₯˜: FFmpeg μ‹€ν–‰ 쀑 μ˜ˆμƒμΉ˜ λͺ»ν•œ 였λ₯˜ λ°œμƒ: {e}"); raise
329
+ print("βœ… λͺ¨λ“  단계 μ™„λ£Œ.")
330
+
331
+ # --- ✨ UnboundLocalError ν•΄κ²° 및 볡사 둜직 (μ΅œμ’…) ---\n",
332
+ latest_video = None
333
+
334
+ if os.path.exists(output_path):
335
+ latest_video = output_path
336
+ print(f"LATEST_VIDEO_PATH:{latest_video}")
337
+ else:
338
+ output_dir = os.path.join(COMFYUI_BASE_PATH, "output");
339
+ video_files = glob.glob(os.path.join(output_dir, '**', '*.mp4'), recursive=True) + \
340
+ glob.glob(os.path.join(output_dir, '**', '*.mkv'), recursive=True)
341
+
342
+ if not video_files:
343
+ raise FileNotFoundError("μƒμ„±λœ λ™μ˜μƒ νŒŒμΌμ„ 찾을 수 μ—†μŠ΅λ‹ˆλ‹€!")
344
+
345
+ latest_video = max(video_files, key=os.path.getctime)
346
+ print(f"LATEST_VIDEO_PATH:{latest_video}")
347
+
348
+ if latest_video is None:
349
+ raise FileNotFoundError("μ΅œμ’… λΉ„λ””μ˜€ 경둜λ₯Ό ν™•μ •ν•  수 μ—†μŠ΅λ‹ˆλ‹€. 슀크립트λ₯Ό ν™•μΈν•˜μ„Έμš”.")
350
+
351
+ base, ext = os.path.splitext(latest_video)
352
+ original_copy_path = f"{base}_original{ext}"
353
+ try:
354
+ shutil.copy2(latest_video, original_copy_path)
355
+ print(f"βœ… 원본 볡사본 생성 μ™„λ£Œ: {original_copy_path}")
356
+ print(f"ORIGINAL_COPY_PATH:{original_copy_path}")
357
+ except Exception as e:
358
+ print(f"❌ 원본 볡사본 생성 μ‹€νŒ¨: {e}")
359
+ # --- μˆ˜μ • μ™„λ£Œ --
360
+
361
+ if __name__ == "__main__":
362
+ main()