arcacolab committed
Commit
6504c2a
·
verified ·
1 Parent(s): 902c98d

Upload run_generator.py

Files changed (1)
  1. run_generator.py +424 -0
run_generator.py ADDED
@@ -0,0 +1,424 @@
import sys
import os
import time
import glob
import gc
import torch
import subprocess
import random
import argparse
from typing import Sequence, Mapping, Any, Union
import shutil

# --- 0. Basic setup and argument parsing ---
def parse_args():
    parser = argparse.ArgumentParser(description="ComfyUI Video Generation Script with All Controls from 1.py")
    parser.add_argument("--positive_prompt", type=str, required=True)
    parser.add_argument("--negative_prompt", type=str, required=True)
    parser.add_argument("--width", type=int, required=True)
    parser.add_argument("--height", type=int, required=True)
    parser.add_argument("--length", type=int, required=True)
    parser.add_argument("--upscale_ratio", type=float, required=True)
    parser.add_argument("--steps", type=int, default=4)
    parser.add_argument("--cfg_high", type=float, default=1.0)
    parser.add_argument("--cfg_low", type=float, default=1.0)
    parser.add_argument("--sampler_name_high", type=str, default="euler")
    parser.add_argument("--scheduler_high", type=str, default="simple")
    parser.add_argument("--sampler_name_low", type=str, default="euler")
    parser.add_argument("--scheduler_low", type=str, default="simple")
    parser.add_argument("--noise_seed", type=int, default=-1)
    parser.add_argument("--split_point_percent", type=float, default=50.0)
    parser.add_argument("--shift", type=float, default=8.0)
    parser.add_argument("--sageattention", type=str, default="on")
    parser.add_argument("--unet_high_name", type=str, required=True)
    parser.add_argument("--unet_low_name", type=str, required=True)
    parser.add_argument("--vae_name", type=str, required=True)
    parser.add_argument("--clip_name", type=str, required=True)

    # Extra arguments for the VAE (KJ) loader
    parser.add_argument("--vae_weight_dtype", type=str, default="bf16")
    parser.add_argument("--vae_device", type=str, default="main_device")

    parser.add_argument("--upscale_model_name", type=str, default="None")
    parser.add_argument("--upscale_model_scale", type=float, default=2.0)
    parser.add_argument("--upscale_chunk_size", type=int, default=30)
    parser.add_argument("--frame_rate", type=int, default=16)
    parser.add_argument("--interpolation", type=str, default="on")
    parser.add_argument("--rife_fast_mode", type=str, default="on")
    parser.add_argument("--rife_ensemble", type=str, default="on")
    parser.add_argument("--rife_chunk_size", type=int, default=30)
    parser.add_argument("--connect_lora_clip", type=str, default="off")
    parser.add_argument("--video_encoder", type=str, default="GPU: HEVC (NVENC)")
    parser.add_argument("--nvenc_cq", type=int, default=25)
    parser.add_argument("--nvenc_preset", type=str, default="p5")
    parser.add_argument("--cpu_crf", type=int, default=19)  # FFmpeg (libx264) CRF
    parser.add_argument("--lora_high_1_name", type=str, default="None")
    parser.add_argument("--lora_high_1_strength_model", type=float, default=1.0)
    parser.add_argument("--lora_high_1_strength_clip", type=float, default=1.0)
    parser.add_argument("--lora_high_2_name", type=str, default="None")
    parser.add_argument("--lora_high_2_strength_model", type=float, default=1.0)
    parser.add_argument("--lora_high_2_strength_clip", type=float, default=1.0)
    parser.add_argument("--lora_low_1_name", type=str, default="None")
    parser.add_argument("--lora_low_1_strength_model", type=float, default=1.0)
    parser.add_argument("--lora_low_1_strength_clip", type=float, default=1.0)
    parser.add_argument("--lora_low_2_name", type=str, default="None")
    parser.add_argument("--lora_low_2_strength_model", type=float, default=1.0)
    parser.add_argument("--lora_low_2_strength_clip", type=float, default=1.0)
    parser.add_argument("--input_resize_algo", type=str, default="bicubic")
    parser.add_argument("--output_resize_algo", type=str, default="bicubic")
    return parser.parse_args()

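# Example invocation (the model filenames below are placeholders -- substitute
# whatever GGUF / safetensors files are actually present under ComfyUI/models):
#   python run_generator.py \
#       --positive_prompt "a corgi surfing at sunset" \
#       --negative_prompt "blurry, low quality" \
#       --width 480 --height 832 --length 81 --upscale_ratio 1.0 \
#       --unet_high_name wan2.2_i2v_high_noise.gguf \
#       --unet_low_name wan2.2_i2v_low_noise.gguf \
#       --vae_name wan_2.1_vae.safetensors --clip_name umt5_xxl_fp8.safetensors
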
def to_bool(s: str) -> bool:
    return s.lower() in ['true', '1', 't', 'y', 'yes', 'on']

def clear_memory():
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()
    gc.collect()

COMFYUI_BASE_PATH = '/content/ComfyUI'

def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
    try:
        return obj[index]
    except (KeyError, TypeError):
        if isinstance(obj, dict) and "result" in obj:
            return obj["result"][index]
        raise

def add_comfyui_directory_to_sys_path() -> None:
    if os.path.isdir(COMFYUI_BASE_PATH) and COMFYUI_BASE_PATH not in sys.path:
        sys.path.append(COMFYUI_BASE_PATH)
def import_custom_nodes() -> None:
    try:
        import nest_asyncio
        nest_asyncio.apply()
    except ImportError:
        print("nest_asyncio not found, installing...")
        try:
            subprocess.run([sys.executable, "-m", "pip", "install", "-q", "nest_asyncio"], check=True)
            import nest_asyncio
            nest_asyncio.apply()
            print("nest_asyncio installed and applied.")
        except Exception as e:
            print(f"Failed to install or apply nest_asyncio: {e}")
    import asyncio
    import execution
    import server
    from nodes import init_extra_nodes
    try:
        loop = asyncio.get_event_loop()
        if loop.is_closed():
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
    except RuntimeError:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    server_instance = server.PromptServer(loop)
    execution.PromptQueue(server_instance)
    if not loop.is_running():
        try:
            loop.run_until_complete(init_extra_nodes())
        except RuntimeError as e:
            print(f"Note: Could not run init_extra_nodes synchronously, possibly due to existing loop state: {e}")
            try:
                asyncio.ensure_future(init_extra_nodes())
            except Exception as fut_e:
                print(f"Error trying async init_extra_nodes: {fut_e}")
    else:
        try:
            asyncio.ensure_future(init_extra_nodes())
        except Exception as fut_e:
            print(f"Error trying async init_extra_nodes on running loop: {fut_e}")

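# Note: instantiating PromptServer here is a headless bootstrap -- many custom
# node packs look up server.PromptServer.instance while registering, and
# nest_asyncio lets this re-enter an already-running notebook event loop
# (e.g. on Colab) so init_extra_nodes() can finish loading the node packs.
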
def main():
    args = parse_args()
    print("🚀 Starting video generation (Full Control Mode, VRAM Optimized)...\n")

    # ⭐⭐⭐ Modified section: clean up leftover output folders at startup (final files are kept) ⭐⭐⭐
    temp_dirs = ["temp", "up", "interpolated"]
    output_base = f"{COMFYUI_BASE_PATH}/output"
    print(" - Cleaning up temporary folders from the previous run...")
    for d in temp_dirs:
        full_path = os.path.join(output_base, d)
        if os.path.exists(full_path):
            shutil.rmtree(full_path)
            print(f" - Removed stale temporary folder: {d}")

    # Create the temporary folders needed for the current run
    os.makedirs(f"{COMFYUI_BASE_PATH}/output/temp", exist_ok=True)
    os.makedirs(f"{COMFYUI_BASE_PATH}/output/up", exist_ok=True)
    os.makedirs(f"{COMFYUI_BASE_PATH}/output/interpolated", exist_ok=True)
    # ⭐⭐⭐ End of modified section ⭐⭐⭐

    add_comfyui_directory_to_sys_path()
    try:
        from utils.extra_config import load_extra_path_config
    except ImportError:
        print("⚠️ Failed to load ComfyUI's extra_model_paths.yaml helper (ignoring and continuing)")
        load_extra_path_config = lambda x: None
    extra_model_paths_file = os.path.join(COMFYUI_BASE_PATH, "extra_model_paths.yaml")
    if os.path.exists(extra_model_paths_file):
        load_extra_path_config(extra_model_paths_file)
    print("Initializing ComfyUI custom nodes...")
    import_custom_nodes()
    from nodes import NODE_CLASS_MAPPINGS
    print("Custom node initialization complete.")

    if args.noise_seed == -1:
        final_seed = random.randint(1, 2**64 - 1)
        print(f" - Generated random seed: {final_seed}")
    else:
        final_seed = args.noise_seed
        print(f" - Using fixed seed: {final_seed}")
    split_step = max(0, int(args.steps * (args.split_point_percent / 100.0)))
    print(f" - Splitting at step {split_step} ({args.split_point_percent}%) of {args.steps} total steps")
    loras_in_use = not (args.lora_high_1_name == "None" and args.lora_high_2_name == "None" and args.lora_low_1_name == "None" and args.lora_low_2_name == "None")
    connect_clip_to_lora = to_bool(args.connect_lora_clip)
    should_keep_clip_loaded = loras_in_use and connect_clip_to_lora

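    # Worked example of the split: steps=4 with split_point_percent=50.0 gives
    # split_step=2, so the high-noise model (Stage 2) runs steps 0 to 2 and the
    # low-noise model (Stage 3) continues from step 2 to the end.
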
    with torch.inference_mode():
        # Map every node class used in this workflow (VAE loader, upscale model loader, CLIP loader, etc.)
        loadimage = NODE_CLASS_MAPPINGS["LoadImage"]()
        upscalemodelloader = NODE_CLASS_MAPPINGS["UpscaleModelLoader"]()
        cliploader = NODE_CLASS_MAPPINGS["CLIPLoader"]()
        vaeloaderkj = NODE_CLASS_MAPPINGS["VAELoaderKJ"]()
        cliptextencode = NODE_CLASS_MAPPINGS["CLIPTextEncode"]()
        unetloadergguf = NODE_CLASS_MAPPINGS["UnetLoaderGGUF"]()
        loraloader = NODE_CLASS_MAPPINGS["LoraLoader"]()
        imageresizekjv2 = NODE_CLASS_MAPPINGS["ImageResizeKJv2"]()
        wanimagetovideo = NODE_CLASS_MAPPINGS["WanImageToVideo"]()
        modelsamplingsd3 = NODE_CLASS_MAPPINGS["ModelSamplingSD3"]()
        ksampleradvanced = NODE_CLASS_MAPPINGS["KSamplerAdvanced"]()
        vaedecode = NODE_CLASS_MAPPINGS["VAEDecode"]()
        vhs_loadimagespath = NODE_CLASS_MAPPINGS["VHS_LoadImagesPath"]()
        imageupscalewithmodel = NODE_CLASS_MAPPINGS["ImageUpscaleWithModel"]()
        imagescaleby = NODE_CLASS_MAPPINGS["ImageScaleBy"]()
        rife_vfi = NODE_CLASS_MAPPINGS["RIFE VFI"]()
        vhs_videocombine = NODE_CLASS_MAPPINGS["VHS_VideoCombine"]()
        saveimage = NODE_CLASS_MAPPINGS["SaveImage"]()
        clipvisionloader = NODE_CLASS_MAPPINGS["CLIPVisionLoader"]()
        clipvisionencode = NODE_CLASS_MAPPINGS["CLIPVisionEncode"]()
        pathchsageattentionkj = NODE_CLASS_MAPPINGS["PathchSageAttentionKJ"]()

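        # Each node method below returns the node's output tuple;
        # get_value_at_index(result, slot) then picks an output slot, which is
        # this script's stand-in for wiring links between sockets in the graph.
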
        # --- Stage 1: CLIP Vision logic and initial latent creation ---
        print("\nStage 1: Loading inputs and creating the initial latent...")
        print(f" - Loading CLIP: {args.clip_name}")
        cliploader_460 = cliploader.load_clip(clip_name=args.clip_name, type="wan", device="default")
        cliptextencode_462 = cliptextencode.encode(text=args.positive_prompt, clip=get_value_at_index(cliploader_460, 0))
        cliptextencode_463 = cliptextencode.encode(text=args.negative_prompt, clip=get_value_at_index(cliploader_460, 0))
        loadimage_88 = loadimage.load_image(image="example.png")

        imageresizekjv2_401 = imageresizekjv2.resize(
            width=args.width, height=args.height,
            upscale_method=args.input_resize_algo,
            image=get_value_at_index(loadimage_88, 0), keep_proportion="crop",
            pad_color="0, 0, 0", crop_position="center", divisible_by=2,
            unique_id=random.randint(1, 2**64 - 1)
        )

        print(" - Loading CLIP Vision: clip_vision_h.safetensors")
        clipvisionloader_cv = clipvisionloader.load_clip(clip_name="clip_vision_h.safetensors")
        print(" - Encoding with CLIP Vision...")
        clipvisionencode_cv = clipvisionencode.encode(
            crop="none",
            clip_vision=get_value_at_index(clipvisionloader_cv, 0),
            image=get_value_at_index(imageresizekjv2_401, 0)
        )
        clip_vision_output = get_value_at_index(clipvisionencode_cv, 0)

        print(f" - Temporarily loading VAE ({args.vae_weight_dtype}): {args.vae_name}")
        vaeloader_temp = vaeloaderkj.load_vae(
            vae_name=args.vae_name,
            device=args.vae_device,
            weight_dtype=args.vae_weight_dtype
        )

        wanimagetovideo_464 = wanimagetovideo.EXECUTE_NORMALIZED(
            width=get_value_at_index(imageresizekjv2_401, 1),
            height=get_value_at_index(imageresizekjv2_401, 2),
            length=args.length,
            batch_size=1,
            positive=get_value_at_index(cliptextencode_462, 0),
            negative=get_value_at_index(cliptextencode_463, 0),
            vae=get_value_at_index(vaeloader_temp, 0),
            clip_vision_output=clip_vision_output,
            start_image=get_value_at_index(imageresizekjv2_401, 0)
        )

        if not should_keep_clip_loaded:
            print(" ✨ (Optimization) Stage 1 done; releasing the CLIP model immediately.")
            del cliploader_460
        else:
            print(" ⚠️ (Config) The LoRA-CLIP connection option is enabled; keeping the CLIP model loaded through Stage 3.")
        print(" ✨ (Optimization) Stage 1 done; releasing the temporary VAE and CLIP Vision models.")
        del vaeloader_temp, clipvisionloader_cv, clipvisionencode_cv, clip_vision_output
        clear_memory()
        print("Stage 1 complete.")

        # --- Stage 2: High Noise (KSampler) ---
        print("\nStage 2: Starting high-noise sampling...")
        print(f" - Loading UNet High: {args.unet_high_name}")
        unetloadergguf_495 = unetloadergguf.load_unet(unet_name=args.unet_high_name)
        model = get_value_at_index(unetloadergguf_495, 0)
        clip = get_value_at_index(cliploader_460, 0) if should_keep_clip_loaded else None
        model_for_patching = model
        if to_bool(args.sageattention):
            print(" ✨ Applying SageAttention patch (High)...")
            pathchsageattentionkj_124 = pathchsageattentionkj.patch(sage_attention="auto", model=model_for_patching)
            model_for_patching = get_value_at_index(pathchsageattentionkj_124, 0)
        if args.lora_high_1_name != "None":
            print(f" - High LoRA 1: {args.lora_high_1_name}")
            model_for_patching, clip = loraloader.load_lora(lora_name=args.lora_high_1_name, strength_model=args.lora_high_1_strength_model, strength_clip=args.lora_high_1_strength_clip, model=model_for_patching, clip=clip)
        if args.lora_high_2_name != "None":
            print(f" - High LoRA 2: {args.lora_high_2_name}")
            model_for_patching, clip = loraloader.load_lora(lora_name=args.lora_high_2_name, strength_model=args.lora_high_2_strength_model, strength_clip=args.lora_high_2_strength_clip, model=model_for_patching, clip=clip)
        shifted_model = get_value_at_index(modelsamplingsd3.patch(shift=args.shift, model=model_for_patching), 0)
        final_model = shifted_model
        ksampleradvanced_466 = ksampleradvanced.sample(add_noise="enable", noise_seed=final_seed, steps=args.steps, cfg=args.cfg_high, sampler_name=args.sampler_name_high, scheduler=args.scheduler_high, start_at_step=0, end_at_step=split_step, return_with_leftover_noise="enable", model=final_model, positive=get_value_at_index(wanimagetovideo_464, 0), negative=get_value_at_index(wanimagetovideo_464, 1), latent_image=get_value_at_index(wanimagetovideo_464, 2))
        if to_bool(args.sageattention):
            del pathchsageattentionkj_124
        del unetloadergguf_495, model, clip, model_for_patching, shifted_model, final_model
        clear_memory()
        print("Stage 2 complete.")

        # --- Stage 3: Low Noise (KSampler) ---
        print("\nStage 3: Starting low-noise sampling...")
        print(f" - Loading UNet Low: {args.unet_low_name}")
        unetloadergguf_496 = unetloadergguf.load_unet(unet_name=args.unet_low_name)
        model = get_value_at_index(unetloadergguf_496, 0)
        clip = get_value_at_index(cliploader_460, 0) if should_keep_clip_loaded else None
        model_for_patching = model
        if to_bool(args.sageattention):
            print(" ✨ Applying SageAttention patch (Low)...")
            pathchsageattentionkj_129 = pathchsageattentionkj.patch(sage_attention="auto", model=model_for_patching)
            model_for_patching = get_value_at_index(pathchsageattentionkj_129, 0)
        if args.lora_low_1_name != "None":
            print(f" - Low LoRA 1: {args.lora_low_1_name}")
            model_for_patching, clip = loraloader.load_lora(lora_name=args.lora_low_1_name, strength_model=args.lora_low_1_strength_model, strength_clip=args.lora_low_1_strength_clip, model=model_for_patching, clip=clip)
        if args.lora_low_2_name != "None":
            print(f" - Low LoRA 2: {args.lora_low_2_name}")
            model_for_patching, clip = loraloader.load_lora(lora_name=args.lora_low_2_name, strength_model=args.lora_low_2_strength_model, strength_clip=args.lora_low_2_strength_clip, model=model_for_patching, clip=clip)
        shifted_model = get_value_at_index(modelsamplingsd3.patch(shift=args.shift, model=model_for_patching), 0)
        final_model = shifted_model
        ksampleradvanced_465 = ksampleradvanced.sample(add_noise="disable", noise_seed=final_seed, steps=args.steps, cfg=args.cfg_low, sampler_name=args.sampler_name_low, scheduler=args.scheduler_low, start_at_step=split_step, end_at_step=10000, return_with_leftover_noise="disable", model=final_model, positive=get_value_at_index(wanimagetovideo_464, 0), negative=get_value_at_index(wanimagetovideo_464, 1), latent_image=get_value_at_index(ksampleradvanced_466, 0))
        if to_bool(args.sageattention):
            del pathchsageattentionkj_129
        if should_keep_clip_loaded:
            print(" ✨ (Memory) The LoRA-CLIP connection is no longer needed; releasing the CLIP model.")
            del cliploader_460
        del unetloadergguf_496, model, clip, model_for_patching, shifted_model, final_model, ksampleradvanced_466, wanimagetovideo_464
        clear_memory()
        print("Stage 3 complete.")

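        # Hand-off between the samplers: Stage 2 stopped at split_step with
        # return_with_leftover_noise="enable", so its latent still carries noise;
        # Stage 3 resumed from that latent with add_noise="disable" and the same
        # seed, finishing the schedule from split_step onward.
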
        # --- Stage 4: VAE decoding ---
        print("\nStage 4: VAE decoding and temporary save...")

        print(f" - Loading VAE model (for decoding, {args.vae_weight_dtype}): {args.vae_name}")
        vaeloader_461 = vaeloaderkj.load_vae(
            vae_name=args.vae_name,
            device=args.vae_device,
            weight_dtype=args.vae_weight_dtype
        )

        vaedecode_469 = vaedecode.decode(samples=get_value_at_index(ksampleradvanced_465, 0), vae=get_value_at_index(vaeloader_461, 0))
        saveimage.save_images(filename_prefix="temp/example", images=get_value_at_index(vaedecode_469, 0))
        del ksampleradvanced_465, vaeloader_461, vaedecode_469, loadimage_88, imageresizekjv2_401
        clear_memory()
        print("Stage 4 complete.")

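        # The decoded frames now sit in output/temp as numbered PNGs
        # (example_00001_.png, ...), which is what lets the stages below reload
        # them from disk in chunks instead of holding the full clip in VRAM.
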
        # --- Stage 5: Upscaling ---
        combine_input_dir_for_ffmpeg = f"{COMFYUI_BASE_PATH}/output/temp"
        if args.upscale_ratio > 1:
            if args.upscale_model_name == "None":
                print("\nStage 5: Upscaling skipped (no model selected).")
            else:
                print("\nStage 5: Upscaling frames...")
                print(f" - Loading upscale model: {args.upscale_model_name}")
                upscalemodelloader_384 = upscalemodelloader.load_model(model_name=args.upscale_model_name)
                chunk_size = args.upscale_chunk_size
                base_dir = f"{COMFYUI_BASE_PATH}/output/temp"
                scale_by_ratio = args.upscale_ratio / args.upscale_model_scale
                total_frames = 0
                try:
                    temp_files = [f for f in os.listdir(base_dir) if f.endswith(('.png', '.jpg', '.jpeg', '.webp'))]
                    total_frames = len(temp_files)
                    if total_frames == 0:
                        raise FileNotFoundError("No frames to upscale in the 'temp' folder.")
                except Exception as e:
                    print(f" ❌ Stage 5 (upscale) aborted: could not read frames from the 'temp' folder. (Error: {e})")
                    if 'upscalemodelloader_384' in locals():
                        del upscalemodelloader_384
                    clear_memory()
                    raise
                print(f" - Processing {total_frames} frames in chunks of {chunk_size}...")
                for i in range(0, total_frames, chunk_size):
                    print(f" - Processing batch (frames {i} to {min(i + chunk_size, total_frames) - 1})...")
                    vhs_load_chunk = vhs_loadimagespath.load_images(directory=base_dir, skip_first_images=i, image_load_cap=chunk_size)
                    loaded_images = get_value_at_index(vhs_load_chunk, 0)
                    if loaded_images is None:
                        print(" - (Warning) An unexpectedly empty image batch was loaded; skipping it.")
                        continue
                    imageupscale_chunk = imageupscalewithmodel.upscale(upscale_model=get_value_at_index(upscalemodelloader_384, 0), image=loaded_images)
                    imagescale_chunk = imagescaleby.upscale(
                        upscale_method=args.output_resize_algo,
                        scale_by=scale_by_ratio,
                        image=get_value_at_index(imageupscale_chunk, 0)
                    )
                    saveimage.save_images(filename_prefix="up/example", images=get_value_at_index(imagescale_chunk, 0))
                    del vhs_load_chunk, loaded_images, imageupscale_chunk, imagescale_chunk
                    clear_memory()
                del upscalemodelloader_384
                clear_memory()
                combine_input_dir_for_ffmpeg = f"{COMFYUI_BASE_PATH}/output/up"
                print("Stage 5 complete.")
        else:
            print("\nStage 5: Upscaling skipped (ratio 1.0).")

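        # scale_by_ratio corrects for the model's fixed factor: e.g. a 2x model
        # (upscale_model_scale=2.0) with --upscale_ratio 1.5 gives
        # scale_by_ratio = 1.5 / 2.0 = 0.75, i.e. a 25% downscale after the
        # model pass so the net result is exactly 1.5x.
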
        # --- Stage 6: RIFE and video combination ---
        print("\nStage 6: Preparing video combination...")
        final_frame_rate = float(args.frame_rate)
        ffmpeg_input_dir = combine_input_dir_for_ffmpeg
        if to_bool(args.interpolation):
            print(" - Enabling frame interpolation (RIFE).")
            interpolated_dir = f"{COMFYUI_BASE_PATH}/output/interpolated"
            source_dir = combine_input_dir_for_ffmpeg
            total_frames_rife = 0
            try:
                temp_files = [f for f in os.listdir(source_dir) if f.endswith(('.png', '.jpg', '.jpeg', '.webp'))]
                total_frames_rife = len(temp_files)
                if total_frames_rife == 0:
                    raise FileNotFoundError(f"No frames to interpolate in '{source_dir}'.")
            except Exception as e:
                print(f" ❌ Stage 6 (RIFE) aborted: could not read frames from '{source_dir}'. (Error: {e})")
                raise
            chunk_size = args.rife_chunk_size
            print(f" - Processing {total_frames_rife} frames in RIFE chunks of {chunk_size} (with overlap)...")
            current_frame_idx = 0
            is_first_chunk = True
            while current_frame_idx < total_frames_rife:
                load_from = current_frame_idx
                load_cap = chunk_size
                if not is_first_chunk:
                    load_from -= 1
                    load_cap += 1
                if load_from + load_cap > total_frames_rife:
                    load_cap = total_frames_rife - load_from
                if load_cap < 2:
                    print(" - (Warning) Fewer than the 2 frames RIFE needs remain; skipping the final batch.")
                    break
                print(f" - Processing RIFE batch (source frames {load_from} to {load_from + load_cap - 1})...")
                vhs_load_chunk = vhs_loadimagespath.load_images(directory=source_dir, skip_first_images=load_from, image_load_cap=load_cap)
                loaded_images = get_value_at_index(vhs_load_chunk, 0)
                if loaded_images is None:
                    print(" - (Warning) An unexpectedly empty image batch was loaded; skipping it.")
                    current_frame_idx += chunk_size
                    is_first_chunk = False
                    continue
                rife_chunk_result_tensor = get_value_at_index(rife_vfi.vfi(
                    ckpt_name="rife49.pth", multiplier=2,
                    fast_mode=to_bool(args.rife_fast_mode),
                    ensemble=to_bool(args.rife_ensemble),
                    frames=loaded_images
                ), 0)
                images_to_save = rife_chunk_result_tensor
                if not is_first_chunk:
                    print(" - (Overlap) Dropping 1 duplicate frame before saving")
                    images_to_save = rife_chunk_result_tensor[1:]
                saveimage.save_images(filename_prefix="interpolated/example", images=images_to_save)
                del vhs_load_chunk, loaded_images, rife_chunk_result_tensor, images_to_save
                clear_memory()
                current_frame_idx += chunk_size
                is_first_chunk = False
            ffmpeg_input_dir = interpolated_dir
            final_frame_rate *= 2
        else:
            print(" - Frame interpolation is disabled.")

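        # Overlap bookkeeping: every chunk after the first reloads the previous
        # chunk's last source frame (load_from -= 1) so RIFE can interpolate
        # across the chunk boundary, then slices off the duplicated first output
        # frame ([1:]) before saving.
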
        print(f" - Combining the final video with FFmpeg ({args.video_encoder})...")
        print(f" - Input folder: '{ffmpeg_input_dir}'")
        input_pattern = os.path.join(ffmpeg_input_dir, "example_%05d_.png")
        timestamp = time.strftime("%Y%m%d-%H%M%S")
        output_filename = f"AnimateDiff_{timestamp}.mp4"
        output_path = os.path.join(COMFYUI_BASE_PATH, "output", output_filename)
        ffmpeg_cmd = ["ffmpeg", "-framerate", str(final_frame_rate), "-i", input_pattern]
        encoder_choice = args.video_encoder
        if encoder_choice == "GPU: HEVC (NVENC)":
            ffmpeg_cmd.extend(["-c:v", "hevc_nvenc", "-cq", str(args.nvenc_cq), "-preset", args.nvenc_preset, "-tag:v", "hvc1"])
        elif encoder_choice == "GPU: H.264 (NVENC)":
            ffmpeg_cmd.extend(["-c:v", "h264_nvenc", "-cq", str(args.nvenc_cq), "-preset", args.nvenc_preset])
        else:
            ffmpeg_cmd.extend(["-c:v", "libx264", "-crf", str(args.cpu_crf), "-preset", "medium"])
        ffmpeg_cmd.extend(["-pix_fmt", "yuv420p", "-y", output_path])
        print(f" - Running command: {' '.join(ffmpeg_cmd)}")
        try:
            subprocess.run(ffmpeg_cmd, capture_output=True, text=True, check=True, encoding='utf-8')
            print(" - FFmpeg finished.")
        except FileNotFoundError:
            print(" ❌ Error: the 'ffmpeg' command was not found. Make sure it is installed on the system.")
            raise
        except subprocess.CalledProcessError as e:
            print(f" ❌ Error: FFmpeg failed (return code: {e.returncode})")
            if e.stdout:
                print(f" FFmpeg stdout:\n{e.stdout}")
            if e.stderr:
                print(f" FFmpeg stderr:\n{e.stderr}")
            raise
        except Exception as e:
            print(f" ❌ Error: unexpected error while running FFmpeg: {e}")
            raise
        print("✅ All stages complete.")

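        # With the defaults (16 fps source, interpolation on, NVENC HEVC), the
        # assembled command comes out roughly as:
        #   ffmpeg -framerate 32.0 -i <input_dir>/example_%05d_.png \
        #          -c:v hevc_nvenc -cq 25 -preset p5 -tag:v hvc1 \
        #          -pix_fmt yuv420p -y <output_dir>/AnimateDiff_<timestamp>.mp4
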
        # --- Print the final file path and make a copy of the original ---
        latest_video = None
        if os.path.exists(output_path):
            latest_video = output_path
            print(f"LATEST_VIDEO_PATH:{latest_video}")
        else:
            output_dir = os.path.join(COMFYUI_BASE_PATH, "output")
            # Check for both mp4 and mkv files
            video_files = glob.glob(os.path.join(output_dir, '**', '*.mp4'), recursive=True) + \
                          glob.glob(os.path.join(output_dir, '**', '*.mkv'), recursive=True)
            if not video_files:
                raise FileNotFoundError("Could not find the generated video file!")
            latest_video = max(video_files, key=os.path.getctime)
            print(f"LATEST_VIDEO_PATH:{latest_video}")
        if latest_video is None:
            raise FileNotFoundError("Could not determine the final video path. Check the script.")

        base, ext = os.path.splitext(latest_video)
        original_copy_path = f"{base}_original{ext}"
        try:
            shutil.copy2(latest_video, original_copy_path)
            print(f"✅ Created a copy of the original: {original_copy_path}")
            print(f"ORIGINAL_COPY_PATH:{original_copy_path}")
        except Exception as e:
            print(f"❌ Failed to create a copy of the original: {e}")

        # --- Stage 7: Save the settings log (.txt) ---
        log_file_path = f"{base}.txt"
        print(f"\nStage 7: Writing the settings log file ({os.path.basename(log_file_path)})...")
        try:
            log_content = f"""
[--- Generation Info ---]
Positive Prompt: {args.positive_prompt}
Negative Prompt: {args.negative_prompt}

[--- Basic Settings ---]
Seed: {final_seed}
Steps: {args.steps}
Split Step (%): {args.split_point_percent}% ({split_step} steps)
Shift: {args.shift}
SageAttention: {args.sageattention}
Length (Frames): {args.length}
Output Resolution: {args.width}x{args.height}

[--- High Noise (Stage 2) ---]
Model: {args.unet_high_name}
CFG: {args.cfg_high}
Sampler: {args.sampler_name_high}
Scheduler: {args.scheduler_high}
LoRA 1: {args.lora_high_1_name} (Model: {args.lora_high_1_strength_model}, Clip: {args.lora_high_1_strength_clip})
LoRA 2: {args.lora_high_2_name} (Model: {args.lora_high_2_strength_model}, Clip: {args.lora_high_2_strength_clip})

[--- Low Noise (Stage 3) ---]
Model: {args.unet_low_name}
CFG: {args.cfg_low}
Sampler: {args.sampler_name_low}
Scheduler: {args.scheduler_low}
LoRA 1: {args.lora_low_1_name} (Model: {args.lora_low_1_strength_model}, Clip: {args.lora_low_1_strength_clip})
LoRA 2: {args.lora_low_2_name} (Model: {args.lora_low_2_strength_model}, Clip: {args.lora_low_2_strength_clip})

[--- VAE & CLIP ---]
VAE Model: {args.vae_name}
CLIP Model: {args.clip_name}
Connect LoRA to CLIP: {args.connect_lora_clip}

[--- Post-processing & Encoding ---]
Upscale Model: {args.upscale_model_name} (Ratio: {args.upscale_ratio})
Frame Interpolation (RIFE): {args.interpolation}
Target Frame Rate: {args.frame_rate} fps ({final_frame_rate} fps after interpolation)
Video Encoder: {args.video_encoder}
"""
            with open(log_file_path, 'w', encoding='utf-8') as f:
                f.write(log_content.strip())
            print(f"✅ Settings log written: {log_file_path}")
        except Exception as e:
            print(f"❌ Error while writing the settings log: {e}")

        # --- Stage 8: Post-run cleanup (delete only the 3 temporary folders, as requested) ---
        print("\nStage 8: Starting post-run cleanup (deleting only the temp, up, and interpolated folders)")
        try:
            temp_dirs = ["temp", "up", "interpolated"]
            output_dir = os.path.join(COMFYUI_BASE_PATH, "output")

            # 1. Delete only the temporary folders
            for d in temp_dirs:
                full_path = os.path.join(output_dir, d)
                if os.path.exists(full_path):
                    shutil.rmtree(full_path)
                    print(f" - Temporary folder deleted: {d}")

            # 2. Every *file* remaining in the output directory is left untouched (videos, png, json, txt are all kept)
            print(" - All files in the output folder (videos, logs, images, etc.) are kept.")
            print("✅ Post-run cleanup complete.")
        except Exception as e:
            print(f"❌ Error during post-run cleanup: {e}")
        # --------------------------------------------------------

if __name__ == "__main__":
    main()