arcacolab committed
Commit 3d3ce25 · verified
1 Parent(s): 03e7ba8

Upload run_generator_itstxt.py

Files changed (1)
  1. run_generator_itstxt.py +479 -0
run_generator_itstxt.py ADDED
@@ -0,0 +1,479 @@
+ #@title Create required file
+ # This file is executed as a subprocess from cell 4.
+
+ import sys
+ import os
+ import time
+ import glob
+ import gc
+ import torch
+ import subprocess
+ import random
+ import argparse
+ from typing import Sequence, Mapping, Any, Union
+ import shutil
+
+ # --- 0. Basic setup and argument parsing ---
+ def parse_args():
+     parser = argparse.ArgumentParser(description="ComfyUI Video Generation Script with All Controls from 1.py")
+     parser.add_argument("--positive_prompt", type=str, required=True); parser.add_argument("--negative_prompt", type=str, required=True)
+     parser.add_argument("--width", type=int, required=True); parser.add_argument("--height", type=int, required=True)
+     parser.add_argument("--length", type=int, required=True); parser.add_argument("--upscale_ratio", type=float, required=True)
+     parser.add_argument("--steps", type=int, default=4)
+     parser.add_argument("--cfg_high", type=float, default=1.0)
+     parser.add_argument("--cfg_low", type=float, default=1.0)
+     parser.add_argument("--sampler_name_high", type=str, default="euler"); parser.add_argument("--scheduler_high", type=str, default="simple")
+     parser.add_argument("--sampler_name_low", type=str, default="euler"); parser.add_argument("--scheduler_low", type=str, default="simple")
+     parser.add_argument("--noise_seed", type=int, default=-1); parser.add_argument("--split_point_percent", type=float, default=50.0)
+     parser.add_argument("--shift", type=float, default=8.0); parser.add_argument("--sageattention", type=str, default="on")
+     parser.add_argument("--unet_high_name", type=str, required=True); parser.add_argument("--unet_low_name", type=str, required=True)
+     parser.add_argument("--vae_name", type=str, required=True); parser.add_argument("--clip_name", type=str, required=True)
+     parser.add_argument("--upscale_model_name", type=str, default="None")
+     parser.add_argument("--upscale_model_scale", type=float, default=2.0)
+     parser.add_argument("--upscale_chunk_size", type=int, default=30)
+     parser.add_argument("--frame_rate", type=int, default=16); parser.add_argument("--interpolation", type=str, default="on")
+     parser.add_argument("--rife_fast_mode", type=str, default="on"); parser.add_argument("--rife_ensemble", type=str, default="on")
+     parser.add_argument("--rife_chunk_size", type=int, default=30)
+     parser.add_argument("--connect_lora_clip", type=str, default="off")
+     parser.add_argument("--video_encoder", type=str, default="GPU: HEVC (NVENC)"); parser.add_argument("--nvenc_cq", type=int, default=25); parser.add_argument("--nvenc_preset", type=str, default="p5"); parser.add_argument("--cpu_crf", type=int, default=19)  # FFmpeg
+     parser.add_argument("--lora_high_1_name", type=str, default="None"); parser.add_argument("--lora_high_1_strength_model", type=float, default=1.0); parser.add_argument("--lora_high_1_strength_clip", type=float, default=1.0)
+     parser.add_argument("--lora_high_2_name", type=str, default="None"); parser.add_argument("--lora_high_2_strength_model", type=float, default=1.0); parser.add_argument("--lora_high_2_strength_clip", type=float, default=1.0)
+     parser.add_argument("--lora_low_1_name", type=str, default="None"); parser.add_argument("--lora_low_1_strength_model", type=float, default=1.0); parser.add_argument("--lora_low_1_strength_clip", type=float, default=1.0)
+     parser.add_argument("--lora_low_2_name", type=str, default="None"); parser.add_argument("--lora_low_2_strength_model", type=float, default=1.0); parser.add_argument("--lora_low_2_strength_clip", type=float, default=1.0)
+     parser.add_argument("--input_resize_algo", type=str, default="bicubic")
+     parser.add_argument("--output_resize_algo", type=str, default="bicubic")
+     # Return all parsed arguments.
+     return parser.parse_args()
+
+ def to_bool(s: str) -> bool: return s.lower() in ['true', '1', 't', 'y', 'yes', 'on']
+ def clear_memory():
+     if torch.cuda.is_available(): torch.cuda.empty_cache(); torch.cuda.ipc_collect()
+     gc.collect()
+ COMFYUI_BASE_PATH = '/content/ComfyUI'
+ def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
+     try: return obj[index]
+     except (KeyError, TypeError):
+         if isinstance(obj, dict) and "result" in obj: return obj["result"][index]
+         raise
+ def add_comfyui_directory_to_sys_path() -> None:
+     if os.path.isdir(COMFYUI_BASE_PATH) and COMFYUI_BASE_PATH not in sys.path: sys.path.append(COMFYUI_BASE_PATH)
+ def import_custom_nodes() -> None:
+     try:
+         import nest_asyncio
+         nest_asyncio.apply()
+     except ImportError:
+         print("nest_asyncio not found, installing...")
+         try:
+             subprocess.run([sys.executable, "-m", "pip", "install", "-q", "nest_asyncio"], check=True)
+             import nest_asyncio
+             nest_asyncio.apply()
+             print("nest_asyncio installed and applied.")
+         except Exception as e:
+             print(f"Failed to install or apply nest_asyncio: {e}")
+     import asyncio, execution, server
+     from nodes import init_extra_nodes
+     try:
+         loop = asyncio.get_event_loop()
+         if loop.is_closed():
+             loop = asyncio.new_event_loop()
+             asyncio.set_event_loop(loop)
+     except RuntimeError:
+         loop = asyncio.new_event_loop()
+         asyncio.set_event_loop(loop)
+     server_instance = server.PromptServer(loop)
+     execution.PromptQueue(server_instance)
+     if not loop.is_running():
+         try:
+             loop.run_until_complete(init_extra_nodes())
+         except RuntimeError as e:
+             print(f"Note: Could not run init_extra_nodes synchronously, possibly due to existing loop state: {e}")
+             try:
+                 asyncio.ensure_future(init_extra_nodes())
+             except Exception as fut_e:
+                 print(f"Error trying async init_extra_nodes: {fut_e}")
+     else:
+         try:
+             asyncio.ensure_future(init_extra_nodes())
+         except Exception as fut_e:
+             print(f"Error trying async init_extra_nodes on running loop: {fut_e}")
+
+
+ def main():
+     args = parse_args()  # 💡 store all parsed arguments in args
+
+     print("🚀 Starting video generation (Full Control Mode, VRAM Optimized)...\n")
+
+     # 🚨🚨🚨 Modified section: clean the output folder, keeping .mp4, .mkv and .webm files 🚨🚨🚨
+     output_dir = os.path.join(COMFYUI_BASE_PATH, 'output')
+     print(f" - Cleaning previous outputs... (Output: {output_dir})")
+
+     deleted_count = 0
+     try:
+         for item_name in os.listdir(output_dir):
+             item_path = os.path.join(output_dir, item_name)
+
+             # 💡 Condition: video files (.mp4, .mkv, .webm) are preserved, not deleted.
+             if item_name.lower().endswith(('.mp4', '.mkv', '.webm')):
+                 print(f" - 🗄️ Keeping video file '{item_name}'.")
+                 continue
+
+             # Delete files and symlinks
+             if os.path.isfile(item_path) or os.path.islink(item_path):
+                 os.unlink(item_path)
+                 deleted_count += 1
+             # Delete directories recursively
+             elif os.path.isdir(item_path):
+                 shutil.rmtree(item_path)
+                 deleted_count += 1
+
+         print(f" ✅ Cleanup complete. {deleted_count} item(s) deleted, preserved videos excluded.")
+     except Exception as e:
+         print(f" ❌ Error while cleaning the output folder: {e}")
+     # 🚨🚨🚨 End of modified section 🚨🚨🚨
+
+     # Recreate temp folders (they may have been deleted during cleanup)
+     os.makedirs(f"{COMFYUI_BASE_PATH}/output/temp", exist_ok=True)
+     os.makedirs(f"{COMFYUI_BASE_PATH}/output/up", exist_ok=True)
+     os.makedirs(f"{COMFYUI_BASE_PATH}/output/interpolated", exist_ok=True)
+
+     add_comfyui_directory_to_sys_path()
+     try: from utils.extra_config import load_extra_path_config
+     except ImportError: print("⚠️ Failed to load ComfyUI's extra_model_paths.yaml (ignoring and continuing)"); load_extra_path_config = lambda x: None
+     extra_model_paths_file = os.path.join(COMFYUI_BASE_PATH, "extra_model_paths.yaml")
+     if os.path.exists(extra_model_paths_file): load_extra_path_config(extra_model_paths_file)
+     print("Initializing ComfyUI custom nodes..."); import_custom_nodes(); from nodes import NODE_CLASS_MAPPINGS; print("Custom node initialization complete.")
+
+     if args.noise_seed == -1: final_seed = random.randint(1, 2**64); print(f" - Generated random seed: {final_seed}")
+     else: final_seed = args.noise_seed; print(f" - Using fixed seed: {final_seed}")
+     split_step = max(0, int(args.steps * (args.split_point_percent / 100.0))); print(f" - Splitting at step {split_step} ({args.split_point_percent}%) of {args.steps} total steps")
+     loras_in_use = not (args.lora_high_1_name == "None" and args.lora_high_2_name == "None" and args.lora_low_1_name == "None" and args.lora_low_2_name == "None")
+     connect_clip_to_lora = to_bool(args.connect_lora_clip); should_keep_clip_loaded = loras_in_use and connect_clip_to_lora
+
+     with torch.inference_mode():
+         loadimage=NODE_CLASS_MAPPINGS["LoadImage"](); upscalemodelloader=NODE_CLASS_MAPPINGS["UpscaleModelLoader"](); cliploader=NODE_CLASS_MAPPINGS["CLIPLoader"](); vaeloader=NODE_CLASS_MAPPINGS["VAELoader"](); cliptextencode=NODE_CLASS_MAPPINGS["CLIPTextEncode"](); unetloadergguf=NODE_CLASS_MAPPINGS["UnetLoaderGGUF"](); loraloader=NODE_CLASS_MAPPINGS["LoraLoader"](); imageresizekjv2=NODE_CLASS_MAPPINGS["ImageResizeKJv2"](); wanimagetovideo=NODE_CLASS_MAPPINGS["WanImageToVideo"](); modelsamplingsd3=NODE_CLASS_MAPPINGS["ModelSamplingSD3"](); ksampleradvanced=NODE_CLASS_MAPPINGS["KSamplerAdvanced"](); vaedecode=NODE_CLASS_MAPPINGS["VAEDecode"](); vhs_loadimagespath=NODE_CLASS_MAPPINGS["VHS_LoadImagesPath"](); imageupscalewithmodel=NODE_CLASS_MAPPINGS["ImageUpscaleWithModel"](); imagescaleby=NODE_CLASS_MAPPINGS["ImageScaleBy"](); rife_vfi=NODE_CLASS_MAPPINGS["RIFE VFI"](); vhs_videocombine=NODE_CLASS_MAPPINGS["VHS_VideoCombine"](); saveimage=NODE_CLASS_MAPPINGS["SaveImage"]()
+         clipvisionloader=NODE_CLASS_MAPPINGS["CLIPVisionLoader"]()
+         clipvisionencode=NODE_CLASS_MAPPINGS["CLIPVisionEncode"]()
+         pathchsageattentionkj=NODE_CLASS_MAPPINGS["PathchSageAttentionKJ"]()
+
+         # --- ✨ Stage 1: CLIP Vision logic added, models released afterwards ---
+         print("\nStage 1: loading data and creating the initial latent...")
+         print(f" - Loading CLIP: {args.clip_name}")
+         cliploader_460 = cliploader.load_clip(clip_name=args.clip_name, type="wan", device="default")
+         cliptextencode_462 = cliptextencode.encode(text=args.positive_prompt, clip=get_value_at_index(cliploader_460, 0))
+         cliptextencode_463 = cliptextencode.encode(text=args.negative_prompt, clip=get_value_at_index(cliploader_460, 0))
+         loadimage_88 = loadimage.load_image(image="example.png")
+
+         imageresizekjv2_401 = imageresizekjv2.resize(
+             width=args.width, height=args.height,
+             upscale_method=args.input_resize_algo,
+             image=get_value_at_index(loadimage_88, 0), keep_proportion="crop",
+             pad_color="0, 0, 0", crop_position="center", divisible_by=2,
+             unique_id=random.randint(1, 2**64)
+         )
+
+         print(f" - Loading CLIP Vision: clip_vision_h.safetensors")
+         clipvisionloader_cv = clipvisionloader.load_clip(clip_name="clip_vision_h.safetensors")
+         print(f" - Encoding with CLIP Vision...")
+         clipvisionencode_cv = clipvisionencode.encode(
+             crop="none",
+             clip_vision=get_value_at_index(clipvisionloader_cv, 0),
+             image=get_value_at_index(imageresizekjv2_401, 0)
+         )
+         clip_vision_output = get_value_at_index(clipvisionencode_cv, 0)
+
+         print(f" - Temporarily loading VAE (for initial latent creation): {args.vae_name}")
+         vaeloader_temp = vaeloader.load_vae(vae_name=args.vae_name)
+
+         wanimagetovideo_464 = wanimagetovideo.EXECUTE_NORMALIZED(
+             width=get_value_at_index(imageresizekjv2_401, 1),
+             height=get_value_at_index(imageresizekjv2_401, 2),
+             length=args.length,
+             batch_size=1,
+             positive=get_value_at_index(cliptextencode_462, 0),
+             negative=get_value_at_index(cliptextencode_463, 0),
+             vae=get_value_at_index(vaeloader_temp, 0),
+             clip_vision_output=clip_vision_output,
+             start_image=get_value_at_index(imageresizekjv2_401, 0)
+         )
+
+         if not should_keep_clip_loaded: print(" ✨ (Optimization) Stage 1 complete, releasing the CLIP model immediately."); del cliploader_460
+         else: print(" ⚠️ (Setting) The LoRA CLIP connection option is enabled, keeping the CLIP model until stage 3.")
+
+         print(" ✨ (Optimization) Stage 1 complete, releasing the temporary VAE and CLIP Vision models.")
+         del vaeloader_temp, clipvisionloader_cv, clipvisionencode_cv, clip_vision_output
+
+         clear_memory(); print("Stage 1 complete.")
+         # --- End of stage 1 changes ---
+
+         print(f"\nStage 2: starting High Noise sampling..."); print(f" - Loading UNet High: {args.unet_high_name}"); unetloadergguf_495 = unetloadergguf.load_unet(unet_name=args.unet_high_name); model = get_value_at_index(unetloadergguf_495, 0); clip = get_value_at_index(cliploader_460, 0) if should_keep_clip_loaded else None; model_for_patching = model
+         if to_bool(args.sageattention): print(" ✨ Applying SageAttention patch (High)..."); pathchsageattentionkj_124 = pathchsageattentionkj.patch(sage_attention="auto", model=model_for_patching); model_for_patching = get_value_at_index(pathchsageattentionkj_124, 0)
+         if args.lora_high_1_name != "None": print(f" - H LoRA 1: {args.lora_high_1_name}"); model_for_patching, clip = loraloader.load_lora(lora_name=args.lora_high_1_name, strength_model=args.lora_high_1_strength_model, strength_clip=args.lora_high_1_strength_clip, model=model_for_patching, clip=clip)
+         if args.lora_high_2_name != "None": print(f" - H LoRA 2: {args.lora_high_2_name}"); model_for_patching, clip = loraloader.load_lora(lora_name=args.lora_high_2_name, strength_model=args.lora_high_2_strength_model, strength_clip=args.lora_high_2_strength_clip, model=model_for_patching, clip=clip)
+         shifted_model = get_value_at_index(modelsamplingsd3.patch(shift=args.shift, model=model_for_patching), 0); final_model = shifted_model
+
+         # ✨ Sampling start marker for Gradio log parsing
+         print(f"### SAMPLER_START: HIGH_NOISE, Steps 0 to {split_step} (CFG {args.cfg_high}) ###")
+
+         # Modified: use cfg=args.cfg_high
+         ksampleradvanced_466 = ksampleradvanced.sample(add_noise="enable", noise_seed=final_seed, steps=args.steps, cfg=args.cfg_high, sampler_name=args.sampler_name_high, scheduler=args.scheduler_high, start_at_step=0, end_at_step=split_step, return_with_leftover_noise="enable", model=final_model, positive=get_value_at_index(wanimagetovideo_464, 0), negative=get_value_at_index(wanimagetovideo_464, 1), latent_image=get_value_at_index(wanimagetovideo_464, 2))
+
+         # ✨ Sampling end marker for Gradio log parsing
+         print("### SAMPLER_END: HIGH_NOISE ###")
+
+         if to_bool(args.sageattention): del pathchsageattentionkj_124
+         del unetloadergguf_495, model, clip, model_for_patching, shifted_model, final_model; clear_memory(); print("Stage 2 complete.")
+
+         print(f"\nStage 3: starting Low Noise sampling..."); print(f" - Loading UNet Low: {args.unet_low_name}"); unetloadergguf_496 = unetloadergguf.load_unet(unet_name=args.unet_low_name); model = get_value_at_index(unetloadergguf_496, 0); clip = get_value_at_index(cliploader_460, 0) if should_keep_clip_loaded else None; model_for_patching = model
+         if to_bool(args.sageattention): print(" ✨ Applying SageAttention patch (Low)..."); pathchsageattentionkj_129 = pathchsageattentionkj.patch(sage_attention="auto", model=model_for_patching); model_for_patching = get_value_at_index(pathchsageattentionkj_129, 0)
+         if args.lora_low_1_name != "None": print(f" - L LoRA 1: {args.lora_low_1_name}"); model_for_patching, clip = loraloader.load_lora(lora_name=args.lora_low_1_name, strength_model=args.lora_low_1_strength_model, strength_clip=args.lora_low_1_strength_clip, model=model_for_patching, clip=clip)
+         if args.lora_low_2_name != "None": print(f" - L LoRA 2: {args.lora_low_2_name}"); model_for_patching, clip = loraloader.load_lora(lora_name=args.lora_low_2_name, strength_model=args.lora_low_2_strength_model, strength_clip=args.lora_low_2_strength_clip, model=model_for_patching, clip=clip)
+         shifted_model = get_value_at_index(modelsamplingsd3.patch(shift=args.shift, model=model_for_patching), 0); final_model = shifted_model
+
+         # ✨ Sampling start marker for Gradio log parsing
+         print(f"### SAMPLER_START: LOW_NOISE, Steps {split_step} to {args.steps} (CFG {args.cfg_low}) ###")
+
+         # Modified: use cfg=args.cfg_low
+         ksampleradvanced_465 = ksampleradvanced.sample(add_noise="disable", noise_seed=final_seed, steps=args.steps, cfg=args.cfg_low, sampler_name=args.sampler_name_low, scheduler=args.scheduler_low, start_at_step=split_step, end_at_step=10000, return_with_leftover_noise="disable", model=final_model, positive=get_value_at_index(wanimagetovideo_464, 0), negative=get_value_at_index(wanimagetovideo_464, 1), latent_image=get_value_at_index(ksampleradvanced_466, 0))
+
+         # ✨ Sampling end marker for Gradio log parsing
+         print("### SAMPLER_END: LOW_NOISE ###")
+
+         if to_bool(args.sageattention): del pathchsageattentionkj_129
+         if should_keep_clip_loaded: print(" ✨ (Memory) LoRA CLIP connection no longer needed, releasing the CLIP model."); del cliploader_460
+         del unetloadergguf_496, model, clip, model_for_patching, shifted_model, final_model, ksampleradvanced_466, wanimagetovideo_464; clear_memory(); print("Stage 3 complete.")
+
+         print(f"\nStage 4: VAE decoding and temporary save..."); print(f" - Loading VAE model (for decoding): {args.vae_name}"); vaeloader_461 = vaeloader.load_vae(vae_name=args.vae_name); vaedecode_469 = vaedecode.decode(samples=get_value_at_index(ksampleradvanced_465, 0), vae=get_value_at_index(vaeloader_461, 0)); saveimage.save_images(filename_prefix="temp/example", images=get_value_at_index(vaedecode_469, 0))
+         del ksampleradvanced_465, vaeloader_461, vaedecode_469, loadimage_88, imageresizekjv2_401; clear_memory(); print("Stage 4 complete.")
+
+         combine_input_dir_for_ffmpeg = f"{COMFYUI_BASE_PATH}/output/temp"
+         if args.upscale_ratio > 1:
+             if args.upscale_model_name == "None": print("\nStage 5: skipping upscaling (no model selected).")
+             else:
+                 print(f"\nStage 5: upscaling frames..."); print(f" - Loading upscale model: {args.upscale_model_name}"); upscalemodelloader_384 = upscalemodelloader.load_model(model_name=args.upscale_model_name); chunk_size = args.upscale_chunk_size; base_dir = f"{COMFYUI_BASE_PATH}/output/temp"; scale_by_ratio = args.upscale_ratio / args.upscale_model_scale
+                 total_frames = 0
+                 try:
+                     temp_files = [f for f in os.listdir(base_dir) if f.endswith(('.png', '.jpg', '.jpeg', '.webp'))]
+                     total_frames = len(temp_files)
+                     if total_frames == 0:
+                         raise FileNotFoundError("There are no frames to upscale in the 'temp' folder.")
+                 except Exception as e:
+                     print(f" ❌ Upscale stage 5 aborted: could not read frames from the 'temp' folder. (Error: {e})")
+                     if 'upscalemodelloader_384' in locals(): del upscalemodelloader_384
+                     clear_memory()
+                     raise
+                 print(f" - Processing {total_frames} frames in chunks of {chunk_size}...")
+                 for i in range(0, total_frames, chunk_size):
+                     print(f" - Processing batch (frames {i} to {min(i + chunk_size, total_frames) - 1})...")
+                     vhs_load_chunk = vhs_loadimagespath.load_images(directory=base_dir, skip_first_images=i, image_load_cap=chunk_size); loaded_images = get_value_at_index(vhs_load_chunk, 0)
+                     if loaded_images is None: print(" - (Warning) No images could be loaded, skipping this batch."); continue
+                     imageupscale_chunk = imageupscalewithmodel.upscale(upscale_model=get_value_at_index(upscalemodelloader_384, 0), image=loaded_images)
+                     imagescale_chunk = imagescaleby.upscale(
+                         upscale_method=args.output_resize_algo,
+                         scale_by=scale_by_ratio,
+                         image=get_value_at_index(imageupscale_chunk, 0)
+                     )
+                     saveimage.save_images(filename_prefix="up/example", images=get_value_at_index(imagescale_chunk, 0))
+                     del vhs_load_chunk, loaded_images, imageupscale_chunk, imagescale_chunk; clear_memory()
+                 del upscalemodelloader_384; clear_memory(); combine_input_dir_for_ffmpeg = f"{COMFYUI_BASE_PATH}/output/up"; print("Stage 5 complete.")
+         else: print("\nStage 5: skipping upscaling (ratio 1.0).")
+
+         # --- ✨ Stage 6: revised RIFE chunk logic (overlap applied) ---
+         print("\nStage 6: preparing video combination..."); final_frame_rate = float(args.frame_rate); ffmpeg_input_dir = combine_input_dir_for_ffmpeg
+         if to_bool(args.interpolation):
+             print(" - Enabling frame interpolation (RIFE)."); interpolated_dir = f"{COMFYUI_BASE_PATH}/output/interpolated"; source_dir = combine_input_dir_for_ffmpeg
+             total_frames_rife = 0
+             try:
+                 temp_files = [f for f in os.listdir(source_dir) if f.endswith(('.png', '.jpg', '.jpeg', '.webp'))]; total_frames_rife = len(temp_files)
+                 if total_frames_rife == 0: raise FileNotFoundError(f"There are no frames to interpolate with RIFE in the '{source_dir}' folder.")
+             except Exception as e: print(f" ❌ RIFE stage 6 aborted: could not read frames from the '{source_dir}' folder. (Error: {e})"); raise
+
+             chunk_size = args.rife_chunk_size
+             print(f" - Processing {total_frames_rife} frames in RIFE chunks of {chunk_size} (overlap applied)...")
+
+             current_frame_idx = 0
+             is_first_chunk = True
+
+             while current_frame_idx < total_frames_rife:
+                 load_from = current_frame_idx
+                 load_cap = chunk_size
+
+                 if not is_first_chunk:
+                     load_from -= 1  # overlap by one frame
+                     load_cap += 1   # load one extra frame to cover the overlap
+
+                 # Handle the final chunk boundary
+                 if load_from + load_cap > total_frames_rife:
+                     load_cap = total_frames_rife - load_from
+
+                 # RIFE needs at least 2 frames
+                 if load_cap < 2:
+                     print(f" - (Warning) Not enough frames (2 needed) for RIFE processing, skipping the final batch.")
+                     break
+
+                 print(f" - Processing RIFE batch (source frames {load_from} to {load_from + load_cap - 1})...")
+
+                 vhs_load_chunk = vhs_loadimagespath.load_images(directory=source_dir, skip_first_images=load_from, image_load_cap=load_cap)
+                 loaded_images = get_value_at_index(vhs_load_chunk, 0)
+
+                 if loaded_images is None:
+                     print(" - (Warning) No images could be loaded, skipping this batch.")
+                     current_frame_idx += chunk_size
+                     is_first_chunk = False
+                     continue
+
+                 rife_chunk_result_tensor = get_value_at_index(rife_vfi.vfi(
+                     ckpt_name="rife49.pth",
+                     multiplier=2,
+                     fast_mode=to_bool(args.rife_fast_mode),
+                     ensemble=to_bool(args.rife_ensemble),
+                     frames=loaded_images
+                 ), 0)
+
+                 images_to_save = rife_chunk_result_tensor
+                 if not is_first_chunk:
+                     # Every chunk after the first drops its overlapping first frame (tensor slicing)
+                     print(f" - (Overlap) Removing 1 duplicate frame before saving")
+                     images_to_save = rife_chunk_result_tensor[1:]
+
+                 saveimage.save_images(filename_prefix="interpolated/example", images=images_to_save)
+
+                 del vhs_load_chunk, loaded_images, rife_chunk_result_tensor, images_to_save; clear_memory()
+
+                 current_frame_idx += chunk_size
+                 is_first_chunk = False
+
+             ffmpeg_input_dir = interpolated_dir; final_frame_rate *= 2
+         else: print(" - Frame interpolation is disabled.")
+         # --- End of stage 6 changes ---
+
+         print(f" - Combining the final video with FFmpeg ({args.video_encoder})..."); print(f" - Input folder: '{ffmpeg_input_dir}'")
+         input_pattern = os.path.join(ffmpeg_input_dir, "example_%05d_.png")
+         timestamp = time.strftime("%Y%m%d-%H%M%S"); output_filename = f"AnimateDiff_{timestamp}.mp4"; output_path = os.path.join(COMFYUI_BASE_PATH, "output", output_filename)
+         ffmpeg_cmd = ["ffmpeg", "-framerate", str(final_frame_rate), "-i", input_pattern]
+         encoder_choice = args.video_encoder
+         if encoder_choice == "GPU: HEVC (NVENC)": ffmpeg_cmd.extend(["-c:v", "hevc_nvenc", "-cq", str(args.nvenc_cq), "-preset", args.nvenc_preset, "-tag:v", "hvc1"])
+         elif encoder_choice == "GPU: H.264 (NVENC)": ffmpeg_cmd.extend(["-c:v", "h264_nvenc", "-cq", str(args.nvenc_cq), "-preset", args.nvenc_preset])
+         else: ffmpeg_cmd.extend(["-c:v", "libx264", "-crf", str(args.cpu_crf), "-preset", "medium"])
+         ffmpeg_cmd.extend(["-pix_fmt", "yuv420p", "-y", output_path])
+         print(f" - Running command: {' '.join(ffmpeg_cmd)}")
+         try:
+             result = subprocess.run(ffmpeg_cmd, capture_output=True, text=True, check=True, encoding='utf-8')
+             print(" - FFmpeg finished.")
+         except FileNotFoundError: print(" ❌ Error: the 'ffmpeg' command was not found. Make sure it is installed on the system."); raise
+         except subprocess.CalledProcessError as e:
+             print(f" ❌ Error: FFmpeg failed (Return code: {e.returncode})")
+             if e.stdout: print(f" FFmpeg stdout:\n{e.stdout}")
+             if e.stderr: print(f" FFmpeg stderr:\n{e.stderr}")
+             raise
+         except Exception as e: print(f" ❌ Error: unexpected error while running FFmpeg: {e}"); raise
+         print("✅ All stages complete.")
+
+         # --- ✨ Determine the final video path and make a copy ---
+         latest_video = None
+
+         if os.path.exists(output_path):
+             latest_video = output_path
+             print(f"LATEST_VIDEO_PATH:{latest_video}")
+         else:
+             output_dir = os.path.join(COMFYUI_BASE_PATH, "output")
+             video_files = glob.glob(os.path.join(output_dir, '**', '*.mp4'), recursive=True) + \
+                           glob.glob(os.path.join(output_dir, '**', '*.mkv'), recursive=True)
+
+             if not video_files:
+                 raise FileNotFoundError("Could not find the generated video file!")
+
+             latest_video = max(video_files, key=os.path.getctime)
+             print(f"LATEST_VIDEO_PATH:{latest_video}")
+
+         if latest_video is None:
+             raise FileNotFoundError("Could not determine the final video path. Check the script.")
+
+         base, ext = os.path.splitext(latest_video)
+         original_copy_path = f"{base}_original{ext}"
+         try:
+             shutil.copy2(latest_video, original_copy_path)
+             print(f"✅ Original copy created: {original_copy_path}")
+             print(f"ORIGINAL_COPY_PATH:{original_copy_path}")
+         except Exception as e:
+             print(f"❌ Failed to create the original copy: {e}")
+         # --- Video saving complete ---
+
+         # 🚨🚨🚨 Modified: added settings TXT file saving logic 🚨🚨🚨
+         txt_path = f"{base}.txt"
+
+         try:
+             print(f" - Creating the settings log TXT file: {txt_path}")
+
+             # Convert all arguments (args) into a dictionary
+             settings = vars(args)
+
+             with open(txt_path, 'w', encoding='utf-8') as f:
+                 f.write("========== 🎬 Video Generation Settings (Full Control) ==========\n\n")
+
+                 f.write("--- 📝 Prompts & Seed ---\n")
+                 f.write(f"Seed: {final_seed}\n")
+                 f.write(f"Positive Prompt: {settings['positive_prompt']}\n")
+                 f.write(f"Negative Prompt: {settings['negative_prompt']}\n\n")
+
+                 f.write("--- 🖼️ Size & Length ---\n")
+                 f.write(f"Width: {settings['width']}, Height: {settings['height']}\n")
+                 f.write(f"Length (Frames): {settings['length']}\n")
+                 f.write(f"Upscale Ratio (Final): {settings['upscale_ratio']}\n")
+                 f.write(f"Input Resize Algo: {settings['input_resize_algo']}\n")
+                 f.write(f"Output Resize Algo: {settings['output_resize_algo']}\n\n")
+
+                 f.write("--- ⚙️ Sampler & Steps ---\n")
+                 f.write(f"Total Steps: {settings['steps']}\n")
+                 f.write(f"Split Point (%): {settings['split_point_percent']} -> Step {split_step}\n")
+                 f.write(f"Shift (Speed): {settings['shift']}\n")
+                 f.write(f"SageAttention: {settings['sageattention']}\n")
+                 f.write(f"Connect LoRA Clip: {settings['connect_lora_clip']}\n\n")
+
+                 f.write("--- ⚡ High Noise (Initial) ---\n")
+                 f.write(f"Model (Unet): {settings['unet_high_name']}\n")
+                 f.write(f"CFG: {settings['cfg_high']}\n")
+                 f.write(f"Sampler: {settings['sampler_name_high']}\n")
+                 f.write(f"Scheduler: {settings['scheduler_high']}\n")
+                 if settings['lora_high_1_name'] != 'None':
+                     f.write(f"LoRA 1: {settings['lora_high_1_name']} (M:{settings['lora_high_1_strength_model']}, C:{settings['lora_high_1_strength_clip']})\n")
+                 if settings['lora_high_2_name'] != 'None':
+                     f.write(f"LoRA 2: {settings['lora_high_2_name']} (M:{settings['lora_high_2_strength_model']}, C:{settings['lora_high_2_strength_clip']})\n\n")
+
+                 f.write("--- 🌿 Low Noise (Refiner) ---\n")
+                 f.write(f"Model (Unet): {settings['unet_low_name']}\n")
+                 f.write(f"CFG: {settings['cfg_low']}\n")
+                 f.write(f"Sampler: {settings['sampler_name_low']}\n")
+                 f.write(f"Scheduler: {settings['scheduler_low']}\n")
+                 if settings['lora_low_1_name'] != 'None':
+                     f.write(f"LoRA 1: {settings['lora_low_1_name']} (M:{settings['lora_low_1_strength_model']}, C:{settings['lora_low_1_strength_clip']})\n")
+                 if settings['lora_low_2_name'] != 'None':
+                     f.write(f"LoRA 2: {settings['lora_low_2_name']} (M:{settings['lora_low_2_strength_model']}, C:{settings['lora_low_2_strength_clip']})\n\n")
+
+                 f.write("--- 💾 Other Models ---\n")
+                 f.write(f"VAE: {settings['vae_name']}\n")
+                 f.write(f"CLIP: {settings['clip_name']}\n")
+                 f.write(f"Upscale Model: {settings['upscale_model_name']} (Scale: {settings['upscale_model_scale']}, Chunk: {settings['upscale_chunk_size']})\n\n")
+
+                 f.write("--- 🎥 Video Output & Interpolation ---\n")
+                 f.write(f"Frame Rate (Base): {settings['frame_rate']}\n")
+                 f.write(f"Interpolation (RIFE): {settings['interpolation']}\n")
+                 if to_bool(settings['interpolation']):
+                     f.write(f" - RIFE Fast Mode: {settings['rife_fast_mode']}\n")
+                     f.write(f" - RIFE Ensemble: {settings['rife_ensemble']}\n")
+                     f.write(f" - RIFE Chunk Size: {settings['rife_chunk_size']}\n")
+                 f.write(f"Video Encoder: {settings['video_encoder']}\n")
+                 if 'NVENC' in settings['video_encoder']:
+                     f.write(f" - NVENC CQ: {settings['nvenc_cq']}, Preset: {settings['nvenc_preset']}\n")
+                 elif 'CPU' in settings['video_encoder']:
+                     f.write(f" - CPU CRF: {settings['cpu_crf']}\n")
+
+                 f.write("\n========================================================\n")
+
+             print(f"✅ Settings log TXT file saved: {os.path.basename(txt_path)}")
+
+         except Exception as e:
+             print(f"❌ Failed to save the settings log TXT file: {e}")
+         # 🚨🚨🚨 End of TXT saving logic 🚨🚨🚨
+
+ if __name__ == "__main__":
+     main()
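
For reference, the header comment says this file is executed as a subprocess from cell 4 of the notebook. A minimal sketch of such an invocation, including how the caller might consume the log markers the script prints (### SAMPLER_START/END, LATEST_VIDEO_PATH:, ORIGINAL_COPY_PATH:), is shown below; the paths, model file names, and prompt values are placeholders, not the ones used by the actual notebook.

import subprocess
import sys

# Hypothetical launcher for run_generator_itstxt.py; every value below is a placeholder.
cmd = [
    sys.executable, "/content/run_generator_itstxt.py",
    "--positive_prompt", "a cat walking on the beach",
    "--negative_prompt", "blurry, low quality",
    "--width", "480", "--height", "832",
    "--length", "81", "--upscale_ratio", "1.0",
    "--unet_high_name", "wan_high_noise.gguf",    # placeholder model name
    "--unet_low_name", "wan_low_noise.gguf",      # placeholder model name
    "--vae_name", "wan_vae.safetensors",          # placeholder model name
    "--clip_name", "umt5_xxl.safetensors",        # placeholder model name
]
video_path = None
with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True) as proc:
    for line in proc.stdout:
        print(line, end="")  # progress lines and ### SAMPLER_* markers pass through
        if line.startswith("LATEST_VIDEO_PATH:"):
            video_path = line.split(":", 1)[1].strip()
print("Final video:", video_path)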