arcacolab committed on
Commit
03e7ba8
·
verified ·
1 Parent(s): a841782

Update run_generator.py

Browse files
Files changed (1) hide show
  1. run_generator.py +51 -23
run_generator.py CHANGED
@@ -11,20 +11,17 @@ import subprocess
11
  import random
12
  import argparse
13
  from typing import Sequence, Mapping, Any, Union
14
- import shutil # ✨ 볡사본 생성을 μœ„ν•΄ shutil μž„ν¬νŠΈ
15
 
16
- # --- 0. κΈ°λ³Έ μ„€μ • 및 인수 νŒŒμ‹± ---\n",
17
  def parse_args():
18
  parser = argparse.ArgumentParser(description="ComfyUI Video Generation Script with All Controls from 1.py")
19
  parser.add_argument("--positive_prompt", type=str, required=True); parser.add_argument("--negative_prompt", type=str, required=True)
20
  parser.add_argument("--width", type=int, required=True); parser.add_argument("--height", type=int, required=True)
21
  parser.add_argument("--length", type=int, required=True); parser.add_argument("--upscale_ratio", type=float, required=True)
22
-
23
- # ✨ μˆ˜μ •: CFG 인자 뢄리
24
  parser.add_argument("--steps", type=int, default=4)
25
- parser.add_argument("--cfg_high", type=float, default=1.0) # High
26
- parser.add_argument("--cfg_low", type=float, default=1.0) # Low
27
-
28
  parser.add_argument("--sampler_name_high", type=str, default="euler"); parser.add_argument("--scheduler_high", type=str, default="simple")
29
  parser.add_argument("--sampler_name_low", type=str, default="euler"); parser.add_argument("--scheduler_low", type=str, default="simple")
30
  parser.add_argument("--noise_seed", type=int, default=-1); parser.add_argument("--split_point_percent", type=float, default=50.0)
@@ -103,10 +100,41 @@ def import_custom_nodes() -> None:
103
  def main():
104
  args = parse_args()
105
  print("πŸš€ λ™μ˜μƒ 생성을 μ‹œμž‘ν•©λ‹ˆλ‹€ (Full Control Mode, VRAM Optimized)...\n")
106
- # ✨ SyntaxError μˆ˜μ •: f-string 끝의 λΆˆν•„μš”ν•œ \ 제거
107
- subprocess.run(f"rm -rf {COMFYUI_BASE_PATH}/output/*", shell=True, check=True)
108
- os.makedirs(f"{COMFYUI_BASE_PATH}/output/temp", exist_ok=True); os.makedirs(f"{COMFYUI_BASE_PATH}/output/up", exist_ok=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
109
  os.makedirs(f"{COMFYUI_BASE_PATH}/output/interpolated", exist_ok=True)
 
110
  add_comfyui_directory_to_sys_path()
111
  try: from utils.extra_config import load_extra_path_config
112
  except ImportError: print("⚠️ ComfyUI의 extra_model_paths.yaml λ‘œλ”© μ‹€νŒ¨ (λ¬΄μ‹œν•˜κ³  μ§„ν–‰)"); load_extra_path_config = lambda x: None
@@ -126,10 +154,9 @@ def main():
126
  clipvisionencode=NODE_CLASS_MAPPINGS["CLIPVisionEncode"]()
127
  pathchsageattentionkj=NODE_CLASS_MAPPINGS["PathchSageAttentionKJ"]()
128
 
129
- # --- ✨ 1단계: CLIP Vision 둜직 μΆ”κ°€ 및 λ©”λͺ¨λ¦¬ ν•΄μ œ ---\n",
130
  print("\n1단계: 데이터 λ‘œλ”© 및 초기 Latent 생성 쀑...");
131
  print(f" - CLIP λ‘œλ”©: {args.clip_name}");
132
- # ✨ SyntaxError μˆ˜μ •: λΆˆν•„μš”ν•œ \ 제거
133
  cliploader_460 = cliploader.load_clip(clip_name=args.clip_name, type="wan", device="default");
134
  cliptextencode_462 = cliptextencode.encode(text=args.positive_prompt, clip=get_value_at_index(cliploader_460, 0));
135
  cliptextencode_463 = cliptextencode.encode(text=args.negative_prompt, clip=get_value_at_index(cliploader_460, 0));
@@ -137,7 +164,7 @@ def main():
137
 
138
  imageresizekjv2_401 = imageresizekjv2.resize(
139
  width=args.width, height=args.height,
140
- upscale_method=args.input_resize_algo, # ✨ 변경됨
141
  image=get_value_at_index(loadimage_88, 0), keep_proportion="crop",
142
  pad_color="0, 0, 0", crop_position="center", divisible_by=2,
143
  unique_id=random.randint(1, 2**64)
@@ -149,7 +176,7 @@ def main():
149
  clipvisionencode_cv = clipvisionencode.encode(
150
  crop="none",
151
  clip_vision=get_value_at_index(clipvisionloader_cv, 0),
152
- image=get_value_at_index(imageresizekjv2_401, 0) # λ¦¬μ‚¬μ΄μ¦ˆλœ 이미지 μ‚¬μš©
153
  );
154
  clip_vision_output = get_value_at_index(clipvisionencode_cv, 0)
155
 
@@ -164,7 +191,7 @@ def main():
164
  positive=get_value_at_index(cliptextencode_462, 0),
165
  negative=get_value_at_index(cliptextencode_463, 0),
166
  vae=get_value_at_index(vaeloader_temp, 0),
167
- clip_vision_output=clip_vision_output, # ✨ μΆ”κ°€λœ 인수
168
  start_image=get_value_at_index(imageresizekjv2_401, 0)
169
  );
170
 
@@ -183,7 +210,7 @@ def main():
183
  if args.lora_high_2_name != "None": print(f" - H LoRA 2: {args.lora_high_2_name}"); model_for_patching, clip = loraloader.load_lora(lora_name=args.lora_high_2_name, strength_model=args.lora_high_2_strength_model, strength_clip=args.lora_high_2_strength_clip, model=model_for_patching, clip=clip)
184
  shifted_model = get_value_at_index(modelsamplingsd3.patch(shift=args.shift, model=model_for_patching), 0); final_model = shifted_model;
185
 
186
- # ✨ μˆ˜μ •: cfg=args.cfg_high μ‚¬μš©
187
  ksampleradvanced_466 = ksampleradvanced.sample(add_noise="enable", noise_seed=final_seed, steps=args.steps, cfg=args.cfg_high, sampler_name=args.sampler_name_high, scheduler=args.scheduler_high, start_at_step=0, end_at_step=split_step, return_with_leftover_noise="enable", model=final_model, positive=get_value_at_index(wanimagetovideo_464, 0), negative=get_value_at_index(wanimagetovideo_464, 1), latent_image=get_value_at_index(wanimagetovideo_464, 2));
188
 
189
  if to_bool(args.sageattention): del pathchsageattentionkj_124
@@ -195,7 +222,7 @@ def main():
195
  if args.lora_low_2_name != "None": print(f" - L LoRA 2: {args.lora_low_2_name}"); model_for_patching, clip = loraloader.load_lora(lora_name=args.lora_low_2_name, strength_model=args.lora_low_2_strength_model, strength_clip=args.lora_low_2_strength_clip, model=model_for_patching, clip=clip)
196
  shifted_model = get_value_at_index(modelsamplingsd3.patch(shift=args.shift, model=model_for_patching), 0); final_model = shifted_model;
197
 
198
- # ✨ μˆ˜μ •: cfg=args.cfg_low μ‚¬μš©
199
  ksampleradvanced_465 = ksampleradvanced.sample(add_noise="disable", noise_seed=final_seed, steps=args.steps, cfg=args.cfg_low, sampler_name=args.sampler_name_low, scheduler=args.scheduler_low, start_at_step=split_step, end_at_step=10000, return_with_leftover_noise="disable", model=final_model, positive=get_value_at_index(wanimagetovideo_464, 0), negative=get_value_at_index(wanimagetovideo_464, 1), latent_image=get_value_at_index(ksampleradvanced_466, 0));
200
 
201
  if to_bool(args.sageattention): del pathchsageattentionkj_129
@@ -228,7 +255,7 @@ def main():
228
  if loaded_images is None: print(" - (κ²½κ³ ) κ±΄λ„ˆλ›Έ 수 μ—†λŠ” 이미지가 λ‘œλ“œλ˜μ—ˆμŠ΅λ‹ˆλ‹€, 이 배치λ₯Ό κ±΄λ„ˆλœλ‹ˆλ‹€."); continue
229
  imageupscale_chunk = imageupscalewithmodel.upscale(upscale_model=get_value_at_index(upscalemodelloader_384, 0), image=loaded_images);
230
  imagescale_chunk = imagescaleby.upscale(
231
- upscale_method=args.output_resize_algo, # ✨ 변경됨
232
  scale_by=scale_by_ratio,
233
  image=get_value_at_index(imageupscale_chunk, 0)
234
  );
@@ -237,7 +264,7 @@ def main():
237
  del upscalemodelloader_384; clear_memory(); combine_input_dir_for_ffmpeg = f"{COMFYUI_BASE_PATH}/output/up"; print("5단계 μ™„λ£Œ.")
238
  else: print("\n5단계: μ—…μŠ€μΌ€μΌλ§ κ±΄λ„ˆλœ€ (λΉ„μœ¨ 1.0).")
239
 
240
- # --- ✨ 6단계: RIFE 청크 둜직 μˆ˜μ • (Overlap 적용) ---\n",
241
  print("\n6단계: λΉ„λ””μ˜€ κ²°ν•© μ€€λΉ„ 쀑..."); final_frame_rate = float(args.frame_rate); ffmpeg_input_dir = combine_input_dir_for_ffmpeg
242
  if to_bool(args.interpolation):
243
  print(" - ν”„λ ˆμž„ 보간 (RIFE)을 ν™œμ„±ν™”ν•©λ‹ˆλ‹€."); interpolated_dir = f"{COMFYUI_BASE_PATH}/output/interpolated"; source_dir = combine_input_dir_for_ffmpeg
@@ -277,7 +304,7 @@ def main():
277
 
278
  if loaded_images is None:
279
  print(" - (κ²½κ³ ) κ±΄λ„ˆλ›Έ 수 μ—†λŠ” 이미지가 λ‘œλ“œλ˜μ—ˆμŠ΅λ‹ˆλ‹€, 이 배치λ₯Ό οΏ½οΏ½οΏ½λ„ˆλœλ‹ˆλ‹€.");
280
- current_frame_idx += chunk_size # λ‹€μŒ 청크둜 이동
281
  is_first_chunk = False
282
  continue
283
 
@@ -291,7 +318,7 @@ def main():
291
 
292
  images_to_save = rife_chunk_result_tensor
293
  if not is_first_chunk:
294
- # ✨ 첫 λ²ˆμ§Έκ°€ μ•„λ‹Œ λͺ¨λ“  μ²­ν¬λŠ” κ²ΉμΉ˜λŠ” 첫 ν”„λ ˆμž„μ„ 제거 (ν…μ„œ μŠ¬λΌμ΄μ‹±)
295
  print(f" - (Overlap) 쀑볡 ν”„λ ˆμž„ 1개 제거 ν›„ μ €μž₯")
296
  images_to_save = rife_chunk_result_tensor[1:]
297
 
@@ -307,7 +334,7 @@ def main():
307
  # --- 6단계 μˆ˜μ • μ™„λ£Œ ---
308
 
309
  print(f" - μ΅œμ’… λΉ„λ””μ˜€λ₯Ό FFmpeg ({args.video_encoder})둜 κ²°ν•©ν•©λ‹ˆλ‹€..."); print(f" - μž…λ ₯ 폴더: '{ffmpeg_input_dir}'")
310
- input_pattern = os.path.join(ffmpeg_input_dir, "example_%05d_.png") # 파일λͺ… νŒ¨ν„΄ μˆ˜μ •λ¨
311
  timestamp = time.strftime("%Y%m%d-%H%M%S"); output_filename = f"AnimateDiff_{timestamp}.mp4"; output_path = os.path.join(COMFYUI_BASE_PATH, "output", output_filename)
312
  ffmpeg_cmd = ["ffmpeg", "-framerate", str(final_frame_rate), "-i", input_pattern]
313
  encoder_choice = args.video_encoder
@@ -328,7 +355,7 @@ def main():
328
  except Exception as e: print(f" ❌ 였λ₯˜: FFmpeg μ‹€ν–‰ 쀑 μ˜ˆμƒμΉ˜ λͺ»ν•œ 였λ₯˜ λ°œμƒ: {e}"); raise
329
  print("βœ… λͺ¨λ“  단계 μ™„λ£Œ.")
330
 
331
- # --- ✨ UnboundLocalError ν•΄κ²° 및 볡사 둜직 (μ΅œμ’…) ---\n",
332
  latest_video = None
333
 
334
  if os.path.exists(output_path):
@@ -360,3 +387,4 @@ def main():
360
 
361
  if __name__ == "__main__":
362
  main()
 
 
11
  import random
12
  import argparse
13
  from typing import Sequence, Mapping, Any, Union
14
+ import shutil
15
 
16
+ # --- 0. κΈ°λ³Έ μ„€μ • 및 인수 νŒŒμ‹± ---
17
  def parse_args():
18
  parser = argparse.ArgumentParser(description="ComfyUI Video Generation Script with All Controls from 1.py")
19
  parser.add_argument("--positive_prompt", type=str, required=True); parser.add_argument("--negative_prompt", type=str, required=True)
20
  parser.add_argument("--width", type=int, required=True); parser.add_argument("--height", type=int, required=True)
21
  parser.add_argument("--length", type=int, required=True); parser.add_argument("--upscale_ratio", type=float, required=True)
 
 
22
  parser.add_argument("--steps", type=int, default=4)
23
+ parser.add_argument("--cfg_high", type=float, default=1.0)
24
+ parser.add_argument("--cfg_low", type=float, default=1.0)
 
25
  parser.add_argument("--sampler_name_high", type=str, default="euler"); parser.add_argument("--scheduler_high", type=str, default="simple")
26
  parser.add_argument("--sampler_name_low", type=str, default="euler"); parser.add_argument("--scheduler_low", type=str, default="simple")
27
  parser.add_argument("--noise_seed", type=int, default=-1); parser.add_argument("--split_point_percent", type=float, default=50.0)
 
100
  def main():
101
  args = parse_args()
102
  print("πŸš€ λ™μ˜μƒ 생성을 μ‹œμž‘ν•©λ‹ˆλ‹€ (Full Control Mode, VRAM Optimized)...\n")
103
+
104
+ # 🚨🚨🚨 μˆ˜μ •λœ λΆ€λΆ„ μ‹œμž‘: .mp4, .mkv, .webm νŒŒμΌμ€ μ œμ™Έν•˜κ³  output 폴더 정리 🚨🚨🚨
105
+ output_dir = os.path.join(COMFYUI_BASE_PATH, 'output')
106
+ print(f" - 이전 좜λ ₯λ¬Ό 정리 쀑... (Output: {output_dir})")
107
+
108
+ deleted_count = 0
109
+ try:
110
+ # output 폴더 λ‚΄μ˜ λͺ¨λ“  파일과 폴더 λͺ©λ‘μ„ κ°€μ Έμ˜΅λ‹ˆλ‹€.
111
+ for item_name in os.listdir(output_dir):
112
+ item_path = os.path.join(output_dir, item_name)
113
+
114
+ # πŸ’‘ 쑰건: λΉ„λ””μ˜€ 파일 ν™•μž₯자(.mp4, .mkv, .webm)λŠ” μ‚­μ œν•˜μ§€ μ•Šκ³  λ³΄μ‘΄ν•©λ‹ˆλ‹€.
115
+ if item_name.lower().endswith(('.mp4', '.mkv', '.webm')):
116
+ print(f" - πŸ—„οΈ λΉ„λ””μ˜€ 파일 '{item_name}'은 λ³΄μ‘΄ν•©λ‹ˆλ‹€.")
117
+ continue
118
+
119
+ # 파일 λ˜λŠ” 링크인 경우 μ‚­μ œ
120
+ if os.path.isfile(item_path) or os.path.islink(item_path):
121
+ os.unlink(item_path)
122
+ deleted_count += 1
123
+ # 폴더인 경우 μž¬κ·€μ μœΌλ‘œ μ‚­μ œ
124
+ elif os.path.isdir(item_path):
125
+ shutil.rmtree(item_path)
126
+ deleted_count += 1
127
+
128
+ print(f" βœ… 정리 μ™„λ£Œ. 보쑴된 λΉ„λ””μ˜€ μ™Έ {deleted_count}개의 ν•­λͺ©μ΄ μ‚­μ œλ˜μ—ˆμŠ΅λ‹ˆλ‹€.")
129
+ except Exception as e:
130
+ print(f" ❌ 좜λ ₯ 폴더 정리 쀑 였λ₯˜ λ°œμƒ: {e}")
131
+ # 🚨🚨🚨 μˆ˜μ •λœ λΆ€λΆ„ 끝 🚨🚨🚨
132
+
133
+ # μž„μ‹œ 폴더 μž¬μƒμ„± (정리 κ³Όμ •μ—μ„œ μ‚­μ œλ˜μ—ˆμ„ 수 있음)
134
+ os.makedirs(f"{COMFYUI_BASE_PATH}/output/temp", exist_ok=True);
135
+ os.makedirs(f"{COMFYUI_BASE_PATH}/output/up", exist_ok=True)
136
  os.makedirs(f"{COMFYUI_BASE_PATH}/output/interpolated", exist_ok=True)
137
+
138
  add_comfyui_directory_to_sys_path()
139
  try: from utils.extra_config import load_extra_path_config
140
  except ImportError: print("⚠️ ComfyUI의 extra_model_paths.yaml λ‘œλ”© μ‹€νŒ¨ (λ¬΄μ‹œν•˜κ³  μ§„ν–‰)"); load_extra_path_config = lambda x: None
 
154
  clipvisionencode=NODE_CLASS_MAPPINGS["CLIPVisionEncode"]()
155
  pathchsageattentionkj=NODE_CLASS_MAPPINGS["PathchSageAttentionKJ"]()
156
 
157
+ # --- ✨ 1단계: CLIP Vision 둜직 μΆ”κ°€ 및 λ©”λͺ¨λ¦¬ ν•΄μ œ ---
158
  print("\n1단계: 데이터 λ‘œλ”© 및 초기 Latent 생성 쀑...");
159
  print(f" - CLIP λ‘œλ”©: {args.clip_name}");
 
160
  cliploader_460 = cliploader.load_clip(clip_name=args.clip_name, type="wan", device="default");
161
  cliptextencode_462 = cliptextencode.encode(text=args.positive_prompt, clip=get_value_at_index(cliploader_460, 0));
162
  cliptextencode_463 = cliptextencode.encode(text=args.negative_prompt, clip=get_value_at_index(cliploader_460, 0));
 
164
 
165
  imageresizekjv2_401 = imageresizekjv2.resize(
166
  width=args.width, height=args.height,
167
+ upscale_method=args.input_resize_algo,
168
  image=get_value_at_index(loadimage_88, 0), keep_proportion="crop",
169
  pad_color="0, 0, 0", crop_position="center", divisible_by=2,
170
  unique_id=random.randint(1, 2**64)
 
176
  clipvisionencode_cv = clipvisionencode.encode(
177
  crop="none",
178
  clip_vision=get_value_at_index(clipvisionloader_cv, 0),
179
+ image=get_value_at_index(imageresizekjv2_401, 0)
180
  );
181
  clip_vision_output = get_value_at_index(clipvisionencode_cv, 0)
182
 
 
191
  positive=get_value_at_index(cliptextencode_462, 0),
192
  negative=get_value_at_index(cliptextencode_463, 0),
193
  vae=get_value_at_index(vaeloader_temp, 0),
194
+ clip_vision_output=clip_vision_output,
195
  start_image=get_value_at_index(imageresizekjv2_401, 0)
196
  );
197
 
 
210
  if args.lora_high_2_name != "None": print(f" - H LoRA 2: {args.lora_high_2_name}"); model_for_patching, clip = loraloader.load_lora(lora_name=args.lora_high_2_name, strength_model=args.lora_high_2_strength_model, strength_clip=args.lora_high_2_strength_clip, model=model_for_patching, clip=clip)
211
  shifted_model = get_value_at_index(modelsamplingsd3.patch(shift=args.shift, model=model_for_patching), 0); final_model = shifted_model;
212
 
213
+ # μˆ˜μ •: cfg=args.cfg_high μ‚¬μš©
214
  ksampleradvanced_466 = ksampleradvanced.sample(add_noise="enable", noise_seed=final_seed, steps=args.steps, cfg=args.cfg_high, sampler_name=args.sampler_name_high, scheduler=args.scheduler_high, start_at_step=0, end_at_step=split_step, return_with_leftover_noise="enable", model=final_model, positive=get_value_at_index(wanimagetovideo_464, 0), negative=get_value_at_index(wanimagetovideo_464, 1), latent_image=get_value_at_index(wanimagetovideo_464, 2));
215
 
216
  if to_bool(args.sageattention): del pathchsageattentionkj_124
 
222
  if args.lora_low_2_name != "None": print(f" - L LoRA 2: {args.lora_low_2_name}"); model_for_patching, clip = loraloader.load_lora(lora_name=args.lora_low_2_name, strength_model=args.lora_low_2_strength_model, strength_clip=args.lora_low_2_strength_clip, model=model_for_patching, clip=clip)
223
  shifted_model = get_value_at_index(modelsamplingsd3.patch(shift=args.shift, model=model_for_patching), 0); final_model = shifted_model;
224
 
225
+ # μˆ˜μ •: cfg=args.cfg_low μ‚¬μš©
226
  ksampleradvanced_465 = ksampleradvanced.sample(add_noise="disable", noise_seed=final_seed, steps=args.steps, cfg=args.cfg_low, sampler_name=args.sampler_name_low, scheduler=args.scheduler_low, start_at_step=split_step, end_at_step=10000, return_with_leftover_noise="disable", model=final_model, positive=get_value_at_index(wanimagetovideo_464, 0), negative=get_value_at_index(wanimagetovideo_464, 1), latent_image=get_value_at_index(ksampleradvanced_466, 0));
227
 
228
  if to_bool(args.sageattention): del pathchsageattentionkj_129
 
255
  if loaded_images is None: print(" - (κ²½κ³ ) κ±΄λ„ˆλ›Έ 수 μ—†λŠ” 이미지가 λ‘œλ“œλ˜μ—ˆμŠ΅λ‹ˆλ‹€, 이 배치λ₯Ό κ±΄λ„ˆλœλ‹ˆλ‹€."); continue
256
  imageupscale_chunk = imageupscalewithmodel.upscale(upscale_model=get_value_at_index(upscalemodelloader_384, 0), image=loaded_images);
257
  imagescale_chunk = imagescaleby.upscale(
258
+ upscale_method=args.output_resize_algo,
259
  scale_by=scale_by_ratio,
260
  image=get_value_at_index(imageupscale_chunk, 0)
261
  );
 
264
  del upscalemodelloader_384; clear_memory(); combine_input_dir_for_ffmpeg = f"{COMFYUI_BASE_PATH}/output/up"; print("5단계 μ™„λ£Œ.")
265
  else: print("\n5단계: μ—…μŠ€μΌ€μΌλ§ κ±΄λ„ˆλœ€ (λΉ„μœ¨ 1.0).")
266
 
267
+ # --- ✨ 6단계: RIFE 청크 둜직 μˆ˜μ • (Overlap 적용) ---
268
  print("\n6단계: λΉ„λ””μ˜€ κ²°ν•© μ€€λΉ„ 쀑..."); final_frame_rate = float(args.frame_rate); ffmpeg_input_dir = combine_input_dir_for_ffmpeg
269
  if to_bool(args.interpolation):
270
  print(" - ν”„λ ˆμž„ 보간 (RIFE)을 ν™œμ„±ν™”ν•©λ‹ˆλ‹€."); interpolated_dir = f"{COMFYUI_BASE_PATH}/output/interpolated"; source_dir = combine_input_dir_for_ffmpeg
 
304
 
305
  if loaded_images is None:
306
  print(" - (κ²½κ³ ) κ±΄λ„ˆλ›Έ 수 μ—†λŠ” 이미지가 λ‘œλ“œλ˜μ—ˆμŠ΅λ‹ˆλ‹€, 이 배치λ₯Ό οΏ½οΏ½οΏ½λ„ˆλœλ‹ˆλ‹€.");
307
+ current_frame_idx += chunk_size
308
  is_first_chunk = False
309
  continue
310
 
 
318
 
319
  images_to_save = rife_chunk_result_tensor
320
  if not is_first_chunk:
321
+ # 첫 λ²ˆμ§Έκ°€ μ•„λ‹Œ λͺ¨λ“  μ²­ν¬λŠ” κ²ΉμΉ˜λŠ” 첫 ν”„λ ˆμž„μ„ 제거 (ν…μ„œ μŠ¬λΌμ΄μ‹±)
322
  print(f" - (Overlap) 쀑볡 ν”„λ ˆμž„ 1개 제거 ν›„ μ €μž₯")
323
  images_to_save = rife_chunk_result_tensor[1:]
324
 
 
334
  # --- 6단계 μˆ˜μ • μ™„λ£Œ ---
335
 
336
  print(f" - μ΅œμ’… λΉ„λ””μ˜€λ₯Ό FFmpeg ({args.video_encoder})둜 κ²°ν•©ν•©λ‹ˆλ‹€..."); print(f" - μž…λ ₯ 폴더: '{ffmpeg_input_dir}'")
337
+ input_pattern = os.path.join(ffmpeg_input_dir, "example_%05d_.png")
338
  timestamp = time.strftime("%Y%m%d-%H%M%S"); output_filename = f"AnimateDiff_{timestamp}.mp4"; output_path = os.path.join(COMFYUI_BASE_PATH, "output", output_filename)
339
  ffmpeg_cmd = ["ffmpeg", "-framerate", str(final_frame_rate), "-i", input_pattern]
340
  encoder_choice = args.video_encoder
 
355
  except Exception as e: print(f" ❌ 였λ₯˜: FFmpeg μ‹€ν–‰ 쀑 μ˜ˆμƒμΉ˜ λͺ»ν•œ 였λ₯˜ λ°œμƒ: {e}"); raise
356
  print("βœ… λͺ¨λ“  단계 μ™„λ£Œ.")
357
 
358
+ # --- ✨ UnboundLocalError ν•΄κ²° 및 볡사 둜직 (μ΅œμ’…) ---
359
  latest_video = None
360
 
361
  if os.path.exists(output_path):
 
387
 
388
  if __name__ == "__main__":
389
  main()
390
+