Upload run_generator.py
Browse files- run_generator.py +234 -0
run_generator.py
ADDED
|
@@ -0,0 +1,234 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# This file is executed as a subprocess from cell 3.
|
| 2 |
+
|
| 3 |
+
import sys
|
| 4 |
+
import os
|
| 5 |
+
import time
|
| 6 |
+
import glob
|
| 7 |
+
import gc
|
| 8 |
+
import torch
|
| 9 |
+
import subprocess
|
| 10 |
+
import random
|
| 11 |
+
import argparse
|
| 12 |
+
from typing import Sequence, Mapping, Any, Union
|
| 13 |
+
|
| 14 |
+
# --- 0. Basic settings and argument parsing ---
|
| 15 |
+
def parse_args(argv=None):
    """Parse the command-line arguments for one video-generation run.

    Args:
        argv: Optional list of argument strings. Defaults to ``None``, in
            which case argparse reads ``sys.argv[1:]`` — identical to the
            previous zero-argument behavior. Passing a list makes the
            function testable and usable programmatically.

    Returns:
        argparse.Namespace with prompt, geometry, upscale, and LoRA options.
    """
    parser = argparse.ArgumentParser(description="ComfyUI Video Generation Script")
    parser.add_argument("--positive_prompt", type=str, required=True)
    parser.add_argument("--negative_prompt", type=str, required=True)
    parser.add_argument("--width", type=int, required=True)
    parser.add_argument("--height", type=int, required=True)
    parser.add_argument("--length", type=int, required=True)
    parser.add_argument("--upscale_ratio", type=float, required=True)
    # "None" (the string) is the sentinel meaning "no custom LoRA" —
    # downstream code compares against the literal string.
    parser.add_argument("--custom_lora_1_name", type=str, default="None")
    parser.add_argument("--custom_lora_1_strength_model", type=float, default=1.0)
    parser.add_argument("--custom_lora_1_strength_clip", type=float, default=1.0)
    parser.add_argument("--custom_lora_2_name", type=str, default="None")
    parser.add_argument("--custom_lora_2_strength_model", type=float, default=1.0)
    parser.add_argument("--custom_lora_2_strength_clip", type=float, default=1.0)
    return parser.parse_args(argv)
|
| 30 |
+
|
| 31 |
+
def clear_memory():
    """Release cached CUDA memory (when a GPU is present) and run a GC pass."""
    cuda_ready = torch.cuda.is_available()
    if cuda_ready:
        # Return cached blocks to the driver and reap dead IPC handles.
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()
    gc.collect()
|
| 36 |
+
|
| 37 |
+
# --- 1. ComfyUI-related function definitions ---
# Root of the ComfyUI checkout (Colab layout: /content/ComfyUI).
COMFYUI_BASE_PATH = '/content/ComfyUI'
|
| 39 |
+
|
| 40 |
+
def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
    """Return ``obj[index]``, unwrapping a dict's ``"result"`` entry if needed.

    ComfyUI node outputs are usually tuples, but some nodes return a dict
    wrapping their outputs under a ``"result"`` key; this helper handles both.
    """
    try:
        value = obj[index]
    except (KeyError, TypeError):
        # Not directly indexable by position — fall back to the "result"
        # wrapper when present, otherwise let the original error propagate.
        if not (isinstance(obj, dict) and "result" in obj):
            raise
        value = obj["result"][index]
    return value
|
| 47 |
+
|
| 48 |
+
def add_comfyui_directory_to_sys_path() -> None:
    """Append the ComfyUI checkout to ``sys.path`` exactly once, if it exists."""
    already_present = COMFYUI_BASE_PATH in sys.path
    if not already_present and os.path.isdir(COMFYUI_BASE_PATH):
        sys.path.append(COMFYUI_BASE_PATH)
        print(f"'{COMFYUI_BASE_PATH}' added to sys.path")
|
| 52 |
+
|
| 53 |
+
def import_custom_nodes() -> None:
    """Boot ComfyUI's server machinery and load all extra/custom nodes.

    Installs ``nest_asyncio`` on demand so the init coroutine can run even
    inside an already-running (notebook) event loop, then creates a
    PromptServer/PromptQueue pair and initializes the extra nodes.
    """
    try:
        import nest_asyncio
        nest_asyncio.apply()
    except ImportError:
        # Best-effort install into the current interpreter, then retry.
        subprocess.run([sys.executable, "-m", "pip", "install", "-q", "nest_asyncio"])
        import nest_asyncio
        nest_asyncio.apply()

    import asyncio
    import execution
    from nodes import init_extra_nodes
    import server

    # BUGFIX: the original `asyncio.get_event_loop() or asyncio.new_event_loop()`
    # was dead code — get_event_loop() never returns a falsy value; when no
    # loop exists it raises RuntimeError instead. Handle that case explicitly
    # and register the fresh loop so later asyncio calls find it.
    try:
        loop = asyncio.get_event_loop()
    except RuntimeError:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

    server_instance = server.PromptServer(loop)
    execution.PromptQueue(server_instance)
    if not loop.is_running():
        loop.run_until_complete(init_extra_nodes())
    else:
        # Loop already running (nest_asyncio case): schedule instead of block.
        asyncio.ensure_future(init_extra_nodes())
|
| 74 |
+
|
| 75 |
+
# --- 2. Main execution logic ---
|
| 76 |
+
def main():
    """Run the two-stage Wan2.2 image-to-video workflow end to end.

    Pipeline: parse CLI args -> wipe previous frame output -> bootstrap
    ComfyUI -> Step 1 high-noise sampling -> Step 2 low-noise refinement ->
    Step 3 VAE decode + frame dump -> optional upscale (Steps 4/5) ->
    RIFE interpolation + mp4 encode (Step 6). Finally prints the newest
    video as ``LATEST_VIDEO_PATH:<path>`` for the parent process to parse.

    Raises:
        FileNotFoundError: if no .mp4 is found under the output directory.
    """
    args = parse_args()

    print("π λμμ μμ±μ μμν©λλ€...")
    print(f"ν둬ννΈ: {args.positive_prompt[:50]}...")
    print(f"ν¬κΈ°: {args.width}x{args.height}, κΈΈμ΄: {args.length} νλ μ")
    print(f"μ΅μ’ μ μ€μΌμΌ λΉμ¨: {args.upscale_ratio}x")
    if args.custom_lora_1_name != "None":
        print(f"컀μ€ν LoRA 1: {args.custom_lora_1_name} (κ°λ: {args.custom_lora_1_strength_model})")
    if args.custom_lora_2_name != "None":
        print(f"컀μ€ν LoRA 2: {args.custom_lora_2_name} (κ°λ: {args.custom_lora_2_strength_model})")

    # Wipe frames left over from a previous run.
    # NOTE(review): shell=True with rm -rf is acceptable only because both
    # paths are hard-coded constants; never interpolate user input here.
    subprocess.run(f"rm -rf {COMFYUI_BASE_PATH}/output/up/*", shell=True, check=True)
    subprocess.run(f"rm -rf {COMFYUI_BASE_PATH}/output/temp/*", shell=True, check=True)

    add_comfyui_directory_to_sys_path()

    # Load extra model search paths when the user configured any.
    from utils.extra_config import load_extra_path_config
    extra_model_paths_file = os.path.join(COMFYUI_BASE_PATH, "extra_model_paths.yaml")
    if os.path.exists(extra_model_paths_file):
        load_extra_path_config(extra_model_paths_file)

    print("Initializing ComfyUI custom nodes...")
    import_custom_nodes()
    from nodes import NODE_CLASS_MAPPINGS
    print("Custom nodes initialized successfully.")

    with torch.inference_mode():
        # Instantiate every node class this workflow uses.
        unetloadergguf = NODE_CLASS_MAPPINGS["UnetLoaderGGUF"]()
        cliploader = NODE_CLASS_MAPPINGS["CLIPLoader"]()
        loraloader = NODE_CLASS_MAPPINGS["LoraLoader"]()
        cliptextencode = NODE_CLASS_MAPPINGS["CLIPTextEncode"]()
        vaeloader = NODE_CLASS_MAPPINGS["VAELoader"]()
        loadimage = NODE_CLASS_MAPPINGS["LoadImage"]()
        upscalemodelloader = NODE_CLASS_MAPPINGS["UpscaleModelLoader"]()
        mxslider = NODE_CLASS_MAPPINGS["mxSlider"]()
        vhs_loadimagespath = NODE_CLASS_MAPPINGS["VHS_LoadImagesPath"]()
        modelsamplingsd3 = NODE_CLASS_MAPPINGS["ModelSamplingSD3"]()
        wanimagetovideo = NODE_CLASS_MAPPINGS["WanImageToVideo"]()
        ksampleradvanced = NODE_CLASS_MAPPINGS["KSamplerAdvanced"]()
        vaedecode = NODE_CLASS_MAPPINGS["VAEDecode"]()
        rife_vfi = NODE_CLASS_MAPPINGS["RIFE VFI"]()
        vhs_videocombine = NODE_CLASS_MAPPINGS["VHS_VideoCombine"]()
        easy_mathfloat = NODE_CLASS_MAPPINGS["easy mathFloat"]()
        imageupscalewithmodel = NODE_CLASS_MAPPINGS["ImageUpscaleWithModel"]()
        imagescaleby = NODE_CLASS_MAPPINGS["ImageScaleBy"]()
        saveimage = NODE_CLASS_MAPPINGS["SaveImage"]()

        print("Starting Step 1: Initial Sampling")
        cliploader_38 = cliploader.load_clip(clip_name="umt5_xxl_fp8_e4m3fn_scaled.safetensors", type="wan", device="default")
        unetloadergguf_84 = unetloadergguf.load_unet(unet_name="Wan2.2-I2V-A14B-HighNoise-Q4_K_S.gguf")

        # LoRA chain for sampler 1 (high-noise model).
        model, clip = loraloader.load_lora(lora_name="lightx2v_T2V_14B_cfg_step_distill_v2_lora_rank32_bf16.safetensors", strength_model=2, strength_clip=2, model=get_value_at_index(unetloadergguf_84, 0), clip=get_value_at_index(cliploader_38, 0))
        if args.custom_lora_1_name and args.custom_lora_1_name != "None":
            print(f"1λ² μνλ¬μ 컀μ€ν LoRA μ μ©: {args.custom_lora_1_name}")
            model, clip = loraloader.load_lora(lora_name=args.custom_lora_1_name, strength_model=args.custom_lora_1_strength_model, strength_clip=args.custom_lora_1_strength_clip, model=model, clip=clip)
        loraloader_78 = loraloader.load_lora(lora_name="FastWan_T2V_14B_480p_lora_rank_128_bf16.safetensors", strength_model=1.5, strength_clip=1.5, model=model, clip=clip)

        cliptextencode_6 = cliptextencode.encode(text=args.positive_prompt, clip=get_value_at_index(loraloader_78, 1))
        cliptextencode_7 = cliptextencode.encode(text=args.negative_prompt, clip=get_value_at_index(loraloader_78, 1))
        vaeloader_39 = vaeloader.load_vae(vae_name="wan_2.1_vae.safetensors")
        loadimage_62 = loadimage.load_image(image="example.png")
        wanimagetovideo_63 = wanimagetovideo.EXECUTE_NORMALIZED(width=args.width, height=args.height, length=args.length, batch_size=1, positive=get_value_at_index(cliptextencode_6, 0), negative=get_value_at_index(cliptextencode_7, 0), vae=get_value_at_index(vaeloader_39, 0), start_image=get_value_at_index(loadimage_62, 0))
        modelsamplingsd3_54 = modelsamplingsd3.patch(shift=8.0, model=get_value_at_index(loraloader_78, 0))

        current_seed = int(time.time() * 1000)
        random.seed(current_seed)
        # BUGFIX: the upper bound was 2**64, which is one past the largest
        # 64-bit seed — valid uint64 seeds end at 2**64 - 1.
        ksampleradvanced_74 = ksampleradvanced.sample(add_noise="enable", noise_seed=random.randint(1, 2**64 - 1), steps=4, cfg=1, sampler_name="lcm", scheduler="simple", start_at_step=0, end_at_step=2, return_with_leftover_noise="enable", model=get_value_at_index(modelsamplingsd3_54, 0), positive=get_value_at_index(wanimagetovideo_63, 0), negative=get_value_at_index(wanimagetovideo_63, 1), latent_image=get_value_at_index(wanimagetovideo_63, 2))

        # Free the high-noise model before loading the low-noise one.
        del unetloadergguf_84, loraloader_78, modelsamplingsd3_54, loadimage_62, model, clip
        clear_memory()
        print("Step 1 finished and memory cleared.")

        print("Starting Step 2: Refinement Sampling")
        unetloadergguf_85 = unetloadergguf.load_unet(unet_name="Wan2.2-I2V-A14B-LowNoise-Q4_K_S.gguf")

        # LoRA chain for sampler 2 (low-noise model).
        model, clip = loraloader.load_lora(lora_name="lightx2v_T2V_14B_cfg_step_distill_v2_lora_rank32_bf16.safetensors", strength_model=2, strength_clip=2, model=get_value_at_index(unetloadergguf_85, 0), clip=get_value_at_index(cliploader_38, 0))
        if args.custom_lora_2_name and args.custom_lora_2_name != "None":
            print(f"2λ² μνλ¬μ 컀μ€ν LoRA μ μ©: {args.custom_lora_2_name}")
            model, clip = loraloader.load_lora(lora_name=args.custom_lora_2_name, strength_model=args.custom_lora_2_strength_model, strength_clip=args.custom_lora_2_strength_clip, model=model, clip=clip)
        loraloader_86 = loraloader.load_lora(lora_name="FastWan_T2V_14B_480p_lora_rank_128_bf16.safetensors", strength_model=0.5, strength_clip=0.5, model=model, clip=clip)

        modelsamplingsd3_55 = modelsamplingsd3.patch(shift=8, model=get_value_at_index(loraloader_86, 0))

        current_seed_2 = int(time.time() * 1000)
        random.seed(current_seed_2)
        # BUGFIX: same 2**64 off-by-one as in Step 1.
        ksampleradvanced_75 = ksampleradvanced.sample(add_noise="disable", noise_seed=random.randint(1, 2**64 - 1), steps=4, cfg=1, sampler_name="lcm", scheduler="simple", start_at_step=2, end_at_step=10000, return_with_leftover_noise="disable", model=get_value_at_index(modelsamplingsd3_55, 0), positive=get_value_at_index(wanimagetovideo_63, 0), negative=get_value_at_index(wanimagetovideo_63, 1), latent_image=get_value_at_index(ksampleradvanced_74, 0))

        del unetloadergguf_85, loraloader_86, modelsamplingsd3_55, cliploader_38, cliptextencode_6, cliptextencode_7, ksampleradvanced_74, wanimagetovideo_63, model, clip
        clear_memory()
        print("Step 2 finished and memory cleared.")

        print("Starting Step 3: VAE Decode and Save")
        vaedecode_8 = vaedecode.decode(samples=get_value_at_index(ksampleradvanced_75, 0), vae=get_value_at_index(vaeloader_39, 0))
        saveimage.save_images(filename_prefix="temp/example", images=get_value_at_index(vaedecode_8, 0))

        # Drop VAE-related objects as soon as they are no longer needed.
        del ksampleradvanced_75, vaeloader_39, vaedecode_8
        clear_memory()
        print("Step 3 finished and memory cleared.")

        if args.upscale_ratio > 1:
            print("Starting Steps 4 & 5: Upscaling")
            upscalemodelloader_88 = upscalemodelloader.load_model(model_name="2x-AnimeSharpV4_Fast_RCAN_PU.safetensors")

            # Upscale part 1: first 40 frames (processed in halves,
            # presumably to cap peak VRAM — TODO confirm).
            vhs_loadimagespath_96 = vhs_loadimagespath.load_images(directory=f"{COMFYUI_BASE_PATH}/output/temp", image_load_cap=40)
            if get_value_at_index(vhs_loadimagespath_96, 0) is not None and get_value_at_index(vhs_loadimagespath_96, 0).shape[0] > 0:
                imageupscalewithmodel_92 = imageupscalewithmodel.upscale(upscale_model=get_value_at_index(upscalemodelloader_88, 0), image=get_value_at_index(vhs_loadimagespath_96, 0))
                mxslider_91 = mxslider.main(Xi=args.upscale_ratio, Xf=args.upscale_ratio, isfloatX=1)
                # The model upscales 2x, so rescale by ratio/2 to hit the target.
                easy_mathfloat_90 = easy_mathfloat.float_math_operation(a=get_value_at_index(mxslider_91, 0), b=2, operation="divide")
                imagescaleby_93 = imagescaleby.upscale(upscale_method="nearest-exact", scale_by=get_value_at_index(easy_mathfloat_90, 0), image=get_value_at_index(imageupscalewithmodel_92, 0))
                saveimage.save_images(filename_prefix="up/example", images=get_value_at_index(imagescaleby_93, 0))
                del vhs_loadimagespath_96, imageupscalewithmodel_92, imagescaleby_93, easy_mathfloat_90, mxslider_91
                clear_memory()

            # Upscale part 2: remaining frames after the first 40.
            vhs_loadimagespath_98 = vhs_loadimagespath.load_images(directory=f"{COMFYUI_BASE_PATH}/output/temp", skip_first_images=40)
            if get_value_at_index(vhs_loadimagespath_98, 0) is not None and get_value_at_index(vhs_loadimagespath_98, 0).shape[0] > 0:
                imageupscalewithmodel_100 = imageupscalewithmodel.upscale(upscale_model=get_value_at_index(upscalemodelloader_88, 0), image=get_value_at_index(vhs_loadimagespath_98, 0))
                mxslider_102 = mxslider.main(Xi=args.upscale_ratio, Xf=args.upscale_ratio, isfloatX=1)
                easy_mathfloat_103 = easy_mathfloat.float_math_operation(a=get_value_at_index(mxslider_102, 0), b=2, operation="divide")
                imagescaleby_101 = imagescaleby.upscale(upscale_method="nearest-exact", scale_by=get_value_at_index(easy_mathfloat_103, 0), image=get_value_at_index(imageupscalewithmodel_100, 0))
                saveimage.save_images(filename_prefix="up/example", images=get_value_at_index(imagescaleby_101, 0))
                del vhs_loadimagespath_98, imageupscalewithmodel_100, easy_mathfloat_103, imagescaleby_101, mxslider_102

            del upscalemodelloader_88
            clear_memory()
            print("Upscaling finished and memory cleared.")
            rife_input_dir = f"{COMFYUI_BASE_PATH}/output/up"
        else:
            print("Skipping Upscaling (Upscale ratio <= 1)")
            rife_input_dir = f"{COMFYUI_BASE_PATH}/output/temp"

        print("Starting Step 6: RIFE and Combine")
        vhs_loadimagespath_97 = vhs_loadimagespath.load_images(directory=rife_input_dir)
        rife_vfi_94 = rife_vfi.vfi(ckpt_name="rife47.pth", multiplier=2, frames=get_value_at_index(vhs_loadimagespath_97, 0))
        vhs_videocombine.combine_video(frame_rate=32, loop_count=0, filename_prefix="AnimateDiff", format="video/h264-mp4", pix_fmt="yuv420p", crf=19, save_metadata=True, trim_to_audio=False, pingpong=False, save_output=True, images=get_value_at_index(rife_vfi_94, 0))

        # Release the final-stage objects.
        del vhs_loadimagespath_97, rife_vfi_94
        clear_memory()
        print("β All steps finished.")

    # Report the newest mp4 so the parent process can parse it from stdout.
    output_dir = os.path.join(COMFYUI_BASE_PATH, "output")
    video_files = glob.glob(os.path.join(output_dir, '**', '*.mp4'), recursive=True)

    if not video_files:
        raise FileNotFoundError("μμ±λ λμμ νμΌμ μ°Ύμ μ μμ΅λλ€!")

    latest_video = max(video_files, key=os.path.getctime)
    print(f"LATEST_VIDEO_PATH:{latest_video}")
|
| 232 |
+
|
| 233 |
+
# Script entry point (this file is designed to be launched as a subprocess).
if __name__ == "__main__":
    main()
|