import torch
import argparse
import os
import json
import sys
import datetime
import imageio # Import imageio for MP4 saving
sys.path.append(os.getcwd())
from pipelines.sd_controlnet_rave import RAVE
from pipelines.sd_multicontrolnet_rave import RAVE_MultiControlNet
import utils.constants as const
import utils.video_grid_utils as vgu
import warnings
warnings.filterwarnings("ignore")
import numpy as np
def init_device():
    """Select the compute device, preferring CUDA when it is available."""
    if torch.cuda.is_available():
        return torch.device('cuda')
    return torch.device('cpu')
def init_paths(input_ns, video_name, save_folder):
    """Populate *input_ns* with every path the pipeline needs for one video.

    Creates the save, control and inverse directories as a side effect and
    returns the mutated namespace.
    """
    # The edited video is written directly as <save_folder>/<video_name>.
    os.makedirs(save_folder, exist_ok=True)
    input_ns.save_path = os.path.join(save_folder, video_name)

    # Source clips live under a fixed dataset root.
    input_ns.video_path = f'/home/wangjuntong/video_editing_dataset/all_sourse/{video_name}'

    # A '-' in the preprocess name means several ControlNets are chained;
    # resolve each part to its Hugging Face checkpoint.
    parts = input_ns.preprocess_name.split('-')
    if len(parts) > 1:
        input_ns.hf_cn_path = [const.PREPROCESSOR_DICT[p] for p in parts]
    else:
        input_ns.hf_cn_path = const.PREPROCESSOR_DICT[input_ns.preprocess_name]
    input_ns.hf_path = "runwayml/stable-diffusion-v1-5"

    # Scratch locations for DDIM inverses and control images (not used for
    # saving the final result, but the pipeline expects them to exist).
    input_ns.inverse_path = f'{const.GENERATED_DATA_PATH}/inverses/{video_name}/{input_ns.preprocess_name}_{input_ns.model_id}_{input_ns.grid_size}x{input_ns.grid_size}_{input_ns.pad}'
    input_ns.control_path = f'{const.GENERATED_DATA_PATH}/controls/{video_name}/{input_ns.preprocess_name}_{input_ns.grid_size}x{input_ns.grid_size}_{input_ns.pad}'
    os.makedirs(input_ns.inverse_path, exist_ok=True)
    os.makedirs(input_ns.control_path, exist_ok=True)
    return input_ns
def run(input_ns, video_name, positive_prompts, save_folder):
    """Run the RAVE video-editing pipeline for one video and save the MP4.

    Args:
        input_ns: argparse.Namespace holding all pipeline hyper-parameters.
        video_name: filename of the source video (e.g. "truck.mp4").
        positive_prompts: edit prompt; kept for interface compatibility (the
            caller also stores it on input_ns.positive_prompts).
        save_folder: directory the edited MP4 is written into.
    """
    if 'model_id' not in input_ns.__dict__:
        input_ns.model_id = "None"
    device = init_device()
    input_ns = init_paths(input_ns, video_name, save_folder)
    print(f"Save path: {input_ns.save_path}")

    # Split the source video into grid frames for the pipeline.
    input_ns.image_pil_list = vgu.prepare_video_to_grid(input_ns.video_path, input_ns.sample_size, input_ns.grid_size, input_ns.pad)
    input_ns.sample_size = len(input_ns.image_pil_list)
    print(f'Frame count: {len(input_ns.image_pil_list)}')

    # A '-' in the conditioning scale encodes multiple ControlNets. Compute
    # the flag once so the class choice and the result unpacking can never
    # disagree (previously the same test was evaluated in two places).
    is_multi = '-' in str(input_ns.controlnet_conditioning_scale)
    controlnet_class = RAVE_MultiControlNet if is_multi else RAVE
    CN = controlnet_class(device)
    CN.init_models(input_ns.hf_cn_path, input_ns.hf_path, input_ns.preprocess_name, input_ns.model_id)

    input_dict = vars(input_ns)

    # Time the editing pass and report it (previously the timestamps were
    # taken but the elapsed time was never used).
    start_time = datetime.datetime.now()
    if is_multi:
        res_vid, control_vid_1, control_vid_2 = CN(input_dict)
    else:
        res_vid, control_vid = CN(input_dict)
    end_time = datetime.datetime.now()
    print(f'Elapsed time: {end_time - start_time}')

    # imageio expects numpy arrays, not PIL images.
    res_vid_np = [np.array(img) for img in res_vid]
    imageio.mimwrite(input_ns.save_path, res_vid_np, format='mp4', fps=30, quality=8)
if __name__ == '__main__':
    # Parse the command-line argument naming the JSONL file of edit jobs.
    parser = argparse.ArgumentParser(description='Batch video editing with JSONL input.')
    parser.add_argument('--jsonl_path', type=str, required=True, help='Path to the JSONL file containing video info')
    args = parser.parse_args()

    # Pipeline hyper-parameters shared by every job in the batch.
    fixed_params = {
        'preprocess_name': 'depth_zoe',
        'batch_size': 4,
        'batch_size_vae': 1,
        'cond_step_start': 0.0,
        'controlnet_conditioning_scale': 1.0,
        'controlnet_guidance_end': 1.0,
        'controlnet_guidance_start': 0.0,
        'give_control_inversion': True,
        'grid_size': 3,
        'sample_size': -1,
        'pad': 1,
        'guidance_scale': 7.5,
        'inversion_prompt': '',
        'is_ddim_inversion': True,
        'is_shuffle': True,
        'negative_prompts': '',
        'num_inference_steps': 50,
        'num_inversion_step': 50,
        'seed': 0,
        'model_id': 'None'
    }

    # Process each record in the JSONL file, one video per line.
    with open(args.jsonl_path, 'r') as f:
        for line in f:
            line = line.strip()
            if not line:
                # Tolerate blank lines and a trailing newline; json.loads
                # would otherwise raise on an empty string.
                continue
            data = json.loads(line)
            video_name = data['video']  # filename as stored, e.g. "truck.mp4"
            positive_prompts = data['edit_prompt']
            # Output folder is named after the video without its extension.
            save_folder = f'/home/wangjuntong/RAVE-main/outputs/lnk_painting/{video_name.rsplit(".", 1)[0]}'
            # Build the per-job namespace from the fixed parameters plus
            # the dynamic prompt/video fields.
            input_ns = argparse.Namespace(**fixed_params)
            input_ns.positive_prompts = positive_prompts
            input_ns.video_name = video_name
            run(input_ns, video_name, positive_prompts, save_folder)