import os
import argparse
import torch
import torchvision
import json
from einops import rearrange
from diffusers import DDIMScheduler, AutoencoderKL, DDIMInverseScheduler
from transformers import CLIPTextModel, CLIPTokenizer

from models.pipeline_flatten import FlattenPipeline
from models.util import save_videos_grid, read_video, sample_trajectories
from models.unet import UNet3DConditionModel

def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--jsonl_path", type=str, help="Path to the JSONL file containing video paths and prompts")
    parser.add_argument("--prompt", type=str, help="Textual prompt for video editing")
    parser.add_argument("--neg_prompt", type=str, default="none", help="Negative prompt for guidance")
    parser.add_argument("--guidance_scale", default=20.0, type=float, help="Guidance scale")
    parser.add_argument("--video_path", type=str, help="Path to a source video")
    parser.add_argument("--sd_path", type=str, default="checkpoints/stable-diffusion-2-1-base", help="Path of Stable Diffusion")
    parser.add_argument("--output_path", type=str, default="./outputs", help="Directory of output")
    parser.add_argument("--video_length", type=int, default=32, help="Length of output video")
    parser.add_argument("--old_qk", type=int, default=0, help="Whether to use old queries and keys for flow-guided attention")
    parser.add_argument("--height", type=int, default=512, help="Height of synthesized video, and should be a multiple of 32")
    parser.add_argument("--width", type=int, default=512, help="Width of synthesized video, and should be a multiple of 32")
    parser.add_argument("--sample_steps", type=int, default=50, help="Steps for feature injection")
    parser.add_argument("--inject_step", type=int, default=40, help="Steps for feature injection")
    parser.add_argument("--seed", type=int, default=66, help="Random seed of generator")
    parser.add_argument("--frame_rate", type=int, default=2, help="The frame rate of loading input video")
    parser.add_argument("--fps", type=int, default=15, help="FPS of the output video")
    args = parser.parse_args()
    return args
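
# Illustrative invocations (a sketch only; the script filename and paths below are placeholders):
#   python <this_script>.py --video_path /path/to/source.mp4 --prompt "an edit prompt"
#   python <this_script>.py --jsonl_path /path/to/tasks.jsonl --output_path ./outputs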

if __name__ == "__main__":
    args = get_args()
    os.makedirs(args.output_path, exist_ok=True)
    device = "cuda"
    # Round spatial dimensions down to the nearest multiple of 32, as required by the model
    args.height = (args.height // 32) * 32
    args.width = (args.width // 32) * 32

    # Load the Stable Diffusion components in fp16; the UNet is built from the 2D weights via from_pretrained_2d
    tokenizer = CLIPTokenizer.from_pretrained(args.sd_path, subfolder="tokenizer")
    text_encoder = CLIPTextModel.from_pretrained(args.sd_path, subfolder="text_encoder").to(dtype=torch.float16)
    vae = AutoencoderKL.from_pretrained(args.sd_path, subfolder="vae").to(dtype=torch.float16)
    unet = UNet3DConditionModel.from_pretrained_2d(args.sd_path, subfolder="unet").to(dtype=torch.float16)
    scheduler = DDIMScheduler.from_pretrained(args.sd_path, subfolder="scheduler")
    inverse = DDIMInverseScheduler.from_pretrained(args.sd_path, subfolder="scheduler")

    # Assemble the Flatten pipeline and enable memory-saving options (VAE slicing, xFormers attention)
    pipe = FlattenPipeline(
        vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet,
        scheduler=scheduler, inverse_scheduler=inverse)
    pipe.enable_vae_slicing()
    pipe.enable_xformers_memory_efficient_attention()
    pipe.to(device)

    generator = torch.Generator(device=device)
    generator.manual_seed(args.seed)

    # Batch mode: one JSON object per JSONL line; otherwise fall back to a single video/prompt pair
    if args.jsonl_path:
        with open(args.jsonl_path, 'r') as f:
            video_tasks = [json.loads(line) for line in f if line.strip()]
    else:
        if not args.video_path or not args.prompt:
            raise ValueError("For single video editing, --video_path and --prompt are required")
        video_tasks = [{"video": args.video_path, "edit_prompt": args.prompt}]

    # Videos listed in the JSONL are resolved against this base directory; os.path.join leaves
    # absolute paths (e.g. a single --video_path) unchanged
    base_dir = "/home/wangjuntong/video_editing_dataset/all_sourse/"
    os.makedirs("tmp", exist_ok=True)

    for task in video_tasks:
        video_path = os.path.join(base_dir, task["video"])
        prompt = task["edit_prompt"]
        
        # Load and resize the source frames, then rearrange to (batch, channels, frames, height, width)
        video = read_video(video_path=video_path, video_length=args.video_length,
                           width=args.width, height=args.height, frame_rate=args.frame_rate)
        original_pixels = rearrange(video, "(b f) c h w -> b c f h w", b=1)

        # Save the source clip alongside the edited output for reference
        video_name = os.path.splitext(os.path.basename(video_path))[0]
        source_video_path = os.path.join(args.output_path, f"{video_name}_source.mp4")
        save_videos_grid(original_pixels, source_video_path, rescale=True)
        
        # Convert frames from [-1, 1] tensors to uint8 PIL images for the pipeline
        t2i_transform = torchvision.transforms.ToPILImage()
        real_frames = [t2i_transform(((frame + 1) / 2 * 255).to(torch.uint8)) for frame in video]
        
        # Sample point trajectories from the saved source video (used for flow-guided attention)
        temp_dir = os.path.join("tmp", video_name)
        os.makedirs(temp_dir, exist_ok=True)
        trajectories = sample_trajectories(source_video_path, device)
        torch.cuda.empty_cache()
        
        for k in trajectories.keys():
            trajectories[k] = trajectories[k].to(device)
        
        # Run the Flatten editing pipeline with the trajectories; --inject_step controls feature injection
        sample = pipe(prompt, video_length=args.video_length, frames=real_frames,
                      num_inference_steps=args.sample_steps, generator=generator, guidance_scale=args.guidance_scale,
                      negative_prompt=args.neg_prompt, width=args.width, height=args.height,
                      trajs=trajectories, output_dir=temp_dir, inject_step=args.inject_step, old_qk=args.old_qk).videos
        
        output_video_path = os.path.join(args.output_path, f"{video_name}_edited.mp4")
        save_videos_grid(sample, output_video_path, fps=args.fps)