diff --git a/CCEdit-main/scripts/__pycache__/__init__.cpython-39.pyc b/CCEdit-main/scripts/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d3c8d5a5dbbf607f3fd473ae00c3c9689936df22 Binary files /dev/null and b/CCEdit-main/scripts/__pycache__/__init__.cpython-39.pyc differ diff --git a/CCEdit-main/scripts/demo/__init__.py b/CCEdit-main/scripts/demo/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/CCEdit-main/scripts/demo/detect.py b/CCEdit-main/scripts/demo/detect.py new file mode 100644 index 0000000000000000000000000000000000000000..823ae8d39ee5977ed27b07240e2bb34a417d2db7 --- /dev/null +++ b/CCEdit-main/scripts/demo/detect.py @@ -0,0 +1,157 @@ +import argparse + +import cv2 +import numpy as np + +try: + from imwatermark import WatermarkDecoder +except ImportError as e: + try: + # Assume some of the other dependencies such as torch are not fulfilled + # import file without loading unnecessary libraries. + import importlib.util + import sys + + spec = importlib.util.find_spec("imwatermark.maxDct") + assert spec is not None + maxDct = importlib.util.module_from_spec(spec) + sys.modules["maxDct"] = maxDct + spec.loader.exec_module(maxDct) + + class WatermarkDecoder(object): + """A minimal version of + https://github.com/ShieldMnt/invisible-watermark/blob/main/imwatermark/watermark.py + to only reconstruct bits using dwtDct""" + + def __init__(self, wm_type="bytes", length=0): + assert wm_type == "bits", "Only bits defined in minimal import" + self._wmType = wm_type + self._wmLen = length + + def reconstruct(self, bits): + if len(bits) != self._wmLen: + raise RuntimeError("bits are not matched with watermark length") + + return bits + + def decode(self, cv2Image, method="dwtDct", **configs): + (r, c, channels) = cv2Image.shape + if r * c < 256 * 256: + raise RuntimeError("image too small, should be larger than 256x256") + + bits = [] + assert method == "dwtDct" + embed = maxDct.EmbedMaxDct(watermarks=[], wmLen=self._wmLen, **configs) + bits = embed.decode(cv2Image) + return self.reconstruct(bits) + + except: + raise e + + +# A fixed 48-bit message that was choosen at random +# WATERMARK_MESSAGE = 0xB3EC907BB19E +WATERMARK_MESSAGE = 0b101100111110110010010000011110111011000110011110 +# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1 +WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]] +MATCH_VALUES = [ + [27, "No watermark detected"], + [33, "Partial watermark match. Cannot determine with certainty."], + [ + 35, + ( + "Likely watermarked. In our test 0.02% of real images were " + 'falsely detected as "Likely watermarked"' + ), + ], + [ + 49, + ( + "Very likely watermarked. In our test no real images were " + 'falsely detected as "Very likely watermarked"' + ), + ], +] + + +class GetWatermarkMatch: + def __init__(self, watermark): + self.watermark = watermark + self.num_bits = len(self.watermark) + self.decoder = WatermarkDecoder("bits", self.num_bits) + + def __call__(self, x: np.ndarray) -> np.ndarray: + """ + Detects the number of matching bits the predefined watermark with one + or multiple images. Images should be in cv2 format, e.g. h x w x c. + + Args: + x: ([B], h w, c) in range [0, 255] + + Returns: + number of matched bits ([B],) + """ + squeeze = len(x.shape) == 3 + if squeeze: + x = x[None, ...] 
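+ # reverse the channel axis (RGB <-> BGR) before handing the images to the cv2-based decoder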
+ x = np.flip(x, axis=-1) + + bs = x.shape[0] + detected = np.empty((bs, self.num_bits), dtype=bool) + for k in range(bs): + detected[k] = self.decoder.decode(x[k], "dwtDct") + result = np.sum(detected == self.watermark, axis=-1) + if squeeze: + return result[0] + else: + return result + + +get_watermark_match = GetWatermarkMatch(WATERMARK_BITS) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "filename", + nargs="+", + type=str, + help="Image files to check for watermarks", + ) + opts = parser.parse_args() + + print( + """ + This script tries to detect watermarked images. Please be aware of + the following: + - As the watermark is supposed to be invisible, there is the risk that + watermarked images may not be detected. + - To maximize the chance of detection make sure that the image has the same + dimensions as when the watermark was applied (most likely 1024x1024 + or 512x512). + - Specific image manipulation may drastically decrease the chance that + watermarks can be detected. + - There is also the chance that an image has the characteristics of the + watermark by chance. + - The watermark script is public, anybody may watermark any images, and + could therefore claim it to be generated. + - All numbers below are based on a test using 10,000 images without any + modifications after applying the watermark. + """ + ) + + for fn in opts.filename: + image = cv2.imread(fn) + if image is None: + print(f"Couldn't read {fn}. Skipping") + continue + + num_bits = get_watermark_match(image) + k = 0 + while num_bits > MATCH_VALUES[k][0]: + k += 1 + print( + f"{fn}: {MATCH_VALUES[k][1]}", + f"Bits that matched the watermark {num_bits} from {len(WATERMARK_BITS)}\n", + sep="\n\t", + ) diff --git a/CCEdit-main/scripts/demo/sampling.py b/CCEdit-main/scripts/demo/sampling.py new file mode 100644 index 0000000000000000000000000000000000000000..7e953e9069d34ae09f4fddd70f8dcdd016e87731 --- /dev/null +++ b/CCEdit-main/scripts/demo/sampling.py @@ -0,0 +1,328 @@ +from pytorch_lightning import seed_everything +from scripts.demo.streamlit_helpers import * +from scripts.util.detection.nsfw_and_watermark_dectection import DeepFloydDataFiltering + +SAVE_PATH = "outputs/demo/txt2img/" + +SD_XL_BASE_RATIOS = { + "0.5": (704, 1408), + "0.52": (704, 1344), + "0.57": (768, 1344), + "0.6": (768, 1280), + "0.68": (832, 1216), + "0.72": (832, 1152), + "0.78": (896, 1152), + "0.82": (896, 1088), + "0.88": (960, 1088), + "0.94": (960, 1024), + "1.0": (1024, 1024), + "1.07": (1024, 960), + "1.13": (1088, 960), + "1.21": (1088, 896), + "1.29": (1152, 896), + "1.38": (1152, 832), + "1.46": (1216, 832), + "1.67": (1280, 768), + "1.75": (1344, 768), + "1.91": (1344, 704), + "2.0": (1408, 704), + "2.09": (1472, 704), + "2.4": (1536, 640), + "2.5": (1600, 640), + "2.89": (1664, 576), + "3.0": (1728, 576), +} + +VERSION2SPECS = { + "SD-XL base": { + "H": 1024, + "W": 1024, + "C": 4, + "f": 8, + "is_legacy": False, + "config": "configs/inference/sd_xl_base.yaml", + "ckpt": "checkpoints/sd_xl_base_0.9.safetensors", + "is_guided": True, + }, + "sd-2.1": { + "H": 512, + "W": 512, + "C": 4, + "f": 8, + "is_legacy": True, + "config": "configs/inference/sd_2_1.yaml", + "ckpt": "checkpoints/v2-1_512-ema-pruned.safetensors", + "is_guided": True, + }, + "sd-2.1-768": { + "H": 768, + "W": 768, + "C": 4, + "f": 8, + "is_legacy": True, + "config": "configs/inference/sd_2_1_768.yaml", + "ckpt": "checkpoints/v2-1_768-ema-pruned.safetensors", + }, + "SDXL-Refiner": { + "H": 1024, + "W": 1024, + "C": 4, 
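+ # "C" is the number of latent channels; "f" is the autoencoder's spatial downsampling factor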
+ "f": 8, + "is_legacy": True, + "config": "configs/inference/sd_xl_refiner.yaml", + "ckpt": "checkpoints/sd_xl_refiner_0.9.safetensors", + "is_guided": True, + }, +} + + +def load_img(display=True, key=None, device="cuda"): + image = get_interactive_image(key=key) + if image is None: + return None + if display: + st.image(image) + w, h = image.size + print(f"loaded input image of size ({w}, {h})") + width, height = map( + lambda x: x - x % 64, (w, h) + ) # resize to integer multiple of 64 + image = image.resize((width, height)) + image = np.array(image.convert("RGB")) + image = image[None].transpose(0, 3, 1, 2) + image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 + return image.to(device) + + +def run_txt2img( + state, version, version_dict, is_legacy=False, return_latents=False, filter=None +): + if version == "SD-XL base": + ratio = st.sidebar.selectbox("Ratio:", list(SD_XL_BASE_RATIOS.keys()), 10) + W, H = SD_XL_BASE_RATIOS[ratio] + else: + H = st.sidebar.number_input( + "H", value=version_dict["H"], min_value=64, max_value=2048 + ) + W = st.sidebar.number_input( + "W", value=version_dict["W"], min_value=64, max_value=2048 + ) + C = version_dict["C"] + F = version_dict["f"] + + init_dict = { + "orig_width": W, + "orig_height": H, + "target_width": W, + "target_height": H, + } + value_dict = init_embedder_options( + get_unique_embedder_keys_from_conditioner(state["model"].conditioner), + init_dict, + prompt=prompt, + negative_prompt=negative_prompt, + ) + num_rows, num_cols, sampler = init_sampling( + use_identity_guider=not version_dict["is_guided"] + ) + + num_samples = num_rows * num_cols + + if st.button("Sample"): + st.write(f"**Model I:** {version}") + out = do_sample( + state["model"], + sampler, + value_dict, + num_samples, + H, + W, + C, + F, + force_uc_zero_embeddings=["txt"] if not is_legacy else [], + return_latents=return_latents, + filter=filter, + ) + return out + + +def run_img2img( + state, version_dict, is_legacy=False, return_latents=False, filter=None +): + img = load_img() + if img is None: + return None + H, W = img.shape[2], img.shape[3] + + init_dict = { + "orig_width": W, + "orig_height": H, + "target_width": W, + "target_height": H, + } + value_dict = init_embedder_options( + get_unique_embedder_keys_from_conditioner(state["model"].conditioner), + init_dict, + ) + strength = st.number_input( + "**Img2Img Strength**", value=0.5, min_value=0.0, max_value=1.0 + ) + num_rows, num_cols, sampler = init_sampling( + img2img_strength=strength, + use_identity_guider=not version_dict["is_guided"], + ) + num_samples = num_rows * num_cols + + if st.button("Sample"): + out = do_img2img( + repeat(img, "1 ... 
-> n ...", n=num_samples), + state["model"], + sampler, + value_dict, + num_samples, + force_uc_zero_embeddings=["txt"] if not is_legacy else [], + return_latents=return_latents, + filter=filter, + ) + return out + + +def apply_refiner( + input, + state, + sampler, + num_samples, + prompt, + negative_prompt, + filter=None, +): + init_dict = { + "orig_width": input.shape[3] * 8, + "orig_height": input.shape[2] * 8, + "target_width": input.shape[3] * 8, + "target_height": input.shape[2] * 8, + } + + value_dict = init_dict + value_dict["prompt"] = prompt + value_dict["negative_prompt"] = negative_prompt + + value_dict["crop_coords_top"] = 0 + value_dict["crop_coords_left"] = 0 + + value_dict["aesthetic_score"] = 6.0 + value_dict["negative_aesthetic_score"] = 2.5 + + st.warning(f"refiner input shape: {input.shape}") + samples = do_img2img( + input, + state["model"], + sampler, + value_dict, + num_samples, + skip_encode=True, + filter=filter, + ) + + return samples + + +if __name__ == "__main__": + st.title("Stable Diffusion") + version = st.selectbox("Model Version", list(VERSION2SPECS.keys()), 0) + version_dict = VERSION2SPECS[version] + mode = st.radio("Mode", ("txt2img", "img2img"), 0) + st.write("__________________________") + + if version == "SD-XL base": + add_pipeline = st.checkbox("Load SDXL-Refiner?", False) + st.write("__________________________") + else: + add_pipeline = False + + filter = DeepFloydDataFiltering(verbose=False) + + seed = st.sidebar.number_input("seed", value=42, min_value=0, max_value=int(1e9)) + seed_everything(seed) + + save_locally, save_path = init_save_locally(os.path.join(SAVE_PATH, version)) + + state = init_st(version_dict) + if state["msg"]: + st.info(state["msg"]) + model = state["model"] + + is_legacy = version_dict["is_legacy"] + + prompt = st.text_input( + "prompt", + "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k", + ) + if is_legacy: + negative_prompt = st.text_input("negative prompt", "") + else: + negative_prompt = "" # which is unused + + if add_pipeline: + st.write("__________________________") + + version2 = "SDXL-Refiner" + st.warning( + f"Running with {version2} as the second stage model. 
Make sure to provide (V)RAM :) " + ) + st.write("**Refiner Options:**") + + version_dict2 = VERSION2SPECS[version2] + state2 = init_st(version_dict2) + st.info(state2["msg"]) + + stage2strength = st.number_input( + "**Refinement strength**", value=0.3, min_value=0.0, max_value=1.0 + ) + + sampler2 = init_sampling( + key=2, + img2img_strength=stage2strength, + use_identity_guider=not version_dict2["is_guided"], + get_num_samples=False, + ) + st.write("__________________________") + + if mode == "txt2img": + out = run_txt2img( + state, + version, + version_dict, + is_legacy=is_legacy, + return_latents=add_pipeline, + filter=filter, + ) + elif mode == "img2img": + out = run_img2img( + state, + version_dict, + is_legacy=is_legacy, + return_latents=add_pipeline, + filter=filter, + ) + else: + raise ValueError(f"unknown mode {mode}") + if isinstance(out, (tuple, list)): + samples, samples_z = out + else: + samples = out + + if add_pipeline: + st.write("**Running Refinement Stage**") + samples = apply_refiner( + samples_z, + state2, + sampler2, + samples_z.shape[0], + prompt=prompt, + negative_prompt=negative_prompt if is_legacy else "", + filter=filter, + ) + + if save_locally and samples is not None: + perform_save_locally(save_path, samples) diff --git a/CCEdit-main/scripts/demo/sampling_command.py b/CCEdit-main/scripts/demo/sampling_command.py new file mode 100644 index 0000000000000000000000000000000000000000..fbcfddbe7c670033818c64a051d48e3dc94ca472 --- /dev/null +++ b/CCEdit-main/scripts/demo/sampling_command.py @@ -0,0 +1,152 @@ +from pytorch_lightning import seed_everything +from scripts.demo.streamlit_helpers import * +from scripts.util.detection.nsfw_and_watermark_dectection import DeepFloydDataFiltering +import torchvision + +SAVE_PATH = "outputs/demo/txt2img/" + +SD_XL_BASE_RATIOS = { + "0.5": (704, 1408), + "0.52": (704, 1344), + "0.57": (768, 1344), + "0.6": (768, 1280), + "0.68": (832, 1216), + "0.72": (832, 1152), + "0.78": (896, 1152), + "0.82": (896, 1088), + "0.88": (960, 1088), + "0.94": (960, 1024), + "1.0": (1024, 1024), + "1.07": (1024, 960), + "1.13": (1088, 960), + "1.21": (1088, 896), + "1.29": (1152, 896), + "1.38": (1152, 832), + "1.46": (1216, 832), + "1.67": (1280, 768), + "1.75": (1344, 768), + "1.91": (1344, 704), + "2.0": (1408, 704), + "2.09": (1472, 704), + "2.4": (1536, 640), + "2.5": (1600, 640), + "2.89": (1664, 576), + "3.0": (1728, 576), +} + +VERSION2SPECS = { + "SD-XL base": { + "H": 1024, + "W": 1024, + "C": 4, + "f": 8, + "is_legacy": False, + "config": "configs/inference/sd_xl_base.yaml", + "ckpt": "checkpoints/sd_xl_base_0.9.safetensors", + "is_guided": True, + }, + "sd-2.1": { + "H": 512, + "W": 512, + "C": 4, + "f": 8, + "is_legacy": True, + "config": "configs/inference/sd_2_1.yaml", + "ckpt": "checkpoints/v2-1_512-ema-pruned.safetensors", + "is_guided": True, + }, + "sd-2.1-768": { + "H": 768, + "W": 768, + "C": 4, + "f": 8, + "is_legacy": True, + "config": "configs/inference/sd_2_1_768.yaml", + "ckpt": "checkpoints/v2-1_768-ema-pruned.safetensors", + }, + "SDXL-Refiner": { + "H": 1024, + "W": 1024, + "C": 4, + "f": 8, + "is_legacy": True, + "config": "configs/inference/sd_xl_refiner.yaml", + "ckpt": "checkpoints/sd_xl_refiner_0.9.safetensors", + "is_guided": True, + }, +} + +version = "sd-2.1" +# version = "SD-XL base" +version_dict = VERSION2SPECS[version] + +# if version == "SD-XL base": +# # ratio = st.sidebar.selectbox("Ratio:", list(SD_XL_BASE_RATIOS.keys()), 10) +# ratio = '1.0' +# W, H = SD_XL_BASE_RATIOS[ratio] +# else: +# H = 
st.sidebar.number_input( +# "H", value=version_dict["H"], min_value=64, max_value=2048 +# ) +# W = st.sidebar.number_input( +# "W", value=version_dict["W"], min_value=64, max_value=2048 +# ) + +# initialize model +state = init_st(version_dict) +if state["msg"]: + st.info(state["msg"]) +model = state["model"] + +if version == "SD-XL base": + ratio = '1.0' + W, H = SD_XL_BASE_RATIOS[ratio] +else: + W, H = 512, 512 + +C = version_dict["C"] +F = version_dict["f"] + + +prompt = 'a corgi is sitting on a couch' +negative_prompt = 'ugly, low quality' + +init_dict = { + "orig_width": W, + "orig_height": H, + "target_width": W, + "target_height": H, +} +value_dict = init_embedder_options( + get_unique_embedder_keys_from_conditioner(state["model"].conditioner), + init_dict, + prompt=prompt, + negative_prompt=negative_prompt, +) +num_rows, num_cols, sampler = init_sampling( + use_identity_guider=not version_dict["is_guided"] +) + + +num_samples = num_rows * num_cols + +# st.write(f"**Model I:** {version}") +is_legacy=False +return_latents = False +filter=None +out = do_sample( + state["model"], + sampler, + value_dict, + num_samples, + H, + W, + C, + F, + force_uc_zero_embeddings=["txt"] if not is_legacy else [], + return_latents=return_latents, + filter=filter, +) + +torchvision.utils.save_image(out, 'debug/myres_2_1.png', nrow=4) +# torchvision.utils.save_image(out, 'debug/myres.png', nrow=4) \ No newline at end of file diff --git a/CCEdit-main/scripts/demo/streamlit_helpers.py b/CCEdit-main/scripts/demo/streamlit_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..9272e121d5bda9bb6dfa2565a00ce5fafcd3b300 --- /dev/null +++ b/CCEdit-main/scripts/demo/streamlit_helpers.py @@ -0,0 +1,668 @@ +import os +from typing import Union, List + +import math +import numpy as np +import streamlit as st +import torch +from PIL import Image +from einops import rearrange, repeat +from imwatermark import WatermarkEncoder +from omegaconf import OmegaConf, ListConfig +from torch import autocast +from torchvision import transforms +from torchvision.utils import make_grid +from safetensors.torch import load_file as load_safetensors + +from sgm.modules.diffusionmodules.sampling import ( + EulerEDMSampler, + HeunEDMSampler, + EulerAncestralSampler, + DPMPP2SAncestralSampler, + DPMPP2MSampler, + LinearMultistepSampler, +) +from sgm.util import append_dims +from sgm.util import instantiate_from_config + + +class WatermarkEmbedder: + def __init__(self, watermark): + self.watermark = watermark + self.num_bits = len(WATERMARK_BITS) + self.encoder = WatermarkEncoder() + self.encoder.set_watermark("bits", self.watermark) + + def __call__(self, image: torch.Tensor): + """ + Adds a predefined watermark to the input image + + Args: + image: ([N,] B, C, H, W) in range [0, 1] + + Returns: + same as input but watermarked + """ + # watermarking libary expects input as cv2 format + squeeze = len(image.shape) == 4 + if squeeze: + image = image[None, ...] 
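+ # remember the leading dim so the (n b) flattening below can be undone after encoding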
+ n = image.shape[0] + image_np = rearrange( + (255 * image).detach().cpu(), "n b c h w -> (n b) h w c" + ).numpy() + # torch (b, c, h, w) in [0, 1] -> numpy (b, h, w, c) [0, 255] + for k in range(image_np.shape[0]): + image_np[k] = self.encoder.encode(image_np[k], "dwtDct") + image = torch.from_numpy( + rearrange(image_np, "(n b) h w c -> n b c h w", n=n) + ).to(image.device) + image = torch.clamp(image / 255, min=0.0, max=1.0) + if squeeze: + image = image[0] + return image + + +# A fixed 48-bit message that was choosen at random +# WATERMARK_MESSAGE = 0xB3EC907BB19E +WATERMARK_MESSAGE = 0b101100111110110010010000011110111011000110011110 +# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1 +WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]] +embed_watemark = WatermarkEmbedder(WATERMARK_BITS) + + +@st.cache_resource() +def init_st(version_dict, load_ckpt=True): + state = dict() + if not "model" in state: + config = version_dict["config"] + ckpt = version_dict["ckpt"] + + config = OmegaConf.load(config) + model, msg = load_model_from_config(config, ckpt if load_ckpt else None) + + state["msg"] = msg + state["model"] = model + state["ckpt"] = ckpt if load_ckpt else None + state["config"] = config + return state + + +def load_model_from_config(config, ckpt=None, verbose=True): + model = instantiate_from_config(config.model) + + if ckpt is not None: + print(f"Loading model from {ckpt}") + if ckpt.endswith("ckpt"): + pl_sd = torch.load(ckpt, map_location="cpu") + if "global_step" in pl_sd: + global_step = pl_sd["global_step"] + st.info(f"loaded ckpt from global step {global_step}") + print(f"Global Step: {pl_sd['global_step']}") + sd = pl_sd["state_dict"] + elif ckpt.endswith("safetensors"): + sd = load_safetensors(ckpt) + else: + raise NotImplementedError + + msg = None + + m, u = model.load_state_dict(sd, strict=False) + + if len(m) > 0 and verbose: + print("missing keys:") + print(m) + if len(u) > 0 and verbose: + print("unexpected keys:") + print(u) + else: + msg = None + + model.cuda() + model.eval() + return model, msg + + +def get_unique_embedder_keys_from_conditioner(conditioner): + return list(set([x.input_key for x in conditioner.embedders])) + + +def init_embedder_options(keys, init_dict, prompt=None, negative_prompt=None): + # Hardcoded demo settings; might undergo some changes in the future + + value_dict = {} + for key in keys: + if key == "txt": + if prompt is None: + prompt = st.text_input( + "Prompt", "A professional photograph of an astronaut riding a pig" + ) + if negative_prompt is None: + negative_prompt = st.text_input("Negative prompt", "") + + value_dict["prompt"] = prompt + value_dict["negative_prompt"] = negative_prompt + + if key == "original_size_as_tuple": + orig_width = st.number_input( + "orig_width", + value=init_dict["orig_width"], + min_value=16, + ) + orig_height = st.number_input( + "orig_height", + value=init_dict["orig_height"], + min_value=16, + ) + + value_dict["orig_width"] = orig_width + value_dict["orig_height"] = orig_height + + if key == "crop_coords_top_left": + crop_coord_top = st.number_input("crop_coords_top", value=0, min_value=0) + crop_coord_left = st.number_input("crop_coords_left", value=0, min_value=0) + + value_dict["crop_coords_top"] = crop_coord_top + value_dict["crop_coords_left"] = crop_coord_left + + if key == "aesthetic_score": + value_dict["aesthetic_score"] = 6.0 + value_dict["negative_aesthetic_score"] = 2.5 + + if key == "target_size_as_tuple": + target_width = st.number_input( + "target_width", + 
value=init_dict["target_width"], + min_value=16, + ) + target_height = st.number_input( + "target_height", + value=init_dict["target_height"], + min_value=16, + ) + + value_dict["target_width"] = target_width + value_dict["target_height"] = target_height + + return value_dict + + +def perform_save_locally(save_path, samples): + os.makedirs(os.path.join(save_path), exist_ok=True) + base_count = len(os.listdir(os.path.join(save_path))) + # samples = embed_watemark(samples) + for sample in samples: + sample = 255.0 * rearrange(sample.cpu().numpy(), "c h w -> h w c") + Image.fromarray(sample.astype(np.uint8)).save( + os.path.join(save_path, f"{base_count:09}.png") + ) + base_count += 1 + + +def init_save_locally(_dir, init_value: bool = False): + save_locally = st.sidebar.checkbox("Save images locally", value=init_value) + if save_locally: + save_path = st.text_input("Save path", value=os.path.join(_dir, "samples")) + else: + save_path = None + + return save_locally, save_path + + +class Img2ImgDiscretizationWrapper: + """ + wraps a discretizer, and prunes the sigmas + params: + strength: float between 0.0 and 1.0. 1.0 means full sampling (all sigmas are returned) + """ + + def __init__(self, discretization, strength: float = 1.0): + self.discretization = discretization + self.strength = strength + assert 0.0 <= self.strength <= 1.0 + + def __call__(self, *args, **kwargs): + # sigmas start large first, and decrease then + sigmas = self.discretization(*args, **kwargs) + print(f"sigmas after discretization, before pruning img2img: ", sigmas) + sigmas = torch.flip(sigmas, (0,)) + sigmas = sigmas[: max(int(self.strength * len(sigmas)), 1)] + print("prune index:", max(int(self.strength * len(sigmas)), 1)) + sigmas = torch.flip(sigmas, (0,)) + print(f"sigmas after pruning: ", sigmas) + return sigmas + + +def get_guider(key): + guider = st.sidebar.selectbox( + f"Discretization #{key}", + [ + "VanillaCFG", + "IdentityGuider", + ], + ) + + if guider == "IdentityGuider": + guider_config = { + "target": "sgm.modules.diffusionmodules.guiders.IdentityGuider" + } + elif guider == "VanillaCFG": + scale = st.number_input( + f"cfg-scale #{key}", value=5.0, min_value=0.0, max_value=100.0 + ) + + thresholder = st.sidebar.selectbox( + f"Thresholder #{key}", + [ + "None", + ], + ) + + if thresholder == "None": + dyn_thresh_config = { + "target": "sgm.modules.diffusionmodules.sampling_utils.NoDynamicThresholding" + } + else: + raise NotImplementedError + + guider_config = { + "target": "sgm.modules.diffusionmodules.guiders.VanillaCFG", + "params": {"scale": scale, "dyn_thresh_config": dyn_thresh_config}, + } + else: + raise NotImplementedError + return guider_config + + +def init_sampling( + key=1, img2img_strength=1.0, use_identity_guider=False, get_num_samples=True +): + if get_num_samples: + num_rows = 1 + num_cols = st.number_input( + f"num cols #{key}", value=2, min_value=1, max_value=10 + ) + + steps = st.sidebar.number_input( + f"steps #{key}", value=50, min_value=1, max_value=1000 + ) + sampler = st.sidebar.selectbox( + f"Sampler #{key}", + [ + "EulerEDMSampler", + "HeunEDMSampler", + "EulerAncestralSampler", + "DPMPP2SAncestralSampler", + "DPMPP2MSampler", + "LinearMultistepSampler", + ], + 0, + ) + discretization = st.sidebar.selectbox( + f"Discretization #{key}", + [ + "LegacyDDPMDiscretization", + "EDMDiscretization", + ], + ) + + discretization_config = get_discretization(discretization, key=key) + + guider_config = get_guider(key=key) + + sampler = get_sampler(sampler, steps, discretization_config, 
guider_config, key=key) + if img2img_strength < 1.0: + st.warning( + f"Wrapping {sampler.__class__.__name__} with Img2ImgDiscretizationWrapper" + ) + sampler.discretization = Img2ImgDiscretizationWrapper( + sampler.discretization, strength=img2img_strength + ) + if get_num_samples: + return num_rows, num_cols, sampler + return sampler + + +def get_discretization(discretization, key=1): + if discretization == "LegacyDDPMDiscretization": + discretization_config = { + "target": "sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization", + } + elif discretization == "EDMDiscretization": + sigma_min = st.number_input(f"sigma_min #{key}", value=0.03) # 0.0292 + sigma_max = st.number_input(f"sigma_max #{key}", value=14.61) # 14.6146 + rho = st.number_input(f"rho #{key}", value=3.0) + discretization_config = { + "target": "sgm.modules.diffusionmodules.discretizer.EDMDiscretization", + "params": { + "sigma_min": sigma_min, + "sigma_max": sigma_max, + "rho": rho, + }, + } + + return discretization_config + + +def get_sampler(sampler_name, steps, discretization_config, guider_config, key=1): + if sampler_name == "EulerEDMSampler" or sampler_name == "HeunEDMSampler": + s_churn = st.sidebar.number_input(f"s_churn #{key}", value=0.0, min_value=0.0) + s_tmin = st.sidebar.number_input(f"s_tmin #{key}", value=0.0, min_value=0.0) + s_tmax = st.sidebar.number_input(f"s_tmax #{key}", value=999.0, min_value=0.0) + s_noise = st.sidebar.number_input(f"s_noise #{key}", value=1.0, min_value=0.0) + + if sampler_name == "EulerEDMSampler": + sampler = EulerEDMSampler( + num_steps=steps, + discretization_config=discretization_config, + guider_config=guider_config, + s_churn=s_churn, + s_tmin=s_tmin, + s_tmax=s_tmax, + s_noise=s_noise, + verbose=True, + ) + elif sampler_name == "HeunEDMSampler": + sampler = HeunEDMSampler( + num_steps=steps, + discretization_config=discretization_config, + guider_config=guider_config, + s_churn=s_churn, + s_tmin=s_tmin, + s_tmax=s_tmax, + s_noise=s_noise, + verbose=True, + ) + elif ( + sampler_name == "EulerAncestralSampler" + or sampler_name == "DPMPP2SAncestralSampler" + ): + s_noise = st.sidebar.number_input("s_noise", value=1.0, min_value=0.0) + eta = st.sidebar.number_input("eta", value=1.0, min_value=0.0) + + if sampler_name == "EulerAncestralSampler": + sampler = EulerAncestralSampler( + num_steps=steps, + discretization_config=discretization_config, + guider_config=guider_config, + eta=eta, + s_noise=s_noise, + verbose=True, + ) + elif sampler_name == "DPMPP2SAncestralSampler": + sampler = DPMPP2SAncestralSampler( + num_steps=steps, + discretization_config=discretization_config, + guider_config=guider_config, + eta=eta, + s_noise=s_noise, + verbose=True, + ) + elif sampler_name == "DPMPP2MSampler": + sampler = DPMPP2MSampler( + num_steps=steps, + discretization_config=discretization_config, + guider_config=guider_config, + verbose=True, + ) + elif sampler_name == "LinearMultistepSampler": + order = st.sidebar.number_input("order", value=4, min_value=1) + sampler = LinearMultistepSampler( + num_steps=steps, + discretization_config=discretization_config, + guider_config=guider_config, + order=order, + verbose=True, + ) + else: + raise ValueError(f"unknown sampler {sampler_name}!") + + return sampler + + +def get_interactive_image(key=None) -> Image.Image: + image = st.file_uploader("Input", type=["jpg", "JPEG", "png"], key=key) + if image is not None: + image = Image.open(image) + if not image.mode == "RGB": + image = image.convert("RGB") + return image + + +def 
load_img(display=True, key=None): + image = get_interactive_image(key=key) + if image is None: + return None + if display: + st.image(image) + w, h = image.size + print(f"loaded input image of size ({w}, {h})") + + transform = transforms.Compose( + [ + transforms.ToTensor(), + transforms.Lambda(lambda x: x * 2.0 - 1.0), + ] + ) + img = transform(image)[None, ...] + st.text(f"input min/max/mean: {img.min():.3f}/{img.max():.3f}/{img.mean():.3f}") + return img + + +def get_init_img(batch_size=1, key=None): + init_image = load_img(key=key).cuda() + init_image = repeat(init_image, "1 ... -> b ...", b=batch_size) + return init_image + + +def do_sample( + model, + sampler, + value_dict, + num_samples, + H, + W, + C, + F, + force_uc_zero_embeddings: List = None, + batch2model_input: List = None, + return_latents=False, + filter=None, +): + if force_uc_zero_embeddings is None: + force_uc_zero_embeddings = [] + if batch2model_input is None: + batch2model_input = [] + + st.text("Sampling") + + outputs = st.empty() + precision_scope = autocast + with torch.no_grad(): + with precision_scope("cuda"): + with model.ema_scope(): + num_samples = [num_samples] + batch, batch_uc = get_batch( + get_unique_embedder_keys_from_conditioner(model.conditioner), + value_dict, + num_samples, + ) + for key in batch: + if isinstance(batch[key], torch.Tensor): + print(key, batch[key].shape) + elif isinstance(batch[key], list): + print(key, [len(l) for l in batch[key]]) + else: + print(key, batch[key]) + c, uc = model.conditioner.get_unconditional_conditioning( + batch, + batch_uc=batch_uc, + force_uc_zero_embeddings=force_uc_zero_embeddings, + ) + + for k in c: + if not k == "crossattn": + c[k], uc[k] = map( + lambda y: y[k][: math.prod(num_samples)].to("cuda"), (c, uc) + ) + + additional_model_inputs = {} + for k in batch2model_input: + additional_model_inputs[k] = batch[k] + + shape = (math.prod(num_samples), C, H // F, W // F) + randn = torch.randn(shape).to("cuda") + + def denoiser(input, sigma, c): + return model.denoiser( + model.model, input, sigma, c, **additional_model_inputs + ) + + samples_z = sampler(denoiser, randn, cond=c, uc=uc) + samples_x = model.decode_first_stage(samples_z) + samples = torch.clamp((samples_x + 1.0) / 2.0, min=0.0, max=1.0) + + if filter is not None: + samples = filter(samples) + + grid = torch.stack([samples]) + grid = rearrange(grid, "n b c h w -> (n h) (b w) c") + outputs.image(grid.cpu().numpy()) + + if return_latents: + return samples, samples_z + return samples + + +def get_batch(keys, value_dict, N: Union[List, ListConfig], device="cuda"): + # Hardcoded demo setups; might undergo some changes in the future + + batch = {} + batch_uc = {} + + for key in keys: + if key == "txt": + batch["txt"] = ( + np.repeat([value_dict["prompt"]], repeats=math.prod(N)) + .reshape(N) + .tolist() + ) + batch_uc["txt"] = ( + np.repeat([value_dict["negative_prompt"]], repeats=math.prod(N)) + .reshape(N) + .tolist() + ) + elif key == "original_size_as_tuple": + # import pdb; pdb.set_trace() + batch["original_size_as_tuple"] = ( + torch.tensor([value_dict["orig_height"], value_dict["orig_width"]]) + .to(device) + .repeat(*N, 1) + ) + elif key == "crop_coords_top_left": + # import pdb; pdb.set_trace() + batch["crop_coords_top_left"] = ( + torch.tensor( + [value_dict["crop_coords_top"], value_dict["crop_coords_left"]] + ) + .to(device) + .repeat(*N, 1) + ) + elif key == "aesthetic_score": + batch["aesthetic_score"] = ( + torch.tensor([value_dict["aesthetic_score"]]).to(device).repeat(*N, 1) + ) + 
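+ # the unconditional batch uses the negative aesthetic score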
batch_uc["aesthetic_score"] = ( + torch.tensor([value_dict["negative_aesthetic_score"]]) + .to(device) + .repeat(*N, 1) + ) + + elif key == "target_size_as_tuple": + batch["target_size_as_tuple"] = ( + torch.tensor([value_dict["target_height"], value_dict["target_width"]]) + .to(device) + .repeat(*N, 1) + ) + else: + batch[key] = value_dict[key] + + for key in batch.keys(): + if key not in batch_uc and isinstance(batch[key], torch.Tensor): + batch_uc[key] = torch.clone(batch[key]) + return batch, batch_uc + + +@torch.no_grad() +def do_img2img( + img, + model, + sampler, + value_dict, + num_samples, + force_uc_zero_embeddings=[], + additional_kwargs={}, + offset_noise_level: int = 0.0, + return_latents=False, + skip_encode=False, + filter=None, +): + st.text("Sampling") + + outputs = st.empty() + precision_scope = autocast + with torch.no_grad(): + with precision_scope("cuda"): + with model.ema_scope(): + batch, batch_uc = get_batch( + get_unique_embedder_keys_from_conditioner(model.conditioner), + value_dict, + [num_samples], + ) + c, uc = model.conditioner.get_unconditional_conditioning( + batch, + batch_uc=batch_uc, + force_uc_zero_embeddings=force_uc_zero_embeddings, + ) + + for k in c: + c[k], uc[k] = map(lambda y: y[k][:num_samples].to("cuda"), (c, uc)) + + for k in additional_kwargs: + c[k] = uc[k] = additional_kwargs[k] + if skip_encode: + z = img + else: + z = model.encode_first_stage(img) + noise = torch.randn_like(z) + sigmas = sampler.discretization(sampler.num_steps) + sigma = sigmas[0] + + st.info(f"all sigmas: {sigmas}") + st.info(f"noising sigma: {sigma}") + + if offset_noise_level > 0.0: + noise = noise + offset_noise_level * append_dims( + torch.randn(z.shape[0], device=z.device), z.ndim + ) + noised_z = z + noise * append_dims(sigma, z.ndim) + noised_z = noised_z / torch.sqrt( + 1.0 + sigmas[0] ** 2.0 + ) # Note: hardcoded to DDPM-like scaling. need to generalize later. 
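+ # closure handed to the sampler; it evaluates the model's denoiser with the prepared conditioning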
+ + def denoiser(x, sigma, c): + return model.denoiser(model.model, x, sigma, c) + + samples_z = sampler(denoiser, noised_z, cond=c, uc=uc) + samples_x = model.decode_first_stage(samples_z) + samples = torch.clamp((samples_x + 1.0) / 2.0, min=0.0, max=1.0) + + if filter is not None: + samples = filter(samples) + + grid = embed_watemark(torch.stack([samples])) + grid = rearrange(grid, "n b c h w -> (n h) (b w) c") + outputs.image(grid.cpu().numpy()) + if return_latents: + return samples, samples_z + return samples diff --git a/CCEdit-main/scripts/sampling/__init__.py b/CCEdit-main/scripts/sampling/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/CCEdit-main/scripts/sampling/__pycache__/__init__.cpython-39.pyc b/CCEdit-main/scripts/sampling/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c423411eab7f1269b34dd8556c8c9b6ffb6bc25 Binary files /dev/null and b/CCEdit-main/scripts/sampling/__pycache__/__init__.cpython-39.pyc differ diff --git a/CCEdit-main/scripts/sampling/__pycache__/util.cpython-39.pyc b/CCEdit-main/scripts/sampling/__pycache__/util.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ef0e0d2795db278342cab0b6fd52d4f6aa8c130 Binary files /dev/null and b/CCEdit-main/scripts/sampling/__pycache__/util.cpython-39.pyc differ diff --git a/CCEdit-main/scripts/sampling/pnp_generate_config.py b/CCEdit-main/scripts/sampling/pnp_generate_config.py new file mode 100644 index 0000000000000000000000000000000000000000..42a7438131a087ecd880965a98a8ff502013df05 --- /dev/null +++ b/CCEdit-main/scripts/sampling/pnp_generate_config.py @@ -0,0 +1,52 @@ +''' +python scripts/sampling/pnp_generate_config.py \ + --p_config outputs/debug/automatic_ref_editing/config_pnp_auto.yaml \ + --output_path "outputs/debug/automatic_ref_editing/output" \ + --image_path "src/pnp-diffusers/data/horse.jpg" \ + --latents_path "outputs/debug/automatic_ref_editing/latents_forward" \ + --prompt "a photo of a pink toy horse on the beach" +''' + + +import yaml +import argparse + +def save_yaml(args): + config_data = { + 'seed': args.seed, + 'device': args.device, + 'output_path': args.output_path, + 'image_path': args.image_path, + 'latents_path': args.latents_path, + 'sd_version': args.sd_version, + 'guidance_scale': args.guidance_scale, + 'n_timesteps': args.n_timesteps, + 'prompt': args.prompt, + 'negative_prompt': args.negative_prompt, + 'pnp_attn_t': args.pnp_attn_t, + 'pnp_f_t': args.pnp_f_t + } + + with open(args.p_config, 'w') as file: + yaml.dump(config_data, file, sort_keys=False, allow_unicode=True) + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description="Save configuration to a YAML file.") + parser.add_argument('--p_config', type=str, help="Path to save the YAML configuration file.") + parser.add_argument('--output_path', type=str, help="Output path for the results.") + parser.add_argument('--image_path', type=str, help="Path to the input image.") + parser.add_argument('--latents_path', type=str, help="Path to the latents file.") + parser.add_argument('--prompt', type=str, help="Prompt for the diffusion model.") + parser.add_argument('--seed', type=int, default=1, help="Seed for random number generation.") + parser.add_argument('--device', type=str, default='cuda', help="Device to be used (e.g., 'cuda', 'cpu').") + parser.add_argument('--sd_version', type=str, default='2.1', help="Version of the diffusion model.") + 
parser.add_argument('--guidance_scale', type=float, default=7.5, help="Guidance scale for the diffusion model.") + parser.add_argument('--n_timesteps', type=int, default=50, help="Number of timesteps for the diffusion process.") + parser.add_argument('--negative_prompt', type=str, default='ugly, blurry, black, low res, unrealistic', help="Negative prompt for the diffusion model.") + parser.add_argument('--pnp_attn_t', type=float, default=0.5, help="PNP attention threshold.") + parser.add_argument('--pnp_f_t', type=float, default=0.8, help="PNP feature threshold.") + + args = parser.parse_args() + + save_yaml(args) + print(f"YAML configuration saved to {args.p_config}") diff --git a/CCEdit-main/scripts/sampling/sampling_image.py b/CCEdit-main/scripts/sampling/sampling_image.py new file mode 100644 index 0000000000000000000000000000000000000000..c3cc3224112fa5bbc884ff01bf65794f35f4e681 --- /dev/null +++ b/CCEdit-main/scripts/sampling/sampling_image.py @@ -0,0 +1,168 @@ +from pytorch_lightning import seed_everything +from scripts.demo.streamlit_helpers import * +from scripts.util.detection.nsfw_and_watermark_dectection import DeepFloydDataFiltering + +import argparse +import tqdm + +if __name__ == "__main__": + + parser = argparse.ArgumentParser() + parser.add_argument('--model_version', type=str, default='2.1', + choices=['2.1', '2.1-768', 'xl']) + parser.add_argument("--num_samples", type=int, default=4) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--prompt", type=str, default="a corgi is sitting on a couch") + parser.add_argument("--prompt_listpath", type=str, default="", help="path to a txt file with a list of prompts") + parser.add_argument("--negative_prompt", type=str, default="ugly, low quality") + parser.add_argument('--save_path', type=str, default='outputs/demo/txt2img/') + args = parser.parse_args() + + seed_everything(args.seed) + save_path = args.save_path + + version_map = { + '2.1': 'sd-2.1', + '2.1-768': 'sd-2.1-768', + 'xl': 'SD-XL base', + } + + SD_XL_BASE_RATIOS = { + "0.5": (704, 1408), + "0.52": (704, 1344), + "0.57": (768, 1344), + "0.6": (768, 1280), + "0.68": (832, 1216), + "0.72": (832, 1152), + "0.78": (896, 1152), + "0.82": (896, 1088), + "0.88": (960, 1088), + "0.94": (960, 1024), + "1.0": (1024, 1024), + "1.07": (1024, 960), + "1.13": (1088, 960), + "1.21": (1088, 896), + "1.29": (1152, 896), + "1.38": (1152, 832), + "1.46": (1216, 832), + "1.67": (1280, 768), + "1.75": (1344, 768), + "1.91": (1344, 704), + "2.0": (1408, 704), + "2.09": (1472, 704), + "2.4": (1536, 640), + "2.5": (1600, 640), + "2.89": (1664, 576), + "3.0": (1728, 576), + } + + VERSION2SPECS = { + "SD-XL base": { + "H": 1024, + "W": 1024, + "C": 4, + "f": 8, + "is_legacy": False, + "config": "configs/inference/sd_xl_base.yaml", + "ckpt": "checkpoints/sd_xl_base_0.9.safetensors", + "is_guided": True, + }, + "sd-2.1": { + "H": 512, + "W": 512, + "C": 4, + "f": 8, + "is_legacy": True, + "config": "configs/inference/sd_2_1.yaml", + "ckpt": "checkpoints/v2-1_512-ema-pruned.safetensors", + "is_guided": True, + }, + "sd-2.1-768": { + "H": 768, + "W": 768, + "C": 4, + "f": 8, + "is_legacy": True, + "config": "configs/inference/sd_2_1_768.yaml", + "ckpt": "checkpoints/v2-1_768-ema-pruned.safetensors", + }, + "SDXL-Refiner": { + "H": 1024, + "W": 1024, + "C": 4, + "f": 8, + "is_legacy": True, + "config": "configs/inference/sd_xl_refiner.yaml", + "ckpt": "checkpoints/sd_xl_refiner_0.9.safetensors", + "is_guided": True, + }, + } + + version = args.model_version + version = 
version_map[version] + version_dict = VERSION2SPECS[version] + + # initialize model + state = init_st(version_dict) + if state["msg"]: + st.info(state["msg"]) + model = state["model"] + + if version == "SD-XL base": + ratio = '1.0' + W, H = SD_XL_BASE_RATIOS[ratio] + else: + W, H = version_dict['W'], version_dict['H'] + + C = version_dict["C"] + F = version_dict["f"] + + if args.prompt_listpath: + with open(args.prompt_listpath, 'r') as f: + prompts = f.readlines() + prompts = [p.strip() for p in prompts] + else: + prompts = [args.prompt] + negative_prompt = args.negative_prompt + init_dict = { + "orig_width": W, + "orig_height": H, + "target_width": W, + "target_height": H, + } + + for prompt in tqdm.tqdm(prompts): + print('Current Prompt: >>>>> {} <<<<<'.format(prompt)) + value_dict = init_embedder_options( + get_unique_embedder_keys_from_conditioner(state["model"].conditioner), + init_dict, + prompt=prompt, + negative_prompt=negative_prompt, + ) + _, _, sampler = init_sampling( + use_identity_guider=not version_dict["is_guided"] + ) + + num_samples = args.num_samples + + is_legacy=False + return_latents = False + filter=None + with torch.no_grad(): + samples = do_sample( + state["model"], + sampler, + value_dict, + num_samples, + H, + W, + C, + F, + force_uc_zero_embeddings=["txt"] if not is_legacy else [], + return_latents=return_latents, + filter=filter, + ) + + if samples is not None: + perform_save_locally(save_path, samples) + print("Saved samples to {}. Enjoy.".format(save_path)) diff --git a/CCEdit-main/scripts/sampling/sampling_tv2v.py b/CCEdit-main/scripts/sampling/sampling_tv2v.py new file mode 100644 index 0000000000000000000000000000000000000000..34c43822da11625565e82f41152bfcfbe7ae16b4 --- /dev/null +++ b/CCEdit-main/scripts/sampling/sampling_tv2v.py @@ -0,0 +1,209 @@ +import argparse +import json +import os +import random +import sys +sys.path.insert(0, '../src') +import torch +from einops import rearrange, repeat +from pytorch_lightning import seed_everything +from safetensors import safe_open +from torch import autocast + +from scripts.sampling.util import ( + chunk, + convert_load_lora, + create_model, + init_sampling, + load_video_keyframes, + model_load_ckpt, + perform_save_locally_video, +) +from sgm.util import append_dims + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--seed", type=int, default=42) + parser.add_argument( + "--config_path", + type=str, + default="configs/inference_ccedit/keyframe_no2ndca_depthmidas.yaml", + ) + parser.add_argument( + "--ckpt_path", + type=str, + default="models/tv2v-no2ndca-depthmidas.ckpt", + ) + parser.add_argument( + "--use_default", action="store_true", help="use default ckpt at first" + ) + parser.add_argument( + "--basemodel_path", + type=str, + default="", + help="load a new base model instead of original sd-1.5", + ) + parser.add_argument("--basemodel_listpath", type=str, default="") + parser.add_argument("--lora_path", type=str, default="") + parser.add_argument("--vae_path", type=str, default="") + parser.add_argument( + "--jsonl_path", + type=str, + required=True, + help="path to jsonl file containing video paths, prompts, and edit prompts" + ) + parser.add_argument("--save_root", type=str, default="outputs") + parser.add_argument("--H", type=int, default=512) + parser.add_argument("--W", type=int, default=768) + parser.add_argument("--original_fps", type=int, default=18) + parser.add_argument("--target_fps", type=int, default=6) + parser.add_argument("--num_keyframes", type=int, 
default=17) + parser.add_argument("--negative_prompt", type=str, default="ugly, low quality") + parser.add_argument("--sample_steps", type=int, default=30) + parser.add_argument("--sampler_name", type=str, default="DPMPP2SAncestralSampler") + parser.add_argument( + "--discretization_name", type=str, default="LegacyDDPMDiscretization" + ) + parser.add_argument("--cfg_scale", type=float, default=7.5) + parser.add_argument("--prior_coefficient_x", type=float, default=0.0) + parser.add_argument("--prior_coefficient_noise", type=float, default=1.0) + parser.add_argument("--sdedit_denoise_strength", type=float, default=0.0) + parser.add_argument("--num_samples", type=int, default=2) + parser.add_argument("--batch_size", type=int, default=1) + parser.add_argument('--disable_check_repeat', action='store_true', help='disable check repeat') + parser.add_argument('--lora_strength', type=float, default=0.8) + parser.add_argument('--save_type', type=str, default='mp4', choices=['gif', 'mp4']) + parser.add_argument('--inpainting_mode', action='store_true', help='inpainting mode') + args = parser.parse_args() + + seed = args.seed + if seed == -1: + seed = random.randint(0, 1000000) + seed_everything(seed) + + model = create_model(config_path=args.config_path).to("cuda") + ckpt_path = args.ckpt_path + print("--> load ckpt from: ", ckpt_path) + model = model_load_ckpt(model, path=ckpt_path) + model.eval() + + with open(args.jsonl_path, 'r') as f: + lines = f.readlines() + video_info_list = [json.loads(line) for line in lines] + + for video_info in video_info_list: + video_name = video_info['video'] + prompt = video_info['prompt'] + add_prompt = video_info['edit_prompt'] + video_path = os.path.join('/home/wangjuntong/video_editing_dataset/all_sourse', video_name) + save_path = os.path.join(args.save_root, os.path.splitext(video_name)[0]) + + keyframes = load_video_keyframes( + video_path, + args.original_fps, + args.target_fps, + args.num_keyframes, + (args.H, args.W), + ) + keyframes = keyframes.unsqueeze(0) + keyframes = rearrange(keyframes, "b t c h w -> b c t h w").to(model.device) + control_hint = keyframes + + batch = { + "txt": [prompt], + "control_hint": control_hint, + } + negative_prompt = args.negative_prompt + batch_uc = { + "txt": [negative_prompt], + "control_hint": batch["control_hint"].clone(), + } + if add_prompt: + batch["txt"] = [add_prompt + ", " + prompt] + + c, uc = model.conditioner.get_unconditional_conditioning( + batch_c=batch, + batch_uc=batch_uc, + ) + + sampling_kwargs = {} + + for k in c: + if isinstance(c[k], torch.Tensor): + c[k], uc[k] = map(lambda y: y[k].to(model.device), (c, uc)) + shape = (4, args.num_keyframes, args.H // 8, args.W // 8) + + precision_scope = autocast + with torch.no_grad(): + with torch.cuda.amp.autocast(): + randn = torch.randn(1, *shape).to(model.device) + if args.sdedit_denoise_strength == 0.0: + + def denoiser(input, sigma, c): + return model.denoiser( + model.model, input, sigma, c, **sampling_kwargs + ) + + if args.prior_coefficient_x != 0.0: + prior = model.encode_first_stage(keyframes) + randn = ( + args.prior_coefficient_x * prior + + args.prior_coefficient_noise * randn + ) + sampler = init_sampling( + sample_steps=args.sample_steps, + sampler_name=args.sampler_name, + discretization_name=args.discretization_name, + guider_config_target="sgm.modules.diffusionmodules.guiders.VanillaCFGTV2V", + cfg_scale=args.cfg_scale, + ) + sampler.verbose = True + samples = sampler(denoiser, randn, c, uc=uc) + else: + assert ( + args.sdedit_denoise_strength > 
0.0 + ), "sdedit_denoise_strength should be positive" + assert ( + args.sdedit_denoise_strength <= 1.0 + ), "sdedit_denoise_strength should be less than 1.0" + assert ( + args.prior_coefficient_x == 0 + ), "prior_coefficient_x should be 0 when using sdedit_denoise_strength" + denoise_strength = args.sdedit_denoise_strength + sampler = init_sampling( + sample_steps=args.sample_steps, + sampler_name=args.sampler_name, + discretization_name=args.discretization_name, + guider_config_target="sgm.modules.diffusionmodules.guiders.VanillaCFGTV2V", + cfg_scale=args.cfg_scale, + img2img_strength=denoise_strength, + ) + sampler.verbose = True + z = model.encode_first_stage(keyframes) + noise = torch.randn_like(z) + sigmas = sampler.discretization(sampler.num_steps).to(z.device) + sigma = sigmas[0] + + print(f"all sigmas: {sigmas}") + print(f"noising sigma: {sigma}") + noised_z = z + noise * append_dims(sigma, z.ndim) + noised_z = noised_z / torch.sqrt( + 1.0 + sigmas[0] ** 2.0 + ) + + def denoiser(x, sigma, c): + return model.denoiser(model.model, x, sigma, c) + samples = sampler(denoiser, noised_z, cond=c, uc=uc) + + samples = model.decode_first_stage(samples) + + samples = (torch.clamp(samples, -1.0, 1.0) + 1.0) / 2.0 + os.makedirs(save_path, exist_ok=True) + perform_save_locally_video( + save_path, + samples, + args.target_fps, + args.save_type, + save_grid=False + ) + print(f"Saved video to {save_path}") \ No newline at end of file diff --git a/CCEdit-main/scripts/sampling/sampling_tv2v_ref.py b/CCEdit-main/scripts/sampling/sampling_tv2v_ref.py new file mode 100644 index 0000000000000000000000000000000000000000..c2b8a2a2b689991645ea77b98249e6e277218143 --- /dev/null +++ b/CCEdit-main/scripts/sampling/sampling_tv2v_ref.py @@ -0,0 +1,550 @@ +import argparse +import json +import os +import random + +import torch +from einops import rearrange, repeat +from pytorch_lightning import seed_everything +from safetensors import safe_open +from torch import autocast + +from scripts.sampling.util import ( + chunk, + convert_load_lora, + create_model, + init_sampling, + load_img, + load_video_keyframes, + model_load_ckpt, + perform_save_locally_video, +) +from sgm.util import append_dims + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--seed", type=int, default=42) + parser.add_argument( + "--config_path", + type=str, + default="", + ) + parser.add_argument( + "--ckpt_path", + type=str, + default="", + ) + parser.add_argument( + "--use_default", action="store_true", help="use default ckpt at first" + ) + parser.add_argument( + "--basemodel_path", + type=str, + default="", + help="load a new base model instead of original sd-1.5", + ) + parser.add_argument("--basemodel_listpath", type=str, default="") + parser.add_argument("--lora_path", type=str, default="") + parser.add_argument("--vae_path", type=str, default="") + parser.add_argument( + "--video_path", + type=str, + default="", + ) + parser.add_argument( + '--reference_path', + type=str, + default='', + ) + parser.add_argument("--prompt_listpath", type=str, default="") + parser.add_argument("--video_listpath", type=str, default="") + parser.add_argument( + "--videos_directory", + type=str, + default="", + help="directory containing videos to be processed", + ) + parser.add_argument( + '--json_path', + type=str, + default='', + help='path to json file containing video paths and captions' + ) + parser.add_argument( + '--videos_root', + type=str, + default='', + help='path to the root of videos' + ) + 
parser.add_argument( + '--reference_root', + type=str, + default='', + help='path to the root of reference videos' + ) + parser.add_argument("--save_path", type=str, default="outputs/demo/tv2v") + parser.add_argument("--H", type=int, default=256) + parser.add_argument("--W", type=int, default=384) + parser.add_argument("--detect_ratio", type=float, default=1.0) + parser.add_argument("--original_fps", type=int, default=20) + parser.add_argument("--target_fps", type=int, default=3) + parser.add_argument("--num_keyframes", type=int, default=9) + parser.add_argument("--prompt", type=str, default="") + parser.add_argument("--negative_prompt", type=str, default="ugly, low quality") + parser.add_argument("--add_prompt", type=str, default="masterpiece, high quality") + parser.add_argument("--sample_steps", type=int, default=50) + parser.add_argument("--sampler_name", type=str, default="EulerEDMSampler") + parser.add_argument( + "--discretization_name", type=str, default="LegacyDDPMDiscretization" + ) + parser.add_argument("--cfg_scale", type=float, default=7.5) + parser.add_argument("--prior_coefficient_x", type=float, default=0.0) + parser.add_argument("--prior_coefficient_noise", type=float, default=1.0) + parser.add_argument("--sdedit_denoise_strength", type=float, default=0.0) + parser.add_argument('--prior_type', type=str, default='ref', choices=['video', 'ref', 'video_ref']) + parser.add_argument("--num_samples", type=int, default=1) + parser.add_argument("--batch_size", type=int, default=4) + parser.add_argument('--disable_check_repeat', action='store_true', help='disable check repeat') + parser.add_argument('--lora_strength', type=float, default=0.8) + parser.add_argument('--save_type', type=str, default='mp4', choices=['gif', 'mp4']) + parser.add_argument('--auto_ref_editing', action='store_true', help='auto center editing') + args = parser.parse_args() + + seed = args.seed + if seed == -1: + seed = random.randint(0, 1000000) + seed_everything(seed) + + # initialize the model + model = create_model(config_path=args.config_path).to("cuda") + ckpt_path = args.ckpt_path + print("--> load ckpt from: ", ckpt_path) + model = model_load_ckpt(model, path=ckpt_path) + model.eval() + + # load the prompts and video_paths + video_save_paths = [] + assert not (args.prompt_listpath and args.videos_directory), ( + "Only one of prompt_listpath and videos_directory can be provided, " + "but got prompt_listpath: {}, videos_directory: {}".format( + args.prompt_listpath, args.videos_directory + ) + ) + if args.prompt_listpath: + with open(args.prompt_listpath, "r") as f: + prompts = f.readlines() + prompts = [p.strip() for p in prompts] + # load paths of cond_img + assert args.video_listpath, ( + "video_listpath must be provided when prompt_listpath is provided, " + "but got video_listpath: {}".format(args.video_listpath) + ) + with open(args.video_listpath, "r") as f: + video_paths = f.readlines() + video_paths = [p.strip() for p in video_paths] + elif args.videos_directory: + prompts = [] + video_paths = [] + for video_name in os.listdir(args.videos_directory): + video_path = os.path.join(args.videos_directory, video_name) + if os.path.isdir(video_path): + prompts.append(video_name) + video_paths.append(video_path) + elif args.json_path: + assert args.videos_root != '', 'videos_root must be provided when json_path is provided' + assert args.reference_root != '', 'reference_root must be provided when json_path is provided' + with open(args.json_path, 'r') as f: + json_dict = json.load(f) + prompts = [] + 
video_paths = [] + ref_paths = [] + for item in json_dict: + video_path = os.path.join(args.videos_root, item["Video Type"], item["Video Name"] + '.mp4') + + for edit in item['Editing']: + video_save_path = os.path.join(args.save_path, item["Video Type"], item["Video Name"], edit["Target Prompt"]) + if os.path.exists(video_save_path): + print(f'video {video_save_path} exists, skip it.') + continue + + video_paths.append(video_path) + prompts.append(edit["Target Prompt"]) + video_save_paths.append(video_save_path) + # outputs/debug/automatic_ref_editing/output_auto + # ref_paths.append(os.path.join( + # args.videos_root + '-centerframe', item["Video Type"], item["Video Name"] + '.png')) + ref_paths.append(os.path.join( + args.reference_root, 'output-{}.png'.format(edit["Target Prompt"]))) + else: + assert args.prompt and args.video_path, ( + "prompt and video_path must be provided when prompt_listpath and videos_directory are not provided, " + "but got prompt: {}, video_path: {}".format(args.prompt, args.video_path) + ) + prompts = [args.prompt] + video_paths = [args.video_path] + + assert len(prompts) == len( + video_paths + ), "The number of prompts and video_paths must be the same, and you provided {} prompts and {} video_paths".format( + len(prompts), len(video_paths) + ) + + if not args.json_path: + ref_paths = [args.reference_path] + + + num_samples = args.num_samples + batch_size = args.batch_size + + print("\nNumber of prompts: {}".format(len(prompts))) + print("Generate {} samples for each prompt".format(num_samples)) + + prompts = [item for item in prompts for _ in range(num_samples)] + video_paths = [item for item in video_paths for _ in range(num_samples)] + ref_paths = [item for item in ref_paths for _ in range(num_samples)] + + prompts_chunk = list(chunk(prompts, batch_size)) + video_paths_chunk = list(chunk(video_paths, batch_size)) + ref_paths_chunk = list(chunk(ref_paths, batch_size)) + del prompts + del video_paths + del ref_paths + + # load paths of basemodel if provided + assert not (args.basemodel_path and args.basemodel_listpath), ( + "Only one of basemodel_path and basemodel_listpath can be provided, " + "but got basemodel_path: {}, basemodel_listpath: {}".format( + args.basemodel_path, args.basemodel_listpath + ) + ) + basemodel_paths = [] + if args.basemodel_listpath: + with open(args.basemodel_listpath, "r") as f: + basemodel_paths = f.readlines() + basemodel_paths = [p.strip() for p in basemodel_paths] + if args.basemodel_path: + basemodel_paths = [args.basemodel_path] + if args.use_default: + basemodel_paths = ["default"] + basemodel_paths + if len(basemodel_paths) == 0: + basemodel_paths = ["default"] + + for basemodel_idx, basemodel_path in enumerate(basemodel_paths): + print("-> base model idx: ", basemodel_idx) + print("-> base model path: ", basemodel_path) + + if basemodel_path == "default": + pass + elif basemodel_path: + print("--> load a new base model from {}".format(basemodel_path)) + model = model_load_ckpt(model, basemodel_path, True) + + if args.lora_path: + print("--> load a new LoRA model from {}".format(args.lora_path)) + sd_state_dict = model.state_dict() + lora_path = args.lora_path + + if lora_path.endswith(".safetensors"): + lora_state_dict = {} + + # with safe_open(lora_path, framework="pt", device='cpu') as f: + with safe_open(lora_path, framework="pt", device=0) as f: + for key in f.keys(): + lora_state_dict[key] = f.get_tensor(key) + + is_lora = all("lora" in k for k in lora_state_dict.keys()) + if not is_lora: + raise ValueError( + f"The 
model you provided in [{lora_path}] is not a LoRA model. " + ) + else: + raise NotImplementedError + sd_state_dict = convert_load_lora( + sd_state_dict, lora_state_dict, alpha=args.lora_strength + ) # + model.load_state_dict(sd_state_dict) + + # TODO: the logic here is not elegant. + if args.vae_path: + vae_path = args.vae_path + print("--> load a new VAE model from {}".format(vae_path)) + + if vae_path.endswith(".pt"): + vae_state_dict = torch.load(vae_path, map_location="cpu")["state_dict"] + msg = model.first_stage_model.load_state_dict( + vae_state_dict, strict=False + ) + elif vae_path.endswith(".safetensors"): + vae_state_dict = {} + + # with safe_open(vae_path, framework="pt", device='cpu') as f: + with safe_open(vae_path, framework="pt", device=0) as f: + for key in f.keys(): + vae_state_dict[key] = f.get_tensor(key) + + msg = model.first_stage_model.load_state_dict( + vae_state_dict, strict=False + ) + else: + raise ValueError("Cannot load vae model from {}".format(vae_path)) + + print("msg of loading vae: ", msg) + + if os.path.exists( + os.path.join( + args.save_path, + basemodel_path.split("/")[-1].split(".")[0], + "log_info.json", + ) + ): + with open( + os.path.join( + args.save_path, + basemodel_path.split("/")[-1].split(".")[0], + "log_info.json", + ), + "r", + ) as f: + log_info = json.load(f) + else: + log_info = { + "basemodel_path": basemodel_path, + "lora_path": args.lora_path, + "vae_path": args.vae_path, + "video_paths": [], + "keyframes_paths": [], + } + + num_keyframes = args.num_keyframes + + for idx, (prompts, video_paths, ref_paths) in enumerate( + zip(prompts_chunk, video_paths_chunk, ref_paths_chunk) + ): + # if idx == 2: # ! DEBUG + # break + if not args.disable_check_repeat: + while video_paths[0] in log_info["video_paths"]: + print(f"video [{video_paths[0]}] has been processed, skip it.") + prompts_list, video_paths_list = list(prompts), list(video_paths) + prompts_list.pop(0) + video_paths_list.pop(0) + prompts, video_paths = tuple(prompts_list), tuple(video_paths_list) + del prompts_list, video_paths_list + if len(prompts) == 0: + break + if len(video_paths) == 0: + continue + + bs = min(len(prompts), batch_size) + print(f"\nProgress: {idx} / {len(prompts_chunk)}. 
") + H, W = args.H, args.W + keyframes_list = [] + print("load video ...") + try: + for video_path in video_paths: + keyframes = load_video_keyframes( + video_path, + args.original_fps, + args.target_fps, + num_keyframes, + (H, W), + ) + keyframes = keyframes.unsqueeze(0) # B T C H W + keyframes = rearrange(keyframes, "b t c h w -> b c t h w").to( + model.device + ) + keyframes_list.append(keyframes) + except: + print(f"Error when loading video from {video_paths}") + continue + print("load video done ...") + keyframes = torch.cat(keyframes_list, dim=0) + control_hint = keyframes + + # load reference + ref_list = [] + if args.auto_ref_editing: + print('Conduct auto ref editing, args.reference_path is ignored.') + # import pdb; pdb.set_trace() + raise NotImplementedError + + else: + for ref_path in ref_paths: + ref = load_img(ref_path, (H, W)) + ref_list.append(ref) + ref = torch.cat(ref_list, dim=0).to(model.device) + + batch = { + "txt": prompts, + "control_hint": control_hint, + 'cond_img': ref, + } + + negative_prompt = args.negative_prompt + batch_uc = { + "txt": [negative_prompt for _ in range(bs)], + "control_hint": batch["control_hint"].clone(), # balance mode in controlnet-webui + 'cond_img': batch["cond_img"].clone(), # follow the balance mode + } + # batch["txt"] = ["masterpiece, best quality, " + each for each in batch["txt"]] + if args.add_prompt: + batch["txt"] = [args.add_prompt + ", " + each for each in batch["txt"]] + c, uc = model.conditioner.get_unconditional_conditioning( + batch_c=batch, + batch_uc=batch_uc, + ) + + sampling_kwargs = {} # usually empty + + for k in c: + if isinstance(c[k], torch.Tensor): + c[k], uc[k] = map(lambda y: y[k][:bs].to(model.device), (c, uc)) + shape = (4, num_keyframes, H // 8, W // 8) + + precision_scope = autocast + with torch.no_grad(): + with torch.cuda.amp.autocast(): + randn = torch.randn(bs, *shape).to(model.device) + if args.sdedit_denoise_strength == 0.0: + + def denoiser(input, sigma, c): + return model.denoiser( + model.model, input, sigma, c, **sampling_kwargs + ) + + if args.prior_coefficient_x != 0.0: + assert 0.0 < args.prior_coefficient_x <= 1.0, ( + "prior_coefficient_x should be in (0.0, 1.0], " + "but got {}".format(args.prior_coefficient_x) + ) + # prior = model.encode_first_stage(keyframes) + if args.prior_type == 'video': + prior = model.encode_first_stage(keyframes) + elif args.prior_type == 'ref': + prior = model.encode_first_stage(ref) + prior = repeat(prior, 'b c h w -> b c t h w', t=num_keyframes) + elif args.prior_type == 'video_ref': + prior = model.encode_first_stage(keyframes) + prior_ref = model.encode_first_stage(ref) + prior_ref = repeat(prior_ref, 'b c h w -> b c t h w', t=num_keyframes) + prior = prior + prior_ref + else: + raise NotImplementedError + randn = ( + args.prior_coefficient_x * prior + + args.prior_coefficient_noise * randn + ) + sampler = init_sampling( + sample_steps=args.sample_steps, + sampler_name=args.sampler_name, + discretization_name=args.discretization_name, + guider_config_target="sgm.modules.diffusionmodules.guiders.VanillaCFGTV2V", + cfg_scale=args.cfg_scale, + ) + sampler.verbose = True + samples = sampler(denoiser, randn, c, uc=uc) + else: + assert ( + args.sdedit_denoise_strength > 0.0 + ), "sdedit_denoise_strength should be positive" + assert ( + args.sdedit_denoise_strength <= 1.0 + ), "sdedit_denoise_strength should be less than 1.0" + assert ( + args.prior_coefficient_x == 0 + ), "prior_coefficient_x should be 0 when using sdedit_denoise_strength" + denoise_strength = 
args.sdedit_denoise_strength + sampler = init_sampling( + sample_steps=args.sample_steps, + sampler_name=args.sampler_name, + discretization_name=args.discretization_name, + guider_config_target="sgm.modules.diffusionmodules.guiders.VanillaCFGTV2V", + cfg_scale=args.cfg_scale, + img2img_strength=denoise_strength, + ) + sampler.verbose = True + if args.prior_type == 'video': + z = model.encode_first_stage(keyframes) + elif args.prior_type == 'ref': + z = model.encode_first_stage(ref) + z = repeat(z, 'b c h w -> b c t h w', t=num_keyframes) + elif args.prior_type == 'video_ref': + z = model.encode_first_stage(keyframes) + z_ref = model.encode_first_stage(ref) + z_ref = repeat(z_ref, 'b c h w -> b c t h w', t=num_keyframes) + z = z + z_ref + else: + raise NotImplementedError + + noise = torch.randn_like(z) + sigmas = sampler.discretization(sampler.num_steps).to(z.device) + sigma = sigmas[0] + + print(f"all sigmas: {sigmas}") + print(f"noising sigma: {sigma}") + noised_z = z + noise * append_dims(sigma, z.ndim) + noised_z = noised_z / torch.sqrt( + 1.0 + sigmas[0] ** 2.0 + ) # Note: hardcoded to DDPM-like scaling. need to generalize later. + + def denoiser(x, sigma, c): + return model.denoiser(model.model, x, sigma, c) + + samples = sampler(denoiser, noised_z, cond=c, uc=uc) + + samples = model.decode_first_stage(samples) + + # save the results + keyframes = (torch.clamp(keyframes, -1.0, 1.0) + 1.0) / 2.0 + samples = (torch.clamp(samples, -1.0, 1.0) + 1.0) / 2.0 + control_hint = (torch.clamp(c["control_hint"], -1.0, 1.0) + 1.0) / 2.0 + # save_path = args.save_path + # save_path = os.path.join( + # save_path, basemodel_path.split("/")[-1].split(".")[0] + # ) + if video_save_paths == []: + save_path = args.save_path + save_path = os.path.join( + save_path, basemodel_path.split("/")[-1].split(".")[0] + ) + else: + save_path = video_save_paths[idx] + + perform_save_locally_video( + os.path.join(save_path, "original"), + keyframes, + args.target_fps, + args.save_type, + save_grid=False, + ) + + keyframes_paths = perform_save_locally_video( + os.path.join(save_path, "result"), + samples, + args.target_fps, + args.save_type, + return_savepaths=True, + save_grid=False, + ) + perform_save_locally_video( + os.path.join(save_path, "control_hint"), + control_hint, + args.target_fps, + args.save_type, + save_grid=False, + ) + print("Saved samples to {}. 
Enjoy.".format(save_path)) + + # save video paths + log_info["video_paths"] += video_paths + log_info["keyframes_paths"] += keyframes_paths + + # save log info + with open(os.path.join(save_path, "log_info.json"), "w") as f: + json.dump(log_info, f, indent=4) + + # back to the original model + basemodel_idx += 1 + if basemodel_idx < len(basemodel_paths): + print("--> back to the original model: {}".format(ckpt_path)) + model = model_load_ckpt(model, path=ckpt_path) diff --git a/CCEdit-main/scripts/sampling/util.py b/CCEdit-main/scripts/sampling/util.py new file mode 100644 index 0000000000000000000000000000000000000000..baed9f8cfbb4ec643eeb4fb7a1f0abefd57c8c03 --- /dev/null +++ b/CCEdit-main/scripts/sampling/util.py @@ -0,0 +1,813 @@ +import os +from itertools import islice + +import decord +import cv2 +import einops +import imageio +import numpy as np +import PIL.Image as Image +import torch +import torchvision +import tqdm +from einops import rearrange, repeat +from omegaconf import OmegaConf +from safetensors import safe_open +from safetensors.torch import load_file as load_safetensors + +from sgm.modules.diffusionmodules.sampling import ( + DPMPP2MSampler, + DPMPP2SAncestralSampler, + EulerAncestralSampler, + EulerEDMSampler, + HeunEDMSampler, + LinearMultistepSampler, +) +from sgm.modules.encoders.modules import ( + DepthMidasEncoder, + DepthZoeEncoder, + LineartEncoder, + NormalBaeEncoder, + ScribbleHEDEncoder, + ScribblePidiNetEncoder, + SoftEdgeEncoder, +) +from sgm.util import exists, instantiate_from_config, isheatmap + + +def create_model(config_path): + config = OmegaConf.load(config_path) + model = instantiate_from_config(config.model).cpu() + print(f"Loaded model config from [{config_path}]") + return model + + +def model_load_ckpt(model, path, newbasemodel=False): + # TODO: how to load ema weights? + if path.endswith("ckpt") or path.endswith(".pt") or path.endswith(".pth"): + if "deepspeed" in path: + sd = torch.load(path, map_location="cpu") + sd = {k.replace("_forward_module.", ""): v for k, v in sd.items()} + else: + # sd = torch.load(path, map_location="cpu")["state_dict"] + sd = torch.load(path, map_location="cpu") + if "state_dict" in torch.load(path, map_location="cpu"): + sd = sd["state_dict"] + elif path.endswith("safetensors"): + sd = load_safetensors(path) + else: + raise NotImplementedError(f"Unknown checkpoint format: {path}") + + # TODO: (RUOYU) I don't know why need this. We need to refine this for this is really not elegant. + sd_new = {} + for k, v in sd.items(): + if k.startswith("conditioner.embedders.") and "first_stage_model" in k: + loc = k.find("first_stage_model") + sd_new[k.replace(k[:loc], "")] = v + else: + sd_new[k] = v + sd = sd_new + del sd_new + + if newbasemodel: + sd_new = {} + for k, v in sd.items(): + if "cond_stage_model" in k: + sd_new[k.replace("cond_stage_model", "conditioner.embedders.0")] = v + continue + sd_new[k] = v + sd = sd_new + del sd_new + + missing, unexpected = model.load_state_dict(sd, strict=False) + if newbasemodel: + unwanted_substrings = ["temporal", "controlnet", "conditioner.embedders.1."] + missing = [ + each + for each in missing + if all(substring not in each for substring in unwanted_substrings) + ] + print( + f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" + ) + if len(missing) > 0: + print(f"Missing Keys: {missing}") + if len(unexpected) > 0: + # TODO: notice that some checkpoints has lora parameters (e.g. 
majicmixRealistic) + for each in unexpected: + if each.startswith("lora"): + print("detected lora parameters, load lora parameters ...", end="\r") + sd_lora = {} + for k, v in sd.items(): + if k.startswith("lora"): + sd_lora[k] = v + unexpected.remove(k) + # TODO: alpha? + sd_lora = convert_load_lora( + sd_state_dict=sd, state_dict=sd_lora, alpha=0.8 + ) + break + print(f"Unexpected Keys: {unexpected}") + + return model + + +def convert_load_lora( + sd_state_dict, + state_dict, + LORA_PREFIX_UNET="lora_unet", + LORA_PREFIX_TEXT_ENCODER="lora_te", + alpha=0.6, +): + visited = [] + + for key in tqdm.tqdm(state_dict): + # it is suggested to print out the key, it usually will be something like below + # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" + + # as we have set the alpha beforehand, so just skip + if ".alpha" in key or key in visited: + print("skip: ", key) + continue + + if "text" in key: + layer_infos = ( + key.split(".")[0].split(LORA_PREFIX_TEXT_ENCODER + "_")[-1].split("_") + ) + # curr_layer = pipeline.text_encoder + if "self_attn" in key: + layername = "{}.self_attn.{}_proj".format( + layer_infos[4], layer_infos[7] + ) + else: + layername = "{}.mlp.{}".format(layer_infos[4], layer_infos[-1]) + layername = ( + "cond_stage_model.transformer.text_model.encoder.layers." + + layername + + ".weight" + ) + else: + layer_infos = key.split(".")[0].split(LORA_PREFIX_UNET + "_")[-1].split("_") + + if "lora_unet_mid_" in key: + if "_proj_" in key: + layername = ( + "model.diffusion_model.middle_block.1.proj_{}.weight".format( + layer_infos[5] + ) + ) + elif "_to_out_" in key: + layername = "model.diffusion_model.middle_block.1.transformer_blocks.0.{}.to_out.0.weight".format( + layer_infos[7] + ) + elif "_ff_net_" in key: + layername = "model.diffusion_model.middle_block.1.transformer_blocks.0.ff.net" + layername = ".".join([layername] + layer_infos[9:]) + ".weight" + elif "attn1" in key or "attn2" in key: + layername = "model.diffusion_model.middle_block.1.transformer_blocks.0.{}.to_{}.weight".format( + layer_infos[7], layer_infos[9] + ) + else: + raise ValueError("Unknown key: ", key) + else: + lora_sd_map_in = { + "0-0": [1, 1], + "0-1": [2, 1], + "1-0": [4, 1], + "1-1": [5, 1], + "2-0": [7, 1], + "2-1": [8, 1], + } + lora_sd_map_out = { + "1-0": [3, 1], + "1-1": [4, 1], + "1-2": [5, 1], + "2-0": [6, 1], + "2-1": [7, 1], + "2-2": [8, 1], + "3-0": [9, 1], + "3-1": [10, 1], + "3-2": [11, 1], + } + + if "lora_unet_down_" in key: + sd_idxs = lora_sd_map_in[ + "{}-{}".format(layer_infos[2], layer_infos[4]) + ] + flag_ = "input_blocks" + elif "lora_unet_up_" in key: + sd_idxs = lora_sd_map_out[ + "{}-{}".format(layer_infos[2], layer_infos[4]) + ] + flag_ = "output_blocks" + + if "_proj_" in key: # _proj_in and _proj_out + layername = "model.diffusion_model.{}.{}.{}.{}_{}.weight".format( + flag_, sd_idxs[0], sd_idxs[1], layer_infos[5], layer_infos[6] + ) + elif "_to_out_" in key: + # model.diffusion_model.input_blocks.1.1.transformer_blocks.0.attn1.to_out.0.weight + layername = "model.diffusion_model.{}.{}.{}.transformer_blocks.{}.{}.to_{}.{}.weight".format( + flag_, + sd_idxs[0], + sd_idxs[1], + layer_infos[7], + layer_infos[8], + layer_infos[10], + layer_infos[11], + ) + elif "_ff_net_" in key: + # model.diffusion_model.input_blocks.1.1.transformer_blocks.0.ff.net.0.proj.weight + layername = "model.diffusion_model.{}.{}.{}.transformer_blocks.{}.ff.net".format( + flag_, sd_idxs[0], sd_idxs[1], layer_infos[7] + ) + layername = ".".join([layername] + layer_infos[10:]) + 
".weight" + elif "attn1" in key or "attn2" in key: + # model.diffusion_model.input_blocks.1.1.transformer_blocks.0.attn1.to_k.weight + layername = "model.diffusion_model.{}.{}.{}.transformer_blocks.{}.{}.to_{}.weight".format( + flag_, + sd_idxs[0], + sd_idxs[1], + layer_infos[7], + layer_infos[8], + layer_infos[10], + ) + else: + raise ValueError("Unknown key: ", key) + # print("Unknown key: {} -> skip".format(key)) + # continue + + pair_keys = [] + if "lora_down" in key: + pair_keys.append(key.replace("lora_down", "lora_up")) + pair_keys.append(key) + else: + pair_keys.append(key) + pair_keys.append(key.replace("lora_up", "lora_down")) + + if "cond_stage_model" in layername: + layername = layername.replace("cond_stage_model", "conditioner.embedders.0") + + # update weight + # print('{} -> {}'.format(key, layername)) + if len(state_dict[pair_keys[0]].shape) == 4: + weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32) + weight_down = ( + state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32) + ) + sd_state_dict[layername] += alpha * torch.mm( + weight_up, weight_down + ).unsqueeze(2).unsqueeze(3) + else: + weight_up = state_dict[pair_keys[0]].to(torch.float32) + weight_down = state_dict[pair_keys[1]].to(torch.float32) + sd_state_dict[layername] += alpha * torch.mm(weight_up, weight_down) + + # update visited list + for item in pair_keys: + visited.append(item) + + print("loading lora done ... ") + + return sd_state_dict + + +def perform_save_locally_image(save_path, samples): + assert samples.dim() == 4, "Expected samples to have shape (B, C, H, W)" + os.makedirs(os.path.join(save_path), exist_ok=True) + base_count = len(os.listdir(os.path.join(save_path))) + # samples = embed_watemark(samples) + for sample in samples: + sample = 255.0 * rearrange(sample.cpu().numpy(), "c h w -> h w c") + Image.fromarray(sample.astype(np.uint8)).save( + os.path.join(save_path, f"{base_count:05}.png") + ) + base_count += 1 + + +def perform_save_locally_video( + save_path, samples, fps, savetype="gif", return_savepaths=False, save_grid=True, +): + assert samples.dim() == 5, "Expected samples to have shape (B, C, T, H, W)" + assert savetype in ["gif", "mp4"] + os.makedirs(os.path.join(save_path), exist_ok=True) + os.makedirs(os.path.join(save_path, savetype), exist_ok=True) + base_count_savetype = len(os.listdir(os.path.join(save_path, savetype))) + if save_grid: + os.makedirs(os.path.join(save_path, "grid"), exist_ok=True) + base_count_grid = len(os.listdir(os.path.join(save_path, "grid"))) + savepaths = [] + for sample in samples: + t = sample.shape[0] + sample_grid = einops.rearrange(sample, "c t h w -> t c h w") + if save_grid: + torchvision.utils.save_image( + sample_grid, + os.path.join(save_path, "grid", f"grid-{base_count_grid:04}.png"), + nrow=t, + normalize=False, + padding=0, + ) + + sample = 255.0 * einops.rearrange(sample.cpu().numpy(), "c t h w -> t h w c") + sample = sample.astype(np.uint8) + frames = [each for each in sample] + if savetype == "gif": + savepath = os.path.join( + save_path, "gif", f"animation-{base_count_savetype:04}.gif" + ) + imageio.mimsave( + savepath, + frames, + format="GIF", + duration=1 / fps, + loop=0, + ) + elif savetype == "mp4": + savepath = os.path.join( + save_path, "mp4", f"animation-{base_count_savetype:04}.mp4" + ) + # height, width, layers = frames[0].shape + # size = (width, height) + # fourcc = cv2.VideoWriter_fourcc(*'avc1') + # out = cv2.VideoWriter(savepath, fourcc, fps, size) + # for frame in frames: + # frame_bgr = 
cv2.cvtColor(frame.astype(np.uint8), cv2.COLOR_RGB2BGR) + # out.write(frame_bgr) + # out.release() + with imageio.get_writer(savepath, fps=fps) as writer: + for frame in frames: + writer.append_data(frame) + + else: + raise ValueError(f"Unknown savetype: {savetype}") + base_count_savetype += 1 + if save_grid: + base_count_grid += 1 + savepaths.append(savepath) + + if return_savepaths: + return savepaths + else: + return + + +def chunk(it, size): + it = iter(it) + return iter(lambda: tuple(islice(it, size)), ()) + + +def load_img(p_cond_img, size: tuple = None): + """ + Loads an image from the given path and resizes it to the given height and width. + Converts the image to a tensor and normalizes it to the range [-1, 1]. Shape: (1, 3, H, W) + + Args: + - p_cond_img (str): path to the image file + - size (tuple): height and width to resize the image to + + Returns: + - cond_img (torch.Tensor): tensor of the resized and normalized image. + """ + + cond_img = Image.open(p_cond_img) + if size: + assert len(size) == 2, "size should be (H, W)" + H, W = size + cond_img = cond_img.resize((W, H), Image.BICUBIC) + cond_img = np.array(cond_img) + cond_img = torch.from_numpy(cond_img).permute(2, 0, 1).unsqueeze(0).float() / 255.0 + cond_img = cond_img * 2.0 - 1.0 + cond_img = torch.clamp(cond_img, -1.0, 1.0) + return cond_img + + +def init_sampling( + sample_steps=50, + sampler_name="EulerEDMSampler", + discretization_name="LegacyDDPMDiscretization", + guider_config_target="sgm.modules.diffusionmodules.guiders.VanillaCFG", + cfg_scale=7.5, + img2img_strength=1.0, +): + assert ( + sample_steps >= 1 and sample_steps <= 1000 + ), "sample_steps must be between 1 and 1000, but got {}".format(sample_steps) + steps = sample_steps + assert sampler_name in [ + "EulerEDMSampler", + "HeunEDMSampler", + "EulerAncestralSampler", + "DPMPP2SAncestralSampler", + "DPMPP2MSampler", + "LinearMultistepSampler", + ], "unknown sampler {}".format(sampler_name) + sampler = sampler_name + assert discretization_name in [ + "LegacyDDPMDiscretization", + "EDMDiscretization", + ], "unknown discretization {}".format(discretization_name) + discretization = discretization_name + + discretization_config = get_discretization(discretization) + + guider_config = get_guider( + guider_config_target=guider_config_target, scale=cfg_scale + ) + + sampler = get_sampler(sampler, steps, discretization_config, guider_config) + if img2img_strength < 1.0: + from scripts.demo.streamlit_helpers import Img2ImgDiscretizationWrapper + + sampler.discretization = Img2ImgDiscretizationWrapper( + sampler.discretization, strength=img2img_strength + ) + return sampler + + +def get_discretization(discretization): + if discretization == "LegacyDDPMDiscretization": + discretization_config = { + "target": "sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization", + } + elif discretization == "EDMDiscretization": + sigma_min = 0.03 + sigma_max = 14.61 + rho = 3.0 + + discretization_config = { + "target": "sgm.modules.diffusionmodules.discretizer.EDMDiscretization", + "params": { + "sigma_min": sigma_min, + "sigma_max": sigma_max, + "rho": rho, + }, + } + + return discretization_config + + +def get_guider( + guider_config_target="sgm.modules.diffusionmodules.guiders.VanillaCFG", + scale=7.5, +): + guider = "VanillaCFG" + + if guider == "IdentityGuider": + guider_config = { + "target": "sgm.modules.diffusionmodules.guiders.IdentityGuider" + } + elif guider == "VanillaCFG": + # scale = 7.5 + thresholder = "None" + + if thresholder == "None": + 
dyn_thresh_config = { + "target": "sgm.modules.diffusionmodules.sampling_utils.NoDynamicThresholding" + } + else: + raise NotImplementedError + + guider_config = { + "target": guider_config_target, + "params": {"scale": scale, "dyn_thresh_config": dyn_thresh_config}, + } + else: + raise NotImplementedError + return guider_config + + +def get_sampler(sampler_name, steps, discretization_config, guider_config): + if sampler_name == "EulerEDMSampler" or sampler_name == "HeunEDMSampler": + # default + s_churn = 0.0 + s_tmin = 0.0 + s_tmax = 999.0 + s_noise = 1.0 + + if sampler_name == "EulerEDMSampler": + sampler = EulerEDMSampler( + num_steps=steps, + discretization_config=discretization_config, + guider_config=guider_config, + s_churn=s_churn, + s_tmin=s_tmin, + s_tmax=s_tmax, + s_noise=s_noise, + verbose=True, + ) + elif sampler_name == "HeunEDMSampler": + sampler = HeunEDMSampler( + num_steps=steps, + discretization_config=discretization_config, + guider_config=guider_config, + s_churn=s_churn, + s_tmin=s_tmin, + s_tmax=s_tmax, + s_noise=s_noise, + verbose=True, + ) + elif ( + sampler_name == "EulerAncestralSampler" + or sampler_name == "DPMPP2SAncestralSampler" + ): + # default + s_noise = 1.0 + eta = 1.0 + + if sampler_name == "EulerAncestralSampler": + sampler = EulerAncestralSampler( + num_steps=steps, + discretization_config=discretization_config, + guider_config=guider_config, + eta=eta, + s_noise=s_noise, + verbose=True, + ) + elif sampler_name == "DPMPP2SAncestralSampler": + sampler = DPMPP2SAncestralSampler( + num_steps=steps, + discretization_config=discretization_config, + guider_config=guider_config, + eta=eta, + s_noise=s_noise, + verbose=True, + ) + elif sampler_name == "DPMPP2MSampler": + sampler = DPMPP2MSampler( + num_steps=steps, + discretization_config=discretization_config, + guider_config=guider_config, + verbose=True, + ) + elif sampler_name == "LinearMultistepSampler": + # default + order = 4 + sampler = LinearMultistepSampler( + num_steps=steps, + discretization_config=discretization_config, + guider_config=guider_config, + order=order, + verbose=True, + ) + else: + raise ValueError(f"unknown sampler {sampler_name}!") + + return sampler + + +def HWC3(x): + assert x.dtype == np.uint8 + if x.ndim == 2: + x = x[:, :, None] + assert x.ndim == 3 + H, W, C = x.shape + assert C == 1 or C == 3 or C == 4 + if C == 3: + return x + if C == 1: + return np.concatenate([x, x, x], axis=2) + if C == 4: + color = x[:, :, 0:3].astype(np.float32) + alpha = x[:, :, 3:4].astype(np.float32) / 255.0 + y = color * alpha + 255.0 * (1.0 - alpha) + y = y.clip(0, 255).astype(np.uint8) + return y + + +def loadmp4_and_convert_to_numpy_cv2(file_path): + + """ + Abandoned. This is slow. + Load an mp4 video file and convert it to a numpy array of frames. + + Args: + file_path (str): The path to the mp4 video file. + + Returns: + numpy.ndarray: A numpy array of frames from the video file. + """ + cap = cv2.VideoCapture(file_path) + + if not cap.isOpened(): + print("Error: Unable to open the file.") + return None + + frames = [] + while True: + ret, frame = cap.read() + + if not ret: + break + + frames.append(frame) + + cap.release() + + video_np = np.array(frames) + video_np = np.flip(video_np, axis=-1) # BGR to RGB + + return video_np.copy() + + +def loadmp4_and_convert_to_numpy(file_path): + """ + Loads an mp4 video file and converts it to a numpy array of frames. + + Args: + file_path (str): The path to the mp4 video file. + + Returns: + frames (numpy.ndarray): A numpy array of frames. 
+ """ + video_reader = decord.VideoReader(file_path, num_threads=0) + v_len = len(video_reader) + fps = video_reader.get_avg_fps() + frames = video_reader.get_batch(list(range(v_len))) + frames = frames.asnumpy() + return frames + + +def load_video(video_path, size: tuple = None, gap: int = 1): + """ + Load a video from a given path and return a tensor representing the video frames. + + Args: + size (tuple): The size of the video frames. + video_path (str): The path to the video file or folder containing the video frames. + gap (int, optional): The number of frames to skip between each selected frame. Defaults to 1. + + Returns: + torch.Tensor: A tensor representing the video frames, with shape (T, C, H, W) and values in the range [-1, 1]. + """ + if os.path.isdir(video_path): + files = sorted(os.listdir(video_path)) + keyfiles = files[::gap] + frames = [load_img(os.path.join(video_path, kf), size) for kf in keyfiles] + elif video_path.endswith(".mp4") or video_path.endswith(".gif"): + if video_path.endswith(".mp4"): + frames = loadmp4_and_convert_to_numpy(video_path) + elif video_path.endswith(".gif"): + frames = imageio.mimread(video_path) + frames = [np.array(fr) for fr in frames] + frames = [HWC3(fr) for fr in frames] + frames = np.stack(frames, axis=0) + frames = ( + torch.from_numpy(frames).permute(0, 3, 1, 2).float() / 255.0 + ) # (T, C, H, W) + frames = frames * 2.0 - 1.0 # range in [-1, 1] + if size: + assert len(size) == 2, "size should be (H, W)" + frames = torch.nn.functional.interpolate( + frames, size=size, mode="bicubic", align_corners=False + ) + frames = frames[::gap] # pick the element every gap frames + frames = [f.unsqueeze(0) for f in frames] + else: + raise ValueError( + "Unsupported video format. Only support dirctory, .mp4 and .gif." + ) + + return torch.cat(frames, dim=0) # (T, C, H, W) + + +def get_keyframes(original_fps, target_fps, allframes, num_keyframes): + num_allframes = len(allframes) + gap = np.round(original_fps / target_fps).astype(int) + assert gap > 0, f"gap {gap} should be positive." + keyindexs = [i for i in range(0, num_allframes, gap)] + if len(keyindexs) < num_keyframes: + print( + "[WARNING]: not enough keyframes, use linspace instead. " + f"len(keyindexs): [{len(keyindexs)}] < num_keyframes [{num_keyframes}]" + ) + keyindexs = np.linspace(0, num_allframes - 1, num_keyframes).astype(int) + + return allframes[keyindexs[:num_keyframes]] + + +def load_video_keyframes( + video_path, original_fps, target_fps, num_keyframes, size: tuple = None +): + """ + Load keyframes from a video file or directory of images. + + Args: + video_path (str): Path to the video file or directory of images. + original_fps (int): The original frames per second of the video. + target_fps (int): The desired frames per second of the output keyframes. + num_keyframes (int): The number of keyframes to extract. + size (tuple, optional): The desired size of the output keyframes. Defaults to None. + + Returns: + torch.Tensor: A tensor of shape (T, C, H, W) containing the keyframes. + """ + if os.path.isdir(video_path): + files = sorted(os.listdir(video_path)) + num_allframes = len(files) + gap = np.round(original_fps / target_fps).astype(int) + assert gap > 0, f"gap {gap} should be positive." + keyindexs = [i for i in range(0, num_allframes, gap)] + if len(keyindexs) < num_keyframes: + print( + "[WARNING]: not enough keyframes, use linspace instead. 
" + f"len(keyindexs): [{len(keyindexs)}] < num_keyframes [{num_keyframes}]" + ) + keyindexs = np.linspace(0, num_allframes - 1, num_keyframes).astype(int) + else: + keyindexs = keyindexs[:num_keyframes] + keyfiles = [files[i] for i in keyindexs] + frames = [load_img(os.path.join(video_path, kf), size) for kf in keyfiles] + elif video_path.endswith(".mp4") or video_path.endswith(".gif"): + # TODO: not tested yet. + if video_path.endswith(".mp4"): + frames = loadmp4_and_convert_to_numpy(video_path) + elif video_path.endswith(".gif"): + frames = imageio.mimread(video_path) + frames = [np.array(fr) for fr in frames] + frames = [HWC3(fr) for fr in frames] + frames = np.stack(frames, axis=0) + frames = ( + torch.from_numpy(frames).permute(0, 3, 1, 2).float() / 255.0 + ) # (T, C, H, W) + num_allframes = frames.shape[0] + gap = np.round(original_fps / target_fps).astype(int) + assert gap > 0, f"gap {gap} should be positive." + keyindexs = [i for i in range(0, num_allframes, gap)] + if len(keyindexs) < num_keyframes: + print( + "[WARNING]: not enough keyframes, use linspace instead. " + f"len(keyindexs): [{len(keyindexs)}] < num_keyframes [{num_keyframes}]" + ) + keyindexs = np.linspace(0, num_allframes - 1, num_keyframes).astype(int) + else: + keyindexs = keyindexs[:num_keyframes] + # frames = frames[keyindexs[:num_keyframes]] + frames = frames[keyindexs] + + frames = frames * 2.0 - 1.0 # range in [-1, 1] + frames = torch.clamp(frames, -1.0, 1.0) + if size: + assert len(size) == 2, "size should be (H, W)" + frames = torch.nn.functional.interpolate( + frames, size=size, mode="bicubic", align_corners=False + ) + # frames = frames[::gap] # pick the element every gap frames + frames = [f.unsqueeze(0) for f in frames] + else: + raise ValueError( + "Unsupported video format. Only support dirctory, .mp4 and .gif." + ) + + return torch.cat(frames, dim=0) # (T, C, H, W) + + +def setup_controlgenerator(model): + control_hint_encoder = None + for embbeder in model.conditioner.embedders: + if ( + isinstance(embbeder, LineartEncoder) + or isinstance(embbeder, DepthZoeEncoder) + or isinstance(embbeder, DepthMidasEncoder) + or isinstance(embbeder, SoftEdgeEncoder) + or isinstance(embbeder, NormalBaeEncoder) + or isinstance(embbeder, ScribbleHEDEncoder) + or isinstance(embbeder, ScribblePidiNetEncoder) + ): + control_hint_encoder = embbeder + break + if control_hint_encoder is None: + raise ValueError("Cannot find LineartEncoder in the embedders.") + return control_hint_encoder + + +def load_basemodel_lora(model, basemodel_path="", lora_path=""): + if basemodel_path: + print("--> load a new base model from {}".format(basemodel_path)) + model = model_load_ckpt(model, basemodel_path, True) + + if lora_path: + print("--> load a new LoRA model from {}".format(lora_path)) + sd_state_dict = model.state_dict() + + if lora_path.endswith(".safetensors"): + lora_state_dict = {} + + # with safe_open(lora_path, framework="pt", device='cpu') as f: + with safe_open(lora_path, framework="pt", device=0) as f: + for key in f.keys(): + lora_state_dict[key] = f.get_tensor(key) + + is_lora = all("lora" in k for k in lora_state_dict.keys()) + if not is_lora: + raise ValueError( + f"The model you provided in [{lora_path}] is not a LoRA model. 
" + ) + else: + raise NotImplementedError + + sd_state_dict = convert_load_lora( + sd_state_dict, lora_state_dict, alpha=1.0 + ) # TODO: alpha + model.load_state_dict(sd_state_dict) + return model diff --git a/CCEdit-main/scripts/tools/extract_centerframe.py b/CCEdit-main/scripts/tools/extract_centerframe.py new file mode 100644 index 0000000000000000000000000000000000000000..b96d247ba3ae0f8f4458c407b7b6f3e685368533 --- /dev/null +++ b/CCEdit-main/scripts/tools/extract_centerframe.py @@ -0,0 +1,110 @@ +''' +Usage: +python scripts/tools/extract_centerframe.py \ + --p_video assets/Samples/tshirtman.mp4 \ + --p_save outputs/centerframe/tshirtman.png \ + --orifps 18 \ + --targetfps 6 \ + --n_keyframes 17 \ + --length_long 512 \ + --length_short 512 +''' + +import argparse +import json +import os +import random + +import einops +import torchvision +import cv2 +import numpy as np +import torch +from pytorch_lightning import seed_everything +from torch import autocast + +from scripts.sampling.util import ( + chunk, + create_model, + init_sampling, + load_video, + load_video_keyframes, + model_load_ckpt, + perform_save_locally_image, + perform_save_locally_video, +) +from sgm.util import append_dims + + +def extract_centerframe(p_video, p_save, orifps, targetfps, n_keyframes, length_long, length_short): + if n_keyframes % 2 == 0: + print('WARNING: n_keyframes should be odd, but got {}'.format(n_keyframes)) + keyframes = load_video_keyframes(p_video, orifps, targetfps, n_keyframes) + H, W = keyframes[0].shape[1:] + if H >= W: + h, w = length_long, length_short + else: + h, w = length_short, length_long + # keyframes = load_video_keyframes(p_video, orifps, targetfps, n_keyframes, (h, w)) + + centerframe = keyframes[n_keyframes // 2, :, :, :].unsqueeze(0) + centerframe = torch.nn.functional.interpolate(centerframe, (h, w), mode='bilinear', align_corners=False) + centerframe = (centerframe + 1) / 2. 
+ centerframe = torch.clamp(centerframe, 0, 1) + + # transfer to numpy and save + centerframe = centerframe.squeeze(0).permute(1, 2, 0).cpu().numpy()[..., ::-1] + # mkdir + os.makedirs(os.path.dirname(p_save), exist_ok=True) + cv2.imwrite(p_save, (centerframe * 255).astype(np.uint8)) + print('save to {}'.format(p_save)) + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument('--p_video', type=str, default='') + parser.add_argument('--p_save', type=str, default='') + parser.add_argument('--dir_video', type=str, default='') + parser.add_argument('--dir_save', type=str, default='') + parser.add_argument('--orifps', type=int, default=18) + parser.add_argument('--targetfps', type=int, default=3) + parser.add_argument('--n_keyframes', type=int, default=9) + parser.add_argument('--length_short', type=int, default=384) + parser.add_argument('--length_long', type=int, default=576) + args = parser.parse_args() + + assert (args.p_video != '' and args.p_save != '' ) or \ + (args.dir_video != "" and args.dir_save != "args.dir_save"), \ + 'source video must be specified' + + orifps = args.orifps + targetfps = args.targetfps + n_keyframes = args.n_keyframes + + if args.p_video != '': + p_video = args.p_video + p_save = args.p_save + extract_centerframe(p_video, p_save, orifps, targetfps, n_keyframes, args.length_long, args.length_short) + else: + dir_video = args.dir_video + dir_save = args.dir_save + os.makedirs(dir_save, exist_ok=True) + subdirs = os.listdir(dir_video) + for subdir in subdirs: + subdir_video = os.path.join(dir_video, subdir) + if not os.path.isdir(subdir_video): + continue + subdir_save = os.path.join(dir_save, subdir) + os.makedirs(subdir_save, exist_ok=True) + files = os.listdir(subdir_video) + for file in files: + if not file.endswith('.mp4') or os.path.isdir(file): + continue + p_video = os.path.join(subdir_video, file) + p_save = os.path.join(subdir_save, file.replace('.mp4', '.png')) + print('{} -> {}'.format(p_video, p_save)) + + extract_centerframe(p_video, p_save, orifps, targetfps, n_keyframes, args.length_long, args.length_short) + + + + \ No newline at end of file diff --git a/CCEdit-main/scripts/tools/pnp_generate_config.py b/CCEdit-main/scripts/tools/pnp_generate_config.py new file mode 100644 index 0000000000000000000000000000000000000000..afbc35215ad6c292dbc43bba95b5a76290c937d1 --- /dev/null +++ b/CCEdit-main/scripts/tools/pnp_generate_config.py @@ -0,0 +1,52 @@ +''' +python scripts/sampling/pnp_generate_config.py \ + --p_config config_pnp_auto.yaml \ + --output_path "outputs/automatic_ref_editing/image" \ + --image_path "outputs/centerframe/tshirtman.png" \ + --latents_path "outputs/automatic_ref_editing/latents_forward" \ + --prompt "a man walks on the beach" +''' + + +import yaml +import argparse + +def save_yaml(args): + config_data = { + 'seed': args.seed, + 'device': args.device, + 'output_path': args.output_path, + 'image_path': args.image_path, + 'latents_path': args.latents_path, + 'sd_version': args.sd_version, + 'guidance_scale': args.guidance_scale, + 'n_timesteps': args.n_timesteps, + 'prompt': args.prompt, + 'negative_prompt': args.negative_prompt, + 'pnp_attn_t': args.pnp_attn_t, + 'pnp_f_t': args.pnp_f_t + } + + with open(args.p_config, 'w') as file: + yaml.dump(config_data, file, sort_keys=False, allow_unicode=True) + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description="Save configuration to a YAML file.") + parser.add_argument('--p_config', type=str, help="Path to save the YAML configuration 
file.") + parser.add_argument('--output_path', type=str, help="Output path for the results.") + parser.add_argument('--image_path', type=str, help="Path to the input image.") + parser.add_argument('--latents_path', type=str, help="Path to the latents file.") + parser.add_argument('--prompt', type=str, help="Prompt for the diffusion model.") + parser.add_argument('--seed', type=int, default=1, help="Seed for random number generation.") + parser.add_argument('--device', type=str, default='cuda', help="Device to be used (e.g., 'cuda', 'cpu').") + parser.add_argument('--sd_version', type=str, default='2.1', help="Version of the diffusion model.") + parser.add_argument('--guidance_scale', type=float, default=7.5, help="Guidance scale for the diffusion model.") + parser.add_argument('--n_timesteps', type=int, default=50, help="Number of timesteps for the diffusion process.") + parser.add_argument('--negative_prompt', type=str, default='ugly, blurry, black, low res, unrealistic', help="Negative prompt for the diffusion model.") + parser.add_argument('--pnp_attn_t', type=float, default=0.5, help="PNP attention threshold.") + parser.add_argument('--pnp_f_t', type=float, default=0.8, help="PNP feature threshold.") + + args = parser.parse_args() + + save_yaml(args) + print(f"YAML configuration saved to {args.p_config}") diff --git a/CCEdit-main/scripts/util/__init__.py b/CCEdit-main/scripts/util/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/CCEdit-main/scripts/util/detection/__init__.py b/CCEdit-main/scripts/util/detection/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/CCEdit-main/scripts/util/detection/nsfw_and_watermark_dectection.py b/CCEdit-main/scripts/util/detection/nsfw_and_watermark_dectection.py new file mode 100644 index 0000000000000000000000000000000000000000..af84acf308cd98007b2ced8e73851d4a0eef1f3a --- /dev/null +++ b/CCEdit-main/scripts/util/detection/nsfw_and_watermark_dectection.py @@ -0,0 +1,104 @@ +import os +import torch +import numpy as np +import torchvision.transforms as T +from PIL import Image +import clip + +RESOURCES_ROOT = "scripts/util/detection/" + + +def predict_proba(X, weights, biases): + logits = X @ weights.T + biases + proba = np.where( + logits >= 0, 1 / (1 + np.exp(-logits)), np.exp(logits) / (1 + np.exp(logits)) + ) + return proba.T + + +def load_model_weights(path: str): + model_weights = np.load(path) + return model_weights["weights"], model_weights["biases"] + + +def clip_process_images(images: torch.Tensor) -> torch.Tensor: + min_size = min(images.shape[-2:]) + return T.Compose( + [ + T.CenterCrop(min_size), # TODO: this might affect the watermark, check this + T.Resize(224, interpolation=T.InterpolationMode.BICUBIC, antialias=True), + T.Normalize( + (0.48145466, 0.4578275, 0.40821073), + (0.26862954, 0.26130258, 0.27577711), + ), + ] + )(images) + + +class DeepFloydDataFiltering(object): + def __init__(self, verbose: bool = False): + super().__init__() + self.verbose = verbose + self.clip_model, _ = clip.load("ViT-L/14", device="cpu") + self.clip_model.eval() + + self.cpu_w_weights, self.cpu_w_biases = load_model_weights( + os.path.join(RESOURCES_ROOT, "w_head_v1.npz") + ) + self.cpu_p_weights, self.cpu_p_biases = load_model_weights( + os.path.join(RESOURCES_ROOT, "p_head_v1.npz") + ) + self.w_threshold, self.p_threshold = 0.5, 0.5 + + @torch.inference_mode() + def __call__(self, 
images: torch.Tensor) -> torch.Tensor: + imgs = clip_process_images(images) + image_features = self.clip_model.encode_image(imgs.to("cpu")) + image_features = image_features.detach().cpu().numpy().astype(np.float16) + p_pred = predict_proba(image_features, self.cpu_p_weights, self.cpu_p_biases) + w_pred = predict_proba(image_features, self.cpu_w_weights, self.cpu_w_biases) + print(f"p_pred = {p_pred}, w_pred = {w_pred}") if self.verbose else None + query = p_pred > self.p_threshold + if query.sum() > 0: + print(f"Hit for p_threshold: {p_pred}") if self.verbose else None + images[query] = T.GaussianBlur(99, sigma=(100.0, 100.0))(images[query]) + query = w_pred > self.w_threshold + if query.sum() > 0: + print(f"Hit for w_threshold: {w_pred}") if self.verbose else None + images[query] = T.GaussianBlur(99, sigma=(100.0, 100.0))(images[query]) + return images + + +def load_img(path: str) -> torch.Tensor: + image = Image.open(path) + if not image.mode == "RGB": + image = image.convert("RGB") + image_transforms = T.Compose( + [ + T.ToTensor(), + ] + ) + return image_transforms(image)[None, ...] + + +def test(root): + from einops import rearrange + + filter = DeepFloydDataFiltering(verbose=True) + for p in os.listdir((root)): + print(f"running on {p}...") + img = load_img(os.path.join(root, p)) + filtered_img = filter(img) + filtered_img = rearrange( + 255.0 * (filtered_img.numpy())[0], "c h w -> h w c" + ).astype(np.uint8) + Image.fromarray(filtered_img).save( + os.path.join(root, f"{os.path.splitext(p)[0]}-filtered.jpg") + ) + + +if __name__ == "__main__": + import fire + + fire.Fire(test) + print("done.") diff --git a/CCEdit-main/sgm/modules/diffusionmodules/__init__.py b/CCEdit-main/sgm/modules/diffusionmodules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ce7968af9224aff42a20023b4e14ca059939e034 --- /dev/null +++ b/CCEdit-main/sgm/modules/diffusionmodules/__init__.py @@ -0,0 +1,7 @@ +from .denoiser import Denoiser +from .discretizer import Discretization +from .loss import StandardDiffusionLoss +from .model import Model, Encoder, Decoder +from .openaimodel import UNetModel +from .sampling import BaseDiffusionSampler +from .wrappers import OpenAIWrapper diff --git a/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/controlmodel.cpython-39.pyc b/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/controlmodel.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c96246dbb47f7084a41ffd2093eb27df9badd52 Binary files /dev/null and b/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/controlmodel.cpython-39.pyc differ diff --git a/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/denoiser.cpython-39.pyc b/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/denoiser.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12f495f1fa9b6e1c11df799e07c0ac571e9c9d18 Binary files /dev/null and b/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/denoiser.cpython-39.pyc differ diff --git a/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/denoiser_weighting.cpython-39.pyc b/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/denoiser_weighting.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ec6c9f7e403429eb80ab84acc6f7ba3eaadca16 Binary files /dev/null and b/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/denoiser_weighting.cpython-39.pyc differ diff --git a/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/discretizer.cpython-39.pyc 
b/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/discretizer.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a206c15b72c4e7804f1dbec3519e1d276d23c1d7 Binary files /dev/null and b/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/discretizer.cpython-39.pyc differ diff --git a/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/guiders.cpython-39.pyc b/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/guiders.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c96c6a8460ae3156cfeb4af47b57e5c4522804a Binary files /dev/null and b/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/guiders.cpython-39.pyc differ diff --git a/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/loss.cpython-39.pyc b/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/loss.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4c0190805af8b33acbfaadba5eab4c378ba8df7 Binary files /dev/null and b/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/loss.cpython-39.pyc differ diff --git a/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/sampling.cpython-39.pyc b/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/sampling.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..54ffb4289635b4f50c6ff07d29ddbb4e0028dbc6 Binary files /dev/null and b/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/sampling.cpython-39.pyc differ diff --git a/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/util.cpython-39.pyc b/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/util.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8301f673d09af1b0f03862c53d9e03c7fa9e948f Binary files /dev/null and b/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/util.cpython-39.pyc differ diff --git a/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/wrappers.cpython-39.pyc b/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/wrappers.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a5097ffd44cce8bd05ed27d3c3d50094a4945930 Binary files /dev/null and b/CCEdit-main/sgm/modules/diffusionmodules/__pycache__/wrappers.cpython-39.pyc differ diff --git a/FateZero-main/data/attribute/swan_swarov/00005.png b/FateZero-main/data/attribute/swan_swarov/00005.png new file mode 100644 index 0000000000000000000000000000000000000000..8574ef175a61cf3935af994325a9841b86008d24 --- /dev/null +++ b/FateZero-main/data/attribute/swan_swarov/00005.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2e48a1ec062e69faf7069e36d164a6e0372dab7c43f9b2a20c731f8db2b35146 +size 436798 diff --git a/FateZero-main/data/shape/man_skate/00007.png b/FateZero-main/data/shape/man_skate/00007.png new file mode 100644 index 0000000000000000000000000000000000000000..3a6111e4af59e8cc4b07921175136a38ae601f74 --- /dev/null +++ b/FateZero-main/data/shape/man_skate/00007.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a0e8637fd33f62d3310b214932581795a6a2d69f6dd270c3744ff1a1013f2d0c +size 398671 diff --git a/FateZero-main/data/shape/swan_swarov/00002.png b/FateZero-main/data/shape/swan_swarov/00002.png new file mode 100644 index 0000000000000000000000000000000000000000..cf929f00a5f899c2979c978400f78e8ae8339a1d --- /dev/null +++ b/FateZero-main/data/shape/swan_swarov/00002.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c3e7390f31e4af52ab89e37467f04beacf7b9e77c08f13049e7dead9e372692e +size 438961 
diff --git a/RAVE-main/.gitignore b/RAVE-main/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..a27cd89ef70257db09d3ab42721c9d171ac28dc3 --- /dev/null +++ b/RAVE-main/.gitignore @@ -0,0 +1,169 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +*.pt +*.bin +CIVIT_AI/diffusers_models/* +generated/* +pretrained_models/* +results/* +*.safetensors + +assets/notebook-generated/* +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. 
+#.idea/ diff --git a/RAVE-main/LICENSE b/RAVE-main/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..3e820c549639f6d60edbc5172b61ea5a3bd33cfb --- /dev/null +++ b/RAVE-main/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Rehg Lab + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/RAVE-main/README.md b/RAVE-main/README.md new file mode 100644 index 0000000000000000000000000000000000000000..5208d386d51d90d7a18d1145e1da548b04558335 --- /dev/null +++ b/RAVE-main/README.md @@ -0,0 +1,183 @@ +### RAVE: Randomized Noise Shuffling for Fast and Consistent Video Editing with Diffusion Models - Official Repo +### CVPR 2024 (Highlight) + +[Ozgur Kara](https://karaozgur.com/), [Bariscan Kurtkaya](https://bariscankurtkaya.github.io/), [Hidir Yesiltepe](https://sites.google.com/view/hidir-yesiltepe), [James M. Rehg](https://scholar.google.com/citations?hl=en&user=8kA3eDwAAAAJ), [Pinar Yanardag](https://scholar.google.com/citations?user=qzczdd8AAAAJ&hl=en) + +Web Demo + + + + +[![GitHub](https://img.shields.io/github/stars/rehg-lab/RAVE?style=social)](https://github.com/rehg-lab/RAVE) + +![Visitors](https://api.visitorbadge.io/api/visitors?path=https%3A%2F%2Fgithub.com%2Frehg-lab%2FRAVE&label=visitors&countColor=%23263759) + + +![teaser](assets/examples/grid-2x3.gif) +(Note that the videos on GitHub are heavily compressed. The full videos are available on the project webpage.) + +## Abstract +TL; DR: RAVE is a zero-shot, lightweight, and fast framework for text-guided video editing, supporting videos of any length utilizing text-to-image pretrained diffusion models. + +
Click for the full abstract + + +> Recent advancements in diffusion-based models have demonstrated significant success in generating images from text. However, video editing models have not yet reached the same level of visual quality and user control. To address this, we introduce RAVE, a zero-shot video editing method that leverages pre-trained text-to-image diffusion models without additional training. RAVE takes an input video and a text prompt to produce high-quality videos while preserving the original motion and semantic structure. It employs a novel noise shuffling strategy, leveraging spatio-temporal interactions between frames, to produce temporally consistent videos faster than existing methods. It is also efficient in terms of memory requirements, allowing it to handle longer videos. RAVE is capable of a wide range of edits, from local attribute modifications to shape transformations. In order to demonstrate the versatility of RAVE, we create a comprehensive video evaluation dataset ranging from object-focused scenes to complex human activities like dancing and typing, and dynamic scenes featuring swimming fish and boats. Our qualitative and quantitative experiments highlight the effectiveness of RAVE in diverse video editing scenarios compared to existing methods. +
+ +
+ +**Features**: +- *Zero-shot framework* +- *Working fast* +- *No restriction on video length* +- *Standardized dataset for evaluating text-guided video-editing methods* +- *Compatible with off-the-shelf pre-trained approaches (e.g. [CivitAI](https://civitai.com/))* + + +## Updates +- [12/2023] Gradio demo is released, HuggingFace Space demo will be released soon +- [12/2023] Paper is available on ArXiv, project webpage is ready and code is released. + +### TODO +- [ ] Share the dataset +- [X] Add more examples +- [X] Optimize preprocessing +- [X] Add CivitAI models to Grad.io +- [X] ~~Prepare a grad.io based GUI~~ +- [X] ~~Integrate MultiControlNet~~ +- [X] ~~Adapt CIVIT AI models~~ + +## Installation and Inference + +### Setup Environment +Please install our environment using 'requirements.txt' file as: +```shell +conda create -n rave python=3.8 +conda activate rave +conda install pip +pip cache purge +pip install -r requirements.txt +``` +Also, please install PyTorch and Xformers as +```shell +pip install torch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2 --index-url https://download.pytorch.org/whl/cu118 +pip install xformers==0.0.20 +``` +to set up the Conda environment. + +Our code was tested on Linux with the following versions: +```shell +timm==0.6.7 torch==2.0.1+cu118 xformers==0.0.20 diffusers==0.18.2 torch.version.cuda==11.8 python==3.8.0 +``` + +### WebUI Demo + +To run our grad.io based web demo, run the following command: +```shell +python webui.py +``` +Then, specify your configurations and perform editing. + + +### Inference + + +To run RAVE, please follow these steps: + +1- Put the video you want to edit under `data/mp4_videos` as an MP4 file. Note that we suggest using videos with a size of 512x512 or 512x320. + +2- Prepare a config file under the `configs` directory. Change the name of the `video_name` parameter to the name of the MP4 file. You can find detailed descriptions of the parameters and example configurations there. + +3- Run the following command: +```shell +python scripts/run_experiment.py [PATH OF CONFIG FILE] +``` +4- The results will be generated under the `results` directory. Also, the latents and controls are saved under the `generated` directory to speed up the editing with different prompts on the same video. +Note that the names of the preprocessors available can be found in `utils/constants.py`. + +### Use Customized Models from CIVIT AI + +Our code allows to run any customized model from CIVIT AI. To use these models, please follow the steps: + +1- Determine which model you want to use from CIVIT AI, and obtain its index. (e.g. the index for RealisticVision V5.1 is 130072, you can find the id of the model in the website link as a parameter assigned to 'VersionId', e.g. https://civitai.com/models/4201?modelVersionId=130072) + +2- In the current directory, run the following code. It downloads the model in safetensors format, and converts it to '.bin' format that is compatible with diffusers. +```shell +bash CIVIT_AI/civit_ai.sh 130072 +``` +3- Copy the path of the converted model, `$CWD/CIVIT_AI/diffusers_models/[CUSTOMIZED MODEL]` (e.g. `CIVIT_AI/diffusers_models/realisticVisionV60B1_v51VAE` for 130072), and use the path in the config file. + + +## Dataset + +Dataset will be released soon. + +## Examples +### Type of Edits + + + + + + + + + + + +
### WebUI Demo

To run our Gradio-based web demo, run the following command:
```shell
python webui.py
```
Then, specify your configurations and perform the editing.


### Inference

To run RAVE, please follow these steps:

1- Put the video you want to edit under `data/mp4_videos` as an MP4 file. We suggest using videos with a resolution of 512x512 or 512x320.

2- Prepare a config file under the `configs` directory and set the `video_name` parameter to the name of the MP4 file. You can find detailed descriptions of the parameters and example configurations there.

3- Run the following command:
```shell
python scripts/run_experiment.py [PATH OF CONFIG FILE]
```
4- The results are generated under the `results` directory. The latents and controls are also saved under the `generated` directory to speed up editing the same video with different prompts.

Note that the names of the available preprocessors can be found in `utils/constants.py`.

### Use Customized Models from CivitAI

Our code allows running any customized model from CivitAI. To use these models, please follow these steps:

1- Determine which model you want to use from CivitAI and obtain its version ID (e.g. the version ID of RealisticVision V5.1 is 130072; it appears in the model's URL as the `modelVersionId` parameter, e.g. https://civitai.com/models/4201?modelVersionId=130072).

2- In the current directory, run the following command. It downloads the model in safetensors format and converts it to the `.bin` format that is compatible with diffusers:
```shell
bash CIVIT_AI/civit_ai.sh 130072
```
3- Copy the path of the converted model, `$CWD/CIVIT_AI/diffusers_models/[CUSTOMIZED MODEL]` (e.g. `CIVIT_AI/diffusers_models/realisticVisionV60B1_v51VAE` for 130072), and use this path in the config file. A quick loading check for the converted model is sketched below.
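To confirm that the conversion produced a directory diffusers can read, a minimal loading sketch is given below. It assumes the converted folder follows the standard diffusers Stable Diffusion layout; RAVE itself picks the model up from the path set in the config file, and the directory name used here is just the example from step 3.

```python
# Sanity-check sketch: load the converted CivitAI model with diffusers and render one image.
import torch
from diffusers import StableDiffusionPipeline

model_dir = "CIVIT_AI/diffusers_models/realisticVisionV60B1_v51VAE"  # example path from step 3
pipe = StableDiffusionPipeline.from_pretrained(model_dir, torch_dtype=torch.float16).to("cuda")

image = pipe("a photo of a cat", num_inference_steps=20).images[0]
image.save("civitai_model_check.png")
```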
## Dataset

Dataset will be released soon.

## Examples

### Type of Edits
1. Local Editing
2. Visual Style Editing
3. Background Editing
4. Shape/Attribute Editing
5. Extreme Shape Editing

### Editing on Various Types of Motions
1. Exo-motion
2. Ego-motion
3. Ego-exo motion
4. Occlusions
5. Multiple objects with appearance/disappearance
+ +## Citation + +``` +@inproceedings{kara2024rave, + title={RAVE: Randomized Noise Shuffling for Fast and Consistent Video Editing with Diffusion Models}, + author={Ozgur Kara and Bariscan Kurtkaya and Hidir Yesiltepe and James M. Rehg and Pinar Yanardag}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + year={2024} +} + +``` + +## Maintenance + +This is the official repository for **RAVE: Randomized Noise Shuffling for Fast and Consistent Video Editing with Diffusion Models**. Feel free to contact for any questions or discussions [Ozgur Kara](ozgurrkara99@gmail.com). diff --git a/RAVE-main/annotator/annotator_path.py b/RAVE-main/annotator/annotator_path.py new file mode 100644 index 0000000000000000000000000000000000000000..d476286489676cfad400f4952c25ebb30f714105 --- /dev/null +++ b/RAVE-main/annotator/annotator_path.py @@ -0,0 +1,17 @@ +import os +import torch +import utils.constants as const + +DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +models_path = f'{const.CWD}/pretrained_models' + +clip_vision_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'clip_vision') +# clip vision is always inside controlnet "extensions\sd-webui-controlnet" +# and any problem can be solved by removing controlnet and reinstall + +models_path = os.path.realpath(models_path) +os.makedirs(models_path, exist_ok=True) +print(f'ControlNet preprocessor location: {models_path}') +# Make sure that the default location is inside controlnet "extensions\sd-webui-controlnet" +# so that any problem can be solved by removing controlnet and reinstall +# if users do not change configs on their own (otherwise users will know what is wrong) diff --git a/RAVE-main/annotator/util.py b/RAVE-main/annotator/util.py new file mode 100644 index 0000000000000000000000000000000000000000..60b574bf84a0b1b3fe649be51d8406be2c36c1ac --- /dev/null +++ b/RAVE-main/annotator/util.py @@ -0,0 +1,62 @@ +import numpy as np +import cv2 + + +def HWC3(x): + assert x.dtype == np.uint8 + if x.ndim == 2: + x = x[:, :, None] + assert x.ndim == 3 + H, W, C = x.shape + assert C == 1 or C == 3 or C == 4 + if C == 3: + return x + if C == 1: + return np.concatenate([x, x, x], axis=2) + if C == 4: + color = x[:, :, 0:3].astype(np.float32) + alpha = x[:, :, 3:4].astype(np.float32) / 255.0 + y = color * alpha + 255.0 * (1.0 - alpha) + y = y.clip(0, 255).astype(np.uint8) + return y + + +def make_noise_disk(H, W, C, F): + noise = np.random.uniform(low=0, high=1, size=((H // F) + 2, (W // F) + 2, C)) + noise = cv2.resize(noise, (W + 2 * F, H + 2 * F), interpolation=cv2.INTER_CUBIC) + noise = noise[F: F + H, F: F + W] + noise -= np.min(noise) + noise /= np.max(noise) + if C == 1: + noise = noise[:, :, None] + return noise + + +def nms(x, t, s): + x = cv2.GaussianBlur(x.astype(np.float32), (0, 0), s) + + f1 = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], dtype=np.uint8) + f2 = np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], dtype=np.uint8) + f3 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.uint8) + f4 = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=np.uint8) + + y = np.zeros_like(x) + + for f in [f1, f2, f3, f4]: + np.putmask(y, cv2.dilate(x, kernel=f) == x, x) + + z = np.zeros_like(y, dtype=np.uint8) + z[y > t] = 255 + return z + + +def min_max_norm(x): + x -= np.min(x) + x /= np.maximum(np.max(x), 1e-5) + return x + + +def safe_step(x, step=2): + y = x.astype(np.float32) * float(step + 1) + y = y.astype(np.int32).astype(np.float32) / float(step) + return y diff --git 
a/RAVE-main/annotator/zoe/zoedepth/models/__init__.py b/RAVE-main/annotator/zoe/zoedepth/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1ae1a1e4e86d9a5b14586cd006ed43d2bbc9b4a6 --- /dev/null +++ b/RAVE-main/annotator/zoe/zoedepth/models/__init__.py @@ -0,0 +1,24 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# File author: Shariq Farooq Bhat + diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/__init__.py b/RAVE-main/annotator/zoe/zoedepth/models/base_models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1ae1a1e4e86d9a5b14586cd006ed43d2bbc9b4a6 --- /dev/null +++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/__init__.py @@ -0,0 +1,24 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +# File author: Shariq Farooq Bhat + diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas.py b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas.py new file mode 100644 index 0000000000000000000000000000000000000000..d1fce1d7e9cdd419a7ecf5b72c49e1490269ac24 --- /dev/null +++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas.py @@ -0,0 +1,379 @@ +# MIT License +import os + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# File author: Shariq Farooq Bhat + +import torch +import torch.nn as nn +import numpy as np +from torchvision.transforms import Normalize + + +def denormalize(x): + """Reverses the imagenet normalization applied to the input. + + Args: + x (torch.Tensor - shape(N,3,H,W)): input tensor + + Returns: + torch.Tensor - shape(N,3,H,W): Denormalized input + """ + mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(x.device) + std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(x.device) + return x * std + mean + +def get_activation(name, bank): + def hook(model, input, output): + bank[name] = output + return hook + + +class Resize(object): + """Resize sample to given size (width, height). + """ + + def __init__( + self, + width, + height, + resize_target=True, + keep_aspect_ratio=False, + ensure_multiple_of=1, + resize_method="lower_bound", + ): + """Init. + Args: + width (int): desired output width + height (int): desired output height + resize_target (bool, optional): + True: Resize the full sample (image, mask, target). + False: Resize image only. + Defaults to True. + keep_aspect_ratio (bool, optional): + True: Keep the aspect ratio of the input sample. + Output sample might not have the given width and height, and + resize behaviour depends on the parameter 'resize_method'. + Defaults to False. + ensure_multiple_of (int, optional): + Output width and height is constrained to be multiple of this parameter. + Defaults to 1. + resize_method (str, optional): + "lower_bound": Output will be at least as large as the given size. + "upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.) + "minimal": Scale as least as possible. (Output size might be smaller than given size.) + Defaults to "lower_bound". 
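        Example (illustrative, not part of the original docstring; shapes assumed):
            Resize(384, 384, keep_aspect_ratio=True, ensure_multiple_of=32,
                   resize_method="minimal") applied to a (1, 3, 480, 640) tensor
            returns a (1, 3, 384, 512) tensor: the height is scaled towards the
            target 384 and the width follows the input aspect ratio, rounded to
            a multiple of 32.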
+ """ + # print("Params passed to Resize transform:") + # print("\twidth: ", width) + # print("\theight: ", height) + # print("\tresize_target: ", resize_target) + # print("\tkeep_aspect_ratio: ", keep_aspect_ratio) + # print("\tensure_multiple_of: ", ensure_multiple_of) + # print("\tresize_method: ", resize_method) + + self.__width = width + self.__height = height + + self.__keep_aspect_ratio = keep_aspect_ratio + self.__multiple_of = ensure_multiple_of + self.__resize_method = resize_method + + def constrain_to_multiple_of(self, x, min_val=0, max_val=None): + y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int) + + if max_val is not None and y > max_val: + y = (np.floor(x / self.__multiple_of) + * self.__multiple_of).astype(int) + + if y < min_val: + y = (np.ceil(x / self.__multiple_of) + * self.__multiple_of).astype(int) + + return y + + def get_size(self, width, height): + # determine new height and width + scale_height = self.__height / height + scale_width = self.__width / width + + if self.__keep_aspect_ratio: + if self.__resize_method == "lower_bound": + # scale such that output size is lower bound + if scale_width > scale_height: + # fit width + scale_height = scale_width + else: + # fit height + scale_width = scale_height + elif self.__resize_method == "upper_bound": + # scale such that output size is upper bound + if scale_width < scale_height: + # fit width + scale_height = scale_width + else: + # fit height + scale_width = scale_height + elif self.__resize_method == "minimal": + # scale as least as possbile + if abs(1 - scale_width) < abs(1 - scale_height): + # fit width + scale_height = scale_width + else: + # fit height + scale_width = scale_height + else: + raise ValueError( + f"resize_method {self.__resize_method} not implemented" + ) + + if self.__resize_method == "lower_bound": + new_height = self.constrain_to_multiple_of( + scale_height * height, min_val=self.__height + ) + new_width = self.constrain_to_multiple_of( + scale_width * width, min_val=self.__width + ) + elif self.__resize_method == "upper_bound": + new_height = self.constrain_to_multiple_of( + scale_height * height, max_val=self.__height + ) + new_width = self.constrain_to_multiple_of( + scale_width * width, max_val=self.__width + ) + elif self.__resize_method == "minimal": + new_height = self.constrain_to_multiple_of(scale_height * height) + new_width = self.constrain_to_multiple_of(scale_width * width) + else: + raise ValueError( + f"resize_method {self.__resize_method} not implemented") + + return (new_width, new_height) + + def __call__(self, x): + width, height = self.get_size(*x.shape[-2:][::-1]) + return nn.functional.interpolate(x, (height, width), mode='bilinear', align_corners=True) + +class PrepForMidas(object): + def __init__(self, resize_mode="minimal", keep_aspect_ratio=True, img_size=384, do_resize=True): + if isinstance(img_size, int): + img_size = (img_size, img_size) + net_h, net_w = img_size + self.normalization = Normalize( + mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + self.resizer = Resize(net_w, net_h, keep_aspect_ratio=keep_aspect_ratio, ensure_multiple_of=32, resize_method=resize_mode) \ + if do_resize else nn.Identity() + + def __call__(self, x): + return self.normalization(self.resizer(x)) + + +class MidasCore(nn.Module): + def __init__(self, midas, trainable=False, fetch_features=True, layer_names=('out_conv', 'l4_rn', 'r4', 'r3', 'r2', 'r1'), freeze_bn=False, keep_aspect_ratio=True, + img_size=384, **kwargs): + """Midas Base model used for multi-scale feature 
extraction. + + Args: + midas (torch.nn.Module): Midas model. + trainable (bool, optional): Train midas model. Defaults to False. + fetch_features (bool, optional): Extract multi-scale features. Defaults to True. + layer_names (tuple, optional): Layers used for feature extraction. Order = (head output features, last layer features, ...decoder features). Defaults to ('out_conv', 'l4_rn', 'r4', 'r3', 'r2', 'r1'). + freeze_bn (bool, optional): Freeze BatchNorm. Generally results in better finetuning performance. Defaults to False. + keep_aspect_ratio (bool, optional): Keep the aspect ratio of input images while resizing. Defaults to True. + img_size (int, tuple, optional): Input resolution. Defaults to 384. + """ + super().__init__() + self.core = midas + self.output_channels = None + self.core_out = {} + self.trainable = trainable + self.fetch_features = fetch_features + # midas.scratch.output_conv = nn.Identity() + self.handles = [] + # self.layer_names = ['out_conv','l4_rn', 'r4', 'r3', 'r2', 'r1'] + self.layer_names = layer_names + + self.set_trainable(trainable) + self.set_fetch_features(fetch_features) + + self.prep = PrepForMidas(keep_aspect_ratio=keep_aspect_ratio, + img_size=img_size, do_resize=kwargs.get('do_resize', True)) + + if freeze_bn: + self.freeze_bn() + + def set_trainable(self, trainable): + self.trainable = trainable + if trainable: + self.unfreeze() + else: + self.freeze() + return self + + def set_fetch_features(self, fetch_features): + self.fetch_features = fetch_features + if fetch_features: + if len(self.handles) == 0: + self.attach_hooks(self.core) + else: + self.remove_hooks() + return self + + def freeze(self): + for p in self.parameters(): + p.requires_grad = False + self.trainable = False + return self + + def unfreeze(self): + for p in self.parameters(): + p.requires_grad = True + self.trainable = True + return self + + def freeze_bn(self): + for m in self.modules(): + if isinstance(m, nn.BatchNorm2d): + m.eval() + return self + + def forward(self, x, denorm=False, return_rel_depth=False): + with torch.no_grad(): + if denorm: + x = denormalize(x) + x = self.prep(x) + # print("Shape after prep: ", x.shape) + + with torch.set_grad_enabled(self.trainable): + + # print("Input size to Midascore", x.shape) + rel_depth = self.core(x) + # print("Output from midas shape", rel_depth.shape) + if not self.fetch_features: + return rel_depth + out = [self.core_out[k] for k in self.layer_names] + + if return_rel_depth: + return rel_depth, out + return out + + def get_rel_pos_params(self): + for name, p in self.core.pretrained.named_parameters(): + if "relative_position" in name: + yield p + + def get_enc_params_except_rel_pos(self): + for name, p in self.core.pretrained.named_parameters(): + if "relative_position" not in name: + yield p + + def freeze_encoder(self, freeze_rel_pos=False): + if freeze_rel_pos: + for p in self.core.pretrained.parameters(): + p.requires_grad = False + else: + for p in self.get_enc_params_except_rel_pos(): + p.requires_grad = False + return self + + def attach_hooks(self, midas): + if len(self.handles) > 0: + self.remove_hooks() + if "out_conv" in self.layer_names: + self.handles.append(list(midas.scratch.output_conv.children())[ + 3].register_forward_hook(get_activation("out_conv", self.core_out))) + if "r4" in self.layer_names: + self.handles.append(midas.scratch.refinenet4.register_forward_hook( + get_activation("r4", self.core_out))) + if "r3" in self.layer_names: + self.handles.append(midas.scratch.refinenet3.register_forward_hook( + 
get_activation("r3", self.core_out))) + if "r2" in self.layer_names: + self.handles.append(midas.scratch.refinenet2.register_forward_hook( + get_activation("r2", self.core_out))) + if "r1" in self.layer_names: + self.handles.append(midas.scratch.refinenet1.register_forward_hook( + get_activation("r1", self.core_out))) + if "l4_rn" in self.layer_names: + self.handles.append(midas.scratch.layer4_rn.register_forward_hook( + get_activation("l4_rn", self.core_out))) + + return self + + def remove_hooks(self): + for h in self.handles: + h.remove() + return self + + def __del__(self): + self.remove_hooks() + + def set_output_channels(self, model_type): + self.output_channels = MIDAS_SETTINGS[model_type] + + @staticmethod + def build(midas_model_type="DPT_BEiT_L_384", train_midas=False, use_pretrained_midas=True, fetch_features=False, freeze_bn=True, force_keep_ar=False, force_reload=False, **kwargs): + if midas_model_type not in MIDAS_SETTINGS: + raise ValueError( + f"Invalid model type: {midas_model_type}. Must be one of {list(MIDAS_SETTINGS.keys())}") + if "img_size" in kwargs: + kwargs = MidasCore.parse_img_size(kwargs) + img_size = kwargs.pop("img_size", [384, 384]) + print("img_size", img_size) + midas_path = os.path.join(os.path.dirname(__file__), 'midas_repo') + midas = torch.hub.load(midas_path, midas_model_type, + pretrained=use_pretrained_midas, force_reload=force_reload, source='local') + kwargs.update({'keep_aspect_ratio': force_keep_ar}) + midas_core = MidasCore(midas, trainable=train_midas, fetch_features=fetch_features, + freeze_bn=freeze_bn, img_size=img_size, **kwargs) + midas_core.set_output_channels(midas_model_type) + return midas_core + + @staticmethod + def build_from_config(config): + return MidasCore.build(**config) + + @staticmethod + def parse_img_size(config): + assert 'img_size' in config + if isinstance(config['img_size'], str): + assert "," in config['img_size'], "img_size should be a string with comma separated img_size=H,W" + config['img_size'] = list(map(int, config['img_size'].split(","))) + assert len( + config['img_size']) == 2, "img_size should be a string with comma separated img_size=H,W" + elif isinstance(config['img_size'], int): + config['img_size'] = [config['img_size'], config['img_size']] + else: + assert isinstance(config['img_size'], list) and len( + config['img_size']) == 2, "img_size should be a list of H,W" + return config + + +nchannels2models = { + tuple([256]*5): ["DPT_BEiT_L_384", "DPT_BEiT_L_512", "DPT_BEiT_B_384", "DPT_SwinV2_L_384", "DPT_SwinV2_B_384", "DPT_SwinV2_T_256", "DPT_Large", "DPT_Hybrid"], + (512, 256, 128, 64, 64): ["MiDaS_small"] +} + +# Model name to number of output channels +MIDAS_SETTINGS = {m: k for k, v in nchannels2models.items() + for m in v + } diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/.gitignore b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..9b4486124f6db846f8746400e0826d8b37ce4a7f --- /dev/null +++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/.gitignore @@ -0,0 +1,110 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds 
the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ + +*.png +*.pfm +*.jpg +*.jpeg +*.pt \ No newline at end of file diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/Dockerfile b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..93164297a40e68cf479afc3b8e064468903f8bf1 --- /dev/null +++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/Dockerfile @@ -0,0 +1,29 @@ +# enables cuda support in docker +FROM nvidia/cuda:10.2-cudnn7-runtime-ubuntu18.04 + +# install python 3.6, pip and requirements for opencv-python +# (see https://github.com/NVIDIA/nvidia-docker/issues/864) +RUN apt-get update && apt-get -y install \ + python3 \ + python3-pip \ + libsm6 \ + libxext6 \ + libxrender-dev \ + curl \ + && rm -rf /var/lib/apt/lists/* + +# install python dependencies +RUN pip3 install --upgrade pip +RUN pip3 install torch~=1.8 torchvision opencv-python-headless~=3.4 timm + +# copy inference code +WORKDIR /opt/MiDaS +COPY ./midas ./midas +COPY ./*.py ./ + +# download model weights so the docker image can be used offline +RUN cd weights && {curl -OL https://github.com/isl-org/MiDaS/releases/download/v3/dpt_hybrid_384.pt; cd -; } +RUN python3 run.py --model_type dpt_hybrid; exit 0 + +# entrypoint (dont forget to mount input and output directories) +CMD python3 run.py --model_type dpt_hybrid diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/LICENSE b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..0365733785a449c285c6ac704ef443f385fe798c --- /dev/null +++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 Intel ISL (Intel Intelligent Systems Lab) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/README.md b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/README.md new file mode 100644 index 0000000000000000000000000000000000000000..44d485c553211625a4f9303c9b6fb186d4aa56bb --- /dev/null +++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/README.md @@ -0,0 +1,259 @@ +## Towards Robust Monocular Depth Estimation: Mixing Datasets for Zero-shot Cross-dataset Transfer + +This repository contains code to compute depth from a single image. It accompanies our [paper](https://arxiv.org/abs/1907.01341v3): + +>Towards Robust Monocular Depth Estimation: Mixing Datasets for Zero-shot Cross-dataset Transfer +René Ranftl, Katrin Lasinger, David Hafner, Konrad Schindler, Vladlen Koltun + + +and our [preprint](https://arxiv.org/abs/2103.13413): + +> Vision Transformers for Dense Prediction +> René Ranftl, Alexey Bochkovskiy, Vladlen Koltun + + +MiDaS was trained on up to 12 datasets (ReDWeb, DIML, Movies, MegaDepth, WSVD, TartanAir, HRWSI, ApolloScape, BlendedMVS, IRS, KITTI, NYU Depth V2) with +multi-objective optimization. +The original model that was trained on 5 datasets (`MIX 5` in the paper) can be found [here](https://github.com/isl-org/MiDaS/releases/tag/v2). +The figure below shows an overview of the different MiDaS models; the bubble size scales with number of parameters. + +![](figures/Improvement_vs_FPS.png) + +### Setup + +1) Pick one or more models and download the corresponding weights to the `weights` folder: + +MiDaS 3.1 +- For highest quality: [dpt_beit_large_512](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_large_512.pt) +- For moderately less quality, but better speed-performance trade-off: [dpt_swin2_large_384](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_swin2_large_384.pt) +- For embedded devices: [dpt_swin2_tiny_256](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_swin2_tiny_256.pt), [dpt_levit_224](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_levit_224.pt) +- For inference on Intel CPUs, OpenVINO may be used for the small legacy model: openvino_midas_v21_small [.xml](https://github.com/isl-org/MiDaS/releases/download/v3_1/openvino_midas_v21_small_256.xml), [.bin](https://github.com/isl-org/MiDaS/releases/download/v3_1/openvino_midas_v21_small_256.bin) + +MiDaS 3.0: Legacy transformer models [dpt_large_384](https://github.com/isl-org/MiDaS/releases/download/v3/dpt_large_384.pt) and [dpt_hybrid_384](https://github.com/isl-org/MiDaS/releases/download/v3/dpt_hybrid_384.pt) + +MiDaS 2.1: Legacy convolutional models [midas_v21_384](https://github.com/isl-org/MiDaS/releases/download/v2_1/midas_v21_384.pt) and [midas_v21_small_256](https://github.com/isl-org/MiDaS/releases/download/v2_1/midas_v21_small_256.pt) + +1) Set up dependencies: + + ```shell + conda env create -f environment.yaml + conda activate midas-py310 + ``` + +#### optional + +For the Next-ViT model, execute + +```shell +git submodule add https://github.com/isl-org/Next-ViT midas/external/next_vit +``` + +For the OpenVINO model, install + +```shell +pip install openvino +``` + +### Usage + +1) Place one or more input images in the folder `input`. 
2) Run the model with

   ```shell
   python run.py --model_type <model_type> --input_path input --output_path output
   ```
   where `<model_type>` is chosen from [dpt_beit_large_512](#model_type), [dpt_beit_large_384](#model_type),
   [dpt_beit_base_384](#model_type), [dpt_swin2_large_384](#model_type), [dpt_swin2_base_384](#model_type),
   [dpt_swin2_tiny_256](#model_type), [dpt_swin_large_384](#model_type), [dpt_next_vit_large_384](#model_type),
   [dpt_levit_224](#model_type), [dpt_large_384](#model_type), [dpt_hybrid_384](#model_type),
   [midas_v21_384](#model_type), [midas_v21_small_256](#model_type), [openvino_midas_v21_small_256](#model_type).

3) The resulting depth maps are written to the `output` folder.

#### optional

1) By default, the inference resizes the height of input images to the size of a model to fit into the encoder. This
   size is given by the numbers in the model names of the [accuracy table](#accuracy). Some models support not only a
   single inference height but a range of different heights. Feel free to explore different heights by appending the
   extra command line argument `--height`. Unsupported height values will throw an error. Note that using this argument
   may decrease the model accuracy.
2) By default, the inference keeps the aspect ratio of input images when feeding them into the encoder if this is
   supported by a model (all models except for Swin, Swin2, LeViT). In order to resize to a square resolution,
   disregarding the aspect ratio while preserving the height, use the command line argument `--square`.

#### via Camera

   If you want the input images to be grabbed from the camera and shown in a window, omit the input and output paths
   and choose a model type as shown above:

   ```shell
   python run.py --model_type <model_type> --side
   ```

   The argument `--side` is optional and causes both the input RGB image and the output depth map to be shown
   side-by-side for comparison.

#### via Docker

1) Make sure you have installed Docker and the
   [NVIDIA Docker runtime](https://github.com/NVIDIA/nvidia-docker/wiki/Installation-\(Native-GPU-Support\)).

2) Build the Docker image:

   ```shell
   docker build -t midas .
   ```

3) Run inference:

   ```shell
   docker run --rm --gpus all -v $PWD/input:/opt/MiDaS/input -v $PWD/output:/opt/MiDaS/output -v $PWD/weights:/opt/MiDaS/weights midas
   ```

   This command passes through all of your NVIDIA GPUs to the container, mounts the
   `input` and `output` directories and then runs the inference.

#### via PyTorch Hub

The pretrained model is also available on [PyTorch Hub](https://pytorch.org/hub/intelisl_midas_v2/); a minimal usage sketch is given at the end of this section.

#### via TensorFlow or ONNX

See [README](https://github.com/isl-org/MiDaS/tree/master/tf) in the `tf` subdirectory.

Currently only supports MiDaS v2.1.

#### via Mobile (iOS / Android)

See [README](https://github.com/isl-org/MiDaS/tree/master/mobile) in the `mobile` subdirectory.

#### via ROS1 (Robot Operating System)

See [README](https://github.com/isl-org/MiDaS/tree/master/ros) in the `ros` subdirectory.

Currently only supports MiDaS v2.1. DPT-based models to be added.
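For the PyTorch Hub route mentioned above, a minimal sketch follows. It assumes the published `intel-isl/MiDaS` hub entrypoints (they mirror the `hubconf.py` included later in this diff) and a hypothetical input file `input/example.jpg`:

```python
# PyTorch Hub sketch (assumed entrypoints from the published hub page; file path hypothetical).
import cv2
import torch

model_type = "DPT_Hybrid"  # any entrypoint from hubconf.py, e.g. "DPT_Large" or "MiDaS_small"
midas = torch.hub.load("intel-isl/MiDaS", model_type)
midas.eval()

hub_transforms = torch.hub.load("intel-isl/MiDaS", "transforms")
transform = hub_transforms.dpt_transform  # use hub_transforms.small_transform for MiDaS_small

img = cv2.cvtColor(cv2.imread("input/example.jpg"), cv2.COLOR_BGR2RGB)

with torch.no_grad():
    prediction = midas(transform(img))
    # Interpolate the predicted inverse depth back to the original image resolution.
    prediction = torch.nn.functional.interpolate(
        prediction.unsqueeze(1),
        size=img.shape[:2],
        mode="bicubic",
        align_corners=False,
    ).squeeze()

depth = prediction.cpu().numpy()
```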
### Accuracy

We provide a **zero-shot error** $\epsilon_d$ which is evaluated for 6 different datasets (see [paper](https://arxiv.org/abs/1907.01341v3)). **Lower error values are better**. $\color{green}{\textsf{Overall model quality is represented by the improvement}}$ ([Imp.](#improvement)) with respect to MiDaS 3.0 DPTL-384. The models are grouped by the height used for inference, whereas the square training resolution is given by the numbers in the model names. The table also shows the **number of parameters** (in millions) and the **frames per second** for inference at the training resolution (for GPU RTX 3090):

| MiDaS Model | DIW WHDR | Eth3d AbsRel | Sintel AbsRel | TUM δ1 | KITTI δ1 | NYUv2 δ1 | $\color{green}{\textsf{Imp.}}$ % | Par. M | FPS |
|---|---:|---:|---:|---:|---:|---:|---:|---:|---:|
| **Inference height 512** | | | | | | | | | |
| [v3.1 BEiTL-512](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_large_512.pt) | 0.1137 | 0.0659 | 0.2366 | **6.13** | 11.56* | **1.86*** | $\color{green}{\textsf{19}}$ | **345** | **5.7** |
| [v3.1 BEiTL-512](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_large_512.pt)$\tiny{\square}$ | **0.1121** | **0.0614** | **0.2090** | 6.46 | **5.00*** | 1.90* | $\color{green}{\textsf{34}}$ | **345** | **5.7** |
| | | | | | | | | | |
| **Inference height 384** | | | | | | | | | |
| [v3.1 BEiTL-512](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_large_512.pt) | 0.1245 | 0.0681 | **0.2176** | **6.13** | 6.28* | **2.16*** | $\color{green}{\textsf{28}}$ | 345 | 12 |
| [v3.1 Swin2L-384](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_swin2_large_384.pt)$\tiny{\square}$ | 0.1106 | 0.0732 | 0.2442 | 8.87 | **5.84*** | 2.92* | $\color{green}{\textsf{22}}$ | 213 | 41 |
| [v3.1 Swin2B-384](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_swin2_base_384.pt)$\tiny{\square}$ | 0.1095 | 0.0790 | 0.2404 | 8.93 | 5.97* | 3.28* | $\color{green}{\textsf{22}}$ | 102 | 39 |
| [v3.1 SwinL-384](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_swin_large_384.pt)$\tiny{\square}$ | 0.1126 | 0.0853 | 0.2428 | 8.74 | 6.60* | 3.34* | $\color{green}{\textsf{17}}$ | 213 | 49 |
| [v3.1 BEiTL-384](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_large_384.pt) | 0.1239 | **0.0667** | 0.2545 | 7.17 | 9.84* | 2.21* | $\color{green}{\textsf{17}}$ | 344 | 13 |
| [v3.1 Next-ViTL-384](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_next_vit_large_384.pt) | **0.1031** | 0.0954 | 0.2295 | 9.21 | 6.89* | 3.47* | $\color{green}{\textsf{16}}$ | **72** | 30 |
| [v3.1 BEiTB-384](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_base_384.pt) | 0.1159 | 0.0967 | 0.2901 | 9.88 | 26.60* | 3.91* | $\color{green}{\textsf{-31}}$ | 112 | 31 |
| [v3.0 DPTL-384](https://github.com/isl-org/MiDaS/releases/download/v3/dpt_large_384.pt) | 0.1082 | 0.0888 | 0.2697 | 9.97 | 8.46 | 8.32 | $\color{green}{\textsf{0}}$ | 344 | **61** |
| [v3.0 DPTH-384](https://github.com/isl-org/MiDaS/releases/download/v3/dpt_hybrid_384.pt) | 0.1106 | 0.0934 | 0.2741 | 10.89 | 11.56 | 8.69 | $\color{green}{\textsf{-10}}$ | 123 | 50 |
| [v2.1 Large384](https://github.com/isl-org/MiDaS/releases/download/v2_1/midas_v21_384.pt) | 0.1295 | 0.1155 | 0.3285 | 12.51 | 16.08 | 8.71 | $\color{green}{\textsf{-32}}$ | 105 | 47 |
| | | | | | | | | | |
| **Inference height 256** | | | | | | | | | |
| [v3.1 Swin2T-256](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_swin2_tiny_256.pt)$\tiny{\square}$ | **0.1211** | **0.1106** | **0.2868** | **13.43** | **10.13*** | **5.55*** | $\color{green}{\textsf{-11}}$ | 42 | 64 |
| [v2.1 Small256](https://github.com/isl-org/MiDaS/releases/download/v2_1/midas_v21_small_256.pt) | 0.1344 | 0.1344 | 0.3370 | 14.53 | 29.27 | 13.43 | $\color{green}{\textsf{-76}}$ | **21** | **90** |
| | | | | | | | | | |
| **Inference height 224** | | | | | | | | | |
| [v3.1 
LeViT224](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_levit_224.pt)$\tiny{\square}$ | **0.1314** | **0.1206** | **0.3148** | **18.21** | **15.27*** | **8.64*** | $\color{green}{\textsf{-40}}$ | **51** | **73** | + +* No zero-shot error, because models are also trained on KITTI and NYU Depth V2\ +$\square$ Validation performed at **square resolution**, either because the transformer encoder backbone of a model +does not support non-square resolutions (Swin, Swin2, LeViT) or for comparison with these models. All other +validations keep the aspect ratio. A difference in resolution limits the comparability of the zero-shot error and the +improvement, because these quantities are averages over the pixels of an image and do not take into account the +advantage of more details due to a higher resolution.\ +Best values per column and same validation height in bold + +#### Improvement + +The improvement in the above table is defined as the relative zero-shot error with respect to MiDaS v3.0 +DPTL-384 and averaging over the datasets. So, if $\epsilon_d$ is the zero-shot error for dataset $d$, then +the $\color{green}{\textsf{improvement}}$ is given by $100(1-(1/6)\sum_d\epsilon_d/\epsilon_{d,\rm{DPT_{L-384}}})$%. + +Note that the improvements of 10% for MiDaS v2.0 → v2.1 and 21% for MiDaS v2.1 → v3.0 are not visible from the +improvement column (Imp.) in the table but would require an evaluation with respect to MiDaS v2.1 Large384 +and v2.0 Large384 respectively instead of v3.0 DPTL-384. + +### Depth map comparison + +Zoom in for better visibility +![](figures/Comparison.png) + +### Speed on Camera Feed + +Test configuration +- Windows 10 +- 11th Gen Intel Core i7-1185G7 3.00GHz +- 16GB RAM +- Camera resolution 640x480 +- openvino_midas_v21_small_256 + +Speed: 22 FPS + +### Changelog + +* [Dec 2022] Released MiDaS v3.1: + - New models based on 5 different types of transformers ([BEiT](https://arxiv.org/pdf/2106.08254.pdf), [Swin2](https://arxiv.org/pdf/2111.09883.pdf), [Swin](https://arxiv.org/pdf/2103.14030.pdf), [Next-ViT](https://arxiv.org/pdf/2207.05501.pdf), [LeViT](https://arxiv.org/pdf/2104.01136.pdf)) + - Training datasets extended from 10 to 12, including also KITTI and NYU Depth V2 using [BTS](https://github.com/cleinc/bts) split + - Best model, BEiTLarge 512, with resolution 512x512, is on average about [28% more accurate](#Accuracy) than MiDaS v3.0 + - Integrated live depth estimation from camera feed +* [Sep 2021] Integrated to [Huggingface Spaces](https://huggingface.co/spaces) with [Gradio](https://github.com/gradio-app/gradio). See [Gradio Web Demo](https://huggingface.co/spaces/akhaliq/DPT-Large). +* [Apr 2021] Released MiDaS v3.0: + - New models based on [Dense Prediction Transformers](https://arxiv.org/abs/2103.13413) are on average [21% more accurate](#Accuracy) than MiDaS v2.1 + - Additional models can be found [here](https://github.com/isl-org/DPT) +* [Nov 2020] Released MiDaS v2.1: + - New model that was trained on 10 datasets and is on average about [10% more accurate](#Accuracy) than [MiDaS v2.0](https://github.com/isl-org/MiDaS/releases/tag/v2) + - New light-weight model that achieves [real-time performance](https://github.com/isl-org/MiDaS/tree/master/mobile) on mobile platforms. 
+ - Sample applications for [iOS](https://github.com/isl-org/MiDaS/tree/master/mobile/ios) and [Android](https://github.com/isl-org/MiDaS/tree/master/mobile/android) + - [ROS package](https://github.com/isl-org/MiDaS/tree/master/ros) for easy deployment on robots +* [Jul 2020] Added TensorFlow and ONNX code. Added [online demo](http://35.202.76.57/). +* [Dec 2019] Released new version of MiDaS - the new model is significantly more accurate and robust +* [Jul 2019] Initial release of MiDaS ([Link](https://github.com/isl-org/MiDaS/releases/tag/v1)) + +### Citation + +Please cite our paper if you use this code or any of the models: +``` +@ARTICLE {Ranftl2022, + author = "Ren\'{e} Ranftl and Katrin Lasinger and David Hafner and Konrad Schindler and Vladlen Koltun", + title = "Towards Robust Monocular Depth Estimation: Mixing Datasets for Zero-Shot Cross-Dataset Transfer", + journal = "IEEE Transactions on Pattern Analysis and Machine Intelligence", + year = "2022", + volume = "44", + number = "3" +} +``` + +If you use a DPT-based model, please also cite: + +``` +@article{Ranftl2021, + author = {Ren\'{e} Ranftl and Alexey Bochkovskiy and Vladlen Koltun}, + title = {Vision Transformers for Dense Prediction}, + journal = {ICCV}, + year = {2021}, +} +``` + +### Acknowledgements + +Our work builds on and uses code from [timm](https://github.com/rwightman/pytorch-image-models) and [Next-ViT](https://github.com/bytedance/Next-ViT). +We'd like to thank the authors for making these libraries available. + +### License + +MIT License diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/environment.yaml b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/environment.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e20cfd31c52ecbdaba67382878fe281faf456beb --- /dev/null +++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/environment.yaml @@ -0,0 +1,16 @@ +name: midas-py310 +channels: + - pytorch + - defaults +dependencies: + - nvidia::cudatoolkit=11.7 + - python=3.10.8 + - pytorch::pytorch=1.13.0 + - torchvision=0.14.0 + - pip=22.3.1 + - numpy=1.23.4 + - pip: + - opencv-python==4.6.0.66 + - imutils==0.5.4 + - timm==0.6.12 + - einops==0.6.0 \ No newline at end of file diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/hubconf.py b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/hubconf.py new file mode 100644 index 0000000000000000000000000000000000000000..37c4cbd644c5a0533a7203f2115875be8e596ec1 --- /dev/null +++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/hubconf.py @@ -0,0 +1,435 @@ +dependencies = ["torch"] + +import torch + +from midas.dpt_depth import DPTDepthModel +from midas.midas_net import MidasNet +from midas.midas_net_custom import MidasNet_small + +def DPT_BEiT_L_512(pretrained=True, **kwargs): + """ # This docstring shows up in hub.help() + MiDaS DPT_BEiT_L_512 model for monocular depth estimation + pretrained (bool): load pretrained weights into model + """ + + model = DPTDepthModel( + path=None, + backbone="beitl16_512", + non_negative=True, + ) + + if pretrained: + checkpoint = ( + "https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_large_512.pt" + ) + state_dict = torch.hub.load_state_dict_from_url( + checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True + ) + model.load_state_dict(state_dict) + + return model + +def DPT_BEiT_L_384(pretrained=True, **kwargs): + """ # This docstring shows up in hub.help() + MiDaS 
DPT_BEiT_L_384 model for monocular depth estimation + pretrained (bool): load pretrained weights into model + """ + + model = DPTDepthModel( + path=None, + backbone="beitl16_384", + non_negative=True, + ) + + if pretrained: + checkpoint = ( + "https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_large_384.pt" + ) + state_dict = torch.hub.load_state_dict_from_url( + checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True + ) + model.load_state_dict(state_dict) + + return model + +def DPT_BEiT_B_384(pretrained=True, **kwargs): + """ # This docstring shows up in hub.help() + MiDaS DPT_BEiT_B_384 model for monocular depth estimation + pretrained (bool): load pretrained weights into model + """ + + model = DPTDepthModel( + path=None, + backbone="beitb16_384", + non_negative=True, + ) + + if pretrained: + checkpoint = ( + "https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_base_384.pt" + ) + state_dict = torch.hub.load_state_dict_from_url( + checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True + ) + model.load_state_dict(state_dict) + + return model + +def DPT_SwinV2_L_384(pretrained=True, **kwargs): + """ # This docstring shows up in hub.help() + MiDaS DPT_SwinV2_L_384 model for monocular depth estimation + pretrained (bool): load pretrained weights into model + """ + + model = DPTDepthModel( + path=None, + backbone="swin2l24_384", + non_negative=True, + ) + + if pretrained: + checkpoint = ( + "https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_swin2_large_384.pt" + ) + state_dict = torch.hub.load_state_dict_from_url( + checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True + ) + model.load_state_dict(state_dict) + + return model + +def DPT_SwinV2_B_384(pretrained=True, **kwargs): + """ # This docstring shows up in hub.help() + MiDaS DPT_SwinV2_B_384 model for monocular depth estimation + pretrained (bool): load pretrained weights into model + """ + + model = DPTDepthModel( + path=None, + backbone="swin2b24_384", + non_negative=True, + ) + + if pretrained: + checkpoint = ( + "https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_swin2_base_384.pt" + ) + state_dict = torch.hub.load_state_dict_from_url( + checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True + ) + model.load_state_dict(state_dict) + + return model + +def DPT_SwinV2_T_256(pretrained=True, **kwargs): + """ # This docstring shows up in hub.help() + MiDaS DPT_SwinV2_T_256 model for monocular depth estimation + pretrained (bool): load pretrained weights into model + """ + + model = DPTDepthModel( + path=None, + backbone="swin2t16_256", + non_negative=True, + ) + + if pretrained: + checkpoint = ( + "https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_swin2_tiny_256.pt" + ) + state_dict = torch.hub.load_state_dict_from_url( + checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True + ) + model.load_state_dict(state_dict) + + return model + +def DPT_Swin_L_384(pretrained=True, **kwargs): + """ # This docstring shows up in hub.help() + MiDaS DPT_Swin_L_384 model for monocular depth estimation + pretrained (bool): load pretrained weights into model + """ + + model = DPTDepthModel( + path=None, + backbone="swinl12_384", + non_negative=True, + ) + + if pretrained: + checkpoint = ( + "https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_swin_large_384.pt" + ) + state_dict = torch.hub.load_state_dict_from_url( + checkpoint, map_location=torch.device('cpu'), progress=True, 
check_hash=True + ) + model.load_state_dict(state_dict) + + return model + +def DPT_Next_ViT_L_384(pretrained=True, **kwargs): + """ # This docstring shows up in hub.help() + MiDaS DPT_Next_ViT_L_384 model for monocular depth estimation + pretrained (bool): load pretrained weights into model + """ + + model = DPTDepthModel( + path=None, + backbone="next_vit_large_6m", + non_negative=True, + ) + + if pretrained: + checkpoint = ( + "https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_next_vit_large_384.pt" + ) + state_dict = torch.hub.load_state_dict_from_url( + checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True + ) + model.load_state_dict(state_dict) + + return model + +def DPT_LeViT_224(pretrained=True, **kwargs): + """ # This docstring shows up in hub.help() + MiDaS DPT_LeViT_224 model for monocular depth estimation + pretrained (bool): load pretrained weights into model + """ + + model = DPTDepthModel( + path=None, + backbone="levit_384", + non_negative=True, + head_features_1=64, + head_features_2=8, + ) + + if pretrained: + checkpoint = ( + "https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_levit_224.pt" + ) + state_dict = torch.hub.load_state_dict_from_url( + checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True + ) + model.load_state_dict(state_dict) + + return model + +def DPT_Large(pretrained=True, **kwargs): + """ # This docstring shows up in hub.help() + MiDaS DPT-Large model for monocular depth estimation + pretrained (bool): load pretrained weights into model + """ + + model = DPTDepthModel( + path=None, + backbone="vitl16_384", + non_negative=True, + ) + + if pretrained: + checkpoint = ( + "https://github.com/isl-org/MiDaS/releases/download/v3/dpt_large_384.pt" + ) + state_dict = torch.hub.load_state_dict_from_url( + checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True + ) + model.load_state_dict(state_dict) + + return model + +def DPT_Hybrid(pretrained=True, **kwargs): + """ # This docstring shows up in hub.help() + MiDaS DPT-Hybrid model for monocular depth estimation + pretrained (bool): load pretrained weights into model + """ + + model = DPTDepthModel( + path=None, + backbone="vitb_rn50_384", + non_negative=True, + ) + + if pretrained: + checkpoint = ( + "https://github.com/isl-org/MiDaS/releases/download/v3/dpt_hybrid_384.pt" + ) + state_dict = torch.hub.load_state_dict_from_url( + checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True + ) + model.load_state_dict(state_dict) + + return model + +def MiDaS(pretrained=True, **kwargs): + """ # This docstring shows up in hub.help() + MiDaS v2.1 model for monocular depth estimation + pretrained (bool): load pretrained weights into model + """ + + model = MidasNet() + + if pretrained: + checkpoint = ( + "https://github.com/isl-org/MiDaS/releases/download/v2_1/midas_v21_384.pt" + ) + state_dict = torch.hub.load_state_dict_from_url( + checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True + ) + model.load_state_dict(state_dict) + + return model + +def MiDaS_small(pretrained=True, **kwargs): + """ # This docstring shows up in hub.help() + MiDaS v2.1 small model for monocular depth estimation on resource-constrained devices + pretrained (bool): load pretrained weights into model + """ + + model = MidasNet_small(None, features=64, backbone="efficientnet_lite3", exportable=True, non_negative=True, blocks={'expand': True}) + + if pretrained: + checkpoint = ( + 
"https://github.com/isl-org/MiDaS/releases/download/v2_1/midas_v21_small_256.pt" + ) + state_dict = torch.hub.load_state_dict_from_url( + checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True + ) + model.load_state_dict(state_dict) + + return model + + +def transforms(): + import cv2 + from torchvision.transforms import Compose + from midas.transforms import Resize, NormalizeImage, PrepareForNet + from midas import transforms + + transforms.default_transform = Compose( + [ + lambda img: {"image": img / 255.0}, + Resize( + 384, + 384, + resize_target=None, + keep_aspect_ratio=True, + ensure_multiple_of=32, + resize_method="upper_bound", + image_interpolation_method=cv2.INTER_CUBIC, + ), + NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + PrepareForNet(), + lambda sample: torch.from_numpy(sample["image"]).unsqueeze(0), + ] + ) + + transforms.small_transform = Compose( + [ + lambda img: {"image": img / 255.0}, + Resize( + 256, + 256, + resize_target=None, + keep_aspect_ratio=True, + ensure_multiple_of=32, + resize_method="upper_bound", + image_interpolation_method=cv2.INTER_CUBIC, + ), + NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + PrepareForNet(), + lambda sample: torch.from_numpy(sample["image"]).unsqueeze(0), + ] + ) + + transforms.dpt_transform = Compose( + [ + lambda img: {"image": img / 255.0}, + Resize( + 384, + 384, + resize_target=None, + keep_aspect_ratio=True, + ensure_multiple_of=32, + resize_method="minimal", + image_interpolation_method=cv2.INTER_CUBIC, + ), + NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]), + PrepareForNet(), + lambda sample: torch.from_numpy(sample["image"]).unsqueeze(0), + ] + ) + + transforms.beit512_transform = Compose( + [ + lambda img: {"image": img / 255.0}, + Resize( + 512, + 512, + resize_target=None, + keep_aspect_ratio=True, + ensure_multiple_of=32, + resize_method="minimal", + image_interpolation_method=cv2.INTER_CUBIC, + ), + NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]), + PrepareForNet(), + lambda sample: torch.from_numpy(sample["image"]).unsqueeze(0), + ] + ) + + transforms.swin384_transform = Compose( + [ + lambda img: {"image": img / 255.0}, + Resize( + 384, + 384, + resize_target=None, + keep_aspect_ratio=False, + ensure_multiple_of=32, + resize_method="minimal", + image_interpolation_method=cv2.INTER_CUBIC, + ), + NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]), + PrepareForNet(), + lambda sample: torch.from_numpy(sample["image"]).unsqueeze(0), + ] + ) + + transforms.swin256_transform = Compose( + [ + lambda img: {"image": img / 255.0}, + Resize( + 256, + 256, + resize_target=None, + keep_aspect_ratio=False, + ensure_multiple_of=32, + resize_method="minimal", + image_interpolation_method=cv2.INTER_CUBIC, + ), + NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]), + PrepareForNet(), + lambda sample: torch.from_numpy(sample["image"]).unsqueeze(0), + ] + ) + + transforms.levit_transform = Compose( + [ + lambda img: {"image": img / 255.0}, + Resize( + 224, + 224, + resize_target=None, + keep_aspect_ratio=False, + ensure_multiple_of=32, + resize_method="minimal", + image_interpolation_method=cv2.INTER_CUBIC, + ), + NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]), + PrepareForNet(), + lambda sample: torch.from_numpy(sample["image"]).unsqueeze(0), + ] + ) + + return transforms diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/input/.placeholder 
b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/input/.placeholder new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/output/.placeholder b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/output/.placeholder new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/run.py b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/run.py new file mode 100644 index 0000000000000000000000000000000000000000..81ed8249631f1cc6f430871985a4e48cad63f85f --- /dev/null +++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/run.py @@ -0,0 +1,277 @@ +"""Compute depth maps for images in the input folder. +""" +import os +import glob +import torch +import utils +import cv2 +import argparse +import time + +import numpy as np + +from imutils.video import VideoStream +from midas.model_loader import default_models, load_model + +first_execution = True +def process(device, model, model_type, image, input_size, target_size, optimize, use_camera): + """ + Run the inference and interpolate. + + Args: + device (torch.device): the torch device used + model: the model used for inference + model_type: the type of the model + image: the image fed into the neural network + input_size: the size (width, height) of the neural network input (for OpenVINO) + target_size: the size (width, height) the neural network output is interpolated to + optimize: optimize the model to half-floats on CUDA? + use_camera: is the camera used? + + Returns: + the prediction + """ + global first_execution + + if "openvino" in model_type: + if first_execution or not use_camera: + print(f" Input resized to {input_size[0]}x{input_size[1]} before entering the encoder") + first_execution = False + + sample = [np.reshape(image, (1, 3, *input_size))] + prediction = model(sample)[model.output(0)][0] + prediction = cv2.resize(prediction, dsize=target_size, + interpolation=cv2.INTER_CUBIC) + else: + sample = torch.from_numpy(image).to(device).unsqueeze(0) + + if optimize and device == torch.device("cuda"): + if first_execution: + print(" Optimization to half-floats activated. Use with caution, because models like Swin require\n" + " float precision to work properly and may yield non-finite depth values to some extent for\n" + " half-floats.") + sample = sample.to(memory_format=torch.channels_last) + sample = sample.half() + + if first_execution or not use_camera: + height, width = sample.shape[2:] + print(f" Input resized to {width}x{height} before entering the encoder") + first_execution = False + + prediction = model.forward(sample) + prediction = ( + torch.nn.functional.interpolate( + prediction.unsqueeze(1), + size=target_size[::-1], + mode="bicubic", + align_corners=False, + ) + .squeeze() + .cpu() + .numpy() + ) + + return prediction + + +def create_side_by_side(image, depth, grayscale): + """ + Take an RGB image and depth map and place them side by side. This includes a proper normalization of the depth map + for better visibility. + + Args: + image: the RGB image + depth: the depth map + grayscale: use a grayscale colormap? 
+ + Returns: + the image and depth map place side by side + """ + depth_min = depth.min() + depth_max = depth.max() + normalized_depth = 255 * (depth - depth_min) / (depth_max - depth_min) + normalized_depth *= 3 + + right_side = np.repeat(np.expand_dims(normalized_depth, 2), 3, axis=2) / 3 + if not grayscale: + right_side = cv2.applyColorMap(np.uint8(right_side), cv2.COLORMAP_INFERNO) + + if image is None: + return right_side + else: + return np.concatenate((image, right_side), axis=1) + + +def run(input_path, output_path, model_path, model_type="dpt_beit_large_512", optimize=False, side=False, height=None, + square=False, grayscale=False): + """Run MonoDepthNN to compute depth maps. + + Args: + input_path (str): path to input folder + output_path (str): path to output folder + model_path (str): path to saved model + model_type (str): the model type + optimize (bool): optimize the model to half-floats on CUDA? + side (bool): RGB and depth side by side in output images? + height (int): inference encoder image height + square (bool): resize to a square resolution? + grayscale (bool): use a grayscale colormap? + """ + print("Initialize") + + # select device + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print("Device: %s" % device) + + model, transform, net_w, net_h = load_model(device, model_path, model_type, optimize, height, square) + + # get input + if input_path is not None: + image_names = glob.glob(os.path.join(input_path, "*")) + num_images = len(image_names) + else: + print("No input path specified. Grabbing images from camera.") + + # create output folder + if output_path is not None: + os.makedirs(output_path, exist_ok=True) + + print("Start processing") + + if input_path is not None: + if output_path is None: + print("Warning: No output path specified. 
Images will be processed but not shown or stored anywhere.") + for index, image_name in enumerate(image_names): + + print(" Processing {} ({}/{})".format(image_name, index + 1, num_images)) + + # input + original_image_rgb = utils.read_image(image_name) # in [0, 1] + image = transform({"image": original_image_rgb})["image"] + + # compute + with torch.no_grad(): + prediction = process(device, model, model_type, image, (net_w, net_h), original_image_rgb.shape[1::-1], + optimize, False) + + # output + if output_path is not None: + filename = os.path.join( + output_path, os.path.splitext(os.path.basename(image_name))[0] + '-' + model_type + ) + if not side: + utils.write_depth(filename, prediction, grayscale, bits=2) + else: + original_image_bgr = np.flip(original_image_rgb, 2) + content = create_side_by_side(original_image_bgr*255, prediction, grayscale) + cv2.imwrite(filename + ".png", content) + utils.write_pfm(filename + ".pfm", prediction.astype(np.float32)) + + else: + with torch.no_grad(): + fps = 1 + video = VideoStream(0).start() + time_start = time.time() + frame_index = 0 + while True: + frame = video.read() + if frame is not None: + original_image_rgb = np.flip(frame, 2) # in [0, 255] (flip required to get RGB) + image = transform({"image": original_image_rgb/255})["image"] + + prediction = process(device, model, model_type, image, (net_w, net_h), + original_image_rgb.shape[1::-1], optimize, True) + + original_image_bgr = np.flip(original_image_rgb, 2) if side else None + content = create_side_by_side(original_image_bgr, prediction, grayscale) + cv2.imshow('MiDaS Depth Estimation - Press Escape to close window ', content/255) + + if output_path is not None: + filename = os.path.join(output_path, 'Camera' + '-' + model_type + '_' + str(frame_index)) + cv2.imwrite(filename + ".png", content) + + alpha = 0.1 + if time.time()-time_start > 0: + fps = (1 - alpha) * fps + alpha * 1 / (time.time()-time_start) # exponential moving average + time_start = time.time() + print(f"\rFPS: {round(fps,2)}", end="") + + if cv2.waitKey(1) == 27: # Escape key + break + + frame_index += 1 + print() + + print("Finished") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + + parser.add_argument('-i', '--input_path', + default=None, + help='Folder with input images (if no input path is specified, images are tried to be grabbed ' + 'from camera)' + ) + + parser.add_argument('-o', '--output_path', + default=None, + help='Folder for output images' + ) + + parser.add_argument('-m', '--model_weights', + default=None, + help='Path to the trained weights of model' + ) + + parser.add_argument('-t', '--model_type', + default='dpt_beit_large_512', + help='Model type: ' + 'dpt_beit_large_512, dpt_beit_large_384, dpt_beit_base_384, dpt_swin2_large_384, ' + 'dpt_swin2_base_384, dpt_swin2_tiny_256, dpt_swin_large_384, dpt_next_vit_large_384, ' + 'dpt_levit_224, dpt_large_384, dpt_hybrid_384, midas_v21_384, midas_v21_small_256 or ' + 'openvino_midas_v21_small_256' + ) + + parser.add_argument('-s', '--side', + action='store_true', + help='Output images contain RGB and depth images side by side' + ) + + parser.add_argument('--optimize', dest='optimize', action='store_true', help='Use half-float optimization') + parser.set_defaults(optimize=False) + + parser.add_argument('--height', + type=int, default=None, + help='Preferred height of images feed into the encoder during inference. 
Note that the ' + 'preferred height may differ from the actual height, because an alignment to multiples of ' + '32 takes place. Many models support only the height chosen during training, which is ' + 'used automatically if this parameter is not set.' + ) + parser.add_argument('--square', + action='store_true', + help='Option to resize images to a square resolution by changing their widths when images are ' + 'fed into the encoder during inference. If this parameter is not set, the aspect ratio of ' + 'images is tried to be preserved if supported by the model.' + ) + parser.add_argument('--grayscale', + action='store_true', + help='Use a grayscale colormap instead of the inferno one. Although the inferno colormap, ' + 'which is used by default, is better for visibility, it does not allow storing 16-bit ' + 'depth values in PNGs but only 8-bit ones due to the precision limitation of this ' + 'colormap.' + ) + + args = parser.parse_args() + + + if args.model_weights is None: + args.model_weights = default_models[args.model_type] + + # set torch options + torch.backends.cudnn.enabled = True + torch.backends.cudnn.benchmark = True + + # compute depth maps + run(args.input_path, args.output_path, args.model_weights, args.model_type, args.optimize, args.side, args.height, + args.square, args.grayscale) diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/utils.py b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d836b7e196be6db8c0b145fe3b03357a10b4725d --- /dev/null +++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/utils.py @@ -0,0 +1,199 @@ +"""Utils for monoDepth. +""" +import sys +import re +import numpy as np +import cv2 +import torch + + +def read_pfm(path): + """Read pfm file. + + Args: + path (str): path to file + + Returns: + tuple: (data, scale) + """ + with open(path, "rb") as file: + + color = None + width = None + height = None + scale = None + endian = None + + header = file.readline().rstrip() + if header.decode("ascii") == "PF": + color = True + elif header.decode("ascii") == "Pf": + color = False + else: + raise Exception("Not a PFM file: " + path) + + dim_match = re.match(r"^(\d+)\s(\d+)\s$", file.readline().decode("ascii")) + if dim_match: + width, height = list(map(int, dim_match.groups())) + else: + raise Exception("Malformed PFM header.") + + scale = float(file.readline().decode("ascii").rstrip()) + if scale < 0: + # little-endian + endian = "<" + scale = -scale + else: + # big-endian + endian = ">" + + data = np.fromfile(file, endian + "f") + shape = (height, width, 3) if color else (height, width) + + data = np.reshape(data, shape) + data = np.flipud(data) + + return data, scale + + +def write_pfm(path, image, scale=1): + """Write pfm file. + + Args: + path (str): pathto file + image (array): data + scale (int, optional): Scale. Defaults to 1. 
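+
+    Note: the sign of the scale value written to the header encodes byte order;
+    a negative scale marks little-endian data, mirroring how read_pfm above
+    interprets it.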
+ """ + + with open(path, "wb") as file: + color = None + + if image.dtype.name != "float32": + raise Exception("Image dtype must be float32.") + + image = np.flipud(image) + + if len(image.shape) == 3 and image.shape[2] == 3: # color image + color = True + elif ( + len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1 + ): # greyscale + color = False + else: + raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.") + + file.write("PF\n" if color else "Pf\n".encode()) + file.write("%d %d\n".encode() % (image.shape[1], image.shape[0])) + + endian = image.dtype.byteorder + + if endian == "<" or endian == "=" and sys.byteorder == "little": + scale = -scale + + file.write("%f\n".encode() % scale) + + image.tofile(file) + + +def read_image(path): + """Read image and output RGB image (0-1). + + Args: + path (str): path to file + + Returns: + array: RGB image (0-1) + """ + img = cv2.imread(path) + + if img.ndim == 2: + img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) + + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255.0 + + return img + + +def resize_image(img): + """Resize image and make it fit for network. + + Args: + img (array): image + + Returns: + tensor: data ready for network + """ + height_orig = img.shape[0] + width_orig = img.shape[1] + + if width_orig > height_orig: + scale = width_orig / 384 + else: + scale = height_orig / 384 + + height = (np.ceil(height_orig / scale / 32) * 32).astype(int) + width = (np.ceil(width_orig / scale / 32) * 32).astype(int) + + img_resized = cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA) + + img_resized = ( + torch.from_numpy(np.transpose(img_resized, (2, 0, 1))).contiguous().float() + ) + img_resized = img_resized.unsqueeze(0) + + return img_resized + + +def resize_depth(depth, width, height): + """Resize depth map and bring to CPU (numpy). + + Args: + depth (tensor): depth + width (int): image width + height (int): image height + + Returns: + array: processed depth + """ + depth = torch.squeeze(depth[0, :, :, :]).to("cpu") + + depth_resized = cv2.resize( + depth.numpy(), (width, height), interpolation=cv2.INTER_CUBIC + ) + + return depth_resized + +def write_depth(path, depth, grayscale, bits=1): + """Write depth map to png file. + + Args: + path (str): filepath without extension + depth (array): depth + grayscale (bool): use a grayscale colormap? 
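+        bits (int, optional): bytes per depth value in the written PNG; 1 gives an
+            8-bit image, 2 a 16-bit image. Forced back to 1 when a colormap is
+            applied. Defaults to 1.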
+ """ + if not grayscale: + bits = 1 + + if not np.isfinite(depth).all(): + depth=np.nan_to_num(depth, nan=0.0, posinf=0.0, neginf=0.0) + print("WARNING: Non-finite depth values present") + + depth_min = depth.min() + depth_max = depth.max() + + max_val = (2**(8*bits))-1 + + if depth_max - depth_min > np.finfo("float").eps: + out = max_val * (depth - depth_min) / (depth_max - depth_min) + else: + out = np.zeros(depth.shape, dtype=depth.dtype) + + if not grayscale: + out = cv2.applyColorMap(np.uint8(out), cv2.COLORMAP_INFERNO) + + if bits == 1: + cv2.imwrite(path + ".png", out.astype("uint8")) + elif bits == 2: + cv2.imwrite(path + ".png", out.astype("uint16")) + + return diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/weights/.placeholder b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/weights/.placeholder new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/RAVE-main/annotator/zoe/zoedepth/models/zoedepth/__init__.py b/RAVE-main/annotator/zoe/zoedepth/models/zoedepth/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f1e9a694852aaa28c500419d413ea8a572338e18 --- /dev/null +++ b/RAVE-main/annotator/zoe/zoedepth/models/zoedepth/__init__.py @@ -0,0 +1,31 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
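+
+# Minimal version registry for ZoeDepth (single metric head): all_versions maps
+# version strings to model classes, and get_version("v1") returns the ZoeDepth class.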
+ +# File author: Shariq Farooq Bhat + +from .zoedepth_v1 import ZoeDepth + +all_versions = { + "v1": ZoeDepth, +} + +get_version = lambda v : all_versions[v] \ No newline at end of file diff --git a/RAVE-main/annotator/zoe/zoedepth/models/zoedepth/__pycache__/__init__.cpython-38.pyc b/RAVE-main/annotator/zoe/zoedepth/models/zoedepth/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7278ac58c648c1ab11c024eda89cef8c7bf78dfe Binary files /dev/null and b/RAVE-main/annotator/zoe/zoedepth/models/zoedepth/__pycache__/__init__.cpython-38.pyc differ diff --git a/RAVE-main/annotator/zoe/zoedepth/models/zoedepth/__pycache__/zoedepth_v1.cpython-38.pyc b/RAVE-main/annotator/zoe/zoedepth/models/zoedepth/__pycache__/zoedepth_v1.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..24a81fe566b736c5c5eb9f4e84b95b44eedcf557 Binary files /dev/null and b/RAVE-main/annotator/zoe/zoedepth/models/zoedepth/__pycache__/zoedepth_v1.cpython-38.pyc differ diff --git a/RAVE-main/annotator/zoe/zoedepth/models/zoedepth/config_zoedepth.json b/RAVE-main/annotator/zoe/zoedepth/models/zoedepth/config_zoedepth.json new file mode 100644 index 0000000000000000000000000000000000000000..dfc9fa7b17615cf557b6ad01c8fedd6c0c32e88f --- /dev/null +++ b/RAVE-main/annotator/zoe/zoedepth/models/zoedepth/config_zoedepth.json @@ -0,0 +1,58 @@ +{ + "model": { + "name": "ZoeDepth", + "version_name": "v1", + "n_bins": 64, + "bin_embedding_dim": 128, + "bin_centers_type": "softplus", + "n_attractors":[16, 8, 4, 1], + "attractor_alpha": 1000, + "attractor_gamma": 2, + "attractor_kind" : "mean", + "attractor_type" : "inv", + "midas_model_type" : "DPT_BEiT_L_384", + "min_temp": 0.0212, + "max_temp": 50.0, + "output_distribution": "logbinomial", + "memory_efficient": true, + "inverse_midas": false, + "img_size": [384, 512] + }, + + "train": { + "train_midas": true, + "use_pretrained_midas": true, + "trainer": "zoedepth", + "epochs": 5, + "bs": 16, + "optim_kwargs": {"lr": 0.000161, "wd": 0.01}, + "sched_kwargs": {"div_factor": 1, "final_div_factor": 10000, "pct_start": 0.7, "three_phase":false, "cycle_momentum": true}, + "same_lr": false, + "w_si": 1, + "w_domain": 0.2, + "w_reg": 0, + "w_grad": 0, + "avoid_boundary": false, + "random_crop": false, + "input_width": 640, + "input_height": 480, + "midas_lr_factor": 1, + "encoder_lr_factor":10, + "pos_enc_lr_factor":10, + "freeze_midas_bn": true + + }, + + "infer":{ + "train_midas": false, + "use_pretrained_midas": false, + "pretrained_resource" : null, + "force_keep_ar": true + }, + + "eval":{ + "train_midas": false, + "use_pretrained_midas": false, + "pretrained_resource" : null + } +} \ No newline at end of file diff --git a/RAVE-main/annotator/zoe/zoedepth/models/zoedepth/config_zoedepth_kitti.json b/RAVE-main/annotator/zoe/zoedepth/models/zoedepth/config_zoedepth_kitti.json new file mode 100644 index 0000000000000000000000000000000000000000..3e7266ec2d7e918143f54ee728ea4d8d4e9adb11 --- /dev/null +++ b/RAVE-main/annotator/zoe/zoedepth/models/zoedepth/config_zoedepth_kitti.json @@ -0,0 +1,22 @@ +{ + "model": { + "bin_centers_type": "normed", + "img_size": [384, 768] + }, + + "train": { + }, + + "infer":{ + "train_midas": false, + "use_pretrained_midas": false, + "pretrained_resource" : "url::https://github.com/isl-org/ZoeDepth/releases/download/v1.0/ZoeD_M12_K.pt", + "force_keep_ar": true + }, + + "eval":{ + "train_midas": false, + "use_pretrained_midas": false, + "pretrained_resource" : 
"url::https://github.com/isl-org/ZoeDepth/releases/download/v1.0/ZoeD_M12_K.pt" + } +} \ No newline at end of file diff --git a/RAVE-main/annotator/zoe/zoedepth/models/zoedepth/zoedepth_v1.py b/RAVE-main/annotator/zoe/zoedepth/models/zoedepth/zoedepth_v1.py new file mode 100644 index 0000000000000000000000000000000000000000..4d5e8c2d272c3ae5f5ff1025aa96e6653b23d82f --- /dev/null +++ b/RAVE-main/annotator/zoe/zoedepth/models/zoedepth/zoedepth_v1.py @@ -0,0 +1,250 @@ +# MIT License + +# Copyright (c) 2022 Intelligent Systems Lab Org + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# File author: Shariq Farooq Bhat + +import itertools + +import torch +import torch.nn as nn +from ..depth_model import DepthModel +from ..base_models.midas import MidasCore +from ..layers.attractor import AttractorLayer, AttractorLayerUnnormed +from ..layers.dist_layers import ConditionalLogBinomial +from ..layers.localbins_layers import (Projector, SeedBinRegressor, + SeedBinRegressorUnnormed) +from ..model_io import load_state_from_resource + + +class ZoeDepth(DepthModel): + def __init__(self, core, n_bins=64, bin_centers_type="softplus", bin_embedding_dim=128, min_depth=1e-3, max_depth=10, + n_attractors=[16, 8, 4, 1], attractor_alpha=300, attractor_gamma=2, attractor_kind='sum', attractor_type='exp', min_temp=5, max_temp=50, train_midas=True, + midas_lr_factor=10, encoder_lr_factor=10, pos_enc_lr_factor=10, inverse_midas=False, **kwargs): + """ZoeDepth model. This is the version of ZoeDepth that has a single metric head + + Args: + core (models.base_models.midas.MidasCore): The base midas model that is used for extraction of "relative" features + n_bins (int, optional): Number of bin centers. Defaults to 64. + bin_centers_type (str, optional): "normed" or "softplus". Activation type used for bin centers. For "normed" bin centers, linear normalization trick is applied. This results in bounded bin centers. + For "softplus", softplus activation is used and thus are unbounded. Defaults to "softplus". + bin_embedding_dim (int, optional): bin embedding dimension. Defaults to 128. + min_depth (float, optional): Lower bound for normed bin centers. Defaults to 1e-3. + max_depth (float, optional): Upper bound for normed bin centers. Defaults to 10. + n_attractors (List[int], optional): Number of bin attractors at decoder layers. Defaults to [16, 8, 4, 1]. + attractor_alpha (int, optional): Proportional attractor strength. Refer to models.layers.attractor for more details. Defaults to 300. 
+ attractor_gamma (int, optional): Exponential attractor strength. Refer to models.layers.attractor for more details. Defaults to 2. + attractor_kind (str, optional): Attraction aggregation "sum" or "mean". Defaults to 'sum'. + attractor_type (str, optional): Type of attractor to use; "inv" (Inverse attractor) or "exp" (Exponential attractor). Defaults to 'exp'. + min_temp (int, optional): Lower bound for temperature of output probability distribution. Defaults to 5. + max_temp (int, optional): Upper bound for temperature of output probability distribution. Defaults to 50. + train_midas (bool, optional): Whether to train "core", the base midas model. Defaults to True. + midas_lr_factor (int, optional): Learning rate reduction factor for base midas model except its encoder and positional encodings. Defaults to 10. + encoder_lr_factor (int, optional): Learning rate reduction factor for the encoder in midas model. Defaults to 10. + pos_enc_lr_factor (int, optional): Learning rate reduction factor for positional encodings in the base midas model. Defaults to 10. + """ + super().__init__() + + self.core = core + self.max_depth = max_depth + self.min_depth = min_depth + self.min_temp = min_temp + self.bin_centers_type = bin_centers_type + + self.midas_lr_factor = midas_lr_factor + self.encoder_lr_factor = encoder_lr_factor + self.pos_enc_lr_factor = pos_enc_lr_factor + self.train_midas = train_midas + self.inverse_midas = inverse_midas + + if self.encoder_lr_factor <= 0: + self.core.freeze_encoder( + freeze_rel_pos=self.pos_enc_lr_factor <= 0) + + N_MIDAS_OUT = 32 + btlnck_features = self.core.output_channels[0] + num_out_features = self.core.output_channels[1:] + + self.conv2 = nn.Conv2d(btlnck_features, btlnck_features, + kernel_size=1, stride=1, padding=0) # btlnck conv + + if bin_centers_type == "normed": + SeedBinRegressorLayer = SeedBinRegressor + Attractor = AttractorLayer + elif bin_centers_type == "softplus": + SeedBinRegressorLayer = SeedBinRegressorUnnormed + Attractor = AttractorLayerUnnormed + elif bin_centers_type == "hybrid1": + SeedBinRegressorLayer = SeedBinRegressor + Attractor = AttractorLayerUnnormed + elif bin_centers_type == "hybrid2": + SeedBinRegressorLayer = SeedBinRegressorUnnormed + Attractor = AttractorLayer + else: + raise ValueError( + "bin_centers_type should be one of 'normed', 'softplus', 'hybrid1', 'hybrid2'") + + self.seed_bin_regressor = SeedBinRegressorLayer( + btlnck_features, n_bins=n_bins, min_depth=min_depth, max_depth=max_depth) + self.seed_projector = Projector(btlnck_features, bin_embedding_dim) + self.projectors = nn.ModuleList([ + Projector(num_out, bin_embedding_dim) + for num_out in num_out_features + ]) + self.attractors = nn.ModuleList([ + Attractor(bin_embedding_dim, n_bins, n_attractors=n_attractors[i], min_depth=min_depth, max_depth=max_depth, + alpha=attractor_alpha, gamma=attractor_gamma, kind=attractor_kind, attractor_type=attractor_type) + for i in range(len(num_out_features)) + ]) + + last_in = N_MIDAS_OUT + 1 # +1 for relative depth + + # use log binomial instead of softmax + self.conditional_log_binomial = ConditionalLogBinomial( + last_in, bin_embedding_dim, n_classes=n_bins, min_temp=min_temp, max_temp=max_temp) + + def forward(self, x, return_final_centers=False, denorm=False, return_probs=False, **kwargs): + """ + Args: + x (torch.Tensor): Input image tensor of shape (B, C, H, W) + return_final_centers (bool, optional): Whether to return the final bin centers. Defaults to False. 
+ denorm (bool, optional): Whether to denormalize the input image. This reverses ImageNet normalization as midas normalization is different. Defaults to False. + return_probs (bool, optional): Whether to return the output probability distribution. Defaults to False. + + Returns: + dict: Dictionary containing the following keys: + - rel_depth (torch.Tensor): Relative depth map of shape (B, H, W) + - metric_depth (torch.Tensor): Metric depth map of shape (B, 1, H, W) + - bin_centers (torch.Tensor): Bin centers of shape (B, n_bins). Present only if return_final_centers is True + - probs (torch.Tensor): Output probability distribution of shape (B, n_bins, H, W). Present only if return_probs is True + + """ + b, c, h, w = x.shape + # print("input shape ", x.shape) + self.orig_input_width = w + self.orig_input_height = h + rel_depth, out = self.core(x, denorm=denorm, return_rel_depth=True) + # print("output shapes", rel_depth.shape, out.shape) + + outconv_activation = out[0] + btlnck = out[1] + x_blocks = out[2:] + + x_d0 = self.conv2(btlnck) + x = x_d0 + _, seed_b_centers = self.seed_bin_regressor(x) + + if self.bin_centers_type == 'normed' or self.bin_centers_type == 'hybrid2': + b_prev = (seed_b_centers - self.min_depth) / \ + (self.max_depth - self.min_depth) + else: + b_prev = seed_b_centers + + prev_b_embedding = self.seed_projector(x) + + # unroll this loop for better performance + for projector, attractor, x in zip(self.projectors, self.attractors, x_blocks): + b_embedding = projector(x) + b, b_centers = attractor( + b_embedding, b_prev, prev_b_embedding, interpolate=True) + b_prev = b.clone() + prev_b_embedding = b_embedding.clone() + + last = outconv_activation + + if self.inverse_midas: + # invert depth followed by normalization + rel_depth = 1.0 / (rel_depth + 1e-6) + rel_depth = (rel_depth - rel_depth.min()) / \ + (rel_depth.max() - rel_depth.min()) + # concat rel depth with last. First interpolate rel depth to last size + rel_cond = rel_depth.unsqueeze(1) + rel_cond = nn.functional.interpolate( + rel_cond, size=last.shape[2:], mode='bilinear', align_corners=True) + last = torch.cat([last, rel_cond], dim=1) + + b_embedding = nn.functional.interpolate( + b_embedding, last.shape[-2:], mode='bilinear', align_corners=True) + x = self.conditional_log_binomial(last, b_embedding) + + # Now depth value is Sum px * cx , where cx are bin_centers from the last bin tensor + # print(x.shape, b_centers.shape) + b_centers = nn.functional.interpolate( + b_centers, x.shape[-2:], mode='bilinear', align_corners=True) + out = torch.sum(x * b_centers, dim=1, keepdim=True) + + # Structure output dict + output = dict(metric_depth=out) + if return_final_centers or return_probs: + output['bin_centers'] = b_centers + + if return_probs: + output['probs'] = x + + return output + + def get_lr_params(self, lr): + """ + Learning rate configuration for different layers of the model + Args: + lr (float) : Base learning rate + Returns: + list : list of parameters to optimize and their learning rates, in the format required by torch optimizers. 
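+
+        Example (illustrative sketch only; the optimizer choice is an assumption,
+        with the lr / weight-decay values borrowed from config_zoedepth.json):
+
+            param_groups = model.get_lr_params(lr=1.61e-4)
+            optimizer = torch.optim.AdamW(param_groups, weight_decay=0.01)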
+ """ + param_conf = [] + if self.train_midas: + if self.encoder_lr_factor > 0: + param_conf.append({'params': self.core.get_enc_params_except_rel_pos( + ), 'lr': lr / self.encoder_lr_factor}) + + if self.pos_enc_lr_factor > 0: + param_conf.append( + {'params': self.core.get_rel_pos_params(), 'lr': lr / self.pos_enc_lr_factor}) + + midas_params = self.core.core.scratch.parameters() + midas_lr_factor = self.midas_lr_factor + param_conf.append( + {'params': midas_params, 'lr': lr / midas_lr_factor}) + + remaining_modules = [] + for name, child in self.named_children(): + if name != 'core': + remaining_modules.append(child) + remaining_params = itertools.chain( + *[child.parameters() for child in remaining_modules]) + + param_conf.append({'params': remaining_params, 'lr': lr}) + + return param_conf + + @staticmethod + def build(midas_model_type="DPT_BEiT_L_384", pretrained_resource=None, use_pretrained_midas=False, train_midas=False, freeze_midas_bn=True, **kwargs): + core = MidasCore.build(midas_model_type=midas_model_type, use_pretrained_midas=use_pretrained_midas, + train_midas=train_midas, fetch_features=True, freeze_bn=freeze_midas_bn, **kwargs) + model = ZoeDepth(core, **kwargs) + if pretrained_resource: + assert isinstance(pretrained_resource, str), "pretrained_resource must be a string" + model = load_state_from_resource(model, pretrained_resource) + return model + + @staticmethod + def build_from_config(config): + return ZoeDepth.build(**config) diff --git a/RAVE-main/configs/truck-multicontrolnet.yaml b/RAVE-main/configs/truck-multicontrolnet.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b79160e73c38c296fc0bb30a36076cb160579560 --- /dev/null +++ b/RAVE-main/configs/truck-multicontrolnet.yaml @@ -0,0 +1,35 @@ +# to perform grid search for any parameters, write them as a list. e.g. to try batch sizes of 2,3,4, write batch_size: [2,3,4] +# to use 2 controls for controlnet, separate the names of the preprocesses and their scales with '-'. e.g. preprocess_name: 'depth_zoe-lineart_realistic' control +# controlnet_conditioning_scale: '0.75-0.25' + +video_name: "truck" # specify the name of the video existing in the data/mp4_videos folder (e.g. data/mp4_videos/truck.mp4) +preprocess_name: 'depth_zoe-lineart_realistic' # specify the name of the condition used for the video (e.g. depth_zoe condition) + +batch_size: 3 # denotes the batch size of grids (e.g. 4 grids run in parallel) +batch_size_vae: 1 # denotes the batch size for the VAE (e.g. 1 grid runs in parallel for the VAE) + +cond_step_start: 0.0 # denotes the step to start conditioning + +controlnet_conditioning_scale: '0.6-0.4' # denotes the scale of the conditioning +controlnet_guidance_end: 1.0 # denotes the end of the controlnet guidance +controlnet_guidance_start: 0.0 # denotes the start of the controlnet guidance + +give_control_inversion: true # denotes whether to give control to the inversion + +grid_size: 3 # denotes the size of each grid (e.g. 
grid_size x grid_size) +sample_size: -1 # denotes the number of grids to be generated (-1 for the full video) +pad: 1 # denotes the padding of the video (if 1, use the same video) +guidance_scale: 7.5 # denotes the scale of the guidance +inversion_prompt: '' # denotes the inversion prompt + +is_ddim_inversion: true # denotes whether to use ddim for inversion +is_shuffle: true # denotes whether to applying shuffling between the grids + +negative_prompts: "" # denotes the negative prompts +num_inference_steps: 50 # denotes the number of inference steps during the sampling process +num_inversion_step: 50 # denotes the number of inversion steps during the inversion process +positive_prompts: "Wooden trucks drive on a racetrack" # denotes the positive prompts +save_folder: 'truck' # denotes the name of the folder to save the results under results + +seed: 0 # denotes the seed +model_id: 'None' # None to use stable diffusion v1.5, otherwise use the model id \ No newline at end of file diff --git a/RAVE-main/configs/truck-multiple_parameters.yaml b/RAVE-main/configs/truck-multiple_parameters.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f1e5ddb7221acfc235694e3495cf4a1bfb8b6d08 --- /dev/null +++ b/RAVE-main/configs/truck-multiple_parameters.yaml @@ -0,0 +1,35 @@ +# to perform grid search for any parameters, write them as a list. e.g. to try batch sizes of 2,3,4, write batch_size: [2,3,4] +# to use 2 controls for controlnet, separate the names of the preprocesses and their scales with '-'. e.g. preprocess_name: 'depth_zoe-lineart_realistic' control +# controlnet_conditioning_scale: '0.75-0.25' + +video_name: "truck" # specify the name of the video existing in the data/mp4_videos folder (e.g. data/mp4_videos/truck.mp4) +preprocess_name: ['depth_zoe', 'lineart_realistic', 'softedge_hed'] # specify the name of the condition used for the video (e.g. depth_zoe condition) + +batch_size: 4 # denotes the batch size of grids (e.g. 4 grids run in parallel) +batch_size_vae: 1 # denotes the batch size for the VAE (e.g. 1 grid runs in parallel for the VAE) + +cond_step_start: 0.0 # denotes the step to start conditioning + +controlnet_conditioning_scale: 1.0 # denotes the scale of the conditioning +controlnet_guidance_end: 1.0 # denotes the end of the controlnet guidance +controlnet_guidance_start: 0.0 # denotes the start of the controlnet guidance + +give_control_inversion: true # denotes whether to give control to the inversion + +grid_size: 3 # denotes the size of each grid (e.g. 
grid_size x grid_size) +sample_size: -1 # denotes the number of grids to be generated (-1 for the full video) +pad: 1 # denotes the padding of the video (if 1, use the same video) +guidance_scale: 7.5 # denotes the scale of the guidance +inversion_prompt: '' # denotes the inversion prompt + +is_ddim_inversion: true # denotes whether to use ddim for inversion +is_shuffle: true # denotes whether to applying shuffling between the grids + +negative_prompts: "" # denotes the negative prompts +num_inference_steps: 50 # denotes the number of inference steps during the sampling process +num_inversion_step: 50 # denotes the number of inversion steps during the inversion process +positive_prompts: "Wooden trucks drive on a racetrack" # denotes the positive prompts +save_folder: 'truck' # denotes the name of the folder to save the results under results + +seed: 0 # denotes the seed +model_id: 'None' # None to use stable diffusion v1.5, otherwise use the model id \ No newline at end of file diff --git a/RAVE-main/configs/truck-realisticv5.1.yaml b/RAVE-main/configs/truck-realisticv5.1.yaml new file mode 100644 index 0000000000000000000000000000000000000000..55f144c4f85de5a52bb4edac301f93d2a1814999 --- /dev/null +++ b/RAVE-main/configs/truck-realisticv5.1.yaml @@ -0,0 +1,35 @@ +# to perform grid search for any parameters, write them as a list. e.g. to try batch sizes of 2,3,4, write batch_size: [2,3,4] +# to use 2 controls for controlnet, separate the names of the preprocesses and their scales with '-'. e.g. preprocess_name: 'depth_zoe-lineart_realistic' control +# controlnet_conditioning_scale: '0.75-0.25' + +video_name: "truck" # specify the name of the video existing in the data/mp4_videos folder (e.g. data/mp4_videos/truck.mp4) +preprocess_name: 'depth_zoe' # specify the name of the condition used for the video (e.g. depth_zoe condition) + +batch_size: 4 # denotes the batch size of grids (e.g. 4 grids run in parallel) +batch_size_vae: 1 # denotes the batch size for the VAE (e.g. 1 grid runs in parallel for the VAE) + +cond_step_start: 0.0 # denotes the step to start conditioning + +controlnet_conditioning_scale: 1.0 # denotes the scale of the conditioning +controlnet_guidance_end: 1.0 # denotes the end of the controlnet guidance +controlnet_guidance_start: 0.0 # denotes the start of the controlnet guidance + +give_control_inversion: true # denotes whether to give control to the inversion + +grid_size: 3 # denotes the size of each grid (e.g. 
grid_size x grid_size) +sample_size: -1 # denotes the number of grids to be generated (-1 for the full video) +pad: 1 # denotes the padding of the video (if 1, use the same video) +guidance_scale: 7.5 # denotes the scale of the guidance +inversion_prompt: '' # denotes the inversion prompt + +is_ddim_inversion: true # denotes whether to use ddim for inversion +is_shuffle: true # denotes whether to applying shuffling between the grids + +negative_prompts: "" # denotes the negative prompts +num_inference_steps: 50 # denotes the number of inference steps during the sampling process +num_inversion_step: 50 # denotes the number of inversion steps during the inversion process +positive_prompts: "Wooden trucks drive on a racetrack" # denotes the positive prompts +save_folder: 'truck' # denotes the name of the folder to save the results under results + +seed: 0 # denotes the seed +model_id: ['CIVIT_AI/diffusers_models/realisticVisionV60B1_v51VAE'] # None to use stable diffusion v1.5, otherwise use the path of the civitai model \ No newline at end of file diff --git a/RAVE-main/configs/truck.yaml b/RAVE-main/configs/truck.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a9db8658852af079c718b492244e114dc20afd4c --- /dev/null +++ b/RAVE-main/configs/truck.yaml @@ -0,0 +1,35 @@ +# to perform grid search for any parameters, write them as a list. e.g. to try batch sizes of 2,3,4, write batch_size: [2,3,4] +# to use 2 controls for controlnet, separate the names of the preprocesses and their scales with '-'. e.g. preprocess_name: 'depth_zoe-lineart_realistic' control +# controlnet_conditioning_scale: '0.75-0.25' + +video_name: "truck" # specify the name of the video existing in the data/mp4_videos folder (e.g. data/mp4_videos/truck.mp4) +preprocess_name: 'depth_zoe' # specify the name of the condition used for the video (e.g. depth_zoe condition) + +batch_size: 4 # denotes the batch size of grids (e.g. 4 grids run in parallel) +batch_size_vae: 1 # denotes the batch size for the VAE (e.g. 1 grid runs in parallel for the VAE) + +cond_step_start: 0.0 # denotes the step to start conditioning + +controlnet_conditioning_scale: 1.0 # denotes the scale of the conditioning +controlnet_guidance_end: 1.0 # denotes the end of the controlnet guidance +controlnet_guidance_start: 0.0 # denotes the start of the controlnet guidance + +give_control_inversion: true # denotes whether to give control to the inversion + +grid_size: 3 # denotes the size of each grid (e.g. 
grid_size x grid_size) +sample_size: -1 # denotes the number of grids to be generated (-1 for the full video) +pad: 1 # denotes the padding of the video (if 1, use the same video) +guidance_scale: 7.5 # denotes the scale of the guidance +inversion_prompt: '' # denotes the inversion prompt + +is_ddim_inversion: true # denotes whether to use ddim for inversion +is_shuffle: true # denotes whether to applying shuffling between the grids + +negative_prompts: "" # denotes the negative prompts +num_inference_steps: 50 # denotes the number of inference steps during the sampling process +num_inversion_step: 50 # denotes the number of inversion steps during the inversion process +positive_prompts: "Wooden trucks drive on a racetrack" # denotes the positive prompts +save_folder: 'truck' # denotes the name of the folder to save the results under results + +seed: 0 # denotes the seed +model_id: 'None' # None to use stable diffusion v1.5, otherwise use the model id \ No newline at end of file diff --git a/RAVE-main/demo_notebook.ipynb b/RAVE-main/demo_notebook.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..0badf8d40cf584fd12fcb84a9de5d4aeed9b4c43 --- /dev/null +++ b/RAVE-main/demo_notebook.ipynb @@ -0,0 +1,264 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "import argparse\n", + "import os\n", + "import sys\n", + "import yaml\n", + "import datetime\n", + "sys.path.append('/'.join(os.getcwd().split('/')[:-1]))\n", + "from pipelines.sd_controlnet_rave import RAVE\n", + "from pipelines.sd_multicontrolnet_rave import RAVE_MultiControlNet\n", + "from IPython.display import Video, Image\n", + "import PIL\n", + "import utils.constants as const\n", + "import utils.video_grid_utils as vgu\n", + "import warnings\n", + "warnings.filterwarnings(\"ignore\")\n", + "import pprint \n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "def init_device():\n", + " device_name = 'cuda' if torch.cuda.is_available() else 'cpu'\n", + " device = torch.device(device_name)\n", + " return device\n", + "\n", + "def init_paths(input_ns):\n", + " if input_ns.save_folder == None or input_ns.save_folder == '':\n", + " input_ns.save_folder = input_ns.video_name.replace('.mp4', '').replace('.gif', '')\n", + " else:\n", + " input_ns.save_folder += f\"/{input_ns.video_name.replace('.mp4', '').replace('.gif', '')}\"\n", + " save_dir = f'{const.OUTPUT_PATH}/{input_ns.save_folder}'\n", + " os.makedirs(save_dir, exist_ok=True)\n", + " save_idx = max([int(x[-5:]) for x in os.listdir(save_dir)])+1 if os.listdir(save_dir) != [] else 0\n", + " input_ns.save_path = f'{save_dir}/{input_ns.positive_prompts}-{str(save_idx).zfill(5)}'\n", + " \n", + "\n", + " input_ns.video_path = f'{const.MP4_PATH}/{input_ns.video_name}.mp4'\n", + " \n", + " if '-' in input_ns.preprocess_name:\n", + " input_ns.hf_cn_path = [const.PREPROCESSOR_DICT[i] for i in input_ns.preprocess_name.split('-')]\n", + " else:\n", + " input_ns.hf_cn_path = const.PREPROCESSOR_DICT[input_ns.preprocess_name]\n", + " input_ns.hf_path = \"runwayml/stable-diffusion-v1-5\"\n", + " \n", + " input_ns.inverse_path = f'{const.GENERATED_DATA_PATH}/inverses/{input_ns.video_name}/{input_ns.preprocess_name}_{input_ns.model_id}_{input_ns.grid_size}x{input_ns.grid_size}_{input_ns.pad}'\n", + " input_ns.control_path = 
f'{const.GENERATED_DATA_PATH}/controls/{input_ns.video_name}/{input_ns.preprocess_name}_{input_ns.grid_size}x{input_ns.grid_size}_{input_ns.pad}'\n", + " os.makedirs(input_ns.control_path, exist_ok=True)\n", + " os.makedirs(input_ns.inverse_path, exist_ok=True)\n", + " os.makedirs(input_ns.save_path, exist_ok=True)\n", + " return input_ns\n", + " \n", + "def run(input_ns):\n", + "\n", + " if 'model_id' not in list(input_ns.__dict__.keys()):\n", + " input_ns.model_id = \"None\"\n", + " device = init_device()\n", + " input_ns = init_paths(input_ns)\n", + "\n", + " input_ns.image_pil_list = vgu.prepare_video_to_grid(input_ns.video_path, input_ns.sample_size, input_ns.grid_size, input_ns.pad)\n", + " print(input_ns.video_path )\n", + " input_ns.sample_size = len(input_ns.image_pil_list)\n", + " print(f'Frame count: {len(input_ns.image_pil_list)}')\n", + "\n", + " controlnet_class = RAVE_MultiControlNet if '-' in str(input_ns.controlnet_conditioning_scale) else RAVE\n", + " \n", + "\n", + " CN = controlnet_class(device)\n", + "\n", + "\n", + " CN.init_models(input_ns.hf_cn_path, input_ns.hf_path, input_ns.preprocess_name, input_ns.model_id)\n", + " \n", + " input_dict = vars(input_ns)\n", + " pp = pprint.PrettyPrinter(indent=4)\n", + " pp.pprint(input_dict)\n", + " yaml_dict = {k:v for k,v in input_dict.items() if k != 'image_pil_list'}\n", + "\n", + " start_time = datetime.datetime.now()\n", + " if '-' in str(input_ns.controlnet_conditioning_scale):\n", + " res_vid, control_vid_1, control_vid_2 = CN(input_dict)\n", + " else: \n", + " res_vid, control_vid = CN(input_dict)\n", + " end_time = datetime.datetime.now()\n", + " save_name = f\"{'-'.join(input_ns.positive_prompts.split())}_cstart-{input_ns.controlnet_guidance_start}_gs-{input_ns.guidance_scale}_pre-{'-'.join((input_ns.preprocess_name.replace('-','+').split('_')))}_cscale-{input_ns.controlnet_conditioning_scale}_grid-{input_ns.grid_size}_pad-{input_ns.pad}_model-{input_ns.model_id.split('/')[-1]}\"\n", + " res_vid[0].save(f\"{input_ns.save_path}/{save_name}.gif\", save_all=True, append_images=res_vid[1:], optimize=False, loop=10000)\n", + " # control_vid[0].save(f\"{input_ns.save_path}/control_{save_name}.gif\", save_all=True, append_images=control_vid[1:], optimize=False, loop=10000)\n", + "\n", + " yaml_dict['total_time'] = (end_time - start_time).total_seconds()\n", + " yaml_dict['total_number_of_frames'] = len(res_vid)\n", + " yaml_dict['sec_per_frame'] = yaml_dict['total_time']/yaml_dict['total_number_of_frames']\n", + " with open(f'{input_ns.save_path}/config.yaml', 'w') as yaml_file:\n", + " yaml.dump(yaml_dict, yaml_file)\n", + " \n", + " if '-' in str(input_ns.controlnet_conditioning_scale):\n", + " return (res_vid, control_vid_1, control_vid_2)\n", + " else: \n", + " return (res_vid, control_vid)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Set Parameters" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "input_ns = argparse.Namespace(**{})\n", + "input_ns.video_name = \"truck\" # specify the name of the video existing in the data/mp4_videos folder (e.g. data/mp4_videos/truck.mp4)\n", + "input_ns.preprocess_name = 'depth_zoe' # specify the name of the condition used for the video (e.g. depth_zoe condition)\n", + "\n", + "input_ns.batch_size = 4 # denotes the batch size of grids (e.g. 4 grids run in parallel)\n", + "input_ns.batch_size_vae = 1 # denotes the batch size for the VAE (e.g. 
1 grid runs in parallel for the VAE)\n", + "\n", + "input_ns.cond_step_start = 0.0 # denotes the step to start conditioning\n", + "\n", + "input_ns.controlnet_conditioning_scale = 1.0 # denotes the scale of the conditioning\n", + "input_ns.controlnet_guidance_end = 1.0 # denotes the end of the controlnet guidance\n", + "input_ns.controlnet_guidance_start = 0.0 # denotes the start of the controlnet guidance\n", + "\n", + "input_ns.give_control_inversion = True # denotes whether to give control to the inversion\n", + "\n", + "input_ns.grid_size = 3 # denotes the size of each grid (e.g. grid_size x grid_size)\n", + "input_ns.sample_size = -1 # denotes the number of grids to be generated (-1 for the full video)\n", + "input_ns.pad = 1 # denotes the padding of the video (if 1, use the same video)\n", + "input_ns.guidance_scale = 7.5 # denotes the scale of the guidance\n", + "input_ns.inversion_prompt = '' # denotes the inversion prompt\n", + "\n", + "input_ns.is_ddim_inversion = True # denotes whether to use ddim for inversion\n", + "input_ns.is_shuffle = True # denotes whether to applying shuffling between the grids\n", + "\n", + "input_ns.negative_prompts = \"\" # denotes the negative prompts\n", + "input_ns.num_inference_steps = 50 # denotes the number of inference steps during the sampling process\n", + "input_ns.num_inversion_step = 50 # denotes the number of inversion steps during the inversion process\n", + "input_ns.positive_prompts = \"Wooden trucks drive on a racetrack\" # denotes the positive prompts\n", + "input_ns.save_folder = '' # denotes the name of the folder to save the results under results\n", + "\n", + "input_ns.seed = 0 # denotes the seed\n", + "input_ns.model_id = 'None' # None to use stable diffusion v1.5, otherwise use the model id" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Run RAVE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "res = run(input_ns)\n", + "save_dir_name = 'truck'\n", + "save_dir = f'assets/notebook-generated/{save_dir_name}'\n", + "os.makedirs(save_dir, exist_ok=True)\n", + "if len(res) == 3:\n", + " res_vid, control_vid_1, control_vid_2 = res\n", + " control_vid_1[0].save(f\"{save_dir}/control.gif\", save_all=True, append_images=control_vid_1[1:], loop=0)\n", + " control_vid_2[0].save(f\"{save_dir}/control_2.gif\", save_all=True, append_images=control_vid_2[1:], loop=0)\n", + " res_vid[0].save(f\"{save_dir}/result.gif\", save_all=True, append_images=res_vid[1:], loop=0)\n", + "else:\n", + " res_vid, control_vid = res\n", + " control_vid[0].save(f\"{save_dir}/control.gif\", save_all=True, append_images=control_vid[1:], loop=0)\n", + " res_vid[0].save(f\"{save_dir}/result.gif\", save_all=True, append_images=res_vid[1:], loop=0)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Results" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Original Video" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "Video(filename=input_ns.video_path)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Edited Video" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "Image(filename=f\"{save_dir}/result.gif\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Control" + ] + }, + { + "cell_type": "code", + "execution_count": 
null, + "metadata": {}, + "outputs": [], + "source": [ + "Image(filename=f\"{save_dir}/control.gif\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "rave", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.0" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/RAVE-main/evaluation_uncleaned/eval_utils.py b/RAVE-main/evaluation_uncleaned/eval_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8ea917ac4e8a2ed596513d1cf5d509ff7c38fcc9 --- /dev/null +++ b/RAVE-main/evaluation_uncleaned/eval_utils.py @@ -0,0 +1,303 @@ +from sklearn.metrics.pairwise import cosine_similarity +from PIL import Image +import torch.nn.functional as F +import cv2 +import imageio +import argparse +import sys +import torch +import clip +import warnings +import numpy as np +sys.path.append('/coc/flash6/okara7/codes/video-editing/hf-controlnet/RAFT/RAFT-master') +sys.path.append('/coc/flash6/okara7/codes/video-editing/hf-controlnet/RAFT/RAFT-master/core') +from core.raft import RAFT +from core.utils.utils import InputPadder +from skimage.metrics import structural_similarity + +def video_to_pil_list(video_path): + if video_path.endswith('.mp4'): + vidcap = cv2.VideoCapture(video_path) + pil_list = [] + while True: + success, image = vidcap.read() + if success: + pil_list.append(Image.fromarray(image)) + else: + break + + return pil_list + elif video_path.endswith('.gif'): + gif = imageio.get_reader(video_path) + pil_list = [] + + for frame in gif: + pil_list.append(Image.fromarray(frame)) + + return pil_list + + +def coords_grid(b, h, w, homogeneous=False, device=None): + y, x = torch.meshgrid(torch.arange(h), torch.arange(w)) # [H, W] + + stacks = [x, y] + + if homogeneous: + ones = torch.ones_like(x) # [H, W] + stacks.append(ones) + + grid = torch.stack(stacks, dim=0).float() # [2, H, W] or [3, H, W] + + grid = grid[None].repeat(b, 1, 1, 1) # [B, 2, H, W] or [B, 3, H, W] + + if device is not None: + grid = grid.to(device) + + return grid + + +def bilinear_sample(img, + sample_coords, + mode='bilinear', + padding_mode='zeros', + return_mask=False): + # img: [B, C, H, W] + # sample_coords: [B, 2, H, W] in image scale + if sample_coords.size(1) != 2: # [B, H, W, 2] + sample_coords = sample_coords.permute(0, 3, 1, 2) + + b, _, h, w = sample_coords.shape + + # Normalize to [-1, 1] + x_grid = 2 * sample_coords[:, 0] / (w - 1) - 1 + y_grid = 2 * sample_coords[:, 1] / (h - 1) - 1 + + grid = torch.stack([x_grid, y_grid], dim=-1) # [B, H, W, 2] + + img = F.grid_sample(img, + grid, + mode=mode, + padding_mode=padding_mode, + align_corners=True) + + if return_mask: + mask = (x_grid >= -1) & (y_grid >= -1) & (x_grid <= 1) & ( + y_grid <= 1) # [B, H, W] + + return img, mask + + return img + + +def flow_warp_rerender(feature, + flow, + mask=False, + mode='bilinear', + padding_mode='zeros'): + b, c, h, w = feature.size() + assert flow.size(1) == 2 + + grid = coords_grid(b, h, w).to(flow.device) + flow # [B, 2, H, W] + + return bilinear_sample(feature, + grid, + mode=mode, + padding_mode=padding_mode, + return_mask=mask) + + +def clip_text(pil_list, text_prompt, preprocess, device, model): + text = clip.tokenize([text_prompt]).to(device) + + scores = [] + images = [] + with torch.no_grad(): + text_features = model.encode_text(text) + for 
pil in pil_list: + image = preprocess(pil).unsqueeze(0).to(device) + images.append(image) + image_features = model.encode_image(torch.cat(images)) + scores = [torch.cosine_similarity(text_features, image_feature).item() for image_feature in image_features] + + score = sum(scores) / len(scores) + + return score + +def clip_frame(pil_list, preprocess, device, model): + image_features = [] + images = [] + with torch.no_grad(): + for pil in pil_list: + image = preprocess(pil).unsqueeze(0).to(device) + images.append(image) + + image_features = model.encode_image(torch.cat(images)) + + image_features = image_features.cpu().numpy() + cosine_sim_matrix = cosine_similarity(image_features) + np.fill_diagonal(cosine_sim_matrix, 0) # set diagonal elements to 0 + score = cosine_sim_matrix.sum() / (len(pil_list) * (len(pil_list)-1)) + + return score + +def pick_score_func(frames, prompt, model, processor, device): + image_inputs = processor(images=frames, padding=True, truncation=True, max_length=77, return_tensors="pt").to(device) + text_inputs = processor(text=prompt, padding=True, truncation=True, max_length=77, return_tensors="pt").to(device) + + with torch.no_grad(): + image_embs = model.get_image_features(**image_inputs) + image_embs = image_embs / torch.norm(image_embs, dim=-1, keepdim=True) + text_embs = model.get_text_features(**text_inputs) + text_embs = text_embs / torch.norm(text_embs, dim=-1, keepdim=True) + score_per_image = model.logit_scale.exp() * (text_embs @ image_embs.T)[0] + score_per_image = score_per_image.detach().cpu().numpy() + score = score_per_image.mean() + + return score + +def prepare_raft_model(device): + raft_dict = { + 'model': '/coc/flash6/okara7/codes/kurtkaya/RAFT/models/raft-things.pth', + 'small': False, + 'mixed_precision': False, + 'alternate_corr': False + } + + args = argparse.Namespace(**raft_dict) + + model = torch.nn.DataParallel(RAFT(args)) + model.load_state_dict(torch.load(args.model)) + + model = model.module + model.to(device) + model.eval() + + return model + +def flow_warp(img: np.ndarray, + flow: np.ndarray, + filling_value: int = 0, + interpolate_mode: str = 'nearest'): + '''Use flow to warp img. + + Args: + img (ndarray): Image to be warped. + flow (ndarray): Optical Flow. + filling_value (int): The missing pixels will be set with filling_value. + interpolate_mode (str): bilinear -> Bilinear Interpolation; + nearest -> Nearest Neighbor. + + Returns: + ndarray: Warped image with the same shape of img + ''' + warnings.warn('This function is just for prototyping and cannot ' + 'guarantee the computational efficiency.') + assert flow.ndim == 3, 'Flow must be in 3D arrays.' 
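+    # The warp below builds a per-pixel index grid, offsets it by the flow, and
+    # gathers source pixels: output[y, x] = img[y + flow[y, x, 1], x + flow[y, x, 0]],
+    # using either nearest-neighbour rounding or manual bilinear interpolation.
+    # Positions that fall outside the image keep filling_value.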
+ height = flow.shape[0] + width = flow.shape[1] + channels = img.shape[2] + + output = np.ones( + (height, width, channels), dtype=img.dtype) * filling_value + + grid = np.indices((height, width)).swapaxes(0, 1).swapaxes(1, 2) + dx = grid[:, :, 0] + flow[:, :, 1] + dy = grid[:, :, 1] + flow[:, :, 0] + sx = np.floor(dx).astype(int) + sy = np.floor(dy).astype(int) + valid = (sx >= 0) & (sx < height - 1) & (sy >= 0) & (sy < width - 1) + + if interpolate_mode == 'nearest': + output[valid, :] = img[dx[valid].round().astype(int), + dy[valid].round().astype(int), :] + elif interpolate_mode == 'bilinear': + # dirty walkround for integer positions + eps_ = 1e-6 + dx, dy = dx + eps_, dy + eps_ + left_top_ = img[np.floor(dx[valid]).astype(int), + np.floor(dy[valid]).astype(int), :] * ( + np.ceil(dx[valid]) - dx[valid])[:, None] * ( + np.ceil(dy[valid]) - dy[valid])[:, None] + left_down_ = img[np.ceil(dx[valid]).astype(int), + np.floor(dy[valid]).astype(int), :] * ( + dx[valid] - np.floor(dx[valid]))[:, None] * ( + np.ceil(dy[valid]) - dy[valid])[:, None] + right_top_ = img[np.floor(dx[valid]).astype(int), + np.ceil(dy[valid]).astype(int), :] * ( + np.ceil(dx[valid]) - dx[valid])[:, None] * ( + dy[valid] - np.floor(dy[valid]))[:, None] + right_down_ = img[np.ceil(dx[valid]).astype(int), + np.ceil(dy[valid]).astype(int), :] * ( + dx[valid] - np.floor(dx[valid]))[:, None] * ( + dy[valid] - np.floor(dy[valid]))[:, None] + output[valid, :] = left_top_ + left_down_ + right_top_ + right_down_ + else: + raise NotImplementedError( + 'We only support interpolation modes of nearest and bilinear, ' + f'but got {interpolate_mode}.') + return output.astype(img.dtype) + +def calculate_flow(pil_list, model, DEVICE): + def load_image(imfile, DEVICE): + img = np.array(imfile).astype(np.uint8) + img = torch.from_numpy(img).permute(2, 0, 1).float() + return img[None].to(DEVICE) + + flow_up_list = [] + with torch.no_grad(): + images = pil_list.copy() + for imfile1, imfile2 in zip(images[:-1], images[1:]): + image1 = load_image(imfile1, DEVICE) + image2 = load_image(imfile2, DEVICE) + + padder = InputPadder(image1.shape) + image1, image2 = padder.pad(image1, image2) + + _, flow_up = model(image1, image2, iters=20, test_mode=True) + + flow_up_list.append(flow_up.detach().squeeze().permute(1,2,0).cpu().numpy()) + return flow_up_list + +def rerender_warp(img, flow, mode='bilinear'): + expand = False + if len(img.shape) == 2: + expand = True + img = np.expand_dims(img, 2) + + img = torch.from_numpy(img).permute(2, 0, 1).unsqueeze(0) + dtype = img.dtype + img = img.to(torch.float) + flow = torch.from_numpy(flow).permute(2, 0, 1).unsqueeze(0) + res = flow_warp_rerender(img, flow, mode=mode) + res = res.to(dtype) + res = res[0].cpu().permute(1, 2, 0).numpy() + if expand: + res = res[:, :, 0] + return res + +def opencv_warp(img, flow): + + h, w = flow.shape[:2] + flow[:,:,0] += np.arange(w) + flow[:,:,1] += np.arange(h)[:,np.newaxis] + warped_img = cv2.remap(img, flow, None, cv2.INTER_LINEAR) + return warped_img + +rearrange = lambda x: (np.array(x)/255).reshape(-1,1) + +def warp_video(edit_pil_list, source_pil_list, raft_model, device, distance_func): + # print('source size', source_pil_list[0].size) + flow_up_list = calculate_flow(source_pil_list, raft_model, device) + + res_list = [edit_pil_list[0]] + for i,pil_img in enumerate(edit_pil_list[:-1]): + warped = opencv_warp(np.array(pil_img), flow_up_list[i]) + pil_warped = Image.fromarray(warped) + # pil_warped.save(f'warped_{i}.png') + res_list.append(pil_warped) + # 
res_list[0].save('warped.gif', save_all=True, append_images=res_list[1:], duration=100, loop=0) + # print('size of video', res_list[0].size) + if distance_func == structural_similarity: + return np.mean(np.array([distance_func(np.array(edit_pil_list[i]), np.array(res_list[i]), channel_axis=2) for i in range(len(res_list))])) + else: + return np.mean(np.array([distance_func(edit_pil_list[i], res_list[i]) for i in range(len(res_list))])) diff --git a/RAVE-main/evaluation_uncleaned/preprocesser_utils.py b/RAVE-main/evaluation_uncleaned/preprocesser_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ae809c64b5dce25a7c5d4d1a6cdc7240c9fedac4 --- /dev/null +++ b/RAVE-main/evaluation_uncleaned/preprocesser_utils.py @@ -0,0 +1,266 @@ +import cv2 +import yaml + +import numpy as np +from annotator.lineart import LineartDetector +from annotator.zoe import ZoeDetector +from annotator.manga_line import MangaLineExtration +from annotator.lineart_anime import LineartAnimeDetector +from annotator.hed import apply_hed +from annotator.canny import apply_canny +from annotator.pidinet import apply_pidinet +from annotator.leres import apply_leres +from annotator.midas import apply_midas + +import torch +import torch.nn.functional as F +import utils.image_process_utils as ipu + +def yaml_load(path): + with open(path, 'r') as stream: + try: + return yaml.safe_load(stream) + except yaml.YAMLError as exc: + print(exc) + +def yaml_dump(path, data): + with open(path, 'w') as outfile: + yaml.dump(data, outfile, default_flow_style=False) + +def pad64(x): + return int(np.ceil(float(x) / 64.0) * 64 - x) + +def HWC3(x): + assert x.dtype == np.uint8 + if x.ndim == 2: + x = x[:, :, None] + assert x.ndim == 3 + H, W, C = x.shape + assert C == 1 or C == 3 or C == 4 + if C == 3: + return x + if C == 1: + return np.concatenate([x, x, x], axis=2) + if C == 4: + color = x[:, :, 0:3].astype(np.float32) + alpha = x[:, :, 3:4].astype(np.float32) / 255.0 + y = color * alpha + 255.0 * (1.0 - alpha) + y = y.clip(0, 255).astype(np.uint8) + return y + +def safer_memory(x): + # Fix many MAC/AMD problems + return np.ascontiguousarray(x.copy()).copy() + + +def resize_image_with_pad(input_image, resolution, skip_hwc3=False): + if skip_hwc3: + img = input_image + else: + img = HWC3(input_image) + H_raw, W_raw, _ = img.shape + k = float(resolution) / float(min(H_raw, W_raw)) + interpolation = cv2.INTER_CUBIC if k > 1 else cv2.INTER_AREA + H_target = int(np.round(float(H_raw) * k)) + W_target = int(np.round(float(W_raw) * k)) + img = cv2.resize(img, (W_target, H_target), interpolation=interpolation) + H_pad, W_pad = pad64(H_target), pad64(W_target) + img_padded = np.pad(img, [[0, H_pad], [0, W_pad], [0, 0]], mode='edge') + + def remove_pad(x): + return safer_memory(x[:H_target, :W_target]) + + return safer_memory(img_padded), remove_pad + + + +def lineart_standard(img, res=512, **kwargs): + img, remove_pad = resize_image_with_pad(img, res) + x = img.astype(np.float32) + g = cv2.GaussianBlur(x, (0, 0), 6.0) + intensity = np.min(g - x, axis=2).clip(0, 255) + intensity /= max(16, np.median(intensity[intensity > 8])) + intensity *= 127 + result = intensity.clip(0, 255).astype(np.uint8) + return remove_pad(result), True + + +def lineart(img, res=512, **kwargs): + img, remove_pad = resize_image_with_pad(img, res) + model_lineart = LineartDetector('sk_model.pth') + + # applied auto inversion + result = 255 - model_lineart(img) + return remove_pad(result), True + + +def lineart_coarse(img, res=512, **kwargs): + img, 
remove_pad = resize_image_with_pad(img, res) + model_lineart_coarse = LineartDetector('sk_model2.pth') + + # applied auto inversion + result = 255 - model_lineart_coarse(img) + return remove_pad(result), True + +def lineart_anime(img, res=512, **kwargs): + img, remove_pad = resize_image_with_pad(img, res) + model_lineart_anime = LineartAnimeDetector() + + # applied auto inversion + result = 255 - model_lineart_anime(img) + return remove_pad(result), True + + +def lineart_anime_denoise(img, res=512, **kwargs): + img, remove_pad = resize_image_with_pad(img, res) + model_manga_line = MangaLineExtration() + + # applied auto inversion + result = model_manga_line(img) + return remove_pad(result), True + + +def canny(img, res=512, thr_a=100, thr_b=200, **kwargs): + l, h = thr_a, thr_b + img, remove_pad = resize_image_with_pad(img, res) + model_canny = apply_canny + result = model_canny(img, l, h) + return remove_pad(result), True + + + +def hed(img, res=512, **kwargs): + img, remove_pad = resize_image_with_pad(img, res) + model_hed = apply_hed + result = model_hed(img) + return remove_pad(result), True + + +def hed_safe(img, res=512, **kwargs): + img, remove_pad = resize_image_with_pad(img, res) + model_hed = apply_hed + result = model_hed(img, is_safe=True) + return remove_pad(result), True + +def midas(img, res=512, a=np.pi * 2.0, **kwargs): + img, remove_pad = resize_image_with_pad(img, res) + model_midas = apply_midas + result, _ = model_midas(img, a) + return remove_pad(result), True + + +def leres(img, res=512, thr_a=0, thr_b=0, boost=False, **kwargs): + img, remove_pad = resize_image_with_pad(img, res) + model_leres = apply_leres + result = model_leres(img, thr_a, thr_b, boost=boost) + return remove_pad(result), True + +def lerespp(img, res=512, thr_a=0, thr_b=0, boost=True, **kwargs): + img, remove_pad = resize_image_with_pad(img, res) + model_leres = apply_leres + result = model_leres(img, thr_a, thr_b, boost=boost) + return remove_pad(result), True + + +def pidinet(img, res=512, **kwargs): + img, remove_pad = resize_image_with_pad(img, res) + model_pidinet = apply_pidinet + result = model_pidinet(img) + return remove_pad(result), True + + +def pidinet_ts(img, res=512, **kwargs): + img, remove_pad = resize_image_with_pad(img, res) + model_pidinet = apply_pidinet + result = model_pidinet(img, apply_fliter=True) + return remove_pad(result), True + + +def pidinet_safe(img, res=512, **kwargs): + img, remove_pad = resize_image_with_pad(img, res) + model_pidinet = apply_pidinet + result = model_pidinet(img, is_safe=True) + return remove_pad(result), True + + + +def zoe_depth(img, res=512, **kwargs): + img, remove_pad = resize_image_with_pad(img, res) + model_zoe_depth = ZoeDetector() + result = model_zoe_depth(img) + return remove_pad(result), True + + +preprocessors_dict = { + 'lineart_realistic': lineart, + 'lineart_coarse': lineart_coarse, + 'lineart_standard': lineart_standard, + 'lineart_anime': lineart_anime, + 'lineart_anime_denoise': lineart_anime_denoise, + 'softedge_hed': hed, + 'softedge_hedsafe': hed_safe, + 'softedge_pidinet': pidinet, + 'softedge_pidsafe': pidinet_safe, + 'canny': canny, + 'depth_leres': leres, + 'depth_leres++': lerespp, + 'depth_midas': midas, + 'depth_zoe': zoe_depth, +} + +def pixel_perfect_process(input_image, p_name): + raw_H, raw_W, _ = input_image.shape + preprocessor_resolution = raw_H + detected_map, _ = preprocessors_dict[p_name](input_image, res=preprocessor_resolution) + return detected_map + +def calculate_flow(prev_frame, curr_frame): + prev = 
ipu.pil_to_cv_gray(prev_frame) + curr = ipu.pil_to_cv_gray(curr_frame) + + flow = cv2.calcOpticalFlowFarneback(prev, curr, None, 0.5, 3, 15, 3, 5, 1.2, 0) + h, w = flow.shape[:2] + flow = -flow + flow[:,:,0] += np.arange(w) + flow[:,:,1] += np.arange(h)[:,np.newaxis] + return flow + +def condition_smoothing(prev_condition, prev_flow, curr_condition, next_condition, next_flow, smoothing): + # prev_condition.shape # (H, W) + warped_prev = cv2.remap(prev_condition, prev_flow, None, cv2.INTER_LINEAR) + warped_next = cv2.remap(next_condition, next_flow, None, cv2.INTER_LINEAR) + + # curr_condition = smoothing * warped_prev + (1 - smoothing) * warped_next + curr_condition = 2*smoothing * warped_prev + smoothing * warped_next + (1 - (3 * smoothing)) * curr_condition + return curr_condition # (H, W) numpy array + + +def warp(x, flo): + """ + warp an image/tensor (im2) back to im1, according to the optical flow + x: [B, C, H, W] (im2) + flo: [B, 2, H, W] flow + """ + B, C, H, W = x.size() + # mesh grid + xx = torch.arange(0, W).view(1, -1).repeat(H, 1) + yy = torch.arange(0, H).view(-1, 1).repeat(1, W) + xx = xx.view(1, 1, H, W).repeat(B, 1, 1, 1) + yy = yy.view(1, 1, H, W).repeat(B, 1, 1, 1) + grid = torch.cat((xx, yy), 1).float() + + grid = grid.to(x.device) + vgrid = grid + flo + # scale grid to [-1,1] + vgrid[:, 0, :, :] = 2.0 * vgrid[:, 0, :, :].clone() / max(W - 1, 1) - 1.0 + vgrid[:, 1, :, :] = 2.0 * vgrid[:, 1, :, :].clone() / max(H - 1, 1) - 1.0 + + vgrid = vgrid.permute(0, 2, 3, 1) + output = F.grid_sample(x, vgrid, mode='nearest', align_corners=True, padding_mode='zeros') + # mask = torch.ones(x.size()).to(x.device) + # mask = F.grid_sample(mask, vgrid) + + # mask[mask < 0.999] = 0 + # mask[mask > 0] = 1 + + return output \ No newline at end of file diff --git a/RAVE-main/evaluation_uncleaned/quantitative_evaluation.py b/RAVE-main/evaluation_uncleaned/quantitative_evaluation.py new file mode 100644 index 0000000000000000000000000000000000000000..d7e2e931cbdd3c305f03ec7104eeb0b1ffaa363e --- /dev/null +++ b/RAVE-main/evaluation_uncleaned/quantitative_evaluation.py @@ -0,0 +1,187 @@ +import torch +import clip + +import sys + +import shutil +import os +import glob + +import numpy as np + + +from collections import defaultdict +from transformers import AutoProcessor, AutoModel + +from skimage.metrics import structural_similarity + +import utils.eval_utils as eu +import utils.preprocesser_utils as pu + + +if __name__ == '__main__': + typ = sys.argv[1] + if typ == 'style': + dataset_path = '/coc/flash6/okara7/codes/video-editing/hf-controlnet/data/rave_dataset_prepared_512' + style_prompts_dict = pu.yaml_load(f'{dataset_path}/style_prompts.yaml') + prev_methods_path = '/coc/flash6/okara7/codes/video-editing/hf-controlnet/PREV_OUTPUTS/outputs_512' + rave_dataset_path = '/coc/flash6/okara7/codes/video-editing/hf-controlnet/res_automate/11-01-2023_rave_512_style' + no_shuffle_path = '/coc/flash6/okara7/codes/video-editing/hf-controlnet/res_automate/11-04-2023/no-shuffle-style' + output_dir = '/coc/flash6/okara7/codes/video-editing/hf-controlnet/FINAL_PREPARED/evaluation_set_512_style' + elif typ == 'shape': + dataset_path = '/coc/flash6/okara7/codes/video-editing/hf-controlnet/data/rave_dataset_prepared_512' + style_prompts_dict = pu.yaml_load(f'{dataset_path}/shape_prompts.yaml') + prev_methods_path = '/coc/flash6/okara7/codes/video-editing/hf-controlnet/PREV_OUTPUTS/outputs_shape_512' + rave_dataset_path = '/coc/flash6/okara7/codes/video-editing/hf-controlnet/res_automate/11-02-2023-shape_512' + 
no_shuffle_path = '/coc/flash6/okara7/codes/video-editing/hf-controlnet/res_automate/11-04-2023/no-shuffle-shape' + output_dir = '/coc/flash6/okara7/codes/video-editing/hf-controlnet/FINAL_PREPARED/evaluation_set_512_shape' + frame_count = int(sys.argv[2]) + st = 50 + prepare = False + output_dir = f'{output_dir}/{frame_count}-frames' + frame_prompt_dict = style_prompts_dict[f'{frame_count}-frames'] + + if prepare: + for key in frame_prompt_dict: + for prompt in frame_prompt_dict[key]: + output_save_dir = f'{output_dir}/{key}/{prompt}' + os.makedirs(output_save_dir, exist_ok=True) + + # Prepare Rerender Data + for i in range(1,4): + rerender_path = f'{prev_methods_path}/st-{st}_fr-{frame_count}/rerender/{key}_pad-{i}/{prompt.replace(" ", "-")}/res.gif' + if os.path.exists(rerender_path): + shutil.copy(rerender_path, f'{output_dir}/{key}/{prompt}/rerender.gif') + break + + # Prepare Tokenflow Data + for i in range(1,4): + tokenflow_path = f'{prev_methods_path}/st-{st}_fr-{frame_count}/tokenflow/pnp_SD_1.5/{key}_pad-{i}/{prompt}' + if os.path.exists(tokenflow_path): + tokenflow_path = glob.glob(f'{tokenflow_path}/**/*.gif', recursive=True) + if len(tokenflow_path) > 0: + tokenflow_path = tokenflow_path[0] + shutil.copy(tokenflow_path, f'{output_dir}/{key}/{prompt}/tokenflow.gif') + break + + # Prepare Pix2Video Data + pix2video_path = f'{prev_methods_path}/st-{st}_fr-{frame_count}/pix2video/{key}/{prompt.replace(" ","+")}/samples' + if os.path.exists(pix2video_path): + try: + + pix2video_path = glob.glob(f'{pix2video_path}/sample/*.gif', recursive=True)[0] + shutil.copy(pix2video_path, f'{output_dir}/{key}/{prompt}/pix2video.gif') + break + except: + print(pix2video_path) + break + + # Prepare Text2Video-Zero Data + for i in range(1,4): + text2video_path = f'{prev_methods_path}/st-{st}_fr-{frame_count}/text2video/{key}_pad-{i}/{prompt}' + if os.path.exists(text2video_path): + text2video_path = glob.glob(f'{text2video_path}/**/*.gif', recursive=True)[0] + shutil.copy(text2video_path, f'{output_dir}/{key}/{prompt}/text2video.gif') + break + + # Prepare Rave Data + rave_path = glob.glob(f'{rave_dataset_path}/{key}*/{prompt}*/*.gif', recursive=True) + if len(rave_path) > 0: + + rave_path = rave_path[0] + shutil.copy(rave_path, f'{output_dir}/{key}/{prompt}/rave.gif') + + # Prepare No-Shuffle Data + + + no_shuffle = glob.glob(f'{no_shuffle_path}/*{key}*/*{prompt}*/*.gif', recursive=True) + + if len(no_shuffle) > 0: + + no_shuffle = no_shuffle[0] + shutil.copy(no_shuffle, f'{output_dir}/{key}/{prompt}/no-shuffle.gif') + + # Prepare Source Video + source_video_path = glob.glob(f'{dataset_path}/{frame_count}-frames/{key}*.mp4', recursive=True)[0] + shutil.copy(source_video_path, f'{output_dir}/{key}/{prompt}/source.mp4') + else: + device = "cuda" if torch.cuda.is_available() else "cpu" + model, preprocess = clip.load("ViT-B/32", device=device) + + pick_model = AutoModel.from_pretrained("pickapic-anonymous/PickScore_v1").to(device) + pick_processor = AutoProcessor.from_pretrained("laion/CLIP-ViT-H-14-laion2B-s32B-b79K") + raft_model = eu.prepare_raft_model(device) + + rearrange = lambda x: (np.array(x)/255).reshape(-1,1) + l2_norm = lambda x,y: np.linalg.norm(rearrange(x)-rearrange(y))/rearrange(x).shape[0] + l1_norm = lambda x,y: np.linalg.norm(rearrange(x)-rearrange(y), ord=1)/rearrange(x).shape[0] + + main_dict = { + 'rerender': {}, + 'tokenflow': {}, + 'text2video': {}, + 'rave': {}, + 'no-shuffle': {}, + 'pix2video': {}, + } + + scores_main = defaultdict(float) + + for video_name in 
frame_prompt_dict: + + for prompt in frame_prompt_dict[video_name]: + + for k in main_dict.keys(): + + main_dict[k][video_name] = {} + scores = scores_main.copy() + video_path = f'{output_dir}/{video_name}/{prompt}/{k}.gif' + source_video_path = f'{output_dir}/{video_name}/{prompt}/source.mp4' + if os.path.exists(video_path): + pil_list = eu.video_to_pil_list(video_path) + source_pil_list = eu.video_to_pil_list(source_video_path) + + scores['clip-frame'] = eu.clip_frame(pil_list, preprocess, device, model) + scores['clip-text'] = eu.clip_text(pil_list, prompt, preprocess, device, model) + + scores['pick-score'] = eu.pick_score_func(pil_list, prompt, pick_model, pick_processor, device) + if k == 'rerender': + # scores['warp-error-l1'] = eu.warp_video(pil_list, source_pil_list[1:-1], raft_model, device, l2_norm) + # scores['warp-error-l2'] = eu.warp_video(pil_list, source_pil_list[1:-1], raft_model, device, l1_norm) + scores['warp-error-ssim'] = eu.warp_video(pil_list, source_pil_list[1:-1], raft_model, device, structural_similarity) + else: + # scores['warp-error-l1'] = eu.warp_video(pil_list, source_pil_list, raft_model, device, l2_norm) + # scores['warp-error-l2'] = eu.warp_video(pil_list, source_pil_list, raft_model, device, l1_norm) + scores['warp-error-ssim'] = eu.warp_video(pil_list, source_pil_list, raft_model, device, structural_similarity) + # print(f'{video_name} - {prompt} - {k} - ', end='\n') + + main_dict[k][video_name][prompt] = scores.copy() + print(f'{video_name} - {prompt} - ', end='\n') + for k in main_dict.keys(): + print(f'\t{k}: ', end='') + for s in sorted(main_dict[k][video_name][prompt].keys()): + if 'warp-error-l1' in s: + print(f'{(main_dict[k][video_name][prompt][s]*100000):.2f}', end=', ') + elif 'warp-error-l2' in s or 'warp-error-ssim' in s: + print(f'{(main_dict[k][video_name][prompt][s]*100):.2f}', end=', ') + else: + print(f'{main_dict[k][video_name][prompt][s]:.4f}', end=', ') + print() + print() + + for k in main_dict.keys(): + samp_num = 0 + scores = scores_main.copy() + for video_name in main_dict[k]: + for prompt in main_dict[k][video_name]: + for score in main_dict[k][video_name][prompt]: + scores[score] += main_dict[k][video_name][prompt][score] + samp_num += 1 + for score in scores: + scores[score] /= samp_num + print(k,scores) + + + + + \ No newline at end of file diff --git a/RAVE-main/requirements.txt b/RAVE-main/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..16de89d2eb07ffb717af84cfac5cc5c307aa8e15 --- /dev/null +++ b/RAVE-main/requirements.txt @@ -0,0 +1,29 @@ +accelerate +basicsr +diffusers==0.18.2 +einops +gradio +imageio +matplotlib +mmdet==3.2.0 +mmpose==1.2.0 +numpy +omegaconf +opencv_python +openvino +pandas +Pillow +prettytable +pytorch_lightning +PyYAML +safetensors +scipy +setuptools +scikit-image +timm==0.6.7 +torch_tb_profiler +tqdm +transformers +huggingface_hub<0.26.0 + + diff --git a/RAVE-main/utils/__pycache__/constants.cpython-38.pyc b/RAVE-main/utils/__pycache__/constants.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..411af81cb01d8bcab5d1573d2878da5718b98fd9 Binary files /dev/null and b/RAVE-main/utils/__pycache__/constants.cpython-38.pyc differ diff --git a/RAVE-main/utils/__pycache__/feature_utils.cpython-38.pyc b/RAVE-main/utils/__pycache__/feature_utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89d982b4844cbfe3a9d399cad4adfd340a935666 Binary files /dev/null and 
b/RAVE-main/utils/__pycache__/feature_utils.cpython-38.pyc differ diff --git a/RAVE-main/utils/__pycache__/image_process_utils.cpython-38.pyc b/RAVE-main/utils/__pycache__/image_process_utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea3cff5fb4be43843588890c602dc21679865746 Binary files /dev/null and b/RAVE-main/utils/__pycache__/image_process_utils.cpython-38.pyc differ diff --git a/RAVE-main/utils/__pycache__/preprocesser_utils.cpython-38.pyc b/RAVE-main/utils/__pycache__/preprocesser_utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d11ad44511d5e8370244cd5db2aca370be05b1d6 Binary files /dev/null and b/RAVE-main/utils/__pycache__/preprocesser_utils.cpython-38.pyc differ diff --git a/RAVE-main/utils/__pycache__/video_grid_utils.cpython-38.pyc b/RAVE-main/utils/__pycache__/video_grid_utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0fe2d59b043c3a33a4d1c405589f9ca4b12e702f Binary files /dev/null and b/RAVE-main/utils/__pycache__/video_grid_utils.cpython-38.pyc differ diff --git a/RAVE-main/utils/constants.py b/RAVE-main/utils/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..15653399729ef07ee710b4082db95521c79c35a4 --- /dev/null +++ b/RAVE-main/utils/constants.py @@ -0,0 +1,41 @@ + +import os +from datetime import datetime + +date_time = datetime.now().strftime("%m-%d-%Y") + +CWD = os.getcwd() +MP4_PATH = os.path.join(CWD, 'data', 'mp4_videos') +OUTPUT_PATH = os.path.join(CWD, 'results', date_time) + +GENERATED_DATA_PATH = os.path.join(CWD, 'generated', 'data') +PREPROCESSOR_DICT = { + 'lineart_realistic': "lllyasviel/control_v11p_sd15_lineart", + 'lineart_coarse': "lllyasviel/control_v11p_sd15_lineart", + 'lineart_standard': "lllyasviel/control_v11p_sd15_lineart", + 'lineart_anime': "lllyasviel/control_v11p_sd15s2_lineart_anime", + 'lineart_anime_denoise': "lllyasviel/control_v11p_sd15s2_lineart_anime", + 'softedge_hed': 'lllyasviel/control_v11p_sd15_softedge', + 'softedge_hedsafe': 'lllyasviel/control_v11p_sd15_softedge', + 'softedge_pidinet': 'lllyasviel/control_v11p_sd15_softedge', + 'softedge_pidsafe': 'lllyasviel/control_v11p_sd15_softedge', + 'canny': 'lllyasviel/control_v11p_sd15_canny', + 'depth_leres': 'lllyasviel/control_v11f1p_sd15_depth', + 'depth_leres++': 'lllyasviel/control_v11f1p_sd15_depth', + 'depth_midas': 'lllyasviel/control_v11f1p_sd15_depth', + 'depth_zoe': 'lllyasviel/control_v11f1p_sd15_depth', +} + +MODEL_IDS = { + 'Realistic Vision V5.1': '130072', + 'Realistic Vision V6.0' : '245598', + 'MajicMIXRealisticV7' : '176425', + 'DreamShaper' : '128713', + 'EpicPhotoGasm' : '223670', + 'DivineEleganceMix (Anime)': '238656', + 'GhostMix (Anime)': '76907', + 'CetusMix (Anime)': '105924', + 'Counterfeit (Anime)': '57618', + 'SD 1.5': 'None' +} + diff --git a/RAVE-main/utils/feature_utils.py b/RAVE-main/utils/feature_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..aae529cbd98aa45ce582dece865d8aae00ce75f9 --- /dev/null +++ b/RAVE-main/utils/feature_utils.py @@ -0,0 +1,55 @@ +import torch + +def flatten_grid(x, grid_size=[2, 2]): + ''' + x: B x C x H x W + ''' + B, C, H, W = x.size() + + hs, ws = grid_size + + img_h = H // hs + + flattened = torch.cat(torch.split(x, img_h, dim=2), dim=-1) + + return flattened + +def unflatten_grid(x, grid_size=[2,2]): + ''' + x: B x C x H x W + ''' + B, C, H, W = x.size() + hs, ws = grid_size + img_w = W // (ws) + + unflattened = torch.cat(torch.split(x, img_w, 
dim=3), dim=-2) + + return unflattened + +def prepare_key_grid_latents(latents_video, latent_grid_size=[2,2], key_grid_size=[3,3], rand_indices=None): + + T = latents_video.size(0) + img_h, img_w = latents_video.size(-2) // latent_grid_size[0], latents_video.size(-1) // latent_grid_size[1] + list_of_flattens = [flatten_grid(el.unsqueeze(0), latent_grid_size) for el in latents_video] + long_flatten = torch.cat(list_of_flattens, dim=-1) + + keyframe_grid = unflatten_grid(torch.cat([long_flatten[:,:,:,ind*(img_w):(ind+1)*(img_w)] for ind in rand_indices], dim=-1), key_grid_size) + return keyframe_grid, rand_indices + + +def pil_grid_to_frames(pil_grid, grid_size=[2,2]): + w,h = pil_grid.size + img_w = w // grid_size[1] + img_h = h // grid_size[0] + list_of_pil = [] + for i in range(grid_size[0]): + for j in range(grid_size[1]): + list_of_pil.append(pil_grid.crop((j*img_w, i*img_h, (j+1)*img_w, (i+1)*img_h))) + return list_of_pil + + +if __name__ == '__main__': + a = torch.randint(0,5,(1,3), dtype=torch.float) + + + diff --git a/RAVE-main/utils/image_process_utils.py b/RAVE-main/utils/image_process_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..df712e582d9dcbc9254ecc423f895c976f7c4ec2 --- /dev/null +++ b/RAVE-main/utils/image_process_utils.py @@ -0,0 +1,61 @@ +from PIL import Image +import torch +import numpy as np +import cv2 as cv + +def load_pil_img(load_path): + img = Image.open(load_path).convert('RGB') + return img + +def load_img_as_torch_batch(load_path): + pil_img = load_pil_img(load_path) + return pil_img_to_torch_tensor(pil_img).unsqueeze(0) + +def pil_img_to_torch_tensor_grayscale(img_pil): + ''' + Takes a PIL image and returns a torch tensor of shape (1, 1, H, W) with values in [0, 1] + ''' + return torch.tensor(np.array(img_pil).transpose(0, 1)/255, dtype=torch.float).unsqueeze(0).unsqueeze(0) + +def pil_img_to_torch_tensor(img_pil): + ''' + Takes a PIL image and returns a torch tensor of shape (1, 3, H, W) with values in [0, 1] + ''' + return torch.tensor(np.array(img_pil).transpose(2, 0, 1)/255, dtype=torch.float).unsqueeze(0) + +def torch_to_pil_img(img_torch): + ''' + Takes a torch tensor of shape (1, 3, H, W) with values in [0, 1] and returns a PIL image + ''' + return Image.fromarray((img_torch.squeeze(0).detach().cpu().numpy().transpose(1, 2, 0)*255).astype('uint8')) + +def torch_to_pil_img_batch(img_torch): + ''' + Takes a torch tensor of shape (1, 3, H, W) with values in [0, 1] and returns a PIL image + ''' + return [torch_to_pil_img(img_torch[i]) for i in range(img_torch.shape[0])] + + +def pil_to_cv_gray(pil_img): + return cv.cvtColor(cv.cvtColor(np.array(pil_img), cv.COLOR_RGB2BGR), cv.COLOR_RGB2GRAY) + +def np_to_pil(np_img): + return Image.fromarray((np_img/255).astype(np.float32).transpose(2,0,1), 'RGB') + +def cv_to_pil(np_img): + return Image.fromarray((np_img/255).astype(np.float32), 'RGB') + + +def create_grid_from_numpy(np_img, grid_size=[2,2]): + + _, h,w = np_img.shape + w_grid = w * grid_size[1] + h_grid = h * grid_size[0] + grid = np.zeros((h_grid, w_grid)) + img_idx = 0 + + for i in range(grid_size[0]): + for j in range(grid_size[1]): + grid[i*h:(i+1)*h, j*w:(j+1)*w] = np_img[img_idx] + img_idx += 1 + return grid \ No newline at end of file diff --git a/RAVE-main/utils/preprocesser_utils.py b/RAVE-main/utils/preprocesser_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..458088dc0b84150b02999831f55bbb616924664e --- /dev/null +++ b/RAVE-main/utils/preprocesser_utils.py @@ -0,0 +1,212 @@ +import cv2 
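+# Thin wrappers around the ControlNet annotators (lineart, soft-edge, canny, depth);
+# a trimmed copy of evaluation_uncleaned/preprocesser_utils.py above, without the flow/warp helpers.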
+import yaml + +import numpy as np +from annotator.lineart import LineartDetector +from annotator.zoe import ZoeDetector +from annotator.manga_line import MangaLineExtration +from annotator.lineart_anime import LineartAnimeDetector +from annotator.hed import apply_hed +from annotator.canny import apply_canny +from annotator.pidinet import apply_pidinet +from annotator.leres import apply_leres +from annotator.midas import apply_midas + + +def yaml_load(path): + with open(path, 'r') as stream: + try: + return yaml.safe_load(stream) + except yaml.YAMLError as exc: + print(exc) + +def yaml_dump(path, data): + with open(path, 'w') as outfile: + yaml.dump(data, outfile, default_flow_style=False) + +def pad64(x): + return int(np.ceil(float(x) / 64.0) * 64 - x) + +def HWC3(x): + assert x.dtype == np.uint8 + if x.ndim == 2: + x = x[:, :, None] + assert x.ndim == 3 + H, W, C = x.shape + assert C == 1 or C == 3 or C == 4 + if C == 3: + return x + if C == 1: + return np.concatenate([x, x, x], axis=2) + if C == 4: + color = x[:, :, 0:3].astype(np.float32) + alpha = x[:, :, 3:4].astype(np.float32) / 255.0 + y = color * alpha + 255.0 * (1.0 - alpha) + y = y.clip(0, 255).astype(np.uint8) + return y + +def safer_memory(x): + # Fix many MAC/AMD problems + return np.ascontiguousarray(x.copy()).copy() + + +def resize_image_with_pad(input_image, resolution, skip_hwc3=False): + if skip_hwc3: + img = input_image + else: + img = HWC3(input_image) + H_raw, W_raw, _ = img.shape + k = float(resolution) / float(min(H_raw, W_raw)) + interpolation = cv2.INTER_CUBIC if k > 1 else cv2.INTER_AREA + H_target = int(np.round(float(H_raw) * k)) + W_target = int(np.round(float(W_raw) * k)) + img = cv2.resize(img, (W_target, H_target), interpolation=interpolation) + H_pad, W_pad = pad64(H_target), pad64(W_target) + img_padded = np.pad(img, [[0, H_pad], [0, W_pad], [0, 0]], mode='edge') + + def remove_pad(x): + return safer_memory(x[:H_target, :W_target]) + + return safer_memory(img_padded), remove_pad + + + +def lineart_standard(img, res=512, **kwargs): + img, remove_pad = resize_image_with_pad(img, res) + x = img.astype(np.float32) + g = cv2.GaussianBlur(x, (0, 0), 6.0) + intensity = np.min(g - x, axis=2).clip(0, 255) + intensity /= max(16, np.median(intensity[intensity > 8])) + intensity *= 127 + result = intensity.clip(0, 255).astype(np.uint8) + return remove_pad(result), True + + +def lineart(img, res=512, **kwargs): + img, remove_pad = resize_image_with_pad(img, res) + model_lineart = LineartDetector('sk_model.pth') + + # applied auto inversion + result = 255 - model_lineart(img) + return remove_pad(result), True + + +def lineart_coarse(img, res=512, **kwargs): + img, remove_pad = resize_image_with_pad(img, res) + model_lineart_coarse = LineartDetector('sk_model2.pth') + + # applied auto inversion + result = 255 - model_lineart_coarse(img) + return remove_pad(result), True + +def lineart_anime(img, res=512, **kwargs): + img, remove_pad = resize_image_with_pad(img, res) + model_lineart_anime = LineartAnimeDetector() + + # applied auto inversion + result = 255 - model_lineart_anime(img) + return remove_pad(result), True + + +def lineart_anime_denoise(img, res=512, **kwargs): + img, remove_pad = resize_image_with_pad(img, res) + model_manga_line = MangaLineExtration() + + # applied auto inversion + result = model_manga_line(img) + return remove_pad(result), True + + +def canny(img, res=512, thr_a=100, thr_b=200, **kwargs): + l, h = thr_a, thr_b + img, remove_pad = resize_image_with_pad(img, res) + model_canny = apply_canny + 
result = model_canny(img, l, h) + return remove_pad(result), True + + + +def hed(img, res=512, **kwargs): + img, remove_pad = resize_image_with_pad(img, res) + model_hed = apply_hed + result = model_hed(img) + return remove_pad(result), True + + +def hed_safe(img, res=512, **kwargs): + img, remove_pad = resize_image_with_pad(img, res) + model_hed = apply_hed + result = model_hed(img, is_safe=True) + return remove_pad(result), True + +def midas(img, res=512, a=np.pi * 2.0, **kwargs): + img, remove_pad = resize_image_with_pad(img, res) + model_midas = apply_midas + result, _ = model_midas(img, a) + return remove_pad(result), True + + +def leres(img, res=512, thr_a=0, thr_b=0, boost=False, **kwargs): + img, remove_pad = resize_image_with_pad(img, res) + model_leres = apply_leres + result = model_leres(img, thr_a, thr_b, boost=boost) + return remove_pad(result), True + +def lerespp(img, res=512, thr_a=0, thr_b=0, boost=True, **kwargs): + img, remove_pad = resize_image_with_pad(img, res) + model_leres = apply_leres + result = model_leres(img, thr_a, thr_b, boost=boost) + return remove_pad(result), True + + +def pidinet(img, res=512, **kwargs): + img, remove_pad = resize_image_with_pad(img, res) + model_pidinet = apply_pidinet + result = model_pidinet(img) + return remove_pad(result), True + + +def pidinet_ts(img, res=512, **kwargs): + img, remove_pad = resize_image_with_pad(img, res) + model_pidinet = apply_pidinet + result = model_pidinet(img, apply_fliter=True) + return remove_pad(result), True + + +def pidinet_safe(img, res=512, **kwargs): + img, remove_pad = resize_image_with_pad(img, res) + model_pidinet = apply_pidinet + result = model_pidinet(img, is_safe=True) + return remove_pad(result), True + + + +def zoe_depth(img, res=512, **kwargs): + img, remove_pad = resize_image_with_pad(img, res) + model_zoe_depth = ZoeDetector() + result = model_zoe_depth(img) + return remove_pad(result), True + + +preprocessors_dict = { + 'lineart_realistic': lineart, + 'lineart_coarse': lineart_coarse, + 'lineart_standard': lineart_standard, + 'lineart_anime': lineart_anime, + 'lineart_anime_denoise': lineart_anime_denoise, + 'softedge_hed': hed, + 'softedge_hedsafe': hed_safe, + 'softedge_pidinet': pidinet, + 'softedge_pidsafe': pidinet_safe, + 'canny': canny, + 'depth_leres': leres, + 'depth_leres++': lerespp, + 'depth_midas': midas, + 'depth_zoe': zoe_depth, +} + +def pixel_perfect_process(input_image, p_name): + raw_H, raw_W, _ = input_image.shape + preprocessor_resolution = raw_H + detected_map, _ = preprocessors_dict[p_name](input_image, res=preprocessor_resolution) + return detected_map diff --git a/RAVE-main/utils/video_grid_utils.py b/RAVE-main/utils/video_grid_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..64ddc14e9c4d664044c9d5f3325d5423f28143f2 --- /dev/null +++ b/RAVE-main/utils/video_grid_utils.py @@ -0,0 +1,95 @@ +import os +import cv2 as cv +import numpy as np +import torch +import imageio +import glob + +from torchvision.utils import make_grid +from torchvision.transforms import transforms +from torchvision.transforms.functional import to_pil_image + + +def prepare_video_to_grid(path, grid_count, grid_size, pad): + + video = cv.VideoCapture(path) + if grid_count == -1: + frame_count = int(video.get(cv.CAP_PROP_FRAME_COUNT)) + else: + frame_count = min(grid_count * pad * grid_size**2, int(video.get(cv.CAP_PROP_FRAME_COUNT))) + + transform = transforms.Compose([ + transforms.ConvertImageDtype(dtype=torch.float), + ]) + success = True + + max_grid_area = 512*512* 
grid_size**2 + grids = [] + frames = [] + + total_grid = grid_size**2 + for idx in range(frame_count): + success,image = video.read() + assert success, 'Video read failed' + if idx % pad == 0: + rgb_img = cv.cvtColor(image, cv.COLOR_BGR2RGB) + rgb_img = np.transpose(rgb_img, (2, 0, 1)) + frames.append(transform(torch.from_numpy(rgb_img))) + + if len(frames) == total_grid: + grid = make_grid(frames, nrow=grid_size, padding=0) + pil_image = (to_pil_image(grid)) + w,h = pil_image.size + a = float(np.sqrt((w*h/max_grid_area))) + w1 = int((w//a)//(grid_size*8))*grid_size*8 + h1 = int((h//a)//(grid_size*8))*grid_size*8 + pil_image= pil_image.resize((w1, h1)) + grids.append(pil_image) + + frames = [] + + return grids # list of frames + +def prepare_video_to_frames(path, grid_count, grid_size, pad, format='gif'): + video = cv.VideoCapture(path) + + if grid_count == -1: + frame_count = int(video.get(cv.CAP_PROP_FRAME_COUNT)) + + else: + frame_count = min(grid_count * pad * grid_size**2, int(video.get(cv.CAP_PROP_FRAME_COUNT))) + + frame_idx = 0 + frames = [] + frames_grid = [] + + dir_path = os.path.dirname(path) + video_name = path.split('/')[-1].split('.')[0] + os.makedirs(os.path.join(dir_path, 'frames/', video_name), exist_ok=True) + os.makedirs(os.path.join(dir_path, 'video/', video_name), exist_ok=True) + + for idx in range(frame_count): + success,image = video.read() + assert success, 'Video read failed' + if idx % pad == 0: + frames.append(image) + + for frame in frames[:(len(frames)//(grid_size**2)*(grid_size**2))]: + frames_grid.append(frame) + cv.imwrite(os.path.join(dir_path, 'frames/', video_name, f'{str(frame_idx).zfill(5)}.png'), frame) + frame_idx += 1 + + + if format == 'gif': + with imageio.get_writer(os.path.join(dir_path, 'video/', f'{video_name}_fc{frame_idx}_pad{pad}_grid{grid_size}.gif'), mode='I') as writer: + for frame in frames_grid: + frame = cv.cvtColor(frame, cv.COLOR_BGR2RGB) + writer.append_data(frame) + elif format == 'mp4': + image_files = sorted(glob.glob(os.path.join(dir_path, 'frames/', video_name, '*.png'))) + images = [imageio.imread(image_file) for image_file in image_files] + save_file_path = os.path.join(dir_path, 'video/', f'{video_name}_fc{frame_idx}_pad{pad}_grid{grid_size}.mp4') + imageio.mimsave(save_file_path, images, fps=20) + + return frame_idx # number of frames + diff --git a/RAVE-main/version.py b/RAVE-main/version.py new file mode 100644 index 0000000000000000000000000000000000000000..87e8a61a8328dbf0c865ec8a56c1c256fd463aa1 --- /dev/null +++ b/RAVE-main/version.py @@ -0,0 +1,2 @@ +import imageio_ffmpeg +print(imageio_ffmpeg.__version__) \ No newline at end of file diff --git a/RAVE-main/webui.py b/RAVE-main/webui.py new file mode 100644 index 0000000000000000000000000000000000000000..31999c8e4c9af5b946cc738a4b919155b50b1cba --- /dev/null +++ b/RAVE-main/webui.py @@ -0,0 +1,298 @@ +import gradio as gr +import cv2 +import os +import torch +import argparse +import os +import sys +import yaml +import datetime +sys.path.append(os.path.dirname(os.getcwd())) +from pipelines.sd_controlnet_rave import RAVE +from pipelines.sd_multicontrolnet_rave import RAVE_MultiControlNet +import shutil +import subprocess +import utils.constants as const +import utils.video_grid_utils as vgu +import warnings +warnings.filterwarnings("ignore") +import pprint +import glob + + +def init_device(): + device_name = 'cuda' if torch.cuda.is_available() else 'cpu' + device = torch.device(device_name) + return device + +def init_paths(input_ns): + if input_ns.save_folder == None 
or input_ns.save_folder == '': + input_ns.save_folder = input_ns.video_name + else: + input_ns.save_folder = os.path.join(input_ns.save_folder, input_ns.video_name) + save_dir = os.path.join(const.OUTPUT_PATH, input_ns.save_folder) + os.makedirs(save_dir, exist_ok=True) + save_idx = max([int(x[-5:]) for x in os.listdir(save_dir)])+1 if os.listdir(save_dir) != [] else 0 + input_ns.save_path = os.path.join(save_dir, f'{input_ns.positive_prompts}-{str(save_idx).zfill(5)}') + + + if '-' in input_ns.preprocess_name: + input_ns.hf_cn_path = [const.PREPROCESSOR_DICT[i] for i in input_ns.preprocess_name.split('-')] + else: + input_ns.hf_cn_path = const.PREPROCESSOR_DICT[input_ns.preprocess_name] + input_ns.hf_path = "runwayml/stable-diffusion-v1-5" + + input_ns.inverse_path = os.path.join(const.GENERATED_DATA_PATH, 'inverses', input_ns.video_name, f'{input_ns.preprocess_name}_{input_ns.model_id}_{input_ns.grid_size}x{input_ns.grid_size}_{input_ns.pad}') + input_ns.control_path = os.path.join(const.GENERATED_DATA_PATH, 'controls', input_ns.video_name, f'{input_ns.preprocess_name}_{input_ns.grid_size}x{input_ns.grid_size}_{input_ns.pad}') + os.makedirs(input_ns.control_path, exist_ok=True) + os.makedirs(input_ns.inverse_path, exist_ok=True) + os.makedirs(input_ns.save_path, exist_ok=True) + return input_ns + +def install_civitai_model(model_id): + full_path = os.path.join(const.CWD, 'CIVIT_AI', 'diffusers_models', model_id, '*') + if len(glob.glob(full_path)) > 0: + full_path = glob.glob(full_path)[0] + return full_path + install_path = os.path.join(const.CWD, 'CIVIT_AI', 'safetensors') + install_path_model = os.path.join(const.CWD, 'CIVIT_AI', 'safetensors', model_id) + diffusers_path = os.path.join(const.CWD, 'CIVIT_AI', 'diffusers_models', model_id) + convert_py_path = os.path.join(const.CWD, 'CIVIT_AI', 'convert.py') + os.makedirs(install_path, exist_ok=True) + os.makedirs(diffusers_path, exist_ok=True) + subprocess.run(f'wget https://civitai.com/api/download/models/{model_id} --content-disposition --directory {install_path_model}'.split()) + model_name = glob.glob(os.path.join(install_path, model_id, '*'))[0] + model_name2 = os.path.basename(glob.glob(os.path.join(install_path, model_id, '*'))[0]).replace('.safetensors', '') + diffusers_path_model_name = os.path.join(const.CWD, 'CIVIT_AI', 'diffusers_models', model_id, model_name2) + print(model_name) + subprocess.run(f'python {convert_py_path} --checkpoint_path {model_name} --dump_path {diffusers_path_model_name} --from_safetensors'.split()) + subprocess.run(f'rm -rf {install_path}'.split()) + return diffusers_path_model_name + +def run(*args): + list_of_inputs = [x for x in args] + input_ns = argparse.Namespace(**{}) + input_ns.video_path = list_of_inputs[0] # video_path + input_ns.video_name = os.path.basename(input_ns.video_path).replace('.mp4', '').replace('.gif', '') + input_ns.preprocess_name = list_of_inputs[1] + + input_ns.batch_size = list_of_inputs[2] + input_ns.batch_size_vae = list_of_inputs[3] + + input_ns.cond_step_start = list_of_inputs[4] + input_ns.controlnet_conditioning_scale = list_of_inputs[5] + input_ns.controlnet_guidance_end = list_of_inputs[6] + input_ns.controlnet_guidance_start = list_of_inputs[7] + + input_ns.give_control_inversion = list_of_inputs[8] + + input_ns.grid_size = list_of_inputs[9] + input_ns.sample_size = list_of_inputs[10] + input_ns.pad = list_of_inputs[11] + input_ns.guidance_scale = list_of_inputs[12] + input_ns.inversion_prompt = list_of_inputs[13] + + input_ns.is_ddim_inversion = 
list_of_inputs[14] + input_ns.is_shuffle = list_of_inputs[15] + + input_ns.negative_prompts = list_of_inputs[16] + input_ns.num_inference_steps = list_of_inputs[17] + input_ns.num_inversion_step = list_of_inputs[18] + input_ns.positive_prompts = list_of_inputs[19] + input_ns.save_folder = list_of_inputs[20] + + input_ns.seed = list_of_inputs[21] + input_ns.model_id = const.MODEL_IDS[list_of_inputs[22]] + # input_ns.width = list_of_inputs[23] + # input_ns.height = list_of_inputs[24] + # input_ns.original_size = list_of_inputs[25] + diffusers_model_path = os.path.join(const.CWD, 'CIVIT_AI', 'diffusers_models') + os.makedirs(diffusers_model_path, exist_ok=True) + if 'model_id' not in list(input_ns.__dict__.keys()): + input_ns.model_id = "None" + + if str(input_ns.model_id) != 'None': + input_ns.model_id = install_civitai_model(input_ns.model_id) + + + device = init_device() + input_ns = init_paths(input_ns) + + input_ns.image_pil_list = vgu.prepare_video_to_grid(input_ns.video_path, input_ns.sample_size, input_ns.grid_size, input_ns.pad) + + print(input_ns.video_path) + input_ns.sample_size = len(input_ns.image_pil_list) + print(f'Frame count: {len(input_ns.image_pil_list)}') + + controlnet_class = RAVE_MultiControlNet if '-' in str(input_ns.controlnet_conditioning_scale) else RAVE + + + CN = controlnet_class(device) + + CN.init_models(input_ns.hf_cn_path, input_ns.hf_path, input_ns.preprocess_name, input_ns.model_id) + + input_dict = vars(input_ns) + pp = pprint.PrettyPrinter(indent=4) + pp.pprint(input_dict) + yaml_dict = {k:v for k,v in input_dict.items() if k != 'image_pil_list'} + + start_time = datetime.datetime.now() + if '-' in str(input_ns.controlnet_conditioning_scale): + res_vid, control_vid_1, control_vid_2 = CN(input_dict) + else: + res_vid, control_vid = CN(input_dict) + end_time = datetime.datetime.now() + save_name = f"{'-'.join(input_ns.positive_prompts.split())}_cstart-{input_ns.controlnet_guidance_start}_gs-{input_ns.guidance_scale}_pre-{'-'.join((input_ns.preprocess_name.replace('-','+').split('_')))}_cscale-{input_ns.controlnet_conditioning_scale}_grid-{input_ns.grid_size}_pad-{input_ns.pad}_model-{os.path.basename(input_ns.model_id)}" + res_vid[0].save(os.path.join(input_ns.save_path, f'{save_name}.gif'), save_all=True, append_images=res_vid[1:], loop=10000) + control_vid[0].save(os.path.join(input_ns.save_path, f'control_{save_name}.gif'), save_all=True, append_images=control_vid[1:], optimize=False, loop=10000) + + yaml_dict['total_time'] = (end_time - start_time).total_seconds() + yaml_dict['total_number_of_frames'] = len(res_vid) + yaml_dict['sec_per_frame'] = yaml_dict['total_time']/yaml_dict['total_number_of_frames'] + with open(os.path.join(input_ns.save_path, 'config.yaml'), 'w') as yaml_file: + yaml.dump(yaml_dict, yaml_file) + + return os.path.join(input_ns.save_path, f'{save_name}.gif'), os.path.join(input_ns.save_path, f'control_{save_name}.gif') + + + +block = gr.Blocks().queue() +with block: + with gr.Row(): + gr.Markdown('## RAVE') + with gr.Row(): + with gr.Column(): + # input_path = gr.Video(label='Input Video', + # sources='upload', + # format='mp4', + # visible=True) + with gr.Row(): + input_path = gr.File(label='Upload Input Video', file_types=['.mp4'], scale=1) + + inputs = gr.Video(label='Input Video', + format='mp4', + visible=True, + interactive=False, + scale=5) + input_path.upload(lambda x:x, inputs=[input_path], outputs=[inputs]) + + with gr.Row(): + positive_prompts = gr.Textbox(label='Positive prompts') + negative_prompts = 
gr.Textbox(label='Negative prompts') + with gr.Row(): + preprocess_name = gr.Dropdown(const.PREPROCESSOR_DICT.keys(), + label='Control type', + value='depth_zoe') + guidance_scale = gr.Slider(label='Guidance scale', + minimum=0, + maximum=40, + step=0.1, + value=7.5) + + with gr.Row(): + inversion_prompt = gr.Textbox(label='Inversion prompt') + seed = gr.Slider(label='Seed', + minimum=0, + maximum=2147483647, + step=1, + value=0, + randomize=True) + + with gr.Row(): + model_id = gr.Dropdown(const.MODEL_IDS, + label='Model id', + value='SD 1.5') + save_folder = gr.Textbox(label='Save folder') + + run_button = gr.Button(value='Run All') + with gr.Accordion('Configuration', + open=False): + with gr.Row(): + batch_size = gr.Slider(label='Batch size', + minimum=1, + maximum=36, + value=4, + step=1) + batch_size_vae = gr.Slider(label='Batch size of VAE', + minimum=1, + maximum=36, + value=1, + step=1) + + with gr.Row(): + is_ddim_inversion = gr.Checkbox( + label='Use DDIM Inversion', + value=True) + is_shuffle = gr.Checkbox( + label='Shuffle', + value=True) + + with gr.Row(): + num_inference_steps = gr.Slider(label='Number of inference steps', + minimum=1, + maximum=100, + value=20, + step=1) + num_inversion_step = gr.Slider(label='Number of inversion steps', + minimum=1, + maximum=100, + value=20, + step=1) + cond_step_start = gr.Slider(label='Conditioning step start', + minimum=0, + maximum=1.0, + value=0.0, + step=0.1) + + with gr.Row(): + controlnet_conditioning_scale = gr.Slider(label='ControlNet conditioning scale', + minimum=0.0, + maximum=1.0, + value=1.0, + step=0.01) + controlnet_guidance_end = gr.Slider(label='ControlNet guidance end', + minimum=0.0, + maximum=1.0, + value=1.0, + step=0.01) + controlnet_guidance_start = gr.Slider(label='ControlNet guidance start', + minimum=0.0, + maximum=1.0, + value=0.0, + step=0.01) + give_control_inversion = gr.Checkbox( + label='Give control during inversion', + value=True) + + with gr.Row(): + grid_size = gr.Slider(label='Grid size', + minimum=1, + maximum=10, + value=3, + step=1) + sample_size = gr.Slider(label='Sample size', + minimum=-1, + maximum=100, + value=-1, + step=1) + pad = gr.Slider(label='Pad', + minimum=1, + maximum=10, + value=1, + step=1) + + with gr.Column(): + with gr.Row(): + result_video = gr.Image(label='Edited Video', + interactive=False) + control_video = gr.Image(label='Control Video', + interactive=False) + + inputs = [input_path, preprocess_name, batch_size, batch_size_vae, cond_step_start, controlnet_conditioning_scale, controlnet_guidance_end, controlnet_guidance_start, give_control_inversion, grid_size, sample_size, pad, guidance_scale, inversion_prompt, is_ddim_inversion, is_shuffle, negative_prompts, num_inference_steps, num_inversion_step, positive_prompts, save_folder, seed, model_id] + + run_button.click(fn=run, + inputs=inputs, + outputs=[result_video, control_video]) + + +block.launch(share=True) \ No newline at end of file
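
A minimal usage sketch (not part of the patch) of the grid helpers added in RAVE-main/utils/feature_utils.py: for a square grid, flatten_grid lays the grid rows side by side along the width axis and unflatten_grid restores the original layout, so the pair round-trips exactly. The import path and tensor sizes below are illustrative assumptions, not taken from the repository.

    import torch
    from utils.feature_utils import flatten_grid, unflatten_grid  # assumes RAVE-main is on sys.path

    grid_size = [3, 3]                      # 3x3 frame grid, matching the webui default 'Grid size'
    x = torch.randn(1, 4, 3 * 64, 3 * 64)   # nine 64x64 latent frames tiled into one grid tensor

    flat = flatten_grid(x, grid_size)       # 1 x 4 x 64 x 576: the three grid rows placed side by side
    restored = unflatten_grid(flat, grid_size)

    assert restored.shape == x.shape
    assert torch.equal(restored, x)         # square grids round-trip exactly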