Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- CCEdit-main/scripts/__pycache__/__init__.cpython-39.pyc +0 -0
- CCEdit-main/scripts/demo/__init__.py +0 -0
- CCEdit-main/scripts/demo/detect.py +157 -0
- CCEdit-main/scripts/demo/sampling.py +328 -0
- CCEdit-main/scripts/demo/sampling_command.py +152 -0
- CCEdit-main/scripts/demo/streamlit_helpers.py +668 -0
- CCEdit-main/scripts/sampling/__init__.py +0 -0
- CCEdit-main/scripts/sampling/__pycache__/__init__.cpython-39.pyc +0 -0
- CCEdit-main/scripts/sampling/__pycache__/util.cpython-39.pyc +0 -0
- CCEdit-main/scripts/sampling/pnp_generate_config.py +52 -0
- CCEdit-main/scripts/sampling/sampling_image.py +168 -0
- CCEdit-main/scripts/sampling/sampling_tv2v.py +209 -0
- CCEdit-main/scripts/sampling/sampling_tv2v_ref.py +550 -0
- CCEdit-main/scripts/sampling/util.py +813 -0
- CCEdit-main/scripts/tools/extract_centerframe.py +110 -0
- CCEdit-main/scripts/tools/pnp_generate_config.py +52 -0
- CCEdit-main/scripts/util/__init__.py +0 -0
- CCEdit-main/scripts/util/detection/__init__.py +0 -0
- CCEdit-main/scripts/util/detection/nsfw_and_watermark_dectection.py +104 -0
- CCEdit-main/sgm/modules/diffusionmodules/__init__.py +7 -0
- CCEdit-main/sgm/modules/diffusionmodules/__pycache__/controlmodel.cpython-39.pyc +0 -0
- CCEdit-main/sgm/modules/diffusionmodules/__pycache__/denoiser.cpython-39.pyc +0 -0
- CCEdit-main/sgm/modules/diffusionmodules/__pycache__/denoiser_weighting.cpython-39.pyc +0 -0
- CCEdit-main/sgm/modules/diffusionmodules/__pycache__/discretizer.cpython-39.pyc +0 -0
- CCEdit-main/sgm/modules/diffusionmodules/__pycache__/guiders.cpython-39.pyc +0 -0
- CCEdit-main/sgm/modules/diffusionmodules/__pycache__/loss.cpython-39.pyc +0 -0
- CCEdit-main/sgm/modules/diffusionmodules/__pycache__/sampling.cpython-39.pyc +0 -0
- CCEdit-main/sgm/modules/diffusionmodules/__pycache__/util.cpython-39.pyc +0 -0
- CCEdit-main/sgm/modules/diffusionmodules/__pycache__/wrappers.cpython-39.pyc +0 -0
- FateZero-main/data/attribute/swan_swarov/00005.png +3 -0
- FateZero-main/data/shape/man_skate/00007.png +3 -0
- FateZero-main/data/shape/swan_swarov/00002.png +3 -0
- RAVE-main/.gitignore +169 -0
- RAVE-main/LICENSE +21 -0
- RAVE-main/README.md +183 -0
- RAVE-main/annotator/annotator_path.py +17 -0
- RAVE-main/annotator/util.py +62 -0
- RAVE-main/annotator/zoe/zoedepth/models/__init__.py +24 -0
- RAVE-main/annotator/zoe/zoedepth/models/base_models/__init__.py +24 -0
- RAVE-main/annotator/zoe/zoedepth/models/base_models/midas.py +379 -0
- RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/.gitignore +110 -0
- RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/Dockerfile +29 -0
- RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/LICENSE +21 -0
- RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/README.md +259 -0
- RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/environment.yaml +16 -0
- RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/hubconf.py +435 -0
- RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/input/.placeholder +0 -0
- RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/output/.placeholder +0 -0
- RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/run.py +277 -0
- RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/utils.py +199 -0
CCEdit-main/scripts/__pycache__/__init__.cpython-39.pyc
ADDED
|
Binary file (140 Bytes). View file
|
|
|
CCEdit-main/scripts/demo/__init__.py
ADDED
|
File without changes
|
CCEdit-main/scripts/demo/detect.py
ADDED
|
@@ -0,0 +1,157 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
|
| 3 |
+
import cv2
|
| 4 |
+
import numpy as np
|
| 5 |
+
|
| 6 |
+
try:
|
| 7 |
+
from imwatermark import WatermarkDecoder
|
| 8 |
+
except ImportError as e:
|
| 9 |
+
try:
|
| 10 |
+
# Assume some of the other dependencies such as torch are not fulfilled
|
| 11 |
+
# import file without loading unnecessary libraries.
|
| 12 |
+
import importlib.util
|
| 13 |
+
import sys
|
| 14 |
+
|
| 15 |
+
spec = importlib.util.find_spec("imwatermark.maxDct")
|
| 16 |
+
assert spec is not None
|
| 17 |
+
maxDct = importlib.util.module_from_spec(spec)
|
| 18 |
+
sys.modules["maxDct"] = maxDct
|
| 19 |
+
spec.loader.exec_module(maxDct)
|
| 20 |
+
|
| 21 |
+
class WatermarkDecoder(object):
|
| 22 |
+
"""A minimal version of
|
| 23 |
+
https://github.com/ShieldMnt/invisible-watermark/blob/main/imwatermark/watermark.py
|
| 24 |
+
to only reconstruct bits using dwtDct"""
|
| 25 |
+
|
| 26 |
+
def __init__(self, wm_type="bytes", length=0):
|
| 27 |
+
assert wm_type == "bits", "Only bits defined in minimal import"
|
| 28 |
+
self._wmType = wm_type
|
| 29 |
+
self._wmLen = length
|
| 30 |
+
|
| 31 |
+
def reconstruct(self, bits):
|
| 32 |
+
if len(bits) != self._wmLen:
|
| 33 |
+
raise RuntimeError("bits are not matched with watermark length")
|
| 34 |
+
|
| 35 |
+
return bits
|
| 36 |
+
|
| 37 |
+
def decode(self, cv2Image, method="dwtDct", **configs):
|
| 38 |
+
(r, c, channels) = cv2Image.shape
|
| 39 |
+
if r * c < 256 * 256:
|
| 40 |
+
raise RuntimeError("image too small, should be larger than 256x256")
|
| 41 |
+
|
| 42 |
+
bits = []
|
| 43 |
+
assert method == "dwtDct"
|
| 44 |
+
embed = maxDct.EmbedMaxDct(watermarks=[], wmLen=self._wmLen, **configs)
|
| 45 |
+
bits = embed.decode(cv2Image)
|
| 46 |
+
return self.reconstruct(bits)
|
| 47 |
+
|
| 48 |
+
except:
|
| 49 |
+
raise e
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
# A fixed 48-bit message that was choosen at random
|
| 53 |
+
# WATERMARK_MESSAGE = 0xB3EC907BB19E
|
| 54 |
+
WATERMARK_MESSAGE = 0b101100111110110010010000011110111011000110011110
|
| 55 |
+
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
|
| 56 |
+
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
|
| 57 |
+
MATCH_VALUES = [
|
| 58 |
+
[27, "No watermark detected"],
|
| 59 |
+
[33, "Partial watermark match. Cannot determine with certainty."],
|
| 60 |
+
[
|
| 61 |
+
35,
|
| 62 |
+
(
|
| 63 |
+
"Likely watermarked. In our test 0.02% of real images were "
|
| 64 |
+
'falsely detected as "Likely watermarked"'
|
| 65 |
+
),
|
| 66 |
+
],
|
| 67 |
+
[
|
| 68 |
+
49,
|
| 69 |
+
(
|
| 70 |
+
"Very likely watermarked. In our test no real images were "
|
| 71 |
+
'falsely detected as "Very likely watermarked"'
|
| 72 |
+
),
|
| 73 |
+
],
|
| 74 |
+
]
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
class GetWatermarkMatch:
|
| 78 |
+
def __init__(self, watermark):
|
| 79 |
+
self.watermark = watermark
|
| 80 |
+
self.num_bits = len(self.watermark)
|
| 81 |
+
self.decoder = WatermarkDecoder("bits", self.num_bits)
|
| 82 |
+
|
| 83 |
+
def __call__(self, x: np.ndarray) -> np.ndarray:
|
| 84 |
+
"""
|
| 85 |
+
Detects the number of matching bits the predefined watermark with one
|
| 86 |
+
or multiple images. Images should be in cv2 format, e.g. h x w x c.
|
| 87 |
+
|
| 88 |
+
Args:
|
| 89 |
+
x: ([B], h w, c) in range [0, 255]
|
| 90 |
+
|
| 91 |
+
Returns:
|
| 92 |
+
number of matched bits ([B],)
|
| 93 |
+
"""
|
| 94 |
+
squeeze = len(x.shape) == 3
|
| 95 |
+
if squeeze:
|
| 96 |
+
x = x[None, ...]
|
| 97 |
+
x = np.flip(x, axis=-1)
|
| 98 |
+
|
| 99 |
+
bs = x.shape[0]
|
| 100 |
+
detected = np.empty((bs, self.num_bits), dtype=bool)
|
| 101 |
+
for k in range(bs):
|
| 102 |
+
detected[k] = self.decoder.decode(x[k], "dwtDct")
|
| 103 |
+
result = np.sum(detected == self.watermark, axis=-1)
|
| 104 |
+
if squeeze:
|
| 105 |
+
return result[0]
|
| 106 |
+
else:
|
| 107 |
+
return result
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
get_watermark_match = GetWatermarkMatch(WATERMARK_BITS)
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
if __name__ == "__main__":
|
| 114 |
+
parser = argparse.ArgumentParser()
|
| 115 |
+
parser.add_argument(
|
| 116 |
+
"filename",
|
| 117 |
+
nargs="+",
|
| 118 |
+
type=str,
|
| 119 |
+
help="Image files to check for watermarks",
|
| 120 |
+
)
|
| 121 |
+
opts = parser.parse_args()
|
| 122 |
+
|
| 123 |
+
print(
|
| 124 |
+
"""
|
| 125 |
+
This script tries to detect watermarked images. Please be aware of
|
| 126 |
+
the following:
|
| 127 |
+
- As the watermark is supposed to be invisible, there is the risk that
|
| 128 |
+
watermarked images may not be detected.
|
| 129 |
+
- To maximize the chance of detection make sure that the image has the same
|
| 130 |
+
dimensions as when the watermark was applied (most likely 1024x1024
|
| 131 |
+
or 512x512).
|
| 132 |
+
- Specific image manipulation may drastically decrease the chance that
|
| 133 |
+
watermarks can be detected.
|
| 134 |
+
- There is also the chance that an image has the characteristics of the
|
| 135 |
+
watermark by chance.
|
| 136 |
+
- The watermark script is public, anybody may watermark any images, and
|
| 137 |
+
could therefore claim it to be generated.
|
| 138 |
+
- All numbers below are based on a test using 10,000 images without any
|
| 139 |
+
modifications after applying the watermark.
|
| 140 |
+
"""
|
| 141 |
+
)
|
| 142 |
+
|
| 143 |
+
for fn in opts.filename:
|
| 144 |
+
image = cv2.imread(fn)
|
| 145 |
+
if image is None:
|
| 146 |
+
print(f"Couldn't read {fn}. Skipping")
|
| 147 |
+
continue
|
| 148 |
+
|
| 149 |
+
num_bits = get_watermark_match(image)
|
| 150 |
+
k = 0
|
| 151 |
+
while num_bits > MATCH_VALUES[k][0]:
|
| 152 |
+
k += 1
|
| 153 |
+
print(
|
| 154 |
+
f"{fn}: {MATCH_VALUES[k][1]}",
|
| 155 |
+
f"Bits that matched the watermark {num_bits} from {len(WATERMARK_BITS)}\n",
|
| 156 |
+
sep="\n\t",
|
| 157 |
+
)
|
CCEdit-main/scripts/demo/sampling.py
ADDED
|
@@ -0,0 +1,328 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pytorch_lightning import seed_everything
|
| 2 |
+
from scripts.demo.streamlit_helpers import *
|
| 3 |
+
from scripts.util.detection.nsfw_and_watermark_dectection import DeepFloydDataFiltering
|
| 4 |
+
|
| 5 |
+
SAVE_PATH = "outputs/demo/txt2img/"
|
| 6 |
+
|
| 7 |
+
SD_XL_BASE_RATIOS = {
|
| 8 |
+
"0.5": (704, 1408),
|
| 9 |
+
"0.52": (704, 1344),
|
| 10 |
+
"0.57": (768, 1344),
|
| 11 |
+
"0.6": (768, 1280),
|
| 12 |
+
"0.68": (832, 1216),
|
| 13 |
+
"0.72": (832, 1152),
|
| 14 |
+
"0.78": (896, 1152),
|
| 15 |
+
"0.82": (896, 1088),
|
| 16 |
+
"0.88": (960, 1088),
|
| 17 |
+
"0.94": (960, 1024),
|
| 18 |
+
"1.0": (1024, 1024),
|
| 19 |
+
"1.07": (1024, 960),
|
| 20 |
+
"1.13": (1088, 960),
|
| 21 |
+
"1.21": (1088, 896),
|
| 22 |
+
"1.29": (1152, 896),
|
| 23 |
+
"1.38": (1152, 832),
|
| 24 |
+
"1.46": (1216, 832),
|
| 25 |
+
"1.67": (1280, 768),
|
| 26 |
+
"1.75": (1344, 768),
|
| 27 |
+
"1.91": (1344, 704),
|
| 28 |
+
"2.0": (1408, 704),
|
| 29 |
+
"2.09": (1472, 704),
|
| 30 |
+
"2.4": (1536, 640),
|
| 31 |
+
"2.5": (1600, 640),
|
| 32 |
+
"2.89": (1664, 576),
|
| 33 |
+
"3.0": (1728, 576),
|
| 34 |
+
}
|
| 35 |
+
|
| 36 |
+
VERSION2SPECS = {
|
| 37 |
+
"SD-XL base": {
|
| 38 |
+
"H": 1024,
|
| 39 |
+
"W": 1024,
|
| 40 |
+
"C": 4,
|
| 41 |
+
"f": 8,
|
| 42 |
+
"is_legacy": False,
|
| 43 |
+
"config": "configs/inference/sd_xl_base.yaml",
|
| 44 |
+
"ckpt": "checkpoints/sd_xl_base_0.9.safetensors",
|
| 45 |
+
"is_guided": True,
|
| 46 |
+
},
|
| 47 |
+
"sd-2.1": {
|
| 48 |
+
"H": 512,
|
| 49 |
+
"W": 512,
|
| 50 |
+
"C": 4,
|
| 51 |
+
"f": 8,
|
| 52 |
+
"is_legacy": True,
|
| 53 |
+
"config": "configs/inference/sd_2_1.yaml",
|
| 54 |
+
"ckpt": "checkpoints/v2-1_512-ema-pruned.safetensors",
|
| 55 |
+
"is_guided": True,
|
| 56 |
+
},
|
| 57 |
+
"sd-2.1-768": {
|
| 58 |
+
"H": 768,
|
| 59 |
+
"W": 768,
|
| 60 |
+
"C": 4,
|
| 61 |
+
"f": 8,
|
| 62 |
+
"is_legacy": True,
|
| 63 |
+
"config": "configs/inference/sd_2_1_768.yaml",
|
| 64 |
+
"ckpt": "checkpoints/v2-1_768-ema-pruned.safetensors",
|
| 65 |
+
},
|
| 66 |
+
"SDXL-Refiner": {
|
| 67 |
+
"H": 1024,
|
| 68 |
+
"W": 1024,
|
| 69 |
+
"C": 4,
|
| 70 |
+
"f": 8,
|
| 71 |
+
"is_legacy": True,
|
| 72 |
+
"config": "configs/inference/sd_xl_refiner.yaml",
|
| 73 |
+
"ckpt": "checkpoints/sd_xl_refiner_0.9.safetensors",
|
| 74 |
+
"is_guided": True,
|
| 75 |
+
},
|
| 76 |
+
}
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def load_img(display=True, key=None, device="cuda"):
|
| 80 |
+
image = get_interactive_image(key=key)
|
| 81 |
+
if image is None:
|
| 82 |
+
return None
|
| 83 |
+
if display:
|
| 84 |
+
st.image(image)
|
| 85 |
+
w, h = image.size
|
| 86 |
+
print(f"loaded input image of size ({w}, {h})")
|
| 87 |
+
width, height = map(
|
| 88 |
+
lambda x: x - x % 64, (w, h)
|
| 89 |
+
) # resize to integer multiple of 64
|
| 90 |
+
image = image.resize((width, height))
|
| 91 |
+
image = np.array(image.convert("RGB"))
|
| 92 |
+
image = image[None].transpose(0, 3, 1, 2)
|
| 93 |
+
image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
|
| 94 |
+
return image.to(device)
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def run_txt2img(
|
| 98 |
+
state, version, version_dict, is_legacy=False, return_latents=False, filter=None
|
| 99 |
+
):
|
| 100 |
+
if version == "SD-XL base":
|
| 101 |
+
ratio = st.sidebar.selectbox("Ratio:", list(SD_XL_BASE_RATIOS.keys()), 10)
|
| 102 |
+
W, H = SD_XL_BASE_RATIOS[ratio]
|
| 103 |
+
else:
|
| 104 |
+
H = st.sidebar.number_input(
|
| 105 |
+
"H", value=version_dict["H"], min_value=64, max_value=2048
|
| 106 |
+
)
|
| 107 |
+
W = st.sidebar.number_input(
|
| 108 |
+
"W", value=version_dict["W"], min_value=64, max_value=2048
|
| 109 |
+
)
|
| 110 |
+
C = version_dict["C"]
|
| 111 |
+
F = version_dict["f"]
|
| 112 |
+
|
| 113 |
+
init_dict = {
|
| 114 |
+
"orig_width": W,
|
| 115 |
+
"orig_height": H,
|
| 116 |
+
"target_width": W,
|
| 117 |
+
"target_height": H,
|
| 118 |
+
}
|
| 119 |
+
value_dict = init_embedder_options(
|
| 120 |
+
get_unique_embedder_keys_from_conditioner(state["model"].conditioner),
|
| 121 |
+
init_dict,
|
| 122 |
+
prompt=prompt,
|
| 123 |
+
negative_prompt=negative_prompt,
|
| 124 |
+
)
|
| 125 |
+
num_rows, num_cols, sampler = init_sampling(
|
| 126 |
+
use_identity_guider=not version_dict["is_guided"]
|
| 127 |
+
)
|
| 128 |
+
|
| 129 |
+
num_samples = num_rows * num_cols
|
| 130 |
+
|
| 131 |
+
if st.button("Sample"):
|
| 132 |
+
st.write(f"**Model I:** {version}")
|
| 133 |
+
out = do_sample(
|
| 134 |
+
state["model"],
|
| 135 |
+
sampler,
|
| 136 |
+
value_dict,
|
| 137 |
+
num_samples,
|
| 138 |
+
H,
|
| 139 |
+
W,
|
| 140 |
+
C,
|
| 141 |
+
F,
|
| 142 |
+
force_uc_zero_embeddings=["txt"] if not is_legacy else [],
|
| 143 |
+
return_latents=return_latents,
|
| 144 |
+
filter=filter,
|
| 145 |
+
)
|
| 146 |
+
return out
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
def run_img2img(
|
| 150 |
+
state, version_dict, is_legacy=False, return_latents=False, filter=None
|
| 151 |
+
):
|
| 152 |
+
img = load_img()
|
| 153 |
+
if img is None:
|
| 154 |
+
return None
|
| 155 |
+
H, W = img.shape[2], img.shape[3]
|
| 156 |
+
|
| 157 |
+
init_dict = {
|
| 158 |
+
"orig_width": W,
|
| 159 |
+
"orig_height": H,
|
| 160 |
+
"target_width": W,
|
| 161 |
+
"target_height": H,
|
| 162 |
+
}
|
| 163 |
+
value_dict = init_embedder_options(
|
| 164 |
+
get_unique_embedder_keys_from_conditioner(state["model"].conditioner),
|
| 165 |
+
init_dict,
|
| 166 |
+
)
|
| 167 |
+
strength = st.number_input(
|
| 168 |
+
"**Img2Img Strength**", value=0.5, min_value=0.0, max_value=1.0
|
| 169 |
+
)
|
| 170 |
+
num_rows, num_cols, sampler = init_sampling(
|
| 171 |
+
img2img_strength=strength,
|
| 172 |
+
use_identity_guider=not version_dict["is_guided"],
|
| 173 |
+
)
|
| 174 |
+
num_samples = num_rows * num_cols
|
| 175 |
+
|
| 176 |
+
if st.button("Sample"):
|
| 177 |
+
out = do_img2img(
|
| 178 |
+
repeat(img, "1 ... -> n ...", n=num_samples),
|
| 179 |
+
state["model"],
|
| 180 |
+
sampler,
|
| 181 |
+
value_dict,
|
| 182 |
+
num_samples,
|
| 183 |
+
force_uc_zero_embeddings=["txt"] if not is_legacy else [],
|
| 184 |
+
return_latents=return_latents,
|
| 185 |
+
filter=filter,
|
| 186 |
+
)
|
| 187 |
+
return out
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
def apply_refiner(
|
| 191 |
+
input,
|
| 192 |
+
state,
|
| 193 |
+
sampler,
|
| 194 |
+
num_samples,
|
| 195 |
+
prompt,
|
| 196 |
+
negative_prompt,
|
| 197 |
+
filter=None,
|
| 198 |
+
):
|
| 199 |
+
init_dict = {
|
| 200 |
+
"orig_width": input.shape[3] * 8,
|
| 201 |
+
"orig_height": input.shape[2] * 8,
|
| 202 |
+
"target_width": input.shape[3] * 8,
|
| 203 |
+
"target_height": input.shape[2] * 8,
|
| 204 |
+
}
|
| 205 |
+
|
| 206 |
+
value_dict = init_dict
|
| 207 |
+
value_dict["prompt"] = prompt
|
| 208 |
+
value_dict["negative_prompt"] = negative_prompt
|
| 209 |
+
|
| 210 |
+
value_dict["crop_coords_top"] = 0
|
| 211 |
+
value_dict["crop_coords_left"] = 0
|
| 212 |
+
|
| 213 |
+
value_dict["aesthetic_score"] = 6.0
|
| 214 |
+
value_dict["negative_aesthetic_score"] = 2.5
|
| 215 |
+
|
| 216 |
+
st.warning(f"refiner input shape: {input.shape}")
|
| 217 |
+
samples = do_img2img(
|
| 218 |
+
input,
|
| 219 |
+
state["model"],
|
| 220 |
+
sampler,
|
| 221 |
+
value_dict,
|
| 222 |
+
num_samples,
|
| 223 |
+
skip_encode=True,
|
| 224 |
+
filter=filter,
|
| 225 |
+
)
|
| 226 |
+
|
| 227 |
+
return samples
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
if __name__ == "__main__":
|
| 231 |
+
st.title("Stable Diffusion")
|
| 232 |
+
version = st.selectbox("Model Version", list(VERSION2SPECS.keys()), 0)
|
| 233 |
+
version_dict = VERSION2SPECS[version]
|
| 234 |
+
mode = st.radio("Mode", ("txt2img", "img2img"), 0)
|
| 235 |
+
st.write("__________________________")
|
| 236 |
+
|
| 237 |
+
if version == "SD-XL base":
|
| 238 |
+
add_pipeline = st.checkbox("Load SDXL-Refiner?", False)
|
| 239 |
+
st.write("__________________________")
|
| 240 |
+
else:
|
| 241 |
+
add_pipeline = False
|
| 242 |
+
|
| 243 |
+
filter = DeepFloydDataFiltering(verbose=False)
|
| 244 |
+
|
| 245 |
+
seed = st.sidebar.number_input("seed", value=42, min_value=0, max_value=int(1e9))
|
| 246 |
+
seed_everything(seed)
|
| 247 |
+
|
| 248 |
+
save_locally, save_path = init_save_locally(os.path.join(SAVE_PATH, version))
|
| 249 |
+
|
| 250 |
+
state = init_st(version_dict)
|
| 251 |
+
if state["msg"]:
|
| 252 |
+
st.info(state["msg"])
|
| 253 |
+
model = state["model"]
|
| 254 |
+
|
| 255 |
+
is_legacy = version_dict["is_legacy"]
|
| 256 |
+
|
| 257 |
+
prompt = st.text_input(
|
| 258 |
+
"prompt",
|
| 259 |
+
"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
|
| 260 |
+
)
|
| 261 |
+
if is_legacy:
|
| 262 |
+
negative_prompt = st.text_input("negative prompt", "")
|
| 263 |
+
else:
|
| 264 |
+
negative_prompt = "" # which is unused
|
| 265 |
+
|
| 266 |
+
if add_pipeline:
|
| 267 |
+
st.write("__________________________")
|
| 268 |
+
|
| 269 |
+
version2 = "SDXL-Refiner"
|
| 270 |
+
st.warning(
|
| 271 |
+
f"Running with {version2} as the second stage model. Make sure to provide (V)RAM :) "
|
| 272 |
+
)
|
| 273 |
+
st.write("**Refiner Options:**")
|
| 274 |
+
|
| 275 |
+
version_dict2 = VERSION2SPECS[version2]
|
| 276 |
+
state2 = init_st(version_dict2)
|
| 277 |
+
st.info(state2["msg"])
|
| 278 |
+
|
| 279 |
+
stage2strength = st.number_input(
|
| 280 |
+
"**Refinement strength**", value=0.3, min_value=0.0, max_value=1.0
|
| 281 |
+
)
|
| 282 |
+
|
| 283 |
+
sampler2 = init_sampling(
|
| 284 |
+
key=2,
|
| 285 |
+
img2img_strength=stage2strength,
|
| 286 |
+
use_identity_guider=not version_dict2["is_guided"],
|
| 287 |
+
get_num_samples=False,
|
| 288 |
+
)
|
| 289 |
+
st.write("__________________________")
|
| 290 |
+
|
| 291 |
+
if mode == "txt2img":
|
| 292 |
+
out = run_txt2img(
|
| 293 |
+
state,
|
| 294 |
+
version,
|
| 295 |
+
version_dict,
|
| 296 |
+
is_legacy=is_legacy,
|
| 297 |
+
return_latents=add_pipeline,
|
| 298 |
+
filter=filter,
|
| 299 |
+
)
|
| 300 |
+
elif mode == "img2img":
|
| 301 |
+
out = run_img2img(
|
| 302 |
+
state,
|
| 303 |
+
version_dict,
|
| 304 |
+
is_legacy=is_legacy,
|
| 305 |
+
return_latents=add_pipeline,
|
| 306 |
+
filter=filter,
|
| 307 |
+
)
|
| 308 |
+
else:
|
| 309 |
+
raise ValueError(f"unknown mode {mode}")
|
| 310 |
+
if isinstance(out, (tuple, list)):
|
| 311 |
+
samples, samples_z = out
|
| 312 |
+
else:
|
| 313 |
+
samples = out
|
| 314 |
+
|
| 315 |
+
if add_pipeline:
|
| 316 |
+
st.write("**Running Refinement Stage**")
|
| 317 |
+
samples = apply_refiner(
|
| 318 |
+
samples_z,
|
| 319 |
+
state2,
|
| 320 |
+
sampler2,
|
| 321 |
+
samples_z.shape[0],
|
| 322 |
+
prompt=prompt,
|
| 323 |
+
negative_prompt=negative_prompt if is_legacy else "",
|
| 324 |
+
filter=filter,
|
| 325 |
+
)
|
| 326 |
+
|
| 327 |
+
if save_locally and samples is not None:
|
| 328 |
+
perform_save_locally(save_path, samples)
|
CCEdit-main/scripts/demo/sampling_command.py
ADDED
|
@@ -0,0 +1,152 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pytorch_lightning import seed_everything
|
| 2 |
+
from scripts.demo.streamlit_helpers import *
|
| 3 |
+
from scripts.util.detection.nsfw_and_watermark_dectection import DeepFloydDataFiltering
|
| 4 |
+
import torchvision
|
| 5 |
+
|
| 6 |
+
SAVE_PATH = "outputs/demo/txt2img/"
|
| 7 |
+
|
| 8 |
+
SD_XL_BASE_RATIOS = {
|
| 9 |
+
"0.5": (704, 1408),
|
| 10 |
+
"0.52": (704, 1344),
|
| 11 |
+
"0.57": (768, 1344),
|
| 12 |
+
"0.6": (768, 1280),
|
| 13 |
+
"0.68": (832, 1216),
|
| 14 |
+
"0.72": (832, 1152),
|
| 15 |
+
"0.78": (896, 1152),
|
| 16 |
+
"0.82": (896, 1088),
|
| 17 |
+
"0.88": (960, 1088),
|
| 18 |
+
"0.94": (960, 1024),
|
| 19 |
+
"1.0": (1024, 1024),
|
| 20 |
+
"1.07": (1024, 960),
|
| 21 |
+
"1.13": (1088, 960),
|
| 22 |
+
"1.21": (1088, 896),
|
| 23 |
+
"1.29": (1152, 896),
|
| 24 |
+
"1.38": (1152, 832),
|
| 25 |
+
"1.46": (1216, 832),
|
| 26 |
+
"1.67": (1280, 768),
|
| 27 |
+
"1.75": (1344, 768),
|
| 28 |
+
"1.91": (1344, 704),
|
| 29 |
+
"2.0": (1408, 704),
|
| 30 |
+
"2.09": (1472, 704),
|
| 31 |
+
"2.4": (1536, 640),
|
| 32 |
+
"2.5": (1600, 640),
|
| 33 |
+
"2.89": (1664, 576),
|
| 34 |
+
"3.0": (1728, 576),
|
| 35 |
+
}
|
| 36 |
+
|
| 37 |
+
VERSION2SPECS = {
|
| 38 |
+
"SD-XL base": {
|
| 39 |
+
"H": 1024,
|
| 40 |
+
"W": 1024,
|
| 41 |
+
"C": 4,
|
| 42 |
+
"f": 8,
|
| 43 |
+
"is_legacy": False,
|
| 44 |
+
"config": "configs/inference/sd_xl_base.yaml",
|
| 45 |
+
"ckpt": "checkpoints/sd_xl_base_0.9.safetensors",
|
| 46 |
+
"is_guided": True,
|
| 47 |
+
},
|
| 48 |
+
"sd-2.1": {
|
| 49 |
+
"H": 512,
|
| 50 |
+
"W": 512,
|
| 51 |
+
"C": 4,
|
| 52 |
+
"f": 8,
|
| 53 |
+
"is_legacy": True,
|
| 54 |
+
"config": "configs/inference/sd_2_1.yaml",
|
| 55 |
+
"ckpt": "checkpoints/v2-1_512-ema-pruned.safetensors",
|
| 56 |
+
"is_guided": True,
|
| 57 |
+
},
|
| 58 |
+
"sd-2.1-768": {
|
| 59 |
+
"H": 768,
|
| 60 |
+
"W": 768,
|
| 61 |
+
"C": 4,
|
| 62 |
+
"f": 8,
|
| 63 |
+
"is_legacy": True,
|
| 64 |
+
"config": "configs/inference/sd_2_1_768.yaml",
|
| 65 |
+
"ckpt": "checkpoints/v2-1_768-ema-pruned.safetensors",
|
| 66 |
+
},
|
| 67 |
+
"SDXL-Refiner": {
|
| 68 |
+
"H": 1024,
|
| 69 |
+
"W": 1024,
|
| 70 |
+
"C": 4,
|
| 71 |
+
"f": 8,
|
| 72 |
+
"is_legacy": True,
|
| 73 |
+
"config": "configs/inference/sd_xl_refiner.yaml",
|
| 74 |
+
"ckpt": "checkpoints/sd_xl_refiner_0.9.safetensors",
|
| 75 |
+
"is_guided": True,
|
| 76 |
+
},
|
| 77 |
+
}
|
| 78 |
+
|
| 79 |
+
version = "sd-2.1"
|
| 80 |
+
# version = "SD-XL base"
|
| 81 |
+
version_dict = VERSION2SPECS[version]
|
| 82 |
+
|
| 83 |
+
# if version == "SD-XL base":
|
| 84 |
+
# # ratio = st.sidebar.selectbox("Ratio:", list(SD_XL_BASE_RATIOS.keys()), 10)
|
| 85 |
+
# ratio = '1.0'
|
| 86 |
+
# W, H = SD_XL_BASE_RATIOS[ratio]
|
| 87 |
+
# else:
|
| 88 |
+
# H = st.sidebar.number_input(
|
| 89 |
+
# "H", value=version_dict["H"], min_value=64, max_value=2048
|
| 90 |
+
# )
|
| 91 |
+
# W = st.sidebar.number_input(
|
| 92 |
+
# "W", value=version_dict["W"], min_value=64, max_value=2048
|
| 93 |
+
# )
|
| 94 |
+
|
| 95 |
+
# initialize model
|
| 96 |
+
state = init_st(version_dict)
|
| 97 |
+
if state["msg"]:
|
| 98 |
+
st.info(state["msg"])
|
| 99 |
+
model = state["model"]
|
| 100 |
+
|
| 101 |
+
if version == "SD-XL base":
|
| 102 |
+
ratio = '1.0'
|
| 103 |
+
W, H = SD_XL_BASE_RATIOS[ratio]
|
| 104 |
+
else:
|
| 105 |
+
W, H = 512, 512
|
| 106 |
+
|
| 107 |
+
C = version_dict["C"]
|
| 108 |
+
F = version_dict["f"]
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
prompt = 'a corgi is sitting on a couch'
|
| 112 |
+
negative_prompt = 'ugly, low quality'
|
| 113 |
+
|
| 114 |
+
init_dict = {
|
| 115 |
+
"orig_width": W,
|
| 116 |
+
"orig_height": H,
|
| 117 |
+
"target_width": W,
|
| 118 |
+
"target_height": H,
|
| 119 |
+
}
|
| 120 |
+
value_dict = init_embedder_options(
|
| 121 |
+
get_unique_embedder_keys_from_conditioner(state["model"].conditioner),
|
| 122 |
+
init_dict,
|
| 123 |
+
prompt=prompt,
|
| 124 |
+
negative_prompt=negative_prompt,
|
| 125 |
+
)
|
| 126 |
+
num_rows, num_cols, sampler = init_sampling(
|
| 127 |
+
use_identity_guider=not version_dict["is_guided"]
|
| 128 |
+
)
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
num_samples = num_rows * num_cols
|
| 132 |
+
|
| 133 |
+
# st.write(f"**Model I:** {version}")
|
| 134 |
+
is_legacy=False
|
| 135 |
+
return_latents = False
|
| 136 |
+
filter=None
|
| 137 |
+
out = do_sample(
|
| 138 |
+
state["model"],
|
| 139 |
+
sampler,
|
| 140 |
+
value_dict,
|
| 141 |
+
num_samples,
|
| 142 |
+
H,
|
| 143 |
+
W,
|
| 144 |
+
C,
|
| 145 |
+
F,
|
| 146 |
+
force_uc_zero_embeddings=["txt"] if not is_legacy else [],
|
| 147 |
+
return_latents=return_latents,
|
| 148 |
+
filter=filter,
|
| 149 |
+
)
|
| 150 |
+
|
| 151 |
+
torchvision.utils.save_image(out, 'debug/myres_2_1.png', nrow=4)
|
| 152 |
+
# torchvision.utils.save_image(out, 'debug/myres.png', nrow=4)
|
CCEdit-main/scripts/demo/streamlit_helpers.py
ADDED
|
@@ -0,0 +1,668 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from typing import Union, List
|
| 3 |
+
|
| 4 |
+
import math
|
| 5 |
+
import numpy as np
|
| 6 |
+
import streamlit as st
|
| 7 |
+
import torch
|
| 8 |
+
from PIL import Image
|
| 9 |
+
from einops import rearrange, repeat
|
| 10 |
+
from imwatermark import WatermarkEncoder
|
| 11 |
+
from omegaconf import OmegaConf, ListConfig
|
| 12 |
+
from torch import autocast
|
| 13 |
+
from torchvision import transforms
|
| 14 |
+
from torchvision.utils import make_grid
|
| 15 |
+
from safetensors.torch import load_file as load_safetensors
|
| 16 |
+
|
| 17 |
+
from sgm.modules.diffusionmodules.sampling import (
|
| 18 |
+
EulerEDMSampler,
|
| 19 |
+
HeunEDMSampler,
|
| 20 |
+
EulerAncestralSampler,
|
| 21 |
+
DPMPP2SAncestralSampler,
|
| 22 |
+
DPMPP2MSampler,
|
| 23 |
+
LinearMultistepSampler,
|
| 24 |
+
)
|
| 25 |
+
from sgm.util import append_dims
|
| 26 |
+
from sgm.util import instantiate_from_config
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class WatermarkEmbedder:
    """Embeds a fixed invisible bit watermark into images via `imwatermark`.

    A single module-level instance (``embed_watemark`` below) is created with
    ``WATERMARK_BITS``.
    """

    def __init__(self, watermark):
        # watermark: sequence of 0/1 ints to embed (e.g. WATERMARK_BITS)
        self.watermark = watermark
        # NOTE(review): length is taken from the global WATERMARK_BITS, not
        # from the `watermark` argument — fine for the module-level instance,
        # but surprising if constructed with a different bit sequence.
        self.num_bits = len(WATERMARK_BITS)
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)

    def __call__(self, image: torch.Tensor):
        """
        Adds a predefined watermark to the input image

        Args:
            image: ([N,] B, C, H, W) in range [0, 1]

        Returns:
            same as input but watermarked
        """
        # watermarking libary expects input as cv2 format
        # A 4-dim (B, C, H, W) input gets a leading N axis here, which is
        # stripped again before returning.
        squeeze = len(image.shape) == 4
        if squeeze:
            image = image[None, ...]
        n = image.shape[0]
        image_np = rearrange(
            (255 * image).detach().cpu(), "n b c h w -> (n b) h w c"
        ).numpy()
        # torch (b, c, h, w) in [0, 1] -> numpy (b, h, w, c) [0, 255]
        for k in range(image_np.shape[0]):
            # encode each frame in place with the DWT+DCT method
            image_np[k] = self.encoder.encode(image_np[k], "dwtDct")
        image = torch.from_numpy(
            rearrange(image_np, "(n b) h w c -> n b c h w", n=n)
        ).to(image.device)
        image = torch.clamp(image / 255, min=0.0, max=1.0)
        if squeeze:
            image = image[0]
        return image
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
# A fixed 48-bit message that was choosen at random
# WATERMARK_MESSAGE = 0xB3EC907BB19E
WATERMARK_MESSAGE = 0b101100111110110010010000011110111011000110011110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
# Shared module-level embedder used by do_img2img below.
# NOTE: "watemark" (sic) is the established public name — do not rename, since
# other modules may import it.
embed_watemark = WatermarkEmbedder(WATERMARK_BITS)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
@st.cache_resource()
def init_st(version_dict, load_ckpt=True):
    """Load and cache the model described by ``version_dict``.

    Args:
        version_dict: dict with "config" (path to an OmegaConf YAML) and
            "ckpt" (path to checkpoint weights).
        load_ckpt: when False, the model keeps freshly initialized weights
            and no checkpoint is read.

    Returns:
        dict with keys "msg", "model", "ckpt", "config".
    """
    # BUG FIX: the original guarded this body with `if not "model" in state:`
    # on a freshly created empty dict — the condition was always true (dead
    # check); caching is already handled by @st.cache_resource.
    config = OmegaConf.load(version_dict["config"])
    ckpt = version_dict["ckpt"]

    model, msg = load_model_from_config(config, ckpt if load_ckpt else None)

    return {
        "msg": msg,
        "model": model,
        "ckpt": ckpt if load_ckpt else None,
        "config": config,
    }
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def load_model_from_config(config, ckpt=None, verbose=True):
    """Instantiate the model from ``config`` and optionally load weights.

    Args:
        config: OmegaConf object with a ``model`` section understood by
            ``instantiate_from_config``.
        ckpt: optional path ending in "ckpt" (Lightning checkpoint) or
            "safetensors"; when None, weights stay freshly initialized.
        verbose: print missing/unexpected state-dict keys after loading.

    Returns:
        (model, msg): the CUDA-resident model in eval mode, and ``msg``
        (currently always None; kept for interface compatibility).

    Raises:
        NotImplementedError: for an unrecognized checkpoint extension.
    """
    model = instantiate_from_config(config.model)

    if ckpt is not None:
        print(f"Loading model from {ckpt}")
        if ckpt.endswith("ckpt"):
            # Lightning checkpoint: weights live under "state_dict"
            pl_sd = torch.load(ckpt, map_location="cpu")
            if "global_step" in pl_sd:
                global_step = pl_sd["global_step"]
                st.info(f"loaded ckpt from global step {global_step}")
                print(f"Global Step: {pl_sd['global_step']}")
            sd = pl_sd["state_dict"]
        elif ckpt.endswith("safetensors"):
            sd = load_safetensors(ckpt)
        else:
            raise NotImplementedError

        msg = None

        # strict=False tolerates partial checkpoints (e.g. control branches
        # or EMA keys absent from the file)
        m, u = model.load_state_dict(sd, strict=False)

        if len(m) > 0 and verbose:
            print("missing keys:")
            print(m)
        if len(u) > 0 and verbose:
            print("unexpected keys:")
            print(u)
    else:
        msg = None

    # NOTE(review): hard-codes CUDA — fails on CPU-only hosts; confirm intended.
    model.cuda()
    model.eval()
    return model, msg
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
def get_unique_embedder_keys_from_conditioner(conditioner):
    """Return the distinct input keys used by the conditioner's embedders."""
    unique_keys = {embedder.input_key for embedder in conditioner.embedders}
    return list(unique_keys)
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
def init_embedder_options(keys, init_dict, prompt=None, negative_prompt=None):
    """Collect conditioning values for each embedder key via Streamlit widgets.

    Args:
        keys: embedder input keys (see get_unique_embedder_keys_from_conditioner).
        init_dict: default widget values ("orig_width"/"orig_height",
            "target_width"/"target_height") for the size-based keys.
        prompt / negative_prompt: when None, interactive text inputs are shown.

    Returns:
        dict of conditioning values consumed by get_batch.
    """
    # Hardcoded demo settings; might undergo some changes in the future

    value_dict = {}
    for key in keys:
        if key == "txt":
            if prompt is None:
                prompt = st.text_input(
                    "Prompt", "A professional photograph of an astronaut riding a pig"
                )
            if negative_prompt is None:
                negative_prompt = st.text_input("Negative prompt", "")

            value_dict["prompt"] = prompt
            value_dict["negative_prompt"] = negative_prompt

        if key == "original_size_as_tuple":
            orig_width = st.number_input(
                "orig_width",
                value=init_dict["orig_width"],
                min_value=16,
            )
            orig_height = st.number_input(
                "orig_height",
                value=init_dict["orig_height"],
                min_value=16,
            )

            value_dict["orig_width"] = orig_width
            value_dict["orig_height"] = orig_height

        if key == "crop_coords_top_left":
            crop_coord_top = st.number_input("crop_coords_top", value=0, min_value=0)
            crop_coord_left = st.number_input("crop_coords_left", value=0, min_value=0)

            value_dict["crop_coords_top"] = crop_coord_top
            value_dict["crop_coords_left"] = crop_coord_left

        if key == "aesthetic_score":
            # fixed SDXL-refiner style defaults; not exposed as widgets
            value_dict["aesthetic_score"] = 6.0
            value_dict["negative_aesthetic_score"] = 2.5

        if key == "target_size_as_tuple":
            target_width = st.number_input(
                "target_width",
                value=init_dict["target_width"],
                min_value=16,
            )
            target_height = st.number_input(
                "target_height",
                value=init_dict["target_height"],
                min_value=16,
            )

            value_dict["target_width"] = target_width
            value_dict["target_height"] = target_height

    return value_dict
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
def perform_save_locally(save_path, samples):
    """Write each sample tensor as a sequentially numbered PNG under save_path."""
    os.makedirs(os.path.join(save_path), exist_ok=True)
    # continue numbering after whatever is already in the directory
    next_index = len(os.listdir(os.path.join(save_path)))
    # samples = embed_watemark(samples)
    for offset, sample in enumerate(samples):
        pixels = rearrange(sample.cpu().numpy(), "c h w -> h w c") * 255.0
        out_file = os.path.join(save_path, f"{next_index + offset:09}.png")
        Image.fromarray(pixels.astype(np.uint8)).save(out_file)
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
def init_save_locally(_dir, init_value: bool = False):
    """Show a sidebar checkbox for local saving; return (enabled, path-or-None)."""
    save_locally = st.sidebar.checkbox("Save images locally", value=init_value)
    save_path = (
        st.text_input("Save path", value=os.path.join(_dir, "samples"))
        if save_locally
        else None
    )
    return save_locally, save_path
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
class Img2ImgDiscretizationWrapper:
    """
    wraps a discretizer, and prunes the sigmas
    params:
        strength: float between 0.0 and 1.0. 1.0 means full sampling (all sigmas are returned)
    """

    def __init__(self, discretization, strength: float = 1.0):
        self.discretization = discretization
        self.strength = strength
        assert 0.0 <= self.strength <= 1.0

    def __call__(self, *args, **kwargs):
        """Return the wrapped schedule, keeping only the smallest
        ``strength``-fraction of sigmas (at least one)."""
        # sigmas start large first, and decrease then
        sigmas = self.discretization(*args, **kwargs)
        print("sigmas after discretization, before pruning img2img: ", sigmas)
        sigmas = torch.flip(sigmas, (0,))
        # BUG FIX: compute the prune index once, before slicing — the original
        # recomputed it from the already-pruned tensor for the log line,
        # printing a misleading (smaller) value.
        prune_index = max(int(self.strength * len(sigmas)), 1)
        sigmas = sigmas[:prune_index]
        print("prune index:", prune_index)
        sigmas = torch.flip(sigmas, (0,))
        print("sigmas after pruning: ", sigmas)
        return sigmas
|
| 234 |
+
|
| 235 |
+
|
| 236 |
+
def get_guider(key):
    """Interactively choose a guider and return its instantiation config.

    Args:
        key: suffix appended to widget labels so several selectors can
            coexist on one Streamlit page.

    Returns:
        dict with "target" (and optional "params") for instantiate_from_config.

    Raises:
        NotImplementedError: for an unsupported guider/thresholder selection.
    """
    # BUG FIX: this selectbox was labelled f"Discretization #{key}" — a
    # copy-paste from the discretization selector in init_sampling — even
    # though it selects the guider. Label it accordingly.
    guider = st.sidebar.selectbox(
        f"Guider #{key}",
        [
            "VanillaCFG",
            "IdentityGuider",
        ],
    )

    if guider == "IdentityGuider":
        guider_config = {
            "target": "sgm.modules.diffusionmodules.guiders.IdentityGuider"
        }
    elif guider == "VanillaCFG":
        scale = st.number_input(
            f"cfg-scale #{key}", value=5.0, min_value=0.0, max_value=100.0
        )

        thresholder = st.sidebar.selectbox(
            f"Thresholder #{key}",
            [
                "None",
            ],
        )

        if thresholder == "None":
            dyn_thresh_config = {
                "target": "sgm.modules.diffusionmodules.sampling_utils.NoDynamicThresholding"
            }
        else:
            raise NotImplementedError

        guider_config = {
            "target": "sgm.modules.diffusionmodules.guiders.VanillaCFG",
            "params": {"scale": scale, "dyn_thresh_config": dyn_thresh_config},
        }
    else:
        raise NotImplementedError
    return guider_config
|
| 275 |
+
|
| 276 |
+
|
| 277 |
+
def init_sampling(
    key=1, img2img_strength=1.0, use_identity_guider=False, get_num_samples=True
):
    """Build a fully configured sampler from sidebar widget selections.

    Args:
        key: suffix for widget labels, allowing several sampler configs per page.
        img2img_strength: < 1.0 wraps the discretization so only that fraction
            of the noise schedule is used (img2img partial denoising).
        use_identity_guider: currently unused — NOTE(review): confirm whether
            this was meant to pre-select IdentityGuider in get_guider.
        get_num_samples: when True, also show grid-size widgets and return them.

    Returns:
        (num_rows, num_cols, sampler) when get_num_samples else sampler.
    """
    if get_num_samples:
        num_rows = 1
        num_cols = st.number_input(
            f"num cols #{key}", value=2, min_value=1, max_value=10
        )

    steps = st.sidebar.number_input(
        f"steps #{key}", value=50, min_value=1, max_value=1000
    )
    sampler = st.sidebar.selectbox(
        f"Sampler #{key}",
        [
            "EulerEDMSampler",
            "HeunEDMSampler",
            "EulerAncestralSampler",
            "DPMPP2SAncestralSampler",
            "DPMPP2MSampler",
            "LinearMultistepSampler",
        ],
        0,
    )
    discretization = st.sidebar.selectbox(
        f"Discretization #{key}",
        [
            "LegacyDDPMDiscretization",
            "EDMDiscretization",
        ],
    )

    discretization_config = get_discretization(discretization, key=key)

    guider_config = get_guider(key=key)

    # `sampler` is rebound from selection string to the sampler instance here
    sampler = get_sampler(sampler, steps, discretization_config, guider_config, key=key)
    if img2img_strength < 1.0:
        st.warning(
            f"Wrapping {sampler.__class__.__name__} with Img2ImgDiscretizationWrapper"
        )
        sampler.discretization = Img2ImgDiscretizationWrapper(
            sampler.discretization, strength=img2img_strength
        )
    if get_num_samples:
        return num_rows, num_cols, sampler
    return sampler
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
def get_discretization(discretization, key=1):
    """Return the instantiation config for the chosen discretization.

    Args:
        discretization: "LegacyDDPMDiscretization" or "EDMDiscretization".
        key: suffix for the EDM parameter widget labels.

    Returns:
        dict with "target" (and optional "params") for instantiate_from_config.

    Raises:
        ValueError: for an unknown discretization name.
    """
    if discretization == "LegacyDDPMDiscretization":
        discretization_config = {
            "target": "sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization",
        }
    elif discretization == "EDMDiscretization":
        sigma_min = st.number_input(f"sigma_min #{key}", value=0.03)  # 0.0292
        sigma_max = st.number_input(f"sigma_max #{key}", value=14.61)  # 14.6146
        rho = st.number_input(f"rho #{key}", value=3.0)
        discretization_config = {
            "target": "sgm.modules.diffusionmodules.discretizer.EDMDiscretization",
            "params": {
                "sigma_min": sigma_min,
                "sigma_max": sigma_max,
                "rho": rho,
            },
        }
    else:
        # BUG FIX: an unknown name previously fell through to an unbound
        # local and raised NameError at the return; fail explicitly instead.
        raise ValueError(f"unknown discretization {discretization}!")

    return discretization_config
|
| 345 |
+
|
| 346 |
+
|
| 347 |
+
def get_sampler(sampler_name, steps, discretization_config, guider_config, key=1):
    """Instantiate the chosen sampler, exposing its knobs as sidebar widgets.

    Args:
        sampler_name: one of the names offered by init_sampling.
        steps: number of sampling steps.
        discretization_config: config from get_discretization.
        guider_config: config from get_guider.
        key: suffix for widget labels so multiple samplers can be configured
            on the same page.

    Returns:
        the configured sampler instance.

    Raises:
        ValueError: for an unknown sampler_name.
    """
    if sampler_name == "EulerEDMSampler" or sampler_name == "HeunEDMSampler":
        s_churn = st.sidebar.number_input(f"s_churn #{key}", value=0.0, min_value=0.0)
        s_tmin = st.sidebar.number_input(f"s_tmin #{key}", value=0.0, min_value=0.0)
        s_tmax = st.sidebar.number_input(f"s_tmax #{key}", value=999.0, min_value=0.0)
        s_noise = st.sidebar.number_input(f"s_noise #{key}", value=1.0, min_value=0.0)

        if sampler_name == "EulerEDMSampler":
            sampler = EulerEDMSampler(
                num_steps=steps,
                discretization_config=discretization_config,
                guider_config=guider_config,
                s_churn=s_churn,
                s_tmin=s_tmin,
                s_tmax=s_tmax,
                s_noise=s_noise,
                verbose=True,
            )
        elif sampler_name == "HeunEDMSampler":
            sampler = HeunEDMSampler(
                num_steps=steps,
                discretization_config=discretization_config,
                guider_config=guider_config,
                s_churn=s_churn,
                s_tmin=s_tmin,
                s_tmax=s_tmax,
                s_noise=s_noise,
                verbose=True,
            )
    elif (
        sampler_name == "EulerAncestralSampler"
        or sampler_name == "DPMPP2SAncestralSampler"
    ):
        # CONSISTENCY FIX: these labels were missing the f"... #{key}" suffix
        # used by every other widget in this file; identical labels can
        # collide (DuplicateWidgetID) when two samplers share a page.
        s_noise = st.sidebar.number_input(f"s_noise #{key}", value=1.0, min_value=0.0)
        eta = st.sidebar.number_input(f"eta #{key}", value=1.0, min_value=0.0)

        if sampler_name == "EulerAncestralSampler":
            sampler = EulerAncestralSampler(
                num_steps=steps,
                discretization_config=discretization_config,
                guider_config=guider_config,
                eta=eta,
                s_noise=s_noise,
                verbose=True,
            )
        elif sampler_name == "DPMPP2SAncestralSampler":
            sampler = DPMPP2SAncestralSampler(
                num_steps=steps,
                discretization_config=discretization_config,
                guider_config=guider_config,
                eta=eta,
                s_noise=s_noise,
                verbose=True,
            )
    elif sampler_name == "DPMPP2MSampler":
        sampler = DPMPP2MSampler(
            num_steps=steps,
            discretization_config=discretization_config,
            guider_config=guider_config,
            verbose=True,
        )
    elif sampler_name == "LinearMultistepSampler":
        # CONSISTENCY FIX: add the #{key} suffix here as well (see above).
        order = st.sidebar.number_input(f"order #{key}", value=4, min_value=1)
        sampler = LinearMultistepSampler(
            num_steps=steps,
            discretization_config=discretization_config,
            guider_config=guider_config,
            order=order,
            verbose=True,
        )
    else:
        raise ValueError(f"unknown sampler {sampler_name}!")

    return sampler
|
| 421 |
+
|
| 422 |
+
|
| 423 |
+
def get_interactive_image(key=None) -> Union[Image.Image, None]:
    """Streamlit file uploader returning an RGB PIL image, or None.

    Args:
        key: optional Streamlit widget key.

    Returns:
        the uploaded image converted to RGB, or None when nothing was
        uploaded. (ANNOTATION FIX: the original claimed ``-> Image.Image``
        but implicitly returns None on the no-upload path.)
    """
    image = st.file_uploader("Input", type=["jpg", "JPEG", "png"], key=key)
    if image is not None:
        image = Image.open(image)
        if not image.mode == "RGB":
            image = image.convert("RGB")
        return image
|
| 430 |
+
|
| 431 |
+
|
| 432 |
+
def load_img(display=True, key=None):
    """Upload an image and return it as a [-1, 1] tensor of shape (1, C, H, W),
    or None when nothing was uploaded."""
    image = get_interactive_image(key=key)
    if image is None:
        return None
    if display:
        st.image(image)

    width, height = image.size
    print(f"loaded input image of size ({width}, {height})")

    to_signed_tensor = transforms.Compose(
        [
            transforms.ToTensor(),
            transforms.Lambda(lambda x: x * 2.0 - 1.0),
        ]
    )
    img = to_signed_tensor(image)[None, ...]
    st.text(f"input min/max/mean: {img.min():.3f}/{img.max():.3f}/{img.mean():.3f}")
    return img
|
| 450 |
+
|
| 451 |
+
|
| 452 |
+
def get_init_img(batch_size=1, key=None):
    """Load an uploaded image onto the GPU, tiled along the batch dimension."""
    single = load_img(key=key).cuda()
    return repeat(single, "1 ... -> b ...", b=batch_size)
|
| 456 |
+
|
| 457 |
+
|
| 458 |
+
def do_sample(
    model,
    sampler,
    value_dict,
    num_samples,
    H,
    W,
    C,
    F,
    force_uc_zero_embeddings: List = None,
    batch2model_input: List = None,
    return_latents=False,
    filter=None,
):
    """Run text-to-image sampling and display the resulting grid.

    Args:
        model: diffusion model with conditioner / denoiser / first-stage.
        sampler: sampler from init_sampling.
        value_dict: conditioning values from init_embedder_options.
        num_samples: number of images to generate.
        H, W: output image height/width in pixels.
        C: latent channels; F: spatial downsampling factor of the first stage.
        force_uc_zero_embeddings: embedder keys zeroed in the unconditional pass.
        batch2model_input: batch keys forwarded as extra model inputs.
        return_latents: also return the latent samples.
        filter: optional post-hoc filter applied to decoded samples.

    Returns:
        samples, or (samples, samples_z) when return_latents.
    """
    if force_uc_zero_embeddings is None:
        force_uc_zero_embeddings = []
    if batch2model_input is None:
        batch2model_input = []

    st.text("Sampling")

    outputs = st.empty()
    precision_scope = autocast
    with torch.no_grad():
        with precision_scope("cuda"):
            with model.ema_scope():
                num_samples = [num_samples]
                batch, batch_uc = get_batch(
                    get_unique_embedder_keys_from_conditioner(model.conditioner),
                    value_dict,
                    num_samples,
                )
                # debug: log shapes/lengths of each conditioning entry
                for key in batch:
                    if isinstance(batch[key], torch.Tensor):
                        print(key, batch[key].shape)
                    elif isinstance(batch[key], list):
                        print(key, [len(l) for l in batch[key]])
                    else:
                        print(key, batch[key])
                c, uc = model.conditioner.get_unconditional_conditioning(
                    batch,
                    batch_uc=batch_uc,
                    force_uc_zero_embeddings=force_uc_zero_embeddings,
                )

                # truncate conditionings to the batch size and move to GPU;
                # NOTE(review): "crossattn" is deliberately skipped here —
                # presumably already on-device/correctly sized; confirm.
                for k in c:
                    if not k == "crossattn":
                        c[k], uc[k] = map(
                            lambda y: y[k][: math.prod(num_samples)].to("cuda"), (c, uc)
                        )

                additional_model_inputs = {}
                for k in batch2model_input:
                    additional_model_inputs[k] = batch[k]

                shape = (math.prod(num_samples), C, H // F, W // F)
                randn = torch.randn(shape).to("cuda")

                def denoiser(input, sigma, c):
                    # closes over additional_model_inputs from above
                    return model.denoiser(
                        model.model, input, sigma, c, **additional_model_inputs
                    )

                samples_z = sampler(denoiser, randn, cond=c, uc=uc)
                samples_x = model.decode_first_stage(samples_z)
                # map decoder output from [-1, 1] to [0, 1]
                samples = torch.clamp((samples_x + 1.0) / 2.0, min=0.0, max=1.0)

                if filter is not None:
                    samples = filter(samples)

                grid = torch.stack([samples])
                grid = rearrange(grid, "n b c h w -> (n h) (b w) c")
                outputs.image(grid.cpu().numpy())

                if return_latents:
                    return samples, samples_z
                return samples
|
| 535 |
+
|
| 536 |
+
|
| 537 |
+
def get_batch(keys, value_dict, N: Union[List, ListConfig], device="cuda"):
    """Assemble conditional and unconditional batches from ``value_dict``.

    Args:
        keys: embedder input keys to populate.
        value_dict: values gathered by init_embedder_options.
        N: batch shape as a list (e.g. [num_samples]); math.prod(N) items
            are produced per key.
        device: target device for tensor entries.

    Returns:
        (batch, batch_uc): conditioning dicts; tensor entries missing from
        batch_uc are cloned from batch at the end.
    """
    # Hardcoded demo setups; might undergo some changes in the future

    batch = {}
    batch_uc = {}

    for key in keys:
        if key == "txt":
            # prompt/negative_prompt replicated into a list shaped like N
            batch["txt"] = (
                np.repeat([value_dict["prompt"]], repeats=math.prod(N))
                .reshape(N)
                .tolist()
            )
            batch_uc["txt"] = (
                np.repeat([value_dict["negative_prompt"]], repeats=math.prod(N))
                .reshape(N)
                .tolist()
            )
        elif key == "original_size_as_tuple":
            # import pdb; pdb.set_trace()
            batch["original_size_as_tuple"] = (
                torch.tensor([value_dict["orig_height"], value_dict["orig_width"]])
                .to(device)
                .repeat(*N, 1)
            )
        elif key == "crop_coords_top_left":
            # import pdb; pdb.set_trace()
            batch["crop_coords_top_left"] = (
                torch.tensor(
                    [value_dict["crop_coords_top"], value_dict["crop_coords_left"]]
                )
                .to(device)
                .repeat(*N, 1)
            )
        elif key == "aesthetic_score":
            batch["aesthetic_score"] = (
                torch.tensor([value_dict["aesthetic_score"]]).to(device).repeat(*N, 1)
            )
            batch_uc["aesthetic_score"] = (
                torch.tensor([value_dict["negative_aesthetic_score"]])
                .to(device)
                .repeat(*N, 1)
            )

        elif key == "target_size_as_tuple":
            batch["target_size_as_tuple"] = (
                torch.tensor([value_dict["target_height"], value_dict["target_width"]])
                .to(device)
                .repeat(*N, 1)
            )
        else:
            # pass any unrecognized key through verbatim
            batch[key] = value_dict[key]

    # fill the unconditional batch with copies of any tensor it is missing
    for key in batch.keys():
        if key not in batch_uc and isinstance(batch[key], torch.Tensor):
            batch_uc[key] = torch.clone(batch[key])
    return batch, batch_uc
|
| 594 |
+
|
| 595 |
+
|
| 596 |
+
@torch.no_grad()
def do_img2img(
    img,
    model,
    sampler,
    value_dict,
    num_samples,
    # NOTE(review): mutable defaults ([]/{}) are shared across calls; they are
    # only read here, but None defaults would be safer.
    force_uc_zero_embeddings=[],
    additional_kwargs={},
    # ANNOTATION FIX: default is a float; was annotated `int`
    offset_noise_level: float = 0.0,
    return_latents=False,
    skip_encode=False,
    filter=None,
):
    """Noise an input image to the first schedule sigma, then denoise it
    (img2img), displaying the watermarked result grid.

    Args:
        img: input image tensor (or a latent when skip_encode is True).
        model: diffusion model with conditioner / denoiser / first-stage.
        sampler: sampler from init_sampling (its discretization may be an
            Img2ImgDiscretizationWrapper for partial denoising).
        value_dict: conditioning values from init_embedder_options.
        num_samples: batch size.
        force_uc_zero_embeddings: embedder keys zeroed in the uncond pass.
        additional_kwargs: values copied into both cond and uncond dicts.
        offset_noise_level: scale of extra per-sample offset noise.
        return_latents: also return the latent samples.
        skip_encode: treat ``img`` as an already-encoded latent.
        filter: optional post-hoc filter applied to decoded samples.

    Returns:
        samples, or (samples, samples_z) when return_latents.
    """
    st.text("Sampling")

    outputs = st.empty()
    precision_scope = autocast
    with torch.no_grad():
        with precision_scope("cuda"):
            with model.ema_scope():
                batch, batch_uc = get_batch(
                    get_unique_embedder_keys_from_conditioner(model.conditioner),
                    value_dict,
                    [num_samples],
                )
                c, uc = model.conditioner.get_unconditional_conditioning(
                    batch,
                    batch_uc=batch_uc,
                    force_uc_zero_embeddings=force_uc_zero_embeddings,
                )

                for k in c:
                    c[k], uc[k] = map(lambda y: y[k][:num_samples].to("cuda"), (c, uc))

                for k in additional_kwargs:
                    c[k] = uc[k] = additional_kwargs[k]
                if skip_encode:
                    z = img
                else:
                    z = model.encode_first_stage(img)
                noise = torch.randn_like(z)
                sigmas = sampler.discretization(sampler.num_steps)
                # noise to the largest remaining sigma of the schedule
                sigma = sigmas[0]

                st.info(f"all sigmas: {sigmas}")
                st.info(f"noising sigma: {sigma}")

                if offset_noise_level > 0.0:
                    # per-sample constant offset noise, broadcast over z's dims
                    noise = noise + offset_noise_level * append_dims(
                        torch.randn(z.shape[0], device=z.device), z.ndim
                    )
                noised_z = z + noise * append_dims(sigma, z.ndim)
                noised_z = noised_z / torch.sqrt(
                    1.0 + sigmas[0] ** 2.0
                )  # Note: hardcoded to DDPM-like scaling. need to generalize later.

                def denoiser(x, sigma, c):
                    return model.denoiser(model.model, x, sigma, c)

                samples_z = sampler(denoiser, noised_z, cond=c, uc=uc)
                samples_x = model.decode_first_stage(samples_z)
                # map decoder output from [-1, 1] to [0, 1]
                samples = torch.clamp((samples_x + 1.0) / 2.0, min=0.0, max=1.0)

                if filter is not None:
                    samples = filter(samples)

                grid = embed_watemark(torch.stack([samples]))
                grid = rearrange(grid, "n b c h w -> (n h) (b w) c")
                outputs.image(grid.cpu().numpy())
                if return_latents:
                    return samples, samples_z
                return samples
|
CCEdit-main/scripts/sampling/__init__.py
ADDED
|
File without changes
|
CCEdit-main/scripts/sampling/__pycache__/__init__.cpython-39.pyc
ADDED
|
Binary file (149 Bytes). View file
|
|
|
CCEdit-main/scripts/sampling/__pycache__/util.cpython-39.pyc
ADDED
|
Binary file (19.8 kB). View file
|
|
|
CCEdit-main/scripts/sampling/pnp_generate_config.py
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''
|
| 2 |
+
python scripts/sampling/pnp_generate_config.py \
|
| 3 |
+
--p_config outputs/debug/automatic_ref_editing/config_pnp_auto.yaml \
|
| 4 |
+
--output_path "outputs/debug/automatic_ref_editing/output" \
|
| 5 |
+
--image_path "src/pnp-diffusers/data/horse.jpg" \
|
| 6 |
+
--latents_path "outputs/debug/automatic_ref_editing/latents_forward" \
|
| 7 |
+
--prompt "a photo of a pink toy horse on the beach"
|
| 8 |
+
'''
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
import yaml
|
| 12 |
+
import argparse
|
| 13 |
+
|
| 14 |
+
def save_yaml(args):
    """Dump the relevant CLI arguments to the YAML file named by args.p_config."""
    field_names = [
        'seed',
        'device',
        'output_path',
        'image_path',
        'latents_path',
        'sd_version',
        'guidance_scale',
        'n_timesteps',
        'prompt',
        'negative_prompt',
        'pnp_attn_t',
        'pnp_f_t',
    ]
    # insertion order is preserved, matching the original literal dict
    config_data = {name: getattr(args, name) for name in field_names}

    with open(args.p_config, 'w') as file:
        yaml.dump(config_data, file, sort_keys=False, allow_unicode=True)
|
| 32 |
+
|
| 33 |
+
if __name__ == '__main__':
    # CLI entry point: parse plug-and-play generation options and write them
    # to a YAML config consumed downstream (see module docstring for usage).
    parser = argparse.ArgumentParser(description="Save configuration to a YAML file.")
    parser.add_argument('--p_config', type=str, help="Path to save the YAML configuration file.")
    parser.add_argument('--output_path', type=str, help="Output path for the results.")
    parser.add_argument('--image_path', type=str, help="Path to the input image.")
    parser.add_argument('--latents_path', type=str, help="Path to the latents file.")
    parser.add_argument('--prompt', type=str, help="Prompt for the diffusion model.")
    parser.add_argument('--seed', type=int, default=1, help="Seed for random number generation.")
    parser.add_argument('--device', type=str, default='cuda', help="Device to be used (e.g., 'cuda', 'cpu').")
    parser.add_argument('--sd_version', type=str, default='2.1', help="Version of the diffusion model.")
    parser.add_argument('--guidance_scale', type=float, default=7.5, help="Guidance scale for the diffusion model.")
    parser.add_argument('--n_timesteps', type=int, default=50, help="Number of timesteps for the diffusion process.")
    parser.add_argument('--negative_prompt', type=str, default='ugly, blurry, black, low res, unrealistic', help="Negative prompt for the diffusion model.")
    parser.add_argument('--pnp_attn_t', type=float, default=0.5, help="PNP attention threshold.")
    parser.add_argument('--pnp_f_t', type=float, default=0.8, help="PNP feature threshold.")

    args = parser.parse_args()

    save_yaml(args)
    print(f"YAML configuration saved to {args.p_config}")
|
CCEdit-main/scripts/sampling/sampling_image.py
ADDED
|
@@ -0,0 +1,168 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pytorch_lightning import seed_everything
from scripts.demo.streamlit_helpers import *
from scripts.util.detection.nsfw_and_watermark_dectection import DeepFloydDataFiltering

import argparse
import tqdm

if __name__ == "__main__":
    # Command-line text-to-image sampling script built on the streamlit demo
    # helpers (init_st / init_embedder_options / init_sampling / do_sample all
    # come from scripts.demo.streamlit_helpers via the star import above).
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_version', type=str, default='2.1',
                        choices=['2.1', '2.1-768', 'xl'])
    parser.add_argument("--num_samples", type=int, default=4)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--prompt", type=str, default="a corgi is sitting on a couch")
    parser.add_argument("--prompt_listpath", type=str, default="", help="path to a txt file with a list of prompts")
    parser.add_argument("--negative_prompt", type=str, default="ugly, low quality")
    parser.add_argument('--save_path', type=str, default='outputs/demo/txt2img/')
    args = parser.parse_args()

    seed_everything(args.seed)
    save_path = args.save_path

    # Map the short CLI version names to the keys used in VERSION2SPECS below.
    version_map = {
        '2.1': 'sd-2.1',
        '2.1-768': 'sd-2.1-768',
        'xl': 'SD-XL base',
    }

    # Supported (W, H) resolutions for SD-XL, keyed by aspect ratio string.
    SD_XL_BASE_RATIOS = {
        "0.5": (704, 1408),
        "0.52": (704, 1344),
        "0.57": (768, 1344),
        "0.6": (768, 1280),
        "0.68": (832, 1216),
        "0.72": (832, 1152),
        "0.78": (896, 1152),
        "0.82": (896, 1088),
        "0.88": (960, 1088),
        "0.94": (960, 1024),
        "1.0": (1024, 1024),
        "1.07": (1024, 960),
        "1.13": (1088, 960),
        "1.21": (1088, 896),
        "1.29": (1152, 896),
        "1.38": (1152, 832),
        "1.46": (1216, 832),
        "1.67": (1280, 768),
        "1.75": (1344, 768),
        "1.91": (1344, 704),
        "2.0": (1408, 704),
        "2.09": (1472, 704),
        "2.4": (1536, 640),
        "2.5": (1600, 640),
        "2.89": (1664, 576),
        "3.0": (1728, 576),
    }

    # Per-model defaults: resolution, latent channels (C), downsampling
    # factor (f), config/checkpoint locations.
    VERSION2SPECS = {
        "SD-XL base": {
            "H": 1024,
            "W": 1024,
            "C": 4,
            "f": 8,
            "is_legacy": False,
            "config": "configs/inference/sd_xl_base.yaml",
            "ckpt": "checkpoints/sd_xl_base_0.9.safetensors",
            "is_guided": True,
        },
        "sd-2.1": {
            "H": 512,
            "W": 512,
            "C": 4,
            "f": 8,
            "is_legacy": True,
            "config": "configs/inference/sd_2_1.yaml",
            "ckpt": "checkpoints/v2-1_512-ema-pruned.safetensors",
            "is_guided": True,
        },
        "sd-2.1-768": {
            "H": 768,
            "W": 768,
            "C": 4,
            "f": 8,
            "is_legacy": True,
            "config": "configs/inference/sd_2_1_768.yaml",
            "ckpt": "checkpoints/v2-1_768-ema-pruned.safetensors",
        },
        "SDXL-Refiner": {
            "H": 1024,
            "W": 1024,
            "C": 4,
            "f": 8,
            "is_legacy": True,
            "config": "configs/inference/sd_xl_refiner.yaml",
            "ckpt": "checkpoints/sd_xl_refiner_0.9.safetensors",
            "is_guided": True,
        },
    }

    version = version_map[args.model_version]
    version_dict = VERSION2SPECS[version]

    # Initialize the model (init_st also returns an optional status message).
    state = init_st(version_dict)
    if state["msg"]:
        st.info(state["msg"])
    model = state["model"]

    if version == "SD-XL base":
        # Fixed square aspect ratio for the XL base model.
        ratio = '1.0'
        W, H = SD_XL_BASE_RATIOS[ratio]
    else:
        W, H = version_dict['W'], version_dict['H']

    C = version_dict["C"]
    F = version_dict["f"]

    # Either a file with one prompt per line, or the single --prompt.
    if args.prompt_listpath:
        with open(args.prompt_listpath, 'r') as f:
            prompts = [p.strip() for p in f.readlines()]
    else:
        prompts = [args.prompt]
    negative_prompt = args.negative_prompt
    init_dict = {
        "orig_width": W,
        "orig_height": H,
        "target_width": W,
        "target_height": H,
    }

    for prompt in tqdm.tqdm(prompts):
        print('Current Prompt: >>>>> {} <<<<<'.format(prompt))
        value_dict = init_embedder_options(
            get_unique_embedder_keys_from_conditioner(state["model"].conditioner),
            init_dict,
            prompt=prompt,
            negative_prompt=negative_prompt,
        )
        _, _, sampler = init_sampling(
            use_identity_guider=not version_dict["is_guided"]
        )

        num_samples = args.num_samples

        # NOTE(review): is_legacy is hardcoded to False here even though
        # VERSION2SPECS marks the sd-2.1 variants as legacy; confirm whether
        # this should read version_dict["is_legacy"] instead.
        is_legacy = False
        return_latents = False
        # No NSFW/watermark filtering by default (DeepFloydDataFiltering is
        # imported but unused); renamed to avoid shadowing builtins.filter.
        sample_filter = None
        with torch.no_grad():
            samples = do_sample(
                state["model"],
                sampler,
                value_dict,
                num_samples,
                H,
                W,
                C,
                F,
                force_uc_zero_embeddings=["txt"] if not is_legacy else [],
                return_latents=return_latents,
                filter=sample_filter,
            )

        if samples is not None:
            perform_save_locally(save_path, samples)
            print("Saved samples to {}. Enjoy.".format(save_path))
|
CCEdit-main/scripts/sampling/sampling_tv2v.py
ADDED
|
@@ -0,0 +1,209 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
import json
import os
import random
import sys
# Fragile relative path hack; assumes the script is launched from a sibling
# directory of ../src. Kept for backward compatibility.
sys.path.insert(0, '../src')
import torch
from einops import rearrange, repeat
from pytorch_lightning import seed_everything
from safetensors import safe_open
from torch import autocast

from scripts.sampling.util import (
    chunk,
    convert_load_lora,
    create_model,
    init_sampling,
    load_video_keyframes,
    model_load_ckpt,
    perform_save_locally_video,
)
from sgm.util import append_dims

if __name__ == "__main__":
    # Text-guided video-to-video (TV2V) sampling driven by a JSONL task list.
    # Each JSONL line must contain: "video" (file name under --videos_root),
    # "prompt" (base caption) and "edit_prompt" (edit instruction).
    parser = argparse.ArgumentParser()
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument(
        "--config_path",
        type=str,
        default="configs/inference_ccedit/keyframe_no2ndca_depthmidas.yaml",
    )
    parser.add_argument(
        "--ckpt_path",
        type=str,
        default="models/tv2v-no2ndca-depthmidas.ckpt",
    )
    parser.add_argument(
        "--use_default", action="store_true", help="use default ckpt at first"
    )
    parser.add_argument(
        "--basemodel_path",
        type=str,
        default="",
        help="load a new base model instead of original sd-1.5",
    )
    parser.add_argument("--basemodel_listpath", type=str, default="")
    parser.add_argument("--lora_path", type=str, default="")
    parser.add_argument("--vae_path", type=str, default="")
    parser.add_argument(
        "--jsonl_path",
        type=str,
        required=True,
        help="path to jsonl file containing video paths, prompts, and edit prompts"
    )
    # Previously hardcoded inside the loop; parameterized with the old value
    # as default so existing invocations keep working.
    parser.add_argument(
        "--videos_root",
        type=str,
        default="/home/wangjuntong/video_editing_dataset/all_sourse",
        help="root directory containing the source videos listed in the jsonl",
    )
    parser.add_argument("--save_root", type=str, default="outputs")
    parser.add_argument("--H", type=int, default=512)
    parser.add_argument("--W", type=int, default=768)
    parser.add_argument("--original_fps", type=int, default=18)
    parser.add_argument("--target_fps", type=int, default=6)
    parser.add_argument("--num_keyframes", type=int, default=17)
    parser.add_argument("--negative_prompt", type=str, default="ugly, low quality")
    parser.add_argument("--sample_steps", type=int, default=30)
    parser.add_argument("--sampler_name", type=str, default="DPMPP2SAncestralSampler")
    parser.add_argument(
        "--discretization_name", type=str, default="LegacyDDPMDiscretization"
    )
    parser.add_argument("--cfg_scale", type=float, default=7.5)
    parser.add_argument("--prior_coefficient_x", type=float, default=0.0)
    parser.add_argument("--prior_coefficient_noise", type=float, default=1.0)
    parser.add_argument("--sdedit_denoise_strength", type=float, default=0.0)
    parser.add_argument("--num_samples", type=int, default=2)
    parser.add_argument("--batch_size", type=int, default=1)
    parser.add_argument('--disable_check_repeat', action='store_true', help='disable check repeat')
    parser.add_argument('--lora_strength', type=float, default=0.8)
    parser.add_argument('--save_type', type=str, default='mp4', choices=['gif', 'mp4'])
    parser.add_argument('--inpainting_mode', action='store_true', help='inpainting mode')
    args = parser.parse_args()

    # -1 means "pick a random seed".
    seed = args.seed
    if seed == -1:
        seed = random.randint(0, 1000000)
    seed_everything(seed)

    model = create_model(config_path=args.config_path).to("cuda")
    ckpt_path = args.ckpt_path
    print("--> load ckpt from: ", ckpt_path)
    model = model_load_ckpt(model, path=ckpt_path)
    model.eval()

    # One JSON object per line.
    with open(args.jsonl_path, 'r') as f:
        lines = f.readlines()
    video_info_list = [json.loads(line) for line in lines]

    for video_info in video_info_list:
        video_name = video_info['video']
        prompt = video_info['prompt']
        add_prompt = video_info['edit_prompt']
        video_path = os.path.join(args.videos_root, video_name)
        save_path = os.path.join(args.save_root, os.path.splitext(video_name)[0])

        # Keyframes come back as (T, C, H, W); add a batch dim and move the
        # time axis to get the (B, C, T, H, W) layout the model expects.
        keyframes = load_video_keyframes(
            video_path,
            args.original_fps,
            args.target_fps,
            args.num_keyframes,
            (args.H, args.W),
        )
        keyframes = keyframes.unsqueeze(0)
        keyframes = rearrange(keyframes, "b t c h w -> b c t h w").to(model.device)
        control_hint = keyframes

        batch = {
            "txt": [prompt],
            "control_hint": control_hint,
        }
        negative_prompt = args.negative_prompt
        # The unconditional batch keeps the same control hint (the classifier
        # -free guidance only drops the text conditioning).
        batch_uc = {
            "txt": [negative_prompt],
            "control_hint": batch["control_hint"].clone(),
        }
        if add_prompt:
            batch["txt"] = [add_prompt + ", " + prompt]

        c, uc = model.conditioner.get_unconditional_conditioning(
            batch_c=batch,
            batch_uc=batch_uc,
        )

        sampling_kwargs = {}

        # Move conditioning tensors onto the model device.
        for k in c:
            if isinstance(c[k], torch.Tensor):
                c[k], uc[k] = map(lambda y: y[k].to(model.device), (c, uc))
        # Latent shape: 4 channels, T frames, spatial dims downscaled 8x.
        shape = (4, args.num_keyframes, args.H // 8, args.W // 8)

        with torch.no_grad():
            with torch.cuda.amp.autocast():
                randn = torch.randn(1, *shape).to(model.device)
                if args.sdedit_denoise_strength == 0.0:
                    # Plain sampling from noise (optionally mixed with an
                    # encoded-video prior).

                    def denoiser(input, sigma, c):
                        return model.denoiser(
                            model.model, input, sigma, c, **sampling_kwargs
                        )

                    if args.prior_coefficient_x != 0.0:
                        prior = model.encode_first_stage(keyframes)
                        randn = (
                            args.prior_coefficient_x * prior
                            + args.prior_coefficient_noise * randn
                        )
                    sampler = init_sampling(
                        sample_steps=args.sample_steps,
                        sampler_name=args.sampler_name,
                        discretization_name=args.discretization_name,
                        guider_config_target="sgm.modules.diffusionmodules.guiders.VanillaCFGTV2V",
                        cfg_scale=args.cfg_scale,
                    )
                    sampler.verbose = True
                    samples = sampler(denoiser, randn, c, uc=uc)
                else:
                    # SDEdit-style sampling: partially noise the encoded
                    # source video and denoise from there.
                    assert (
                        args.sdedit_denoise_strength > 0.0
                    ), "sdedit_denoise_strength should be positive"
                    assert (
                        args.sdedit_denoise_strength <= 1.0
                    ), "sdedit_denoise_strength should be less than 1.0"
                    assert (
                        args.prior_coefficient_x == 0
                    ), "prior_coefficient_x should be 0 when using sdedit_denoise_strength"
                    denoise_strength = args.sdedit_denoise_strength
                    sampler = init_sampling(
                        sample_steps=args.sample_steps,
                        sampler_name=args.sampler_name,
                        discretization_name=args.discretization_name,
                        guider_config_target="sgm.modules.diffusionmodules.guiders.VanillaCFGTV2V",
                        cfg_scale=args.cfg_scale,
                        img2img_strength=denoise_strength,
                    )
                    sampler.verbose = True
                    z = model.encode_first_stage(keyframes)
                    noise = torch.randn_like(z)
                    sigmas = sampler.discretization(sampler.num_steps).to(z.device)
                    sigma = sigmas[0]

                    print(f"all sigmas: {sigmas}")
                    print(f"noising sigma: {sigma}")
                    noised_z = z + noise * append_dims(sigma, z.ndim)
                    # Rescale to match the sampler's expected input scale.
                    noised_z = noised_z / torch.sqrt(
                        1.0 + sigmas[0] ** 2.0
                    )

                    def denoiser(x, sigma, c):
                        return model.denoiser(model.model, x, sigma, c)
                    samples = sampler(denoiser, noised_z, cond=c, uc=uc)

                samples = model.decode_first_stage(samples)

        # Map decoded frames from [-1, 1] to [0, 1] before saving.
        samples = (torch.clamp(samples, -1.0, 1.0) + 1.0) / 2.0
        os.makedirs(save_path, exist_ok=True)
        perform_save_locally_video(
            save_path,
            samples,
            args.target_fps,
            args.save_type,
            save_grid=False
        )
        print(f"Saved video to {save_path}")
|
CCEdit-main/scripts/sampling/sampling_tv2v_ref.py
ADDED
|
@@ -0,0 +1,550 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import json
|
| 3 |
+
import os
|
| 4 |
+
import random
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
from einops import rearrange, repeat
|
| 8 |
+
from pytorch_lightning import seed_everything
|
| 9 |
+
from safetensors import safe_open
|
| 10 |
+
from torch import autocast
|
| 11 |
+
|
| 12 |
+
from scripts.sampling.util import (
|
| 13 |
+
chunk,
|
| 14 |
+
convert_load_lora,
|
| 15 |
+
create_model,
|
| 16 |
+
init_sampling,
|
| 17 |
+
load_img,
|
| 18 |
+
load_video_keyframes,
|
| 19 |
+
model_load_ckpt,
|
| 20 |
+
perform_save_locally_video,
|
| 21 |
+
)
|
| 22 |
+
from sgm.util import append_dims
|
| 23 |
+
|
| 24 |
+
if __name__ == "__main__":
|
| 25 |
+
parser = argparse.ArgumentParser()
|
| 26 |
+
parser.add_argument("--seed", type=int, default=42)
|
| 27 |
+
parser.add_argument(
|
| 28 |
+
"--config_path",
|
| 29 |
+
type=str,
|
| 30 |
+
default="",
|
| 31 |
+
)
|
| 32 |
+
parser.add_argument(
|
| 33 |
+
"--ckpt_path",
|
| 34 |
+
type=str,
|
| 35 |
+
default="",
|
| 36 |
+
)
|
| 37 |
+
parser.add_argument(
|
| 38 |
+
"--use_default", action="store_true", help="use default ckpt at first"
|
| 39 |
+
)
|
| 40 |
+
parser.add_argument(
|
| 41 |
+
"--basemodel_path",
|
| 42 |
+
type=str,
|
| 43 |
+
default="",
|
| 44 |
+
help="load a new base model instead of original sd-1.5",
|
| 45 |
+
)
|
| 46 |
+
parser.add_argument("--basemodel_listpath", type=str, default="")
|
| 47 |
+
parser.add_argument("--lora_path", type=str, default="")
|
| 48 |
+
parser.add_argument("--vae_path", type=str, default="")
|
| 49 |
+
parser.add_argument(
|
| 50 |
+
"--video_path",
|
| 51 |
+
type=str,
|
| 52 |
+
default="",
|
| 53 |
+
)
|
| 54 |
+
parser.add_argument(
|
| 55 |
+
'--reference_path',
|
| 56 |
+
type=str,
|
| 57 |
+
default='',
|
| 58 |
+
)
|
| 59 |
+
parser.add_argument("--prompt_listpath", type=str, default="")
|
| 60 |
+
parser.add_argument("--video_listpath", type=str, default="")
|
| 61 |
+
parser.add_argument(
|
| 62 |
+
"--videos_directory",
|
| 63 |
+
type=str,
|
| 64 |
+
default="",
|
| 65 |
+
help="directory containing videos to be processed",
|
| 66 |
+
)
|
| 67 |
+
parser.add_argument(
|
| 68 |
+
'--json_path',
|
| 69 |
+
type=str,
|
| 70 |
+
default='',
|
| 71 |
+
help='path to json file containing video paths and captions'
|
| 72 |
+
)
|
| 73 |
+
parser.add_argument(
|
| 74 |
+
'--videos_root',
|
| 75 |
+
type=str,
|
| 76 |
+
default='',
|
| 77 |
+
help='path to the root of videos'
|
| 78 |
+
)
|
| 79 |
+
parser.add_argument(
|
| 80 |
+
'--reference_root',
|
| 81 |
+
type=str,
|
| 82 |
+
default='',
|
| 83 |
+
help='path to the root of reference videos'
|
| 84 |
+
)
|
| 85 |
+
parser.add_argument("--save_path", type=str, default="outputs/demo/tv2v")
|
| 86 |
+
parser.add_argument("--H", type=int, default=256)
|
| 87 |
+
parser.add_argument("--W", type=int, default=384)
|
| 88 |
+
parser.add_argument("--detect_ratio", type=float, default=1.0)
|
| 89 |
+
parser.add_argument("--original_fps", type=int, default=20)
|
| 90 |
+
parser.add_argument("--target_fps", type=int, default=3)
|
| 91 |
+
parser.add_argument("--num_keyframes", type=int, default=9)
|
| 92 |
+
parser.add_argument("--prompt", type=str, default="")
|
| 93 |
+
parser.add_argument("--negative_prompt", type=str, default="ugly, low quality")
|
| 94 |
+
parser.add_argument("--add_prompt", type=str, default="masterpiece, high quality")
|
| 95 |
+
parser.add_argument("--sample_steps", type=int, default=50)
|
| 96 |
+
parser.add_argument("--sampler_name", type=str, default="EulerEDMSampler")
|
| 97 |
+
parser.add_argument(
|
| 98 |
+
"--discretization_name", type=str, default="LegacyDDPMDiscretization"
|
| 99 |
+
)
|
| 100 |
+
parser.add_argument("--cfg_scale", type=float, default=7.5)
|
| 101 |
+
parser.add_argument("--prior_coefficient_x", type=float, default=0.0)
|
| 102 |
+
parser.add_argument("--prior_coefficient_noise", type=float, default=1.0)
|
| 103 |
+
parser.add_argument("--sdedit_denoise_strength", type=float, default=0.0)
|
| 104 |
+
parser.add_argument('--prior_type', type=str, default='ref', choices=['video', 'ref', 'video_ref'])
|
| 105 |
+
parser.add_argument("--num_samples", type=int, default=1)
|
| 106 |
+
parser.add_argument("--batch_size", type=int, default=4)
|
| 107 |
+
parser.add_argument('--disable_check_repeat', action='store_true', help='disable check repeat')
|
| 108 |
+
parser.add_argument('--lora_strength', type=float, default=0.8)
|
| 109 |
+
parser.add_argument('--save_type', type=str, default='mp4', choices=['gif', 'mp4'])
|
| 110 |
+
parser.add_argument('--auto_ref_editing', action='store_true', help='auto center editing')
|
| 111 |
+
args = parser.parse_args()
|
| 112 |
+
|
| 113 |
+
seed = args.seed
|
| 114 |
+
if seed == -1:
|
| 115 |
+
seed = random.randint(0, 1000000)
|
| 116 |
+
seed_everything(seed)
|
| 117 |
+
|
| 118 |
+
# initialize the model
|
| 119 |
+
model = create_model(config_path=args.config_path).to("cuda")
|
| 120 |
+
ckpt_path = args.ckpt_path
|
| 121 |
+
print("--> load ckpt from: ", ckpt_path)
|
| 122 |
+
model = model_load_ckpt(model, path=ckpt_path)
|
| 123 |
+
model.eval()
|
| 124 |
+
|
| 125 |
+
# load the prompts and video_paths
|
| 126 |
+
video_save_paths = []
|
| 127 |
+
assert not (args.prompt_listpath and args.videos_directory), (
|
| 128 |
+
"Only one of prompt_listpath and videos_directory can be provided, "
|
| 129 |
+
"but got prompt_listpath: {}, videos_directory: {}".format(
|
| 130 |
+
args.prompt_listpath, args.videos_directory
|
| 131 |
+
)
|
| 132 |
+
)
|
| 133 |
+
if args.prompt_listpath:
|
| 134 |
+
with open(args.prompt_listpath, "r") as f:
|
| 135 |
+
prompts = f.readlines()
|
| 136 |
+
prompts = [p.strip() for p in prompts]
|
| 137 |
+
# load paths of cond_img
|
| 138 |
+
assert args.video_listpath, (
|
| 139 |
+
"video_listpath must be provided when prompt_listpath is provided, "
|
| 140 |
+
"but got video_listpath: {}".format(args.video_listpath)
|
| 141 |
+
)
|
| 142 |
+
with open(args.video_listpath, "r") as f:
|
| 143 |
+
video_paths = f.readlines()
|
| 144 |
+
video_paths = [p.strip() for p in video_paths]
|
| 145 |
+
elif args.videos_directory:
|
| 146 |
+
prompts = []
|
| 147 |
+
video_paths = []
|
| 148 |
+
for video_name in os.listdir(args.videos_directory):
|
| 149 |
+
video_path = os.path.join(args.videos_directory, video_name)
|
| 150 |
+
if os.path.isdir(video_path):
|
| 151 |
+
prompts.append(video_name)
|
| 152 |
+
video_paths.append(video_path)
|
| 153 |
+
elif args.json_path:
|
| 154 |
+
assert args.videos_root != '', 'videos_root must be provided when json_path is provided'
|
| 155 |
+
assert args.reference_root != '', 'reference_root must be provided when json_path is provided'
|
| 156 |
+
with open(args.json_path, 'r') as f:
|
| 157 |
+
json_dict = json.load(f)
|
| 158 |
+
prompts = []
|
| 159 |
+
video_paths = []
|
| 160 |
+
ref_paths = []
|
| 161 |
+
for item in json_dict:
|
| 162 |
+
video_path = os.path.join(args.videos_root, item["Video Type"], item["Video Name"] + '.mp4')
|
| 163 |
+
|
| 164 |
+
for edit in item['Editing']:
|
| 165 |
+
video_save_path = os.path.join(args.save_path, item["Video Type"], item["Video Name"], edit["Target Prompt"])
|
| 166 |
+
if os.path.exists(video_save_path):
|
| 167 |
+
print(f'video {video_save_path} exists, skip it.')
|
| 168 |
+
continue
|
| 169 |
+
|
| 170 |
+
video_paths.append(video_path)
|
| 171 |
+
prompts.append(edit["Target Prompt"])
|
| 172 |
+
video_save_paths.append(video_save_path)
|
| 173 |
+
# outputs/debug/automatic_ref_editing/output_auto
|
| 174 |
+
# ref_paths.append(os.path.join(
|
| 175 |
+
# args.videos_root + '-centerframe', item["Video Type"], item["Video Name"] + '.png'))
|
| 176 |
+
ref_paths.append(os.path.join(
|
| 177 |
+
args.reference_root, 'output-{}.png'.format(edit["Target Prompt"])))
|
| 178 |
+
else:
|
| 179 |
+
assert args.prompt and args.video_path, (
|
| 180 |
+
"prompt and video_path must be provided when prompt_listpath and videos_directory are not provided, "
|
| 181 |
+
"but got prompt: {}, video_path: {}".format(args.prompt, args.video_path)
|
| 182 |
+
)
|
| 183 |
+
prompts = [args.prompt]
|
| 184 |
+
video_paths = [args.video_path]
|
| 185 |
+
|
| 186 |
+
assert len(prompts) == len(
|
| 187 |
+
video_paths
|
| 188 |
+
), "The number of prompts and video_paths must be the same, and you provided {} prompts and {} video_paths".format(
|
| 189 |
+
len(prompts), len(video_paths)
|
| 190 |
+
)
|
| 191 |
+
|
| 192 |
+
if not args.json_path:
|
| 193 |
+
ref_paths = [args.reference_path]
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
num_samples = args.num_samples
|
| 197 |
+
batch_size = args.batch_size
|
| 198 |
+
|
| 199 |
+
print("\nNumber of prompts: {}".format(len(prompts)))
|
| 200 |
+
print("Generate {} samples for each prompt".format(num_samples))
|
| 201 |
+
|
| 202 |
+
prompts = [item for item in prompts for _ in range(num_samples)]
|
| 203 |
+
video_paths = [item for item in video_paths for _ in range(num_samples)]
|
| 204 |
+
ref_paths = [item for item in ref_paths for _ in range(num_samples)]
|
| 205 |
+
|
| 206 |
+
prompts_chunk = list(chunk(prompts, batch_size))
|
| 207 |
+
video_paths_chunk = list(chunk(video_paths, batch_size))
|
| 208 |
+
ref_paths_chunk = list(chunk(ref_paths, batch_size))
|
| 209 |
+
del prompts
|
| 210 |
+
del video_paths
|
| 211 |
+
del ref_paths
|
| 212 |
+
|
| 213 |
+
# load paths of basemodel if provided
|
| 214 |
+
assert not (args.basemodel_path and args.basemodel_listpath), (
|
| 215 |
+
"Only one of basemodel_path and basemodel_listpath can be provided, "
|
| 216 |
+
"but got basemodel_path: {}, basemodel_listpath: {}".format(
|
| 217 |
+
args.basemodel_path, args.basemodel_listpath
|
| 218 |
+
)
|
| 219 |
+
)
|
| 220 |
+
basemodel_paths = []
|
| 221 |
+
if args.basemodel_listpath:
|
| 222 |
+
with open(args.basemodel_listpath, "r") as f:
|
| 223 |
+
basemodel_paths = f.readlines()
|
| 224 |
+
basemodel_paths = [p.strip() for p in basemodel_paths]
|
| 225 |
+
if args.basemodel_path:
|
| 226 |
+
basemodel_paths = [args.basemodel_path]
|
| 227 |
+
if args.use_default:
|
| 228 |
+
basemodel_paths = ["default"] + basemodel_paths
|
| 229 |
+
if len(basemodel_paths) == 0:
|
| 230 |
+
basemodel_paths = ["default"]
|
| 231 |
+
|
| 232 |
+
for basemodel_idx, basemodel_path in enumerate(basemodel_paths):
|
| 233 |
+
print("-> base model idx: ", basemodel_idx)
|
| 234 |
+
print("-> base model path: ", basemodel_path)
|
| 235 |
+
|
| 236 |
+
if basemodel_path == "default":
|
| 237 |
+
pass
|
| 238 |
+
elif basemodel_path:
|
| 239 |
+
print("--> load a new base model from {}".format(basemodel_path))
|
| 240 |
+
model = model_load_ckpt(model, basemodel_path, True)
|
| 241 |
+
|
| 242 |
+
if args.lora_path:
|
| 243 |
+
print("--> load a new LoRA model from {}".format(args.lora_path))
|
| 244 |
+
sd_state_dict = model.state_dict()
|
| 245 |
+
lora_path = args.lora_path
|
| 246 |
+
|
| 247 |
+
if lora_path.endswith(".safetensors"):
|
| 248 |
+
lora_state_dict = {}
|
| 249 |
+
|
| 250 |
+
# with safe_open(lora_path, framework="pt", device='cpu') as f:
|
| 251 |
+
with safe_open(lora_path, framework="pt", device=0) as f:
|
| 252 |
+
for key in f.keys():
|
| 253 |
+
lora_state_dict[key] = f.get_tensor(key)
|
| 254 |
+
|
| 255 |
+
is_lora = all("lora" in k for k in lora_state_dict.keys())
|
| 256 |
+
if not is_lora:
|
| 257 |
+
raise ValueError(
|
| 258 |
+
f"The model you provided in [{lora_path}] is not a LoRA model. "
|
| 259 |
+
)
|
| 260 |
+
else:
|
| 261 |
+
raise NotImplementedError
|
| 262 |
+
sd_state_dict = convert_load_lora(
|
| 263 |
+
sd_state_dict, lora_state_dict, alpha=args.lora_strength
|
| 264 |
+
) #
|
| 265 |
+
model.load_state_dict(sd_state_dict)
|
| 266 |
+
|
| 267 |
+
# TODO: the logic here is not elegant.
|
| 268 |
+
if args.vae_path:
|
| 269 |
+
vae_path = args.vae_path
|
| 270 |
+
print("--> load a new VAE model from {}".format(vae_path))
|
| 271 |
+
|
| 272 |
+
if vae_path.endswith(".pt"):
|
| 273 |
+
vae_state_dict = torch.load(vae_path, map_location="cpu")["state_dict"]
|
| 274 |
+
msg = model.first_stage_model.load_state_dict(
|
| 275 |
+
vae_state_dict, strict=False
|
| 276 |
+
)
|
| 277 |
+
elif vae_path.endswith(".safetensors"):
|
| 278 |
+
vae_state_dict = {}
|
| 279 |
+
|
| 280 |
+
# with safe_open(vae_path, framework="pt", device='cpu') as f:
|
| 281 |
+
with safe_open(vae_path, framework="pt", device=0) as f:
|
| 282 |
+
for key in f.keys():
|
| 283 |
+
vae_state_dict[key] = f.get_tensor(key)
|
| 284 |
+
|
| 285 |
+
msg = model.first_stage_model.load_state_dict(
|
| 286 |
+
vae_state_dict, strict=False
|
| 287 |
+
)
|
| 288 |
+
else:
|
| 289 |
+
raise ValueError("Cannot load vae model from {}".format(vae_path))
|
| 290 |
+
|
| 291 |
+
print("msg of loading vae: ", msg)
|
| 292 |
+
|
| 293 |
+
if os.path.exists(
|
| 294 |
+
os.path.join(
|
| 295 |
+
args.save_path,
|
| 296 |
+
basemodel_path.split("/")[-1].split(".")[0],
|
| 297 |
+
"log_info.json",
|
| 298 |
+
)
|
| 299 |
+
):
|
| 300 |
+
with open(
|
| 301 |
+
os.path.join(
|
| 302 |
+
args.save_path,
|
| 303 |
+
basemodel_path.split("/")[-1].split(".")[0],
|
| 304 |
+
"log_info.json",
|
| 305 |
+
),
|
| 306 |
+
"r",
|
| 307 |
+
) as f:
|
| 308 |
+
log_info = json.load(f)
|
| 309 |
+
else:
|
| 310 |
+
log_info = {
|
| 311 |
+
"basemodel_path": basemodel_path,
|
| 312 |
+
"lora_path": args.lora_path,
|
| 313 |
+
"vae_path": args.vae_path,
|
| 314 |
+
"video_paths": [],
|
| 315 |
+
"keyframes_paths": [],
|
| 316 |
+
}
|
| 317 |
+
|
| 318 |
+
num_keyframes = args.num_keyframes
|
| 319 |
+
|
| 320 |
+
for idx, (prompts, video_paths, ref_paths) in enumerate(
|
| 321 |
+
zip(prompts_chunk, video_paths_chunk, ref_paths_chunk)
|
| 322 |
+
):
|
| 323 |
+
# if idx == 2: # ! DEBUG
|
| 324 |
+
# break
|
| 325 |
+
if not args.disable_check_repeat:
|
| 326 |
+
while video_paths[0] in log_info["video_paths"]:
|
| 327 |
+
print(f"video [{video_paths[0]}] has been processed, skip it.")
|
| 328 |
+
prompts_list, video_paths_list = list(prompts), list(video_paths)
|
| 329 |
+
prompts_list.pop(0)
|
| 330 |
+
video_paths_list.pop(0)
|
| 331 |
+
prompts, video_paths = tuple(prompts_list), tuple(video_paths_list)
|
| 332 |
+
del prompts_list, video_paths_list
|
| 333 |
+
if len(prompts) == 0:
|
| 334 |
+
break
|
| 335 |
+
if len(video_paths) == 0:
|
| 336 |
+
continue
|
| 337 |
+
|
| 338 |
+
bs = min(len(prompts), batch_size)
|
| 339 |
+
print(f"\nProgress: {idx} / {len(prompts_chunk)}. ")
|
| 340 |
+
H, W = args.H, args.W
|
| 341 |
+
keyframes_list = []
|
| 342 |
+
print("load video ...")
|
| 343 |
+
try:
|
| 344 |
+
for video_path in video_paths:
|
| 345 |
+
keyframes = load_video_keyframes(
|
| 346 |
+
video_path,
|
| 347 |
+
args.original_fps,
|
| 348 |
+
args.target_fps,
|
| 349 |
+
num_keyframes,
|
| 350 |
+
(H, W),
|
| 351 |
+
)
|
| 352 |
+
keyframes = keyframes.unsqueeze(0) # B T C H W
|
| 353 |
+
keyframes = rearrange(keyframes, "b t c h w -> b c t h w").to(
|
| 354 |
+
model.device
|
| 355 |
+
)
|
| 356 |
+
keyframes_list.append(keyframes)
|
| 357 |
+
except:
|
| 358 |
+
print(f"Error when loading video from {video_paths}")
|
| 359 |
+
continue
|
| 360 |
+
print("load video done ...")
|
| 361 |
+
keyframes = torch.cat(keyframes_list, dim=0)
|
| 362 |
+
control_hint = keyframes
|
| 363 |
+
|
| 364 |
+
# load reference
|
| 365 |
+
ref_list = []
|
| 366 |
+
if args.auto_ref_editing:
|
| 367 |
+
print('Conduct auto ref editing, args.reference_path is ignored.')
|
| 368 |
+
# import pdb; pdb.set_trace()
|
| 369 |
+
raise NotImplementedError
|
| 370 |
+
|
| 371 |
+
else:
|
| 372 |
+
for ref_path in ref_paths:
|
| 373 |
+
ref = load_img(ref_path, (H, W))
|
| 374 |
+
ref_list.append(ref)
|
| 375 |
+
ref = torch.cat(ref_list, dim=0).to(model.device)
|
| 376 |
+
|
| 377 |
+
batch = {
|
| 378 |
+
"txt": prompts,
|
| 379 |
+
"control_hint": control_hint,
|
| 380 |
+
'cond_img': ref,
|
| 381 |
+
}
|
| 382 |
+
|
| 383 |
+
negative_prompt = args.negative_prompt
|
| 384 |
+
batch_uc = {
|
| 385 |
+
"txt": [negative_prompt for _ in range(bs)],
|
| 386 |
+
"control_hint": batch["control_hint"].clone(), # balance mode in controlnet-webui
|
| 387 |
+
'cond_img': batch["cond_img"].clone(), # follow the balance mode
|
| 388 |
+
}
|
| 389 |
+
# batch["txt"] = ["masterpiece, best quality, " + each for each in batch["txt"]]
|
| 390 |
+
if args.add_prompt:
|
| 391 |
+
batch["txt"] = [args.add_prompt + ", " + each for each in batch["txt"]]
|
| 392 |
+
c, uc = model.conditioner.get_unconditional_conditioning(
|
| 393 |
+
batch_c=batch,
|
| 394 |
+
batch_uc=batch_uc,
|
| 395 |
+
)
|
| 396 |
+
|
| 397 |
+
sampling_kwargs = {} # usually empty
|
| 398 |
+
|
| 399 |
+
for k in c:
|
| 400 |
+
if isinstance(c[k], torch.Tensor):
|
| 401 |
+
c[k], uc[k] = map(lambda y: y[k][:bs].to(model.device), (c, uc))
|
| 402 |
+
shape = (4, num_keyframes, H // 8, W // 8)
|
| 403 |
+
|
| 404 |
+
precision_scope = autocast
|
| 405 |
+
with torch.no_grad():
|
| 406 |
+
with torch.cuda.amp.autocast():
|
| 407 |
+
randn = torch.randn(bs, *shape).to(model.device)
|
| 408 |
+
if args.sdedit_denoise_strength == 0.0:
|
| 409 |
+
|
| 410 |
+
def denoiser(input, sigma, c):
|
| 411 |
+
return model.denoiser(
|
| 412 |
+
model.model, input, sigma, c, **sampling_kwargs
|
| 413 |
+
)
|
| 414 |
+
|
| 415 |
+
if args.prior_coefficient_x != 0.0:
|
| 416 |
+
assert 0.0 < args.prior_coefficient_x <= 1.0, (
|
| 417 |
+
"prior_coefficient_x should be in (0.0, 1.0], "
|
| 418 |
+
"but got {}".format(args.prior_coefficient_x)
|
| 419 |
+
)
|
| 420 |
+
# prior = model.encode_first_stage(keyframes)
|
| 421 |
+
if args.prior_type == 'video':
|
| 422 |
+
prior = model.encode_first_stage(keyframes)
|
| 423 |
+
elif args.prior_type == 'ref':
|
| 424 |
+
prior = model.encode_first_stage(ref)
|
| 425 |
+
prior = repeat(prior, 'b c h w -> b c t h w', t=num_keyframes)
|
| 426 |
+
elif args.prior_type == 'video_ref':
|
| 427 |
+
prior = model.encode_first_stage(keyframes)
|
| 428 |
+
prior_ref = model.encode_first_stage(ref)
|
| 429 |
+
prior_ref = repeat(prior_ref, 'b c h w -> b c t h w', t=num_keyframes)
|
| 430 |
+
prior = prior + prior_ref
|
| 431 |
+
else:
|
| 432 |
+
raise NotImplementedError
|
| 433 |
+
randn = (
|
| 434 |
+
args.prior_coefficient_x * prior
|
| 435 |
+
+ args.prior_coefficient_noise * randn
|
| 436 |
+
)
|
| 437 |
+
sampler = init_sampling(
|
| 438 |
+
sample_steps=args.sample_steps,
|
| 439 |
+
sampler_name=args.sampler_name,
|
| 440 |
+
discretization_name=args.discretization_name,
|
| 441 |
+
guider_config_target="sgm.modules.diffusionmodules.guiders.VanillaCFGTV2V",
|
| 442 |
+
cfg_scale=args.cfg_scale,
|
| 443 |
+
)
|
| 444 |
+
sampler.verbose = True
|
| 445 |
+
samples = sampler(denoiser, randn, c, uc=uc)
|
| 446 |
+
else:
|
| 447 |
+
assert (
|
| 448 |
+
args.sdedit_denoise_strength > 0.0
|
| 449 |
+
), "sdedit_denoise_strength should be positive"
|
| 450 |
+
assert (
|
| 451 |
+
args.sdedit_denoise_strength <= 1.0
|
| 452 |
+
), "sdedit_denoise_strength should be less than 1.0"
|
| 453 |
+
assert (
|
| 454 |
+
args.prior_coefficient_x == 0
|
| 455 |
+
), "prior_coefficient_x should be 0 when using sdedit_denoise_strength"
|
| 456 |
+
denoise_strength = args.sdedit_denoise_strength
|
| 457 |
+
sampler = init_sampling(
|
| 458 |
+
sample_steps=args.sample_steps,
|
| 459 |
+
sampler_name=args.sampler_name,
|
| 460 |
+
discretization_name=args.discretization_name,
|
| 461 |
+
guider_config_target="sgm.modules.diffusionmodules.guiders.VanillaCFGTV2V",
|
| 462 |
+
cfg_scale=args.cfg_scale,
|
| 463 |
+
img2img_strength=denoise_strength,
|
| 464 |
+
)
|
| 465 |
+
sampler.verbose = True
|
| 466 |
+
if args.prior_type == 'video':
|
| 467 |
+
z = model.encode_first_stage(keyframes)
|
| 468 |
+
elif args.prior_type == 'ref':
|
| 469 |
+
z = model.encode_first_stage(ref)
|
| 470 |
+
z = repeat(z, 'b c h w -> b c t h w', t=num_keyframes)
|
| 471 |
+
elif args.prior_type == 'video_ref':
|
| 472 |
+
z = model.encode_first_stage(keyframes)
|
| 473 |
+
z_ref = model.encode_first_stage(ref)
|
| 474 |
+
z_ref = repeat(z_ref, 'b c h w -> b c t h w', t=num_keyframes)
|
| 475 |
+
z = z + z_ref
|
| 476 |
+
else:
|
| 477 |
+
raise NotImplementedError
|
| 478 |
+
|
| 479 |
+
noise = torch.randn_like(z)
|
| 480 |
+
sigmas = sampler.discretization(sampler.num_steps).to(z.device)
|
| 481 |
+
sigma = sigmas[0]
|
| 482 |
+
|
| 483 |
+
print(f"all sigmas: {sigmas}")
|
| 484 |
+
print(f"noising sigma: {sigma}")
|
| 485 |
+
noised_z = z + noise * append_dims(sigma, z.ndim)
|
| 486 |
+
noised_z = noised_z / torch.sqrt(
|
| 487 |
+
1.0 + sigmas[0] ** 2.0
|
| 488 |
+
) # Note: hardcoded to DDPM-like scaling. need to generalize later.
|
| 489 |
+
|
| 490 |
+
def denoiser(x, sigma, c):
|
| 491 |
+
return model.denoiser(model.model, x, sigma, c)
|
| 492 |
+
|
| 493 |
+
samples = sampler(denoiser, noised_z, cond=c, uc=uc)
|
| 494 |
+
|
| 495 |
+
samples = model.decode_first_stage(samples)
|
| 496 |
+
|
| 497 |
+
# save the results
|
| 498 |
+
keyframes = (torch.clamp(keyframes, -1.0, 1.0) + 1.0) / 2.0
|
| 499 |
+
samples = (torch.clamp(samples, -1.0, 1.0) + 1.0) / 2.0
|
| 500 |
+
control_hint = (torch.clamp(c["control_hint"], -1.0, 1.0) + 1.0) / 2.0
|
| 501 |
+
# save_path = args.save_path
|
| 502 |
+
# save_path = os.path.join(
|
| 503 |
+
# save_path, basemodel_path.split("/")[-1].split(".")[0]
|
| 504 |
+
# )
|
| 505 |
+
if video_save_paths == []:
|
| 506 |
+
save_path = args.save_path
|
| 507 |
+
save_path = os.path.join(
|
| 508 |
+
save_path, basemodel_path.split("/")[-1].split(".")[0]
|
| 509 |
+
)
|
| 510 |
+
else:
|
| 511 |
+
save_path = video_save_paths[idx]
|
| 512 |
+
|
| 513 |
+
perform_save_locally_video(
|
| 514 |
+
os.path.join(save_path, "original"),
|
| 515 |
+
keyframes,
|
| 516 |
+
args.target_fps,
|
| 517 |
+
args.save_type,
|
| 518 |
+
save_grid=False,
|
| 519 |
+
)
|
| 520 |
+
|
| 521 |
+
keyframes_paths = perform_save_locally_video(
|
| 522 |
+
os.path.join(save_path, "result"),
|
| 523 |
+
samples,
|
| 524 |
+
args.target_fps,
|
| 525 |
+
args.save_type,
|
| 526 |
+
return_savepaths=True,
|
| 527 |
+
save_grid=False,
|
| 528 |
+
)
|
| 529 |
+
perform_save_locally_video(
|
| 530 |
+
os.path.join(save_path, "control_hint"),
|
| 531 |
+
control_hint,
|
| 532 |
+
args.target_fps,
|
| 533 |
+
args.save_type,
|
| 534 |
+
save_grid=False,
|
| 535 |
+
)
|
| 536 |
+
print("Saved samples to {}. Enjoy.".format(save_path))
|
| 537 |
+
|
| 538 |
+
# save video paths
|
| 539 |
+
log_info["video_paths"] += video_paths
|
| 540 |
+
log_info["keyframes_paths"] += keyframes_paths
|
| 541 |
+
|
| 542 |
+
# save log info
|
| 543 |
+
with open(os.path.join(save_path, "log_info.json"), "w") as f:
|
| 544 |
+
json.dump(log_info, f, indent=4)
|
| 545 |
+
|
| 546 |
+
# back to the original model
|
| 547 |
+
basemodel_idx += 1
|
| 548 |
+
if basemodel_idx < len(basemodel_paths):
|
| 549 |
+
print("--> back to the original model: {}".format(ckpt_path))
|
| 550 |
+
model = model_load_ckpt(model, path=ckpt_path)
|
CCEdit-main/scripts/sampling/util.py
ADDED
|
@@ -0,0 +1,813 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from itertools import islice
|
| 3 |
+
|
| 4 |
+
import decord
|
| 5 |
+
import cv2
|
| 6 |
+
import einops
|
| 7 |
+
import imageio
|
| 8 |
+
import numpy as np
|
| 9 |
+
import PIL.Image as Image
|
| 10 |
+
import torch
|
| 11 |
+
import torchvision
|
| 12 |
+
import tqdm
|
| 13 |
+
from einops import rearrange, repeat
|
| 14 |
+
from omegaconf import OmegaConf
|
| 15 |
+
from safetensors import safe_open
|
| 16 |
+
from safetensors.torch import load_file as load_safetensors
|
| 17 |
+
|
| 18 |
+
from sgm.modules.diffusionmodules.sampling import (
|
| 19 |
+
DPMPP2MSampler,
|
| 20 |
+
DPMPP2SAncestralSampler,
|
| 21 |
+
EulerAncestralSampler,
|
| 22 |
+
EulerEDMSampler,
|
| 23 |
+
HeunEDMSampler,
|
| 24 |
+
LinearMultistepSampler,
|
| 25 |
+
)
|
| 26 |
+
from sgm.modules.encoders.modules import (
|
| 27 |
+
DepthMidasEncoder,
|
| 28 |
+
DepthZoeEncoder,
|
| 29 |
+
LineartEncoder,
|
| 30 |
+
NormalBaeEncoder,
|
| 31 |
+
ScribbleHEDEncoder,
|
| 32 |
+
ScribblePidiNetEncoder,
|
| 33 |
+
SoftEdgeEncoder,
|
| 34 |
+
)
|
| 35 |
+
from sgm.util import exists, instantiate_from_config, isheatmap
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def create_model(config_path):
    """Instantiate the model described by an OmegaConf YAML file.

    The model is built on CPU from the ``model`` section of the config;
    no checkpoint weights are loaded here (see ``model_load_ckpt``).

    Args:
        config_path (str): path to the OmegaConf YAML configuration file.

    Returns:
        The instantiated model, on CPU.
    """
    cfg = OmegaConf.load(config_path)
    net = instantiate_from_config(cfg.model).cpu()
    print(f"Loaded model config from [{config_path}]")
    return net
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def model_load_ckpt(model, path, newbasemodel=False):
    """Load checkpoint weights from ``path`` into ``model`` (non-strict).

    Supports ``.ckpt``/``.pt``/``.pth`` files (including deepspeed-style
    checkpoints) and ``.safetensors`` files.  Keys are lightly rewritten to
    match this codebase's naming: a ``first_stage_model`` key that leaked
    into a conditioner-embedder prefix is stripped back to its bare name,
    and — when ``newbasemodel`` is True — legacy ``cond_stage_model`` keys
    are remapped to ``conditioner.embedders.0``.  LoRA parameters found
    among the unexpected keys are merged into the base weights via
    ``convert_load_lora``.

    Args:
        model: module to load weights into (``load_state_dict`` is called
            with ``strict=False``).
        path (str): checkpoint file path.
        newbasemodel (bool): if True, remap legacy key names and hide
            expected-missing keys (temporal / controlnet / embedder 1)
            from the report.

    Returns:
        The same ``model``, with weights loaded.

    Raises:
        NotImplementedError: if the checkpoint extension is unknown.
    """
    # TODO: how to load ema weights?
    if path.endswith("ckpt") or path.endswith(".pt") or path.endswith(".pth"):
        # Fix: load the file once — the original called torch.load a second
        # time just to test for a "state_dict" key.
        sd = torch.load(path, map_location="cpu")
        if "deepspeed" in path:
            sd = {k.replace("_forward_module.", ""): v for k, v in sd.items()}
        elif "state_dict" in sd:
            sd = sd["state_dict"]
    elif path.endswith("safetensors"):
        sd = load_safetensors(path)
    else:
        raise NotImplementedError(f"Unknown checkpoint format: {path}")

    # Strip any prefix in front of "first_stage_model" that leaked into
    # conditioner-embedder keys (e.g. "conditioner.embedders.N.first_stage_model...").
    sd_new = {}
    for k, v in sd.items():
        if k.startswith("conditioner.embedders.") and "first_stage_model" in k:
            loc = k.find("first_stage_model")
            sd_new[k[loc:]] = v
        else:
            sd_new[k] = v
    sd = sd_new
    del sd_new

    if newbasemodel:
        # Legacy SD-1.x checkpoints name the text encoder "cond_stage_model".
        sd = {
            k.replace("cond_stage_model", "conditioner.embedders.0"): v
            for k, v in sd.items()
        }

    missing, unexpected = model.load_state_dict(sd, strict=False)
    if newbasemodel:
        # These groups are expected to be missing when swapping base models.
        unwanted_substrings = ["temporal", "controlnet", "conditioner.embedders.1."]
        missing = [
            each
            for each in missing
            if all(substring not in each for substring in unwanted_substrings)
        ]
    print(
        f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys"
    )
    if len(missing) > 0:
        print(f"Missing Keys: {missing}")
    if len(unexpected) > 0:
        # Some community checkpoints (e.g. majicmixRealistic) ship LoRA
        # weights alongside the base weights; merge them instead of merely
        # reporting them as unexpected.
        if any(each.startswith("lora") for each in unexpected):
            print("detected lora parameters, load lora parameters ...", end="\r")
            lora_keys = [k for k in sd if k.startswith("lora")]
            sd_lora = {k: sd[k] for k in lora_keys}
            # Fix: collect the keys first, then remove them — the original
            # mutated `unexpected` while iterating over it.
            for k in lora_keys:
                if k in unexpected:
                    unexpected.remove(k)
            # TODO: alpha?
            sd_lora = convert_load_lora(sd_state_dict=sd, state_dict=sd_lora, alpha=0.8)
        print(f"Unexpected Keys: {unexpected}")

    return model
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def convert_load_lora(
    sd_state_dict,
    state_dict,
    LORA_PREFIX_UNET="lora_unet",
    LORA_PREFIX_TEXT_ENCODER="lora_te",
    alpha=0.6,
):
    """Merge webui-style LoRA weights into a base state dict in place.

    Each LoRA key (e.g. ``lora_unet_down_blocks_0_..._lora_down.weight``)
    is mapped to the corresponding base-model key, and
    ``alpha * (up @ down)`` is added to that base weight.

    Args:
        sd_state_dict (dict): full base-model state dict; modified in place.
        state_dict (dict): the LoRA-only parameters to merge.
        LORA_PREFIX_UNET (str): key prefix used for UNet LoRA entries.
        LORA_PREFIX_TEXT_ENCODER (str): key prefix for text-encoder entries.
        alpha (float): merge strength applied to every LoRA delta.

    Returns:
        dict: the (mutated) ``sd_state_dict``.

    Raises:
        ValueError: for a LoRA key whose layer cannot be mapped.
    """
    visited = []

    for key in tqdm.tqdm(state_dict):
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        # (each up/down pair is handled together; the partner key is skipped here)
        if ".alpha" in key or key in visited:
            print("skip: ", key)
            continue

        if "text" in key:
            # Text-encoder LoRA: split the underscore-separated layer path.
            layer_infos = (
                key.split(".")[0].split(LORA_PREFIX_TEXT_ENCODER + "_")[-1].split("_")
            )
            # curr_layer = pipeline.text_encoder
            if "self_attn" in key:
                layername = "{}.self_attn.{}_proj".format(
                    layer_infos[4], layer_infos[7]
                )
            else:
                layername = "{}.mlp.{}".format(layer_infos[4], layer_infos[-1])
            layername = (
                "cond_stage_model.transformer.text_model.encoder.layers."
                + layername
                + ".weight"
            )
        else:
            # UNet LoRA: map webui block indices onto this repo's
            # input_blocks / middle_block / output_blocks numbering.
            layer_infos = key.split(".")[0].split(LORA_PREFIX_UNET + "_")[-1].split("_")

            if "lora_unet_mid_" in key:
                if "_proj_" in key:
                    layername = (
                        "model.diffusion_model.middle_block.1.proj_{}.weight".format(
                            layer_infos[5]
                        )
                    )
                elif "_to_out_" in key:
                    layername = "model.diffusion_model.middle_block.1.transformer_blocks.0.{}.to_out.0.weight".format(
                        layer_infos[7]
                    )
                elif "_ff_net_" in key:
                    layername = "model.diffusion_model.middle_block.1.transformer_blocks.0.ff.net"
                    layername = ".".join([layername] + layer_infos[9:]) + ".weight"
                elif "attn1" in key or "attn2" in key:
                    layername = "model.diffusion_model.middle_block.1.transformer_blocks.0.{}.to_{}.weight".format(
                        layer_infos[7], layer_infos[9]
                    )
                else:
                    raise ValueError("Unknown key: ", key)
            else:
                # webui "<block>-<attn index>" -> [state-dict block idx, sub idx]
                lora_sd_map_in = {
                    "0-0": [1, 1],
                    "0-1": [2, 1],
                    "1-0": [4, 1],
                    "1-1": [5, 1],
                    "2-0": [7, 1],
                    "2-1": [8, 1],
                }
                lora_sd_map_out = {
                    "1-0": [3, 1],
                    "1-1": [4, 1],
                    "1-2": [5, 1],
                    "2-0": [6, 1],
                    "2-1": [7, 1],
                    "2-2": [8, 1],
                    "3-0": [9, 1],
                    "3-1": [10, 1],
                    "3-2": [11, 1],
                }

                if "lora_unet_down_" in key:
                    sd_idxs = lora_sd_map_in[
                        "{}-{}".format(layer_infos[2], layer_infos[4])
                    ]
                    flag_ = "input_blocks"
                elif "lora_unet_up_" in key:
                    sd_idxs = lora_sd_map_out[
                        "{}-{}".format(layer_infos[2], layer_infos[4])
                    ]
                    flag_ = "output_blocks"
                # NOTE(review): if a key is neither "lora_unet_down_" nor
                # "lora_unet_up_", `sd_idxs`/`flag_` are unbound and the
                # formats below raise NameError — confirm all inputs match.

                if "_proj_" in key:  # _proj_in and _proj_out
                    layername = "model.diffusion_model.{}.{}.{}.{}_{}.weight".format(
                        flag_, sd_idxs[0], sd_idxs[1], layer_infos[5], layer_infos[6]
                    )
                elif "_to_out_" in key:
                    # model.diffusion_model.input_blocks.1.1.transformer_blocks.0.attn1.to_out.0.weight
                    layername = "model.diffusion_model.{}.{}.{}.transformer_blocks.{}.{}.to_{}.{}.weight".format(
                        flag_,
                        sd_idxs[0],
                        sd_idxs[1],
                        layer_infos[7],
                        layer_infos[8],
                        layer_infos[10],
                        layer_infos[11],
                    )
                elif "_ff_net_" in key:
                    # model.diffusion_model.input_blocks.1.1.transformer_blocks.0.ff.net.0.proj.weight
                    layername = "model.diffusion_model.{}.{}.{}.transformer_blocks.{}.ff.net".format(
                        flag_, sd_idxs[0], sd_idxs[1], layer_infos[7]
                    )
                    layername = ".".join([layername] + layer_infos[10:]) + ".weight"
                elif "attn1" in key or "attn2" in key:
                    # model.diffusion_model.input_blocks.1.1.transformer_blocks.0.attn1.to_k.weight
                    layername = "model.diffusion_model.{}.{}.{}.transformer_blocks.{}.{}.to_{}.weight".format(
                        flag_,
                        sd_idxs[0],
                        sd_idxs[1],
                        layer_infos[7],
                        layer_infos[8],
                        layer_infos[10],
                    )
                else:
                    raise ValueError("Unknown key: ", key)
                    # print("Unknown key: {} -> skip".format(key))
                    # continue

        # Pair up the lora_up / lora_down keys: index 0 is always "up".
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        if "cond_stage_model" in layername:
            layername = layername.replace("cond_stage_model", "conditioner.embedders.0")

        # update weight
        # print('{} -> {}'.format(key, layername))
        if len(state_dict[pair_keys[0]].shape) == 4:
            # Conv LoRA: matmul on the squeezed 2-D factors, then restore dims.
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = (
                state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            )
            sd_state_dict[layername] += alpha * torch.mm(
                weight_up, weight_down
            ).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            sd_state_dict[layername] += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    print("loading lora done ... ")

    return sd_state_dict
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
def perform_save_locally_image(save_path, samples):
    """Write each image in `samples` to `save_path` as a numbered PNG.

    Pixel values are assumed to lie in [0, 1]; file numbering continues
    after any files already present in the directory.

    Args:
        save_path (str): output directory (created if missing).
        samples (torch.Tensor): batch of images, shape (B, C, H, W).
    """
    assert samples.dim() == 4, "Expected samples to have shape (B, C, H, W)"
    os.makedirs(os.path.join(save_path), exist_ok=True)
    start = len(os.listdir(os.path.join(save_path)))
    for count, img in enumerate(samples, start=start):
        arr = 255.0 * np.transpose(img.cpu().numpy(), (1, 2, 0))  # CHW -> HWC
        Image.fromarray(arr.astype(np.uint8)).save(
            os.path.join(save_path, f"{count:05}.png")
        )
|
| 286 |
+
|
| 287 |
+
|
| 288 |
+
def perform_save_locally_video(
    save_path, samples, fps, savetype="gif", return_savepaths=False, save_grid=True,
):
    """Save a batch of videos under `save_path` as GIFs or MP4s.

    Pixel values are assumed to lie in [0, 1].  Outputs go into a
    subdirectory named after `savetype`; numbering continues after any
    files already present there.  Optionally also saves each video as a
    single PNG frame-grid under ``save_path/grid``.

    Args:
        save_path (str): output root directory (created if missing).
        samples (torch.Tensor): videos, shape (B, C, T, H, W), values in [0, 1].
        fps (float): playback frame rate for the saved animation.
        savetype (str): "gif" or "mp4".
        return_savepaths (bool): if True, return the list of written paths.
        save_grid (bool): if True, also write a frame-grid PNG per video.

    Returns:
        list[str] | None: paths of the saved animations when
        `return_savepaths` is True, otherwise None.
    """
    assert samples.dim() == 5, "Expected samples to have shape (B, C, T, H, W)"
    assert savetype in ["gif", "mp4"]
    os.makedirs(os.path.join(save_path), exist_ok=True)
    os.makedirs(os.path.join(save_path, savetype), exist_ok=True)
    base_count_savetype = len(os.listdir(os.path.join(save_path, savetype)))
    if save_grid:
        os.makedirs(os.path.join(save_path, "grid"), exist_ok=True)
        base_count_grid = len(os.listdir(os.path.join(save_path, "grid")))
    savepaths = []
    for sample in samples:
        # NOTE(review): sample is (C, T, H, W), so shape[0] is the channel
        # count, not the frame count — `nrow=t` below may be unintended;
        # confirm whether shape[1] (T) was meant.
        t = sample.shape[0]
        sample_grid = einops.rearrange(sample, "c t h w -> t c h w")
        if save_grid:
            torchvision.utils.save_image(
                sample_grid,
                os.path.join(save_path, "grid", f"grid-{base_count_grid:04}.png"),
                nrow=t,
                normalize=False,
                padding=0,
            )

        # Convert to a list of uint8 HWC frames for the animation writers.
        sample = 255.0 * einops.rearrange(sample.cpu().numpy(), "c t h w -> t h w c")
        sample = sample.astype(np.uint8)
        frames = [each for each in sample]
        if savetype == "gif":
            savepath = os.path.join(
                save_path, "gif", f"animation-{base_count_savetype:04}.gif"
            )
            imageio.mimsave(
                savepath,
                frames,
                format="GIF",
                duration=1 / fps,
                loop=0,  # loop forever
            )
        elif savetype == "mp4":
            savepath = os.path.join(
                save_path, "mp4", f"animation-{base_count_savetype:04}.mp4"
            )
            # height, width, layers = frames[0].shape
            # size = (width, height)
            # fourcc = cv2.VideoWriter_fourcc(*'avc1')
            # out = cv2.VideoWriter(savepath, fourcc, fps, size)
            # for frame in frames:
            #     frame_bgr = cv2.cvtColor(frame.astype(np.uint8), cv2.COLOR_RGB2BGR)
            #     out.write(frame_bgr)
            # out.release()
            with imageio.get_writer(savepath, fps=fps) as writer:
                for frame in frames:
                    writer.append_data(frame)

        else:
            # Unreachable: savetype was asserted above.
            raise ValueError(f"Unknown savetype: {savetype}")
        base_count_savetype += 1
        if save_grid:
            base_count_grid += 1
        savepaths.append(savepath)

    if return_savepaths:
        return savepaths
    else:
        return
|
| 353 |
+
|
| 354 |
+
|
| 355 |
+
def chunk(it, size):
    """Yield successive tuples of at most `size` items from iterable `it`.

    The final tuple may be shorter than `size`; iteration stops when the
    source is exhausted.
    """
    iterator = iter(it)

    def _next_batch():
        return tuple(islice(iterator, size))

    # iter(callable, sentinel): stop when a batch comes back empty.
    return iter(_next_batch, ())
|
| 358 |
+
|
| 359 |
+
|
| 360 |
+
def load_img(p_cond_img, size: tuple = None):
    """
    Loads an image from the given path and resizes it to the given height and width.
    Converts the image to a tensor and normalizes it to the range [-1, 1]. Shape: (1, 3, H, W)

    Args:
    - p_cond_img (str): path to the image file
    - size (tuple): height and width to resize the image to

    Returns:
    - cond_img (torch.Tensor): tensor of the resized and normalized image.
    """

    # Fix: force 3-channel RGB. Without this, grayscale images (2-D arrays)
    # crash on permute(2, 0, 1) and RGBA images yield a 4-channel tensor.
    cond_img = Image.open(p_cond_img).convert("RGB")
    if size:
        assert len(size) == 2, "size should be (H, W)"
        H, W = size
        cond_img = cond_img.resize((W, H), Image.BICUBIC)  # PIL takes (width, height)
    cond_img = np.array(cond_img)
    # HWC uint8 -> (1, 3, H, W) float in [0, 1] ...
    cond_img = torch.from_numpy(cond_img).permute(2, 0, 1).unsqueeze(0).float() / 255.0
    # ... then rescale to [-1, 1].
    cond_img = cond_img * 2.0 - 1.0
    cond_img = torch.clamp(cond_img, -1.0, 1.0)
    return cond_img
|
| 383 |
+
|
| 384 |
+
|
| 385 |
+
def init_sampling(
    sample_steps=50,
    sampler_name="EulerEDMSampler",
    discretization_name="LegacyDDPMDiscretization",
    guider_config_target="sgm.modules.diffusionmodules.guiders.VanillaCFG",
    cfg_scale=7.5,
    img2img_strength=1.0,
):
    """Build a fully-configured diffusion sampler.

    Validates the requested sampler / discretization names, assembles the
    guider and discretization configs, and — when `img2img_strength` < 1 —
    wraps the discretization so sampling starts from a partially-noised
    input (SDEdit-style).

    Args:
        sample_steps (int): number of sampling steps, in [1, 1000].
        sampler_name (str): one of the supported sampler class names.
        discretization_name (str): "LegacyDDPMDiscretization" or "EDMDiscretization".
        guider_config_target (str): import path of the CFG guider class.
        cfg_scale (float): classifier-free-guidance scale.
        img2img_strength (float): denoise strength; < 1.0 enables img2img.

    Returns:
        The configured sampler instance.
    """
    assert (
        1 <= sample_steps <= 1000
    ), "sample_steps must be between 1 and 1000, but got {}".format(sample_steps)
    known_samplers = (
        "EulerEDMSampler",
        "HeunEDMSampler",
        "EulerAncestralSampler",
        "DPMPP2SAncestralSampler",
        "DPMPP2MSampler",
        "LinearMultistepSampler",
    )
    assert sampler_name in known_samplers, "unknown sampler {}".format(sampler_name)
    known_discretizations = ("LegacyDDPMDiscretization", "EDMDiscretization")
    assert (
        discretization_name in known_discretizations
    ), "unknown discretization {}".format(discretization_name)

    discretization_config = get_discretization(discretization_name)
    guider_config = get_guider(
        guider_config_target=guider_config_target, scale=cfg_scale
    )
    sampler = get_sampler(
        sampler_name, sample_steps, discretization_config, guider_config
    )

    if img2img_strength < 1.0:
        # Imported lazily to avoid pulling in streamlit for pure txt2vid runs.
        from scripts.demo.streamlit_helpers import Img2ImgDiscretizationWrapper

        sampler.discretization = Img2ImgDiscretizationWrapper(
            sampler.discretization, strength=img2img_strength
        )
    return sampler
|
| 426 |
+
|
| 427 |
+
|
| 428 |
+
def get_discretization(discretization):
    """Return an instantiation config dict for the named discretization.

    Args:
        discretization (str): "LegacyDDPMDiscretization" or "EDMDiscretization".

    Returns:
        dict: an `instantiate_from_config`-style dict ("target" plus
        optional "params").

    Raises:
        ValueError: for an unknown discretization name.
    """
    if discretization == "LegacyDDPMDiscretization":
        return {
            "target": "sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization",
        }
    if discretization == "EDMDiscretization":
        # Hard-coded EDM noise-schedule defaults used throughout this repo.
        sigma_min = 0.03
        sigma_max = 14.61
        rho = 3.0

        return {
            "target": "sgm.modules.diffusionmodules.discretizer.EDMDiscretization",
            "params": {
                "sigma_min": sigma_min,
                "sigma_max": sigma_max,
                "rho": rho,
            },
        }
    # Fix: an unknown name previously fell through to an UnboundLocalError
    # on the return statement; fail loudly with a clear message instead.
    raise ValueError(f"unknown discretization {discretization}")
|
| 448 |
+
|
| 449 |
+
|
| 450 |
+
def get_guider(
    guider_config_target="sgm.modules.diffusionmodules.guiders.VanillaCFG",
    scale=7.5,
):
    """Build the instantiation config for the CFG guider.

    NOTE(review): the guider kind is hard-coded to "VanillaCFG" below, so
    the IdentityGuider path is currently unreachable; `guider_config_target`
    only selects which concrete CFG class gets instantiated.

    Args:
        guider_config_target (str): import path of the guider class.
        scale (float): classifier-free-guidance scale.

    Returns:
        dict: an `instantiate_from_config`-style config dict.
    """
    guider = "VanillaCFG"

    if guider == "IdentityGuider":
        return {"target": "sgm.modules.diffusionmodules.guiders.IdentityGuider"}
    if guider == "VanillaCFG":
        # No dynamic thresholding is applied.
        dyn_thresh_config = {
            "target": "sgm.modules.diffusionmodules.sampling_utils.NoDynamicThresholding"
        }
        return {
            "target": guider_config_target,
            "params": {"scale": scale, "dyn_thresh_config": dyn_thresh_config},
        }
    raise NotImplementedError
|
| 478 |
+
|
| 479 |
+
|
| 480 |
+
def get_sampler(sampler_name, steps, discretization_config, guider_config):
    """Instantiate one of the supported diffusion samplers by name.

    All samplers share the same core configuration; per-family extras
    (EDM churn parameters, ancestral eta/noise, multistep order) use the
    repo's default values.

    Args:
        sampler_name (str): sampler class name to instantiate.
        steps (int): number of sampling steps.
        discretization_config (dict): config for the noise discretization.
        guider_config (dict): config for the CFG guider.

    Returns:
        The instantiated sampler.

    Raises:
        ValueError: for an unknown sampler name.
    """
    common = dict(
        num_steps=steps,
        discretization_config=discretization_config,
        guider_config=guider_config,
        verbose=True,
    )
    if sampler_name in ("EulerEDMSampler", "HeunEDMSampler"):
        cls = EulerEDMSampler if sampler_name == "EulerEDMSampler" else HeunEDMSampler
        # default EDM churn parameters
        return cls(s_churn=0.0, s_tmin=0.0, s_tmax=999.0, s_noise=1.0, **common)
    if sampler_name in ("EulerAncestralSampler", "DPMPP2SAncestralSampler"):
        cls = (
            EulerAncestralSampler
            if sampler_name == "EulerAncestralSampler"
            else DPMPP2SAncestralSampler
        )
        # default ancestral-sampling parameters
        return cls(eta=1.0, s_noise=1.0, **common)
    if sampler_name == "DPMPP2MSampler":
        return DPMPP2MSampler(**common)
    if sampler_name == "LinearMultistepSampler":
        # default multistep order
        return LinearMultistepSampler(order=4, **common)
    raise ValueError(f"unknown sampler {sampler_name}!")
|
| 557 |
+
|
| 558 |
+
|
| 559 |
+
def HWC3(x):
    """Coerce an image array to 3-channel uint8 HWC layout.

    Grayscale inputs -- shape (H, W) or (H, W, 1) -- are replicated across
    three channels; RGBA inputs are alpha-composited over a white
    background; RGB inputs are returned unchanged.

    Args:
        x (numpy.ndarray): uint8 image, 2-D or 3-D with 1/3/4 channels.

    Returns:
        numpy.ndarray: uint8 array of shape (H, W, 3).
    """
    assert x.dtype == np.uint8
    if x.ndim == 2:
        # Promote (H, W) grayscale to (H, W, 1).
        x = x[:, :, None]
    assert x.ndim == 3
    channels = x.shape[2]
    assert channels in (1, 3, 4)
    if channels == 3:
        return x
    if channels == 1:
        # Replicate the single channel three times.
        return np.repeat(x, 3, axis=2)
    # channels == 4: blend RGB over white using the alpha channel.
    rgb = x[:, :, :3].astype(np.float32)
    alpha = x[:, :, 3:].astype(np.float32) / 255.0
    blended = rgb * alpha + 255.0 * (1.0 - alpha)
    return blended.clip(0, 255).astype(np.uint8)
|
| 576 |
+
|
| 577 |
+
|
| 578 |
+
def loadmp4_and_convert_to_numpy_cv2(file_path):
    """Decode an mp4 into a numpy array of RGB frames using OpenCV.

    Abandoned in favour of the decord-based loader: per-frame
    cv2.VideoCapture reads are slow.

    Args:
        file_path (str): The path to the mp4 video file.

    Returns:
        numpy.ndarray or None: Stacked frames (RGB), or None when the
        file cannot be opened.
    """
    capture = cv2.VideoCapture(file_path)

    if not capture.isOpened():
        print("Error: Unable to open the file.")
        return None

    collected = []
    while True:
        ok, frame = capture.read()
        if not ok:
            # End of stream (or decode failure) -- stop reading.
            break
        collected.append(frame)

    capture.release()

    stacked = np.array(collected)
    stacked = np.flip(stacked, axis=-1)  # BGR to RGB
    # flip() returns a view with negative strides; copy() makes it contiguous.
    return stacked.copy()
|
| 611 |
+
|
| 612 |
+
|
| 613 |
+
def loadmp4_and_convert_to_numpy(file_path):
    """Decode every frame of an mp4 into a numpy array using decord.

    Args:
        file_path (str): The path to the mp4 video file.

    Returns:
        numpy.ndarray: All decoded frames.
    """
    reader = decord.VideoReader(file_path, num_threads=0)
    n_frames = len(reader)
    # Average fps is queried but unused; kept for parity with the original.
    fps = reader.get_avg_fps()
    batch = reader.get_batch(list(range(n_frames)))
    return batch.asnumpy()
|
| 629 |
+
|
| 630 |
+
|
| 631 |
+
def load_video(video_path, size: tuple = None, gap: int = 1):
    """
    Load a video from a given path and return a tensor representing the video frames.

    Args:
        size (tuple): The size of the video frames.
        video_path (str): The path to the video file or folder containing the video frames.
        gap (int, optional): The number of frames to skip between each selected frame. Defaults to 1.

    Returns:
        torch.Tensor: A tensor representing the video frames, with shape (T, C, H, W) and values in the range [-1, 1].
    """
    if os.path.isdir(video_path):
        # Directory of per-frame images: subsample filenames by `gap`.
        # NOTE(review): load_img presumably returns a (1, C, H, W) tensor
        # already normalized for torch.cat below -- confirm its definition.
        files = sorted(os.listdir(video_path))
        keyfiles = files[::gap]
        frames = [load_img(os.path.join(video_path, kf), size) for kf in keyfiles]
    elif video_path.endswith(".mp4") or video_path.endswith(".gif"):
        if video_path.endswith(".mp4"):
            frames = loadmp4_and_convert_to_numpy(video_path)
        elif video_path.endswith(".gif"):
            frames = imageio.mimread(video_path)
            frames = [np.array(fr) for fr in frames]
        # Normalize every frame to 3-channel uint8 HWC before stacking.
        frames = [HWC3(fr) for fr in frames]
        frames = np.stack(frames, axis=0)
        frames = (
            torch.from_numpy(frames).permute(0, 3, 1, 2).float() / 255.0
        )  # (T, C, H, W)
        frames = frames * 2.0 - 1.0  # range in [-1, 1]
        if size:
            assert len(size) == 2, "size should be (H, W)"
            frames = torch.nn.functional.interpolate(
                frames, size=size, mode="bicubic", align_corners=False
            )
        frames = frames[::gap]  # pick the element every gap frames
        # Give each frame a leading batch dim so torch.cat stacks along T.
        frames = [f.unsqueeze(0) for f in frames]
    else:
        raise ValueError(
            "Unsupported video format. Only support dirctory, .mp4 and .gif."
        )

    return torch.cat(frames, dim=0)  # (T, C, H, W)
|
| 672 |
+
|
| 673 |
+
|
| 674 |
+
def get_keyframes(original_fps, target_fps, allframes, num_keyframes):
    """Select num_keyframes frames from allframes by fps-ratio striding.

    The stride is round(original_fps / target_fps).  When striding yields
    fewer than num_keyframes indices, falls back to evenly spaced indices
    over the whole sequence (with a printed warning).

    Args:
        original_fps: Source frame rate.
        target_fps: Desired frame rate.
        allframes: Indexable frame container (e.g. numpy array).
        num_keyframes: Number of frames to return.

    Returns:
        The selected frames, indexed out of allframes.
    """
    total = len(allframes)
    gap = np.round(original_fps / target_fps).astype(int)
    assert gap > 0, f"gap {gap} should be positive."
    indices = list(range(0, total, gap))
    if len(indices) < num_keyframes:
        print(
            "[WARNING]: not enough keyframes, use linspace instead. "
            f"len(keyindexs): [{len(indices)}] < num_keyframes [{num_keyframes}]"
        )
        indices = np.linspace(0, total - 1, num_keyframes).astype(int)

    return allframes[indices[:num_keyframes]]
|
| 687 |
+
|
| 688 |
+
|
| 689 |
+
def load_video_keyframes(
    video_path, original_fps, target_fps, num_keyframes, size: tuple = None
):
    """
    Load keyframes from a video file or directory of images.

    Args:
        video_path (str): Path to the video file or directory of images.
        original_fps (int): The original frames per second of the video.
        target_fps (int): The desired frames per second of the output keyframes.
        num_keyframes (int): The number of keyframes to extract.
        size (tuple, optional): The desired size of the output keyframes. Defaults to None.

    Returns:
        torch.Tensor: A tensor of shape (T, C, H, W) containing the keyframes.
    """
    if os.path.isdir(video_path):
        # Directory of per-frame images: stride filenames by the fps ratio.
        files = sorted(os.listdir(video_path))
        num_allframes = len(files)
        # Stride needed to reduce original_fps down to target_fps.
        gap = np.round(original_fps / target_fps).astype(int)
        assert gap > 0, f"gap {gap} should be positive."
        keyindexs = [i for i in range(0, num_allframes, gap)]
        if len(keyindexs) < num_keyframes:
            # Not enough strided frames: fall back to evenly spaced
            # indices over the whole clip.
            print(
                "[WARNING]: not enough keyframes, use linspace instead. "
                f"len(keyindexs): [{len(keyindexs)}] < num_keyframes [{num_keyframes}]"
            )
            keyindexs = np.linspace(0, num_allframes - 1, num_keyframes).astype(int)
        else:
            keyindexs = keyindexs[:num_keyframes]
        keyfiles = [files[i] for i in keyindexs]
        # NOTE(review): load_img presumably returns a (1, C, H, W) tensor
        # ready for torch.cat below -- confirm against its definition.
        frames = [load_img(os.path.join(video_path, kf), size) for kf in keyfiles]
    elif video_path.endswith(".mp4") or video_path.endswith(".gif"):
        # TODO: not tested yet.
        if video_path.endswith(".mp4"):
            frames = loadmp4_and_convert_to_numpy(video_path)
        elif video_path.endswith(".gif"):
            frames = imageio.mimread(video_path)
            frames = [np.array(fr) for fr in frames]
        # Normalize every frame to 3-channel uint8 HWC before stacking.
        frames = [HWC3(fr) for fr in frames]
        frames = np.stack(frames, axis=0)
        frames = (
            torch.from_numpy(frames).permute(0, 3, 1, 2).float() / 255.0
        )  # (T, C, H, W)
        # Same keyframe-index selection as the directory branch above.
        num_allframes = frames.shape[0]
        gap = np.round(original_fps / target_fps).astype(int)
        assert gap > 0, f"gap {gap} should be positive."
        keyindexs = [i for i in range(0, num_allframes, gap)]
        if len(keyindexs) < num_keyframes:
            print(
                "[WARNING]: not enough keyframes, use linspace instead. "
                f"len(keyindexs): [{len(keyindexs)}] < num_keyframes [{num_keyframes}]"
            )
            keyindexs = np.linspace(0, num_allframes - 1, num_keyframes).astype(int)
        else:
            keyindexs = keyindexs[:num_keyframes]
        # frames = frames[keyindexs[:num_keyframes]]
        frames = frames[keyindexs]

        frames = frames * 2.0 - 1.0  # range in [-1, 1]
        frames = torch.clamp(frames, -1.0, 1.0)
        if size:
            assert len(size) == 2, "size should be (H, W)"
            frames = torch.nn.functional.interpolate(
                frames, size=size, mode="bicubic", align_corners=False
            )
        # frames = frames[::gap]  # pick the element every gap frames
        # Give each frame a leading batch dim so torch.cat stacks along T.
        frames = [f.unsqueeze(0) for f in frames]
    else:
        raise ValueError(
            "Unsupported video format. Only support dirctory, .mp4 and .gif."
        )

    return torch.cat(frames, dim=0)  # (T, C, H, W)
|
| 763 |
+
|
| 764 |
+
|
| 765 |
+
def setup_controlgenerator(model):
    """Return the control-hint encoder from the model's conditioner.

    Scans model.conditioner.embedders and returns the first embedder that
    is one of the supported structure-control encoder types.

    Args:
        model: A model whose conditioner holds a list of embedders.

    Returns:
        The first embedder instance that is a control-hint encoder.

    Raises:
        ValueError: If no supported control-hint encoder is present.
    """
    # All embedder classes that can serve as a control-hint encoder.
    control_encoder_types = (
        LineartEncoder,
        DepthZoeEncoder,
        DepthMidasEncoder,
        SoftEdgeEncoder,
        NormalBaeEncoder,
        ScribbleHEDEncoder,
        ScribblePidiNetEncoder,
    )
    for embbeder in model.conditioner.embedders:
        # isinstance accepts a tuple of classes -- equivalent to the
        # original chained `or` checks.
        if isinstance(embbeder, control_encoder_types):
            return embbeder
    # Fix: the original message named only LineartEncoder even though any
    # of the seven encoder types above would have been accepted.
    raise ValueError(
        "Cannot find a control-hint encoder "
        "(Lineart/DepthZoe/DepthMidas/SoftEdge/NormalBae/ScribbleHED/"
        "ScribblePidiNet) in the embedders."
    )
|
| 782 |
+
|
| 783 |
+
|
| 784 |
+
def load_basemodel_lora(model, basemodel_path="", lora_path=""):
    """Optionally replace the base checkpoint and merge LoRA weights.

    Args:
        model: The diffusion model to update in place.
        basemodel_path (str): If non-empty, checkpoint to load as the new base.
        lora_path (str): If non-empty, a .safetensors LoRA file to merge.

    Returns:
        The updated model.

    Raises:
        ValueError: If the safetensors file contains non-LoRA keys.
        NotImplementedError: If lora_path is not a .safetensors file.
    """
    if basemodel_path:
        print("--> load a new base model from {}".format(basemodel_path))
        model = model_load_ckpt(model, basemodel_path, True)

    if not lora_path:
        return model

    print("--> load a new LoRA model from {}".format(lora_path))

    if not lora_path.endswith(".safetensors"):
        raise NotImplementedError

    sd_state_dict = model.state_dict()

    lora_state_dict = {}
    # with safe_open(lora_path, framework="pt", device='cpu') as f:
    with safe_open(lora_path, framework="pt", device=0) as f:
        for key in f.keys():
            lora_state_dict[key] = f.get_tensor(key)

    # Every key of a genuine LoRA checkpoint contains "lora".
    if not all("lora" in k for k in lora_state_dict.keys()):
        raise ValueError(
            f"The model you provided in [{lora_path}] is not a LoRA model. "
        )

    sd_state_dict = convert_load_lora(
        sd_state_dict, lora_state_dict, alpha=1.0
    )  # TODO: alpha
    model.load_state_dict(sd_state_dict)
    return model
|
CCEdit-main/scripts/tools/extract_centerframe.py
ADDED
|
@@ -0,0 +1,110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''
|
| 2 |
+
Usage:
|
| 3 |
+
python scripts/tools/extract_centerframe.py \
|
| 4 |
+
--p_video assets/Samples/tshirtman.mp4 \
|
| 5 |
+
--p_save outputs/centerframe/tshirtman.png \
|
| 6 |
+
--orifps 18 \
|
| 7 |
+
--targetfps 6 \
|
| 8 |
+
--n_keyframes 17 \
|
| 9 |
+
--length_long 512 \
|
| 10 |
+
--length_short 512
|
| 11 |
+
'''
|
| 12 |
+
|
| 13 |
+
import argparse
|
| 14 |
+
import json
|
| 15 |
+
import os
|
| 16 |
+
import random
|
| 17 |
+
|
| 18 |
+
import einops
|
| 19 |
+
import torchvision
|
| 20 |
+
import cv2
|
| 21 |
+
import numpy as np
|
| 22 |
+
import torch
|
| 23 |
+
from pytorch_lightning import seed_everything
|
| 24 |
+
from torch import autocast
|
| 25 |
+
|
| 26 |
+
from scripts.sampling.util import (
|
| 27 |
+
chunk,
|
| 28 |
+
create_model,
|
| 29 |
+
init_sampling,
|
| 30 |
+
load_video,
|
| 31 |
+
load_video_keyframes,
|
| 32 |
+
model_load_ckpt,
|
| 33 |
+
perform_save_locally_image,
|
| 34 |
+
perform_save_locally_video,
|
| 35 |
+
)
|
| 36 |
+
from sgm.util import append_dims
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def extract_centerframe(p_video, p_save, orifps, targetfps, n_keyframes, length_long, length_short):
    """Extract the temporal center keyframe of a video and save it as an image.

    Loads n_keyframes keyframes, picks the middle one, resizes it so the
    long/short sides match the frame's aspect orientation, and writes it
    to p_save with OpenCV (creating parent directories as needed).
    """
    if n_keyframes % 2 == 0:
        print('WARNING: n_keyframes should be odd, but got {}'.format(n_keyframes))
    keyframes = load_video_keyframes(p_video, orifps, targetfps, n_keyframes)
    H, W = keyframes[0].shape[1:]
    # Assign the long target length to whichever side is longer.
    h, w = (length_long, length_short) if H >= W else (length_short, length_long)
    # keyframes = load_video_keyframes(p_video, orifps, targetfps, n_keyframes, (h, w))

    center = keyframes[n_keyframes // 2, :, :, :].unsqueeze(0)
    center = torch.nn.functional.interpolate(center, (h, w), mode='bilinear', align_corners=False)
    # Map from [-1, 1] back to [0, 1].
    center = torch.clamp((center + 1) / 2., 0, 1)

    # CHW float -> HWC numpy, RGB -> BGR for cv2.imwrite.
    img = center.squeeze(0).permute(1, 2, 0).cpu().numpy()[..., ::-1]
    # mkdir
    os.makedirs(os.path.dirname(p_save), exist_ok=True)
    cv2.imwrite(p_save, (img * 255).astype(np.uint8))
    print('save to {}'.format(p_save))
|
| 61 |
+
|
| 62 |
+
if __name__ == "__main__":
    # CLI entry point: extract center frames either from a single video
    # (--p_video/--p_save) or from every mp4 under --dir_video's subdirs.
    parser = argparse.ArgumentParser()
    parser.add_argument('--p_video', type=str, default='')
    parser.add_argument('--p_save', type=str, default='')
    parser.add_argument('--dir_video', type=str, default='')
    parser.add_argument('--dir_save', type=str, default='')
    parser.add_argument('--orifps', type=int, default=18)
    parser.add_argument('--targetfps', type=int, default=3)
    parser.add_argument('--n_keyframes', type=int, default=9)
    parser.add_argument('--length_short', type=int, default=384)
    parser.add_argument('--length_long', type=int, default=576)
    args = parser.parse_args()

    # BUGFIX: the original compared args.dir_save against the literal
    # string "args.dir_save" instead of the empty string, so the batch
    # mode passed validation even with no save directory given.
    assert (args.p_video != '' and args.p_save != '') or \
        (args.dir_video != '' and args.dir_save != ''), \
        'source video must be specified'

    orifps = args.orifps
    targetfps = args.targetfps
    n_keyframes = args.n_keyframes

    if args.p_video != '':
        # Single-video mode.
        p_video = args.p_video
        p_save = args.p_save
        extract_centerframe(p_video, p_save, orifps, targetfps, n_keyframes, args.length_long, args.length_short)
    else:
        # Batch mode: mirror dir_video's one-level subdirectory layout.
        dir_video = args.dir_video
        dir_save = args.dir_save
        os.makedirs(dir_save, exist_ok=True)
        subdirs = os.listdir(dir_video)
        for subdir in subdirs:
            subdir_video = os.path.join(dir_video, subdir)
            if not os.path.isdir(subdir_video):
                continue
            subdir_save = os.path.join(dir_save, subdir)
            os.makedirs(subdir_save, exist_ok=True)
            files = os.listdir(subdir_video)
            for file in files:
                p_video = os.path.join(subdir_video, file)
                # BUGFIX: the original tested os.path.isdir(file) on the
                # bare filename (relative to the CWD); test the full path.
                if not file.endswith('.mp4') or os.path.isdir(p_video):
                    continue
                p_save = os.path.join(subdir_save, file.replace('.mp4', '.png'))
                print('{} -> {}'.format(p_video, p_save))

                extract_centerframe(p_video, p_save, orifps, targetfps, n_keyframes, args.length_long, args.length_short)
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
|
CCEdit-main/scripts/tools/pnp_generate_config.py
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''
|
| 2 |
+
python scripts/sampling/pnp_generate_config.py \
|
| 3 |
+
--p_config config_pnp_auto.yaml \
|
| 4 |
+
--output_path "outputs/automatic_ref_editing/image" \
|
| 5 |
+
--image_path "outputs/centerframe/tshirtman.png" \
|
| 6 |
+
--latents_path "outputs/automatic_ref_editing/latents_forward" \
|
| 7 |
+
--prompt "a man walks on the beach"
|
| 8 |
+
'''
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
import yaml
|
| 12 |
+
import argparse
|
| 13 |
+
|
| 14 |
+
def save_yaml(args):
    """Serialize selected CLI arguments to a YAML configuration file.

    Args:
        args: argparse.Namespace with a `p_config` output path plus the
            config attributes listed below.
    """
    # Field order is preserved (sort_keys=False), matching the original
    # hand-written dict literal.
    config_keys = (
        'seed',
        'device',
        'output_path',
        'image_path',
        'latents_path',
        'sd_version',
        'guidance_scale',
        'n_timesteps',
        'prompt',
        'negative_prompt',
        'pnp_attn_t',
        'pnp_f_t',
    )
    config_data = {key: getattr(args, key) for key in config_keys}

    with open(args.p_config, 'w') as file:
        yaml.dump(config_data, file, sort_keys=False, allow_unicode=True)
|
| 32 |
+
|
| 33 |
+
if __name__ == '__main__':
    # CLI entry point: collect plug-and-play editing options and dump
    # them to a YAML config consumed by the PnP sampling scripts.
    parser = argparse.ArgumentParser(description="Save configuration to a YAML file.")
    parser.add_argument('--p_config', type=str, help="Path to save the YAML configuration file.")
    parser.add_argument('--output_path', type=str, help="Output path for the results.")
    parser.add_argument('--image_path', type=str, help="Path to the input image.")
    parser.add_argument('--latents_path', type=str, help="Path to the latents file.")
    parser.add_argument('--prompt', type=str, help="Prompt for the diffusion model.")
    parser.add_argument('--seed', type=int, default=1, help="Seed for random number generation.")
    parser.add_argument('--device', type=str, default='cuda', help="Device to be used (e.g., 'cuda', 'cpu').")
    parser.add_argument('--sd_version', type=str, default='2.1', help="Version of the diffusion model.")
    parser.add_argument('--guidance_scale', type=float, default=7.5, help="Guidance scale for the diffusion model.")
    parser.add_argument('--n_timesteps', type=int, default=50, help="Number of timesteps for the diffusion process.")
    parser.add_argument('--negative_prompt', type=str, default='ugly, blurry, black, low res, unrealistic', help="Negative prompt for the diffusion model.")
    parser.add_argument('--pnp_attn_t', type=float, default=0.5, help="PNP attention threshold.")
    parser.add_argument('--pnp_f_t', type=float, default=0.8, help="PNP feature threshold.")

    args = parser.parse_args()

    save_yaml(args)
    print(f"YAML configuration saved to {args.p_config}")
|
CCEdit-main/scripts/util/__init__.py
ADDED
|
File without changes
|
CCEdit-main/scripts/util/detection/__init__.py
ADDED
|
File without changes
|
CCEdit-main/scripts/util/detection/nsfw_and_watermark_dectection.py
ADDED
|
@@ -0,0 +1,104 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import torch
|
| 3 |
+
import numpy as np
|
| 4 |
+
import torchvision.transforms as T
|
| 5 |
+
from PIL import Image
|
| 6 |
+
import clip
|
| 7 |
+
|
| 8 |
+
RESOURCES_ROOT = "scripts/util/detection/"
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def predict_proba(X, weights, biases):
    """Sigmoid probabilities of a linear classifier head.

    Computes sigmoid(X @ weights.T + biases) using the two-branch
    formulation so large-magnitude logits stay numerically stable.

    Args:
        X: Feature matrix, shape (n_samples, n_features).
        weights: Head weights, shape (n_heads, n_features).
        biases: Head biases, shape (n_heads,).

    Returns:
        Transposed probability array.
    """
    logits = X @ weights.T + biases
    # Both branches are evaluated elementwise; np.where selects the
    # numerically safe one per element.
    pos_branch = 1 / (1 + np.exp(-logits))
    neg_branch = np.exp(logits) / (1 + np.exp(logits))
    proba = np.where(logits >= 0, pos_branch, neg_branch)
    return proba.T
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def load_model_weights(path: str):
    """Load classifier-head parameters from an .npz archive.

    Args:
        path: Path to an .npz file containing "weights" and "biases".

    Returns:
        Tuple of (weights, biases) numpy arrays.
    """
    # np.load on an .npz returns a lazy NpzFile backed by an open file
    # handle; use it as a context manager so the handle is closed
    # (the original leaked it).
    with np.load(path) as model_weights:
        return model_weights["weights"], model_weights["biases"]
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def clip_process_images(images: torch.Tensor) -> torch.Tensor:
    """Prepare image tensors for the CLIP image encoder.

    Center-crops to the smaller spatial side, resizes to 224 with bicubic
    antialiased interpolation, and applies CLIP's channel normalization.

    Args:
        images: Image tensor batch with spatial dims in the last two axes.

    Returns:
        The preprocessed tensor.
    """
    crop_size = min(images.shape[-2:])
    pipeline = T.Compose(
        [
            T.CenterCrop(crop_size),  # TODO: this might affect the watermark, check this
            T.Resize(224, interpolation=T.InterpolationMode.BICUBIC, antialias=True),
            T.Normalize(
                (0.48145466, 0.4578275, 0.40821073),
                (0.26862954, 0.26130258, 0.27577711),
            ),
        ]
    )
    return pipeline(images)
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class DeepFloydDataFiltering(object):
    """NSFW ("p") and watermark ("w") filter over CLIP image features.

    Images whose predicted probability exceeds the corresponding 0.5
    threshold are replaced in place with a heavily Gaussian-blurred copy.
    """

    def __init__(self, verbose: bool = False):
        super().__init__()
        # When True, prints raw head probabilities and threshold hits.
        self.verbose = verbose
        # CLIP runs on CPU; only the image encoder is used.
        self.clip_model, _ = clip.load("ViT-L/14", device="cpu")
        self.clip_model.eval()

        # Linear heads (weights/biases) for watermark and NSFW detection.
        self.cpu_w_weights, self.cpu_w_biases = load_model_weights(
            os.path.join(RESOURCES_ROOT, "w_head_v1.npz")
        )
        self.cpu_p_weights, self.cpu_p_biases = load_model_weights(
            os.path.join(RESOURCES_ROOT, "p_head_v1.npz")
        )
        self.w_threshold, self.p_threshold = 0.5, 0.5

    @torch.inference_mode()
    def __call__(self, images: torch.Tensor) -> torch.Tensor:
        """Blur any image flagged by the NSFW or watermark head; returns the batch."""
        imgs = clip_process_images(images)
        image_features = self.clip_model.encode_image(imgs.to("cpu"))
        image_features = image_features.detach().cpu().numpy().astype(np.float16)
        p_pred = predict_proba(image_features, self.cpu_p_weights, self.cpu_p_biases)
        w_pred = predict_proba(image_features, self.cpu_w_weights, self.cpu_w_biases)
        # Idiom fix: plain `if` statements instead of the original
        # `print(...) if self.verbose else None` expression-statements.
        if self.verbose:
            print(f"p_pred = {p_pred}, w_pred = {w_pred}")
        query = p_pred > self.p_threshold
        if query.sum() > 0:
            if self.verbose:
                print(f"Hit for p_threshold: {p_pred}")
            images[query] = T.GaussianBlur(99, sigma=(100.0, 100.0))(images[query])
        query = w_pred > self.w_threshold
        if query.sum() > 0:
            if self.verbose:
                print(f"Hit for w_threshold: {w_pred}")
            images[query] = T.GaussianBlur(99, sigma=(100.0, 100.0))(images[query])
        return images
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def load_img(path: str) -> torch.Tensor:
    """Load an image file as a float tensor of shape (1, C, H, W).

    Non-RGB images are converted to RGB first; ToTensor scales pixel
    values into [0, 1].

    Args:
        path: Path to the image file.

    Returns:
        Batched image tensor.
    """
    image = Image.open(path)
    if image.mode != "RGB":
        image = image.convert("RGB")
    to_tensor = T.Compose(
        [
            T.ToTensor(),
        ]
    )
    return to_tensor(image)[None, ...]
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def test(root):
    """Run the DeepFloyd filter over every image in `root`.

    Each image is loaded, filtered (blurred if flagged), and written back
    next to the original with a "-filtered.jpg" suffix.
    """
    from einops import rearrange

    # Renamed from `filter` to avoid shadowing the builtin.
    data_filter = DeepFloydDataFiltering(verbose=True)
    for p in os.listdir(root):
        print(f"running on {p}...")
        img = load_img(os.path.join(root, p))
        filtered_img = data_filter(img)
        # (1, C, H, W) float in [0, 1] -> (H, W, C) uint8.
        filtered_img = rearrange(
            255.0 * (filtered_img.numpy())[0], "c h w -> h w c"
        ).astype(np.uint8)
        Image.fromarray(filtered_img).save(
            os.path.join(root, f"{os.path.splitext(p)[0]}-filtered.jpg")
        )
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
if __name__ == "__main__":
    # fire turns test(root) into a CLI:
    #   python nsfw_and_watermark_dectection.py <root_dir>
    import fire

    fire.Fire(test)
    print("done.")
|
CCEdit-main/sgm/modules/diffusionmodules/__init__.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .denoiser import Denoiser
|
| 2 |
+
from .discretizer import Discretization
|
| 3 |
+
from .loss import StandardDiffusionLoss
|
| 4 |
+
from .model import Model, Encoder, Decoder
|
| 5 |
+
from .openaimodel import UNetModel
|
| 6 |
+
from .sampling import BaseDiffusionSampler
|
| 7 |
+
from .wrappers import OpenAIWrapper
|
CCEdit-main/sgm/modules/diffusionmodules/__pycache__/controlmodel.cpython-39.pyc
ADDED
|
Binary file (17.1 kB). View file
|
|
|
CCEdit-main/sgm/modules/diffusionmodules/__pycache__/denoiser.cpython-39.pyc
ADDED
|
Binary file (2.64 kB). View file
|
|
|
CCEdit-main/sgm/modules/diffusionmodules/__pycache__/denoiser_weighting.cpython-39.pyc
ADDED
|
Binary file (1.53 kB). View file
|
|
|
CCEdit-main/sgm/modules/diffusionmodules/__pycache__/discretizer.cpython-39.pyc
ADDED
|
Binary file (2.97 kB). View file
|
|
|
CCEdit-main/sgm/modules/diffusionmodules/__pycache__/guiders.cpython-39.pyc
ADDED
|
Binary file (2.47 kB). View file
|
|
|
CCEdit-main/sgm/modules/diffusionmodules/__pycache__/loss.cpython-39.pyc
ADDED
|
Binary file (2.44 kB). View file
|
|
|
CCEdit-main/sgm/modules/diffusionmodules/__pycache__/sampling.cpython-39.pyc
ADDED
|
Binary file (13.8 kB). View file
|
|
|
CCEdit-main/sgm/modules/diffusionmodules/__pycache__/util.cpython-39.pyc
ADDED
|
Binary file (14.5 kB). View file
|
|
|
CCEdit-main/sgm/modules/diffusionmodules/__pycache__/wrappers.cpython-39.pyc
ADDED
|
Binary file (6.91 kB). View file
|
|
|
FateZero-main/data/attribute/swan_swarov/00005.png
ADDED
|
Git LFS Details
|
FateZero-main/data/shape/man_skate/00007.png
ADDED
|
Git LFS Details
|
FateZero-main/data/shape/swan_swarov/00002.png
ADDED
|
Git LFS Details
|
RAVE-main/.gitignore
ADDED
|
@@ -0,0 +1,169 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Byte-compiled / optimized / DLL files
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[cod]
|
| 4 |
+
*$py.class
|
| 5 |
+
|
| 6 |
+
# C extensions
|
| 7 |
+
*.so
|
| 8 |
+
|
| 9 |
+
# Distribution / packaging
|
| 10 |
+
.Python
|
| 11 |
+
build/
|
| 12 |
+
develop-eggs/
|
| 13 |
+
dist/
|
| 14 |
+
downloads/
|
| 15 |
+
eggs/
|
| 16 |
+
.eggs/
|
| 17 |
+
lib/
|
| 18 |
+
lib64/
|
| 19 |
+
parts/
|
| 20 |
+
sdist/
|
| 21 |
+
var/
|
| 22 |
+
wheels/
|
| 23 |
+
share/python-wheels/
|
| 24 |
+
*.egg-info/
|
| 25 |
+
.installed.cfg
|
| 26 |
+
*.egg
|
| 27 |
+
MANIFEST
|
| 28 |
+
|
| 29 |
+
# PyInstaller
|
| 30 |
+
# Usually these files are written by a python script from a template
|
| 31 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
| 32 |
+
*.manifest
|
| 33 |
+
*.spec
|
| 34 |
+
|
| 35 |
+
# Installer logs
|
| 36 |
+
pip-log.txt
|
| 37 |
+
pip-delete-this-directory.txt
|
| 38 |
+
|
| 39 |
+
# Unit test / coverage reports
|
| 40 |
+
htmlcov/
|
| 41 |
+
.tox/
|
| 42 |
+
.nox/
|
| 43 |
+
.coverage
|
| 44 |
+
.coverage.*
|
| 45 |
+
.cache
|
| 46 |
+
nosetests.xml
|
| 47 |
+
coverage.xml
|
| 48 |
+
*.cover
|
| 49 |
+
*.py,cover
|
| 50 |
+
.hypothesis/
|
| 51 |
+
.pytest_cache/
|
| 52 |
+
cover/
|
| 53 |
+
|
| 54 |
+
# Translations
|
| 55 |
+
*.mo
|
| 56 |
+
*.pot
|
| 57 |
+
|
| 58 |
+
# Django stuff:
|
| 59 |
+
*.log
|
| 60 |
+
local_settings.py
|
| 61 |
+
db.sqlite3
|
| 62 |
+
db.sqlite3-journal
|
| 63 |
+
|
| 64 |
+
# Flask stuff:
|
| 65 |
+
instance/
|
| 66 |
+
.webassets-cache
|
| 67 |
+
|
| 68 |
+
# Scrapy stuff:
|
| 69 |
+
.scrapy
|
| 70 |
+
|
| 71 |
+
# Sphinx documentation
|
| 72 |
+
docs/_build/
|
| 73 |
+
|
| 74 |
+
# PyBuilder
|
| 75 |
+
.pybuilder/
|
| 76 |
+
target/
|
| 77 |
+
|
| 78 |
+
# Jupyter Notebook
|
| 79 |
+
.ipynb_checkpoints
|
| 80 |
+
|
| 81 |
+
# IPython
|
| 82 |
+
profile_default/
|
| 83 |
+
ipython_config.py
|
| 84 |
+
|
| 85 |
+
# pyenv
|
| 86 |
+
# For a library or package, you might want to ignore these files since the code is
|
| 87 |
+
# intended to run in multiple environments; otherwise, check them in:
|
| 88 |
+
# .python-version
|
| 89 |
+
|
| 90 |
+
# pipenv
|
| 91 |
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
| 92 |
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
| 93 |
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
| 94 |
+
# install all needed dependencies.
|
| 95 |
+
#Pipfile.lock
|
| 96 |
+
|
| 97 |
+
# poetry
|
| 98 |
+
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
| 99 |
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
| 100 |
+
# commonly ignored for libraries.
|
| 101 |
+
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
| 102 |
+
#poetry.lock
|
| 103 |
+
|
| 104 |
+
# pdm
|
| 105 |
+
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
| 106 |
+
#pdm.lock
|
| 107 |
+
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
|
| 108 |
+
# in version control.
|
| 109 |
+
# https://pdm.fming.dev/#use-with-ide
|
| 110 |
+
.pdm.toml
|
| 111 |
+
|
| 112 |
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
| 113 |
+
__pypackages__/
|
| 114 |
+
|
| 115 |
+
# Celery stuff
|
| 116 |
+
celerybeat-schedule
|
| 117 |
+
celerybeat.pid
|
| 118 |
+
|
| 119 |
+
# SageMath parsed files
|
| 120 |
+
*.sage.py
|
| 121 |
+
|
| 122 |
+
# Environments
|
| 123 |
+
.env
|
| 124 |
+
.venv
|
| 125 |
+
env/
|
| 126 |
+
venv/
|
| 127 |
+
ENV/
|
| 128 |
+
env.bak/
|
| 129 |
+
venv.bak/
|
| 130 |
+
|
| 131 |
+
# Spyder project settings
|
| 132 |
+
.spyderproject
|
| 133 |
+
.spyproject
|
| 134 |
+
|
| 135 |
+
# Rope project settings
|
| 136 |
+
.ropeproject
|
| 137 |
+
|
| 138 |
+
# mkdocs documentation
|
| 139 |
+
/site
|
| 140 |
+
|
| 141 |
+
# mypy
|
| 142 |
+
.mypy_cache/
|
| 143 |
+
.dmypy.json
|
| 144 |
+
dmypy.json
|
| 145 |
+
|
| 146 |
+
# Pyre type checker
|
| 147 |
+
.pyre/
|
| 148 |
+
|
| 149 |
+
# pytype static type analyzer
|
| 150 |
+
.pytype/
|
| 151 |
+
|
| 152 |
+
# Cython debug symbols
|
| 153 |
+
cython_debug/
|
| 154 |
+
|
| 155 |
+
*.pt
|
| 156 |
+
*.bin
|
| 157 |
+
CIVIT_AI/diffusers_models/*
|
| 158 |
+
generated/*
|
| 159 |
+
pretrained_models/*
|
| 160 |
+
results/*
|
| 161 |
+
*.safetensors
|
| 162 |
+
|
| 163 |
+
assets/notebook-generated/*
|
| 164 |
+
# PyCharm
|
| 165 |
+
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
| 166 |
+
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
| 167 |
+
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
| 168 |
+
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
| 169 |
+
#.idea/
|
RAVE-main/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2023 Rehg Lab
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
RAVE-main/README.md
ADDED
|
@@ -0,0 +1,183 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
### RAVE: Randomized Noise Shuffling for Fast and Consistent Video Editing with Diffusion Models - Official Repo
|
| 2 |
+
### CVPR 2024 (Highlight)
|
| 3 |
+
|
| 4 |
+
[Ozgur Kara](https://karaozgur.com/), [Bariscan Kurtkaya](https://bariscankurtkaya.github.io/), [Hidir Yesiltepe](https://sites.google.com/view/hidir-yesiltepe), [James M. Rehg](https://scholar.google.com/citations?hl=en&user=8kA3eDwAAAAJ), [Pinar Yanardag](https://scholar.google.com/citations?user=qzczdd8AAAAJ&hl=en)
|
| 5 |
+
|
| 6 |
+
<a href="https://huggingface.co/spaces/ozgurkara/RAVE"><img src="https://huggingface.co/datasets/huggingface/badges/raw/main/open-in-hf-spaces-sm-dark.svg" alt="Web Demo">
|
| 7 |
+
<a href='https://arxiv.org/abs/2312.04524'><img src='https://img.shields.io/badge/ArXiv-2312.04524-red'></a>
|
| 8 |
+
<a href='https://rave-video.github.io/'><img src='https://img.shields.io/badge/Project-Page-green'></a>
|
| 9 |
+
<a href='https://youtu.be/2hQho5AC9T0?si=3R_jYDbcL2olODCV'><img src='https://img.shields.io/badge/YouTube-red?style=for-the-badge&logo=youtube&logoColor=white'></a>
|
| 10 |
+
<a href='https://rave-video.github.io/supp/supp.html'><img src='https://img.shields.io/badge/Supplementary-Page-yellow'></a>
|
| 11 |
+
[](https://github.com/rehg-lab/RAVE)
|
| 12 |
+
|
| 13 |
+

|
| 14 |
+
|
| 15 |
+
|
| 16 |
+

|
| 17 |
+
(Note that the videos on GitHub are heavily compressed. The full videos are available on the project webpage.)
|
| 18 |
+
|
| 19 |
+
## Abstract
|
| 20 |
+
<b>TL; DR:</b> RAVE is a zero-shot, lightweight, and fast framework for text-guided video editing, supporting videos of any length utilizing text-to-image pretrained diffusion models.
|
| 21 |
+
|
| 22 |
+
<details><summary>Click for the full abstract</summary>
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
> Recent advancements in diffusion-based models have demonstrated significant success in generating images from text. However, video editing models have not yet reached the same level of visual quality and user control. To address this, we introduce RAVE, a zero-shot video editing method that leverages pre-trained text-to-image diffusion models without additional training. RAVE takes an input video and a text prompt to produce high-quality videos while preserving the original motion and semantic structure. It employs a novel noise shuffling strategy, leveraging spatio-temporal interactions between frames, to produce temporally consistent videos faster than existing methods. It is also efficient in terms of memory requirements, allowing it to handle longer videos. RAVE is capable of a wide range of edits, from local attribute modifications to shape transformations. In order to demonstrate the versatility of RAVE, we create a comprehensive video evaluation dataset ranging from object-focused scenes to complex human activities like dancing and typing, and dynamic scenes featuring swimming fish and boats. Our qualitative and quantitative experiments highlight the effectiveness of RAVE in diverse video editing scenarios compared to existing methods.
|
| 26 |
+
</details>
|
| 27 |
+
|
| 28 |
+
<br>
|
| 29 |
+
|
| 30 |
+
**Features**:
|
| 31 |
+
- *Zero-shot framework*
|
| 32 |
+
- *Working fast*
|
| 33 |
+
- *No restriction on video length*
|
| 34 |
+
- *Standardized dataset for evaluating text-guided video-editing methods*
|
| 35 |
+
- *Compatible with off-the-shelf pre-trained approaches (e.g. [CivitAI](https://civitai.com/))*
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
## Updates
|
| 39 |
+
- [12/2023] Gradio demo is released, HuggingFace Space demo will be released soon
|
| 40 |
+
- [12/2023] Paper is available on ArXiv, project webpage is ready and code is released.
|
| 41 |
+
|
| 42 |
+
### TODO
|
| 43 |
+
- [ ] Share the dataset
|
| 44 |
+
- [X] Add more examples
|
| 45 |
+
- [X] Optimize preprocessing
|
| 46 |
+
- [X] Add CivitAI models to Grad.io
|
| 47 |
+
- [X] ~~Prepare a grad.io based GUI~~
|
| 48 |
+
- [X] ~~Integrate MultiControlNet~~
|
| 49 |
+
- [X] ~~Adapt CIVIT AI models~~
|
| 50 |
+
|
| 51 |
+
## Installation and Inference
|
| 52 |
+
|
| 53 |
+
### Setup Environment
|
| 54 |
+
Please install our environment using 'requirements.txt' file as:
|
| 55 |
+
```shell
|
| 56 |
+
conda create -n rave python=3.8
|
| 57 |
+
conda activate rave
|
| 58 |
+
conda install pip
|
| 59 |
+
pip cache purge
|
| 60 |
+
pip install -r requirements.txt
|
| 61 |
+
```
|
| 62 |
+
Also, please install PyTorch and Xformers as
|
| 63 |
+
```shell
|
| 64 |
+
pip install torch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2 --index-url https://download.pytorch.org/whl/cu118
|
| 65 |
+
pip install xformers==0.0.20
|
| 66 |
+
```
|
| 67 |
+
to set up the Conda environment.
|
| 68 |
+
|
| 69 |
+
Our code was tested on Linux with the following versions:
|
| 70 |
+
```shell
|
| 71 |
+
timm==0.6.7 torch==2.0.1+cu118 xformers==0.0.20 diffusers==0.18.2 torch.version.cuda==11.8 python==3.8.0
|
| 72 |
+
```
|
| 73 |
+
|
| 74 |
+
### WebUI Demo
|
| 75 |
+
|
| 76 |
+
To run our Gradio-based web demo, run the following command:
|
| 77 |
+
```shell
|
| 78 |
+
python webui.py
|
| 79 |
+
```
|
| 80 |
+
Then, specify your configurations and perform editing.
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
### Inference
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
To run RAVE, please follow these steps:
|
| 87 |
+
|
| 88 |
+
1- Put the video you want to edit under `data/mp4_videos` as an MP4 file. Note that we suggest using videos with a size of 512x512 or 512x320.
|
| 89 |
+
|
| 90 |
+
2- Prepare a config file under the `configs` directory. Change the name of the `video_name` parameter to the name of the MP4 file. You can find detailed descriptions of the parameters and example configurations there.
|
| 91 |
+
|
| 92 |
+
3- Run the following command:
|
| 93 |
+
```shell
|
| 94 |
+
python scripts/run_experiment.py [PATH OF CONFIG FILE]
|
| 95 |
+
```
|
| 96 |
+
4- The results will be generated under the `results` directory. Also, the latents and controls are saved under the `generated` directory to speed up the editing with different prompts on the same video.
|
| 97 |
+
Note that the names of the preprocessors available can be found in `utils/constants.py`.
|
| 98 |
+
|
| 99 |
+
### Use Customized Models from CIVIT AI
|
| 100 |
+
|
| 101 |
+
Our code can run any customized model from CivitAI. To use these models, please follow these steps:
|
| 102 |
+
|
| 103 |
+
1- Determine which model you want to use from CIVIT AI, and obtain its index. (e.g. the index for RealisticVision V5.1 is 130072, you can find the id of the model in the website link as a parameter assigned to 'VersionId', e.g. https://civitai.com/models/4201?modelVersionId=130072)
|
| 104 |
+
|
| 105 |
+
2- In the current directory, run the following code. It downloads the model in safetensors format, and converts it to '.bin' format that is compatible with diffusers.
|
| 106 |
+
```shell
|
| 107 |
+
bash CIVIT_AI/civit_ai.sh 130072
|
| 108 |
+
```
|
| 109 |
+
3- Copy the path of the converted model, `$CWD/CIVIT_AI/diffusers_models/[CUSTOMIZED MODEL]` (e.g. `CIVIT_AI/diffusers_models/realisticVisionV60B1_v51VAE` for 130072), and use the path in the config file.
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
## Dataset
|
| 113 |
+
|
| 114 |
+
Dataset will be released soon.
|
| 115 |
+
|
| 116 |
+
## Examples
|
| 117 |
+
### Type of Edits
|
| 118 |
+
<table>
|
| 119 |
+
<tr>
|
| 120 |
+
<td><img src="assets/examples/glitter.gif"></td>
|
| 121 |
+
<td><img src="assets/examples/watercolor-new.gif"></td>
|
| 122 |
+
<td><img src="assets/examples/coast.gif"></td>
|
| 123 |
+
</tr>
|
| 124 |
+
<tr>
|
| 125 |
+
<td width=33% style="text-align:center;">1- Local Editing</td>
|
| 126 |
+
<td width=33% style="text-align:center;">2- Visual Style Editing</td>
|
| 127 |
+
<td width=33% style="text-align:center;">3- Background Editing</td>
|
| 128 |
+
</tr>
|
| 129 |
+
</table>
|
| 130 |
+
|
| 131 |
+
<table>
|
| 132 |
+
<tr>
|
| 133 |
+
<td><img src="assets/examples/a_dinosaur.gif"></td>
|
| 134 |
+
<td><img src="assets/examples/tractor.gif"></td>
|
| 135 |
+
</tr>
|
| 136 |
+
|
| 137 |
+
<tr>
|
| 138 |
+
<td width=50% style="text-align:center;">4- Shape/Attribute Editing</td>
|
| 139 |
+
<td width=50% style="text-align:center;">5- Extreme Shape Editing</td>
|
| 140 |
+
</tr>
|
| 141 |
+
</table>
|
| 142 |
+
|
| 143 |
+
### Editing on Various Types of Motions
|
| 144 |
+
<table>
|
| 145 |
+
<tr>
|
| 146 |
+
<td><img src="assets/examples/crochet.gif"></td>
|
| 147 |
+
<td><img src="assets/examples/anime.gif"></td>
|
| 148 |
+
<td><img src="assets/examples/rave.gif"></td>
|
| 149 |
+
</tr>
|
| 150 |
+
<tr>
|
| 151 |
+
<td width=33% style="text-align:center;">1- Exo-motion</td>
|
| 152 |
+
<td width=33% style="text-align:center;">2- Ego-motion</td>
|
| 153 |
+
<td width=33% style="text-align:center;">3- Ego-exo motion</td>
|
| 154 |
+
</tr>
|
| 155 |
+
</table>
|
| 156 |
+
|
| 157 |
+
<table>
|
| 158 |
+
<tr>
|
| 159 |
+
<td><img src="assets/examples/cheetah.gif"></td>
|
| 160 |
+
<td><img src="assets/examples/whales.gif"></td>
|
| 161 |
+
</tr>
|
| 162 |
+
|
| 163 |
+
<tr>
|
| 164 |
+
<td width=50% style="text-align:center;">4- Occlusions</td>
|
| 165 |
+
<td width=50% style="text-align:center;">5- Multiple objects with appearance/disappearance</td>
|
| 166 |
+
</tr>
|
| 167 |
+
</table>
|
| 168 |
+
|
| 169 |
+
## Citation
|
| 170 |
+
|
| 171 |
+
```
|
| 172 |
+
@inproceedings{kara2024rave,
|
| 173 |
+
title={RAVE: Randomized Noise Shuffling for Fast and Consistent Video Editing with Diffusion Models},
|
| 174 |
+
author={Ozgur Kara and Bariscan Kurtkaya and Hidir Yesiltepe and James M. Rehg and Pinar Yanardag},
|
| 175 |
+
booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
|
| 176 |
+
year={2024}
|
| 177 |
+
}
|
| 178 |
+
|
| 179 |
+
```
|
| 180 |
+
|
| 181 |
+
## Maintenance
|
| 182 |
+
|
| 183 |
+
This is the official repository for **RAVE: Randomized Noise Shuffling for Fast and Consistent Video Editing with Diffusion Models**. Feel free to contact [Ozgur Kara](ozgurrkara99@gmail.com) with any questions or for discussion.
|
RAVE-main/annotator/annotator_path.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
import torch
import utils.constants as const

# Prefer the GPU when one is present; everything falls back to CPU otherwise.
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# ControlNet preprocessor weights are stored under the project root.
models_path = f'{const.CWD}/pretrained_models'

# clip_vision checkpoints always sit next to this module (the
# "extensions\sd-webui-controlnet" layout); removing and reinstalling
# controlnet resolves most problems with them.
clip_vision_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'clip_vision')

models_path = os.path.realpath(models_path)
os.makedirs(models_path, exist_ok=True)
print(f'ControlNet preprocessor location: {models_path}')
# Keeping the default location inside controlnet means a clean reinstall fixes
# any issue, provided users have not overridden these paths themselves
# (and if they have, they will know what went wrong).
|
RAVE-main/annotator/util.py
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import cv2
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def HWC3(x):
    """Return *x* as an H×W×3 uint8 image.

    Grayscale input (2-D, or 3-D with a single channel) is replicated across
    three channels; RGBA input is alpha-composited onto a white background.
    A 3-channel input is returned unchanged (same object, no copy).
    """
    assert x.dtype == np.uint8
    if x.ndim == 2:
        x = x[:, :, None]
    assert x.ndim == 3
    channels = x.shape[2]
    assert channels in (1, 3, 4)
    if channels == 3:
        return x
    if channels == 1:
        return np.concatenate((x, x, x), axis=2)
    # channels == 4: blend the RGB planes over white using the alpha plane.
    rgb = x[:, :, 0:3].astype(np.float32)
    alpha = x[:, :, 3:4].astype(np.float32) / 255.0
    blended = rgb * alpha + 255.0 * (1.0 - alpha)
    return blended.clip(0, 255).astype(np.uint8)
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def make_noise_disk(H, W, C, F):
    """Generate smooth random noise normalized to [0, 1].

    A coarse uniform-noise grid (one sample per F×F patch, plus a border) is
    upsampled with bicubic interpolation, the padding border is cropped away,
    and the result is min-max normalized. The output has shape (H, W, C) when
    C == 1 (trailing axis restored) and (H, W[, C]) as produced by cv2.resize
    otherwise. Uses the global NumPy RNG, so results are not deterministic
    unless the caller seeds it.
    """
    coarse = np.random.uniform(low=0, high=1, size=((H // F) + 2, (W // F) + 2, C))
    smooth = cv2.resize(coarse, (W + 2 * F, H + 2 * F), interpolation=cv2.INTER_CUBIC)
    # Drop the F-pixel border introduced by the padded coarse grid.
    smooth = smooth[F: F + H, F: F + W]
    smooth -= np.min(smooth)
    smooth /= np.max(smooth)
    if C == 1:
        # cv2.resize squeezes a trailing singleton channel; restore it.
        smooth = smooth[:, :, None]
    return smooth
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def nms(x, t, s):
    """Thin a (typically edge) map via directional non-maximum suppression.

    The input is Gaussian-blurred with sigma *s*, then pixels that are local
    maxima along any of four directions (horizontal, vertical, both
    diagonals) are kept; everything above threshold *t* becomes 255 in the
    returned uint8 mask, the rest 0.
    """
    blurred = cv2.GaussianBlur(x.astype(np.float32), (0, 0), s)

    directional_kernels = (
        np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], dtype=np.uint8),  # horizontal
        np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], dtype=np.uint8),  # vertical
        np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.uint8),  # diagonal "\"
        np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=np.uint8),  # diagonal "/"
    )

    peaks = np.zeros_like(blurred)
    for kernel in directional_kernels:
        # A pixel equal to its dilation along a direction is a local max there.
        np.putmask(peaks, cv2.dilate(blurred, kernel=kernel) == blurred, blurred)

    mask = np.zeros_like(peaks, dtype=np.uint8)
    mask[peaks > t] = 255
    return mask
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def min_max_norm(x):
    """Linearly rescale *x* so its values span [0, 1].

    Args:
        x (np.ndarray): array of any shape; any numeric dtype.

    Returns:
        np.ndarray: new array with min 0 and max 1 (all zeros for a
        constant input, thanks to the 1e-5 floor on the divisor).

    Fixes over the original in-place version: the caller's array is no
    longer mutated, and integer inputs no longer raise a casting error
    from the in-place true division (``x /= ...``).
    """
    shifted = x - np.min(x)
    return shifted / np.maximum(np.max(shifted), 1e-5)
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def safe_step(x, step=2):
    """Quantize *x* onto a coarse ladder of ``step + 1`` levels.

    Values are scaled by ``step + 1``, truncated to integers, then divided
    by ``step``, producing float32 output snapped to multiples of
    ``1 / step`` (inputs near 1 map to 1.0 or slightly above).
    """
    scaled = x.astype(np.float32) * float(step + 1)
    return scaled.astype(np.int32).astype(np.float32) / float(step)
|
RAVE-main/annotator/zoe/zoedepth/models/__init__.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# MIT License
|
| 2 |
+
|
| 3 |
+
# Copyright (c) 2022 Intelligent Systems Lab Org
|
| 4 |
+
|
| 5 |
+
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
# of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
# in the Software without restriction, including without limitation the rights
|
| 8 |
+
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
# copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
# furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
# The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
# copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
# SOFTWARE.
|
| 22 |
+
|
| 23 |
+
# File author: Shariq Farooq Bhat
|
| 24 |
+
|
RAVE-main/annotator/zoe/zoedepth/models/base_models/__init__.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# MIT License
|
| 2 |
+
|
| 3 |
+
# Copyright (c) 2022 Intelligent Systems Lab Org
|
| 4 |
+
|
| 5 |
+
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
# of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
# in the Software without restriction, including without limitation the rights
|
| 8 |
+
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
# copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
# furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
# The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
# copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
# SOFTWARE.
|
| 22 |
+
|
| 23 |
+
# File author: Shariq Farooq Bhat
|
| 24 |
+
|
RAVE-main/annotator/zoe/zoedepth/models/base_models/midas.py
ADDED
|
@@ -0,0 +1,379 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# MIT License
|
| 2 |
+
import os
|
| 3 |
+
|
| 4 |
+
# Copyright (c) 2022 Intelligent Systems Lab Org
|
| 5 |
+
|
| 6 |
+
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 7 |
+
# of this software and associated documentation files (the "Software"), to deal
|
| 8 |
+
# in the Software without restriction, including without limitation the rights
|
| 9 |
+
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 10 |
+
# copies of the Software, and to permit persons to whom the Software is
|
| 11 |
+
# furnished to do so, subject to the following conditions:
|
| 12 |
+
|
| 13 |
+
# The above copyright notice and this permission notice shall be included in all
|
| 14 |
+
# copies or substantial portions of the Software.
|
| 15 |
+
|
| 16 |
+
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 17 |
+
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 18 |
+
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 19 |
+
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 20 |
+
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 21 |
+
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 22 |
+
# SOFTWARE.
|
| 23 |
+
|
| 24 |
+
# File author: Shariq Farooq Bhat
|
| 25 |
+
|
| 26 |
+
import torch
|
| 27 |
+
import torch.nn as nn
|
| 28 |
+
import numpy as np
|
| 29 |
+
from torchvision.transforms import Normalize
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def denormalize(x):
    """Reverse the ImageNet normalization applied to the input.

    Args:
        x (torch.Tensor - shape(N,3,H,W)): normalized input batch.

    Returns:
        torch.Tensor - shape(N,3,H,W): batch restored to the original
        value range (``x * std + mean`` per channel).
    """
    imagenet_mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(x.device)
    imagenet_std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(x.device)
    return x * imagenet_std + imagenet_mean
|
| 44 |
+
|
| 45 |
+
def get_activation(name, bank):
    """Build a forward hook that records a module's output.

    The returned callable has the PyTorch forward-hook signature
    ``(module, input, output)`` and stores ``output`` into the *bank*
    mapping under key *name*, overwriting any previous entry.
    """
    def hook(module, inputs, output):
        bank[name] = output
    return hook
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
class Resize(object):
    """Resize sample to given size (width, height).

    NOTE(review): in this tensor-only variant, ``__call__`` resizes the input
    directly; the ``resize_target`` flag is accepted for interface
    compatibility but is not referenced anywhere in the class body.
    """

    def __init__(
        self,
        width,
        height,
        resize_target=True,
        keep_aspect_ratio=False,
        ensure_multiple_of=1,
        resize_method="lower_bound",
    ):
        """Init.
        Args:
            width (int): desired output width
            height (int): desired output height
            resize_target (bool, optional):
                True: Resize the full sample (image, mask, target).
                False: Resize image only.
                Defaults to True.
            keep_aspect_ratio (bool, optional):
                True: Keep the aspect ratio of the input sample.
                Output sample might not have the given width and height, and
                resize behaviour depends on the parameter 'resize_method'.
                Defaults to False.
            ensure_multiple_of (int, optional):
                Output width and height is constrained to be multiple of this parameter.
                Defaults to 1.
            resize_method (str, optional):
                "lower_bound": Output will be at least as large as the given size.
                "upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.)
                "minimal": Scale as least as possible. (Output size might be smaller than given size.)
                Defaults to "lower_bound".
        """
        self.__width = width
        self.__height = height

        self.__keep_aspect_ratio = keep_aspect_ratio
        self.__multiple_of = ensure_multiple_of
        self.__resize_method = resize_method

    def constrain_to_multiple_of(self, x, min_val=0, max_val=None):
        """Snap x to the nearest multiple of the configured factor,
        falling back to floor/ceil to respect max_val/min_val bounds."""
        # Round to the nearest multiple first.
        y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)

        # Rounding up may overshoot max_val; retry rounding down.
        if max_val is not None and y > max_val:
            y = (np.floor(x / self.__multiple_of)
                 * self.__multiple_of).astype(int)

        # Rounding down may undershoot min_val; retry rounding up.
        if y < min_val:
            y = (np.ceil(x / self.__multiple_of)
                 * self.__multiple_of).astype(int)

        return y

    def get_size(self, width, height):
        """Compute the output (width, height) for an input of the given size,
        applying the configured aspect-ratio and multiple-of constraints."""
        # determine new height and width
        scale_height = self.__height / height
        scale_width = self.__width / width

        if self.__keep_aspect_ratio:
            # A single scale factor must be used for both axes; pick it
            # according to the configured policy.
            if self.__resize_method == "lower_bound":
                # scale such that output size is lower bound
                if scale_width > scale_height:
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            elif self.__resize_method == "upper_bound":
                # scale such that output size is upper bound
                if scale_width < scale_height:
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            elif self.__resize_method == "minimal":
                # scale as least as possible
                if abs(1 - scale_width) < abs(1 - scale_height):
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            else:
                raise ValueError(
                    f"resize_method {self.__resize_method} not implemented"
                )

        if self.__resize_method == "lower_bound":
            # Never shrink below the requested size.
            new_height = self.constrain_to_multiple_of(
                scale_height * height, min_val=self.__height
            )
            new_width = self.constrain_to_multiple_of(
                scale_width * width, min_val=self.__width
            )
        elif self.__resize_method == "upper_bound":
            # Never exceed the requested size.
            new_height = self.constrain_to_multiple_of(
                scale_height * height, max_val=self.__height
            )
            new_width = self.constrain_to_multiple_of(
                scale_width * width, max_val=self.__width
            )
        elif self.__resize_method == "minimal":
            new_height = self.constrain_to_multiple_of(scale_height * height)
            new_width = self.constrain_to_multiple_of(scale_width * width)
        else:
            raise ValueError(
                f"resize_method {self.__resize_method} not implemented")

        return (new_width, new_height)

    def __call__(self, x):
        # x.shape[-2:] is (H, W); reversed it becomes (W, H) as get_size expects.
        # Assumes x is a 4-D (N, C, H, W) tensor suitable for interpolate —
        # TODO confirm against callers.
        width, height = self.get_size(*x.shape[-2:][::-1])
        return nn.functional.interpolate(x, (height, width), mode='bilinear', align_corners=True)
|
| 175 |
+
|
| 176 |
+
class PrepForMidas(object):
    """MiDaS input pipeline: optional resize followed by normalization
    with mean/std 0.5, mapping [0, 1] inputs to [-1, 1]."""

    def __init__(self, resize_mode="minimal", keep_aspect_ratio=True, img_size=384, do_resize=True):
        if isinstance(img_size, int):
            img_size = (img_size, img_size)
        net_h, net_w = img_size
        self.normalization = Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
        if do_resize:
            # MiDaS backbones need spatial dims divisible by 32.
            self.resizer = Resize(net_w, net_h,
                                  keep_aspect_ratio=keep_aspect_ratio,
                                  ensure_multiple_of=32,
                                  resize_method=resize_mode)
        else:
            self.resizer = nn.Identity()

    def __call__(self, x):
        return self.normalization(self.resizer(x))
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
class MidasCore(nn.Module):
    """Wrapper around a MiDaS relative-depth network used as a feature backbone.

    Forward hooks are attached to selected decoder layers of the wrapped MiDaS
    model so their activations can be collected (in ``self.core_out``) together
    with, or instead of, the relative depth prediction.
    """

    def __init__(self, midas, trainable=False, fetch_features=True,
                 layer_names=('out_conv', 'l4_rn', 'r4', 'r3', 'r2', 'r1'),
                 freeze_bn=False, keep_aspect_ratio=True, img_size=384, **kwargs):
        """Midas Base model used for multi-scale feature extraction.

        Args:
            midas (torch.nn.Module): Midas model.
            trainable (bool, optional): Train midas model. Defaults to False.
            fetch_features (bool, optional): Extract multi-scale features. Defaults to True.
            layer_names (tuple, optional): Layers used for feature extraction.
                Order = (head output features, last layer features, ...decoder features).
                Defaults to ('out_conv', 'l4_rn', 'r4', 'r3', 'r2', 'r1').
            freeze_bn (bool, optional): Freeze BatchNorm. Generally results in
                better finetuning performance. Defaults to False.
            keep_aspect_ratio (bool, optional): Keep the aspect ratio of input
                images while resizing. Defaults to True.
            img_size (int, tuple, optional): Input resolution. Defaults to 384.
        """
        super().__init__()
        self.core = midas
        self.output_channels = None
        self.core_out = {}      # layer name -> activation, filled by forward hooks
        self.trainable = trainable
        self.fetch_features = fetch_features
        # midas.scratch.output_conv = nn.Identity()
        self.handles = []       # hook handles, kept so they can be removed later
        # self.layer_names = ['out_conv','l4_rn', 'r4', 'r3', 'r2', 'r1']
        self.layer_names = layer_names

        self.set_trainable(trainable)
        self.set_fetch_features(fetch_features)

        self.prep = PrepForMidas(keep_aspect_ratio=keep_aspect_ratio,
                                 img_size=img_size, do_resize=kwargs.get('do_resize', True))

        if freeze_bn:
            self.freeze_bn()

    def set_trainable(self, trainable):
        """Enable/disable gradients for all parameters; returns self for chaining."""
        self.trainable = trainable
        if trainable:
            self.unfreeze()
        else:
            self.freeze()
        return self

    def set_fetch_features(self, fetch_features):
        """Attach or remove the forward hooks used for feature extraction."""
        self.fetch_features = fetch_features
        if fetch_features:
            if len(self.handles) == 0:
                self.attach_hooks(self.core)
        else:
            self.remove_hooks()
        return self

    def freeze(self):
        """Disable gradients on every parameter."""
        for p in self.parameters():
            p.requires_grad = False
        self.trainable = False
        return self

    def unfreeze(self):
        """Enable gradients on every parameter."""
        for p in self.parameters():
            p.requires_grad = True
        self.trainable = True
        return self

    def freeze_bn(self):
        """Put all BatchNorm2d modules into eval mode (freezes running stats)."""
        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()
        return self

    def forward(self, x, denorm=False, return_rel_depth=False):
        """Run MiDaS on ``x``.

        Args:
            x: input image batch (already normalized unless ``denorm`` is set).
            denorm (bool): undo normalization before the MiDaS-specific prep.
            return_rel_depth (bool): also return the relative depth map.

        Returns:
            The hooked feature list (order = ``self.layer_names``), optionally
            preceded by the relative depth; or just the relative depth when
            ``fetch_features`` is off.
        """
        with torch.no_grad():
            if denorm:
                x = denormalize(x)
            x = self.prep(x)
            # print("Shape after prep: ", x.shape)

        with torch.set_grad_enabled(self.trainable):

            # print("Input size to Midascore", x.shape)
            rel_depth = self.core(x)
            # print("Output from midas shape", rel_depth.shape)
            if not self.fetch_features:
                return rel_depth
        out = [self.core_out[k] for k in self.layer_names]

        if return_rel_depth:
            return rel_depth, out
        return out

    def get_rel_pos_params(self):
        """Yield encoder parameters belonging to relative-position embeddings."""
        for name, p in self.core.pretrained.named_parameters():
            if "relative_position" in name:
                yield p

    def get_enc_params_except_rel_pos(self):
        """Yield encoder parameters excluding relative-position embeddings."""
        for name, p in self.core.pretrained.named_parameters():
            if "relative_position" not in name:
                yield p

    def freeze_encoder(self, freeze_rel_pos=False):
        """Freeze the encoder, optionally including relative-position params."""
        if freeze_rel_pos:
            for p in self.core.pretrained.parameters():
                p.requires_grad = False
        else:
            for p in self.get_enc_params_except_rel_pos():
                p.requires_grad = False
        return self

    def attach_hooks(self, midas):
        """Register forward hooks on the layers named in ``self.layer_names``."""
        if len(self.handles) > 0:
            self.remove_hooks()
        if "out_conv" in self.layer_names:
            self.handles.append(list(midas.scratch.output_conv.children())[
                3].register_forward_hook(get_activation("out_conv", self.core_out)))
        if "r4" in self.layer_names:
            self.handles.append(midas.scratch.refinenet4.register_forward_hook(
                get_activation("r4", self.core_out)))
        if "r3" in self.layer_names:
            self.handles.append(midas.scratch.refinenet3.register_forward_hook(
                get_activation("r3", self.core_out)))
        if "r2" in self.layer_names:
            self.handles.append(midas.scratch.refinenet2.register_forward_hook(
                get_activation("r2", self.core_out)))
        if "r1" in self.layer_names:
            self.handles.append(midas.scratch.refinenet1.register_forward_hook(
                get_activation("r1", self.core_out)))
        if "l4_rn" in self.layer_names:
            self.handles.append(midas.scratch.layer4_rn.register_forward_hook(
                get_activation("l4_rn", self.core_out)))

        return self

    def remove_hooks(self):
        """Detach all registered forward hooks."""
        for h in self.handles:
            h.remove()
        return self

    def __del__(self):
        # Guard against partially-initialized objects: __del__ may run even if
        # __init__ raised before self.handles was assigned (and at interpreter
        # shutdown), which would otherwise raise a spurious AttributeError.
        if getattr(self, "handles", None):
            self.remove_hooks()

    def set_output_channels(self, model_type):
        """Record the per-layer output channel counts for ``model_type``."""
        self.output_channels = MIDAS_SETTINGS[model_type]

    @staticmethod
    def build(midas_model_type="DPT_BEiT_L_384", train_midas=False, use_pretrained_midas=True, fetch_features=False, freeze_bn=True, force_keep_ar=False, force_reload=False, **kwargs):
        """Construct a MidasCore by loading a MiDaS model from the local hub repo."""
        if midas_model_type not in MIDAS_SETTINGS:
            raise ValueError(
                f"Invalid model type: {midas_model_type}. Must be one of {list(MIDAS_SETTINGS.keys())}")
        if "img_size" in kwargs:
            kwargs = MidasCore.parse_img_size(kwargs)
        img_size = kwargs.pop("img_size", [384, 384])
        print("img_size", img_size)
        midas_path = os.path.join(os.path.dirname(__file__), 'midas_repo')
        midas = torch.hub.load(midas_path, midas_model_type,
                               pretrained=use_pretrained_midas, force_reload=force_reload, source='local')
        kwargs.update({'keep_aspect_ratio': force_keep_ar})
        midas_core = MidasCore(midas, trainable=train_midas, fetch_features=fetch_features,
                               freeze_bn=freeze_bn, img_size=img_size, **kwargs)
        midas_core.set_output_channels(midas_model_type)
        return midas_core

    @staticmethod
    def build_from_config(config):
        """Build a MidasCore from a config dict (keys = ``build`` kwargs)."""
        return MidasCore.build(**config)

    @staticmethod
    def parse_img_size(config):
        """Normalize ``config['img_size']`` to a two-element [H, W] list in place."""
        assert 'img_size' in config
        if isinstance(config['img_size'], str):
            assert "," in config['img_size'], "img_size should be a string with comma separated img_size=H,W"
            config['img_size'] = list(map(int, config['img_size'].split(",")))
            assert len(
                config['img_size']) == 2, "img_size should be a string with comma separated img_size=H,W"
        elif isinstance(config['img_size'], int):
            config['img_size'] = [config['img_size'], config['img_size']]
        else:
            assert isinstance(config['img_size'], list) and len(
                config['img_size']) == 2, "img_size should be a list of H,W"
        return config
|
| 369 |
+
|
| 370 |
+
|
| 371 |
+
# Mapping from decoder output-channel configuration to the MiDaS model types
# that produce it.
nchannels2models = {
    (256, 256, 256, 256, 256): [
        "DPT_BEiT_L_384", "DPT_BEiT_L_512", "DPT_BEiT_B_384",
        "DPT_SwinV2_L_384", "DPT_SwinV2_B_384", "DPT_SwinV2_T_256",
        "DPT_Large", "DPT_Hybrid",
    ],
    (512, 256, 128, 64, 64): ["MiDaS_small"],
}

# Model name to number of output channels
MIDAS_SETTINGS = {
    model_name: channels
    for channels, model_names in nchannels2models.items()
    for model_name in model_names
}
|
RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/.gitignore
ADDED
|
@@ -0,0 +1,110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Byte-compiled / optimized / DLL files
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[cod]
|
| 4 |
+
*$py.class
|
| 5 |
+
|
| 6 |
+
# C extensions
|
| 7 |
+
*.so
|
| 8 |
+
|
| 9 |
+
# Distribution / packaging
|
| 10 |
+
.Python
|
| 11 |
+
build/
|
| 12 |
+
develop-eggs/
|
| 13 |
+
dist/
|
| 14 |
+
downloads/
|
| 15 |
+
eggs/
|
| 16 |
+
.eggs/
|
| 17 |
+
lib/
|
| 18 |
+
lib64/
|
| 19 |
+
parts/
|
| 20 |
+
sdist/
|
| 21 |
+
var/
|
| 22 |
+
wheels/
|
| 23 |
+
*.egg-info/
|
| 24 |
+
.installed.cfg
|
| 25 |
+
*.egg
|
| 26 |
+
MANIFEST
|
| 27 |
+
|
| 28 |
+
# PyInstaller
|
| 29 |
+
# Usually these files are written by a python script from a template
|
| 30 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
| 31 |
+
*.manifest
|
| 32 |
+
*.spec
|
| 33 |
+
|
| 34 |
+
# Installer logs
|
| 35 |
+
pip-log.txt
|
| 36 |
+
pip-delete-this-directory.txt
|
| 37 |
+
|
| 38 |
+
# Unit test / coverage reports
|
| 39 |
+
htmlcov/
|
| 40 |
+
.tox/
|
| 41 |
+
.coverage
|
| 42 |
+
.coverage.*
|
| 43 |
+
.cache
|
| 44 |
+
nosetests.xml
|
| 45 |
+
coverage.xml
|
| 46 |
+
*.cover
|
| 47 |
+
.hypothesis/
|
| 48 |
+
.pytest_cache/
|
| 49 |
+
|
| 50 |
+
# Translations
|
| 51 |
+
*.mo
|
| 52 |
+
*.pot
|
| 53 |
+
|
| 54 |
+
# Django stuff:
|
| 55 |
+
*.log
|
| 56 |
+
local_settings.py
|
| 57 |
+
db.sqlite3
|
| 58 |
+
|
| 59 |
+
# Flask stuff:
|
| 60 |
+
instance/
|
| 61 |
+
.webassets-cache
|
| 62 |
+
|
| 63 |
+
# Scrapy stuff:
|
| 64 |
+
.scrapy
|
| 65 |
+
|
| 66 |
+
# Sphinx documentation
|
| 67 |
+
docs/_build/
|
| 68 |
+
|
| 69 |
+
# PyBuilder
|
| 70 |
+
target/
|
| 71 |
+
|
| 72 |
+
# Jupyter Notebook
|
| 73 |
+
.ipynb_checkpoints
|
| 74 |
+
|
| 75 |
+
# pyenv
|
| 76 |
+
.python-version
|
| 77 |
+
|
| 78 |
+
# celery beat schedule file
|
| 79 |
+
celerybeat-schedule
|
| 80 |
+
|
| 81 |
+
# SageMath parsed files
|
| 82 |
+
*.sage.py
|
| 83 |
+
|
| 84 |
+
# Environments
|
| 85 |
+
.env
|
| 86 |
+
.venv
|
| 87 |
+
env/
|
| 88 |
+
venv/
|
| 89 |
+
ENV/
|
| 90 |
+
env.bak/
|
| 91 |
+
venv.bak/
|
| 92 |
+
|
| 93 |
+
# Spyder project settings
|
| 94 |
+
.spyderproject
|
| 95 |
+
.spyproject
|
| 96 |
+
|
| 97 |
+
# Rope project settings
|
| 98 |
+
.ropeproject
|
| 99 |
+
|
| 100 |
+
# mkdocs documentation
|
| 101 |
+
/site
|
| 102 |
+
|
| 103 |
+
# mypy
|
| 104 |
+
.mypy_cache/
|
| 105 |
+
|
| 106 |
+
*.png
|
| 107 |
+
*.pfm
|
| 108 |
+
*.jpg
|
| 109 |
+
*.jpeg
|
| 110 |
+
*.pt
|
RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/Dockerfile
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# enables cuda support in docker
FROM nvidia/cuda:10.2-cudnn7-runtime-ubuntu18.04

# install python 3.6, pip and requirements for opencv-python
# (see https://github.com/NVIDIA/nvidia-docker/issues/864)
RUN apt-get update && apt-get -y install \
    python3 \
    python3-pip \
    libsm6 \
    libxext6 \
    libxrender-dev \
    curl \
    && rm -rf /var/lib/apt/lists/*

# install python dependencies
RUN pip3 install --upgrade pip
RUN pip3 install torch~=1.8 torchvision opencv-python-headless~=3.4 timm

# copy inference code
WORKDIR /opt/MiDaS
COPY ./midas ./midas
COPY ./*.py ./

# download model weights so the docker image can be used offline
# NOTE(review): the original line was `cd weights && {curl ...; cd -; }` —
# the weights directory is never copied/created by the COPY steps above, so
# `cd weights` fails, and `{curl` (no space after `{`) is not a valid sh
# brace group. Create the directory first and drop the brace group.
RUN mkdir -p weights && cd weights && curl -OL https://github.com/isl-org/MiDaS/releases/download/v3/dpt_hybrid_384.pt
RUN python3 run.py --model_type dpt_hybrid; exit 0

# entrypoint (dont forget to mount input and output directories)
CMD python3 run.py --model_type dpt_hybrid
|
RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2019 Intel ISL (Intel Intelligent Systems Lab)
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/README.md
ADDED
|
@@ -0,0 +1,259 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
## Towards Robust Monocular Depth Estimation: Mixing Datasets for Zero-shot Cross-dataset Transfer
|
| 2 |
+
|
| 3 |
+
This repository contains code to compute depth from a single image. It accompanies our [paper](https://arxiv.org/abs/1907.01341v3):
|
| 4 |
+
|
| 5 |
+
>Towards Robust Monocular Depth Estimation: Mixing Datasets for Zero-shot Cross-dataset Transfer
|
| 6 |
+
René Ranftl, Katrin Lasinger, David Hafner, Konrad Schindler, Vladlen Koltun
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
and our [preprint](https://arxiv.org/abs/2103.13413):
|
| 10 |
+
|
| 11 |
+
> Vision Transformers for Dense Prediction
|
| 12 |
+
> René Ranftl, Alexey Bochkovskiy, Vladlen Koltun
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
MiDaS was trained on up to 12 datasets (ReDWeb, DIML, Movies, MegaDepth, WSVD, TartanAir, HRWSI, ApolloScape, BlendedMVS, IRS, KITTI, NYU Depth V2) with
|
| 16 |
+
multi-objective optimization.
|
| 17 |
+
The original model that was trained on 5 datasets (`MIX 5` in the paper) can be found [here](https://github.com/isl-org/MiDaS/releases/tag/v2).
|
| 18 |
+
The figure below shows an overview of the different MiDaS models; the bubble size scales with number of parameters.
|
| 19 |
+
|
| 20 |
+

|
| 21 |
+
|
| 22 |
+
### Setup
|
| 23 |
+
|
| 24 |
+
1) Pick one or more models and download the corresponding weights to the `weights` folder:
|
| 25 |
+
|
| 26 |
+
MiDaS 3.1
|
| 27 |
+
- For highest quality: [dpt_beit_large_512](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_large_512.pt)
|
| 28 |
+
- For moderately less quality, but better speed-performance trade-off: [dpt_swin2_large_384](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_swin2_large_384.pt)
|
| 29 |
+
- For embedded devices: [dpt_swin2_tiny_256](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_swin2_tiny_256.pt), [dpt_levit_224](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_levit_224.pt)
|
| 30 |
+
- For inference on Intel CPUs, OpenVINO may be used for the small legacy model: openvino_midas_v21_small [.xml](https://github.com/isl-org/MiDaS/releases/download/v3_1/openvino_midas_v21_small_256.xml), [.bin](https://github.com/isl-org/MiDaS/releases/download/v3_1/openvino_midas_v21_small_256.bin)
|
| 31 |
+
|
| 32 |
+
MiDaS 3.0: Legacy transformer models [dpt_large_384](https://github.com/isl-org/MiDaS/releases/download/v3/dpt_large_384.pt) and [dpt_hybrid_384](https://github.com/isl-org/MiDaS/releases/download/v3/dpt_hybrid_384.pt)
|
| 33 |
+
|
| 34 |
+
MiDaS 2.1: Legacy convolutional models [midas_v21_384](https://github.com/isl-org/MiDaS/releases/download/v2_1/midas_v21_384.pt) and [midas_v21_small_256](https://github.com/isl-org/MiDaS/releases/download/v2_1/midas_v21_small_256.pt)
|
| 35 |
+
|
| 36 |
+
1) Set up dependencies:
|
| 37 |
+
|
| 38 |
+
```shell
|
| 39 |
+
conda env create -f environment.yaml
|
| 40 |
+
conda activate midas-py310
|
| 41 |
+
```
|
| 42 |
+
|
| 43 |
+
#### optional
|
| 44 |
+
|
| 45 |
+
For the Next-ViT model, execute
|
| 46 |
+
|
| 47 |
+
```shell
|
| 48 |
+
git submodule add https://github.com/isl-org/Next-ViT midas/external/next_vit
|
| 49 |
+
```
|
| 50 |
+
|
| 51 |
+
For the OpenVINO model, install
|
| 52 |
+
|
| 53 |
+
```shell
|
| 54 |
+
pip install openvino
|
| 55 |
+
```
|
| 56 |
+
|
| 57 |
+
### Usage
|
| 58 |
+
|
| 59 |
+
1) Place one or more input images in the folder `input`.
|
| 60 |
+
|
| 61 |
+
2) Run the model with
|
| 62 |
+
|
| 63 |
+
```shell
|
| 64 |
+
python run.py --model_type <model_type> --input_path input --output_path output
|
| 65 |
+
```
|
| 66 |
+
where ```<model_type>``` is chosen from [dpt_beit_large_512](#model_type), [dpt_beit_large_384](#model_type),
|
| 67 |
+
[dpt_beit_base_384](#model_type), [dpt_swin2_large_384](#model_type), [dpt_swin2_base_384](#model_type),
|
| 68 |
+
[dpt_swin2_tiny_256](#model_type), [dpt_swin_large_384](#model_type), [dpt_next_vit_large_384](#model_type),
|
| 69 |
+
[dpt_levit_224](#model_type), [dpt_large_384](#model_type), [dpt_hybrid_384](#model_type),
|
| 70 |
+
[midas_v21_384](#model_type), [midas_v21_small_256](#model_type), [openvino_midas_v21_small_256](#model_type).
|
| 71 |
+
|
| 72 |
+
3) The resulting depth maps are written to the `output` folder.
|
| 73 |
+
|
| 74 |
+
#### optional
|
| 75 |
+
|
| 76 |
+
1) By default, the inference resizes the height of input images to the size of a model to fit into the encoder. This
|
| 77 |
+
size is given by the numbers in the model names of the [accuracy table](#accuracy). Some models do not only support a single
|
| 78 |
+
inference height but a range of different heights. Feel free to explore different heights by appending the extra
|
| 79 |
+
command line argument `--height`. Unsupported height values will throw an error. Note that using this argument may
|
| 80 |
+
decrease the model accuracy.
|
| 81 |
+
2) By default, the inference keeps the aspect ratio of input images when feeding them into the encoder if this is
|
| 82 |
+
supported by a model (all models except for Swin, Swin2, LeViT). In order to resize to a square resolution,
|
| 83 |
+
disregarding the aspect ratio while preserving the height, use the command line argument `--square`.
|
| 84 |
+
|
| 85 |
+
#### via Camera
|
| 86 |
+
|
| 87 |
+
If you want the input images to be grabbed from the camera and shown in a window, leave the input and output paths
|
| 88 |
+
away and choose a model type as shown above:
|
| 89 |
+
|
| 90 |
+
```shell
|
| 91 |
+
python run.py --model_type <model_type> --side
|
| 92 |
+
```
|
| 93 |
+
|
| 94 |
+
The argument `--side` is optional and causes both the input RGB image and the output depth map to be shown
|
| 95 |
+
side-by-side for comparison.
|
| 96 |
+
|
| 97 |
+
#### via Docker
|
| 98 |
+
|
| 99 |
+
1) Make sure you have installed Docker and the
|
| 100 |
+
[NVIDIA Docker runtime](https://github.com/NVIDIA/nvidia-docker/wiki/Installation-\(Native-GPU-Support\)).
|
| 101 |
+
|
| 102 |
+
2) Build the Docker image:
|
| 103 |
+
|
| 104 |
+
```shell
|
| 105 |
+
docker build -t midas .
|
| 106 |
+
```
|
| 107 |
+
|
| 108 |
+
3) Run inference:
|
| 109 |
+
|
| 110 |
+
```shell
|
| 111 |
+
docker run --rm --gpus all -v $PWD/input:/opt/MiDaS/input -v $PWD/output:/opt/MiDaS/output -v $PWD/weights:/opt/MiDaS/weights midas
|
| 112 |
+
```
|
| 113 |
+
|
| 114 |
+
This command passes through all of your NVIDIA GPUs to the container, mounts the
|
| 115 |
+
`input` and `output` directories and then runs the inference.
|
| 116 |
+
|
| 117 |
+
#### via PyTorch Hub
|
| 118 |
+
|
| 119 |
+
The pretrained model is also available on [PyTorch Hub](https://pytorch.org/hub/intelisl_midas_v2/)
|
| 120 |
+
|
| 121 |
+
#### via TensorFlow or ONNX
|
| 122 |
+
|
| 123 |
+
See [README](https://github.com/isl-org/MiDaS/tree/master/tf) in the `tf` subdirectory.
|
| 124 |
+
|
| 125 |
+
Currently only supports MiDaS v2.1.
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
#### via Mobile (iOS / Android)
|
| 129 |
+
|
| 130 |
+
See [README](https://github.com/isl-org/MiDaS/tree/master/mobile) in the `mobile` subdirectory.
|
| 131 |
+
|
| 132 |
+
#### via ROS1 (Robot Operating System)
|
| 133 |
+
|
| 134 |
+
See [README](https://github.com/isl-org/MiDaS/tree/master/ros) in the `ros` subdirectory.
|
| 135 |
+
|
| 136 |
+
Currently only supports MiDaS v2.1. DPT-based models to be added.
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
### Accuracy
|
| 140 |
+
|
| 141 |
+
We provide a **zero-shot error** $\epsilon_d$ which is evaluated for 6 different datasets
|
| 142 |
+
(see [paper](https://arxiv.org/abs/1907.01341v3)). **Lower error values are better**.
|
| 143 |
+
$\color{green}{\textsf{Overall model quality is represented by the improvement}}$ ([Imp.](#improvement)) with respect to
|
| 144 |
+
MiDaS 3.0 DPT<sub>L-384</sub>. The models are grouped by the height used for inference, whereas the square training resolution is given by
|
| 145 |
+
the numbers in the model names. The table also shows the **number of parameters** (in millions) and the
|
| 146 |
+
**frames per second** for inference at the training resolution (for GPU RTX 3090):
|
| 147 |
+
|
| 148 |
+
| MiDaS Model | DIW </br><sup>WHDR</sup> | Eth3d </br><sup>AbsRel</sup> | Sintel </br><sup>AbsRel</sup> | TUM </br><sup>δ1</sup> | KITTI </br><sup>δ1</sup> | NYUv2 </br><sup>δ1</sup> | $\color{green}{\textsf{Imp.}}$ </br><sup>%</sup> | Par.</br><sup>M</sup> | FPS</br><sup> </sup> |
|
| 149 |
+
|-----------------------------------------------------------------------------------------------------------------------|-------------------------:|-----------------------------:|------------------------------:|-------------------------:|-------------------------:|-------------------------:|-------------------------------------------------:|----------------------:|--------------------------:|
|
| 150 |
+
| **Inference height 512** | | | | | | | | | |
|
| 151 |
+
| [v3.1 BEiT<sub>L-512</sub>](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_large_512.pt) | 0.1137 | 0.0659 | 0.2366 | **6.13** | 11.56* | **1.86*** | $\color{green}{\textsf{19}}$ | **345** | **5.7** |
|
| 152 |
+
| [v3.1 BEiT<sub>L-512</sub>](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_large_512.pt)$\tiny{\square}$ | **0.1121** | **0.0614** | **0.2090** | 6.46 | **5.00*** | 1.90* | $\color{green}{\textsf{34}}$ | **345** | **5.7** |
|
| 153 |
+
| | | | | | | | | | |
|
| 154 |
+
| **Inference height 384** | | | | | | | | | |
|
| 155 |
+
| [v3.1 BEiT<sub>L-512</sub>](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_large_512.pt) | 0.1245 | 0.0681 | **0.2176** | **6.13** | 6.28* | **2.16*** | $\color{green}{\textsf{28}}$ | 345 | 12 |
|
| 156 |
+
| [v3.1 Swin2<sub>L-384</sub>](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_swin2_large_384.pt)$\tiny{\square}$ | 0.1106 | 0.0732 | 0.2442 | 8.87 | **5.84*** | 2.92* | $\color{green}{\textsf{22}}$ | 213 | 41 |
|
| 157 |
+
| [v3.1 Swin2<sub>B-384</sub>](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_swin2_base_384.pt)$\tiny{\square}$ | 0.1095 | 0.0790 | 0.2404 | 8.93 | 5.97* | 3.28* | $\color{green}{\textsf{22}}$ | 102 | 39 |
|
| 158 |
+
| [v3.1 Swin<sub>L-384</sub>](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_swin_large_384.pt)$\tiny{\square}$ | 0.1126 | 0.0853 | 0.2428 | 8.74 | 6.60* | 3.34* | $\color{green}{\textsf{17}}$ | 213 | 49 |
|
| 159 |
+
| [v3.1 BEiT<sub>L-384</sub>](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_large_384.pt) | 0.1239 | **0.0667** | 0.2545 | 7.17 | 9.84* | 2.21* | $\color{green}{\textsf{17}}$ | 344 | 13 |
|
| 160 |
+
| [v3.1 Next-ViT<sub>L-384</sub>](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_next_vit_large_384.pt) | **0.1031** | 0.0954 | 0.2295 | 9.21 | 6.89* | 3.47* | $\color{green}{\textsf{16}}$ | **72** | 30 |
|
| 161 |
+
| [v3.1 BEiT<sub>B-384</sub>](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_base_384.pt) | 0.1159 | 0.0967 | 0.2901 | 9.88 | 26.60* | 3.91* | $\color{green}{\textsf{-31}}$ | 112 | 31 |
|
| 162 |
+
| [v3.0 DPT<sub>L-384</sub>](https://github.com/isl-org/MiDaS/releases/download/v3/dpt_large_384.pt) | 0.1082 | 0.0888 | 0.2697 | 9.97 | 8.46 | 8.32 | $\color{green}{\textsf{0}}$ | 344 | **61** |
|
| 163 |
+
| [v3.0 DPT<sub>H-384</sub>](https://github.com/isl-org/MiDaS/releases/download/v3/dpt_hybrid_384.pt) | 0.1106 | 0.0934 | 0.2741 | 10.89 | 11.56 | 8.69 | $\color{green}{\textsf{-10}}$ | 123 | 50 |
|
| 164 |
+
| [v2.1 Large<sub>384</sub>](https://github.com/isl-org/MiDaS/releases/download/v2_1/midas_v21_384.pt) | 0.1295 | 0.1155 | 0.3285 | 12.51 | 16.08 | 8.71 | $\color{green}{\textsf{-32}}$ | 105 | 47 |
|
| 165 |
+
| | | | | | | | | | |
|
| 166 |
+
| **Inference height 256** | | | | | | | | | |
|
| 167 |
+
| [v3.1 Swin2<sub>T-256</sub>](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_swin2_tiny_256.pt)$\tiny{\square}$ | **0.1211** | **0.1106** | **0.2868** | **13.43** | **10.13*** | **5.55*** | $\color{green}{\textsf{-11}}$ | 42 | 64 |
|
| 168 |
+
| [v2.1 Small<sub>256</sub>](https://github.com/isl-org/MiDaS/releases/download/v2_1/midas_v21_small_256.pt) | 0.1344 | 0.1344 | 0.3370 | 14.53 | 29.27 | 13.43 | $\color{green}{\textsf{-76}}$ | **21** | **90** |
|
| 169 |
+
| | | | | | | | | | |
|
| 170 |
+
| **Inference height 224** | | | | | | | | | |
|
| 171 |
+
| [v3.1 LeViT<sub>224</sub>](https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_levit_224.pt)$\tiny{\square}$ | **0.1314** | **0.1206** | **0.3148** | **18.21** | **15.27*** | **8.64*** | $\color{green}{\textsf{-40}}$ | **51** | **73** |
|
| 172 |
+
|
| 173 |
+
* No zero-shot error, because models are also trained on KITTI and NYU Depth V2\
|
| 174 |
+
$\square$ Validation performed at **square resolution**, either because the transformer encoder backbone of a model
|
| 175 |
+
does not support non-square resolutions (Swin, Swin2, LeViT) or for comparison with these models. All other
|
| 176 |
+
validations keep the aspect ratio. A difference in resolution limits the comparability of the zero-shot error and the
|
| 177 |
+
improvement, because these quantities are averages over the pixels of an image and do not take into account the
|
| 178 |
+
advantage of more details due to a higher resolution.\
|
| 179 |
+
Best values per column and same validation height in bold
|
| 180 |
+
|
| 181 |
+
#### Improvement
|
| 182 |
+
|
| 183 |
+
The improvement in the above table is defined as the relative zero-shot error with respect to MiDaS v3.0
|
| 184 |
+
DPT<sub>L-384</sub> and averaging over the datasets. So, if $\epsilon_d$ is the zero-shot error for dataset $d$, then
|
| 185 |
+
the $\color{green}{\textsf{improvement}}$ is given by $100(1-(1/6)\sum_d\epsilon_d/\epsilon_{d,\rm{DPT_{L-384}}})$%.
|
| 186 |
+
|
| 187 |
+
Note that the improvements of 10% for MiDaS v2.0 → v2.1 and 21% for MiDaS v2.1 → v3.0 are not visible from the
|
| 188 |
+
improvement column (Imp.) in the table but would require an evaluation with respect to MiDaS v2.1 Large<sub>384</sub>
|
| 189 |
+
and v2.0 Large<sub>384</sub> respectively instead of v3.0 DPT<sub>L-384</sub>.
|
| 190 |
+
|
| 191 |
+
### Depth map comparison
|
| 192 |
+
|
| 193 |
+
Zoom in for better visibility
|
| 194 |
+

|
| 195 |
+
|
| 196 |
+
### Speed on Camera Feed
|
| 197 |
+
|
| 198 |
+
Test configuration
|
| 199 |
+
- Windows 10
|
| 200 |
+
- 11th Gen Intel Core i7-1185G7 3.00GHz
|
| 201 |
+
- 16GB RAM
|
| 202 |
+
- Camera resolution 640x480
|
| 203 |
+
- openvino_midas_v21_small_256
|
| 204 |
+
|
| 205 |
+
Speed: 22 FPS
|
| 206 |
+
|
| 207 |
+
### Changelog
|
| 208 |
+
|
| 209 |
+
* [Dec 2022] Released MiDaS v3.1:
|
| 210 |
+
- New models based on 5 different types of transformers ([BEiT](https://arxiv.org/pdf/2106.08254.pdf), [Swin2](https://arxiv.org/pdf/2111.09883.pdf), [Swin](https://arxiv.org/pdf/2103.14030.pdf), [Next-ViT](https://arxiv.org/pdf/2207.05501.pdf), [LeViT](https://arxiv.org/pdf/2104.01136.pdf))
|
| 211 |
+
- Training datasets extended from 10 to 12, including also KITTI and NYU Depth V2 using [BTS](https://github.com/cleinc/bts) split
|
| 212 |
+
- Best model, BEiT<sub>Large 512</sub>, with resolution 512x512, is on average about [28% more accurate](#Accuracy) than MiDaS v3.0
|
| 213 |
+
- Integrated live depth estimation from camera feed
|
| 214 |
+
* [Sep 2021] Integrated to [Huggingface Spaces](https://huggingface.co/spaces) with [Gradio](https://github.com/gradio-app/gradio). See [Gradio Web Demo](https://huggingface.co/spaces/akhaliq/DPT-Large).
|
| 215 |
+
* [Apr 2021] Released MiDaS v3.0:
|
| 216 |
+
- New models based on [Dense Prediction Transformers](https://arxiv.org/abs/2103.13413) are on average [21% more accurate](#Accuracy) than MiDaS v2.1
|
| 217 |
+
- Additional models can be found [here](https://github.com/isl-org/DPT)
|
| 218 |
+
* [Nov 2020] Released MiDaS v2.1:
|
| 219 |
+
- New model that was trained on 10 datasets and is on average about [10% more accurate](#Accuracy) than [MiDaS v2.0](https://github.com/isl-org/MiDaS/releases/tag/v2)
|
| 220 |
+
- New light-weight model that achieves [real-time performance](https://github.com/isl-org/MiDaS/tree/master/mobile) on mobile platforms.
|
| 221 |
+
- Sample applications for [iOS](https://github.com/isl-org/MiDaS/tree/master/mobile/ios) and [Android](https://github.com/isl-org/MiDaS/tree/master/mobile/android)
|
| 222 |
+
- [ROS package](https://github.com/isl-org/MiDaS/tree/master/ros) for easy deployment on robots
|
| 223 |
+
* [Jul 2020] Added TensorFlow and ONNX code. Added [online demo](http://35.202.76.57/).
|
| 224 |
+
* [Dec 2019] Released new version of MiDaS - the new model is significantly more accurate and robust
|
| 225 |
+
* [Jul 2019] Initial release of MiDaS ([Link](https://github.com/isl-org/MiDaS/releases/tag/v1))
|
| 226 |
+
|
| 227 |
+
### Citation
|
| 228 |
+
|
| 229 |
+
Please cite our paper if you use this code or any of the models:
|
| 230 |
+
```
|
| 231 |
+
@ARTICLE {Ranftl2022,
|
| 232 |
+
author = "Ren\'{e} Ranftl and Katrin Lasinger and David Hafner and Konrad Schindler and Vladlen Koltun",
|
| 233 |
+
title = "Towards Robust Monocular Depth Estimation: Mixing Datasets for Zero-Shot Cross-Dataset Transfer",
|
| 234 |
+
journal = "IEEE Transactions on Pattern Analysis and Machine Intelligence",
|
| 235 |
+
year = "2022",
|
| 236 |
+
volume = "44",
|
| 237 |
+
number = "3"
|
| 238 |
+
}
|
| 239 |
+
```
|
| 240 |
+
|
| 241 |
+
If you use a DPT-based model, please also cite:
|
| 242 |
+
|
| 243 |
+
```
|
| 244 |
+
@article{Ranftl2021,
|
| 245 |
+
author = {Ren\'{e} Ranftl and Alexey Bochkovskiy and Vladlen Koltun},
|
| 246 |
+
title = {Vision Transformers for Dense Prediction},
|
| 247 |
+
journal = {ICCV},
|
| 248 |
+
year = {2021},
|
| 249 |
+
}
|
| 250 |
+
```
|
| 251 |
+
|
| 252 |
+
### Acknowledgements
|
| 253 |
+
|
| 254 |
+
Our work builds on and uses code from [timm](https://github.com/rwightman/pytorch-image-models) and [Next-ViT](https://github.com/bytedance/Next-ViT).
|
| 255 |
+
We'd like to thank the authors for making these libraries available.
|
| 256 |
+
|
| 257 |
+
### License
|
| 258 |
+
|
| 259 |
+
MIT License
|
RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/environment.yaml
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
name: midas-py310
|
| 2 |
+
channels:
|
| 3 |
+
- pytorch
|
| 4 |
+
- defaults
|
| 5 |
+
dependencies:
|
| 6 |
+
- nvidia::cudatoolkit=11.7
|
| 7 |
+
- python=3.10.8
|
| 8 |
+
- pytorch::pytorch=1.13.0
|
| 9 |
+
- torchvision=0.14.0
|
| 10 |
+
- pip=22.3.1
|
| 11 |
+
- numpy=1.23.4
|
| 12 |
+
- pip:
|
| 13 |
+
- opencv-python==4.6.0.66
|
| 14 |
+
- imutils==0.5.4
|
| 15 |
+
- timm==0.6.12
|
| 16 |
+
- einops==0.6.0
|
RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/hubconf.py
ADDED
|
@@ -0,0 +1,435 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
dependencies = ["torch"]
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
|
| 5 |
+
from midas.dpt_depth import DPTDepthModel
|
| 6 |
+
from midas.midas_net import MidasNet
|
| 7 |
+
from midas.midas_net_custom import MidasNet_small
|
| 8 |
+
|
| 9 |
+
def DPT_BEiT_L_512(pretrained=True, **kwargs):
|
| 10 |
+
""" # This docstring shows up in hub.help()
|
| 11 |
+
MiDaS DPT_BEiT_L_512 model for monocular depth estimation
|
| 12 |
+
pretrained (bool): load pretrained weights into model
|
| 13 |
+
"""
|
| 14 |
+
|
| 15 |
+
model = DPTDepthModel(
|
| 16 |
+
path=None,
|
| 17 |
+
backbone="beitl16_512",
|
| 18 |
+
non_negative=True,
|
| 19 |
+
)
|
| 20 |
+
|
| 21 |
+
if pretrained:
|
| 22 |
+
checkpoint = (
|
| 23 |
+
"https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_large_512.pt"
|
| 24 |
+
)
|
| 25 |
+
state_dict = torch.hub.load_state_dict_from_url(
|
| 26 |
+
checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True
|
| 27 |
+
)
|
| 28 |
+
model.load_state_dict(state_dict)
|
| 29 |
+
|
| 30 |
+
return model
|
| 31 |
+
|
| 32 |
+
def DPT_BEiT_L_384(pretrained=True, **kwargs):
|
| 33 |
+
""" # This docstring shows up in hub.help()
|
| 34 |
+
MiDaS DPT_BEiT_L_384 model for monocular depth estimation
|
| 35 |
+
pretrained (bool): load pretrained weights into model
|
| 36 |
+
"""
|
| 37 |
+
|
| 38 |
+
model = DPTDepthModel(
|
| 39 |
+
path=None,
|
| 40 |
+
backbone="beitl16_384",
|
| 41 |
+
non_negative=True,
|
| 42 |
+
)
|
| 43 |
+
|
| 44 |
+
if pretrained:
|
| 45 |
+
checkpoint = (
|
| 46 |
+
"https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_large_384.pt"
|
| 47 |
+
)
|
| 48 |
+
state_dict = torch.hub.load_state_dict_from_url(
|
| 49 |
+
checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True
|
| 50 |
+
)
|
| 51 |
+
model.load_state_dict(state_dict)
|
| 52 |
+
|
| 53 |
+
return model
|
| 54 |
+
|
| 55 |
+
def DPT_BEiT_B_384(pretrained=True, **kwargs):
|
| 56 |
+
""" # This docstring shows up in hub.help()
|
| 57 |
+
MiDaS DPT_BEiT_B_384 model for monocular depth estimation
|
| 58 |
+
pretrained (bool): load pretrained weights into model
|
| 59 |
+
"""
|
| 60 |
+
|
| 61 |
+
model = DPTDepthModel(
|
| 62 |
+
path=None,
|
| 63 |
+
backbone="beitb16_384",
|
| 64 |
+
non_negative=True,
|
| 65 |
+
)
|
| 66 |
+
|
| 67 |
+
if pretrained:
|
| 68 |
+
checkpoint = (
|
| 69 |
+
"https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_beit_base_384.pt"
|
| 70 |
+
)
|
| 71 |
+
state_dict = torch.hub.load_state_dict_from_url(
|
| 72 |
+
checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True
|
| 73 |
+
)
|
| 74 |
+
model.load_state_dict(state_dict)
|
| 75 |
+
|
| 76 |
+
return model
|
| 77 |
+
|
| 78 |
+
def DPT_SwinV2_L_384(pretrained=True, **kwargs):
|
| 79 |
+
""" # This docstring shows up in hub.help()
|
| 80 |
+
MiDaS DPT_SwinV2_L_384 model for monocular depth estimation
|
| 81 |
+
pretrained (bool): load pretrained weights into model
|
| 82 |
+
"""
|
| 83 |
+
|
| 84 |
+
model = DPTDepthModel(
|
| 85 |
+
path=None,
|
| 86 |
+
backbone="swin2l24_384",
|
| 87 |
+
non_negative=True,
|
| 88 |
+
)
|
| 89 |
+
|
| 90 |
+
if pretrained:
|
| 91 |
+
checkpoint = (
|
| 92 |
+
"https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_swin2_large_384.pt"
|
| 93 |
+
)
|
| 94 |
+
state_dict = torch.hub.load_state_dict_from_url(
|
| 95 |
+
checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True
|
| 96 |
+
)
|
| 97 |
+
model.load_state_dict(state_dict)
|
| 98 |
+
|
| 99 |
+
return model
|
| 100 |
+
|
| 101 |
+
def DPT_SwinV2_B_384(pretrained=True, **kwargs):
|
| 102 |
+
""" # This docstring shows up in hub.help()
|
| 103 |
+
MiDaS DPT_SwinV2_B_384 model for monocular depth estimation
|
| 104 |
+
pretrained (bool): load pretrained weights into model
|
| 105 |
+
"""
|
| 106 |
+
|
| 107 |
+
model = DPTDepthModel(
|
| 108 |
+
path=None,
|
| 109 |
+
backbone="swin2b24_384",
|
| 110 |
+
non_negative=True,
|
| 111 |
+
)
|
| 112 |
+
|
| 113 |
+
if pretrained:
|
| 114 |
+
checkpoint = (
|
| 115 |
+
"https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_swin2_base_384.pt"
|
| 116 |
+
)
|
| 117 |
+
state_dict = torch.hub.load_state_dict_from_url(
|
| 118 |
+
checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True
|
| 119 |
+
)
|
| 120 |
+
model.load_state_dict(state_dict)
|
| 121 |
+
|
| 122 |
+
return model
|
| 123 |
+
|
| 124 |
+
def DPT_SwinV2_T_256(pretrained=True, **kwargs):
|
| 125 |
+
""" # This docstring shows up in hub.help()
|
| 126 |
+
MiDaS DPT_SwinV2_T_256 model for monocular depth estimation
|
| 127 |
+
pretrained (bool): load pretrained weights into model
|
| 128 |
+
"""
|
| 129 |
+
|
| 130 |
+
model = DPTDepthModel(
|
| 131 |
+
path=None,
|
| 132 |
+
backbone="swin2t16_256",
|
| 133 |
+
non_negative=True,
|
| 134 |
+
)
|
| 135 |
+
|
| 136 |
+
if pretrained:
|
| 137 |
+
checkpoint = (
|
| 138 |
+
"https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_swin2_tiny_256.pt"
|
| 139 |
+
)
|
| 140 |
+
state_dict = torch.hub.load_state_dict_from_url(
|
| 141 |
+
checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True
|
| 142 |
+
)
|
| 143 |
+
model.load_state_dict(state_dict)
|
| 144 |
+
|
| 145 |
+
return model
|
| 146 |
+
|
| 147 |
+
def DPT_Swin_L_384(pretrained=True, **kwargs):
|
| 148 |
+
""" # This docstring shows up in hub.help()
|
| 149 |
+
MiDaS DPT_Swin_L_384 model for monocular depth estimation
|
| 150 |
+
pretrained (bool): load pretrained weights into model
|
| 151 |
+
"""
|
| 152 |
+
|
| 153 |
+
model = DPTDepthModel(
|
| 154 |
+
path=None,
|
| 155 |
+
backbone="swinl12_384",
|
| 156 |
+
non_negative=True,
|
| 157 |
+
)
|
| 158 |
+
|
| 159 |
+
if pretrained:
|
| 160 |
+
checkpoint = (
|
| 161 |
+
"https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_swin_large_384.pt"
|
| 162 |
+
)
|
| 163 |
+
state_dict = torch.hub.load_state_dict_from_url(
|
| 164 |
+
checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True
|
| 165 |
+
)
|
| 166 |
+
model.load_state_dict(state_dict)
|
| 167 |
+
|
| 168 |
+
return model
|
| 169 |
+
|
| 170 |
+
def DPT_Next_ViT_L_384(pretrained=True, **kwargs):
|
| 171 |
+
""" # This docstring shows up in hub.help()
|
| 172 |
+
MiDaS DPT_Next_ViT_L_384 model for monocular depth estimation
|
| 173 |
+
pretrained (bool): load pretrained weights into model
|
| 174 |
+
"""
|
| 175 |
+
|
| 176 |
+
model = DPTDepthModel(
|
| 177 |
+
path=None,
|
| 178 |
+
backbone="next_vit_large_6m",
|
| 179 |
+
non_negative=True,
|
| 180 |
+
)
|
| 181 |
+
|
| 182 |
+
if pretrained:
|
| 183 |
+
checkpoint = (
|
| 184 |
+
"https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_next_vit_large_384.pt"
|
| 185 |
+
)
|
| 186 |
+
state_dict = torch.hub.load_state_dict_from_url(
|
| 187 |
+
checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True
|
| 188 |
+
)
|
| 189 |
+
model.load_state_dict(state_dict)
|
| 190 |
+
|
| 191 |
+
return model
|
| 192 |
+
|
| 193 |
+
def DPT_LeViT_224(pretrained=True, **kwargs):
|
| 194 |
+
""" # This docstring shows up in hub.help()
|
| 195 |
+
MiDaS DPT_LeViT_224 model for monocular depth estimation
|
| 196 |
+
pretrained (bool): load pretrained weights into model
|
| 197 |
+
"""
|
| 198 |
+
|
| 199 |
+
model = DPTDepthModel(
|
| 200 |
+
path=None,
|
| 201 |
+
backbone="levit_384",
|
| 202 |
+
non_negative=True,
|
| 203 |
+
head_features_1=64,
|
| 204 |
+
head_features_2=8,
|
| 205 |
+
)
|
| 206 |
+
|
| 207 |
+
if pretrained:
|
| 208 |
+
checkpoint = (
|
| 209 |
+
"https://github.com/isl-org/MiDaS/releases/download/v3_1/dpt_levit_224.pt"
|
| 210 |
+
)
|
| 211 |
+
state_dict = torch.hub.load_state_dict_from_url(
|
| 212 |
+
checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True
|
| 213 |
+
)
|
| 214 |
+
model.load_state_dict(state_dict)
|
| 215 |
+
|
| 216 |
+
return model
|
| 217 |
+
|
| 218 |
+
def DPT_Large(pretrained=True, **kwargs):
|
| 219 |
+
""" # This docstring shows up in hub.help()
|
| 220 |
+
MiDaS DPT-Large model for monocular depth estimation
|
| 221 |
+
pretrained (bool): load pretrained weights into model
|
| 222 |
+
"""
|
| 223 |
+
|
| 224 |
+
model = DPTDepthModel(
|
| 225 |
+
path=None,
|
| 226 |
+
backbone="vitl16_384",
|
| 227 |
+
non_negative=True,
|
| 228 |
+
)
|
| 229 |
+
|
| 230 |
+
if pretrained:
|
| 231 |
+
checkpoint = (
|
| 232 |
+
"https://github.com/isl-org/MiDaS/releases/download/v3/dpt_large_384.pt"
|
| 233 |
+
)
|
| 234 |
+
state_dict = torch.hub.load_state_dict_from_url(
|
| 235 |
+
checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True
|
| 236 |
+
)
|
| 237 |
+
model.load_state_dict(state_dict)
|
| 238 |
+
|
| 239 |
+
return model
|
| 240 |
+
|
| 241 |
+
def DPT_Hybrid(pretrained=True, **kwargs):
|
| 242 |
+
""" # This docstring shows up in hub.help()
|
| 243 |
+
MiDaS DPT-Hybrid model for monocular depth estimation
|
| 244 |
+
pretrained (bool): load pretrained weights into model
|
| 245 |
+
"""
|
| 246 |
+
|
| 247 |
+
model = DPTDepthModel(
|
| 248 |
+
path=None,
|
| 249 |
+
backbone="vitb_rn50_384",
|
| 250 |
+
non_negative=True,
|
| 251 |
+
)
|
| 252 |
+
|
| 253 |
+
if pretrained:
|
| 254 |
+
checkpoint = (
|
| 255 |
+
"https://github.com/isl-org/MiDaS/releases/download/v3/dpt_hybrid_384.pt"
|
| 256 |
+
)
|
| 257 |
+
state_dict = torch.hub.load_state_dict_from_url(
|
| 258 |
+
checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True
|
| 259 |
+
)
|
| 260 |
+
model.load_state_dict(state_dict)
|
| 261 |
+
|
| 262 |
+
return model
|
| 263 |
+
|
| 264 |
+
def MiDaS(pretrained=True, **kwargs):
|
| 265 |
+
""" # This docstring shows up in hub.help()
|
| 266 |
+
MiDaS v2.1 model for monocular depth estimation
|
| 267 |
+
pretrained (bool): load pretrained weights into model
|
| 268 |
+
"""
|
| 269 |
+
|
| 270 |
+
model = MidasNet()
|
| 271 |
+
|
| 272 |
+
if pretrained:
|
| 273 |
+
checkpoint = (
|
| 274 |
+
"https://github.com/isl-org/MiDaS/releases/download/v2_1/midas_v21_384.pt"
|
| 275 |
+
)
|
| 276 |
+
state_dict = torch.hub.load_state_dict_from_url(
|
| 277 |
+
checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True
|
| 278 |
+
)
|
| 279 |
+
model.load_state_dict(state_dict)
|
| 280 |
+
|
| 281 |
+
return model
|
| 282 |
+
|
| 283 |
+
def MiDaS_small(pretrained=True, **kwargs):
|
| 284 |
+
""" # This docstring shows up in hub.help()
|
| 285 |
+
MiDaS v2.1 small model for monocular depth estimation on resource-constrained devices
|
| 286 |
+
pretrained (bool): load pretrained weights into model
|
| 287 |
+
"""
|
| 288 |
+
|
| 289 |
+
model = MidasNet_small(None, features=64, backbone="efficientnet_lite3", exportable=True, non_negative=True, blocks={'expand': True})
|
| 290 |
+
|
| 291 |
+
if pretrained:
|
| 292 |
+
checkpoint = (
|
| 293 |
+
"https://github.com/isl-org/MiDaS/releases/download/v2_1/midas_v21_small_256.pt"
|
| 294 |
+
)
|
| 295 |
+
state_dict = torch.hub.load_state_dict_from_url(
|
| 296 |
+
checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True
|
| 297 |
+
)
|
| 298 |
+
model.load_state_dict(state_dict)
|
| 299 |
+
|
| 300 |
+
return model
|
| 301 |
+
|
| 302 |
+
|
| 303 |
+
def transforms():
|
| 304 |
+
import cv2
|
| 305 |
+
from torchvision.transforms import Compose
|
| 306 |
+
from midas.transforms import Resize, NormalizeImage, PrepareForNet
|
| 307 |
+
from midas import transforms
|
| 308 |
+
|
| 309 |
+
transforms.default_transform = Compose(
|
| 310 |
+
[
|
| 311 |
+
lambda img: {"image": img / 255.0},
|
| 312 |
+
Resize(
|
| 313 |
+
384,
|
| 314 |
+
384,
|
| 315 |
+
resize_target=None,
|
| 316 |
+
keep_aspect_ratio=True,
|
| 317 |
+
ensure_multiple_of=32,
|
| 318 |
+
resize_method="upper_bound",
|
| 319 |
+
image_interpolation_method=cv2.INTER_CUBIC,
|
| 320 |
+
),
|
| 321 |
+
NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
|
| 322 |
+
PrepareForNet(),
|
| 323 |
+
lambda sample: torch.from_numpy(sample["image"]).unsqueeze(0),
|
| 324 |
+
]
|
| 325 |
+
)
|
| 326 |
+
|
| 327 |
+
transforms.small_transform = Compose(
|
| 328 |
+
[
|
| 329 |
+
lambda img: {"image": img / 255.0},
|
| 330 |
+
Resize(
|
| 331 |
+
256,
|
| 332 |
+
256,
|
| 333 |
+
resize_target=None,
|
| 334 |
+
keep_aspect_ratio=True,
|
| 335 |
+
ensure_multiple_of=32,
|
| 336 |
+
resize_method="upper_bound",
|
| 337 |
+
image_interpolation_method=cv2.INTER_CUBIC,
|
| 338 |
+
),
|
| 339 |
+
NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
|
| 340 |
+
PrepareForNet(),
|
| 341 |
+
lambda sample: torch.from_numpy(sample["image"]).unsqueeze(0),
|
| 342 |
+
]
|
| 343 |
+
)
|
| 344 |
+
|
| 345 |
+
transforms.dpt_transform = Compose(
|
| 346 |
+
[
|
| 347 |
+
lambda img: {"image": img / 255.0},
|
| 348 |
+
Resize(
|
| 349 |
+
384,
|
| 350 |
+
384,
|
| 351 |
+
resize_target=None,
|
| 352 |
+
keep_aspect_ratio=True,
|
| 353 |
+
ensure_multiple_of=32,
|
| 354 |
+
resize_method="minimal",
|
| 355 |
+
image_interpolation_method=cv2.INTER_CUBIC,
|
| 356 |
+
),
|
| 357 |
+
NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
|
| 358 |
+
PrepareForNet(),
|
| 359 |
+
lambda sample: torch.from_numpy(sample["image"]).unsqueeze(0),
|
| 360 |
+
]
|
| 361 |
+
)
|
| 362 |
+
|
| 363 |
+
transforms.beit512_transform = Compose(
|
| 364 |
+
[
|
| 365 |
+
lambda img: {"image": img / 255.0},
|
| 366 |
+
Resize(
|
| 367 |
+
512,
|
| 368 |
+
512,
|
| 369 |
+
resize_target=None,
|
| 370 |
+
keep_aspect_ratio=True,
|
| 371 |
+
ensure_multiple_of=32,
|
| 372 |
+
resize_method="minimal",
|
| 373 |
+
image_interpolation_method=cv2.INTER_CUBIC,
|
| 374 |
+
),
|
| 375 |
+
NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
|
| 376 |
+
PrepareForNet(),
|
| 377 |
+
lambda sample: torch.from_numpy(sample["image"]).unsqueeze(0),
|
| 378 |
+
]
|
| 379 |
+
)
|
| 380 |
+
|
| 381 |
+
transforms.swin384_transform = Compose(
|
| 382 |
+
[
|
| 383 |
+
lambda img: {"image": img / 255.0},
|
| 384 |
+
Resize(
|
| 385 |
+
384,
|
| 386 |
+
384,
|
| 387 |
+
resize_target=None,
|
| 388 |
+
keep_aspect_ratio=False,
|
| 389 |
+
ensure_multiple_of=32,
|
| 390 |
+
resize_method="minimal",
|
| 391 |
+
image_interpolation_method=cv2.INTER_CUBIC,
|
| 392 |
+
),
|
| 393 |
+
NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
|
| 394 |
+
PrepareForNet(),
|
| 395 |
+
lambda sample: torch.from_numpy(sample["image"]).unsqueeze(0),
|
| 396 |
+
]
|
| 397 |
+
)
|
| 398 |
+
|
| 399 |
+
transforms.swin256_transform = Compose(
|
| 400 |
+
[
|
| 401 |
+
lambda img: {"image": img / 255.0},
|
| 402 |
+
Resize(
|
| 403 |
+
256,
|
| 404 |
+
256,
|
| 405 |
+
resize_target=None,
|
| 406 |
+
keep_aspect_ratio=False,
|
| 407 |
+
ensure_multiple_of=32,
|
| 408 |
+
resize_method="minimal",
|
| 409 |
+
image_interpolation_method=cv2.INTER_CUBIC,
|
| 410 |
+
),
|
| 411 |
+
NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
|
| 412 |
+
PrepareForNet(),
|
| 413 |
+
lambda sample: torch.from_numpy(sample["image"]).unsqueeze(0),
|
| 414 |
+
]
|
| 415 |
+
)
|
| 416 |
+
|
| 417 |
+
transforms.levit_transform = Compose(
|
| 418 |
+
[
|
| 419 |
+
lambda img: {"image": img / 255.0},
|
| 420 |
+
Resize(
|
| 421 |
+
224,
|
| 422 |
+
224,
|
| 423 |
+
resize_target=None,
|
| 424 |
+
keep_aspect_ratio=False,
|
| 425 |
+
ensure_multiple_of=32,
|
| 426 |
+
resize_method="minimal",
|
| 427 |
+
image_interpolation_method=cv2.INTER_CUBIC,
|
| 428 |
+
),
|
| 429 |
+
NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
|
| 430 |
+
PrepareForNet(),
|
| 431 |
+
lambda sample: torch.from_numpy(sample["image"]).unsqueeze(0),
|
| 432 |
+
]
|
| 433 |
+
)
|
| 434 |
+
|
| 435 |
+
return transforms
|
RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/input/.placeholder
ADDED
|
File without changes
|
RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/output/.placeholder
ADDED
|
File without changes
|
RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/run.py
ADDED
|
@@ -0,0 +1,277 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Compute depth maps for images in the input folder.
|
| 2 |
+
"""
|
| 3 |
+
import os
|
| 4 |
+
import glob
|
| 5 |
+
import torch
|
| 6 |
+
import utils
|
| 7 |
+
import cv2
|
| 8 |
+
import argparse
|
| 9 |
+
import time
|
| 10 |
+
|
| 11 |
+
import numpy as np
|
| 12 |
+
|
| 13 |
+
from imutils.video import VideoStream
|
| 14 |
+
from midas.model_loader import default_models, load_model
|
| 15 |
+
|
| 16 |
+
first_execution = True
|
| 17 |
+
def process(device, model, model_type, image, input_size, target_size, optimize, use_camera):
|
| 18 |
+
"""
|
| 19 |
+
Run the inference and interpolate.
|
| 20 |
+
|
| 21 |
+
Args:
|
| 22 |
+
device (torch.device): the torch device used
|
| 23 |
+
model: the model used for inference
|
| 24 |
+
model_type: the type of the model
|
| 25 |
+
image: the image fed into the neural network
|
| 26 |
+
input_size: the size (width, height) of the neural network input (for OpenVINO)
|
| 27 |
+
target_size: the size (width, height) the neural network output is interpolated to
|
| 28 |
+
optimize: optimize the model to half-floats on CUDA?
|
| 29 |
+
use_camera: is the camera used?
|
| 30 |
+
|
| 31 |
+
Returns:
|
| 32 |
+
the prediction
|
| 33 |
+
"""
|
| 34 |
+
global first_execution
|
| 35 |
+
|
| 36 |
+
if "openvino" in model_type:
|
| 37 |
+
if first_execution or not use_camera:
|
| 38 |
+
print(f" Input resized to {input_size[0]}x{input_size[1]} before entering the encoder")
|
| 39 |
+
first_execution = False
|
| 40 |
+
|
| 41 |
+
sample = [np.reshape(image, (1, 3, *input_size))]
|
| 42 |
+
prediction = model(sample)[model.output(0)][0]
|
| 43 |
+
prediction = cv2.resize(prediction, dsize=target_size,
|
| 44 |
+
interpolation=cv2.INTER_CUBIC)
|
| 45 |
+
else:
|
| 46 |
+
sample = torch.from_numpy(image).to(device).unsqueeze(0)
|
| 47 |
+
|
| 48 |
+
if optimize and device == torch.device("cuda"):
|
| 49 |
+
if first_execution:
|
| 50 |
+
print(" Optimization to half-floats activated. Use with caution, because models like Swin require\n"
|
| 51 |
+
" float precision to work properly and may yield non-finite depth values to some extent for\n"
|
| 52 |
+
" half-floats.")
|
| 53 |
+
sample = sample.to(memory_format=torch.channels_last)
|
| 54 |
+
sample = sample.half()
|
| 55 |
+
|
| 56 |
+
if first_execution or not use_camera:
|
| 57 |
+
height, width = sample.shape[2:]
|
| 58 |
+
print(f" Input resized to {width}x{height} before entering the encoder")
|
| 59 |
+
first_execution = False
|
| 60 |
+
|
| 61 |
+
prediction = model.forward(sample)
|
| 62 |
+
prediction = (
|
| 63 |
+
torch.nn.functional.interpolate(
|
| 64 |
+
prediction.unsqueeze(1),
|
| 65 |
+
size=target_size[::-1],
|
| 66 |
+
mode="bicubic",
|
| 67 |
+
align_corners=False,
|
| 68 |
+
)
|
| 69 |
+
.squeeze()
|
| 70 |
+
.cpu()
|
| 71 |
+
.numpy()
|
| 72 |
+
)
|
| 73 |
+
|
| 74 |
+
return prediction
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def create_side_by_side(image, depth, grayscale):
|
| 78 |
+
"""
|
| 79 |
+
Take an RGB image and depth map and place them side by side. This includes a proper normalization of the depth map
|
| 80 |
+
for better visibility.
|
| 81 |
+
|
| 82 |
+
Args:
|
| 83 |
+
image: the RGB image
|
| 84 |
+
depth: the depth map
|
| 85 |
+
grayscale: use a grayscale colormap?
|
| 86 |
+
|
| 87 |
+
Returns:
|
| 88 |
+
the image and depth map place side by side
|
| 89 |
+
"""
|
| 90 |
+
depth_min = depth.min()
|
| 91 |
+
depth_max = depth.max()
|
| 92 |
+
normalized_depth = 255 * (depth - depth_min) / (depth_max - depth_min)
|
| 93 |
+
normalized_depth *= 3
|
| 94 |
+
|
| 95 |
+
right_side = np.repeat(np.expand_dims(normalized_depth, 2), 3, axis=2) / 3
|
| 96 |
+
if not grayscale:
|
| 97 |
+
right_side = cv2.applyColorMap(np.uint8(right_side), cv2.COLORMAP_INFERNO)
|
| 98 |
+
|
| 99 |
+
if image is None:
|
| 100 |
+
return right_side
|
| 101 |
+
else:
|
| 102 |
+
return np.concatenate((image, right_side), axis=1)
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def run(input_path, output_path, model_path, model_type="dpt_beit_large_512", optimize=False, side=False, height=None,
|
| 106 |
+
square=False, grayscale=False):
|
| 107 |
+
"""Run MonoDepthNN to compute depth maps.
|
| 108 |
+
|
| 109 |
+
Args:
|
| 110 |
+
input_path (str): path to input folder
|
| 111 |
+
output_path (str): path to output folder
|
| 112 |
+
model_path (str): path to saved model
|
| 113 |
+
model_type (str): the model type
|
| 114 |
+
optimize (bool): optimize the model to half-floats on CUDA?
|
| 115 |
+
side (bool): RGB and depth side by side in output images?
|
| 116 |
+
height (int): inference encoder image height
|
| 117 |
+
square (bool): resize to a square resolution?
|
| 118 |
+
grayscale (bool): use a grayscale colormap?
|
| 119 |
+
"""
|
| 120 |
+
print("Initialize")
|
| 121 |
+
|
| 122 |
+
# select device
|
| 123 |
+
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
| 124 |
+
print("Device: %s" % device)
|
| 125 |
+
|
| 126 |
+
model, transform, net_w, net_h = load_model(device, model_path, model_type, optimize, height, square)
|
| 127 |
+
|
| 128 |
+
# get input
|
| 129 |
+
if input_path is not None:
|
| 130 |
+
image_names = glob.glob(os.path.join(input_path, "*"))
|
| 131 |
+
num_images = len(image_names)
|
| 132 |
+
else:
|
| 133 |
+
print("No input path specified. Grabbing images from camera.")
|
| 134 |
+
|
| 135 |
+
# create output folder
|
| 136 |
+
if output_path is not None:
|
| 137 |
+
os.makedirs(output_path, exist_ok=True)
|
| 138 |
+
|
| 139 |
+
print("Start processing")
|
| 140 |
+
|
| 141 |
+
if input_path is not None:
|
| 142 |
+
if output_path is None:
|
| 143 |
+
print("Warning: No output path specified. Images will be processed but not shown or stored anywhere.")
|
| 144 |
+
for index, image_name in enumerate(image_names):
|
| 145 |
+
|
| 146 |
+
print(" Processing {} ({}/{})".format(image_name, index + 1, num_images))
|
| 147 |
+
|
| 148 |
+
# input
|
| 149 |
+
original_image_rgb = utils.read_image(image_name) # in [0, 1]
|
| 150 |
+
image = transform({"image": original_image_rgb})["image"]
|
| 151 |
+
|
| 152 |
+
# compute
|
| 153 |
+
with torch.no_grad():
|
| 154 |
+
prediction = process(device, model, model_type, image, (net_w, net_h), original_image_rgb.shape[1::-1],
|
| 155 |
+
optimize, False)
|
| 156 |
+
|
| 157 |
+
# output
|
| 158 |
+
if output_path is not None:
|
| 159 |
+
filename = os.path.join(
|
| 160 |
+
output_path, os.path.splitext(os.path.basename(image_name))[0] + '-' + model_type
|
| 161 |
+
)
|
| 162 |
+
if not side:
|
| 163 |
+
utils.write_depth(filename, prediction, grayscale, bits=2)
|
| 164 |
+
else:
|
| 165 |
+
original_image_bgr = np.flip(original_image_rgb, 2)
|
| 166 |
+
content = create_side_by_side(original_image_bgr*255, prediction, grayscale)
|
| 167 |
+
cv2.imwrite(filename + ".png", content)
|
| 168 |
+
utils.write_pfm(filename + ".pfm", prediction.astype(np.float32))
|
| 169 |
+
|
| 170 |
+
else:
|
| 171 |
+
with torch.no_grad():
|
| 172 |
+
fps = 1
|
| 173 |
+
video = VideoStream(0).start()
|
| 174 |
+
time_start = time.time()
|
| 175 |
+
frame_index = 0
|
| 176 |
+
while True:
|
| 177 |
+
frame = video.read()
|
| 178 |
+
if frame is not None:
|
| 179 |
+
original_image_rgb = np.flip(frame, 2) # in [0, 255] (flip required to get RGB)
|
| 180 |
+
image = transform({"image": original_image_rgb/255})["image"]
|
| 181 |
+
|
| 182 |
+
prediction = process(device, model, model_type, image, (net_w, net_h),
|
| 183 |
+
original_image_rgb.shape[1::-1], optimize, True)
|
| 184 |
+
|
| 185 |
+
original_image_bgr = np.flip(original_image_rgb, 2) if side else None
|
| 186 |
+
content = create_side_by_side(original_image_bgr, prediction, grayscale)
|
| 187 |
+
cv2.imshow('MiDaS Depth Estimation - Press Escape to close window ', content/255)
|
| 188 |
+
|
| 189 |
+
if output_path is not None:
|
| 190 |
+
filename = os.path.join(output_path, 'Camera' + '-' + model_type + '_' + str(frame_index))
|
| 191 |
+
cv2.imwrite(filename + ".png", content)
|
| 192 |
+
|
| 193 |
+
alpha = 0.1
|
| 194 |
+
if time.time()-time_start > 0:
|
| 195 |
+
fps = (1 - alpha) * fps + alpha * 1 / (time.time()-time_start) # exponential moving average
|
| 196 |
+
time_start = time.time()
|
| 197 |
+
print(f"\rFPS: {round(fps,2)}", end="")
|
| 198 |
+
|
| 199 |
+
if cv2.waitKey(1) == 27: # Escape key
|
| 200 |
+
break
|
| 201 |
+
|
| 202 |
+
frame_index += 1
|
| 203 |
+
print()
|
| 204 |
+
|
| 205 |
+
print("Finished")
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
if __name__ == "__main__":
|
| 209 |
+
parser = argparse.ArgumentParser()
|
| 210 |
+
|
| 211 |
+
parser.add_argument('-i', '--input_path',
|
| 212 |
+
default=None,
|
| 213 |
+
help='Folder with input images (if no input path is specified, images are tried to be grabbed '
|
| 214 |
+
'from camera)'
|
| 215 |
+
)
|
| 216 |
+
|
| 217 |
+
parser.add_argument('-o', '--output_path',
|
| 218 |
+
default=None,
|
| 219 |
+
help='Folder for output images'
|
| 220 |
+
)
|
| 221 |
+
|
| 222 |
+
parser.add_argument('-m', '--model_weights',
|
| 223 |
+
default=None,
|
| 224 |
+
help='Path to the trained weights of model'
|
| 225 |
+
)
|
| 226 |
+
|
| 227 |
+
parser.add_argument('-t', '--model_type',
|
| 228 |
+
default='dpt_beit_large_512',
|
| 229 |
+
help='Model type: '
|
| 230 |
+
'dpt_beit_large_512, dpt_beit_large_384, dpt_beit_base_384, dpt_swin2_large_384, '
|
| 231 |
+
'dpt_swin2_base_384, dpt_swin2_tiny_256, dpt_swin_large_384, dpt_next_vit_large_384, '
|
| 232 |
+
'dpt_levit_224, dpt_large_384, dpt_hybrid_384, midas_v21_384, midas_v21_small_256 or '
|
| 233 |
+
'openvino_midas_v21_small_256'
|
| 234 |
+
)
|
| 235 |
+
|
| 236 |
+
parser.add_argument('-s', '--side',
|
| 237 |
+
action='store_true',
|
| 238 |
+
help='Output images contain RGB and depth images side by side'
|
| 239 |
+
)
|
| 240 |
+
|
| 241 |
+
parser.add_argument('--optimize', dest='optimize', action='store_true', help='Use half-float optimization')
|
| 242 |
+
parser.set_defaults(optimize=False)
|
| 243 |
+
|
| 244 |
+
parser.add_argument('--height',
|
| 245 |
+
type=int, default=None,
|
| 246 |
+
help='Preferred height of images feed into the encoder during inference. Note that the '
|
| 247 |
+
'preferred height may differ from the actual height, because an alignment to multiples of '
|
| 248 |
+
'32 takes place. Many models support only the height chosen during training, which is '
|
| 249 |
+
'used automatically if this parameter is not set.'
|
| 250 |
+
)
|
| 251 |
+
parser.add_argument('--square',
|
| 252 |
+
action='store_true',
|
| 253 |
+
help='Option to resize images to a square resolution by changing their widths when images are '
|
| 254 |
+
'fed into the encoder during inference. If this parameter is not set, the aspect ratio of '
|
| 255 |
+
'images is tried to be preserved if supported by the model.'
|
| 256 |
+
)
|
| 257 |
+
parser.add_argument('--grayscale',
|
| 258 |
+
action='store_true',
|
| 259 |
+
help='Use a grayscale colormap instead of the inferno one. Although the inferno colormap, '
|
| 260 |
+
'which is used by default, is better for visibility, it does not allow storing 16-bit '
|
| 261 |
+
'depth values in PNGs but only 8-bit ones due to the precision limitation of this '
|
| 262 |
+
'colormap.'
|
| 263 |
+
)
|
| 264 |
+
|
| 265 |
+
args = parser.parse_args()
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
if args.model_weights is None:
|
| 269 |
+
args.model_weights = default_models[args.model_type]
|
| 270 |
+
|
| 271 |
+
# set torch options
|
| 272 |
+
torch.backends.cudnn.enabled = True
|
| 273 |
+
torch.backends.cudnn.benchmark = True
|
| 274 |
+
|
| 275 |
+
# compute depth maps
|
| 276 |
+
run(args.input_path, args.output_path, args.model_weights, args.model_type, args.optimize, args.side, args.height,
|
| 277 |
+
args.square, args.grayscale)
|
RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/utils.py
ADDED
|
@@ -0,0 +1,199 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Utils for monoDepth.
|
| 2 |
+
"""
|
| 3 |
+
import sys
|
| 4 |
+
import re
|
| 5 |
+
import numpy as np
|
| 6 |
+
import cv2
|
| 7 |
+
import torch
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def read_pfm(path):
    """Read a PFM (Portable Float Map) file.

    Args:
        path (str): path to file

    Returns:
        tuple: (data, scale) where ``data`` is a numpy float array of shape
            (height, width, 3) for color files or (height, width) for
            grayscale ones, and ``scale`` is the absolute scale factor from
            the header.
    """
    with open(path, "rb") as pfm_file:
        # First header line: "PF" = 3-channel color, "Pf" = single channel.
        header = pfm_file.readline().rstrip().decode("ascii")
        if header == "PF":
            color = True
        elif header == "Pf":
            color = False
        else:
            raise Exception("Not a PFM file: " + path)

        # Second header line: "<width> <height>".
        dim_match = re.match(r"^(\d+)\s(\d+)\s$", pfm_file.readline().decode("ascii"))
        if not dim_match:
            raise Exception("Malformed PFM header.")
        width, height = (int(value) for value in dim_match.groups())

        # Third header line: the sign encodes endianness, the magnitude the scale.
        scale = float(pfm_file.readline().decode("ascii").rstrip())
        if scale < 0:
            endian = "<"  # little-endian
            scale = -scale
        else:
            endian = ">"  # big-endian

        shape = (height, width, 3) if color else (height, width)
        data = np.fromfile(pfm_file, endian + "f").reshape(shape)

        # PFM stores rows bottom-to-top; flip to conventional top-to-bottom.
        return np.flipud(data), scale
def write_pfm(path, image, scale=1):
    """Write a PFM (Portable Float Map) file.

    Args:
        path (str): path to file
        image (array): float32 data of shape H x W x 3 (color), or
            H x W x 1 / H x W (grayscale)
        scale (int, optional): Scale. Defaults to 1.

    Raises:
        Exception: if the dtype is not float32 or the shape is unsupported.
    """

    with open(path, "wb") as file:
        color = None

        if image.dtype.name != "float32":
            raise Exception("Image dtype must be float32.")

        # PFM stores rows bottom-to-top.
        image = np.flipud(image)

        if len(image.shape) == 3 and image.shape[2] == 3:  # color image
            color = True
        elif (
            len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1
        ):  # greyscale
            color = False
        else:
            raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.")

        # BUG FIX: the original `"PF\n" if color else "Pf\n".encode()` only
        # encoded the grayscale branch (`.encode()` binds tighter than the
        # conditional), so writing a color image passed a str to a binary
        # file and raised TypeError.
        file.write(("PF\n" if color else "Pf\n").encode())
        file.write("%d %d\n".encode() % (image.shape[1], image.shape[0]))

        # A negative scale in the header signals little-endian pixel data.
        endian = image.dtype.byteorder

        if endian == "<" or (endian == "=" and sys.byteorder == "little"):
            scale = -scale

        file.write("%f\n".encode() % scale)

        image.tofile(file)
def read_image(path):
    """Read image and output RGB image (0-1).

    Args:
        path (str): path to file

    Returns:
        array: RGB image (0-1), shape (H, W, 3)

    Raises:
        FileNotFoundError: if the file is missing or cannot be decoded.
    """
    img = cv2.imread(path)

    # cv2.imread returns None instead of raising on failure; fail loudly
    # here rather than with an obscure AttributeError on img.ndim below.
    if img is None:
        raise FileNotFoundError("Could not read image: " + path)

    if img.ndim == 2:
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

    # OpenCV loads BGR; convert to RGB and normalize to [0, 1].
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255.0

    return img
def resize_image(img, target_size=384):
    """Resize image and make it fit for network.

    Args:
        img (array): RGB image, shape (H, W, C)
        target_size (int, optional): preferred length of the longer side
            after resizing, before rounding up to a multiple of 32.
            Defaults to 384, the value previously hard-coded here.

    Returns:
        tensor: data ready for network, shape (1, C, H', W') with H', W'
            multiples of 32
    """
    height_orig = img.shape[0]
    width_orig = img.shape[1]

    # Scale so the longer side maps to target_size, preserving aspect ratio.
    scale = max(width_orig, height_orig) / target_size

    # Round resulting dimensions up to multiples of 32 (network requirement).
    height = (np.ceil(height_orig / scale / 32) * 32).astype(int)
    width = (np.ceil(width_orig / scale / 32) * 32).astype(int)

    img_resized = cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA)

    # HWC -> CHW and add a batch dimension.
    img_resized = (
        torch.from_numpy(np.transpose(img_resized, (2, 0, 1))).contiguous().float()
    )
    img_resized = img_resized.unsqueeze(0)

    return img_resized
def resize_depth(depth, width, height):
    """Resize depth map and bring to CPU (numpy).

    Args:
        depth (tensor): depth; indexed as depth[0, :, :, :], so a batched
            tensor is assumed — TODO confirm against callers
        width (int): image width
        height (int): image height

    Returns:
        array: processed depth, shape (height, width)
    """
    # Drop the batch entry, squeeze singleton dims, and move to host memory.
    depth_np = torch.squeeze(depth[0, :, :, :]).to("cpu").numpy()

    return cv2.resize(depth_np, (width, height), interpolation=cv2.INTER_CUBIC)
def write_depth(path, depth, grayscale, bits=1):
    """Write depth map to png file.

    Args:
        path (str): filepath without extension
        depth (array): depth
        grayscale (bool): use a grayscale colormap? If False, the inferno
            colormap is applied and output is forced to 8-bit.
        bits (int, optional): bytes per channel, 1 (uint8) or 2 (uint16).
            Defaults to 1.

    Raises:
        ValueError: if bits is not 1 or 2 (previously this silently wrote
            no file at all).
    """
    # The inferno colormap path only supports 8-bit values.
    if not grayscale:
        bits = 1

    if bits not in (1, 2):
        raise ValueError("bits must be 1 or 2, got %r" % (bits,))

    # Sanitize NaN/inf so normalization below stays finite.
    if not np.isfinite(depth).all():
        depth = np.nan_to_num(depth, nan=0.0, posinf=0.0, neginf=0.0)
        print("WARNING: Non-finite depth values present")

    depth_min = depth.min()
    depth_max = depth.max()

    max_val = (2**(8*bits))-1

    # Normalize to [0, max_val]; a constant depth map degenerates to zeros.
    if depth_max - depth_min > np.finfo("float").eps:
        out = max_val * (depth - depth_min) / (depth_max - depth_min)
    else:
        out = np.zeros(depth.shape, dtype=depth.dtype)

    if not grayscale:
        out = cv2.applyColorMap(np.uint8(out), cv2.COLORMAP_INFERNO)

    if bits == 1:
        cv2.imwrite(path + ".png", out.astype("uint8"))
    elif bits == 2:
        cv2.imwrite(path + ".png", out.astype("uint16"))

    return