Upload 7 files

- clip/__init__.py +1 -0
- clip/bpe_simple_vocab_16e6.txt.gz +3 -0
- clip/clip.py +241 -0
- clip/clipseg.py +538 -0
- clip/model.py +436 -0
- clip/simple_tokenizer.py +132 -0
- clip/vitseg.py +286 -0
clip/__init__.py
ADDED
@@ -0,0 +1 @@
from .clip import *
clip/bpe_simple_vocab_16e6.txt.gz
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:924691ac288e54409236115652ad4aa250f48203de50a9e4722a6ecd48d6804a
size 1356917
clip/clip.py
ADDED
@@ -0,0 +1,241 @@
import hashlib
import os
import urllib
import warnings
from typing import Any, Union, List

import torch
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm

from .model import build_model
from .simple_tokenizer import SimpleTokenizer as _Tokenizer

try:
    from torchvision.transforms import InterpolationMode
    BICUBIC = InterpolationMode.BICUBIC
except ImportError:
    BICUBIC = Image.BICUBIC


__all__ = ["available_models", "load", "tokenize"]
_tokenizer = _Tokenizer()

_MODELS = {
    "RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
    "RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
    "RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt",
    "RN50x16": "https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt",
    "RN50x64": "https://openaipublic.azureedge.net/clip/models/be1cfb55d75a9666199fb2206c106743da0f6468c9d327f3e0d0a543a9919d9c/RN50x64.pt",
    "ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
    "ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
    "ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt",
    "ViT-L/14@336px": "https://openaipublic.azureedge.net/clip/models/3035c92b350959924f9f00213499208652fc7ea050643e8b385c2dac08641f02/ViT-L-14-336px.pt",
}


def _download(url: str, root: str):
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)

    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
            return download_target
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True, unit_divisor=1024) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
        raise RuntimeError("Model has been downloaded but the SHA256 checksum does not match")

    return download_target


def _convert_image_to_rgb(image):
    return image.convert("RGB")


def _transform(n_px):
    return Compose([
        Resize(n_px, interpolation=BICUBIC),
        CenterCrop(n_px),
        _convert_image_to_rgb,
        ToTensor(),
        Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
    ])


def available_models() -> List[str]:
    """Returns the names of available CLIP models"""
    return list(_MODELS.keys())


def load(name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit: bool = False, download_root: str = None):
    """Load a CLIP model

    Parameters
    ----------
    name : str
        A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict

    device : Union[str, torch.device]
        The device to put the loaded model

    jit : bool
        Whether to load the optimized JIT model or the more hackable non-JIT model (default).

    download_root: str
        path to download the model files; by default, it uses "~/.cache/clip"

    Returns
    -------
    model : torch.nn.Module
        The CLIP model

    preprocess : Callable[[PIL.Image], torch.Tensor]
        A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
    """
    if name in _MODELS:
        model_path = _download(_MODELS[name], download_root or os.path.expanduser("~/.cache/clip"))
    elif os.path.isfile(name):
        model_path = name
    else:
        raise RuntimeError(f"Model {name} not found; available models = {available_models()}")

    with open(model_path, 'rb') as opened_file:
        try:
            # loading JIT archive
            model = torch.jit.load(opened_file, map_location=device if jit else "cpu").eval()
            state_dict = None
        except RuntimeError:
            # loading saved state dict
            if jit:
                warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
                jit = False
            state_dict = torch.load(opened_file, map_location="cpu")

    if not jit:
        model = build_model(state_dict or model.state_dict()).to(device)
        if str(device) == "cpu":
            model.float()
        return model, _transform(model.visual.input_resolution)

    # patch the device names
    device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
    device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]

    def _node_get(node: torch._C.Node, key: str):
        """Gets attributes of a node which is polymorphic over return type.

        From https://github.com/pytorch/pytorch/pull/82628
        """
        sel = node.kindOf(key)
        return getattr(node, sel)(key)

    def patch_device(module):
        try:
            graphs = [module.graph] if hasattr(module, "graph") else []
        except RuntimeError:
            graphs = []

        if hasattr(module, "forward1"):
            graphs.append(module.forward1.graph)

        for graph in graphs:
            for node in graph.findAllNodes("prim::Constant"):
                if "value" in node.attributeNames() and str(_node_get(node, "value")).startswith("cuda"):
                    node.copyAttributes(device_node)

    model.apply(patch_device)
    patch_device(model.encode_image)
    patch_device(model.encode_text)

    # patch dtype to float32 on CPU
    if str(device) == "cpu":
        float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
        float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
        float_node = float_input.node()

        def patch_float(module):
            try:
                graphs = [module.graph] if hasattr(module, "graph") else []
            except RuntimeError:
                graphs = []

            if hasattr(module, "forward1"):
                graphs.append(module.forward1.graph)

            for graph in graphs:
                for node in graph.findAllNodes("aten::to"):
                    inputs = list(node.inputs())
                    for i in [1, 2]:  # dtype can be the second or third argument to aten::to()
                        if _node_get(inputs[i].node(), "value") == 5:
                            inputs[i].node().copyAttributes(float_node)

        model.apply(patch_float)
        patch_float(model.encode_image)
        patch_float(model.encode_text)

        model.float()

    return model, _transform(model.input_resolution.item())


def tokenize(texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False) -> Union[torch.IntTensor, torch.LongTensor]:
    """
    Returns the tokenized representation of given input string(s)

    Parameters
    ----------
    texts : Union[str, List[str]]
        An input string or a list of input strings to tokenize

    context_length : int
        The context length to use; all CLIP models use 77 as the context length

    truncate: bool
        Whether to truncate the text in case its encoding is longer than the context length

    Returns
    -------
    A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length].
    We return LongTensor when torch version is <1.8.0, since older index_select requires indices to be long.
    """
    if isinstance(texts, str):
        texts = [texts]

    sot_token = _tokenizer.encoder["<|startoftext|>"]
    eot_token = _tokenizer.encoder["<|endoftext|>"]
    all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
    # if packaging.version.parse(torch.__version__) < packaging.version.parse("1.8.0"):
    #     result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
    # else:
    result = torch.zeros(len(all_tokens), context_length, dtype=torch.int)

    for i, tokens in enumerate(all_tokens):
        if len(tokens) > context_length:
            if truncate:
                tokens = tokens[:context_length]
                tokens[-1] = eot_token
            else:
                raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
        result[i, :len(tokens)] = torch.tensor(tokens)

    return result
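For orientation, a minimal usage sketch of the loader and tokenizer defined in clip/clip.py above; the checkpoint name is one of the entries in _MODELS, while the image path and captions are hypothetical placeholders, not part of this commit.

import torch
from PIL import Image
import clip

# load weights and the matching preprocessing transform (downloads to ~/.cache/clip by default)
model, preprocess = clip.load("ViT-B/16", device="cpu", jit=False)

# encode one image and two candidate captions, then compare them
image = preprocess(Image.open("example.jpg")).unsqueeze(0)   # hypothetical input file
text = clip.tokenize(["a photo of a cat", "a photo of a dog"])

with torch.no_grad():
    logits_per_image, _ = model(image, text)
    probs = logits_per_image.softmax(dim=-1)   # similarity of the image to each caption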
clip/clipseg.py
ADDED
@@ -0,0 +1,538 @@
import math
from os.path import basename, dirname, join, isfile
import torch
from torch import nn
from torch.nn import functional as nnf
from torch.nn.modules.activation import ReLU


def get_prompt_list(prompt):
    if prompt == 'plain':
        return ['{}']
    elif prompt == 'fixed':
        return ['a photo of a {}.']
    elif prompt == 'shuffle':
        return ['a photo of a {}.', 'a photograph of a {}.', 'an image of a {}.', '{}.']
    elif prompt == 'shuffle+':
        return ['a photo of a {}.', 'a photograph of a {}.', 'an image of a {}.', '{}.',
                'a cropped photo of a {}.', 'a good photo of a {}.', 'a photo of one {}.',
                'a bad photo of a {}.', 'a photo of the {}.']
    else:
        raise ValueError('Invalid value for prompt')


def forward_multihead_attention(x, b, with_aff=False, attn_mask=None):
    """
    Simplified version of multihead attention (taken from torch source code but without tons of if clauses).
    The mlp and layer norm come from CLIP.
    x: input.
    b: multihead attention module.
    """

    x_ = b.ln_1(x)
    q, k, v = nnf.linear(x_, b.attn.in_proj_weight, b.attn.in_proj_bias).chunk(3, dim=-1)
    tgt_len, bsz, embed_dim = q.size()

    head_dim = embed_dim // b.attn.num_heads
    scaling = float(head_dim) ** -0.5

    q = q.contiguous().view(tgt_len, bsz * b.attn.num_heads, b.attn.head_dim).transpose(0, 1)
    k = k.contiguous().view(-1, bsz * b.attn.num_heads, b.attn.head_dim).transpose(0, 1)
    v = v.contiguous().view(-1, bsz * b.attn.num_heads, b.attn.head_dim).transpose(0, 1)

    q = q * scaling

    attn_output_weights = torch.bmm(q, k.transpose(1, 2))  # n_heads * batch_size, tokens^2, tokens^2
    if attn_mask is not None:

        attn_mask_type, attn_mask = attn_mask
        n_heads = attn_output_weights.size(0) // attn_mask.size(0)
        attn_mask = attn_mask.repeat(n_heads, 1)

        if attn_mask_type == 'cls_token':
            # the mask only affects similarities compared to the readout-token.
            attn_output_weights[:, 0, 1:] = attn_output_weights[:, 0, 1:] * attn_mask[None,...]
            # attn_output_weights[:, 0, 0] = 0*attn_output_weights[:, 0, 0]

        if attn_mask_type == 'all':
            # print(attn_output_weights.shape, attn_mask[:, None].shape)
            attn_output_weights[:, 1:, 1:] = attn_output_weights[:, 1:, 1:] * attn_mask[:, None]

    attn_output_weights = torch.softmax(attn_output_weights, dim=-1)

    attn_output = torch.bmm(attn_output_weights, v)
    attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
    attn_output = b.attn.out_proj(attn_output)

    x = x + attn_output
    x = x + b.mlp(b.ln_2(x))

    if with_aff:
        return x, attn_output_weights
    else:
        return x


class CLIPDenseBase(nn.Module):

    def __init__(self, version, reduce_cond, reduce_dim, prompt, n_tokens):
        super().__init__()

        import clip

        # prec = torch.FloatTensor
        self.clip_model, _ = clip.load(version, device='cpu', jit=False)
        self.model = self.clip_model.visual

        # if not None, scale conv weights such that we obtain n_tokens.
        self.n_tokens = n_tokens

        for p in self.clip_model.parameters():
            p.requires_grad_(False)

        # conditional
        if reduce_cond is not None:
            self.reduce_cond = nn.Linear(512, reduce_cond)
            for p in self.reduce_cond.parameters():
                p.requires_grad_(False)
        else:
            self.reduce_cond = None

        self.film_mul = nn.Linear(512 if reduce_cond is None else reduce_cond, reduce_dim)
        self.film_add = nn.Linear(512 if reduce_cond is None else reduce_cond, reduce_dim)

        self.reduce = nn.Linear(768, reduce_dim)

        self.prompt_list = get_prompt_list(prompt)

        # precomputed prompts
        import pickle
        if isfile('precomputed_prompt_vectors.pickle'):
            precomp = pickle.load(open('precomputed_prompt_vectors.pickle', 'rb'))
            self.precomputed_prompts = {k: torch.from_numpy(v) for k, v in precomp.items()}
        else:
            self.precomputed_prompts = dict()

    def rescaled_pos_emb(self, new_size):
        assert len(new_size) == 2

        a = self.model.positional_embedding[1:].T.view(1, 768, *self.token_shape)
        b = nnf.interpolate(a, new_size, mode='bicubic', align_corners=False).squeeze(0).view(768, new_size[0]*new_size[1]).T
        return torch.cat([self.model.positional_embedding[:1], b])

    def visual_forward(self, x_inp, extract_layers=(), skip=False, mask=None):

        with torch.no_grad():

            inp_size = x_inp.shape[2:]

            if self.n_tokens is not None:
                stride2 = x_inp.shape[2] // self.n_tokens
                conv_weight2 = nnf.interpolate(self.model.conv1.weight, (stride2, stride2), mode='bilinear', align_corners=True)
                x = nnf.conv2d(x_inp, conv_weight2, bias=self.model.conv1.bias, stride=stride2, dilation=self.model.conv1.dilation)
            else:
                x = self.model.conv1(x_inp)  # shape = [*, width, grid, grid]

            x = x.reshape(x.shape[0], x.shape[1], -1)  # shape = [*, width, grid ** 2]
            x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]

            x = torch.cat([self.model.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1)  # shape = [*, grid ** 2 + 1, width]

            standard_n_tokens = 50 if self.model.conv1.kernel_size[0] == 32 else 197

            if x.shape[1] != standard_n_tokens:
                new_shape = int(math.sqrt(x.shape[1]-1))
                x = x + self.rescaled_pos_emb((new_shape, new_shape)).to(x.dtype)[None,:,:]
            else:
                x = x + self.model.positional_embedding.to(x.dtype)

            x = self.model.ln_pre(x)

            x = x.permute(1, 0, 2)  # NLD -> LND

            activations, affinities = [], []
            for i, res_block in enumerate(self.model.transformer.resblocks):

                if mask is not None:
                    mask_layer, mask_type, mask_tensor = mask
                    if mask_layer == i or mask_layer == 'all':
                        # import ipdb; ipdb.set_trace()
                        size = int(math.sqrt(x.shape[0] - 1))

                        attn_mask = (mask_type, nnf.interpolate(mask_tensor.unsqueeze(1).float(), (size, size)).view(mask_tensor.shape[0], size * size))

                    else:
                        attn_mask = None
                else:
                    attn_mask = None

                x, aff_per_head = forward_multihead_attention(x, res_block, with_aff=True, attn_mask=attn_mask)

                if i in extract_layers:
                    affinities += [aff_per_head]

                    # if self.n_tokens is not None:
                    #     activations += [nnf.interpolate(x, inp_size, mode='bilinear', align_corners=True)]
                    # else:
                    activations += [x]

                if len(extract_layers) > 0 and i == max(extract_layers) and skip:
                    print('early skip')
                    break

            x = x.permute(1, 0, 2)  # LND -> NLD
            x = self.model.ln_post(x[:, 0, :])

            if self.model.proj is not None:
                x = x @ self.model.proj

            return x, activations, affinities

    def sample_prompts(self, words, prompt_list=None):

        prompt_list = prompt_list if prompt_list is not None else self.prompt_list

        prompt_indices = torch.multinomial(torch.ones(len(prompt_list)), len(words), replacement=True)
        prompts = [prompt_list[i] for i in prompt_indices]
        return [promt.format(w) for promt, w in zip(prompts, words)]

    def get_cond_vec(self, conditional, batch_size):
        # compute conditional from a single string
        if conditional is not None and type(conditional) == str:
            cond = self.compute_conditional(conditional)
            cond = cond.repeat(batch_size, 1)

        # compute conditional from string list/tuple
        elif conditional is not None and type(conditional) in {list, tuple} and type(conditional[0]) == str:
            assert len(conditional) == batch_size
            cond = self.compute_conditional(conditional)

        # use conditional directly
        elif conditional is not None and type(conditional) == torch.Tensor and conditional.ndim == 2:
            cond = conditional

        # compute conditional from image
        elif conditional is not None and type(conditional) == torch.Tensor:
            with torch.no_grad():
                cond, _, _ = self.visual_forward(conditional)
        else:
            raise ValueError('invalid conditional')
        return cond

    def compute_conditional(self, conditional):
        import clip

        dev = next(self.parameters()).device

        if type(conditional) in {list, tuple}:
            text_tokens = clip.tokenize(conditional).to(dev)
            cond = self.clip_model.encode_text(text_tokens)
        else:
            if conditional in self.precomputed_prompts:
                cond = self.precomputed_prompts[conditional].float().to(dev)
            else:
                text_tokens = clip.tokenize([conditional]).to(dev)
                cond = self.clip_model.encode_text(text_tokens)[0]

        if self.shift_vector is not None:
            return cond + self.shift_vector
        else:
            return cond


def clip_load_untrained(version):
    assert version == 'ViT-B/16'
    from clip.model import CLIP
    from clip.clip import _MODELS, _download
    model = torch.jit.load(_download(_MODELS['ViT-B/16'])).eval()
    state_dict = model.state_dict()

    vision_width = state_dict["visual.conv1.weight"].shape[0]
    vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
    vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
    grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
    image_resolution = vision_patch_size * grid_size
    embed_dim = state_dict["text_projection"].shape[1]
    context_length = state_dict["positional_embedding"].shape[0]
    vocab_size = state_dict["token_embedding.weight"].shape[0]
    transformer_width = state_dict["ln_final.weight"].shape[0]
    transformer_heads = transformer_width // 64
    transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))

    return CLIP(embed_dim, image_resolution, vision_layers, vision_width, vision_patch_size,
                context_length, vocab_size, transformer_width, transformer_heads, transformer_layers)


class CLIPDensePredT(CLIPDenseBase):

    def __init__(self, version='ViT-B/32', extract_layers=(3, 6, 9), cond_layer=0, reduce_dim=128, n_heads=4, prompt='fixed',
                 extra_blocks=0, reduce_cond=None, fix_shift=False,
                 learn_trans_conv_only=False, limit_to_clip_only=False, upsample=False,
                 add_calibration=False, rev_activations=False, trans_conv=None, n_tokens=None, complex_trans_conv=False):

        super().__init__(version, reduce_cond, reduce_dim, prompt, n_tokens)
        # device = 'cpu'

        self.extract_layers = extract_layers
        self.cond_layer = cond_layer
        self.limit_to_clip_only = limit_to_clip_only
        self.process_cond = None
        self.rev_activations = rev_activations

        depth = len(extract_layers)

        if add_calibration:
            self.calibration_conds = 1

        self.upsample_proj = nn.Conv2d(reduce_dim, 1, kernel_size=1) if upsample else None

        self.add_activation1 = True

        self.version = version

        self.token_shape = {'ViT-B/32': (7, 7), 'ViT-B/16': (14, 14)}[version]

        if fix_shift:
            # self.shift_vector = nn.Parameter(torch.load(join(dirname(basename(__file__)), 'clip_text_shift_vector.pth')), requires_grad=False)
            self.shift_vector = nn.Parameter(torch.load(join(dirname(basename(__file__)), 'shift_text_to_vis.pth')), requires_grad=False)
            # self.shift_vector = nn.Parameter(-1*torch.load(join(dirname(basename(__file__)), 'shift2.pth')), requires_grad=False)
        else:
            self.shift_vector = None

        if trans_conv is None:
            trans_conv_ks = {'ViT-B/32': (32, 32), 'ViT-B/16': (16, 16)}[version]
        else:
            # explicitly define transposed conv kernel size
            trans_conv_ks = (trans_conv, trans_conv)

        if not complex_trans_conv:
            self.trans_conv = nn.ConvTranspose2d(reduce_dim, 1, trans_conv_ks, stride=trans_conv_ks)
        else:
            assert trans_conv_ks[0] == trans_conv_ks[1]

            tp_kernels = (trans_conv_ks[0] // 4, trans_conv_ks[0] // 4)

            self.trans_conv = nn.Sequential(
                nn.Conv2d(reduce_dim, reduce_dim, kernel_size=3, padding=1),
                nn.ReLU(),
                nn.ConvTranspose2d(reduce_dim, reduce_dim // 2, kernel_size=tp_kernels[0], stride=tp_kernels[0]),
                nn.ReLU(),
                nn.ConvTranspose2d(reduce_dim // 2, 1, kernel_size=tp_kernels[1], stride=tp_kernels[1]),
            )

        # self.trans_conv = nn.ConvTranspose2d(reduce_dim, 1, trans_conv_ks, stride=trans_conv_ks)

        assert len(self.extract_layers) == depth

        self.reduces = nn.ModuleList([nn.Linear(768, reduce_dim) for _ in range(depth)])
        self.blocks = nn.ModuleList([nn.TransformerEncoderLayer(d_model=reduce_dim, nhead=n_heads) for _ in range(len(self.extract_layers))])
        self.extra_blocks = nn.ModuleList([nn.TransformerEncoderLayer(d_model=reduce_dim, nhead=n_heads) for _ in range(extra_blocks)])

        # refinement and trans conv

        if learn_trans_conv_only:
            for p in self.parameters():
                p.requires_grad_(False)

            for p in self.trans_conv.parameters():
                p.requires_grad_(True)

        self.prompt_list = get_prompt_list(prompt)

    def forward(self, inp_image, conditional=None, return_features=False, mask=None):

        assert type(return_features) == bool

        inp_image = inp_image.to(self.model.positional_embedding.device)

        if mask is not None:
            raise ValueError('mask not supported')

        # x_inp = normalize(inp_image)
        x_inp = inp_image

        bs, dev = inp_image.shape[0], x_inp.device

        cond = self.get_cond_vec(conditional, bs)

        visual_q, activations, _ = self.visual_forward(x_inp, extract_layers=[0] + list(self.extract_layers))

        activation1 = activations[0]
        activations = activations[1:]

        _activations = activations[::-1] if not self.rev_activations else activations

        a = None
        for i, (activation, block, reduce) in enumerate(zip(_activations, self.blocks, self.reduces)):

            if a is not None:
                a = reduce(activation) + a
            else:
                a = reduce(activation)

            if i == self.cond_layer:
                if self.reduce_cond is not None:
                    cond = self.reduce_cond(cond)

                a = self.film_mul(cond) * a + self.film_add(cond)

            a = block(a)

        for block in self.extra_blocks:
            a = a + block(a)

        a = a[1:].permute(1, 2, 0)  # rm cls token and -> BS, Feats, Tokens

        size = int(math.sqrt(a.shape[2]))

        a = a.view(bs, a.shape[1], size, size)

        a = self.trans_conv(a)

        if self.n_tokens is not None:
            a = nnf.interpolate(a, x_inp.shape[2:], mode='bilinear', align_corners=True)

        if self.upsample_proj is not None:
            a = self.upsample_proj(a)
            a = nnf.interpolate(a, x_inp.shape[2:], mode='bilinear')

        if return_features:
            return a, visual_q, cond, [activation1] + activations
        else:
            return a,


class CLIPDensePredTMasked(CLIPDensePredT):

    def __init__(self, version='ViT-B/32', extract_layers=(3, 6, 9), cond_layer=0, reduce_dim=128, n_heads=4,
                 prompt='fixed', extra_blocks=0, reduce_cond=None, fix_shift=False, learn_trans_conv_only=False,
                 refine=None, limit_to_clip_only=False, upsample=False, add_calibration=False, n_tokens=None):

        super().__init__(version=version, extract_layers=extract_layers, cond_layer=cond_layer, reduce_dim=reduce_dim,
                         n_heads=n_heads, prompt=prompt, extra_blocks=extra_blocks, reduce_cond=reduce_cond,
                         fix_shift=fix_shift, learn_trans_conv_only=learn_trans_conv_only,
                         limit_to_clip_only=limit_to_clip_only, upsample=upsample, add_calibration=add_calibration,
                         n_tokens=n_tokens)

    def visual_forward_masked(self, img_s, seg_s):
        return super().visual_forward(img_s, mask=('all', 'cls_token', seg_s))

    def forward(self, img_q, cond_or_img_s, seg_s=None, return_features=False):

        if seg_s is None:
            cond = cond_or_img_s
        else:
            img_s = cond_or_img_s

            with torch.no_grad():
                cond, _, _ = self.visual_forward_masked(img_s, seg_s)

        return super().forward(img_q, cond, return_features=return_features)


class CLIPDenseBaseline(CLIPDenseBase):

    def __init__(self, version='ViT-B/32', cond_layer=0,
                 extract_layer=9, reduce_dim=128, reduce2_dim=None, prompt='fixed',
                 reduce_cond=None, limit_to_clip_only=False, n_tokens=None):

        super().__init__(version, reduce_cond, reduce_dim, prompt, n_tokens)
        device = 'cpu'

        # self.cond_layer = cond_layer
        self.extract_layer = extract_layer
        self.limit_to_clip_only = limit_to_clip_only
        self.shift_vector = None

        self.token_shape = {'ViT-B/32': (7, 7), 'ViT-B/16': (14, 14)}[version]

        assert reduce2_dim is not None

        self.reduce2 = nn.Sequential(
            nn.Linear(reduce_dim, reduce2_dim),
            nn.ReLU(),
            nn.Linear(reduce2_dim, reduce_dim)
        )

        trans_conv_ks = {'ViT-B/32': (32, 32), 'ViT-B/16': (16, 16)}[version]
        self.trans_conv = nn.ConvTranspose2d(reduce_dim, 1, trans_conv_ks, stride=trans_conv_ks)

    def forward(self, inp_image, conditional=None, return_features=False):

        inp_image = inp_image.to(self.model.positional_embedding.device)

        # x_inp = normalize(inp_image)
        x_inp = inp_image

        bs, dev = inp_image.shape[0], x_inp.device

        cond = self.get_cond_vec(conditional, bs)

        visual_q, activations, affinities = self.visual_forward(x_inp, extract_layers=[self.extract_layer])

        a = activations[0]
        a = self.reduce(a)
        a = self.film_mul(cond) * a + self.film_add(cond)

        if self.reduce2 is not None:
            a = self.reduce2(a)

        # the original model would execute a transformer block here

        a = a[1:].permute(1, 2, 0)  # rm cls token and -> BS, Feats, Tokens

        size = int(math.sqrt(a.shape[2]))

        a = a.view(bs, a.shape[1], size, size)
        a = self.trans_conv(a)

        if return_features:
            return a, visual_q, cond, activations
        else:
            return a,


class CLIPSegMultiLabel(nn.Module):

    def __init__(self, model) -> None:
        super().__init__()

        from third_party.JoEm.data_loader import get_seen_idx, get_unseen_idx, VOC

        self.pascal_classes = VOC

        from clip.clipseg import CLIPDensePredT
        from general_utils import load_model
        # self.clipseg = load_model('rd64-vit16-neg0.2-phrasecut', strict=False)
        self.clipseg = load_model(model, strict=False)

        self.clipseg.eval()

    def forward(self, x):

        bs = x.shape[0]
        out = torch.ones(21, bs, 352, 352).to(x.device) * -10

        for class_id, class_name in enumerate(self.pascal_classes):

            fac = 3 if class_name == 'background' else 1

            with torch.no_grad():
                pred = torch.sigmoid(self.clipseg(x, class_name)[0][:,0]) * fac

            out[class_id] += pred

        out = out.permute(1, 0, 2, 3)

        return out

        # construct output tensor
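As a rough sketch of how CLIPDensePredT above is typically driven: the weight file name, input image, and the 352x352 resize with ImageNet-style normalization are assumptions taken from the usual CLIPSeg setup, not fixed by this file.

import torch
from torchvision import transforms
from PIL import Image
from clip.clipseg import CLIPDensePredT

# build the reduce_dim=64 ViT-B/16 variant and load released weights (path is an assumption)
model = CLIPDensePredT(version='ViT-B/16', reduce_dim=64)
model.eval()
model.load_state_dict(torch.load('weights/rd64-uni.pth', map_location='cpu'), strict=False)

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    transforms.Resize((352, 352)),
])
img = transform(Image.open('example.jpg')).unsqueeze(0)  # hypothetical input image

prompts = ['a glass', 'something to drink']
with torch.no_grad():
    # the image is repeated so the batch size matches the number of text prompts
    preds = model(img.repeat(len(prompts), 1, 1, 1), prompts)[0]
masks = torch.sigmoid(preds)  # one segmentation heat map per prompt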
clip/model.py
ADDED
@@ -0,0 +1,436 @@
from collections import OrderedDict
from typing import Tuple, Union

import numpy as np
import torch
import torch.nn.functional as F
from torch import nn


class Bottleneck(nn.Module):
    expansion = 4

    def __init__(self, inplanes, planes, stride=1):
        super().__init__()

        # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
        self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu1 = nn.ReLU(inplace=True)

        self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.relu2 = nn.ReLU(inplace=True)

        self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()

        self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu3 = nn.ReLU(inplace=True)

        self.downsample = None
        self.stride = stride

        if stride > 1 or inplanes != planes * Bottleneck.expansion:
            # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
            self.downsample = nn.Sequential(OrderedDict([
                ("-1", nn.AvgPool2d(stride)),
                ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
                ("1", nn.BatchNorm2d(planes * self.expansion))
            ]))

    def forward(self, x: torch.Tensor):
        identity = x

        out = self.relu1(self.bn1(self.conv1(x)))
        out = self.relu2(self.bn2(self.conv2(out)))
        out = self.avgpool(out)
        out = self.bn3(self.conv3(out))

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu3(out)
        return out


class AttentionPool2d(nn.Module):
    def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
        super().__init__()
        self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
        self.k_proj = nn.Linear(embed_dim, embed_dim)
        self.q_proj = nn.Linear(embed_dim, embed_dim)
        self.v_proj = nn.Linear(embed_dim, embed_dim)
        self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
        self.num_heads = num_heads

    def forward(self, x):
        x = x.flatten(start_dim=2).permute(2, 0, 1)  # NCHW -> (HW)NC
        x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)  # (HW+1)NC
        x = x + self.positional_embedding[:, None, :].to(x.dtype)  # (HW+1)NC
        x, _ = F.multi_head_attention_forward(
            query=x[:1], key=x, value=x,
            embed_dim_to_check=x.shape[-1],
            num_heads=self.num_heads,
            q_proj_weight=self.q_proj.weight,
            k_proj_weight=self.k_proj.weight,
            v_proj_weight=self.v_proj.weight,
            in_proj_weight=None,
            in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
            bias_k=None,
            bias_v=None,
            add_zero_attn=False,
            dropout_p=0,
            out_proj_weight=self.c_proj.weight,
            out_proj_bias=self.c_proj.bias,
            use_separate_proj_weight=True,
            training=self.training,
            need_weights=False
        )
        return x.squeeze(0)


class ModifiedResNet(nn.Module):
    """
    A ResNet class that is similar to torchvision's but contains the following changes:
    - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
    - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
    - The final pooling layer is a QKV attention instead of an average pool
    """

    def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
        super().__init__()
        self.output_dim = output_dim
        self.input_resolution = input_resolution

        # the 3-layer stem
        self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(width // 2)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(width // 2)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(width)
        self.relu3 = nn.ReLU(inplace=True)
        self.avgpool = nn.AvgPool2d(2)

        # residual layers
        self._inplanes = width  # this is a *mutable* variable used during construction
        self.layer1 = self._make_layer(width, layers[0])
        self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
        self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
        self.layer4 = self._make_layer(width * 8, layers[3], stride=2)

        embed_dim = width * 32  # the ResNet feature dimension
        self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)

    def _make_layer(self, planes, blocks, stride=1):
        layers = [Bottleneck(self._inplanes, planes, stride)]

        self._inplanes = planes * Bottleneck.expansion
        for _ in range(1, blocks):
            layers.append(Bottleneck(self._inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        def stem(x):
            x = self.relu1(self.bn1(self.conv1(x)))
            x = self.relu2(self.bn2(self.conv2(x)))
            x = self.relu3(self.bn3(self.conv3(x)))
            x = self.avgpool(x)
            return x

        x = x.type(self.conv1.weight.dtype)
        x = stem(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.attnpool(x)

        return x


class LayerNorm(nn.LayerNorm):
    """Subclass torch's LayerNorm to handle fp16."""

    def forward(self, x: torch.Tensor):
        orig_type = x.dtype
        ret = super().forward(x.type(torch.float32))
        return ret.type(orig_type)


class QuickGELU(nn.Module):
    def forward(self, x: torch.Tensor):
        return x * torch.sigmoid(1.702 * x)


class ResidualAttentionBlock(nn.Module):
    def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
        super().__init__()

        self.attn = nn.MultiheadAttention(d_model, n_head)
        self.ln_1 = LayerNorm(d_model)
        self.mlp = nn.Sequential(OrderedDict([
            ("c_fc", nn.Linear(d_model, d_model * 4)),
            ("gelu", QuickGELU()),
            ("c_proj", nn.Linear(d_model * 4, d_model))
        ]))
        self.ln_2 = LayerNorm(d_model)
        self.attn_mask = attn_mask

    def attention(self, x: torch.Tensor):
        self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
        return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]

    def forward(self, x: torch.Tensor):
        x = x + self.attention(self.ln_1(x))
        x = x + self.mlp(self.ln_2(x))
        return x


class Transformer(nn.Module):
    def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
        super().__init__()
        self.width = width
        self.layers = layers
        self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])

    def forward(self, x: torch.Tensor):
        return self.resblocks(x)


class VisionTransformer(nn.Module):
    def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int):
        super().__init__()
        self.input_resolution = input_resolution
        self.output_dim = output_dim
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)

        scale = width ** -0.5
        self.class_embedding = nn.Parameter(scale * torch.randn(width))
        self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
        self.ln_pre = LayerNorm(width)

        self.transformer = Transformer(width, layers, heads)

        self.ln_post = LayerNorm(width)
        self.proj = nn.Parameter(scale * torch.randn(width, output_dim))

    def forward(self, x: torch.Tensor):
        x = self.conv1(x)  # shape = [*, width, grid, grid]
        x = x.reshape(x.shape[0], x.shape[1], -1)  # shape = [*, width, grid ** 2]
        x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]
        x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1)  # shape = [*, grid ** 2 + 1, width]
        x = x + self.positional_embedding.to(x.dtype)
        x = self.ln_pre(x)

        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x)
        x = x.permute(1, 0, 2)  # LND -> NLD

        x = self.ln_post(x[:, 0, :])

        if self.proj is not None:
            x = x @ self.proj

        return x


class CLIP(nn.Module):
    def __init__(self,
                 embed_dim: int,
                 # vision
                 image_resolution: int,
                 vision_layers: Union[Tuple[int, int, int, int], int],
                 vision_width: int,
                 vision_patch_size: int,
                 # text
                 context_length: int,
                 vocab_size: int,
                 transformer_width: int,
                 transformer_heads: int,
                 transformer_layers: int
                 ):
        super().__init__()

        self.context_length = context_length

        if isinstance(vision_layers, (tuple, list)):
            vision_heads = vision_width * 32 // 64
            self.visual = ModifiedResNet(
                layers=vision_layers,
                output_dim=embed_dim,
                heads=vision_heads,
                input_resolution=image_resolution,
                width=vision_width
            )
        else:
            vision_heads = vision_width // 64
            self.visual = VisionTransformer(
                input_resolution=image_resolution,
                patch_size=vision_patch_size,
                width=vision_width,
                layers=vision_layers,
                heads=vision_heads,
                output_dim=embed_dim
            )

        self.transformer = Transformer(
            width=transformer_width,
            layers=transformer_layers,
            heads=transformer_heads,
            attn_mask=self.build_attention_mask()
        )

        self.vocab_size = vocab_size
        self.token_embedding = nn.Embedding(vocab_size, transformer_width)
        self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
        self.ln_final = LayerNorm(transformer_width)

        self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
        self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))

        self.initialize_parameters()

    def initialize_parameters(self):
        nn.init.normal_(self.token_embedding.weight, std=0.02)
        nn.init.normal_(self.positional_embedding, std=0.01)

        if isinstance(self.visual, ModifiedResNet):
            if self.visual.attnpool is not None:
                std = self.visual.attnpool.c_proj.in_features ** -0.5
                nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)

            for resnet_block in [self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4]:
                for name, param in resnet_block.named_parameters():
                    if name.endswith("bn3.weight"):
                        nn.init.zeros_(param)

        proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
        attn_std = self.transformer.width ** -0.5
        fc_std = (2 * self.transformer.width) ** -0.5
        for block in self.transformer.resblocks:
            nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
            nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
            nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
            nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)

        if self.text_projection is not None:
            nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)

    def build_attention_mask(self):
        # lazily create causal attention mask, with full attention between the vision tokens
        # pytorch uses additive attention mask; fill with -inf
        mask = torch.empty(self.context_length, self.context_length)
        mask.fill_(float("-inf"))
        mask.triu_(1)  # zero out the lower diagonal
        return mask

    @property
    def dtype(self):
        return self.visual.conv1.weight.dtype

    def encode_image(self, image):
        return self.visual(image.type(self.dtype))

    def encode_text(self, text):
        x = self.token_embedding(text).type(self.dtype)  # [batch_size, n_ctx, d_model]

        x = x + self.positional_embedding.type(self.dtype)
        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x)
        x = x.permute(1, 0, 2)  # LND -> NLD
        x = self.ln_final(x).type(self.dtype)

        # x.shape = [batch_size, n_ctx, transformer.width]
        # take features from the eot embedding (eot_token is the highest number in each sequence)
        x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection

        return x

    def forward(self, image, text):
        image_features = self.encode_image(image)
        text_features = self.encode_text(text)

        # normalized features
        image_features = image_features / image_features.norm(dim=1, keepdim=True)
        text_features = text_features / text_features.norm(dim=1, keepdim=True)

        # cosine similarity as logits
        logit_scale = self.logit_scale.exp()
        logits_per_image = logit_scale * image_features @ text_features.t()
        logits_per_text = logits_per_image.t()

        # shape = [global_batch_size, global_batch_size]
        return logits_per_image, logits_per_text


def convert_weights(model: nn.Module):
    """Convert applicable model parameters to fp16"""

    def _convert_weights_to_fp16(l):
        if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
            l.weight.data = l.weight.data.half()
            if l.bias is not None:
                l.bias.data = l.bias.data.half()

        if isinstance(l, nn.MultiheadAttention):
            for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
                tensor = getattr(l, attr)
                if tensor is not None:
                    tensor.data = tensor.data.half()

        for name in ["text_projection", "proj"]:
            if hasattr(l, name):
                attr = getattr(l, name)
                if attr is not None:
                    attr.data = attr.data.half()

    model.apply(_convert_weights_to_fp16)


def build_model(state_dict: dict):
    vit = "visual.proj" in state_dict

    if vit:
        vision_width = state_dict["visual.conv1.weight"].shape[0]
        vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
        vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
        grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
        image_resolution = vision_patch_size * grid_size
    else:
        counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
        vision_layers = tuple(counts)
        vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
        output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
        vision_patch_size = None
        assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
        image_resolution = output_width * 32

    embed_dim = state_dict["text_projection"].shape[1]
    context_length = state_dict["positional_embedding"].shape[0]
+
vocab_size = state_dict["token_embedding.weight"].shape[0]
|
| 420 |
+
transformer_width = state_dict["ln_final.weight"].shape[0]
|
| 421 |
+
transformer_heads = transformer_width // 64
|
| 422 |
+
transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith("transformer.resblocks")))
|
| 423 |
+
|
| 424 |
+
model = CLIP(
|
| 425 |
+
embed_dim,
|
| 426 |
+
image_resolution, vision_layers, vision_width, vision_patch_size,
|
| 427 |
+
context_length, vocab_size, transformer_width, transformer_heads, transformer_layers
|
| 428 |
+
)
|
| 429 |
+
|
| 430 |
+
for key in ["input_resolution", "context_length", "vocab_size"]:
|
| 431 |
+
if key in state_dict:
|
| 432 |
+
del state_dict[key]
|
| 433 |
+
|
| 434 |
+
convert_weights(model)
|
| 435 |
+
model.load_state_dict(state_dict)
|
| 436 |
+
return model.eval()
|
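A minimal usage sketch for the model code above, assuming the repo root is on PYTHONPATH so that clip/clip.py is importable as `clip`; the prompts and the random image tensor are placeholders:

    import torch
    import clip  # clip/clip.py from this upload

    model, preprocess = clip.load("ViT-B/16", device="cpu", jit=False)  # downloads and builds the checkpoint
    tokens = clip.tokenize(["a photo of a cat", "a photo of a dog"])
    image = torch.randn(1, 3, 224, 224)  # stand-in for a preprocessed 224x224 image batch

    with torch.no_grad():
        logits_per_image, logits_per_text = model(image, tokens)
        probs = logits_per_image.softmax(dim=-1)  # zero-shot probabilities over the two prompts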
clip/simple_tokenizer.py
ADDED
@@ -0,0 +1,132 @@
import gzip
import html
import os
from functools import lru_cache

import ftfy
import regex as re


@lru_cache()
def default_bpe():
    return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")


@lru_cache()
def bytes_to_unicode():
    """
    Returns list of utf-8 byte and a corresponding list of unicode strings.
    The reversible bpe codes work on unicode strings.
    This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
    When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
    To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
    And avoids mapping to whitespace/control characters the bpe code barfs on.
    """
    bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8+n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))

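# Illustrative examples of the mapping above: printable bytes map to themselves
# (bytes_to_unicode()[ord('a')] == 'a'), while whitespace/control bytes are remapped to
# code points above U+00FF (byte 0 becomes 'Ā'), so every UTF-8 byte string turns into
# a unicode string the BPE merges can safely operate on.
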
def get_pairs(word):
    """Return set of symbol pairs in a word.
    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


def basic_clean(text):
    text = ftfy.fix_text(text)
    text = html.unescape(html.unescape(text))
    return text.strip()


def whitespace_clean(text):
    text = re.sub(r'\s+', ' ', text)
    text = text.strip()
    return text


class SimpleTokenizer(object):
    def __init__(self, bpe_path: str = default_bpe()):
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
        merges = merges[1:49152-256-2+1]
        merges = [tuple(merge.split()) for merge in merges]
        vocab = list(bytes_to_unicode().values())
        vocab = vocab + [v+'</w>' for v in vocab]
        for merge in merges:
            vocab.append(''.join(merge))
        vocab.extend(['<|startoftext|>', '<|endoftext|>'])
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
        self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token[:-1]) + ( token[-1] + '</w>',)
        pairs = get_pairs(word)

        if not pairs:
            return token+'</w>'

        while True:
            bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except:
                    new_word.extend(word[i:])
                    break

                if word[i] == first and i < len(word)-1 and word[i+1] == second:
                    new_word.append(first+second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def encode(self, text):
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
            bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def decode(self, tokens):
        text = ''.join([self.decoder[token] for token in tokens])
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
        return text
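A small round-trip sketch for the tokenizer above (the sentence is arbitrary; `clip.tokenize` in clip/clip.py wraps the same `encode` call and adds the `<|startoftext|>`/`<|endoftext|>` ids):

    from clip.simple_tokenizer import SimpleTokenizer

    tokenizer = SimpleTokenizer()                # reads bpe_simple_vocab_16e6.txt.gz from this package
    ids = tokenizer.encode("a photo of a cat")   # plain BPE ids, no start/end-of-text markers
    text = tokenizer.decode(ids)                 # "a photo of a cat " (each </w> becomes a space)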
clip/vitseg.py
ADDED
@@ -0,0 +1,286 @@
import math
from posixpath import basename, dirname, join
# import clip
from clip.model import convert_weights
import torch
import json
from torch import nn
from torch.nn import functional as nnf
from torch.nn.modules import activation
from torch.nn.modules.activation import ReLU
from torchvision import transforms

normalize = transforms.Normalize(mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711))

from torchvision.models import ResNet


def process_prompts(conditional, prompt_list, conditional_map):
    # DEPRECATED

    # randomly sample a synonym
    words = [conditional_map[int(i)] for i in conditional]
    words = [syns[torch.multinomial(torch.ones(len(syns)), 1, replacement=True).item()] for syns in words]
    words = [w.replace('_', ' ') for w in words]

    if prompt_list is not None:
        prompt_indices = torch.multinomial(torch.ones(len(prompt_list)), len(words), replacement=True)
        prompts = [prompt_list[i] for i in prompt_indices]
    else:
        prompts = ['a photo of {}'] * (len(words))

    return [promt.format(w) for promt, w in zip(prompts, words)]


class VITDenseBase(nn.Module):

    def rescaled_pos_emb(self, new_size):
        assert len(new_size) == 2

        a = self.model.positional_embedding[1:].T.view(1, 768, *self.token_shape)
        b = nnf.interpolate(a, new_size, mode='bicubic', align_corners=False).squeeze(0).view(768, new_size[0]*new_size[1]).T
        return torch.cat([self.model.positional_embedding[:1], b])

    def visual_forward(self, x_inp, extract_layers=(), skip=False, mask=None):

        with torch.no_grad():

            x_inp = nnf.interpolate(x_inp, (384, 384))

            x = self.model.patch_embed(x_inp)
            cls_token = self.model.cls_token.expand(x.shape[0], -1, -1)  # stole cls_tokens impl from Phil Wang, thanks
            if self.model.dist_token is None:
                x = torch.cat((cls_token, x), dim=1)
            else:
                x = torch.cat((cls_token, self.model.dist_token.expand(x.shape[0], -1, -1), x), dim=1)
            x = self.model.pos_drop(x + self.model.pos_embed)

            activations = []
            for i, block in enumerate(self.model.blocks):
                x = block(x)

                if i in extract_layers:
                    # permute to be compatible with CLIP
                    activations += [x.permute(1,0,2)]

            x = self.model.norm(x)
            x = self.model.head(self.model.pre_logits(x[:, 0]))

            # again for CLIP compatibility
            # x = x.permute(1, 0, 2)

        return x, activations, None

    def sample_prompts(self, words, prompt_list=None):

        prompt_list = prompt_list if prompt_list is not None else self.prompt_list

        prompt_indices = torch.multinomial(torch.ones(len(prompt_list)), len(words), replacement=True)
        prompts = [prompt_list[i] for i in prompt_indices]
        return [promt.format(w) for promt, w in zip(prompts, words)]

    def get_cond_vec(self, conditional, batch_size):
        # compute conditional from a single string
        if conditional is not None and type(conditional) == str:
            cond = self.compute_conditional(conditional)
            cond = cond.repeat(batch_size, 1)

        # compute conditional from string list/tuple
        elif conditional is not None and type(conditional) in {list, tuple} and type(conditional[0]) == str:
            assert len(conditional) == batch_size
            cond = self.compute_conditional(conditional)

        # use conditional directly
        elif conditional is not None and type(conditional) == torch.Tensor and conditional.ndim == 2:
            cond = conditional

        # compute conditional from image
        elif conditional is not None and type(conditional) == torch.Tensor:
            with torch.no_grad():
                cond, _, _ = self.visual_forward(conditional)
        else:
            raise ValueError('invalid conditional')
        return cond

    def compute_conditional(self, conditional):
        import clip

        dev = next(self.parameters()).device

        if type(conditional) in {list, tuple}:
            text_tokens = clip.tokenize(conditional).to(dev)
            cond = self.clip_model.encode_text(text_tokens)
        else:
            if conditional in self.precomputed_prompts:
                cond = self.precomputed_prompts[conditional].float().to(dev)
            else:
                text_tokens = clip.tokenize([conditional]).to(dev)
                cond = self.clip_model.encode_text(text_tokens)[0]

        return cond


class VITDensePredT(VITDenseBase):

    def __init__(self, extract_layers=(3, 6, 9), cond_layer=0, reduce_dim=128, n_heads=4, prompt='fixed',
                 depth=3, extra_blocks=0, reduce_cond=None, fix_shift=False,
                 learn_trans_conv_only=False, refine=None, limit_to_clip_only=False, upsample=False,
                 add_calibration=False, process_cond=None, not_pretrained=False):
        super().__init__()
        # device = 'cpu'

        self.extract_layers = extract_layers
        self.cond_layer = cond_layer
        self.limit_to_clip_only = limit_to_clip_only
        self.process_cond = None

        if add_calibration:
            self.calibration_conds = 1

        self.upsample_proj = nn.Conv2d(reduce_dim, 1, kernel_size=1) if upsample else None

        self.add_activation1 = True

        import timm
        self.model = timm.create_model('vit_base_patch16_384', pretrained=True)
        self.model.head = nn.Linear(768, 512 if reduce_cond is None else reduce_cond)

        for p in self.model.parameters():
            p.requires_grad_(False)

        import clip
        self.clip_model, _ = clip.load('ViT-B/16', device='cpu', jit=False)
        # del self.clip_model.visual

        self.token_shape = (14, 14)

        # conditional
        if reduce_cond is not None:
            self.reduce_cond = nn.Linear(512, reduce_cond)
            for p in self.reduce_cond.parameters():
                p.requires_grad_(False)
        else:
            self.reduce_cond = None

        # self.film = AVAILABLE_BLOCKS['film'](512, 128)
        self.film_mul = nn.Linear(512 if reduce_cond is None else reduce_cond, reduce_dim)
        self.film_add = nn.Linear(512 if reduce_cond is None else reduce_cond, reduce_dim)

        # DEPRECATED
        # self.conditional_map = {c['id']: c['synonyms'] for c in json.load(open(cond_map))}

        assert len(self.extract_layers) == depth

        self.reduces = nn.ModuleList([nn.Linear(768, reduce_dim) for _ in range(depth)])
        self.blocks = nn.ModuleList([nn.TransformerEncoderLayer(d_model=reduce_dim, nhead=n_heads) for _ in range(len(self.extract_layers))])
        self.extra_blocks = nn.ModuleList([nn.TransformerEncoderLayer(d_model=reduce_dim, nhead=n_heads) for _ in range(extra_blocks)])

        trans_conv_ks = (16, 16)
        self.trans_conv = nn.ConvTranspose2d(reduce_dim, 1, trans_conv_ks, stride=trans_conv_ks)

        # refinement and trans conv

        if learn_trans_conv_only:
            for p in self.parameters():
                p.requires_grad_(False)

            for p in self.trans_conv.parameters():
                p.requires_grad_(True)

        if prompt == 'fixed':
            self.prompt_list = ['a photo of a {}.']
        elif prompt == 'shuffle':
            self.prompt_list = ['a photo of a {}.', 'a photograph of a {}.', 'an image of a {}.', '{}.']
        elif prompt == 'shuffle+':
            self.prompt_list = ['a photo of a {}.', 'a photograph of a {}.', 'an image of a {}.', '{}.',
                                'a cropped photo of a {}.', 'a good photo of a {}.', 'a photo of one {}.',
                                'a bad photo of a {}.', 'a photo of the {}.']
        elif prompt == 'shuffle_clip':
            from models.clip_prompts import imagenet_templates
            self.prompt_list = imagenet_templates

        if process_cond is not None:
            if process_cond == 'clamp' or process_cond[0] == 'clamp':

                val = process_cond[1] if type(process_cond) in {list, tuple} else 0.2

                def clamp_vec(x):
                    return torch.clamp(x, -val, val)

                self.process_cond = clamp_vec

            elif process_cond.endswith('.pth'):

                shift = torch.load(process_cond)
                def add_shift(x):
                    return x + shift.to(x.device)

                self.process_cond = add_shift

        import pickle
        precomp = pickle.load(open('precomputed_prompt_vectors.pickle', 'rb'))
        self.precomputed_prompts = {k: torch.from_numpy(v) for k, v in precomp.items()}

    def forward(self, inp_image, conditional=None, return_features=False, mask=None):

        assert type(return_features) == bool

        # inp_image = inp_image.to(self.model.positional_embedding.device)

        if mask is not None:
            raise ValueError('mask not supported')

        # x_inp = normalize(inp_image)
        x_inp = inp_image

        bs, dev = inp_image.shape[0], x_inp.device

        inp_image_size = inp_image.shape[2:]

        cond = self.get_cond_vec(conditional, bs)

        visual_q, activations, _ = self.visual_forward(x_inp, extract_layers=[0] + list(self.extract_layers))

        activation1 = activations[0]
        activations = activations[1:]

        a = None
        for i, (activation, block, reduce) in enumerate(zip(activations[::-1], self.blocks, self.reduces)):

            if a is not None:
                a = reduce(activation) + a
            else:
                a = reduce(activation)

            if i == self.cond_layer:
                if self.reduce_cond is not None:
                    cond = self.reduce_cond(cond)

                a = self.film_mul(cond) * a + self.film_add(cond)

            a = block(a)

        for block in self.extra_blocks:
            a = a + block(a)

        a = a[1:].permute(1, 2, 0)  # rm cls token and -> BS, Feats, Tokens

        size = int(math.sqrt(a.shape[2]))

        a = a.view(bs, a.shape[1], size, size)

        if self.trans_conv is not None:
            a = self.trans_conv(a)

        if self.upsample_proj is not None:
            a = self.upsample_proj(a)
            a = nnf.interpolate(a, x_inp.shape[2:], mode='bilinear')

        a = nnf.interpolate(a, inp_image_size)

        if return_features:
            return a, visual_q, cond, [activation1] + activations
        else:
            return a,
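The decoder above injects the conditional vector with a FiLM-style scale-and-shift (`self.film_mul` / `self.film_add`). A self-contained sketch of that single step, with placeholder dimensions (`reduce_dim=128` and a 512-d CLIP embedding, matching the defaults above):

    import torch
    from torch import nn

    reduce_dim = 128
    film_mul, film_add = nn.Linear(512, reduce_dim), nn.Linear(512, reduce_dim)

    a = torch.randn(577, 1, reduce_dim)       # (tokens, batch, features), the layout used inside forward()
    cond = torch.randn(1, 512)                # one text/image embedding per batch element
    a = film_mul(cond) * a + film_add(cond)   # per-channel scale and shift driven by the prompt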