Spaces: Runtime error
first version
Browse files

Files changed (this commit removes the vendored CLIP package and its use in feature_networks/pretrained_builder.py):
- feature_networks/clip/__init__.py +0 -1
- feature_networks/clip/__pycache__/__init__.cpython-39.pyc +0 -0
- feature_networks/clip/__pycache__/clip.cpython-39.pyc +0 -0
- feature_networks/clip/__pycache__/model.cpython-39.pyc +0 -0
- feature_networks/clip/__pycache__/simple_tokenizer.cpython-39.pyc +0 -0
- feature_networks/clip/bpe_simple_vocab_16e6.txt.gz +0 -3
- feature_networks/clip/clip.py +0 -244
- feature_networks/clip/model.py +0 -453
- feature_networks/clip/simple_tokenizer.py +0 -132
- feature_networks/pretrained_builder.py +1 -27
feature_networks/clip/__init__.py
DELETED
@@ -1 +0,0 @@
-from .clip import *
feature_networks/clip/__pycache__/__init__.cpython-39.pyc
DELETED
Binary file (254 Bytes)

feature_networks/clip/__pycache__/clip.cpython-39.pyc
DELETED
Binary file (9.19 kB)

feature_networks/clip/__pycache__/model.cpython-39.pyc
DELETED
Binary file (15.4 kB)

feature_networks/clip/__pycache__/simple_tokenizer.cpython-39.pyc
DELETED
Binary file (5.84 kB)
feature_networks/clip/bpe_simple_vocab_16e6.txt.gz
DELETED
(The deleted file was tracked with Git LFS; the three lines below are the LFS pointer recording the spec version, object hash, and size, not the vocabulary data itself.)
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:924691ac288e54409236115652ad4aa250f48203de50a9e4722a6ecd48d6804a
-size 1356917
feature_networks/clip/clip.py
DELETED
@@ -1,244 +0,0 @@
-import hashlib
-import os
-import urllib
-import warnings
-from typing import Union, List
-
-import torch
-import torch.nn as nn
-from PIL import Image
-from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
-from tqdm import tqdm
-
-from .model import build_model
-from .simple_tokenizer import SimpleTokenizer as _Tokenizer
-
-__all__ = ["available_models", "load", "tokenize"]
-_tokenizer = _Tokenizer()
-
-_MODELS = {
-    "RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
-    "RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
-    "RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt",
-    "ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
-}
-
-
-def _download(url: str, root: str = os.path.expanduser("~/.cache/clip")):
-    os.makedirs(root, exist_ok=True)
-    filename = os.path.basename(url)
-
-    expected_sha256 = url.split("/")[-2]
-    download_target = os.path.join(root, filename)
-
-    if os.path.exists(download_target) and not os.path.isfile(download_target):
-        raise RuntimeError(f"{download_target} exists and is not a regular file")
-
-    if os.path.isfile(download_target):
-        if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
-            return download_target
-        else:
-            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
-
-    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
-        with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True) as loop:
-            while True:
-                buffer = source.read(8192)
-                if not buffer:
-                    break
-
-                output.write(buffer)
-                loop.update(len(buffer))
-
-    if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
-        raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not match")
-
-    return download_target
-
-
-def _transform(n_px):
-    return Compose([
-        Resize(n_px, interpolation=Image.BICUBIC),
-        CenterCrop(n_px),
-        lambda image: image.convert("RGB"),
-        ToTensor(),
-        Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
-    ])
-
-
-def available_models() -> List[str]:
-    """Returns the names of available CLIP models"""
-    return list(_MODELS.keys())
-
-
-def load(name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit=True):
-    """Load a CLIP model
-
-    Parameters
-    ----------
-    name : str
-        A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
-
-    device : Union[str, torch.device]
-        The device to put the loaded model
-
-    jit : bool
-        Whether to load the optimized JIT model (default) or the more hackable non-JIT model.
-
-    Returns
-    -------
-    model : torch.nn.Module
-        The CLIP model
-
-    preprocess : Callable[[PIL.Image], torch.Tensor]
-        A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
-    """
-    if name in _MODELS:
-        model_path = _download(_MODELS[name])
-    elif os.path.isfile(name):
-        model_path = name
-    else:
-        raise RuntimeError(f"Model {name} not found; available models = {available_models()}")
-
-    try:
-        # loading JIT archive
-        model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
-        state_dict = None
-    except RuntimeError:
-        # loading saved state dict
-        if jit:
-            warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
-            jit = False
-        state_dict = torch.load(model_path, map_location="cpu")
-
-    if not jit:
-        model = build_model(state_dict or model.state_dict()).to(device)
-        if str(device) == "cpu":
-            model.float()
-        return model, _transform(model.visual.input_resolution)
-
-    # patch the device names
-    device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
-    device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
-
-    def patch_device(module):
-        graphs = [module.graph] if hasattr(module, "graph") else []
-        if hasattr(module, "forward1"):
-            graphs.append(module.forward1.graph)
-
-        for graph in graphs:
-            for node in graph.findAllNodes("prim::Constant"):
-                if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
-                    node.copyAttributes(device_node)
-
-    model.apply(patch_device)
-    patch_device(model.encode_image)
-    patch_device(model.encode_text)
-
-    # patch dtype to float32 on CPU
-    if str(device) == "cpu":
-        float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
-        float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
-        float_node = float_input.node()
-
-        def patch_float(module):
-            graphs = [module.graph] if hasattr(module, "graph") else []
-            if hasattr(module, "forward1"):
-                graphs.append(module.forward1.graph)
-
-            for graph in graphs:
-                for node in graph.findAllNodes("aten::to"):
-                    inputs = list(node.inputs())
-                    for i in [1, 2]:  # dtype can be the second or third argument to aten::to()
-                        if inputs[i].node()["value"] == 5:
-                            inputs[i].node().copyAttributes(float_node)
-
-        model.apply(patch_float)
-        patch_float(model.encode_image)
-        patch_float(model.encode_text)
-
-        model.float()
-
-    return model, _transform(model.input_resolution.item())
-
-
-def tokenize(texts: Union[str, List[str]], context_length: int = 77) -> torch.LongTensor:
-    """
-    Returns the tokenized representation of given input string(s)
-
-    Parameters
-    ----------
-    texts : Union[str, List[str]]
-        An input string or a list of input strings to tokenize
-
-    context_length : int
-        The context length to use; all CLIP models use 77 as the context length
-
-    Returns
-    -------
-    A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
-    """
-    if isinstance(texts, str):
-        texts = [texts]
-
-    sot_token = _tokenizer.encoder["<|startoftext|>"]
-    eot_token = _tokenizer.encoder["<|endoftext|>"]
-    all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
-    result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
-
-    for i, tokens in enumerate(all_tokens):
-        if len(tokens) > context_length:
-            raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
-        result[i, :len(tokens)] = torch.tensor(tokens)
-
-    return result
-
-def pdist(sample_1, sample_2, norm=2, eps=1e-5):
-    r"""Compute the matrix of all squared pairwise distances.
-    Arguments
-    ---------
-    sample_1 : torch.Tensor or Variable
-        The first sample, should be of shape ``(n_1, d)``.
-    sample_2 : torch.Tensor or Variable
-        The second sample, should be of shape ``(n_2, d)``.
-    norm : float
-        The l_p norm to be used.
-    Returns
-    -------
-    torch.Tensor or Variable
-        Matrix of shape (n_1, n_2). The [i, j]-th entry is equal to
-        ``|| sample_1[i, :] - sample_2[j, :] ||_p``."""
-    n_1, n_2 = sample_1.size(0), sample_2.size(0)
-    norm = float(norm)
-    if norm == 2.:
-        norms_1 = torch.sum(sample_1**2, dim=1, keepdim=True)
-        norms_2 = torch.sum(sample_2**2, dim=1, keepdim=True)
-        norms = (norms_1.expand(n_1, n_2) +
-                 norms_2.transpose(0, 1).expand(n_1, n_2))
-        distances_squared = norms - 2 * sample_1.mm(sample_2.t())
-        return torch.sqrt(eps + torch.abs(distances_squared))
-    else:
-        dim = sample_1.size(1)
-        expanded_1 = sample_1.unsqueeze(1).expand(n_1, n_2, dim)
-        expanded_2 = sample_2.unsqueeze(0).expand(n_1, n_2, dim)
-        differences = torch.abs(expanded_1 - expanded_2) ** norm
-        inner = torch.sum(differences, dim=2, keepdim=False)
-        return (eps + inner) ** (1. / norm)
-
-
-class ClipHead(nn.Module):
-    def __init__(self, prompt, device='cpu'):
-        super().__init__()
-        self.clip_model = load("RN50", device=device, jit=False)[0].eval()
-        self.prompt = prompt
-
-    def calc_loss(self, features):
-        dev = features['last'].get_device()
-        text_input = tokenize(self.prompt).to(dev)
-
-        text_features = self.clip_model.encode_text(text_input)
-        image_features = self.clip_model.encode_conv_features(features['last'])
-        loss = - torch.cosine_similarity(text_features, image_features, dim=1)
-        # loss -= (pdist(image_features, image_features)/image_features.max()).sum()
-
-        return loss.mean()
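
For reference, the deleted clip.py bundled OpenAI's loader and tokenizer with a small ClipHead loss module. A minimal usage sketch, assuming the package were still vendored (the prompts and feature shapes below are illustrative, not from the repo):

import torch
from feature_networks import clip  # the vendored package this commit removes

# load() returns the model plus a torchvision preprocessing transform;
# jit=False rebuilds the hackable nn.Module from the checkpoint state dict.
model, preprocess = clip.load("RN50", device="cpu", jit=False)

# tokenize() wraps each string in <|startoftext|>/<|endoftext|> tokens and
# zero-pads to the 77-token context window.
tokens = clip.tokenize(["a diagram", "a dog"])  # LongTensor of shape [2, 77]
with torch.no_grad():
    text_features = model.encode_text(tokens)   # [2, 1024] for RN50

# ClipHead turned this into a loss: on a CUDA tensor it scores conv feature
# maps against a fixed prompt via encode_conv_features, e.g.
#   head = clip.ClipHead("a photo of a dog", device="cuda")
#   loss = head.calc_loss({"last": stage4_feature_map})  # negative mean cosine similarity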
feature_networks/clip/model.py
DELETED
@@ -1,453 +0,0 @@
-from collections import OrderedDict
-from typing import Tuple, Union
-
-import numpy as np
-import torch
-import torch.nn.functional as F
-from torch import nn
-
-
-class Bottleneck(nn.Module):
-    expansion = 4
-
-    def __init__(self, inplanes, planes, stride=1):
-        super().__init__()
-
-        # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
-        self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
-        self.bn1 = nn.BatchNorm2d(planes)
-
-        self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
-        self.bn2 = nn.BatchNorm2d(planes)
-
-        self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
-
-        self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
-        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
-
-        self.relu = nn.ReLU(inplace=True)
-        self.downsample = None
-        self.stride = stride
-
-        if stride > 1 or inplanes != planes * Bottleneck.expansion:
-            # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
-            self.downsample = nn.Sequential(OrderedDict([
-                ("-1", nn.AvgPool2d(stride)),
-                ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
-                ("1", nn.BatchNorm2d(planes * self.expansion))
-            ]))
-
-    def forward(self, x: torch.Tensor):
-        identity = x
-
-        out = self.relu(self.bn1(self.conv1(x)))
-        out = self.relu(self.bn2(self.conv2(out)))
-        out = self.avgpool(out)
-        out = self.bn3(self.conv3(out))
-
-        if self.downsample is not None:
-            identity = self.downsample(x)
-
-        out += identity
-        out = self.relu(out)
-        return out
-
-
-class AttentionPool2d(nn.Module):
-    def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
-        super().__init__()
-        self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
-        self.k_proj = nn.Linear(embed_dim, embed_dim)
-        self.q_proj = nn.Linear(embed_dim, embed_dim)
-        self.v_proj = nn.Linear(embed_dim, embed_dim)
-        self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
-        self.num_heads = num_heads
-
-    def forward(self, x):
-        x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1)  # NCHW -> (HW)NC
-        x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)  # (HW+1)NC
-        x = x + self.positional_embedding[:, None, :].to(x.dtype)  # (HW+1)NC
-        x, _ = F.multi_head_attention_forward(
-            query=x, key=x, value=x,
-            embed_dim_to_check=x.shape[-1],
-            num_heads=self.num_heads,
-            q_proj_weight=self.q_proj.weight,
-            k_proj_weight=self.k_proj.weight,
-            v_proj_weight=self.v_proj.weight,
-            in_proj_weight=None,
-            in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
-            bias_k=None,
-            bias_v=None,
-            add_zero_attn=False,
-            dropout_p=0,
-            out_proj_weight=self.c_proj.weight,
-            out_proj_bias=self.c_proj.bias,
-            use_separate_proj_weight=True,
-            training=self.training,
-            need_weights=False
-        )
-
-        return x[0]
-
-
-class ModifiedResNet(nn.Module):
-    """
-    A ResNet class that is similar to torchvision's but contains the following changes:
-    - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
-    - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
-    - The final pooling layer is a QKV attention instead of an average pool
-    """
-
-    def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
-        super().__init__()
-        self.output_dim = output_dim
-        self.input_resolution = input_resolution
-
-        # the 3-layer stem
-        self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
-        self.bn1 = nn.BatchNorm2d(width // 2)
-        self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
-        self.bn2 = nn.BatchNorm2d(width // 2)
-        self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
-        self.bn3 = nn.BatchNorm2d(width)
-        self.avgpool = nn.AvgPool2d(2)
-        self.relu = nn.ReLU(inplace=True)
-
-        # residual layers
-        self._inplanes = width  # this is a *mutable* variable used during construction
-        self.layer1 = self._make_layer(width, layers[0])
-        self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
-        self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
-        self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
-
-        embed_dim = width * 32  # the ResNet feature dimension
-        self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)
-
-    def _make_layer(self, planes, blocks, stride=1):
-        layers = [Bottleneck(self._inplanes, planes, stride)]
-
-        self._inplanes = planes * Bottleneck.expansion
-        for _ in range(1, blocks):
-            layers.append(Bottleneck(self._inplanes, planes))
-
-        return nn.Sequential(*layers)
-
-    def forward(self, x):
-        def stem(x):
-            for conv, bn in [(self.conv1, self.bn1), (self.conv2, self.bn2), (self.conv3, self.bn3)]:
-                x = self.relu(bn(conv(x)))
-            x = self.avgpool(x)
-            return x
-
-        x = x.type(self.conv1.weight.dtype)
-        x = stem(x)
-        x = self.layer1(x)
-        x = self.layer2(x)
-        x = self.layer3(x)
-        x = self.layer4(x)
-        x = self.attnpool(x)
-
-        return x
-
-
-class LayerNorm(nn.LayerNorm):
-    """Subclass torch's LayerNorm to handle fp16."""
-
-    def forward(self, x: torch.Tensor):
-        orig_type = x.dtype
-        ret = super().forward(x.type(torch.float32))
-        return ret.type(orig_type)
-
-
-class QuickGELU(nn.Module):
-    def forward(self, x: torch.Tensor):
-        return x * torch.sigmoid(1.702 * x)
-
-
-class ResidualAttentionBlock(nn.Module):
-    def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
-        super().__init__()
-
-        self.attn = nn.MultiheadAttention(d_model, n_head)
-        self.ln_1 = LayerNorm(d_model)
-        self.mlp = nn.Sequential(OrderedDict([
-            ("c_fc", nn.Linear(d_model, d_model * 4)),
-            ("gelu", QuickGELU()),
-            ("c_proj", nn.Linear(d_model * 4, d_model))
-        ]))
-        self.ln_2 = LayerNorm(d_model)
-        self.attn_mask = attn_mask
-
-    def attention(self, x: torch.Tensor):
-        self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
-        return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
-
-    def forward(self, x: torch.Tensor):
-        x = x + self.attention(self.ln_1(x))
-        x = x + self.mlp(self.ln_2(x))
-        return x
-
-
-class Transformer(nn.Module):
-    def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
-        super().__init__()
-        self.width = width
-        self.layers = layers
-        self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])
-
-    def forward(self, x: torch.Tensor):
-        return self.resblocks(x)
-
-
-class VisualTransformer(nn.Module):
-    def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int):
-        super().__init__()
-        self.input_resolution = input_resolution
-        self.output_dim = output_dim
-        self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
-
-        scale = width ** -0.5
-        self.class_embedding = nn.Parameter(scale * torch.randn(width))
-        self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
-        self.ln_pre = LayerNorm(width)
-
-        self.transformer = Transformer(width, layers, heads)
-
-        self.ln_post = LayerNorm(width)
-        self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
-
-    def forward(self, x: torch.Tensor):
-        x = self.conv1(x)  # shape = [*, width, grid, grid]
-        x = x.reshape(x.shape[0], x.shape[1], -1)  # shape = [*, width, grid ** 2]
-        x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]
-        x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1)  # shape = [*, grid ** 2 + 1, width]
-        x = x + self.positional_embedding.to(x.dtype)
-        x = self.ln_pre(x)
-
-        x = x.permute(1, 0, 2)  # NLD -> LND
-        x = self.transformer(x)
-        x = x.permute(1, 0, 2)  # LND -> NLD
-
-        x = self.ln_post(x[:, 0, :])
-
-        if self.proj is not None:
-            x = x @ self.proj
-
-        return x
-
-
-class CLIP(nn.Module):
-    def __init__(self,
-                 embed_dim: int,
-                 # vision
-                 image_resolution: int,
-                 vision_layers: Union[Tuple[int, int, int, int], int],
-                 vision_width: int,
-                 vision_patch_size: int,
-                 # text
-                 context_length: int,
-                 vocab_size: int,
-                 transformer_width: int,
-                 transformer_heads: int,
-                 transformer_layers: int
-                 ):
-        super().__init__()
-
-        self.context_length = context_length
-
-        if isinstance(vision_layers, (tuple, list)):
-            vision_heads = vision_width * 32 // 64
-            self.visual = ModifiedResNet(
-                layers=vision_layers,
-                output_dim=embed_dim,
-                heads=vision_heads,
-                input_resolution=image_resolution,
-                width=vision_width
-            )
-        else:
-            vision_heads = vision_width // 64
-            self.visual = VisualTransformer(
-                input_resolution=image_resolution,
-                patch_size=vision_patch_size,
-                width=vision_width,
-                layers=vision_layers,
-                heads=vision_heads,
-                output_dim=embed_dim
-            )
-
-        self.transformer = Transformer(
-            width=transformer_width,
-            layers=transformer_layers,
-            heads=transformer_heads,
-            attn_mask=self.build_attention_mask()
-        )
-
-        self.vocab_size = vocab_size
-        self.token_embedding = nn.Embedding(vocab_size, transformer_width)
-        self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
-        self.ln_final = LayerNorm(transformer_width)
-
-        self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
-        self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
-
-        self.initialize_parameters()
-
-    def initialize_parameters(self):
-        nn.init.normal_(self.token_embedding.weight, std=0.02)
-        nn.init.normal_(self.positional_embedding, std=0.01)
-
-        if isinstance(self.visual, ModifiedResNet):
-            if self.visual.attnpool is not None:
-                std = self.visual.attnpool.c_proj.in_features ** -0.5
-                nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
-                nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
-                nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
-                nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)
-
-            for resnet_block in [self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4]:
-                for name, param in resnet_block.named_parameters():
-                    if name.endswith("bn3.weight"):
-                        nn.init.zeros_(param)
-
-        proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
-        attn_std = self.transformer.width ** -0.5
-        fc_std = (2 * self.transformer.width) ** -0.5
-        for block in self.transformer.resblocks:
-            nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
-            nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
-            nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
-            nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
-
-        if self.text_projection is not None:
-            nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)
-
-    def build_attention_mask(self):
-        # lazily create causal attention mask, with full attention between the vision tokens
-        # pytorch uses additive attention mask; fill with -inf
-        mask = torch.empty(self.context_length, self.context_length)
-        mask.fill_(float("-inf"))
-        mask.triu_(1)  # zero out the lower diagonal
-        return mask
-
-    @property
-    def dtype(self):
-        return self.visual.conv1.weight.dtype
-
-    def encode_image(self, image):
-        return self.visual(image.type(self.dtype))
-
-    def encode_text(self, text):
-        x = self.token_embedding(text).type(self.dtype)  # [batch_size, n_ctx, d_model]
-
-        x = x + self.positional_embedding.type(self.dtype)
-        x = x.permute(1, 0, 2)  # NLD -> LND
-        x = self.transformer(x)
-        x = x.permute(1, 0, 2)  # LND -> NLD
-        x = self.ln_final(x).type(self.dtype)
-
-        # x.shape = [batch_size, n_ctx, transformer.width]
-        # take features from the eot embedding (eot_token is the highest number in each sequence)
-        x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
-
-        return x
-
-    def encode_conv_features(self, features):
-        # pool to 7, the feature map resolution for 224x224 input
-        features = nn.AdaptiveAvgPool2d(7)(features)
-        return self.visual.attnpool(features)
-
-    def forward(self, image, text):
-        image_features = self.encode_image(image)
-        text_features = self.encode_text(text)
-
-        # normalized features
-        image_features = image_features / image_features.norm(dim=-1, keepdim=True)
-        text_features = text_features / text_features.norm(dim=-1, keepdim=True)
-
-        # cosine similarity as logits
-        logit_scale = self.logit_scale.exp()
-        logits_per_image = logit_scale * image_features @ text_features.t()
-        logits_per_text = logit_scale * text_features @ image_features.t()
-
-        # shape = [global_batch_size, global_batch_size]
-        return logits_per_image, logits_per_text
-
-    def forward_features(self, features, text):
-        image_features = self.encode_conv_features(features)
-        text_features = self.encode_text(text)
-
-        # normalized features
-        image_features = image_features / image_features.norm(dim=-1, keepdim=True)
-        text_features = text_features / text_features.norm(dim=-1, keepdim=True)
-
-        # cosine similarity as logits
-        logit_scale = self.logit_scale.exp()
-        logits_per_image = logit_scale * image_features @ text_features.t()
-        logits_per_text = logit_scale * text_features @ image_features.t()
-
-        # shape = [global_batch_size, global_batch_size]
-        return logits_per_image, logits_per_text
-
-
-def convert_weights(model: nn.Module):
-    """Convert applicable model parameters to fp16"""
-
-    def _convert_weights_to_fp16(l):
-        if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
-            l.weight.data = l.weight.data.half()
-            if l.bias is not None:
-                l.bias.data = l.bias.data.half()
-
-        if isinstance(l, nn.MultiheadAttention):
-            for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
-                tensor = getattr(l, attr)
-                if tensor is not None:
-                    tensor.data = tensor.data.half()
-
-        for name in ["text_projection", "proj"]:
-            if hasattr(l, name):
-                attr = getattr(l, name)
-                if attr is not None:
-                    attr.data = attr.data.half()
-
-    model.apply(_convert_weights_to_fp16)
-
-
-def build_model(state_dict: dict):
-    vit = "visual.proj" in state_dict
-
-    if vit:
-        vision_width = state_dict["visual.conv1.weight"].shape[0]
-        vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
-        vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
-        grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
-        image_resolution = vision_patch_size * grid_size
-    else:
-        counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
-        vision_layers = tuple(counts)
-        vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
-        output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
-        vision_patch_size = None
-        assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
-        image_resolution = output_width * 32
-
-    embed_dim = state_dict["text_projection"].shape[1]
-    context_length = state_dict["positional_embedding"].shape[0]
-    vocab_size = state_dict["token_embedding.weight"].shape[0]
-    transformer_width = state_dict["ln_final.weight"].shape[0]
-    transformer_heads = transformer_width // 64
-    transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))
-
-    model = CLIP(
-        embed_dim,
-        image_resolution, vision_layers, vision_width, vision_patch_size,
-        context_length, vocab_size, transformer_width, transformer_heads, transformer_layers
-    )
-
-    for key in ["input_resolution", "context_length", "vocab_size"]:
-        if key in state_dict:
-            del state_dict[key]
-
-    # convert_weights(model)
-    model.load_state_dict(state_dict)
-    return model.eval()
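
The one nonstandard method in the deleted model.py is encode_conv_features, which reuses the visual trunk's attention pool on externally computed feature maps. A shape sketch for RN50 (dimensions assumed from the defaults above, not asserted by the repo):

import torch
import torch.nn as nn

# Stage-4 features from CLIP's RN50 trunk are [N, 2048, H, W].
# AdaptiveAvgPool2d(7) maps any spatial size to the 7x7 grid that the
# attention pool's positional embedding expects (input_resolution // 32 == 7
# for 224x224 inputs), so feature maps of arbitrary resolution can be
# projected into the joint image-text embedding space.
features = torch.randn(4, 2048, 16, 16)     # hypothetical feature maps
pooled = nn.AdaptiveAvgPool2d(7)(features)  # [4, 2048, 7, 7]
# model.visual.attnpool(pooled) would then yield [4, 1024] embeddings.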
feature_networks/clip/simple_tokenizer.py
DELETED
@@ -1,132 +0,0 @@
-import gzip
-import html
-import os
-from functools import lru_cache
-
-import ftfy
-import regex as re
-
-
-@lru_cache()
-def default_bpe():
-    return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
-
-
-@lru_cache()
-def bytes_to_unicode():
-    """
-    Returns list of utf-8 byte and a corresponding list of unicode strings.
-    The reversible bpe codes work on unicode strings.
-    This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
-    When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
-    This is a significant percentage of your normal, say, 32K bpe vocab.
-    To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
-    And avoids mapping to whitespace/control characters the bpe code barfs on.
-    """
-    bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
-    cs = bs[:]
-    n = 0
-    for b in range(2**8):
-        if b not in bs:
-            bs.append(b)
-            cs.append(2**8+n)
-            n += 1
-    cs = [chr(n) for n in cs]
-    return dict(zip(bs, cs))
-
-
-def get_pairs(word):
-    """Return set of symbol pairs in a word.
-    Word is represented as tuple of symbols (symbols being variable-length strings).
-    """
-    pairs = set()
-    prev_char = word[0]
-    for char in word[1:]:
-        pairs.add((prev_char, char))
-        prev_char = char
-    return pairs
-
-
-def basic_clean(text):
-    text = ftfy.fix_text(text)
-    text = html.unescape(html.unescape(text))
-    return text.strip()
-
-
-def whitespace_clean(text):
-    text = re.sub(r'\s+', ' ', text)
-    text = text.strip()
-    return text
-
-
-class SimpleTokenizer(object):
-    def __init__(self, bpe_path: str = default_bpe()):
-        self.byte_encoder = bytes_to_unicode()
-        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
-        merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
-        merges = merges[1:49152-256-2+1]
-        merges = [tuple(merge.split()) for merge in merges]
-        vocab = list(bytes_to_unicode().values())
-        vocab = vocab + [v+'</w>' for v in vocab]
-        for merge in merges:
-            vocab.append(''.join(merge))
-        vocab.extend(['<|startoftext|>', '<|endoftext|>'])
-        self.encoder = dict(zip(vocab, range(len(vocab))))
-        self.decoder = {v: k for k, v in self.encoder.items()}
-        self.bpe_ranks = dict(zip(merges, range(len(merges))))
-        self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
-        self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
-
-    def bpe(self, token):
-        if token in self.cache:
-            return self.cache[token]
-        word = tuple(token[:-1]) + ( token[-1] + '</w>',)
-        pairs = get_pairs(word)
-
-        if not pairs:
-            return token+'</w>'
-
-        while True:
-            bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
-            if bigram not in self.bpe_ranks:
-                break
-            first, second = bigram
-            new_word = []
-            i = 0
-            while i < len(word):
-                try:
-                    j = word.index(first, i)
-                    new_word.extend(word[i:j])
-                    i = j
-                except:
-                    new_word.extend(word[i:])
-                    break
-
-                if word[i] == first and i < len(word)-1 and word[i+1] == second:
-                    new_word.append(first+second)
-                    i += 2
-                else:
-                    new_word.append(word[i])
-                    i += 1
-            new_word = tuple(new_word)
-            word = new_word
-            if len(word) == 1:
-                break
-            else:
-                pairs = get_pairs(word)
-        word = ' '.join(word)
-        self.cache[token] = word
-        return word
-
-    def encode(self, text):
-        bpe_tokens = []
-        text = whitespace_clean(basic_clean(text)).lower()
-        for token in re.findall(self.pat, text):
-            token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
-            bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
-        return bpe_tokens
-
-    def decode(self, tokens):
-        text = ''.join([self.decoder[token] for token in tokens])
-        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
-        return text
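
The deleted tokenizer is OpenAI's byte-level BPE implementation. A small round-trip sketch, assuming the module and its vocabulary file were still present (the input string is illustrative):

from feature_networks.clip.simple_tokenizer import SimpleTokenizer

tok = SimpleTokenizer()            # loads bpe_simple_vocab_16e6.txt.gz next to the module
ids = tok.encode("Hello, world!")  # ftfy/whitespace cleanup + lowercase, then BPE merges
print(ids)                         # indices into the 49,408-entry vocab (incl. SOT/EOT)
print(tok.decode(ids))             # byte-decodes and turns '</w>' markers back into spaces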
feature_networks/pretrained_builder.py
CHANGED
@@ -6,7 +6,6 @@ from torch.autograd import Function
 
 import timm
 
-from feature_networks import clip
 from feature_networks.vit import _make_vit_b16_backbone, forward_vit
 from feature_networks.constants import ALL_MODELS, VITS, EFFNETS, REGNETS
 from pg_modules.blocks import Interpolate
@@ -57,29 +56,6 @@ def _make_resnet_v2(model):
     pretrained.layer3 = model.stages[3]
     return pretrained
 
-def _make_resnet_clip(model):
-    pretrained = nn.Module()
-
-    # slightly more complicated than the standard resnet
-    pretrained.layer0 = nn.Sequential(
-        model.conv1,
-        model.bn1,
-        model.relu,
-        model.conv2,
-        model.bn2,
-        model.relu,
-        model.conv3,
-        model.bn3,
-        model.relu,
-        model.avgpool,
-        model.layer1,
-    )
-
-    pretrained.layer1 = model.layer2
-    pretrained.layer2 = model.layer3
-    pretrained.layer3 = model.layer4
-
-    return pretrained
 
 def _make_densenet(model):
     pretrained = nn.Module()
@@ -399,9 +375,7 @@ def _make_pretrained(backbone, verbose=False):
         model = timm.create_model(backbone, pretrained=True)
         pretrained = _make_vit(model, backbone)
 
-
-        model = clip.load('RN50', device='cpu', jit=False)[0].visual
-        pretrained = _make_resnet_clip(model)
+
 
     else:
        raise NotImplementedError('Wrong model name?')
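
A note on the last hunk: the deleted lines ran after pretrained = _make_vit(model, backbone) and overwrote the freshly built ViT backbone with a CLIP RN50, so removing them restores the ViT path. For reference, a hypothetical shape trace of what the removed _make_resnet_clip produced for a 224x224 input to CLIP's RN50 visual trunk (widths follow RN50's defaults):

# layer0 = 3-conv stem + avgpool + model.layer1 -> [N,  256, 56, 56]
# layer1 = model.layer2                         -> [N,  512, 28, 28]
# layer2 = model.layer3                         -> [N, 1024, 14, 14]
# layer3 = model.layer4                         -> [N, 2048,  7,  7]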