Upload 99 files
This view is limited to 50 files because it contains too many changes. See raw diff
- .gitattributes +49 -0
- SDXL_EcomID_ComfyUI/.DS_Store +0 -0
- SDXL_EcomID_ComfyUI/.gitattributes +13 -0
- SDXL_EcomID_ComfyUI/CrossAttentionPatch.py +263 -0
- SDXL_EcomID_ComfyUI/EcomID.py +857 -0
- SDXL_EcomID_ComfyUI/LICENSE.txt +201 -0
- SDXL_EcomID_ComfyUI/README.md +118 -0
- SDXL_EcomID_ComfyUI/__init__.py +3 -0
- SDXL_EcomID_ComfyUI/encoders.py +63 -0
- SDXL_EcomID_ComfyUI/eva_clip/.DS_Store +0 -0
- SDXL_EcomID_ComfyUI/eva_clip/__init__.py +11 -0
- SDXL_EcomID_ComfyUI/eva_clip/bpe_simple_vocab_16e6.txt.gz +3 -0
- SDXL_EcomID_ComfyUI/eva_clip/constants.py +2 -0
- SDXL_EcomID_ComfyUI/eva_clip/eva_vit_model.py +548 -0
- SDXL_EcomID_ComfyUI/eva_clip/factory.py +517 -0
- SDXL_EcomID_ComfyUI/eva_clip/hf_configs.py +57 -0
- SDXL_EcomID_ComfyUI/eva_clip/hf_model.py +248 -0
- SDXL_EcomID_ComfyUI/eva_clip/loss.py +138 -0
- SDXL_EcomID_ComfyUI/eva_clip/model.py +439 -0
- SDXL_EcomID_ComfyUI/eva_clip/model_configs/.DS_Store +0 -0
- SDXL_EcomID_ComfyUI/eva_clip/model_configs/EVA01-CLIP-B-16.json +19 -0
- SDXL_EcomID_ComfyUI/eva_clip/model_configs/EVA01-CLIP-g-14-plus.json +24 -0
- SDXL_EcomID_ComfyUI/eva_clip/model_configs/EVA01-CLIP-g-14.json +24 -0
- SDXL_EcomID_ComfyUI/eva_clip/model_configs/EVA02-CLIP-B-16.json +29 -0
- SDXL_EcomID_ComfyUI/eva_clip/model_configs/EVA02-CLIP-L-14-336.json +29 -0
- SDXL_EcomID_ComfyUI/eva_clip/model_configs/EVA02-CLIP-L-14.json +29 -0
- SDXL_EcomID_ComfyUI/eva_clip/model_configs/EVA02-CLIP-bigE-14-plus.json +25 -0
- SDXL_EcomID_ComfyUI/eva_clip/model_configs/EVA02-CLIP-bigE-14.json +25 -0
- SDXL_EcomID_ComfyUI/eva_clip/modified_resnet.py +181 -0
- SDXL_EcomID_ComfyUI/eva_clip/openai.py +144 -0
- SDXL_EcomID_ComfyUI/eva_clip/pretrained.py +332 -0
- SDXL_EcomID_ComfyUI/eva_clip/rope.py +137 -0
- SDXL_EcomID_ComfyUI/eva_clip/timm_model.py +122 -0
- SDXL_EcomID_ComfyUI/eva_clip/tokenizer.py +201 -0
- SDXL_EcomID_ComfyUI/eva_clip/transform.py +103 -0
- SDXL_EcomID_ComfyUI/eva_clip/transformer.py +737 -0
- SDXL_EcomID_ComfyUI/eva_clip/utils.py +326 -0
- SDXL_EcomID_ComfyUI/examples/Multi-ControlNet.png +3 -0
- SDXL_EcomID_ComfyUI/examples/ecomid_basic_workflow.json +800 -0
- SDXL_EcomID_ComfyUI/examples/ecomid_basic_workflow.png +3 -0
- SDXL_EcomID_ComfyUI/examples/keypoint.png +3 -0
- SDXL_EcomID_ComfyUI/gitattributes.txt +13 -0
- SDXL_EcomID_ComfyUI/images/.DS_Store +0 -0
- SDXL_EcomID_ComfyUI/images/images_alibaba.png +0 -0
- SDXL_EcomID_ComfyUI/images/images_alimama.png +0 -0
- SDXL_EcomID_ComfyUI/images/overflow.png +3 -0
- SDXL_EcomID_ComfyUI/images/show_case/.DS_Store +0 -0
- SDXL_EcomID_ComfyUI/images/show_case/1.png +3 -0
- SDXL_EcomID_ComfyUI/images/show_case/10.png +3 -0
- SDXL_EcomID_ComfyUI/images/show_case/11.png +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,52 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/examples/ecomid_basic_workflow.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/examples/keypoint.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/examples/Multi-ControlNet.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/overflow.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/1.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/10.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/11.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/12.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/13.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/14.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/15.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/16.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/17.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/18.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/19.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/2.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/21.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/22.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/23.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/24.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/26.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/27.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/28.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/29.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/3.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/30.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/31.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/32.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/34.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/35.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/36.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/38.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/39.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/4.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/40.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/41.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/42.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/43.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/44.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/45.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/46.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/47.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/48.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/49.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/50.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/6.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/7.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/8.png filter=lfs diff=lfs merge=lfs -text
+SDXL_EcomID_ComfyUI/images/show_case/9.png filter=lfs diff=lfs merge=lfs -text
SDXL_EcomID_ComfyUI/.DS_Store
ADDED
Binary file (8.2 kB)
SDXL_EcomID_ComfyUI/.gitattributes
ADDED
@@ -0,0 +1,13 @@
CrossAttentionPatch.py filter=lfs diff=lfs merge=lfs -text
EcomID.py filter=lfs diff=lfs merge=lfs -text
LICENSE filter=lfs diff=lfs merge=lfs -text
README.md filter=lfs diff=lfs merge=lfs -text
requirements.txt filter=lfs diff=lfs merge=lfs -text
__init__.py filter=lfs diff=lfs merge=lfs -text
encoders.py filter=lfs diff=lfs merge=lfs -text
eva_clip filter=lfs diff=lfs merge=lfs -text
examples filter=lfs diff=lfs merge=lfs -text
images filter=lfs diff=lfs merge=lfs -text
pyproject.toml filter=lfs diff=lfs merge=lfs -text
resampler.py filter=lfs diff=lfs merge=lfs -text
utils.py filter=lfs diff=lfs merge=lfs -text
SDXL_EcomID_ComfyUI/CrossAttentionPatch.py
ADDED
@@ -0,0 +1,263 @@
import torch
import math
import torch.nn.functional as F
from comfy.ldm.modules.attention import optimized_attention
from .utils import tensor_to_size

class Attn2Replace:
    def __init__(self, callback=None, **kwargs):
        self.callback = [callback]
        self.kwargs = [kwargs]

    def add(self, callback, **kwargs):
        self.callback.append(callback)
        self.kwargs.append(kwargs)

        for key, value in kwargs.items():
            setattr(self, key, value)

    def __call__(self, q, k, v, extra_options):
        dtype = q.dtype
        out = optimized_attention(q, k, v, extra_options["n_heads"])
        sigma = extra_options["sigmas"].detach().cpu()[0].item() if 'sigmas' in extra_options else 999999999.9

        for i, callback in enumerate(self.callback):
            if sigma <= self.kwargs[i]["sigma_start"] and sigma >= self.kwargs[i]["sigma_end"]:
                out = out + callback(out, q, k, v, extra_options, **self.kwargs[i])

        return out.to(dtype=dtype)

def pulid_attention(out, q, k, v, extra_options, module_key='', pulid=None, cond=None, uncond=None, weight=1.0,
                    ortho=False, ortho_v2=False, mask=None, **kwargs):
    k_key = module_key + "_to_k_ip"
    v_key = module_key + "_to_v_ip"

    dtype = q.dtype
    seq_len = q.shape[1]
    cond_or_uncond = extra_options["cond_or_uncond"]
    b = q.shape[0]
    batch_prompt = b // len(cond_or_uncond)
    _, _, oh, ow = extra_options["original_shape"]

    # conds = torch.cat([uncond.repeat(batch_prompt, 1, 1), cond.repeat(batch_prompt, 1, 1)], dim=0)
    # zero_tensor = torch.zeros((conds.size(0), num_zero, conds.size(-1)), dtype=conds.dtype, device=conds.device)
    # conds = torch.cat([conds, zero_tensor], dim=1)
    # ip_k = pulid.ip_layers.to_kvs[k_key](conds)
    # ip_v = pulid.ip_layers.to_kvs[v_key](conds)

    k_cond = pulid.ip_layers.to_kvs[k_key](cond).repeat(batch_prompt, 1, 1)
    k_uncond = pulid.ip_layers.to_kvs[k_key](uncond).repeat(batch_prompt, 1, 1)
    v_cond = pulid.ip_layers.to_kvs[v_key](cond).repeat(batch_prompt, 1, 1)
    v_uncond = pulid.ip_layers.to_kvs[v_key](uncond).repeat(batch_prompt, 1, 1)
    ip_k = torch.cat([(k_cond, k_uncond)[i] for i in cond_or_uncond], dim=0)
    ip_v = torch.cat([(v_cond, v_uncond)[i] for i in cond_or_uncond], dim=0)

    out_ip = optimized_attention(q, ip_k, ip_v, extra_options["n_heads"])

    if ortho:
        out = out.to(dtype=torch.float32)
        out_ip = out_ip.to(dtype=torch.float32)
        projection = (torch.sum((out * out_ip), dim=-2, keepdim=True) / torch.sum((out * out), dim=-2,
                                                                                  keepdim=True) * out)
        orthogonal = out_ip - projection
        out_ip = weight * orthogonal
    elif ortho_v2:
        out = out.to(dtype=torch.float32)
        out_ip = out_ip.to(dtype=torch.float32)
        attn_map = q @ ip_k.transpose(-2, -1)
        attn_mean = attn_map.softmax(dim=-1).mean(dim=1, keepdim=True)
        attn_mean = attn_mean[:, :, :5].sum(dim=-1, keepdim=True)
        projection = (torch.sum((out * out_ip), dim=-2, keepdim=True) / torch.sum((out * out), dim=-2,
                                                                                  keepdim=True) * out)
        orthogonal = out_ip + (attn_mean - 1) * projection
        out_ip = weight * orthogonal
    else:
        out_ip = out_ip * weight

    if mask is not None:
        mask_h = oh / math.sqrt(oh * ow / seq_len)
        mask_h = int(mask_h) + int((seq_len % int(mask_h)) != 0)
        mask_w = seq_len // mask_h

        mask = F.interpolate(mask.unsqueeze(1), size=(mask_h, mask_w), mode="bilinear").squeeze(1)
        mask = tensor_to_size(mask, batch_prompt)

        mask = mask.repeat(len(cond_or_uncond), 1, 1)
        mask = mask.view(mask.shape[0], -1, 1).repeat(1, 1, out.shape[2])

        # covers cases where extreme aspect ratios can cause the mask to have a wrong size
        mask_len = mask_h * mask_w
        if mask_len < seq_len:
            pad_len = seq_len - mask_len
            pad1 = pad_len // 2
            pad2 = pad_len - pad1
            mask = F.pad(mask, (0, 0, pad1, pad2), value=0.0)
        elif mask_len > seq_len:
            crop_start = (mask_len - seq_len) // 2
            mask = mask[:, crop_start:crop_start + seq_len, :]

        out_ip = out_ip * mask

    return out_ip.to(dtype=dtype)

def instantid_attention(out, q, k, v, extra_options, module_key='', ipadapter=None, weight=1.0, cond=None, cond_alt=None, uncond=None, weight_type="linear", mask=None, sigma_start=0.0, sigma_end=1.0, unfold_batch=False, embeds_scaling='V only', **kwargs):
    dtype = q.dtype
    cond_or_uncond = extra_options["cond_or_uncond"]
    block_type = extra_options["block"][0]
    #block_id = extra_options["block"][1]
    t_idx = extra_options["transformer_index"]
    layers = 11 if '101_to_k_ip' in ipadapter.ip_layers.to_kvs else 16
    k_key = module_key + "_to_k_ip"
    v_key = module_key + "_to_v_ip"

    # extra options for AnimateDiff
    ad_params = extra_options['ad_params'] if "ad_params" in extra_options else None

    b = q.shape[0]
    seq_len = q.shape[1]
    batch_prompt = b // len(cond_or_uncond)
    _, _, oh, ow = extra_options["original_shape"]

    if weight_type == 'ease in':
        weight = weight * (0.05 + 0.95 * (1 - t_idx / layers))
    elif weight_type == 'ease out':
        weight = weight * (0.05 + 0.95 * (t_idx / layers))
    elif weight_type == 'ease in-out':
        weight = weight * (0.05 + 0.95 * (1 - abs(t_idx - (layers/2)) / (layers/2)))
    elif weight_type == 'reverse in-out':
        weight = weight * (0.05 + 0.95 * (abs(t_idx - (layers/2)) / (layers/2)))
    elif weight_type == 'weak input' and block_type == 'input':
        weight = weight * 0.2
    elif weight_type == 'weak middle' and block_type == 'middle':
        weight = weight * 0.2
    elif weight_type == 'weak output' and block_type == 'output':
        weight = weight * 0.2
    elif weight_type == 'strong middle' and (block_type == 'input' or block_type == 'output'):
        weight = weight * 0.2
    elif isinstance(weight, dict):
        if t_idx not in weight:
            return 0

        weight = weight[t_idx]

        if cond_alt is not None and t_idx in cond_alt:
            cond = cond_alt[t_idx]
            del cond_alt

    if unfold_batch:
        # Check AnimateDiff context window
        if ad_params is not None and ad_params["sub_idxs"] is not None:
            if isinstance(weight, torch.Tensor):
                weight = tensor_to_size(weight, ad_params["full_length"])
                weight = torch.Tensor(weight[ad_params["sub_idxs"]])
                if torch.all(weight == 0):
                    return 0
                weight = weight.repeat(len(cond_or_uncond), 1, 1) # repeat for cond and uncond
            elif weight == 0:
                return 0

            # if image length matches or exceeds full_length get sub_idx images
            if cond.shape[0] >= ad_params["full_length"]:
                cond = torch.Tensor(cond[ad_params["sub_idxs"]])
                uncond = torch.Tensor(uncond[ad_params["sub_idxs"]])
            # otherwise get sub_idxs images
            else:
                cond = tensor_to_size(cond, ad_params["full_length"])
                uncond = tensor_to_size(uncond, ad_params["full_length"])
                cond = cond[ad_params["sub_idxs"]]
                uncond = uncond[ad_params["sub_idxs"]]
        else:
            if isinstance(weight, torch.Tensor):
                weight = tensor_to_size(weight, batch_prompt)
                if torch.all(weight == 0):
                    return 0
                weight = weight.repeat(len(cond_or_uncond), 1, 1) # repeat for cond and uncond
            elif weight == 0:
                return 0

            cond = tensor_to_size(cond, batch_prompt)
            uncond = tensor_to_size(uncond, batch_prompt)

        k_cond = ipadapter.ip_layers.to_kvs[k_key](cond)
        k_uncond = ipadapter.ip_layers.to_kvs[k_key](uncond)
        v_cond = ipadapter.ip_layers.to_kvs[v_key](cond)
        v_uncond = ipadapter.ip_layers.to_kvs[v_key](uncond)
    else:
        # TODO: should we always convert the weights to a tensor?
        if isinstance(weight, torch.Tensor):
            weight = tensor_to_size(weight, batch_prompt)
            if torch.all(weight == 0):
                return 0
            weight = weight.repeat(len(cond_or_uncond), 1, 1) # repeat for cond and uncond
        elif weight == 0:
            return 0

        k_cond = ipadapter.ip_layers.to_kvs[k_key](cond).repeat(batch_prompt, 1, 1)
        k_uncond = ipadapter.ip_layers.to_kvs[k_key](uncond).repeat(batch_prompt, 1, 1)
        v_cond = ipadapter.ip_layers.to_kvs[v_key](cond).repeat(batch_prompt, 1, 1)
        v_uncond = ipadapter.ip_layers.to_kvs[v_key](uncond).repeat(batch_prompt, 1, 1)

    ip_k = torch.cat([(k_cond, k_uncond)[i] for i in cond_or_uncond], dim=0)
    ip_v = torch.cat([(v_cond, v_uncond)[i] for i in cond_or_uncond], dim=0)

    if embeds_scaling == 'K+mean(V) w/ C penalty':
        scaling = float(ip_k.shape[2]) / 1280.0
        weight = weight * scaling
        ip_k = ip_k * weight
        ip_v_mean = torch.mean(ip_v, dim=1, keepdim=True)
        ip_v = (ip_v - ip_v_mean) + ip_v_mean * weight
        out_ip = optimized_attention(q, ip_k, ip_v, extra_options["n_heads"])
        del ip_v_mean
    elif embeds_scaling == 'K+V w/ C penalty':
        scaling = float(ip_k.shape[2]) / 1280.0
        weight = weight * scaling
        ip_k = ip_k * weight
        ip_v = ip_v * weight
        out_ip = optimized_attention(q, ip_k, ip_v, extra_options["n_heads"])
    elif embeds_scaling == 'K+V':
        ip_k = ip_k * weight
        ip_v = ip_v * weight
        out_ip = optimized_attention(q, ip_k, ip_v, extra_options["n_heads"])
    else:
        #ip_v = ip_v * weight
        out_ip = optimized_attention(q, ip_k, ip_v, extra_options["n_heads"])
        out_ip = out_ip * weight # I'm doing this to get the same results as before

    if mask is not None:
        mask_h = oh / math.sqrt(oh * ow / seq_len)
        mask_h = int(mask_h) + int((seq_len % int(mask_h)) != 0)
        mask_w = seq_len // mask_h

        # check if using AnimateDiff and sliding context window
        if (mask.shape[0] > 1 and ad_params is not None and ad_params["sub_idxs"] is not None):
            # if mask length matches or exceeds full_length, get sub_idx masks
            if mask.shape[0] >= ad_params["full_length"]:
                mask = torch.Tensor(mask[ad_params["sub_idxs"]])
                mask = F.interpolate(mask.unsqueeze(1), size=(mask_h, mask_w), mode="bilinear").squeeze(1)
            else:
                mask = F.interpolate(mask.unsqueeze(1), size=(mask_h, mask_w), mode="bilinear").squeeze(1)
                mask = tensor_to_size(mask, ad_params["full_length"])
                mask = mask[ad_params["sub_idxs"]]
        else:
            mask = F.interpolate(mask.unsqueeze(1), size=(mask_h, mask_w), mode="bilinear").squeeze(1)
            mask = tensor_to_size(mask, batch_prompt)

        mask = mask.repeat(len(cond_or_uncond), 1, 1)
        mask = mask.view(mask.shape[0], -1, 1).repeat(1, 1, out.shape[2])

        # covers cases where extreme aspect ratios can cause the mask to have a wrong size
        mask_len = mask_h * mask_w
        if mask_len < seq_len:
            pad_len = seq_len - mask_len
            pad1 = pad_len // 2
            pad2 = pad_len - pad1
            mask = F.pad(mask, (0, 0, pad1, pad2), value=0.0)
        elif mask_len > seq_len:
            crop_start = (mask_len - seq_len) // 2
            mask = mask[:, crop_start:crop_start+seq_len, :]

        out_ip = out_ip * mask

    #out = out + out_ip

    return out_ip.to(dtype=dtype)
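A note on the ortho / ortho_v2 branches above: both project the identity-attention output (out_ip) onto the base attention output (out) and re-weight only part of it. Below is a minimal standalone sketch of the plain ortho case; the tensor shapes and the 0.8 weight are illustrative values, not the node's real dimensions, and only torch is assumed.

import torch

# toy stand-ins for the base attention output and the identity (IP) attention output
out = torch.randn(1, 16, 8)      # (batch, sequence length, channels)
out_ip = torch.randn(1, 16, 8)
weight = 0.8

# same math as the `ortho` branch: remove the component of out_ip parallel to out
# (coefficient computed per channel by summing over the sequence dimension), then scale the rest
projection = torch.sum(out * out_ip, dim=-2, keepdim=True) / torch.sum(out * out, dim=-2, keepdim=True) * out
orthogonal = out_ip - projection
out_ip = weight * orthogonal
print(out_ip.shape)              # torch.Size([1, 16, 8])

The ortho_v2 branch keeps the same projection but rescales it by a mean softmax attention score over the first five identity tokens (attn_mean) before adding it back; in EcomID.py below, the "fidelity" method maps to ortho_v2 and "style" maps to ortho.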
SDXL_EcomID_ComfyUI/EcomID.py
ADDED
|
@@ -0,0 +1,857 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import os
|
| 3 |
+
import comfy.utils
|
| 4 |
+
import folder_paths
|
| 5 |
+
import numpy as np
|
| 6 |
+
import math
|
| 7 |
+
import cv2
|
| 8 |
+
import PIL.Image
|
| 9 |
+
from .resampler import Resampler
|
| 10 |
+
from .CrossAttentionPatch import Attn2Replace, instantid_attention, pulid_attention
|
| 11 |
+
from .utils import tensor_to_image
|
| 12 |
+
|
| 13 |
+
from insightface.app import FaceAnalysis
|
| 14 |
+
from facexlib.parsing import init_parsing_model
|
| 15 |
+
from facexlib.utils.face_restoration_helper import FaceRestoreHelper
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
try:
|
| 19 |
+
import torchvision.transforms.v2 as T
|
| 20 |
+
except ImportError:
|
| 21 |
+
import torchvision.transforms as T
|
| 22 |
+
|
| 23 |
+
import torch.nn.functional as F
|
| 24 |
+
from torch import nn
|
| 25 |
+
|
| 26 |
+
MODELS_DIR = os.path.join(folder_paths.models_dir, "instantid")
|
| 27 |
+
if "instantid" not in folder_paths.folder_names_and_paths:
|
| 28 |
+
current_paths = [MODELS_DIR]
|
| 29 |
+
else:
|
| 30 |
+
current_paths, _ = folder_paths.folder_names_and_paths["instantid"]
|
| 31 |
+
folder_paths.folder_names_and_paths["instantid"] = (current_paths, folder_paths.supported_pt_extensions)
|
| 32 |
+
|
| 33 |
+
INSIGHTFACE_DIR = os.path.join(folder_paths.models_dir, "insightface")
|
| 34 |
+
|
| 35 |
+
from .eva_clip.constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
|
| 36 |
+
|
| 37 |
+
from .encoders import IDEncoder
|
| 38 |
+
|
| 39 |
+
INSIGHTFACE_DIR = os.path.join(folder_paths.models_dir, "insightface")
|
| 40 |
+
|
| 41 |
+
MODELS_DIR = os.path.join(folder_paths.models_dir, "pulid")
|
| 42 |
+
if "pulid" not in folder_paths.folder_names_and_paths:
|
| 43 |
+
current_paths = [MODELS_DIR]
|
| 44 |
+
else:
|
| 45 |
+
current_paths, _ = folder_paths.folder_names_and_paths["pulid"]
|
| 46 |
+
folder_paths.folder_names_and_paths["pulid"] = (current_paths, folder_paths.supported_pt_extensions)
|
| 47 |
+
|
| 48 |
+
class PulidModel(nn.Module):
|
| 49 |
+
def __init__(self, model):
|
| 50 |
+
super().__init__()
|
| 51 |
+
|
| 52 |
+
self.model = model
|
| 53 |
+
self.image_proj_model = self.init_id_adapter()
|
| 54 |
+
self.image_proj_model.load_state_dict(model["image_proj"])
|
| 55 |
+
self.ip_layers = To_KV(model["ip_adapter"])
|
| 56 |
+
|
| 57 |
+
def init_id_adapter(self):
|
| 58 |
+
image_proj_model = IDEncoder()
|
| 59 |
+
return image_proj_model
|
| 60 |
+
|
| 61 |
+
def get_image_embeds(self, face_embed, clip_embeds):
|
| 62 |
+
embeds = self.image_proj_model(face_embed, clip_embeds)
|
| 63 |
+
return embeds
|
| 64 |
+
|
| 65 |
+
def image_to_tensor(image):
|
| 66 |
+
tensor = torch.clamp(torch.from_numpy(image).float() / 255., 0, 1)
|
| 67 |
+
tensor = tensor[..., [2, 1, 0]]
|
| 68 |
+
return tensor
|
| 69 |
+
|
| 70 |
+
def tensor_to_size(source, dest_size):
|
| 71 |
+
if isinstance(dest_size, torch.Tensor):
|
| 72 |
+
dest_size = dest_size.shape[0]
|
| 73 |
+
source_size = source.shape[0]
|
| 74 |
+
|
| 75 |
+
if source_size < dest_size:
|
| 76 |
+
shape = [dest_size - source_size] + [1] * (source.dim() - 1)
|
| 77 |
+
source = torch.cat((source, source[-1:].repeat(shape)), dim=0)
|
| 78 |
+
elif source_size > dest_size:
|
| 79 |
+
source = source[:dest_size]
|
| 80 |
+
|
| 81 |
+
return source
|
| 82 |
+
|
| 83 |
+
def to_gray(img):
|
| 84 |
+
x = 0.299 * img[:, 0:1] + 0.587 * img[:, 1:2] + 0.114 * img[:, 2:3]
|
| 85 |
+
x = x.repeat(1, 3, 1, 1)
|
| 86 |
+
return x
|
| 87 |
+
|
| 88 |
+
def draw_kps(image_pil, kps, color_list=[(255,0,0), (0,255,0), (0,0,255), (255,255,0), (255,0,255)]):
|
| 89 |
+
stickwidth = 4
|
| 90 |
+
limbSeq = np.array([[0, 2], [1, 2], [3, 2], [4, 2]])
|
| 91 |
+
kps = np.array(kps)
|
| 92 |
+
|
| 93 |
+
h, w, _ = image_pil.shape
|
| 94 |
+
out_img = np.zeros([h, w, 3])
|
| 95 |
+
|
| 96 |
+
for i in range(len(limbSeq)):
|
| 97 |
+
index = limbSeq[i]
|
| 98 |
+
color = color_list[index[0]]
|
| 99 |
+
|
| 100 |
+
x = kps[index][:, 0]
|
| 101 |
+
y = kps[index][:, 1]
|
| 102 |
+
length = ((x[0] - x[1]) ** 2 + (y[0] - y[1]) ** 2) ** 0.5
|
| 103 |
+
angle = math.degrees(math.atan2(y[0] - y[1], x[0] - x[1]))
|
| 104 |
+
polygon = cv2.ellipse2Poly((int(np.mean(x)), int(np.mean(y))), (int(length / 2), stickwidth), int(angle), 0, 360, 1)
|
| 105 |
+
out_img = cv2.fillConvexPoly(out_img.copy(), polygon, color)
|
| 106 |
+
out_img = (out_img * 0.6).astype(np.uint8)
|
| 107 |
+
|
| 108 |
+
for idx_kp, kp in enumerate(kps):
|
| 109 |
+
color = color_list[idx_kp]
|
| 110 |
+
x, y = kp
|
| 111 |
+
out_img = cv2.circle(out_img.copy(), (int(x), int(y)), 10, color, -1)
|
| 112 |
+
|
| 113 |
+
out_img_pil = PIL.Image.fromarray(out_img.astype(np.uint8))
|
| 114 |
+
return out_img_pil
|
| 115 |
+
|
| 116 |
+
class InstantID(torch.nn.Module):
|
| 117 |
+
def __init__(self, instantid_model, cross_attention_dim=1280, output_cross_attention_dim=1024, clip_embeddings_dim=512, clip_extra_context_tokens=16):
|
| 118 |
+
super().__init__()
|
| 119 |
+
|
| 120 |
+
self.clip_embeddings_dim = clip_embeddings_dim
|
| 121 |
+
self.cross_attention_dim = cross_attention_dim
|
| 122 |
+
self.output_cross_attention_dim = output_cross_attention_dim
|
| 123 |
+
self.clip_extra_context_tokens = clip_extra_context_tokens
|
| 124 |
+
|
| 125 |
+
self.image_proj_model = self.init_proj()
|
| 126 |
+
|
| 127 |
+
self.image_proj_model.load_state_dict(instantid_model["image_proj"])
|
| 128 |
+
self.ip_layers = To_KV(instantid_model["ip_adapter"])
|
| 129 |
+
|
| 130 |
+
def init_proj(self):
|
| 131 |
+
image_proj_model = Resampler(
|
| 132 |
+
dim=self.cross_attention_dim,
|
| 133 |
+
depth=4,
|
| 134 |
+
dim_head=64,
|
| 135 |
+
heads=20,
|
| 136 |
+
num_queries=self.clip_extra_context_tokens,
|
| 137 |
+
embedding_dim=self.clip_embeddings_dim,
|
| 138 |
+
output_dim=self.output_cross_attention_dim,
|
| 139 |
+
ff_mult=4
|
| 140 |
+
)
|
| 141 |
+
return image_proj_model
|
| 142 |
+
|
| 143 |
+
@torch.inference_mode()
|
| 144 |
+
def get_image_embeds(self, clip_embed, clip_embed_zeroed):
|
| 145 |
+
#image_prompt_embeds = clip_embed.clone().detach()
|
| 146 |
+
image_prompt_embeds = self.image_proj_model(clip_embed)
|
| 147 |
+
#uncond_image_prompt_embeds = clip_embed_zeroed.clone().detach()
|
| 148 |
+
uncond_image_prompt_embeds = self.image_proj_model(clip_embed_zeroed)
|
| 149 |
+
|
| 150 |
+
return image_prompt_embeds, uncond_image_prompt_embeds
|
| 151 |
+
|
| 152 |
+
class ImageProjModel(torch.nn.Module):
|
| 153 |
+
def __init__(self, cross_attention_dim=1024, clip_embeddings_dim=1024, clip_extra_context_tokens=4):
|
| 154 |
+
super().__init__()
|
| 155 |
+
|
| 156 |
+
self.cross_attention_dim = cross_attention_dim
|
| 157 |
+
self.clip_extra_context_tokens = clip_extra_context_tokens
|
| 158 |
+
self.proj = torch.nn.Linear(clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim)
|
| 159 |
+
self.norm = torch.nn.LayerNorm(cross_attention_dim)
|
| 160 |
+
|
| 161 |
+
def forward(self, image_embeds):
|
| 162 |
+
embeds = image_embeds
|
| 163 |
+
clip_extra_context_tokens = self.proj(embeds).reshape(-1, self.clip_extra_context_tokens, self.cross_attention_dim)
|
| 164 |
+
clip_extra_context_tokens = self.norm(clip_extra_context_tokens)
|
| 165 |
+
return clip_extra_context_tokens
|
| 166 |
+
|
| 167 |
+
class To_KV(torch.nn.Module):
|
| 168 |
+
def __init__(self, state_dict):
|
| 169 |
+
super().__init__()
|
| 170 |
+
|
| 171 |
+
self.to_kvs = torch.nn.ModuleDict()
|
| 172 |
+
for key, value in state_dict.items():
|
| 173 |
+
k = key.replace(".weight", "").replace(".", "_")
|
| 174 |
+
self.to_kvs[k] = torch.nn.Linear(value.shape[1], value.shape[0], bias=False)
|
| 175 |
+
self.to_kvs[k].weight.data = value
|
| 176 |
+
|
| 177 |
+
def _set_model_patch_replace(model, patch_kwargs, key):
|
| 178 |
+
to = model.model_options["transformer_options"].copy()
|
| 179 |
+
if "patches_replace" not in to:
|
| 180 |
+
to["patches_replace"] = {}
|
| 181 |
+
else:
|
| 182 |
+
to["patches_replace"] = to["patches_replace"].copy()
|
| 183 |
+
|
| 184 |
+
if "attn2" not in to["patches_replace"]:
|
| 185 |
+
to["patches_replace"]["attn2"] = {}
|
| 186 |
+
else:
|
| 187 |
+
to["patches_replace"]["attn2"] = to["patches_replace"]["attn2"].copy()
|
| 188 |
+
|
| 189 |
+
if key not in to["patches_replace"]["attn2"]:
|
| 190 |
+
to["patches_replace"]["attn2"][key] = Attn2Replace(pulid_attention, **patch_kwargs)
|
| 191 |
+
model.model_options["transformer_options"] = to
|
| 192 |
+
else:
|
| 193 |
+
to["patches_replace"]["attn2"][key].add(pulid_attention, **patch_kwargs)
|
| 194 |
+
|
| 195 |
+
class InstantID_IPA_ModelLoader:
|
| 196 |
+
@classmethod
|
| 197 |
+
def INPUT_TYPES(s):
|
| 198 |
+
return {"required": { "instantid_file": (folder_paths.get_filename_list("instantid"), )}}
|
| 199 |
+
|
| 200 |
+
RETURN_TYPES = ("INSTANTID",)
|
| 201 |
+
FUNCTION = "load_model"
|
| 202 |
+
CATEGORY = "EcomID"
|
| 203 |
+
|
| 204 |
+
def load_model(self, instantid_file):
|
| 205 |
+
ckpt_path = folder_paths.get_full_path("instantid", instantid_file)
|
| 206 |
+
|
| 207 |
+
model = comfy.utils.load_torch_file(ckpt_path, safe_load=True)
|
| 208 |
+
|
| 209 |
+
if ckpt_path.lower().endswith(".safetensors"):
|
| 210 |
+
st_model = {"image_proj": {}, "ip_adapter": {}}
|
| 211 |
+
for key in model.keys():
|
| 212 |
+
if key.startswith("image_proj."):
|
| 213 |
+
st_model["image_proj"][key.replace("image_proj.", "")] = model[key]
|
| 214 |
+
elif key.startswith("ip_adapter."):
|
| 215 |
+
st_model["ip_adapter"][key.replace("ip_adapter.", "")] = model[key]
|
| 216 |
+
model = st_model
|
| 217 |
+
|
| 218 |
+
model = InstantID(
|
| 219 |
+
model,
|
| 220 |
+
cross_attention_dim=1280,
|
| 221 |
+
output_cross_attention_dim=model["ip_adapter"]["1.to_k_ip.weight"].shape[1],
|
| 222 |
+
clip_embeddings_dim=512,
|
| 223 |
+
clip_extra_context_tokens=16,
|
| 224 |
+
)
|
| 225 |
+
|
| 226 |
+
return (model,)
|
| 227 |
+
|
| 228 |
+
def extractFeatures(insightface, image, extract_kps=False):
|
| 229 |
+
face_img = tensor_to_image(image)
|
| 230 |
+
out = []
|
| 231 |
+
|
| 232 |
+
insightface.det_model.input_size = (640,640) # reset the detection size
|
| 233 |
+
|
| 234 |
+
for i in range(face_img.shape[0]):
|
| 235 |
+
for size in [(size, size) for size in range(640, 128, -64)]:
|
| 236 |
+
insightface.det_model.input_size = size # TODO: hacky but seems to be working
|
| 237 |
+
face = insightface.get(face_img[i])
|
| 238 |
+
if face:
|
| 239 |
+
face = sorted(face, key=lambda x:(x['bbox'][2]-x['bbox'][0])*(x['bbox'][3]-x['bbox'][1]))[-1]
|
| 240 |
+
|
| 241 |
+
if extract_kps:
|
| 242 |
+
out.append(draw_kps(face_img[i], face['kps']))
|
| 243 |
+
else:
|
| 244 |
+
out.append(torch.from_numpy(face['embedding']).unsqueeze(0))
|
| 245 |
+
|
| 246 |
+
if 640 not in size:
|
| 247 |
+
print(f"\033[33mINFO: InsightFace detection resolution lowered to {size}.\033[0m")
|
| 248 |
+
break
|
| 249 |
+
|
| 250 |
+
if out:
|
| 251 |
+
if extract_kps:
|
| 252 |
+
out = torch.stack(T.ToTensor()(out), dim=0).permute([0,2,3,1])
|
| 253 |
+
else:
|
| 254 |
+
out = torch.stack(out, dim=0)
|
| 255 |
+
else:
|
| 256 |
+
out = None
|
| 257 |
+
|
| 258 |
+
return out
|
| 259 |
+
|
| 260 |
+
######
|
| 261 |
+
'''
|
| 262 |
+
node
|
| 263 |
+
'''
|
| 264 |
+
class EcomID_PulidModelLoader:
|
| 265 |
+
@classmethod
|
| 266 |
+
def INPUT_TYPES(s):
|
| 267 |
+
return {"required": { "pulid_file": (folder_paths.get_filename_list("pulid"), )}}
|
| 268 |
+
|
| 269 |
+
RETURN_TYPES = ("PULID",)
|
| 270 |
+
FUNCTION = "load_model"
|
| 271 |
+
CATEGORY = "EcomID"
|
| 272 |
+
|
| 273 |
+
def load_model(self, pulid_file):
|
| 274 |
+
ckpt_path = folder_paths.get_full_path("pulid", pulid_file)
|
| 275 |
+
|
| 276 |
+
model = comfy.utils.load_torch_file(ckpt_path, safe_load=True)
|
| 277 |
+
|
| 278 |
+
if ckpt_path.lower().endswith(".safetensors"):
|
| 279 |
+
st_model = {"image_proj": {}, "ip_adapter": {}}
|
| 280 |
+
for key in model.keys():
|
| 281 |
+
if key.startswith("image_proj."):
|
| 282 |
+
st_model["image_proj"][key.replace("image_proj.", "")] = model[key]
|
| 283 |
+
elif key.startswith("ip_adapter."):
|
| 284 |
+
st_model["ip_adapter"][key.replace("ip_adapter.", "")] = model[key]
|
| 285 |
+
model = st_model
|
| 286 |
+
|
| 287 |
+
return (model,)
|
| 288 |
+
|
| 289 |
+
class EcomIDEvaClipLoader:
|
| 290 |
+
@classmethod
|
| 291 |
+
def INPUT_TYPES(s):
|
| 292 |
+
return {
|
| 293 |
+
"required": {},
|
| 294 |
+
}
|
| 295 |
+
|
| 296 |
+
RETURN_TYPES = ("EVA_CLIP",)
|
| 297 |
+
FUNCTION = "load_eva_clip"
|
| 298 |
+
CATEGORY = "EcomID"
|
| 299 |
+
|
| 300 |
+
def load_eva_clip(self):
|
| 301 |
+
from .eva_clip.factory import create_model_and_transforms
|
| 302 |
+
|
| 303 |
+
model, _, _ = create_model_and_transforms('EVA02-CLIP-L-14-336', 'eva_clip', force_custom_clip=True)
|
| 304 |
+
|
| 305 |
+
model = model.visual
|
| 306 |
+
|
| 307 |
+
eva_transform_mean = getattr(model, 'image_mean', OPENAI_DATASET_MEAN)
|
| 308 |
+
eva_transform_std = getattr(model, 'image_std', OPENAI_DATASET_STD)
|
| 309 |
+
if not isinstance(eva_transform_mean, (list, tuple)):
|
| 310 |
+
model["image_mean"] = (eva_transform_mean,) * 3
|
| 311 |
+
if not isinstance(eva_transform_std, (list, tuple)):
|
| 312 |
+
model["image_std"] = (eva_transform_std,) * 3
|
| 313 |
+
|
| 314 |
+
return (model,)
|
| 315 |
+
|
| 316 |
+
class EcomIDFaceAnalysis:
|
| 317 |
+
@classmethod
|
| 318 |
+
def INPUT_TYPES(s):
|
| 319 |
+
return {
|
| 320 |
+
"required": {
|
| 321 |
+
"provider": (["CPU", "CUDA", "ROCM"], ),
|
| 322 |
+
},
|
| 323 |
+
}
|
| 324 |
+
|
| 325 |
+
RETURN_TYPES = ("FACEANALYSIS",)
|
| 326 |
+
FUNCTION = "load_insight_face"
|
| 327 |
+
CATEGORY = "EcomID"
|
| 328 |
+
|
| 329 |
+
def load_insight_face(self, provider):
|
| 330 |
+
model = FaceAnalysis(name="antelopev2", root=INSIGHTFACE_DIR, providers=[provider + 'ExecutionProvider',]) # alternative to buffalo_l
|
| 331 |
+
model.prepare(ctx_id=0, det_size=(640, 640))
|
| 332 |
+
|
| 333 |
+
return (model,)
|
| 334 |
+
|
| 335 |
+
class FaceKeypointsPreprocessor:
|
| 336 |
+
@classmethod
|
| 337 |
+
def INPUT_TYPES(s):
|
| 338 |
+
return {
|
| 339 |
+
"required": {
|
| 340 |
+
"faceanalysis": ("FACEANALYSIS", ),
|
| 341 |
+
"image": ("IMAGE", ),
|
| 342 |
+
},
|
| 343 |
+
}
|
| 344 |
+
RETURN_TYPES = ("IMAGE",)
|
| 345 |
+
FUNCTION = "preprocess_image"
|
| 346 |
+
CATEGORY = "EcomID"
|
| 347 |
+
|
| 348 |
+
def preprocess_image(self, faceanalysis, image):
|
| 349 |
+
face_kps = extractFeatures(faceanalysis, image, extract_kps=True)
|
| 350 |
+
|
| 351 |
+
if face_kps is None:
|
| 352 |
+
face_kps = torch.zeros_like(image)
|
| 353 |
+
print(f"\033[33mWARNING: no face detected, unable to extract the keypoints!\033[0m")
|
| 354 |
+
#raise Exception('Face Keypoints Image: No face detected.')
|
| 355 |
+
|
| 356 |
+
return (face_kps,)
|
| 357 |
+
|
| 358 |
+
def add_noise(image, factor):
|
| 359 |
+
seed = int(torch.sum(image).item()) % 1000000007
|
| 360 |
+
torch.manual_seed(seed)
|
| 361 |
+
mask = (torch.rand_like(image) < factor).float()
|
| 362 |
+
noise = torch.rand_like(image)
|
| 363 |
+
noise = torch.zeros_like(image) * (1-mask) + noise * mask
|
| 364 |
+
|
| 365 |
+
return factor*noise
|
| 366 |
+
|
| 367 |
+
class ApplyEcomID:
|
| 368 |
+
@classmethod
|
| 369 |
+
def INPUT_TYPES(s):
|
| 370 |
+
return {
|
| 371 |
+
"required": {
|
| 372 |
+
"instantid_ipa": ("INSTANTID", ),
|
| 373 |
+
"pulid": ("PULID", ),
|
| 374 |
+
"eva_clip": ("EVA_CLIP",),
|
| 375 |
+
"insightface": ("FACEANALYSIS", ),
|
| 376 |
+
"control_net": ("CONTROL_NET", ),
|
| 377 |
+
"image": ("IMAGE", ),
|
| 378 |
+
"model": ("MODEL", ),
|
| 379 |
+
"positive": ("CONDITIONING", ),
|
| 380 |
+
"negative": ("CONDITIONING", ),
|
| 381 |
+
"method": (["fidelity", "style", "neutral"],),
|
| 382 |
+
"weight": ("FLOAT", {"default": 1.0, "min": -1.0, "max": 5.0, "step": 0.05}),
|
| 383 |
+
"start_at": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
|
| 384 |
+
"end_at": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}),
|
| 385 |
+
},
|
| 386 |
+
"optional": {
|
| 387 |
+
"image_kps": ("IMAGE",),
|
| 388 |
+
"mask": ("MASK",),
|
| 389 |
+
}
|
| 390 |
+
}
|
| 391 |
+
|
| 392 |
+
RETURN_TYPES = ("MODEL", "CONDITIONING", "CONDITIONING",)
|
| 393 |
+
RETURN_NAMES = ("MODEL", "positive", "negative", )
|
| 394 |
+
FUNCTION = "apply_EcomID"
|
| 395 |
+
CATEGORY = "EcomID"
|
| 396 |
+
|
| 397 |
+
def apply_EcomID(self, instantid_ipa, pulid, eva_clip, insightface, control_net, image, model, positive, negative, start_at, end_at, weight=.8, ip_weight=None, cn_strength=None, noise=0.35, image_kps=None, mask=None, combine_embeds='average',
|
| 398 |
+
method=None, fidelity=None, projection=None):
|
| 399 |
+
self.dtype = torch.float16 if comfy.model_management.should_use_fp16() else torch.float32
|
| 400 |
+
self.device = comfy.model_management.get_torch_device()
|
| 401 |
+
|
| 402 |
+
ip_weight = weight if ip_weight is None else ip_weight
|
| 403 |
+
cn_strength = weight if cn_strength is None else cn_strength
|
| 404 |
+
|
| 405 |
+
face_embed = extractFeatures(insightface, image)
|
| 406 |
+
if face_embed is None:
|
| 407 |
+
raise Exception('Reference Image: No face detected.')
|
| 408 |
+
|
| 409 |
+
# if no keypoints image is provided, use the image itself (only the first one in the batch)
|
| 410 |
+
face_kps = extractFeatures(insightface, image_kps if image_kps is not None else image[0].unsqueeze(0), extract_kps=True)
|
| 411 |
+
|
| 412 |
+
if face_kps is None:
|
| 413 |
+
face_kps = torch.zeros_like(image) if image_kps is None else image_kps
|
| 414 |
+
print(f"\033[33mWARNING: No face detected in the keypoints image!\033[0m")
|
| 415 |
+
|
| 416 |
+
clip_embed = face_embed
|
| 417 |
+
# InstantID works better with averaged embeds (TODO: needs testing)
|
| 418 |
+
if clip_embed.shape[0] > 1:
|
| 419 |
+
if combine_embeds == 'average':
|
| 420 |
+
clip_embed = torch.mean(clip_embed, dim=0).unsqueeze(0)
|
| 421 |
+
elif combine_embeds == 'norm average':
|
| 422 |
+
clip_embed = torch.mean(clip_embed / torch.norm(clip_embed, dim=0, keepdim=True), dim=0).unsqueeze(0)
|
| 423 |
+
|
| 424 |
+
if noise > 0:
|
| 425 |
+
seed = int(torch.sum(clip_embed).item()) % 1000000007
|
| 426 |
+
torch.manual_seed(seed)
|
| 427 |
+
clip_embed_zeroed = noise * torch.rand_like(clip_embed)
|
| 428 |
+
#clip_embed_zeroed = add_noise(clip_embed, noise)
|
| 429 |
+
else:
|
| 430 |
+
clip_embed_zeroed = torch.zeros_like(clip_embed)
|
| 431 |
+
|
| 432 |
+
# 1: patch the attention
|
| 433 |
+
self.instantid = instantid_ipa
|
| 434 |
+
self.instantid.to(self.device, dtype=self.dtype)
|
| 435 |
+
|
| 436 |
+
# 提取第一种embedding
|
| 437 |
+
image_prompt_embeds, uncond_image_prompt_embeds = self.instantid.get_image_embeds(clip_embed.to(self.device, dtype=self.dtype), clip_embed_zeroed.to(self.device, dtype=self.dtype))
|
| 438 |
+
|
| 439 |
+
image_prompt_embeds = image_prompt_embeds.to(self.device, dtype=self.dtype)
|
| 440 |
+
uncond_image_prompt_embeds = uncond_image_prompt_embeds.to(self.device, dtype=self.dtype)
|
| 441 |
+
|
| 442 |
+
work_model = model.clone()
|
| 443 |
+
|
| 444 |
+
if mask is not None:
|
| 445 |
+
mask = mask.to(self.device)
|
| 446 |
+
|
| 447 |
+
device = comfy.model_management.get_torch_device()
|
| 448 |
+
dtype = comfy.model_management.unet_dtype()
|
| 449 |
+
if dtype not in [torch.float32, torch.float16, torch.bfloat16]:
|
| 450 |
+
dtype = torch.float16 if comfy.model_management.should_use_fp16() else torch.float32
|
| 451 |
+
|
| 452 |
+
eva_clip.to(device, dtype=dtype)
|
| 453 |
+
pulid_model = PulidModel(pulid).to(device, dtype=dtype)
|
| 454 |
+
|
| 455 |
+
if mask is not None:
|
| 456 |
+
if mask.dim() > 3:
|
| 457 |
+
mask = mask.squeeze(-1)
|
| 458 |
+
elif mask.dim() < 3:
|
| 459 |
+
mask = mask.unsqueeze(0)
|
| 460 |
+
mask = mask.to(device, dtype=dtype)
|
| 461 |
+
|
| 462 |
+
if method == "fidelity" or projection == "ortho_v2":
|
| 463 |
+
num_zero = 8
|
| 464 |
+
ortho = False
|
| 465 |
+
ortho_v2 = True
|
| 466 |
+
elif method == "style" or projection == "ortho":
|
| 467 |
+
num_zero = 16
|
| 468 |
+
ortho = True
|
| 469 |
+
ortho_v2 = False
|
| 470 |
+
else:
|
| 471 |
+
num_zero = 0
|
| 472 |
+
ortho = False
|
| 473 |
+
ortho_v2 = False
|
| 474 |
+
|
| 475 |
+
if fidelity is not None:
|
| 476 |
+
num_zero = fidelity
|
| 477 |
+
|
| 478 |
+
# face_analysis.det_model.input_size = (640,640)
|
| 479 |
+
image = tensor_to_image(image)
|
| 480 |
+
|
| 481 |
+
face_helper = FaceRestoreHelper(
|
| 482 |
+
upscale_factor=1,
|
| 483 |
+
face_size=512,
|
| 484 |
+
crop_ratio=(1, 1),
|
| 485 |
+
det_model='retinaface_resnet50',
|
| 486 |
+
save_ext='png',
|
| 487 |
+
device=device,
|
| 488 |
+
)
|
| 489 |
+
|
| 490 |
+
face_helper.face_parse = None
|
| 491 |
+
face_helper.face_parse = init_parsing_model(model_name='bisenet', device=device)
|
| 492 |
+
|
| 493 |
+
bg_label = [0, 16, 18, 7, 8, 9, 14, 15]
|
| 494 |
+
cond = []
|
| 495 |
+
uncond = []
|
| 496 |
+
|
| 497 |
+
for i in range(image.shape[0]):
|
| 498 |
+
# get insightface embeddings
|
| 499 |
+
iface_embeds = None
|
| 500 |
+
for size in [(size, size) for size in range(640, 256, -64)]:
|
| 501 |
+
insightface.det_model.input_size = size
|
| 502 |
+
face = insightface.get(image[i])
|
| 503 |
+
if face:
|
| 504 |
+
face = sorted(face, key=lambda x: (x.bbox[2] - x.bbox[0]) * (x.bbox[3] - x.bbox[1]), reverse=True)[
|
| 505 |
+
-1]
|
| 506 |
+
iface_embeds = torch.from_numpy(face.embedding).unsqueeze(0).to(device, dtype=dtype)
|
| 507 |
+
break
|
| 508 |
+
else:
|
| 509 |
+
raise Exception('insightface: No face detected.')
|
| 510 |
+
|
| 511 |
+
# get eva_clip embeddings
|
| 512 |
+
face_helper.clean_all()
|
| 513 |
+
face_helper.read_image(image[i])
|
| 514 |
+
face_helper.get_face_landmarks_5(only_center_face=True)
|
| 515 |
+
face_helper.align_warp_face()
|
| 516 |
+
|
| 517 |
+
if len(face_helper.cropped_faces) == 0:
|
| 518 |
+
raise Exception('facexlib: No face detected.')
|
| 519 |
+
|
| 520 |
+
face = face_helper.cropped_faces[0]
|
| 521 |
+
face = image_to_tensor(face).unsqueeze(0).permute(0, 3, 1, 2).to(device)
|
| 522 |
+
parsing_out = \
|
| 523 |
+
face_helper.face_parse(T.functional.normalize(face, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]))[0]
|
| 524 |
+
parsing_out = parsing_out.argmax(dim=1, keepdim=True)
|
| 525 |
+
bg = sum(parsing_out == i for i in bg_label).bool()
|
| 526 |
+
white_image = torch.ones_like(face)
|
| 527 |
+
face_features_image = torch.where(bg, white_image, to_gray(face))
|
| 528 |
+
face_features_image = T.functional.resize(face_features_image, eva_clip.image_size,
|
| 529 |
+
T.InterpolationMode.BICUBIC).to(device, dtype=dtype)
|
| 530 |
+
face_features_image = T.functional.normalize(face_features_image, eva_clip.image_mean, eva_clip.image_std)
|
| 531 |
+
|
| 532 |
+
id_cond_vit, id_vit_hidden = eva_clip(face_features_image, return_all_features=False, return_hidden=True,
|
| 533 |
+
shuffle=False)
|
| 534 |
+
id_cond_vit = id_cond_vit.to(device, dtype=dtype)
|
| 535 |
+
for idx in range(len(id_vit_hidden)):
|
| 536 |
+
id_vit_hidden[idx] = id_vit_hidden[idx].to(device, dtype=dtype)
|
| 537 |
+
|
| 538 |
+
id_cond_vit = torch.div(id_cond_vit, torch.norm(id_cond_vit, 2, 1, True))
|
| 539 |
+
|
| 540 |
+
# combine embeddings
|
| 541 |
+
id_cond = torch.cat([iface_embeds, id_cond_vit], dim=-1)
|
| 542 |
+
if noise == 0:
|
| 543 |
+
id_uncond = torch.zeros_like(id_cond)
|
| 544 |
+
else:
|
| 545 |
+
id_uncond = torch.rand_like(id_cond) * noise
|
| 546 |
+
id_vit_hidden_uncond = []
|
| 547 |
+
for idx in range(len(id_vit_hidden)):
|
| 548 |
+
if noise == 0:
|
| 549 |
+
id_vit_hidden_uncond.append(torch.zeros_like(id_vit_hidden[idx]))
|
| 550 |
+
else:
|
| 551 |
+
id_vit_hidden_uncond.append(torch.rand_like(id_vit_hidden[idx]) * noise)
|
| 552 |
+
# 提取第二种embedding
|
| 553 |
+
cond.append(pulid_model.get_image_embeds(id_cond, id_vit_hidden))
|
| 554 |
+
uncond.append(pulid_model.get_image_embeds(id_uncond, id_vit_hidden_uncond))
|
| 555 |
+
|
| 556 |
+
# average embeddings
|
| 557 |
+
cond = torch.cat(cond).to(device, dtype=dtype)
|
| 558 |
+
uncond = torch.cat(uncond).to(device, dtype=dtype)
|
| 559 |
+
if cond.shape[0] > 1:
|
| 560 |
+
cond = torch.mean(cond, dim=0, keepdim=True)
|
| 561 |
+
uncond = torch.mean(uncond, dim=0, keepdim=True)
|
| 562 |
+
|
| 563 |
+
if num_zero > 0:
|
| 564 |
+
if noise == 0:
|
| 565 |
+
zero_tensor = torch.zeros((cond.size(0), num_zero, cond.size(-1)), dtype=dtype, device=device)
|
| 566 |
+
else:
|
| 567 |
+
zero_tensor = torch.rand((cond.size(0), num_zero, cond.size(-1)), dtype=dtype, device=device) * noise
|
| 568 |
+
cond = torch.cat([cond, zero_tensor], dim=1)
|
| 569 |
+
uncond = torch.cat([uncond, zero_tensor], dim=1)
|
| 570 |
+
|
| 571 |
+
sigma_start = work_model.get_model_object("model_sampling").percent_to_sigma(start_at)
|
| 572 |
+
sigma_end = work_model.get_model_object("model_sampling").percent_to_sigma(end_at)
|
| 573 |
+
|
| 574 |
+
patch_kwargs = {
|
| 575 |
+
"pulid": pulid_model,
|
| 576 |
+
"weight": ip_weight,
|
| 577 |
+
"cond": cond,
|
| 578 |
+
"uncond": uncond,
|
| 579 |
+
"sigma_start": sigma_start,
|
| 580 |
+
"sigma_end": sigma_end,
|
| 581 |
+
"ortho": ortho,
|
| 582 |
+
"ortho_v2": ortho_v2,
|
| 583 |
+
"mask": mask,
|
| 584 |
+
}
|
| 585 |
+
|
| 586 |
+
number = 0
|
| 587 |
+
for id in [4, 5, 7, 8]: # id of input_blocks that have cross attention
|
| 588 |
+
block_indices = range(2) if id in [4, 5] else range(10) # transformer_depth
|
| 589 |
+
for index in block_indices:
|
| 590 |
+
patch_kwargs["module_key"] = str(number * 2 + 1)
|
| 591 |
+
_set_model_patch_replace(work_model, patch_kwargs, ("input", id, index))
|
| 592 |
+
number += 1
|
| 593 |
+
for id in range(6): # id of output_blocks that have cross attention
|
| 594 |
+
block_indices = range(2) if id in [3, 4, 5] else range(10) # transformer_depth
|
| 595 |
+
for index in block_indices:
|
| 596 |
+
patch_kwargs["module_key"] = str(number * 2 + 1)
|
| 597 |
+
_set_model_patch_replace(work_model, patch_kwargs, ("output", id, index))
|
| 598 |
+
number += 1
|
| 599 |
+
for index in range(10):
|
| 600 |
+
patch_kwargs["module_key"] = str(number * 2 + 1)
|
| 601 |
+
_set_model_patch_replace(work_model, patch_kwargs, ("middle", 0, index))
|
| 602 |
+
number += 1
|
| 603 |
+
|
| 604 |
+
# 2: do the ControlNet
|
| 605 |
+
if mask is not None and len(mask.shape) < 3:
|
| 606 |
+
mask = mask.unsqueeze(0)
|
| 607 |
+
|
| 608 |
+
cnets = {}
|
| 609 |
+
cond_uncond = []
|
| 610 |
+
|
| 611 |
+
is_cond = True
|
| 612 |
+
for conditioning in [positive, negative]:
|
| 613 |
+
c = []
|
| 614 |
+
for t in conditioning:
|
| 615 |
+
d = t[1].copy()
|
| 616 |
+
|
| 617 |
+
prev_cnet = d.get('control', None)
|
| 618 |
+
if prev_cnet in cnets:
|
| 619 |
+
c_net = cnets[prev_cnet]
|
| 620 |
+
else:
|
| 621 |
+
c_net = control_net.copy().set_cond_hint(face_kps.movedim(-1,1), cn_strength, (start_at, end_at))
|
| 622 |
+
c_net.set_previous_controlnet(prev_cnet)
|
| 623 |
+
cnets[prev_cnet] = c_net
|
| 624 |
+
|
| 625 |
+
d['control'] = c_net
|
| 626 |
+
d['control_apply_to_uncond'] = False
|
| 627 |
+
d['cross_attn_controlnet'] = image_prompt_embeds.to(comfy.model_management.intermediate_device()) if is_cond else uncond_image_prompt_embeds.to(comfy.model_management.intermediate_device())
|
| 628 |
+
|
| 629 |
+
if mask is not None and is_cond:
|
| 630 |
+
d['mask'] = mask
|
| 631 |
+
d['set_area_to_bounds'] = False
|
| 632 |
+
|
| 633 |
+
n = [t[0], d]
|
| 634 |
+
c.append(n)
|
| 635 |
+
cond_uncond.append(c)
|
| 636 |
+
is_cond = False
|
| 637 |
+
|
| 638 |
+
return(work_model, cond_uncond[0], cond_uncond[1], )
|
| 639 |
+
|
| 640 |
+
class ApplyEcomIDAdvanced(ApplyEcomID):
|
| 641 |
+
@classmethod
|
| 642 |
+
def INPUT_TYPES(s):
|
| 643 |
+
return {
|
| 644 |
+
"required": {
|
| 645 |
+
"instantid_ipa": ("INSTANTID", ),
|
| 646 |
+
"pulid": ("PULID",),
|
| 647 |
+
"eva_clip": ("EVA_CLIP",),
|
| 648 |
+
"insightface": ("FACEANALYSIS", ),
|
| 649 |
+
"control_net": ("CONTROL_NET", ),
|
| 650 |
+
"image": ("IMAGE", ),
|
| 651 |
+
"model": ("MODEL", ),
|
| 652 |
+
"positive": ("CONDITIONING", ),
|
| 653 |
+
"negative": ("CONDITIONING", ),
|
| 654 |
+
"method": (["fidelity", "style", "neutral"],),
|
| 655 |
+
"start_at": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
|
| 656 |
+
"end_at": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}),
|
| 657 |
+
"ip_weight": ("FLOAT", {"default": .8, "min": 0.0, "max": 3.0, "step": 0.01, }),
|
| 658 |
+
"cn_strength": ("FLOAT", {"default": .8, "min": 0.0, "max": 10.0, "step": 0.01, }),
|
| 659 |
+
"noise": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.1, }),
|
| 660 |
+
"combine_embeds": (['average', 'norm average', 'concat'], {"default": 'average'}),
|
| 661 |
+
},
|
| 662 |
+
"optional": {
|
| 663 |
+
"image_kps": ("IMAGE",),
|
| 664 |
+
"mask": ("MASK",),
|
| 665 |
+
}
|
| 666 |
+
}
|
| 667 |
+
|
| 668 |
+
class InstantIDAttentionPatch:
|
| 669 |
+
@classmethod
|
| 670 |
+
def INPUT_TYPES(s):
|
| 671 |
+
return {
|
| 672 |
+
"required": {
|
| 673 |
+
"instantid": ("INSTANTID", ),
|
| 674 |
+
"insightface": ("FACEANALYSIS", ),
|
| 675 |
+
"image": ("IMAGE", ),
|
| 676 |
+
"model": ("MODEL", ),
|
| 677 |
+
"weight": ("FLOAT", {"default": 1.0, "min": -1.0, "max": 3.0, "step": 0.01, }),
|
| 678 |
+
"start_at": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001, }),
|
| 679 |
+
"end_at": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001, }),
|
| 680 |
+
"noise": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.1, }),
|
| 681 |
+
},
|
| 682 |
+
"optional": {
|
| 683 |
+
"mask": ("MASK",),
|
| 684 |
+
}
|
| 685 |
+
}
|
| 686 |
+
|
| 687 |
+
RETURN_TYPES = ("MODEL", "FACE_EMBEDS")
|
| 688 |
+
FUNCTION = "patch_attention"
|
| 689 |
+
CATEGORY = "EcomID"
|
| 690 |
+
|
| 691 |
+
def patch_attention(self, instantid, insightface, image, model, weight, start_at, end_at, noise=0.0, mask=None):
|
| 692 |
+
self.dtype = torch.float16 if comfy.model_management.should_use_fp16() else torch.float32
|
| 693 |
+
self.device = comfy.model_management.get_torch_device()
|
| 694 |
+
|
| 695 |
+
face_embed = extractFeatures(insightface, image)
|
| 696 |
+
if face_embed is None:
|
| 697 |
+
raise Exception('Reference Image: No face detected.')
|
| 698 |
+
|
| 699 |
+
clip_embed = face_embed
|
| 700 |
+
# InstantID works better with averaged embeds (TODO: needs testing)
|
| 701 |
+
if clip_embed.shape[0] > 1:
|
| 702 |
+
clip_embed = torch.mean(clip_embed, dim=0).unsqueeze(0)
|
| 703 |
+
|
| 704 |
+
if noise > 0:
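# derive the RNG seed from the embedding itself so the injected noise is
# reproducible for a given reference face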
|
| 705 |
+
seed = int(torch.sum(clip_embed).item()) % 1000000007
|
| 706 |
+
torch.manual_seed(seed)
|
| 707 |
+
clip_embed_zeroed = noise * torch.rand_like(clip_embed)
|
| 708 |
+
else:
|
| 709 |
+
clip_embed_zeroed = torch.zeros_like(clip_embed)
|
| 710 |
+
|
| 711 |
+
# 1: patch the attention
|
| 712 |
+
self.instantid = instantid
|
| 713 |
+
self.instantid.to(self.device, dtype=self.dtype)
|
| 714 |
+
|
| 715 |
+
image_prompt_embeds, uncond_image_prompt_embeds = self.instantid.get_image_embeds(clip_embed.to(self.device, dtype=self.dtype), clip_embed_zeroed.to(self.device, dtype=self.dtype))
|
| 716 |
+
|
| 717 |
+
image_prompt_embeds = image_prompt_embeds.to(self.device, dtype=self.dtype)
|
| 718 |
+
uncond_image_prompt_embeds = uncond_image_prompt_embeds.to(self.device, dtype=self.dtype)
|
| 719 |
+
|
| 720 |
+
if weight == 0:
|
| 721 |
+
return (model, { "cond": image_prompt_embeds, "uncond": uncond_image_prompt_embeds } )
|
| 722 |
+
|
| 723 |
+
work_model = model.clone()
|
| 724 |
+
|
| 725 |
+
sigma_start = model.get_model_object("model_sampling").percent_to_sigma(start_at)
|
| 726 |
+
sigma_end = model.get_model_object("model_sampling").percent_to_sigma(end_at)
|
| 727 |
+
|
| 728 |
+
if mask is not None:
|
| 729 |
+
mask = mask.to(self.device)
|
| 730 |
+
|
| 731 |
+
patch_kwargs = {
|
| 732 |
+
"weight": weight,
|
| 733 |
+
"ipadapter": self.instantid,
|
| 734 |
+
"cond": image_prompt_embeds,
|
| 735 |
+
"uncond": uncond_image_prompt_embeds,
|
| 736 |
+
"mask": mask,
|
| 737 |
+
"sigma_start": sigma_start,
|
| 738 |
+
"sigma_end": sigma_end,
|
| 739 |
+
}
|
| 740 |
+
|
| 741 |
+
number = 0
|
| 742 |
+
for id in [4,5,7,8]: # id of input_blocks that have cross attention
|
| 743 |
+
block_indices = range(2) if id in [4, 5] else range(10) # transformer_depth
|
| 744 |
+
for index in block_indices:
|
| 745 |
+
patch_kwargs["module_key"] = str(number*2+1)
|
| 746 |
+
_set_model_patch_replace(work_model, patch_kwargs, ("input", id, index))
|
| 747 |
+
number += 1
|
| 748 |
+
for id in range(6): # id of output_blocks that have cross attention
|
| 749 |
+
block_indices = range(2) if id in [3, 4, 5] else range(10) # transformer_depth
|
| 750 |
+
for index in block_indices:
|
| 751 |
+
patch_kwargs["module_key"] = str(number*2+1)
|
| 752 |
+
_set_model_patch_replace(work_model, patch_kwargs, ("output", id, index))
|
| 753 |
+
number += 1
|
| 754 |
+
for index in range(10):
|
| 755 |
+
patch_kwargs["module_key"] = str(number*2+1)
|
| 756 |
+
_set_model_patch_replace(work_model, patch_kwargs, ("middle", 0, index))
|
| 757 |
+
number += 1
|
| 758 |
+
|
| 759 |
+
return(work_model, { "cond": image_prompt_embeds, "uncond": uncond_image_prompt_embeds }, )
|
| 760 |
+
|
| 761 |
+
class ApplyInstantIDControlNet:
|
| 762 |
+
@classmethod
|
| 763 |
+
def INPUT_TYPES(s):
|
| 764 |
+
return {
|
| 765 |
+
"required": {
|
| 766 |
+
"face_embeds": ("FACE_EMBEDS", ),
|
| 767 |
+
"control_net": ("CONTROL_NET", ),
|
| 768 |
+
"image_kps": ("IMAGE", ),
|
| 769 |
+
"positive": ("CONDITIONING", ),
|
| 770 |
+
"negative": ("CONDITIONING", ),
|
| 771 |
+
"strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01, }),
|
| 772 |
+
"start_at": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001, }),
|
| 773 |
+
"end_at": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001, }),
|
| 774 |
+
},
|
| 775 |
+
"optional": {
|
| 776 |
+
"mask": ("MASK",),
|
| 777 |
+
}
|
| 778 |
+
}
|
| 779 |
+
|
| 780 |
+
RETURN_TYPES = ("CONDITIONING", "CONDITIONING",)
|
| 781 |
+
RETURN_NAMES = ("positive", "negative", )
|
| 782 |
+
FUNCTION = "apply_controlnet"
|
| 783 |
+
CATEGORY = "EcomID"
|
| 784 |
+
|
| 785 |
+
def apply_controlnet(self, face_embeds, control_net, image_kps, positive, negative, strength, start_at, end_at, mask=None):
|
| 786 |
+
self.device = comfy.model_management.get_torch_device()
|
| 787 |
+
|
| 788 |
+
if strength == 0:
|
| 789 |
+
return (positive, negative)
|
| 790 |
+
|
| 791 |
+
if mask is not None:
|
| 792 |
+
mask = mask.to(self.device)
|
| 793 |
+
|
| 794 |
+
if mask is not None and len(mask.shape) < 3:
|
| 795 |
+
mask = mask.unsqueeze(0)
|
| 796 |
+
|
| 797 |
+
image_prompt_embeds = face_embeds['cond']
|
| 798 |
+
uncond_image_prompt_embeds = face_embeds['uncond']
|
| 799 |
+
|
| 800 |
+
cnets = {}
|
| 801 |
+
cond_uncond = []
|
| 802 |
+
control_hint = image_kps.movedim(-1,1)
|
| 803 |
+
|
| 804 |
+
is_cond = True
|
| 805 |
+
for conditioning in [positive, negative]:
|
| 806 |
+
c = []
|
| 807 |
+
for t in conditioning:
|
| 808 |
+
d = t[1].copy()
|
| 809 |
+
|
| 810 |
+
prev_cnet = d.get('control', None)
|
| 811 |
+
if prev_cnet in cnets:
|
| 812 |
+
c_net = cnets[prev_cnet]
|
| 813 |
+
else:
|
| 814 |
+
c_net = control_net.copy().set_cond_hint(control_hint, strength, (start_at, end_at))
|
| 815 |
+
c_net.set_previous_controlnet(prev_cnet)
|
| 816 |
+
cnets[prev_cnet] = c_net
|
| 817 |
+
|
| 818 |
+
d['control'] = c_net
|
| 819 |
+
d['control_apply_to_uncond'] = False
|
| 820 |
+
d['cross_attn_controlnet'] = image_prompt_embeds.to(comfy.model_management.intermediate_device()) if is_cond else uncond_image_prompt_embeds.to(comfy.model_management.intermediate_device())
|
| 821 |
+
|
| 822 |
+
if mask is not None and is_cond:
|
| 823 |
+
d['mask'] = mask
|
| 824 |
+
d['set_area_to_bounds'] = False
|
| 825 |
+
|
| 826 |
+
n = [t[0], d]
|
| 827 |
+
c.append(n)
|
| 828 |
+
cond_uncond.append(c)
|
| 829 |
+
is_cond = False
|
| 830 |
+
|
| 831 |
+
return(cond_uncond[0], cond_uncond[1])
|
| 832 |
+
|
| 833 |
+
NODE_CLASS_MAPPINGS = {
|
| 834 |
+
"InstantID_IPA_ModelLoader": InstantID_IPA_ModelLoader,
|
| 835 |
+
"EcomID_PulidModelLoader": EcomID_PulidModelLoader,
|
| 836 |
+
"EcomIDEvaClipLoader": EcomIDEvaClipLoader,
|
| 837 |
+
"EcomIDFaceAnalysis": EcomIDFaceAnalysis,
|
| 838 |
+
"ApplyEcomID": ApplyEcomID,
|
| 839 |
+
"ApplyEcomIDAdvanced": ApplyEcomIDAdvanced,
|
| 840 |
+
"FaceKeypointsPreprocessor": FaceKeypointsPreprocessor,
|
| 841 |
+
|
| 842 |
+
"InstantIDAttentionPatch": InstantIDAttentionPatch,
|
| 843 |
+
"ApplyInstantIDControlNet": ApplyInstantIDControlNet,
|
| 844 |
+
}
|
| 845 |
+
|
| 846 |
+
NODE_DISPLAY_NAME_MAPPINGS = {
|
| 847 |
+
"InstantID_IPA_ModelLoader": "Load InstantID Ipa Model (EcomID)",
|
| 848 |
+
"EcomIDFaceAnalysis": "EcomID Face Analysis",
|
| 849 |
+
"EcomID_PulidModelLoader": "Load PuLID Model (EcomID)",
|
| 850 |
+
"EcomIDEvaClipLoader": "Load Eva Clip (EcomID)",
|
| 851 |
+
"ApplyEcomID": "Apply EcomID",
|
| 852 |
+
"ApplyEcomIDAdvanced": "Apply EcomID Advanced",
|
| 853 |
+
"FaceKeypointsPreprocessor": "Face Keypoints Preprocessor",
|
| 854 |
+
|
| 855 |
+
"InstantIDAttentionPatch": "InstantID Patch Attention",
|
| 856 |
+
"ApplyInstantIDControlNet": "InstantID Apply ControlNet",
|
| 857 |
+
}
|
SDXL_EcomID_ComfyUI/LICENSE.txt
ADDED
|
@@ -0,0 +1,201 @@
|
| 1 |
+
Apache License
|
| 2 |
+
Version 2.0, January 2004
|
| 3 |
+
http://www.apache.org/licenses/
|
| 4 |
+
|
| 5 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
| 6 |
+
|
| 7 |
+
1. Definitions.
|
| 8 |
+
|
| 9 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
| 10 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
| 11 |
+
|
| 12 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
| 13 |
+
the copyright owner that is granting the License.
|
| 14 |
+
|
| 15 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
| 16 |
+
other entities that control, are controlled by, or are under common
|
| 17 |
+
control with that entity. For the purposes of this definition,
|
| 18 |
+
"control" means (i) the power, direct or indirect, to cause the
|
| 19 |
+
direction or management of such entity, whether by contract or
|
| 20 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
| 21 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
| 22 |
+
|
| 23 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
| 24 |
+
exercising permissions granted by this License.
|
| 25 |
+
|
| 26 |
+
"Source" form shall mean the preferred form for making modifications,
|
| 27 |
+
including but not limited to software source code, documentation
|
| 28 |
+
source, and configuration files.
|
| 29 |
+
|
| 30 |
+
"Object" form shall mean any form resulting from mechanical
|
| 31 |
+
transformation or translation of a Source form, including but
|
| 32 |
+
not limited to compiled object code, generated documentation,
|
| 33 |
+
and conversions to other media types.
|
| 34 |
+
|
| 35 |
+
"Work" shall mean the work of authorship, whether in Source or
|
| 36 |
+
Object form, made available under the License, as indicated by a
|
| 37 |
+
copyright notice that is included in or attached to the work
|
| 38 |
+
(an example is provided in the Appendix below).
|
| 39 |
+
|
| 40 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
| 41 |
+
form, that is based on (or derived from) the Work and for which the
|
| 42 |
+
editorial revisions, annotations, elaborations, or other modifications
|
| 43 |
+
represent, as a whole, an original work of authorship. For the purposes
|
| 44 |
+
of this License, Derivative Works shall not include works that remain
|
| 45 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
| 46 |
+
the Work and Derivative Works thereof.
|
| 47 |
+
|
| 48 |
+
"Contribution" shall mean any work of authorship, including
|
| 49 |
+
the original version of the Work and any modifications or additions
|
| 50 |
+
to that Work or Derivative Works thereof, that is intentionally
|
| 51 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
| 52 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
| 53 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
| 54 |
+
means any form of electronic, verbal, or written communication sent
|
| 55 |
+
to the Licensor or its representatives, including but not limited to
|
| 56 |
+
communication on electronic mailing lists, source code control systems,
|
| 57 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
| 58 |
+
Licensor for the purpose of discussing and improving the Work, but
|
| 59 |
+
excluding communication that is conspicuously marked or otherwise
|
| 60 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
| 61 |
+
|
| 62 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
| 63 |
+
on behalf of whom a Contribution has been received by Licensor and
|
| 64 |
+
subsequently incorporated within the Work.
|
| 65 |
+
|
| 66 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
| 67 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 68 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 69 |
+
copyright license to reproduce, prepare Derivative Works of,
|
| 70 |
+
publicly display, publicly perform, sublicense, and distribute the
|
| 71 |
+
Work and such Derivative Works in Source or Object form.
|
| 72 |
+
|
| 73 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
| 74 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 75 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 76 |
+
(except as stated in this section) patent license to make, have made,
|
| 77 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
| 78 |
+
where such license applies only to those patent claims licensable
|
| 79 |
+
by such Contributor that are necessarily infringed by their
|
| 80 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
| 81 |
+
with the Work to which such Contribution(s) was submitted. If You
|
| 82 |
+
institute patent litigation against any entity (including a
|
| 83 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
| 84 |
+
or a Contribution incorporated within the Work constitutes direct
|
| 85 |
+
or contributory patent infringement, then any patent licenses
|
| 86 |
+
granted to You under this License for that Work shall terminate
|
| 87 |
+
as of the date such litigation is filed.
|
| 88 |
+
|
| 89 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
| 90 |
+
Work or Derivative Works thereof in any medium, with or without
|
| 91 |
+
modifications, and in Source or Object form, provided that You
|
| 92 |
+
meet the following conditions:
|
| 93 |
+
|
| 94 |
+
(a) You must give any other recipients of the Work or
|
| 95 |
+
Derivative Works a copy of this License; and
|
| 96 |
+
|
| 97 |
+
(b) You must cause any modified files to carry prominent notices
|
| 98 |
+
stating that You changed the files; and
|
| 99 |
+
|
| 100 |
+
(c) You must retain, in the Source form of any Derivative Works
|
| 101 |
+
that You distribute, all copyright, patent, trademark, and
|
| 102 |
+
attribution notices from the Source form of the Work,
|
| 103 |
+
excluding those notices that do not pertain to any part of
|
| 104 |
+
the Derivative Works; and
|
| 105 |
+
|
| 106 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
| 107 |
+
distribution, then any Derivative Works that You distribute must
|
| 108 |
+
include a readable copy of the attribution notices contained
|
| 109 |
+
within such NOTICE file, excluding those notices that do not
|
| 110 |
+
pertain to any part of the Derivative Works, in at least one
|
| 111 |
+
of the following places: within a NOTICE text file distributed
|
| 112 |
+
as part of the Derivative Works; within the Source form or
|
| 113 |
+
documentation, if provided along with the Derivative Works; or,
|
| 114 |
+
within a display generated by the Derivative Works, if and
|
| 115 |
+
wherever such third-party notices normally appear. The contents
|
| 116 |
+
of the NOTICE file are for informational purposes only and
|
| 117 |
+
do not modify the License. You may add Your own attribution
|
| 118 |
+
notices within Derivative Works that You distribute, alongside
|
| 119 |
+
or as an addendum to the NOTICE text from the Work, provided
|
| 120 |
+
that such additional attribution notices cannot be construed
|
| 121 |
+
as modifying the License.
|
| 122 |
+
|
| 123 |
+
You may add Your own copyright statement to Your modifications and
|
| 124 |
+
may provide additional or different license terms and conditions
|
| 125 |
+
for use, reproduction, or distribution of Your modifications, or
|
| 126 |
+
for any such Derivative Works as a whole, provided Your use,
|
| 127 |
+
reproduction, and distribution of the Work otherwise complies with
|
| 128 |
+
the conditions stated in this License.
|
| 129 |
+
|
| 130 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
| 131 |
+
any Contribution intentionally submitted for inclusion in the Work
|
| 132 |
+
by You to the Licensor shall be under the terms and conditions of
|
| 133 |
+
this License, without any additional terms or conditions.
|
| 134 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
| 135 |
+
the terms of any separate license agreement you may have executed
|
| 136 |
+
with Licensor regarding such Contributions.
|
| 137 |
+
|
| 138 |
+
6. Trademarks. This License does not grant permission to use the trade
|
| 139 |
+
names, trademarks, service marks, or product names of the Licensor,
|
| 140 |
+
except as required for reasonable and customary use in describing the
|
| 141 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
| 142 |
+
|
| 143 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
| 144 |
+
agreed to in writing, Licensor provides the Work (and each
|
| 145 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
| 146 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 147 |
+
implied, including, without limitation, any warranties or conditions
|
| 148 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
| 149 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
| 150 |
+
appropriateness of using or redistributing the Work and assume any
|
| 151 |
+
risks associated with Your exercise of permissions under this License.
|
| 152 |
+
|
| 153 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
| 154 |
+
whether in tort (including negligence), contract, or otherwise,
|
| 155 |
+
unless required by applicable law (such as deliberate and grossly
|
| 156 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
| 157 |
+
liable to You for damages, including any direct, indirect, special,
|
| 158 |
+
incidental, or consequential damages of any character arising as a
|
| 159 |
+
result of this License or out of the use or inability to use the
|
| 160 |
+
Work (including but not limited to damages for loss of goodwill,
|
| 161 |
+
work stoppage, computer failure or malfunction, or any and all
|
| 162 |
+
other commercial damages or losses), even if such Contributor
|
| 163 |
+
has been advised of the possibility of such damages.
|
| 164 |
+
|
| 165 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
| 166 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
| 167 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
| 168 |
+
or other liability obligations and/or rights consistent with this
|
| 169 |
+
License. However, in accepting such obligations, You may act only
|
| 170 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
| 171 |
+
of any other Contributor, and only if You agree to indemnify,
|
| 172 |
+
defend, and hold each Contributor harmless for any liability
|
| 173 |
+
incurred by, or claims asserted against, such Contributor by reason
|
| 174 |
+
of your accepting any such warranty or additional liability.
|
| 175 |
+
|
| 176 |
+
END OF TERMS AND CONDITIONS
|
| 177 |
+
|
| 178 |
+
APPENDIX: How to apply the Apache License to your work.
|
| 179 |
+
|
| 180 |
+
To apply the Apache License to your work, attach the following
|
| 181 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
| 182 |
+
replaced with your own identifying information. (Don't include
|
| 183 |
+
the brackets!) The text should be enclosed in the appropriate
|
| 184 |
+
comment syntax for the file format. We also recommend that a
|
| 185 |
+
file or class name and description of purpose be included on the
|
| 186 |
+
same "printed page" as the copyright notice for easier
|
| 187 |
+
identification within third-party archives.
|
| 188 |
+
|
| 189 |
+
Copyright [yyyy] [name of copyright owner]
|
| 190 |
+
|
| 191 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 192 |
+
you may not use this file except in compliance with the License.
|
| 193 |
+
You may obtain a copy of the License at
|
| 194 |
+
|
| 195 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 196 |
+
|
| 197 |
+
Unless required by applicable law or agreed to in writing, software
|
| 198 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 199 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 200 |
+
See the License for the specific language governing permissions and
|
| 201 |
+
limitations under the License.
|
SDXL_EcomID_ComfyUI/README.md
ADDED
|
@@ -0,0 +1,118 @@
|
| 1 |
+
<div style="display: flex; justify-content: center; align-items: center;">
|
| 2 |
+
<img src="./images/images_alibaba.png" alt="alibaba" style="width: 20%; height: auto; margin-right: 5%;">
|
| 3 |
+
<img src="./images/images_alimama.png" alt="alimama" style="width: 20%; height: auto;">
|
| 4 |
+
</div>
|
| 5 |
+
|
| 6 |
+
# ComfyUI EcomID (Native Support)
|
| 7 |
+
|
| 8 |
+
Native [SDXL-EcomID](https://huggingface.co/alimama-creative/SDXL-EcomID) support for [ComfyUI](https://github.com/comfyanonymous/ComfyUI).
|
| 9 |
+
|
| 10 |
+
This extension doesn't use *diffusers*; instead it implements EcomID natively and integrates fully with ComfyUI.
|
| 11 |
+
|
| 12 |
+
## Show Cases
|
| 13 |
+
EcomID enhances portrait representation, delivering a more authentic and aesthetically pleasing appearance while ensuring semantic consistency and greater internal ID similarity (i.e., traits that do not vary with age, hairstyle, glasses, or other physical changes).
|
| 14 |
+
|
| 15 |
+
<table>
|
| 16 |
+
<tr>
|
| 17 |
+
<th style="width: 24%;">Prompt</th>
|
| 18 |
+
<th style="width: 19%;">Reference Image</th>
|
| 19 |
+
<th style="width: 19%;">EcomID</th>
|
| 20 |
+
<th style="width: 19%;">InstantID</th>
|
| 21 |
+
<th style="width: 19%;">PuLID</th>
|
| 22 |
+
</tr>
|
| 23 |
+
<tr>
|
| 24 |
+
<td style="font-size: 12px;">A close-up portrait of a <span style="color:red"><strong>little girl with double braids</strong></span>, wearing a white dress, standing on the beach during sunset.</td>
|
| 25 |
+
<td><img src="images/show_case/21.png" alt="参考图像" width="100%"></td>
|
| 26 |
+
<td><img src="images/show_case/22.png" alt="EcomID图像" width="100%"></td>
|
| 27 |
+
<td><img src="images/show_case/23.png" alt="InstantID图像" width="100%"></td>
|
| 28 |
+
<td><img src="images/show_case/24.png" alt="PuLID图像" width="100%"></td>
|
| 29 |
+
</tr>
|
| 30 |
+
<tr>
|
| 31 |
+
<td style="font-size: 12px;">A close-up portrait of a <span style="color:red"><strong>very little girl</strong></span> with double braids, wearing <span style="color:red"><strong>a hat</strong></span> and white dress, standing on the beach during sunset.</td>
|
| 32 |
+
<td><img src="images/show_case/44.png" alt="参考图像" width="100%"></td>
|
| 33 |
+
<td><img src="images/show_case/47.png" alt="EcomID图像" width="100%"></td>
|
| 34 |
+
<td><img src="images/show_case/46.png" alt="InstantID图像" width="100%"></td>
|
| 35 |
+
<td><img src="images/show_case/45.png" alt="PuLID图像" width="100%"></td>
|
| 36 |
+
</tr>
|
| 37 |
+
<tr>
|
| 38 |
+
<td style="font-size: 12px;">Agrizzled detective, <span style="color:red"><strong>fedora</strong></span> casting a shadow over his square jaw, a <span style="color:red"><strong>cigar dangling from his lips</strong></span>, his trench coat evocative of film noir, in a <span style="color:red"><strong>rainy alley</strong></span>.</td>
|
| 39 |
+
<td><img src="images/show_case/25.png" alt="参考图像" width="100%"></td>
|
| 40 |
+
<td><img src="images/show_case/26.png" alt="EcomID图像" width="100%"></td>
|
| 41 |
+
<td><img src="images/show_case/27.png" alt="InstantID图像" width="100%"></td>
|
| 42 |
+
<td><img src="images/show_case/28.png" alt="PuLID图像" width="100%"></td>
|
| 43 |
+
</tr>
|
| 44 |
+
<tr>
|
| 45 |
+
<td style="font-size: 12px;">A smiling girl with <span style="color:red"><strong>bangs and long hair</strong></span> in a school uniform stands under cherry trees, holding a book.</td>
|
| 46 |
+
<td><img src="images/show_case/29.png" alt="参考图像" width="100%"></td>
|
| 47 |
+
<td><img src="images/show_case/30.png" alt="EcomID图像" width="100%"></td>
|
| 48 |
+
<td><img src="images/show_case/31.png" alt="InstantID图像" width="100%"></td>
|
| 49 |
+
<td><img src="images/show_case/32.png" alt="PuLID图像" width="100%"></td>
|
| 50 |
+
</tr>
|
| 51 |
+
<tr>
|
| 52 |
+
<td style="font-size: 12px;">A <span style="color:red"><strong>very old</strong></span> witch, wearing a black cloak, with a pointed hat, holding a magic wand, against a background of a misty forest.</td>
|
| 53 |
+
<td><img src="images/show_case/33.png" alt="参考图像" width="100%"></td>
|
| 54 |
+
<td><img src="images/show_case/34.png" alt="EcomID图像" width="100%"></td>
|
| 55 |
+
<td><img src="images/show_case/35.png" alt="InstantID图像" width="100%"></td>
|
| 56 |
+
<td><img src="images/show_case/36.png" alt="PuLID图像" width="100%"></td>
|
| 57 |
+
</tr>
|
| 58 |
+
<tr>
|
| 59 |
+
<td style="font-size: 12px;">A man clad in cyberpunk fashion: <span style="color:red"><strong>neon accents, reflective sunglasses,</strong></span> and a leather jacket with glowing circuit patterns. He stands stoically amidst a soaked cityscape.</td>
|
| 60 |
+
<td><img src="images/show_case/37.png" alt="参考图像" width="100%"></td>
|
| 61 |
+
<td><img src="images/show_case/38.png" alt="EcomID图像" width="100%"></td>
|
| 62 |
+
<td><img src="images/show_case/39.png" alt="InstantID图像" width="100%"></td>
|
| 63 |
+
<td><img src="images/show_case/40.png" alt="PuLID图像" width="100%"></td>
|
| 64 |
+
</tr>
|
| 65 |
+
|
| 66 |
+
</table>
|
| 67 |
+
|
| 68 |
+
You can see more showcases in the [SDXL-EcomID](https://huggingface.co/alimama-creative/SDXL-EcomID) repository.
|
| 69 |
+
|
| 70 |
+
## Basic Workflow
|
| 71 |
+
|
| 72 |
+
In the `examples` directory you'll find the basic workflow.
|
| 73 |
+
|
| 74 |
+

|
| 75 |
+
|
| 76 |
+
## Installation
|
| 77 |
+
|
| 78 |
+
**Upgrade ComfyUI to the latest version!**
|
| 79 |
+
|
| 80 |
+
Download or `git clone` this repository into the `ComfyUI/custom_nodes/` directory or use the Manager.
|
| 81 |
+
|
| 82 |
+
EcomID requires `insightface`; install it in your environment together with `onnxruntime` and `onnxruntime-gpu`.
|
| 83 |
+
|
| 84 |
+
Models:
|
| 85 |
+
- [PuLID pre-trained model](https://huggingface.co/huchenlei/ipadapter_pulid/resolve/main/ip-adapter_pulid_sdxl_fp16.safetensors?download=true) goes in `ComfyUI/models/pulid/` (thanks to [Chenlei Hu](https://github.com/huchenlei) for converting them into IPAdapter format)
|
| 86 |
+
- The **EVA CLIP** model is EVA02-CLIP-L-14-336; it should be downloaded automatically (it will be placed in the Hugging Face cache directory).
|
| 87 |
+
- The `facexlib` dependency needs to be installed; its models are downloaded on first use.
|
| 88 |
+
- You also need **InsightFace** with [AntelopeV2](https://huggingface.co/MonsterMMORPG/tools/tree/main); the unzipped models should be placed in `ComfyUI/models/insightface/models/antelopev2`.
|
| 89 |
+
- The **ip_adapter model of InstantID** can be downloaded from [HuggingFace](https://huggingface.co/InstantX/InstantID/resolve/main/ip-adapter.bin?download=true) and should be placed into the `ComfyUI/models/instantid` directory. (Note that the model is called *ip_adapter* as it is based on the [IPAdapter](https://github.com/tencent-ailab/IP-Adapter)).
|
| 90 |
+
- You also need a **[ControlNet](https://huggingface.co/alimama-creative/SDXL-EcomID/resolve/main/diffusion_pytorch_model.safetensors?download=true)** trained on 2M real human images. Place it in the ComfyUI controlnet directory (a quick path-check sketch follows this list).
|
| 91 |
+
|
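After downloading the models, a quick way to verify that everything is in the expected place is to check the paths before launching ComfyUI. The snippet below is only a sketch: it assumes a default ComfyUI folder layout and uses the filenames from the links above (you may have renamed the ControlNet file), so adjust it to your setup.

```python
from pathlib import Path

# Assumed default ComfyUI layout; adjust the base path and filenames to your setup.
models = Path("ComfyUI/models")
expected = [
    models / "pulid" / "ip-adapter_pulid_sdxl_fp16.safetensors",
    models / "instantid" / "ip-adapter.bin",
    models / "insightface" / "models" / "antelopev2",               # unzipped AntelopeV2 folder
    models / "controlnet" / "diffusion_pytorch_model.safetensors",  # EcomID ControlNet (rename if you prefer)
]
for path in expected:
    print(f"{'OK     ' if path.exists() else 'MISSING'} {path}")
```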
| 92 |
+
## Various Resolutions
|
| 93 |
+
|
| 94 |
+
Feel free to generate images in various resolutions, as we have trained the controlnet on 2 million high-quality images.
|
| 95 |
+
## Normal CFG
|
| 96 |
+
|
| 97 |
+
You can set a higher CFG value to achieve even better semantic consistency. Values from 1 to 7 are acceptable, with 6 being recommended.
|
| 98 |
+
|
| 99 |
+
## Face keypoints
|
| 100 |
+
|
| 101 |
+
The pose of the person is aligned with the keypoints generated from the reference image, just like in InstantID. You can achieve a different pose by sending an image to the **image_kps** input.
|
| 102 |
+
|
| 103 |
+
<img src="examples/keypoint.png" alt="KeyPoint" />
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
## Advanced Node
|
| 107 |
+
|
| 108 |
+
There's an EcomID advanced node available; at the moment, the only difference from the standard one is that you can set the weights for the ip-adapter (PuLID) model and the controlnet separately.
|
| 109 |
+
|
| 110 |
+
## Other notes
|
| 111 |
+
|
| 112 |
+
- This project references the following repositories: [ComfyUI_InstantID](https://github.com/cubiq/ComfyUI_InstantID) and [PuLID_ComfyUI](https://github.com/cubiq/PuLID_ComfyUI).
|
| 113 |
+
|
| 114 |
+
- Remember that, at the moment, this is only compatible with SDXL-based models, such as EcomXL, [leosams-helloworld-xl](https://civitai.com/models/43977/leosams-helloworld-xl), [dreamshaper-xl](https://civitai.com/models/112902/dreamshaper-xl), [stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) and so on.
|
| 115 |
+
- It works very well with SDXL Turbo/Lightning, [EcomXL-Inpainting-ControlNet](https://huggingface.co/alimama-creative/EcomXL_controlnet_inpaint) and [EcomXL-Softedge-ControlNet](https://huggingface.co/alimama-creative/EcomXL_controlnet_softedge).
|
| 116 |
+
|
| 117 |
+
The results of the Multi-ControlNet are shown below:
|
| 118 |
+

|
SDXL_EcomID_ComfyUI/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
from .EcomID import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS
|
| 2 |
+
|
| 3 |
+
__all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS']
|
SDXL_EcomID_ComfyUI/encoders.py
ADDED
|
@@ -0,0 +1,63 @@
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
|
| 4 |
+
class IDEncoder(nn.Module):
|
| 5 |
+
def __init__(self, width=1280, context_dim=2048, num_token=5):
|
| 6 |
+
super().__init__()
|
| 7 |
+
self.num_token = num_token
|
| 8 |
+
self.context_dim = context_dim
|
| 9 |
+
h1 = min((context_dim * num_token) // 4, 1024)
|
| 10 |
+
h2 = min((context_dim * num_token) // 2, 1024)
|
| 11 |
+
self.body = nn.Sequential(
|
| 12 |
+
nn.Linear(width, h1),
|
| 13 |
+
nn.LayerNorm(h1),
|
| 14 |
+
nn.LeakyReLU(),
|
| 15 |
+
nn.Linear(h1, h2),
|
| 16 |
+
nn.LayerNorm(h2),
|
| 17 |
+
nn.LeakyReLU(),
|
| 18 |
+
nn.Linear(h2, context_dim * num_token),
|
| 19 |
+
)
|
| 20 |
+
|
| 21 |
+
for i in range(5):
|
| 22 |
+
setattr(
|
| 23 |
+
self,
|
| 24 |
+
f'mapping_{i}',
|
| 25 |
+
nn.Sequential(
|
| 26 |
+
nn.Linear(1024, 1024),
|
| 27 |
+
nn.LayerNorm(1024),
|
| 28 |
+
nn.LeakyReLU(),
|
| 29 |
+
nn.Linear(1024, 1024),
|
| 30 |
+
nn.LayerNorm(1024),
|
| 31 |
+
nn.LeakyReLU(),
|
| 32 |
+
nn.Linear(1024, context_dim),
|
| 33 |
+
),
|
| 34 |
+
)
|
| 35 |
+
|
| 36 |
+
setattr(
|
| 37 |
+
self,
|
| 38 |
+
f'mapping_patch_{i}',
|
| 39 |
+
nn.Sequential(
|
| 40 |
+
nn.Linear(1024, 1024),
|
| 41 |
+
nn.LayerNorm(1024),
|
| 42 |
+
nn.LeakyReLU(),
|
| 43 |
+
nn.Linear(1024, 1024),
|
| 44 |
+
nn.LayerNorm(1024),
|
| 45 |
+
nn.LeakyReLU(),
|
| 46 |
+
nn.Linear(1024, context_dim),
|
| 47 |
+
),
|
| 48 |
+
)
|
| 49 |
+
|
| 50 |
+
def forward(self, x, y):
|
| 51 |
+
# x shape [N, C]
|
| 52 |
+
x = self.body(x)
|
| 53 |
+
x = x.reshape(-1, self.num_token, self.context_dim)
|
| 54 |
+
|
| 55 |
+
hidden_states = ()
|
| 56 |
+
for i, emb in enumerate(y):
|
| 57 |
+
hidden_state = getattr(self, f'mapping_{i}')(emb[:, :1]) + getattr(self, f'mapping_patch_{i}')(
|
| 58 |
+
emb[:, 1:]
|
| 59 |
+
).mean(dim=1, keepdim=True)
|
| 60 |
+
hidden_states += (hidden_state,)
|
| 61 |
+
hidden_states = torch.cat(hidden_states, dim=1)
|
| 62 |
+
|
| 63 |
+
return torch.cat([x, hidden_states], dim=1)
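# --- usage sketch (not part of the original file) ---
# A minimal smoke test, assuming the default configuration: `x` is the global
# id feature of shape [N, width] and `y` is a list of 5 intermediate EVA-CLIP
# feature maps of shape [N, num_tokens, 1024]. The concrete shapes below are
# illustrative assumptions only.
if __name__ == "__main__":
    encoder = IDEncoder(width=1280, context_dim=2048, num_token=5)
    x = torch.randn(2, 1280)                            # global id feature per image
    y = [torch.randn(2, 257, 1024) for _ in range(5)]   # 5 intermediate layers (cls + patches)
    out = encoder(x, y)
    print(out.shape)  # torch.Size([2, 10, 2048]): 5 projected id tokens + 5 per-layer tokens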
|
SDXL_EcomID_ComfyUI/eva_clip/.DS_Store
ADDED
|
Binary file (8.2 kB).
|
|
|
SDXL_EcomID_ComfyUI/eva_clip/__init__.py
ADDED
|
@@ -0,0 +1,11 @@
|
| 1 |
+
from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
|
| 2 |
+
from .factory import create_model, create_model_and_transforms, create_model_from_pretrained, get_tokenizer, create_transforms
|
| 3 |
+
from .factory import list_models, add_model_config, get_model_config, load_checkpoint
|
| 4 |
+
from .loss import ClipLoss
|
| 5 |
+
from .model import CLIP, CustomCLIP, CLIPTextCfg, CLIPVisionCfg,\
|
| 6 |
+
convert_weights_to_lp, convert_weights_to_fp16, trace_model, get_cast_dtype
|
| 7 |
+
from .openai import load_openai_model, list_openai_models
|
| 8 |
+
from .pretrained import list_pretrained, list_pretrained_models_by_tag, list_pretrained_tags_by_model,\
|
| 9 |
+
get_pretrained_url, download_pretrained_from_url, is_pretrained_cfg, get_pretrained_cfg, download_pretrained
|
| 10 |
+
from .tokenizer import SimpleTokenizer, tokenize
|
| 11 |
+
from .transform import image_transform
|
SDXL_EcomID_ComfyUI/eva_clip/bpe_simple_vocab_16e6.txt.gz
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:924691ac288e54409236115652ad4aa250f48203de50a9e4722a6ecd48d6804a
|
| 3 |
+
size 1356917
|
SDXL_EcomID_ComfyUI/eva_clip/constants.py
ADDED
|
@@ -0,0 +1,2 @@
| 1 |
+
OPENAI_DATASET_MEAN = (0.48145466, 0.4578275, 0.40821073)
|
| 2 |
+
OPENAI_DATASET_STD = (0.26862954, 0.26130258, 0.27577711)
|
SDXL_EcomID_ComfyUI/eva_clip/eva_vit_model.py
ADDED
|
@@ -0,0 +1,548 @@
| 1 |
+
# --------------------------------------------------------
|
| 2 |
+
# Adapted from https://github.com/microsoft/unilm/tree/master/beit
|
| 3 |
+
# --------------------------------------------------------
|
| 4 |
+
import math
|
| 5 |
+
import os
|
| 6 |
+
from functools import partial
|
| 7 |
+
import torch
|
| 8 |
+
import torch.nn as nn
|
| 9 |
+
import torch.nn.functional as F
|
| 10 |
+
try:
|
| 11 |
+
from timm.models.layers import drop_path, to_2tuple, trunc_normal_
|
| 12 |
+
except:
|
| 13 |
+
from timm.layers import drop_path, to_2tuple, trunc_normal_
|
| 14 |
+
|
| 15 |
+
from .transformer import PatchDropout
|
| 16 |
+
from .rope import VisionRotaryEmbedding, VisionRotaryEmbeddingFast
|
| 17 |
+
|
| 18 |
+
if os.getenv('ENV_TYPE') == 'deepspeed':
|
| 19 |
+
try:
|
| 20 |
+
from deepspeed.runtime.activation_checkpointing.checkpointing import checkpoint
|
| 21 |
+
except:
|
| 22 |
+
from torch.utils.checkpoint import checkpoint
|
| 23 |
+
else:
|
| 24 |
+
from torch.utils.checkpoint import checkpoint
|
| 25 |
+
|
| 26 |
+
try:
|
| 27 |
+
import xformers
|
| 28 |
+
import xformers.ops as xops
|
| 29 |
+
XFORMERS_IS_AVAILBLE = True
|
| 30 |
+
except:
|
| 31 |
+
XFORMERS_IS_AVAILBLE = False
|
| 32 |
+
|
| 33 |
+
class DropPath(nn.Module):
|
| 34 |
+
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
|
| 35 |
+
"""
|
| 36 |
+
def __init__(self, drop_prob=None):
|
| 37 |
+
super(DropPath, self).__init__()
|
| 38 |
+
self.drop_prob = drop_prob
|
| 39 |
+
|
| 40 |
+
def forward(self, x):
|
| 41 |
+
return drop_path(x, self.drop_prob, self.training)
|
| 42 |
+
|
| 43 |
+
def extra_repr(self) -> str:
|
| 44 |
+
return 'p={}'.format(self.drop_prob)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
class Mlp(nn.Module):
|
| 48 |
+
def __init__(
|
| 49 |
+
self,
|
| 50 |
+
in_features,
|
| 51 |
+
hidden_features=None,
|
| 52 |
+
out_features=None,
|
| 53 |
+
act_layer=nn.GELU,
|
| 54 |
+
norm_layer=nn.LayerNorm,
|
| 55 |
+
drop=0.,
|
| 56 |
+
subln=False,
|
| 57 |
+
|
| 58 |
+
):
|
| 59 |
+
super().__init__()
|
| 60 |
+
out_features = out_features or in_features
|
| 61 |
+
hidden_features = hidden_features or in_features
|
| 62 |
+
self.fc1 = nn.Linear(in_features, hidden_features)
|
| 63 |
+
self.act = act_layer()
|
| 64 |
+
|
| 65 |
+
self.ffn_ln = norm_layer(hidden_features) if subln else nn.Identity()
|
| 66 |
+
|
| 67 |
+
self.fc2 = nn.Linear(hidden_features, out_features)
|
| 68 |
+
self.drop = nn.Dropout(drop)
|
| 69 |
+
|
| 70 |
+
def forward(self, x):
|
| 71 |
+
x = self.fc1(x)
|
| 72 |
+
x = self.act(x)
|
| 73 |
+
# x = self.drop(x)
|
| 74 |
+
# commit this for the orignal BERT implement
|
| 75 |
+
x = self.ffn_ln(x)
|
| 76 |
+
|
| 77 |
+
x = self.fc2(x)
|
| 78 |
+
x = self.drop(x)
|
| 79 |
+
return x
|
| 80 |
+
|
| 81 |
+
class SwiGLU(nn.Module):
|
| 82 |
+
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.SiLU, drop=0.,
|
| 83 |
+
norm_layer=nn.LayerNorm, subln=False):
|
| 84 |
+
super().__init__()
|
| 85 |
+
out_features = out_features or in_features
|
| 86 |
+
hidden_features = hidden_features or in_features
|
| 87 |
+
|
| 88 |
+
self.w1 = nn.Linear(in_features, hidden_features)
|
| 89 |
+
self.w2 = nn.Linear(in_features, hidden_features)
|
| 90 |
+
|
| 91 |
+
self.act = act_layer()
|
| 92 |
+
self.ffn_ln = norm_layer(hidden_features) if subln else nn.Identity()
|
| 93 |
+
self.w3 = nn.Linear(hidden_features, out_features)
|
| 94 |
+
|
| 95 |
+
self.drop = nn.Dropout(drop)
|
| 96 |
+
|
| 97 |
+
def forward(self, x):
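# SwiGLU: SiLU-gated linear unit -- act(w1(x)) * w2(x), followed by the
# optional sub-LayerNorm (ffn_ln), the output projection w3 and dropout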
|
| 98 |
+
x1 = self.w1(x)
|
| 99 |
+
x2 = self.w2(x)
|
| 100 |
+
hidden = self.act(x1) * x2
|
| 101 |
+
x = self.ffn_ln(hidden)
|
| 102 |
+
x = self.w3(x)
|
| 103 |
+
x = self.drop(x)
|
| 104 |
+
return x
|
| 105 |
+
|
| 106 |
+
class Attention(nn.Module):
|
| 107 |
+
def __init__(
|
| 108 |
+
self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.,
|
| 109 |
+
proj_drop=0., window_size=None, attn_head_dim=None, xattn=False, rope=None, subln=False, norm_layer=nn.LayerNorm):
|
| 110 |
+
super().__init__()
|
| 111 |
+
self.num_heads = num_heads
|
| 112 |
+
head_dim = dim // num_heads
|
| 113 |
+
if attn_head_dim is not None:
|
| 114 |
+
head_dim = attn_head_dim
|
| 115 |
+
all_head_dim = head_dim * self.num_heads
|
| 116 |
+
self.scale = qk_scale or head_dim ** -0.5
|
| 117 |
+
|
| 118 |
+
self.subln = subln
|
| 119 |
+
if self.subln:
|
| 120 |
+
self.q_proj = nn.Linear(dim, all_head_dim, bias=False)
|
| 121 |
+
self.k_proj = nn.Linear(dim, all_head_dim, bias=False)
|
| 122 |
+
self.v_proj = nn.Linear(dim, all_head_dim, bias=False)
|
| 123 |
+
else:
|
| 124 |
+
self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
|
| 125 |
+
|
| 126 |
+
if qkv_bias:
|
| 127 |
+
self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
|
| 128 |
+
self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
|
| 129 |
+
else:
|
| 130 |
+
self.q_bias = None
|
| 131 |
+
self.v_bias = None
|
| 132 |
+
|
| 133 |
+
if window_size:
|
| 134 |
+
self.window_size = window_size
|
| 135 |
+
self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
|
| 136 |
+
self.relative_position_bias_table = nn.Parameter(
|
| 137 |
+
torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH
|
| 138 |
+
# cls to token & token 2 cls & cls to cls
|
| 139 |
+
|
| 140 |
+
# get pair-wise relative position index for each token inside the window
|
| 141 |
+
coords_h = torch.arange(window_size[0])
|
| 142 |
+
coords_w = torch.arange(window_size[1])
|
| 143 |
+
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
|
| 144 |
+
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
|
| 145 |
+
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
|
| 146 |
+
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
|
| 147 |
+
relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
|
| 148 |
+
relative_coords[:, :, 1] += window_size[1] - 1
|
| 149 |
+
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
|
| 150 |
+
relative_position_index = \
|
| 151 |
+
torch.zeros(size=(window_size[0] * window_size[1] + 1, ) * 2, dtype=relative_coords.dtype)
|
| 152 |
+
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
|
| 153 |
+
relative_position_index[0, 0:] = self.num_relative_distance - 3
|
| 154 |
+
relative_position_index[0:, 0] = self.num_relative_distance - 2
|
| 155 |
+
relative_position_index[0, 0] = self.num_relative_distance - 1
|
| 156 |
+
|
| 157 |
+
self.register_buffer("relative_position_index", relative_position_index)
|
| 158 |
+
else:
|
| 159 |
+
self.window_size = None
|
| 160 |
+
self.relative_position_bias_table = None
|
| 161 |
+
self.relative_position_index = None
|
| 162 |
+
|
| 163 |
+
self.attn_drop = nn.Dropout(attn_drop)
|
| 164 |
+
self.inner_attn_ln = norm_layer(all_head_dim) if subln else nn.Identity()
|
| 165 |
+
# self.proj = nn.Linear(all_head_dim, all_head_dim)
|
| 166 |
+
self.proj = nn.Linear(all_head_dim, dim)
|
| 167 |
+
self.proj_drop = nn.Dropout(proj_drop)
|
| 168 |
+
self.xattn = xattn
|
| 169 |
+
self.xattn_drop = attn_drop
|
| 170 |
+
|
| 171 |
+
self.rope = rope
|
| 172 |
+
|
| 173 |
+
def forward(self, x, rel_pos_bias=None, attn_mask=None):
|
| 174 |
+
B, N, C = x.shape
|
| 175 |
+
if self.subln:
|
| 176 |
+
q = F.linear(input=x, weight=self.q_proj.weight, bias=self.q_bias)
|
| 177 |
+
k = F.linear(input=x, weight=self.k_proj.weight, bias=None)
|
| 178 |
+
v = F.linear(input=x, weight=self.v_proj.weight, bias=self.v_bias)
|
| 179 |
+
|
| 180 |
+
q = q.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3) # B, num_heads, N, C
|
| 181 |
+
k = k.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3)
|
| 182 |
+
v = v.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3)
|
| 183 |
+
else:
|
| 184 |
+
|
| 185 |
+
qkv_bias = None
|
| 186 |
+
if self.q_bias is not None:
|
| 187 |
+
qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias))
|
| 188 |
+
|
| 189 |
+
qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
|
| 190 |
+
qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) # 3, B, num_heads, N, C
|
| 191 |
+
q, k, v = qkv[0], qkv[1], qkv[2]
|
| 192 |
+
|
| 193 |
+
if self.rope:
|
| 194 |
+
# slightly fast impl
|
| 195 |
+
q_t = q[:, :, 1:, :]
|
| 196 |
+
ro_q_t = self.rope(q_t)
|
| 197 |
+
q = torch.cat((q[:, :, :1, :], ro_q_t), -2).type_as(v)
|
| 198 |
+
|
| 199 |
+
k_t = k[:, :, 1:, :]
|
| 200 |
+
ro_k_t = self.rope(k_t)
|
| 201 |
+
k = torch.cat((k[:, :, :1, :], ro_k_t), -2).type_as(v)
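# rotary position embedding is applied to the patch tokens only; the cls
# token (index 0) is concatenated back unchanged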
|
| 202 |
+
|
| 203 |
+
if self.xattn:
|
| 204 |
+
q = q.permute(0, 2, 1, 3) # B, num_heads, N, C -> B, N, num_heads, C
|
| 205 |
+
k = k.permute(0, 2, 1, 3)
|
| 206 |
+
v = v.permute(0, 2, 1, 3)
|
| 207 |
+
|
| 208 |
+
x = xops.memory_efficient_attention(
|
| 209 |
+
q, k, v,
|
| 210 |
+
p=self.xattn_drop,
|
| 211 |
+
scale=self.scale,
|
| 212 |
+
)
|
| 213 |
+
x = x.reshape(B, N, -1)
|
| 214 |
+
x = self.inner_attn_ln(x)
|
| 215 |
+
x = self.proj(x)
|
| 216 |
+
x = self.proj_drop(x)
|
| 217 |
+
else:
|
| 218 |
+
q = q * self.scale
|
| 219 |
+
attn = (q @ k.transpose(-2, -1))
|
| 220 |
+
|
| 221 |
+
if self.relative_position_bias_table is not None:
|
| 222 |
+
relative_position_bias = \
|
| 223 |
+
self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
|
| 224 |
+
self.window_size[0] * self.window_size[1] + 1,
|
| 225 |
+
self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH
|
| 226 |
+
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
|
| 227 |
+
attn = attn + relative_position_bias.unsqueeze(0).type_as(attn)
|
| 228 |
+
|
| 229 |
+
if rel_pos_bias is not None:
|
| 230 |
+
attn = attn + rel_pos_bias.type_as(attn)
|
| 231 |
+
|
| 232 |
+
if attn_mask is not None:
|
| 233 |
+
attn_mask = attn_mask.bool()
|
| 234 |
+
attn = attn.masked_fill(~attn_mask[:, None, None, :], float("-inf"))
|
| 235 |
+
|
| 236 |
+
attn = attn.softmax(dim=-1)
|
| 237 |
+
attn = self.attn_drop(attn)
|
| 238 |
+
|
| 239 |
+
x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
|
| 240 |
+
x = self.inner_attn_ln(x)
|
| 241 |
+
x = self.proj(x)
|
| 242 |
+
x = self.proj_drop(x)
|
| 243 |
+
return x
|
| 244 |
+
|
| 245 |
+
|
| 246 |
+
class Block(nn.Module):
|
| 247 |
+
|
| 248 |
+
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
|
| 249 |
+
drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm,
|
| 250 |
+
window_size=None, attn_head_dim=None, xattn=False, rope=None, postnorm=False,
|
| 251 |
+
subln=False, naiveswiglu=False):
|
| 252 |
+
super().__init__()
|
| 253 |
+
self.norm1 = norm_layer(dim)
|
| 254 |
+
self.attn = Attention(
|
| 255 |
+
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
|
| 256 |
+
attn_drop=attn_drop, proj_drop=drop, window_size=window_size, attn_head_dim=attn_head_dim,
|
| 257 |
+
xattn=xattn, rope=rope, subln=subln, norm_layer=norm_layer)
|
| 258 |
+
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
|
| 259 |
+
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
|
| 260 |
+
self.norm2 = norm_layer(dim)
|
| 261 |
+
mlp_hidden_dim = int(dim * mlp_ratio)
|
| 262 |
+
|
| 263 |
+
if naiveswiglu:
|
| 264 |
+
self.mlp = SwiGLU(
|
| 265 |
+
in_features=dim,
|
| 266 |
+
hidden_features=mlp_hidden_dim,
|
| 267 |
+
subln=subln,
|
| 268 |
+
norm_layer=norm_layer,
|
| 269 |
+
)
|
| 270 |
+
else:
|
| 271 |
+
self.mlp = Mlp(
|
| 272 |
+
in_features=dim,
|
| 273 |
+
hidden_features=mlp_hidden_dim,
|
| 274 |
+
act_layer=act_layer,
|
| 275 |
+
subln=subln,
|
| 276 |
+
drop=drop
|
| 277 |
+
)
|
| 278 |
+
|
| 279 |
+
if init_values is not None and init_values > 0:
|
| 280 |
+
self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True)
|
| 281 |
+
self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True)
|
| 282 |
+
else:
|
| 283 |
+
self.gamma_1, self.gamma_2 = None, None
|
| 284 |
+
|
| 285 |
+
self.postnorm = postnorm
|
| 286 |
+
|
| 287 |
+
def forward(self, x, rel_pos_bias=None, attn_mask=None):
|
| 288 |
+
if self.gamma_1 is None:
|
| 289 |
+
if self.postnorm:
|
| 290 |
+
x = x + self.drop_path(self.norm1(self.attn(x, rel_pos_bias=rel_pos_bias, attn_mask=attn_mask)))
|
| 291 |
+
x = x + self.drop_path(self.norm2(self.mlp(x)))
|
| 292 |
+
else:
|
| 293 |
+
x = x + self.drop_path(self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias, attn_mask=attn_mask))
|
| 294 |
+
x = x + self.drop_path(self.mlp(self.norm2(x)))
|
| 295 |
+
else:
|
| 296 |
+
if self.postnorm:
|
| 297 |
+
x = x + self.drop_path(self.gamma_1 * self.norm1(self.attn(x, rel_pos_bias=rel_pos_bias, attn_mask=attn_mask)))
|
| 298 |
+
x = x + self.drop_path(self.gamma_2 * self.norm2(self.mlp(x)))
|
| 299 |
+
else:
|
| 300 |
+
x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias, attn_mask=attn_mask))
|
| 301 |
+
x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
|
| 302 |
+
return x
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
class PatchEmbed(nn.Module):
|
| 306 |
+
""" Image to Patch Embedding
|
| 307 |
+
"""
|
| 308 |
+
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
|
| 309 |
+
super().__init__()
|
| 310 |
+
img_size = to_2tuple(img_size)
|
| 311 |
+
patch_size = to_2tuple(patch_size)
|
| 312 |
+
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
|
| 313 |
+
self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
|
| 314 |
+
self.img_size = img_size
|
| 315 |
+
self.patch_size = patch_size
|
| 316 |
+
self.num_patches = num_patches
|
| 317 |
+
|
| 318 |
+
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
|
| 319 |
+
|
| 320 |
+
def forward(self, x, **kwargs):
|
| 321 |
+
B, C, H, W = x.shape
|
| 322 |
+
# FIXME look at relaxing size constraints
|
| 323 |
+
assert H == self.img_size[0] and W == self.img_size[1], \
|
| 324 |
+
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
|
| 325 |
+
x = self.proj(x).flatten(2).transpose(1, 2)
|
| 326 |
+
return x
|
| 327 |
+
|
| 328 |
+
|
| 329 |
+
class RelativePositionBias(nn.Module):
|
| 330 |
+
|
| 331 |
+
def __init__(self, window_size, num_heads):
|
| 332 |
+
super().__init__()
|
| 333 |
+
self.window_size = window_size
|
| 334 |
+
self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
|
| 335 |
+
self.relative_position_bias_table = nn.Parameter(
|
| 336 |
+
torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH
|
| 337 |
+
# cls to token & token 2 cls & cls to cls
|
| 338 |
+
|
| 339 |
+
# get pair-wise relative position index for each token inside the window
|
| 340 |
+
coords_h = torch.arange(window_size[0])
|
| 341 |
+
coords_w = torch.arange(window_size[1])
|
| 342 |
+
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
|
| 343 |
+
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
|
| 344 |
+
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
|
| 345 |
+
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
|
| 346 |
+
relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
|
| 347 |
+
relative_coords[:, :, 1] += window_size[1] - 1
|
| 348 |
+
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
|
| 349 |
+
relative_position_index = \
|
| 350 |
+
torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype)
|
| 351 |
+
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
|
| 352 |
+
relative_position_index[0, 0:] = self.num_relative_distance - 3
|
| 353 |
+
relative_position_index[0:, 0] = self.num_relative_distance - 2
|
| 354 |
+
relative_position_index[0, 0] = self.num_relative_distance - 1
|
| 355 |
+
|
| 356 |
+
self.register_buffer("relative_position_index", relative_position_index)
|
| 357 |
+
|
| 358 |
+
def forward(self):
|
| 359 |
+
relative_position_bias = \
|
| 360 |
+
self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
|
| 361 |
+
self.window_size[0] * self.window_size[1] + 1,
|
| 362 |
+
self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH
|
| 363 |
+
return relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
|
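A small sanity sketch for RelativePositionBias (hypothetical, not part of the upload; assumes the class above is in scope): for a 14x14 patch grid the table has (2*14-1)*(2*14-1) + 3 = 732 rows, and forward() returns one bias matrix per head over the 196 patch tokens plus the cls token.

# Hypothetical check of the relative position bias shapes.
import torch
rpb = RelativePositionBias(window_size=(14, 14), num_heads=12)
print(rpb.relative_position_bias_table.shape)   # torch.Size([732, 12])
print(rpb().shape)                              # torch.Size([12, 197, 197])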
| 364 |
+
|
| 365 |
+
|
| 366 |
+
class EVAVisionTransformer(nn.Module):
|
| 367 |
+
""" Vision Transformer with support for patch or hybrid CNN input stage
|
| 368 |
+
"""
|
| 369 |
+
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
|
| 370 |
+
num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
|
| 371 |
+
drop_path_rate=0., norm_layer=nn.LayerNorm, init_values=None, patch_dropout=0.,
|
| 372 |
+
use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, rope=False,
|
| 373 |
+
use_mean_pooling=True, init_scale=0.001, grad_checkpointing=False, xattn=False, postnorm=False,
|
| 374 |
+
pt_hw_seq_len=16, intp_freq=False, naiveswiglu=False, subln=False):
|
| 375 |
+
super().__init__()
|
| 376 |
+
|
| 377 |
+
if not XFORMERS_IS_AVAILBLE:
|
| 378 |
+
xattn = False
|
| 379 |
+
|
| 380 |
+
self.image_size = img_size
|
| 381 |
+
self.num_classes = num_classes
|
| 382 |
+
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
|
| 383 |
+
|
| 384 |
+
self.patch_embed = PatchEmbed(
|
| 385 |
+
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
|
| 386 |
+
num_patches = self.patch_embed.num_patches
|
| 387 |
+
|
| 388 |
+
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
|
| 389 |
+
# self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
|
| 390 |
+
if use_abs_pos_emb:
|
| 391 |
+
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
|
| 392 |
+
else:
|
| 393 |
+
self.pos_embed = None
|
| 394 |
+
self.pos_drop = nn.Dropout(p=drop_rate)
|
| 395 |
+
|
| 396 |
+
if use_shared_rel_pos_bias:
|
| 397 |
+
self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads)
|
| 398 |
+
else:
|
| 399 |
+
self.rel_pos_bias = None
|
| 400 |
+
|
| 401 |
+
if rope:
|
| 402 |
+
half_head_dim = embed_dim // num_heads // 2
|
| 403 |
+
hw_seq_len = img_size // patch_size
|
| 404 |
+
self.rope = VisionRotaryEmbeddingFast(
|
| 405 |
+
dim=half_head_dim,
|
| 406 |
+
pt_seq_len=pt_hw_seq_len,
|
| 407 |
+
ft_seq_len=hw_seq_len if intp_freq else None,
|
| 408 |
+
# patch_dropout=patch_dropout
|
| 409 |
+
)
|
| 410 |
+
else:
|
| 411 |
+
self.rope = None
|
| 412 |
+
|
| 413 |
+
self.naiveswiglu = naiveswiglu
|
| 414 |
+
|
| 415 |
+
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
|
| 416 |
+
self.use_rel_pos_bias = use_rel_pos_bias
|
| 417 |
+
self.blocks = nn.ModuleList([
|
| 418 |
+
Block(
|
| 419 |
+
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
|
| 420 |
+
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
|
| 421 |
+
init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None,
|
| 422 |
+
xattn=xattn, rope=self.rope, postnorm=postnorm, subln=subln, naiveswiglu=naiveswiglu)
|
| 423 |
+
for i in range(depth)])
|
| 424 |
+
self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim)
|
| 425 |
+
self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None
|
| 426 |
+
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
|
| 427 |
+
|
| 428 |
+
if self.pos_embed is not None:
|
| 429 |
+
trunc_normal_(self.pos_embed, std=.02)
|
| 430 |
+
|
| 431 |
+
trunc_normal_(self.cls_token, std=.02)
|
| 432 |
+
# trunc_normal_(self.mask_token, std=.02)
|
| 433 |
+
|
| 434 |
+
self.apply(self._init_weights)
|
| 435 |
+
self.fix_init_weight()
|
| 436 |
+
|
| 437 |
+
if isinstance(self.head, nn.Linear):
|
| 438 |
+
trunc_normal_(self.head.weight, std=.02)
|
| 439 |
+
self.head.weight.data.mul_(init_scale)
|
| 440 |
+
self.head.bias.data.mul_(init_scale)
|
| 441 |
+
|
| 442 |
+
# setting a patch_dropout of 0. would mean it is disabled and this function would be the identity fn
|
| 443 |
+
self.patch_dropout = PatchDropout(patch_dropout) if patch_dropout > 0. else nn.Identity()
|
| 444 |
+
|
| 445 |
+
self.grad_checkpointing = grad_checkpointing
|
| 446 |
+
|
| 447 |
+
def fix_init_weight(self):
|
| 448 |
+
def rescale(param, layer_id):
|
| 449 |
+
param.div_(math.sqrt(2.0 * layer_id))
|
| 450 |
+
|
| 451 |
+
for layer_id, layer in enumerate(self.blocks):
|
| 452 |
+
rescale(layer.attn.proj.weight.data, layer_id + 1)
|
| 453 |
+
if self.naiveswiglu:
|
| 454 |
+
rescale(layer.mlp.w3.weight.data, layer_id + 1)
|
| 455 |
+
else:
|
| 456 |
+
rescale(layer.mlp.fc2.weight.data, layer_id + 1)
|
| 457 |
+
|
| 458 |
+
def get_cast_dtype(self) -> torch.dtype:
|
| 459 |
+
return self.blocks[0].mlp.fc2.weight.dtype
|
| 460 |
+
|
| 461 |
+
def _init_weights(self, m):
|
| 462 |
+
if isinstance(m, nn.Linear):
|
| 463 |
+
trunc_normal_(m.weight, std=.02)
|
| 464 |
+
if m.bias is not None:
|
| 465 |
+
nn.init.constant_(m.bias, 0)
|
| 466 |
+
elif isinstance(m, nn.LayerNorm):
|
| 467 |
+
nn.init.constant_(m.bias, 0)
|
| 468 |
+
nn.init.constant_(m.weight, 1.0)
|
| 469 |
+
|
| 470 |
+
def get_num_layers(self):
|
| 471 |
+
return len(self.blocks)
|
| 472 |
+
|
| 473 |
+
def lock(self, unlocked_groups=0, freeze_bn_stats=False):
|
| 474 |
+
assert unlocked_groups == 0, 'partial locking not currently supported for this model'
|
| 475 |
+
for param in self.parameters():
|
| 476 |
+
param.requires_grad = False
|
| 477 |
+
|
| 478 |
+
@torch.jit.ignore
|
| 479 |
+
def set_grad_checkpointing(self, enable=True):
|
| 480 |
+
self.grad_checkpointing = enable
|
| 481 |
+
|
| 482 |
+
@torch.jit.ignore
|
| 483 |
+
def no_weight_decay(self):
|
| 484 |
+
return {'pos_embed', 'cls_token'}
|
| 485 |
+
|
| 486 |
+
def get_classifier(self):
|
| 487 |
+
return self.head
|
| 488 |
+
|
| 489 |
+
def reset_classifier(self, num_classes, global_pool=''):
|
| 490 |
+
self.num_classes = num_classes
|
| 491 |
+
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
|
| 492 |
+
|
| 493 |
+
def forward_features(self, x, return_all_features=False, return_hidden=False, shuffle=False):
|
| 494 |
+
|
| 495 |
+
x = self.patch_embed(x)
|
| 496 |
+
batch_size, seq_len, _ = x.size()
|
| 497 |
+
|
| 498 |
+
if shuffle:
|
| 499 |
+
idx = torch.randperm(x.shape[1]) + 1
|
| 500 |
+
zero = torch.LongTensor([0, ])
|
| 501 |
+
idx = torch.cat([zero, idx])
|
| 502 |
+
pos_embed = self.pos_embed[:, idx]
|
| 503 |
+
|
| 504 |
+
cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
|
| 505 |
+
x = torch.cat((cls_tokens, x), dim=1)
|
| 506 |
+
if shuffle:
|
| 507 |
+
x = x + pos_embed
|
| 508 |
+
elif self.pos_embed is not None:
|
| 509 |
+
x = x + self.pos_embed
|
| 510 |
+
x = self.pos_drop(x)
|
| 511 |
+
|
| 512 |
+
# a patch_dropout of 0. would mean it is disabled and this function would do nothing but return what was passed in
|
| 513 |
+
if os.getenv('RoPE') == '1':
|
| 514 |
+
if self.training and not isinstance(self.patch_dropout, nn.Identity):
|
| 515 |
+
x, patch_indices_keep = self.patch_dropout(x)
|
| 516 |
+
self.rope.forward = partial(self.rope.forward, patch_indices_keep=patch_indices_keep)
|
| 517 |
+
else:
|
| 518 |
+
self.rope.forward = partial(self.rope.forward, patch_indices_keep=None)
|
| 519 |
+
x = self.patch_dropout(x)
|
| 520 |
+
else:
|
| 521 |
+
x = self.patch_dropout(x)
|
| 522 |
+
|
| 523 |
+
rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
|
| 524 |
+
hidden_states = []
|
| 525 |
+
for idx, blk in enumerate(self.blocks):
|
| 526 |
+
if (0 < idx <= 20) and (idx % 4 == 0) and return_hidden:
|
| 527 |
+
hidden_states.append(x)
|
| 528 |
+
if self.grad_checkpointing:
|
| 529 |
+
x = checkpoint(blk, x, (rel_pos_bias,))
|
| 530 |
+
else:
|
| 531 |
+
x = blk(x, rel_pos_bias=rel_pos_bias)
|
| 532 |
+
|
| 533 |
+
if not return_all_features:
|
| 534 |
+
x = self.norm(x)
|
| 535 |
+
if self.fc_norm is not None:
|
| 536 |
+
return self.fc_norm(x.mean(1)), hidden_states
|
| 537 |
+
else:
|
| 538 |
+
return x[:, 0], hidden_states
|
| 539 |
+
return x
|
| 540 |
+
|
| 541 |
+
def forward(self, x, return_all_features=False, return_hidden=False, shuffle=False):
|
| 542 |
+
if return_all_features:
|
| 543 |
+
return self.forward_features(x, return_all_features, return_hidden, shuffle)
|
| 544 |
+
x, hidden_states = self.forward_features(x, return_all_features, return_hidden, shuffle)
|
| 545 |
+
x = self.head(x)
|
| 546 |
+
if return_hidden:
|
| 547 |
+
return x, hidden_states
|
| 548 |
+
return x
|
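A minimal smoke-test sketch for the vision tower defined above (hypothetical, not part of the upload). It assumes it is executed inside eva_vit_model.py, or after importing EVAVisionTransformer from it, so that the helpers referenced in __init__ (Block, PatchEmbed, PatchDropout, trunc_normal_) resolve; the tiny hyper-parameters are illustrative only.

# Hypothetical smoke test: build a tiny EVA ViT and run one image through it.
import torch

tiny_vit = EVAVisionTransformer(
    img_size=224, patch_size=16,
    num_classes=0,             # 0 -> nn.Identity() head, so the pooled embedding is returned
    embed_dim=192, depth=2, num_heads=3,
    use_mean_pooling=True,     # pooled output = fc_norm(mean over patch tokens)
)
dummy = torch.randn(1, 3, 224, 224)   # must match img_size (see the assert in PatchEmbed)
feats, hidden = tiny_vit(dummy, return_hidden=True)
print(feats.shape)             # torch.Size([1, 192]); hidden is empty at depth=2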
SDXL_EcomID_ComfyUI/eva_clip/factory.py
ADDED
|
@@ -0,0 +1,517 @@
|
| 1 |
+
import json
|
| 2 |
+
import logging
|
| 3 |
+
import os
|
| 4 |
+
import pathlib
|
| 5 |
+
import re
|
| 6 |
+
from copy import deepcopy
|
| 7 |
+
from pathlib import Path
|
| 8 |
+
from typing import Optional, Tuple, Union, Dict, Any
|
| 9 |
+
import torch
|
| 10 |
+
|
| 11 |
+
from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
|
| 12 |
+
from .model import CLIP, CustomCLIP, convert_weights_to_lp, convert_to_custom_text_state_dict,\
|
| 13 |
+
get_cast_dtype
|
| 14 |
+
from .openai import load_openai_model
|
| 15 |
+
from .pretrained import is_pretrained_cfg, get_pretrained_cfg, download_pretrained, list_pretrained_tags_by_model
|
| 16 |
+
from .transform import image_transform
|
| 17 |
+
from .tokenizer import HFTokenizer, tokenize
|
| 18 |
+
from .utils import resize_clip_pos_embed, resize_evaclip_pos_embed, resize_visual_pos_embed, resize_eva_pos_embed
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
_MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"]
|
| 22 |
+
_MODEL_CONFIGS = {} # dictionary (model_name: config) of model architecture configs
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def _natural_key(string_):
|
| 26 |
+
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def _rescan_model_configs():
|
| 30 |
+
global _MODEL_CONFIGS
|
| 31 |
+
|
| 32 |
+
config_ext = ('.json',)
|
| 33 |
+
config_files = []
|
| 34 |
+
for config_path in _MODEL_CONFIG_PATHS:
|
| 35 |
+
if config_path.is_file() and config_path.suffix in config_ext:
|
| 36 |
+
config_files.append(config_path)
|
| 37 |
+
elif config_path.is_dir():
|
| 38 |
+
for ext in config_ext:
|
| 39 |
+
config_files.extend(config_path.glob(f'*{ext}'))
|
| 40 |
+
|
| 41 |
+
for cf in config_files:
|
| 42 |
+
with open(cf, "r", encoding="utf8") as f:
|
| 43 |
+
model_cfg = json.load(f)
|
| 44 |
+
if all(a in model_cfg for a in ('embed_dim', 'vision_cfg', 'text_cfg')):
|
| 45 |
+
_MODEL_CONFIGS[cf.stem] = model_cfg
|
| 46 |
+
|
| 47 |
+
_MODEL_CONFIGS = dict(sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0])))
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
_rescan_model_configs() # initial populate of model config registry
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def list_models():
|
| 54 |
+
""" enumerate available model architectures based on config files """
|
| 55 |
+
return list(_MODEL_CONFIGS.keys())
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def add_model_config(path):
|
| 59 |
+
""" add model config path or file and update registry """
|
| 60 |
+
if not isinstance(path, Path):
|
| 61 |
+
path = Path(path)
|
| 62 |
+
_MODEL_CONFIG_PATHS.append(path)
|
| 63 |
+
_rescan_model_configs()
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def get_model_config(model_name):
|
| 67 |
+
if model_name in _MODEL_CONFIGS:
|
| 68 |
+
return deepcopy(_MODEL_CONFIGS[model_name])
|
| 69 |
+
else:
|
| 70 |
+
return None
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def get_tokenizer(model_name):
|
| 74 |
+
config = get_model_config(model_name)
|
| 75 |
+
tokenizer = HFTokenizer(config['text_cfg']['hf_tokenizer_name']) if 'hf_tokenizer_name' in config['text_cfg'] else tokenize
|
| 76 |
+
return tokenizer
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
# loading openai CLIP weights when is_openai=True for training
|
| 80 |
+
def load_state_dict(checkpoint_path: str, map_location: str='cpu', model_key: str='model|module|state_dict', is_openai: bool=False, skip_list: list=[]):
|
| 81 |
+
if is_openai:
|
| 82 |
+
model = torch.jit.load(checkpoint_path, map_location="cpu").eval()
|
| 83 |
+
state_dict = model.state_dict()
|
| 84 |
+
for key in ["input_resolution", "context_length", "vocab_size"]:
|
| 85 |
+
state_dict.pop(key, None)
|
| 86 |
+
else:
|
| 87 |
+
checkpoint = torch.load(checkpoint_path, map_location=map_location)
|
| 88 |
+
for mk in model_key.split('|'):
|
| 89 |
+
if isinstance(checkpoint, dict) and mk in checkpoint:
|
| 90 |
+
state_dict = checkpoint[mk]
|
| 91 |
+
break
|
| 92 |
+
else:
|
| 93 |
+
state_dict = checkpoint
|
| 94 |
+
if next(iter(state_dict.items()))[0].startswith('module'):
|
| 95 |
+
state_dict = {k[7:]: v for k, v in state_dict.items()}
|
| 96 |
+
|
| 97 |
+
for k in skip_list:
|
| 98 |
+
if k in list(state_dict.keys()):
|
| 99 |
+
logging.info(f"Removing key {k} from pretrained checkpoint")
|
| 100 |
+
del state_dict[k]
|
| 101 |
+
|
| 102 |
+
if os.getenv('RoPE') == '1':
|
| 103 |
+
for k in list(state_dict.keys()):
|
| 104 |
+
if 'freqs_cos' in k or 'freqs_sin' in k:
|
| 105 |
+
del state_dict[k]
|
| 106 |
+
return state_dict
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
def load_checkpoint(model, checkpoint_path, model_key="model|module|state_dict", strict=True):
|
| 111 |
+
state_dict = load_state_dict(checkpoint_path, model_key=model_key, is_openai=False)
|
| 112 |
+
# detect old format and make compatible with new format
|
| 113 |
+
if 'positional_embedding' in state_dict and not hasattr(model, 'positional_embedding'):
|
| 114 |
+
state_dict = convert_to_custom_text_state_dict(state_dict)
|
| 115 |
+
if 'text.logit_scale' in state_dict and hasattr(model, 'logit_scale'):
|
| 116 |
+
state_dict['logit_scale'] = state_dict['text.logit_scale']
|
| 117 |
+
del state_dict['text.logit_scale']
|
| 118 |
+
|
| 119 |
+
# resize_clip_pos_embed for CLIP and open CLIP
|
| 120 |
+
if 'visual.positional_embedding' in state_dict:
|
| 121 |
+
resize_clip_pos_embed(state_dict, model)
|
| 122 |
+
# specified to eva_vit_model
|
| 123 |
+
elif 'visual.pos_embed' in state_dict:
|
| 124 |
+
resize_evaclip_pos_embed(state_dict, model)
|
| 125 |
+
|
| 126 |
+
# resize_clip_pos_embed(state_dict, model)
|
| 127 |
+
incompatible_keys = model.load_state_dict(state_dict, strict=strict)
|
| 128 |
+
logging.info(f"incompatible_keys.missing_keys: {incompatible_keys.missing_keys}")
|
| 129 |
+
return incompatible_keys
|
| 130 |
+
|
| 131 |
+
def load_clip_visual_state_dict(checkpoint_path: str, map_location: str='cpu', is_openai: bool=False, skip_list:list=[]):
|
| 132 |
+
state_dict = load_state_dict(checkpoint_path, map_location=map_location, is_openai=is_openai, skip_list=skip_list)
|
| 133 |
+
|
| 134 |
+
for k in list(state_dict.keys()):
|
| 135 |
+
if not k.startswith('visual.'):
|
| 136 |
+
del state_dict[k]
|
| 137 |
+
for k in list(state_dict.keys()):
|
| 138 |
+
if k.startswith('visual.'):
|
| 139 |
+
new_k = k[7:]
|
| 140 |
+
state_dict[new_k] = state_dict[k]
|
| 141 |
+
del state_dict[k]
|
| 142 |
+
return state_dict
|
| 143 |
+
|
| 144 |
+
def load_clip_text_state_dict(checkpoint_path: str, map_location: str='cpu', is_openai: bool=False, skip_list:list=[]):
|
| 145 |
+
state_dict = load_state_dict(checkpoint_path, map_location=map_location, is_openai=is_openai, skip_list=skip_list)
|
| 146 |
+
|
| 147 |
+
for k in list(state_dict.keys()):
|
| 148 |
+
if k.startswith('visual.'):
|
| 149 |
+
del state_dict[k]
|
| 150 |
+
return state_dict
|
| 151 |
+
|
| 152 |
+
def get_pretrained_tag(pretrained_model):
|
| 153 |
+
pretrained_model = pretrained_model.lower()
|
| 154 |
+
if "laion" in pretrained_model or "open_clip" in pretrained_model:
|
| 155 |
+
return "open_clip"
|
| 156 |
+
elif "openai" in pretrained_model:
|
| 157 |
+
return "clip"
|
| 158 |
+
elif "eva" in pretrained_model and "clip" in pretrained_model:
|
| 159 |
+
return "eva_clip"
|
| 160 |
+
else:
|
| 161 |
+
return "other"
|
| 162 |
+
|
| 163 |
+
def load_pretrained_checkpoint(
|
| 164 |
+
model,
|
| 165 |
+
visual_checkpoint_path,
|
| 166 |
+
text_checkpoint_path,
|
| 167 |
+
strict=True,
|
| 168 |
+
visual_model=None,
|
| 169 |
+
text_model=None,
|
| 170 |
+
model_key="model|module|state_dict",
|
| 171 |
+
skip_list=[]):
|
| 172 |
+
visual_tag = get_pretrained_tag(visual_model)
|
| 173 |
+
text_tag = get_pretrained_tag(text_model)
|
| 174 |
+
|
| 175 |
+
logging.info(f"num of model state_dict keys: {len(model.state_dict().keys())}")
|
| 176 |
+
visual_incompatible_keys, text_incompatible_keys = None, None
|
| 177 |
+
if visual_checkpoint_path:
|
| 178 |
+
if visual_tag == "eva_clip" or visual_tag == "open_clip":
|
| 179 |
+
visual_state_dict = load_clip_visual_state_dict(visual_checkpoint_path, is_openai=False, skip_list=skip_list)
|
| 180 |
+
elif visual_tag == "clip":
|
| 181 |
+
visual_state_dict = load_clip_visual_state_dict(visual_checkpoint_path, is_openai=True, skip_list=skip_list)
|
| 182 |
+
else:
|
| 183 |
+
visual_state_dict = load_state_dict(visual_checkpoint_path, model_key=model_key, is_openai=False, skip_list=skip_list)
|
| 184 |
+
|
| 185 |
+
# resize_clip_pos_embed for CLIP and open CLIP
|
| 186 |
+
if 'positional_embedding' in visual_state_dict:
|
| 187 |
+
resize_visual_pos_embed(visual_state_dict, model)
|
| 188 |
+
# specified to EVA model
|
| 189 |
+
elif 'pos_embed' in visual_state_dict:
|
| 190 |
+
resize_eva_pos_embed(visual_state_dict, model)
|
| 191 |
+
|
| 192 |
+
visual_incompatible_keys = model.visual.load_state_dict(visual_state_dict, strict=strict)
|
| 193 |
+
logging.info(f"num of loaded visual_state_dict keys: {len(visual_state_dict.keys())}")
|
| 194 |
+
logging.info(f"visual_incompatible_keys.missing_keys: {visual_incompatible_keys.missing_keys}")
|
| 195 |
+
|
| 196 |
+
if text_checkpoint_path:
|
| 197 |
+
if text_tag == "eva_clip" or text_tag == "open_clip":
|
| 198 |
+
text_state_dict = load_clip_text_state_dict(text_checkpoint_path, is_openai=False, skip_list=skip_list)
|
| 199 |
+
elif text_tag == "clip":
|
| 200 |
+
text_state_dict = load_clip_text_state_dict(text_checkpoint_path, is_openai=True, skip_list=skip_list)
|
| 201 |
+
else:
|
| 202 |
+
text_state_dict = load_state_dict(text_checkpoint_path, model_key=model_key, is_openai=False, skip_list=skip_list)
|
| 203 |
+
|
| 204 |
+
text_incompatible_keys = model.text.load_state_dict(text_state_dict, strict=strict)
|
| 205 |
+
|
| 206 |
+
logging.info(f"num of loaded text_state_dict keys: {len(text_state_dict.keys())}")
|
| 207 |
+
logging.info(f"text_incompatible_keys.missing_keys: {text_incompatible_keys.missing_keys}")
|
| 208 |
+
|
| 209 |
+
return visual_incompatible_keys, text_incompatible_keys
|
| 210 |
+
|
| 211 |
+
def create_model(
|
| 212 |
+
model_name: str,
|
| 213 |
+
pretrained: Optional[str] = None,
|
| 214 |
+
precision: str = 'fp32',
|
| 215 |
+
device: Union[str, torch.device] = 'cpu',
|
| 216 |
+
jit: bool = False,
|
| 217 |
+
force_quick_gelu: bool = False,
|
| 218 |
+
force_custom_clip: bool = False,
|
| 219 |
+
force_patch_dropout: Optional[float] = None,
|
| 220 |
+
pretrained_image: str = '',
|
| 221 |
+
pretrained_text: str = '',
|
| 222 |
+
pretrained_hf: bool = True,
|
| 223 |
+
pretrained_visual_model: str = None,
|
| 224 |
+
pretrained_text_model: str = None,
|
| 225 |
+
cache_dir: Optional[str] = None,
|
| 226 |
+
skip_list: list = [],
|
| 227 |
+
):
|
| 228 |
+
model_name = model_name.replace('/', '-') # for callers using old naming with / in ViT names
|
| 229 |
+
if isinstance(device, str):
|
| 230 |
+
device = torch.device(device)
|
| 231 |
+
|
| 232 |
+
if pretrained and pretrained.lower() == 'openai':
|
| 233 |
+
logging.info(f'Loading pretrained {model_name} from OpenAI.')
|
| 234 |
+
model = load_openai_model(
|
| 235 |
+
model_name,
|
| 236 |
+
precision=precision,
|
| 237 |
+
device=device,
|
| 238 |
+
jit=jit,
|
| 239 |
+
cache_dir=cache_dir,
|
| 240 |
+
)
|
| 241 |
+
else:
|
| 242 |
+
model_cfg = get_model_config(model_name)
|
| 243 |
+
if model_cfg is not None:
|
| 244 |
+
logging.info(f'Loaded {model_name} model config.')
|
| 245 |
+
else:
|
| 246 |
+
logging.error(f'Model config for {model_name} not found; available models {list_models()}.')
|
| 247 |
+
raise RuntimeError(f'Model config for {model_name} not found.')
|
| 248 |
+
|
| 249 |
+
if 'rope' in model_cfg.get('vision_cfg', {}):
|
| 250 |
+
if model_cfg['vision_cfg']['rope']:
|
| 251 |
+
os.environ['RoPE'] = "1"
|
| 252 |
+
else:
|
| 253 |
+
os.environ['RoPE'] = "0"
|
| 254 |
+
|
| 255 |
+
if force_quick_gelu:
|
| 256 |
+
# override for use of QuickGELU on non-OpenAI transformer models
|
| 257 |
+
model_cfg["quick_gelu"] = True
|
| 258 |
+
|
| 259 |
+
if force_patch_dropout is not None:
|
| 260 |
+
# override the default patch dropout value
|
| 261 |
+
model_cfg['vision_cfg']["patch_dropout"] = force_patch_dropout
|
| 262 |
+
|
| 263 |
+
cast_dtype = get_cast_dtype(precision)
|
| 264 |
+
custom_clip = model_cfg.pop('custom_text', False) or force_custom_clip or ('hf_model_name' in model_cfg['text_cfg'])
|
| 265 |
+
|
| 266 |
+
|
| 267 |
+
if custom_clip:
|
| 268 |
+
if 'hf_model_name' in model_cfg.get('text_cfg', {}):
|
| 269 |
+
model_cfg['text_cfg']['hf_model_pretrained'] = pretrained_hf
|
| 270 |
+
model = CustomCLIP(**model_cfg, cast_dtype=cast_dtype)
|
| 271 |
+
else:
|
| 272 |
+
model = CLIP(**model_cfg, cast_dtype=cast_dtype)
|
| 273 |
+
|
| 274 |
+
pretrained_cfg = {}
|
| 275 |
+
if pretrained:
|
| 276 |
+
checkpoint_path = ''
|
| 277 |
+
pretrained_cfg = get_pretrained_cfg(model_name, pretrained)
|
| 278 |
+
if pretrained_cfg:
|
| 279 |
+
checkpoint_path = download_pretrained(pretrained_cfg, cache_dir=cache_dir)
|
| 280 |
+
elif os.path.exists(pretrained):
|
| 281 |
+
checkpoint_path = pretrained
|
| 282 |
+
|
| 283 |
+
if checkpoint_path:
|
| 284 |
+
logging.info(f'Loading pretrained {model_name} weights ({pretrained}).')
|
| 285 |
+
load_checkpoint(model,
|
| 286 |
+
checkpoint_path,
|
| 287 |
+
model_key="model|module|state_dict",
|
| 288 |
+
strict=False
|
| 289 |
+
)
|
| 290 |
+
else:
|
| 291 |
+
error_str = (
|
| 292 |
+
f'Pretrained weights ({pretrained}) not found for model {model_name}.'
|
| 293 |
+
f' Available pretrained tags: {list_pretrained_tags_by_model(model_name)}.')
|
| 294 |
+
logging.warning(error_str)
|
| 295 |
+
raise RuntimeError(error_str)
|
| 296 |
+
else:
|
| 297 |
+
visual_checkpoint_path = ''
|
| 298 |
+
text_checkpoint_path = ''
|
| 299 |
+
|
| 300 |
+
if pretrained_image:
|
| 301 |
+
pretrained_visual_model = pretrained_visual_model.replace('/', '-') # for callers using old naming with / in ViT names
|
| 302 |
+
pretrained_image_cfg = get_pretrained_cfg(pretrained_visual_model, pretrained_image)
|
| 303 |
+
if 'timm_model_name' in model_cfg.get('vision_cfg', {}):
|
| 304 |
+
# pretrained weight loading for timm models set via vision_cfg
|
| 305 |
+
model_cfg['vision_cfg']['timm_model_pretrained'] = True
|
| 306 |
+
elif pretrained_image_cfg:
|
| 307 |
+
visual_checkpoint_path = download_pretrained(pretrained_image_cfg, cache_dir=cache_dir)
|
| 308 |
+
elif os.path.exists(pretrained_image):
|
| 309 |
+
visual_checkpoint_path = pretrained_image
|
| 310 |
+
else:
|
| 311 |
+
logging.warning(f'Pretrained weights ({visual_checkpoint_path}) not found for model {model_name}.visual.')
|
| 312 |
+
raise RuntimeError(f'Pretrained weights ({visual_checkpoint_path}) not found for model {model_name}.visual.')
|
| 313 |
+
|
| 314 |
+
if pretrained_text:
|
| 315 |
+
pretrained_text_model = pretrained_text_model.replace('/', '-') # for callers using old naming with / in ViT names
|
| 316 |
+
pretrained_text_cfg = get_pretrained_cfg(pretrained_text_model, pretrained_text)
|
| 317 |
+
if pretrained_text_cfg:
|
| 318 |
+
text_checkpoint_path = download_pretrained(pretrained_text_cfg, cache_dir=cache_dir)
|
| 319 |
+
elif os.path.exists(pretrained_text):
|
| 320 |
+
text_checkpoint_path = pretrained_text
|
| 321 |
+
else:
|
| 322 |
+
logging.warning(f'Pretrained weights ({text_checkpoint_path}) not found for model {model_name}.text.')
|
| 323 |
+
raise RuntimeError(f'Pretrained weights ({text_checkpoint_path}) not found for model {model_name}.text.')
|
| 324 |
+
|
| 325 |
+
if visual_checkpoint_path:
|
| 326 |
+
logging.info(f'Loading pretrained {model_name}.visual weights ({visual_checkpoint_path}).')
|
| 327 |
+
if text_checkpoint_path:
|
| 328 |
+
logging.info(f'Loading pretrained {model_name}.text weights ({text_checkpoint_path}).')
|
| 329 |
+
|
| 330 |
+
if visual_checkpoint_path or text_checkpoint_path:
|
| 331 |
+
load_pretrained_checkpoint(
|
| 332 |
+
model,
|
| 333 |
+
visual_checkpoint_path,
|
| 334 |
+
text_checkpoint_path,
|
| 335 |
+
strict=False,
|
| 336 |
+
visual_model=pretrained_visual_model,
|
| 337 |
+
text_model=pretrained_text_model,
|
| 338 |
+
model_key="model|module|state_dict",
|
| 339 |
+
skip_list=skip_list
|
| 340 |
+
)
|
| 341 |
+
|
| 342 |
+
if "fp16" in precision or "bf16" in precision:
|
| 343 |
+
logging.info(f'convert precision to {precision}')
|
| 344 |
+
model = model.to(torch.bfloat16) if 'bf16' in precision else model.to(torch.float16)
|
| 345 |
+
|
| 346 |
+
model.to(device=device)
|
| 347 |
+
|
| 348 |
+
# set image / mean metadata from pretrained_cfg if available, or use default
|
| 349 |
+
model.visual.image_mean = pretrained_cfg.get('mean', None) or OPENAI_DATASET_MEAN
|
| 350 |
+
model.visual.image_std = pretrained_cfg.get('std', None) or OPENAI_DATASET_STD
|
| 351 |
+
|
| 352 |
+
if jit:
|
| 353 |
+
model = torch.jit.script(model)
|
| 354 |
+
|
| 355 |
+
return model
|
| 356 |
+
|
| 357 |
+
|
| 358 |
+
def create_model_and_transforms(
|
| 359 |
+
model_name: str,
|
| 360 |
+
pretrained: Optional[str] = None,
|
| 361 |
+
precision: str = 'fp32',
|
| 362 |
+
device: Union[str, torch.device] = 'cpu',
|
| 363 |
+
jit: bool = False,
|
| 364 |
+
force_quick_gelu: bool = False,
|
| 365 |
+
force_custom_clip: bool = False,
|
| 366 |
+
force_patch_dropout: Optional[float] = None,
|
| 367 |
+
pretrained_image: str = '',
|
| 368 |
+
pretrained_text: str = '',
|
| 369 |
+
pretrained_hf: bool = True,
|
| 370 |
+
pretrained_visual_model: str = None,
|
| 371 |
+
pretrained_text_model: str = None,
|
| 372 |
+
image_mean: Optional[Tuple[float, ...]] = None,
|
| 373 |
+
image_std: Optional[Tuple[float, ...]] = None,
|
| 374 |
+
cache_dir: Optional[str] = None,
|
| 375 |
+
skip_list: list = [],
|
| 376 |
+
):
|
| 377 |
+
model = create_model(
|
| 378 |
+
model_name,
|
| 379 |
+
pretrained,
|
| 380 |
+
precision=precision,
|
| 381 |
+
device=device,
|
| 382 |
+
jit=jit,
|
| 383 |
+
force_quick_gelu=force_quick_gelu,
|
| 384 |
+
force_custom_clip=force_custom_clip,
|
| 385 |
+
force_patch_dropout=force_patch_dropout,
|
| 386 |
+
pretrained_image=pretrained_image,
|
| 387 |
+
pretrained_text=pretrained_text,
|
| 388 |
+
pretrained_hf=pretrained_hf,
|
| 389 |
+
pretrained_visual_model=pretrained_visual_model,
|
| 390 |
+
pretrained_text_model=pretrained_text_model,
|
| 391 |
+
cache_dir=cache_dir,
|
| 392 |
+
skip_list=skip_list,
|
| 393 |
+
)
|
| 394 |
+
|
| 395 |
+
image_mean = image_mean or getattr(model.visual, 'image_mean', None)
|
| 396 |
+
image_std = image_std or getattr(model.visual, 'image_std', None)
|
| 397 |
+
preprocess_train = image_transform(
|
| 398 |
+
model.visual.image_size,
|
| 399 |
+
is_train=True,
|
| 400 |
+
mean=image_mean,
|
| 401 |
+
std=image_std
|
| 402 |
+
)
|
| 403 |
+
preprocess_val = image_transform(
|
| 404 |
+
model.visual.image_size,
|
| 405 |
+
is_train=False,
|
| 406 |
+
mean=image_mean,
|
| 407 |
+
std=image_std
|
| 408 |
+
)
|
| 409 |
+
|
| 410 |
+
return model, preprocess_train, preprocess_val
|
| 411 |
+
|
| 412 |
+
|
| 413 |
+
def create_transforms(
|
| 414 |
+
model_name: str,
|
| 415 |
+
pretrained: Optional[str] = None,
|
| 416 |
+
precision: str = 'fp32',
|
| 417 |
+
device: Union[str, torch.device] = 'cpu',
|
| 418 |
+
jit: bool = False,
|
| 419 |
+
force_quick_gelu: bool = False,
|
| 420 |
+
force_custom_clip: bool = False,
|
| 421 |
+
force_patch_dropout: Optional[float] = None,
|
| 422 |
+
pretrained_image: str = '',
|
| 423 |
+
pretrained_text: str = '',
|
| 424 |
+
pretrained_hf: bool = True,
|
| 425 |
+
pretrained_visual_model: str = None,
|
| 426 |
+
pretrained_text_model: str = None,
|
| 427 |
+
image_mean: Optional[Tuple[float, ...]] = None,
|
| 428 |
+
image_std: Optional[Tuple[float, ...]] = None,
|
| 429 |
+
cache_dir: Optional[str] = None,
|
| 430 |
+
skip_list: list = [],
|
| 431 |
+
):
|
| 432 |
+
model = create_model(
|
| 433 |
+
model_name,
|
| 434 |
+
pretrained,
|
| 435 |
+
precision=precision,
|
| 436 |
+
device=device,
|
| 437 |
+
jit=jit,
|
| 438 |
+
force_quick_gelu=force_quick_gelu,
|
| 439 |
+
force_custom_clip=force_custom_clip,
|
| 440 |
+
force_patch_dropout=force_patch_dropout,
|
| 441 |
+
pretrained_image=pretrained_image,
|
| 442 |
+
pretrained_text=pretrained_text,
|
| 443 |
+
pretrained_hf=pretrained_hf,
|
| 444 |
+
pretrained_visual_model=pretrained_visual_model,
|
| 445 |
+
pretrained_text_model=pretrained_text_model,
|
| 446 |
+
cache_dir=cache_dir,
|
| 447 |
+
skip_list=skip_list,
|
| 448 |
+
)
|
| 449 |
+
|
| 450 |
+
|
| 451 |
+
image_mean = image_mean or getattr(model.visual, 'image_mean', None)
|
| 452 |
+
image_std = image_std or getattr(model.visual, 'image_std', None)
|
| 453 |
+
preprocess_train = image_transform(
|
| 454 |
+
model.visual.image_size,
|
| 455 |
+
is_train=True,
|
| 456 |
+
mean=image_mean,
|
| 457 |
+
std=image_std
|
| 458 |
+
)
|
| 459 |
+
preprocess_val = image_transform(
|
| 460 |
+
model.visual.image_size,
|
| 461 |
+
is_train=False,
|
| 462 |
+
mean=image_mean,
|
| 463 |
+
std=image_std
|
| 464 |
+
)
|
| 465 |
+
del model
|
| 466 |
+
|
| 467 |
+
return preprocess_train, preprocess_val
|
| 468 |
+
|
| 469 |
+
def create_model_from_pretrained(
|
| 470 |
+
model_name: str,
|
| 471 |
+
pretrained: str,
|
| 472 |
+
precision: str = 'fp32',
|
| 473 |
+
device: Union[str, torch.device] = 'cpu',
|
| 474 |
+
jit: bool = False,
|
| 475 |
+
force_quick_gelu: bool = False,
|
| 476 |
+
force_custom_clip: bool = False,
|
| 477 |
+
force_patch_dropout: Optional[float] = None,
|
| 478 |
+
return_transform: bool = True,
|
| 479 |
+
image_mean: Optional[Tuple[float, ...]] = None,
|
| 480 |
+
image_std: Optional[Tuple[float, ...]] = None,
|
| 481 |
+
cache_dir: Optional[str] = None,
|
| 482 |
+
is_frozen: bool = False,
|
| 483 |
+
):
|
| 484 |
+
if not is_pretrained_cfg(model_name, pretrained) and not os.path.exists(pretrained):
|
| 485 |
+
raise RuntimeError(
|
| 486 |
+
f'{pretrained} is not a valid pretrained cfg or checkpoint for {model_name}.'
|
| 487 |
+
f' Use open_clip.list_pretrained() to find one.')
|
| 488 |
+
|
| 489 |
+
model = create_model(
|
| 490 |
+
model_name,
|
| 491 |
+
pretrained,
|
| 492 |
+
precision=precision,
|
| 493 |
+
device=device,
|
| 494 |
+
jit=jit,
|
| 495 |
+
force_quick_gelu=force_quick_gelu,
|
| 496 |
+
force_custom_clip=force_custom_clip,
|
| 497 |
+
force_patch_dropout=force_patch_dropout,
|
| 498 |
+
cache_dir=cache_dir,
|
| 499 |
+
)
|
| 500 |
+
|
| 501 |
+
if is_frozen:
|
| 502 |
+
for param in model.parameters():
|
| 503 |
+
param.requires_grad = False
|
| 504 |
+
|
| 505 |
+
if not return_transform:
|
| 506 |
+
return model
|
| 507 |
+
|
| 508 |
+
image_mean = image_mean or getattr(model.visual, 'image_mean', None)
|
| 509 |
+
image_std = image_std or getattr(model.visual, 'image_std', None)
|
| 510 |
+
preprocess = image_transform(
|
| 511 |
+
model.visual.image_size,
|
| 512 |
+
is_train=False,
|
| 513 |
+
mean=image_mean,
|
| 514 |
+
std=image_std
|
| 515 |
+
)
|
| 516 |
+
|
| 517 |
+
return model, preprocess
|
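A minimal usage sketch for the factory above (hypothetical, not part of the upload). It assumes the package is importable as eva_clip and that a JSON config with the chosen name exists under model_configs/; the name below is only an example, and list_models() enumerates the real options.

# Hypothetical usage of create_model_and_transforms / get_tokenizer.
import torch
from PIL import Image
from eva_clip.factory import create_model_and_transforms, get_tokenizer, list_models

print(list_models())                                   # names of the bundled JSON configs
model, preprocess_train, preprocess_val = create_model_and_transforms(
    "EVA02-CLIP-L-14",                                 # example config name
    pretrained=None,                                   # or a local checkpoint path / pretrained tag
    precision="fp32",
    device="cpu",
)
tokenizer = get_tokenizer("EVA02-CLIP-L-14")
pixels = preprocess_val(Image.new("RGB", (224, 224)))  # normalized tensor for model.visual
tokens = tokenizer(["a product photo"])                # token ids for the text tower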
SDXL_EcomID_ComfyUI/eva_clip/hf_configs.py
ADDED
|
@@ -0,0 +1,57 @@
|
| 1 |
+
# HF architecture dict:
|
| 2 |
+
arch_dict = {
|
| 3 |
+
# https://huggingface.co/docs/transformers/model_doc/roberta#roberta
|
| 4 |
+
"roberta": {
|
| 5 |
+
"config_names": {
|
| 6 |
+
"context_length": "max_position_embeddings",
|
| 7 |
+
"vocab_size": "vocab_size",
|
| 8 |
+
"width": "hidden_size",
|
| 9 |
+
"heads": "num_attention_heads",
|
| 10 |
+
"layers": "num_hidden_layers",
|
| 11 |
+
"layer_attr": "layer",
|
| 12 |
+
"token_embeddings_attr": "embeddings"
|
| 13 |
+
},
|
| 14 |
+
"pooler": "mean_pooler",
|
| 15 |
+
},
|
| 16 |
+
# https://huggingface.co/docs/transformers/model_doc/xlm-roberta#transformers.XLMRobertaConfig
|
| 17 |
+
"xlm-roberta": {
|
| 18 |
+
"config_names": {
|
| 19 |
+
"context_length": "max_position_embeddings",
|
| 20 |
+
"vocab_size": "vocab_size",
|
| 21 |
+
"width": "hidden_size",
|
| 22 |
+
"heads": "num_attention_heads",
|
| 23 |
+
"layers": "num_hidden_layers",
|
| 24 |
+
"layer_attr": "layer",
|
| 25 |
+
"token_embeddings_attr": "embeddings"
|
| 26 |
+
},
|
| 27 |
+
"pooler": "mean_pooler",
|
| 28 |
+
},
|
| 29 |
+
# https://huggingface.co/docs/transformers/model_doc/mt5#mt5
|
| 30 |
+
"mt5": {
|
| 31 |
+
"config_names": {
|
| 32 |
+
# unlimited seqlen
|
| 33 |
+
# https://github.com/google-research/text-to-text-transfer-transformer/issues/273
|
| 34 |
+
# https://github.com/huggingface/transformers/blob/v4.24.0/src/transformers/models/t5/modeling_t5.py#L374
|
| 35 |
+
"context_length": "",
|
| 36 |
+
"vocab_size": "vocab_size",
|
| 37 |
+
"width": "d_model",
|
| 38 |
+
"heads": "num_heads",
|
| 39 |
+
"layers": "num_layers",
|
| 40 |
+
"layer_attr": "block",
|
| 41 |
+
"token_embeddings_attr": "embed_tokens"
|
| 42 |
+
},
|
| 43 |
+
"pooler": "mean_pooler",
|
| 44 |
+
},
|
| 45 |
+
"bert": {
|
| 46 |
+
"config_names": {
|
| 47 |
+
"context_length": "max_position_embeddings",
|
| 48 |
+
"vocab_size": "vocab_size",
|
| 49 |
+
"width": "hidden_size",
|
| 50 |
+
"heads": "num_attention_heads",
|
| 51 |
+
"layers": "num_hidden_layers",
|
| 52 |
+
"layer_attr": "layer",
|
| 53 |
+
"token_embeddings_attr": "embeddings"
|
| 54 |
+
},
|
| 55 |
+
"pooler": "mean_pooler",
|
| 56 |
+
}
|
| 57 |
+
}
|
SDXL_EcomID_ComfyUI/eva_clip/hf_model.py
ADDED
|
@@ -0,0 +1,248 @@
|
| 1 |
+
""" huggingface model adapter
|
| 2 |
+
|
| 3 |
+
Wraps HuggingFace transformers (https://github.com/huggingface/transformers) models for use as a text tower in CLIP model.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import re
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
import torch.nn as nn
|
| 10 |
+
from torch.nn import functional as F
|
| 11 |
+
from torch import TensorType
|
| 12 |
+
try:
|
| 13 |
+
import transformers
|
| 14 |
+
from transformers import AutoModel, AutoModelForMaskedLM, AutoTokenizer, AutoConfig, PretrainedConfig
|
| 15 |
+
from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, \
|
| 16 |
+
BaseModelOutputWithPoolingAndCrossAttentions
|
| 17 |
+
except ImportError as e:
|
| 18 |
+
transformers = None
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class BaseModelOutput:
|
| 22 |
+
pass
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class PretrainedConfig:
|
| 26 |
+
pass
|
| 27 |
+
|
| 28 |
+
from .hf_configs import arch_dict
|
| 29 |
+
|
| 30 |
+
# utils
|
| 31 |
+
def _camel2snake(s):
|
| 32 |
+
return re.sub(r'(?<!^)(?=[A-Z])', '_', s).lower()
|
| 33 |
+
|
| 34 |
+
# TODO: ?last - for gpt-like models
|
| 35 |
+
_POOLERS = {}
|
| 36 |
+
|
| 37 |
+
def register_pooler(cls):
|
| 38 |
+
"""Decorator registering pooler class"""
|
| 39 |
+
_POOLERS[_camel2snake(cls.__name__)] = cls
|
| 40 |
+
return cls
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
@register_pooler
|
| 44 |
+
class MeanPooler(nn.Module):
|
| 45 |
+
"""Mean pooling"""
|
| 46 |
+
def forward(self, x:BaseModelOutput, attention_mask:TensorType):
|
| 47 |
+
masked_output = x.last_hidden_state * attention_mask.unsqueeze(-1)
|
| 48 |
+
return masked_output.sum(dim=1) / attention_mask.sum(-1, keepdim=True)
|
| 49 |
+
|
| 50 |
+
@register_pooler
|
| 51 |
+
class MaxPooler(nn.Module):
|
| 52 |
+
"""Max pooling"""
|
| 53 |
+
def forward(self, x:BaseModelOutput, attention_mask:TensorType):
|
| 54 |
+
masked_output = x.last_hidden_state.masked_fill(attention_mask.unsqueeze(-1) == 0, -torch.inf)
|
| 55 |
+
return masked_output.max(1).values
|
| 56 |
+
|
| 57 |
+
@register_pooler
|
| 58 |
+
class ClsPooler(nn.Module):
|
| 59 |
+
"""CLS token pooling"""
|
| 60 |
+
def __init__(self, use_pooler_output=True):
|
| 61 |
+
super().__init__()
|
| 62 |
+
self.cls_token_position = 0
|
| 63 |
+
self.use_pooler_output = use_pooler_output
|
| 64 |
+
|
| 65 |
+
def forward(self, x:BaseModelOutput, attention_mask:TensorType):
|
| 66 |
+
|
| 67 |
+
if (self.use_pooler_output and
|
| 68 |
+
isinstance(x, (BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions)) and
|
| 69 |
+
(x.pooler_output is not None)
|
| 70 |
+
):
|
| 71 |
+
return x.pooler_output
|
| 72 |
+
|
| 73 |
+
return x.last_hidden_state[:, self.cls_token_position, :]
|
| 74 |
+
|
| 75 |
+
class HFTextEncoder(nn.Module):
|
| 76 |
+
"""HuggingFace model adapter"""
|
| 77 |
+
def __init__(
|
| 78 |
+
self,
|
| 79 |
+
model_name_or_path: str,
|
| 80 |
+
output_dim: int,
|
| 81 |
+
tokenizer_name: str = None,
|
| 82 |
+
config: PretrainedConfig = None,
|
| 83 |
+
pooler_type: str = None,
|
| 84 |
+
proj: str = None,
|
| 85 |
+
pretrained: bool = True,
|
| 86 |
+
masked_language_modeling: bool = False):
|
| 87 |
+
super().__init__()
|
| 88 |
+
|
| 89 |
+
self.output_dim = output_dim
|
| 90 |
+
|
| 91 |
+
# TODO: find better way to get this information
|
| 92 |
+
uses_transformer_pooler = (pooler_type == "cls_pooler")
|
| 93 |
+
|
| 94 |
+
if transformers is None:
|
| 95 |
+
raise RuntimeError("Please `pip install transformers` to use pre-trained HuggingFace models")
|
| 96 |
+
if config is None:
|
| 97 |
+
self.config = AutoConfig.from_pretrained(model_name_or_path)
|
| 98 |
+
if masked_language_modeling:
|
| 99 |
+
create_func, model_args = (AutoModelForMaskedLM.from_pretrained, model_name_or_path) if pretrained else (
|
| 100 |
+
AutoModelForMaskedLM.from_config, self.config)
|
| 101 |
+
else:
|
| 102 |
+
create_func, model_args = (AutoModel.from_pretrained, model_name_or_path) if pretrained else (
|
| 103 |
+
AutoModel.from_config, self.config)
|
| 104 |
+
# TODO: do all model configs have this attribute? PretrainedConfig does, so presumably yes.
|
| 105 |
+
if hasattr(self.config, "is_encoder_decoder") and self.config.is_encoder_decoder:
|
| 106 |
+
self.transformer = create_func(model_args)
|
| 107 |
+
self.transformer = self.transformer.encoder
|
| 108 |
+
else:
|
| 109 |
+
self.transformer = create_func(model_args, add_pooling_layer=uses_transformer_pooler)
|
| 110 |
+
else:
|
| 111 |
+
self.config = config
|
| 112 |
+
if masked_language_modeling:
|
| 113 |
+
self.transformer = AutoModelForMaskedLM.from_config(config)
|
| 114 |
+
else:
|
| 115 |
+
self.transformer = AutoModel.from_config(config)
|
| 116 |
+
|
| 117 |
+
if pooler_type is None: # get default arch pooler
|
| 118 |
+
self.pooler = _POOLERS[(arch_dict[self.config.model_type]["pooler"])]()
|
| 119 |
+
else:
|
| 120 |
+
self.pooler = _POOLERS[pooler_type]()
|
| 121 |
+
|
| 122 |
+
d_model = getattr(self.config, arch_dict[self.config.model_type]["config_names"]["width"])
|
| 123 |
+
if (d_model == output_dim) and (proj is None): # do we always need a proj?
|
| 124 |
+
self.proj = nn.Identity()
|
| 125 |
+
elif proj == 'linear':
|
| 126 |
+
self.proj = nn.Linear(d_model, output_dim, bias=False)
|
| 127 |
+
elif proj == 'mlp':
|
| 128 |
+
hidden_size = (d_model + output_dim) // 2
|
| 129 |
+
self.proj = nn.Sequential(
|
| 130 |
+
nn.Linear(d_model, hidden_size, bias=False),
|
| 131 |
+
nn.GELU(),
|
| 132 |
+
nn.Linear(hidden_size, output_dim, bias=False),
|
| 133 |
+
)
|
| 134 |
+
|
| 135 |
+
# self.itm_proj = nn.Linear(d_model, 2, bias=False)
|
| 136 |
+
# self.mlm_proj = nn.Linear(d_model, self.config.vocab_size), bias=False)
|
| 137 |
+
self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
|
| 138 |
+
|
| 139 |
+
# def forward_itm(self, x:TensorType, image_embeds:TensorType) -> TensorType:
|
| 140 |
+
# image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(x.device)
|
| 141 |
+
# attn_mask = (x != self.config.pad_token_id).long()
|
| 142 |
+
# out = self.transformer(
|
| 143 |
+
# input_ids=x,
|
| 144 |
+
# attention_mask=attn_mask,
|
| 145 |
+
# encoder_hidden_states = image_embeds,
|
| 146 |
+
# encoder_attention_mask = image_atts,
|
| 147 |
+
# )
|
| 148 |
+
# pooled_out = self.pooler(out, attn_mask)
|
| 149 |
+
|
| 150 |
+
# return self.itm_proj(pooled_out)
|
| 151 |
+
|
| 152 |
+
def mask(self, input_ids, vocab_size, device, targets=None, masked_indices=None, probability_matrix=None):
|
| 153 |
+
if masked_indices is None:
|
| 154 |
+
masked_indices = torch.bernoulli(probability_matrix).bool()
|
| 155 |
+
|
| 156 |
+
masked_indices[input_ids == self.tokenizer.pad_token_id] = False
|
| 157 |
+
masked_indices[input_ids == self.tokenizer.cls_token_id] = False
|
| 158 |
+
|
| 159 |
+
if targets is not None:
|
| 160 |
+
targets[~masked_indices] = -100 # We only compute loss on masked tokens
|
| 161 |
+
|
| 162 |
+
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
|
| 163 |
+
indices_replaced = torch.bernoulli(torch.full(input_ids.shape, 0.8)).bool() & masked_indices
|
| 164 |
+
input_ids[indices_replaced] = self.tokenizer.mask_token_id
|
| 165 |
+
|
| 166 |
+
# 10% of the time, we replace masked input tokens with random word
|
| 167 |
+
indices_random = torch.bernoulli(torch.full(input_ids.shape, 0.5)).bool() & masked_indices & ~indices_replaced
|
| 168 |
+
random_words = torch.randint(vocab_size, input_ids.shape, dtype=torch.long).to(device)
|
| 169 |
+
input_ids[indices_random] = random_words[indices_random]
|
| 170 |
+
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
|
| 171 |
+
|
| 172 |
+
if targets is not None:
|
| 173 |
+
return input_ids, targets
|
| 174 |
+
else:
|
| 175 |
+
return input_ids
|
| 176 |
+
|
| 177 |
+
def forward_mlm(self, input_ids, image_embeds, mlm_probability=0.25):
|
| 178 |
+
labels = input_ids.clone()
|
| 179 |
+
attn_mask = (input_ids != self.config.pad_token_id).long()
|
| 180 |
+
image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(input_ids.device)
|
| 181 |
+
vocab_size = getattr(self.config, arch_dict[self.config.model_type]["config_names"]["vocab_size"])
|
| 182 |
+
probability_matrix = torch.full(labels.shape, mlm_probability)
|
| 183 |
+
input_ids, labels = self.mask(input_ids, vocab_size, input_ids.device, targets=labels,
|
| 184 |
+
probability_matrix = probability_matrix)
|
| 185 |
+
mlm_output = self.transformer(input_ids,
|
| 186 |
+
attention_mask = attn_mask,
|
| 187 |
+
encoder_hidden_states = image_embeds,
|
| 188 |
+
encoder_attention_mask = image_atts,
|
| 189 |
+
return_dict = True,
|
| 190 |
+
labels = labels,
|
| 191 |
+
)
|
| 192 |
+
return mlm_output.loss
|
| 193 |
+
# mlm_output = self.transformer(input_ids,
|
| 194 |
+
# attention_mask = attn_mask,
|
| 195 |
+
# encoder_hidden_states = image_embeds,
|
| 196 |
+
# encoder_attention_mask = image_atts,
|
| 197 |
+
# return_dict = True,
|
| 198 |
+
# ).last_hidden_state
|
| 199 |
+
# logits = self.mlm_proj(mlm_output)
|
| 200 |
+
|
| 201 |
+
# # logits = logits[:, :-1, :].contiguous().view(-1, vocab_size)
|
| 202 |
+
# logits = logits[:, 1:, :].contiguous().view(-1, vocab_size)
|
| 203 |
+
# labels = labels[:, 1:].contiguous().view(-1)
|
| 204 |
+
|
| 205 |
+
# mlm_loss = F.cross_entropy(
|
| 206 |
+
# logits,
|
| 207 |
+
# labels,
|
| 208 |
+
# # label_smoothing=0.1,
|
| 209 |
+
# )
|
| 210 |
+
# return mlm_loss
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
def forward(self, x:TensorType) -> TensorType:
|
| 214 |
+
attn_mask = (x != self.config.pad_token_id).long()
|
| 215 |
+
out = self.transformer(input_ids=x, attention_mask=attn_mask)
|
| 216 |
+
pooled_out = self.pooler(out, attn_mask)
|
| 217 |
+
|
| 218 |
+
return self.proj(pooled_out)
|
| 219 |
+
|
| 220 |
+
def lock(self, unlocked_layers:int=0, freeze_layer_norm:bool=True):
|
| 221 |
+
if not unlocked_layers: # full freezing
|
| 222 |
+
for n, p in self.transformer.named_parameters():
|
| 223 |
+
p.requires_grad = (not freeze_layer_norm) if "LayerNorm" in n.split(".") else False
|
| 224 |
+
return
|
| 225 |
+
|
| 226 |
+
encoder = self.transformer.encoder if hasattr(self.transformer, 'encoder') else self.transformer
|
| 227 |
+
layer_list = getattr(encoder, arch_dict[self.config.model_type]["config_names"]["layer_attr"])
|
| 228 |
+
print(f"Unlocking {unlocked_layers}/{len(layer_list) + 1} layers of hf model")
|
| 229 |
+
embeddings = getattr(
|
| 230 |
+
self.transformer, arch_dict[self.config.model_type]["config_names"]["token_embeddings_attr"])
|
| 231 |
+
modules = [embeddings, *layer_list][:-unlocked_layers]
|
| 232 |
+
# freeze layers
|
| 233 |
+
for module in modules:
|
| 234 |
+
for n, p in module.named_parameters():
|
| 235 |
+
p.requires_grad = (not freeze_layer_norm) if "LayerNorm" in n.split(".") else False
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
@torch.jit.ignore
|
| 239 |
+
def set_grad_checkpointing(self, enable=True):
|
| 240 |
+
self.transformer.gradient_checkpointing_enable()
|
| 241 |
+
|
| 242 |
+
def get_num_layers(self):
|
| 243 |
+
encoder = self.transformer.encoder if hasattr(self.transformer, 'encoder') else self.transformer
|
| 244 |
+
layer_list = getattr(encoder, arch_dict[self.config.model_type]["config_names"]["layer_attr"])
|
| 245 |
+
return len(layer_list)
|
| 246 |
+
|
| 247 |
+
def init_parameters(self):
|
| 248 |
+
pass
|
SDXL_EcomID_ComfyUI/eva_clip/loss.py
ADDED
|
@@ -0,0 +1,138 @@
|
| 1 |
+
import math
|
| 2 |
+
import torch
|
| 3 |
+
import torch.nn as nn
|
| 4 |
+
from torch.nn import functional as F
|
| 5 |
+
|
| 6 |
+
try:
|
| 7 |
+
import torch.distributed.nn
|
| 8 |
+
from torch import distributed as dist
|
| 9 |
+
has_distributed = True
|
| 10 |
+
except ImportError:
|
| 11 |
+
has_distributed = False
|
| 12 |
+
|
| 13 |
+
try:
|
| 14 |
+
import horovod.torch as hvd
|
| 15 |
+
except ImportError:
|
| 16 |
+
hvd = None
|
| 17 |
+
|
| 18 |
+
from timm.loss import LabelSmoothingCrossEntropy
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def gather_features(
|
| 22 |
+
image_features,
|
| 23 |
+
text_features,
|
| 24 |
+
local_loss=False,
|
| 25 |
+
gather_with_grad=False,
|
| 26 |
+
rank=0,
|
| 27 |
+
world_size=1,
|
| 28 |
+
use_horovod=False
|
| 29 |
+
):
|
| 30 |
+
assert has_distributed, 'torch.distributed did not import correctly, please use a PyTorch version with support.'
|
| 31 |
+
if use_horovod:
|
| 32 |
+
assert hvd is not None, 'Please install horovod'
|
| 33 |
+
if gather_with_grad:
|
| 34 |
+
all_image_features = hvd.allgather(image_features)
|
| 35 |
+
all_text_features = hvd.allgather(text_features)
|
| 36 |
+
else:
|
| 37 |
+
with torch.no_grad():
|
| 38 |
+
all_image_features = hvd.allgather(image_features)
|
| 39 |
+
all_text_features = hvd.allgather(text_features)
|
| 40 |
+
if not local_loss:
|
| 41 |
+
# ensure grads for local rank when all_* features don't have a gradient
|
| 42 |
+
gathered_image_features = list(all_image_features.chunk(world_size, dim=0))
|
| 43 |
+
gathered_text_features = list(all_text_features.chunk(world_size, dim=0))
|
| 44 |
+
gathered_image_features[rank] = image_features
|
| 45 |
+
gathered_text_features[rank] = text_features
|
| 46 |
+
all_image_features = torch.cat(gathered_image_features, dim=0)
|
| 47 |
+
all_text_features = torch.cat(gathered_text_features, dim=0)
|
| 48 |
+
else:
|
| 49 |
+
# We gather tensors from all gpus
|
| 50 |
+
if gather_with_grad:
|
| 51 |
+
all_image_features = torch.cat(torch.distributed.nn.all_gather(image_features), dim=0)
|
| 52 |
+
all_text_features = torch.cat(torch.distributed.nn.all_gather(text_features), dim=0)
|
| 53 |
+
            # all_image_features = torch.cat(torch.distributed.nn.all_gather(image_features, async_op=True), dim=0)
            # all_text_features = torch.cat(torch.distributed.nn.all_gather(text_features, async_op=True), dim=0)
        else:
            gathered_image_features = [torch.zeros_like(image_features) for _ in range(world_size)]
            gathered_text_features = [torch.zeros_like(text_features) for _ in range(world_size)]
            dist.all_gather(gathered_image_features, image_features)
            dist.all_gather(gathered_text_features, text_features)
            if not local_loss:
                # ensure grads for local rank when all_* features don't have a gradient
                gathered_image_features[rank] = image_features
                gathered_text_features[rank] = text_features
            all_image_features = torch.cat(gathered_image_features, dim=0)
            all_text_features = torch.cat(gathered_text_features, dim=0)

    return all_image_features, all_text_features


class ClipLoss(nn.Module):

    def __init__(
            self,
            local_loss=False,
            gather_with_grad=False,
            cache_labels=False,
            rank=0,
            world_size=1,
            use_horovod=False,
            smoothing=0.,
    ):
        super().__init__()
        self.local_loss = local_loss
        self.gather_with_grad = gather_with_grad
        self.cache_labels = cache_labels
        self.rank = rank
        self.world_size = world_size
        self.use_horovod = use_horovod
        self.label_smoothing_cross_entropy = LabelSmoothingCrossEntropy(smoothing=smoothing) if smoothing > 0 else None

        # cache state
        self.prev_num_logits = 0
        self.labels = {}

    def forward(self, image_features, text_features, logit_scale=1.):
        device = image_features.device
        if self.world_size > 1:
            all_image_features, all_text_features = gather_features(
                image_features, text_features,
                self.local_loss, self.gather_with_grad, self.rank, self.world_size, self.use_horovod)

            if self.local_loss:
                logits_per_image = logit_scale * image_features @ all_text_features.T
                logits_per_text = logit_scale * text_features @ all_image_features.T
            else:
                logits_per_image = logit_scale * all_image_features @ all_text_features.T
                logits_per_text = logits_per_image.T
        else:
            logits_per_image = logit_scale * image_features @ text_features.T
            logits_per_text = logit_scale * text_features @ image_features.T
        # calculated ground-truth and cache if enabled
        num_logits = logits_per_image.shape[0]
        if self.prev_num_logits != num_logits or device not in self.labels:
            labels = torch.arange(num_logits, device=device, dtype=torch.long)
            if self.world_size > 1 and self.local_loss:
                labels = labels + num_logits * self.rank
            if self.cache_labels:
                self.labels[device] = labels
                self.prev_num_logits = num_logits
        else:
            labels = self.labels[device]

        if self.label_smoothing_cross_entropy:
            total_loss = (
                self.label_smoothing_cross_entropy(logits_per_image, labels) +
                self.label_smoothing_cross_entropy(logits_per_text, labels)
                ) / 2
        else:
            total_loss = (
                F.cross_entropy(logits_per_image, labels) +
                F.cross_entropy(logits_per_text, labels)
                ) / 2

        acc = None
        i2t_acc = (logits_per_image.argmax(-1) == labels).sum() / len(logits_per_image)
        t2i_acc = (logits_per_text.argmax(-1) == labels).sum() / len(logits_per_text)
        acc = {"i2t": i2t_acc, "t2i": t2i_acc}
        return total_loss, acc
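For orientation, a minimal single-process usage sketch of the ClipLoss defined above; the module path, batch size, and random feature tensors are illustrative assumptions, not values fixed by this repository, and the import assumes torch (and timm for the optional label-smoothing branch) is installed.

```python
import torch
import torch.nn.functional as F

# Assumption: the vendored package is importable as `eva_clip`.
from eva_clip.loss import ClipLoss

# Illustrative batch of 8 paired image/text embeddings, already L2-normalized.
image_features = F.normalize(torch.randn(8, 512), dim=-1)
text_features = F.normalize(torch.randn(8, 512), dim=-1)
logit_scale = torch.tensor(1 / 0.07)  # exp() of the usual CLIP logit_scale init

loss_fn = ClipLoss(local_loss=False, world_size=1)  # single GPU: no feature gathering
total_loss, acc = loss_fn(image_features, text_features, logit_scale)
print(total_loss.item(), acc["i2t"].item(), acc["t2i"].item())
```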
SDXL_EcomID_ComfyUI/eva_clip/model.py
ADDED
|
@@ -0,0 +1,439 @@
| 1 |
+
""" CLIP Model
|
| 2 |
+
|
| 3 |
+
Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
|
| 4 |
+
"""
|
| 5 |
+
import os
|
| 6 |
+
from dataclasses import dataclass
|
| 7 |
+
from typing import Optional, Tuple, Union
|
| 8 |
+
from functools import partial
|
| 9 |
+
|
| 10 |
+
import numpy as np
|
| 11 |
+
import torch
|
| 12 |
+
import torch.nn.functional as F
|
| 13 |
+
from torch import nn
|
| 14 |
+
|
| 15 |
+
try:
|
| 16 |
+
from .hf_model import HFTextEncoder
|
| 17 |
+
except:
|
| 18 |
+
HFTextEncoder = None
|
| 19 |
+
from .modified_resnet import ModifiedResNet
|
| 20 |
+
from .timm_model import TimmModel
|
| 21 |
+
from .eva_vit_model import EVAVisionTransformer
|
| 22 |
+
from .transformer import LayerNorm, QuickGELU, Attention, VisionTransformer, TextTransformer
|
| 23 |
+
|
| 24 |
+
try:
|
| 25 |
+
from apex.normalization import FusedLayerNorm
|
| 26 |
+
except:
|
| 27 |
+
FusedLayerNorm = LayerNorm
|
| 28 |
+
print("Please 'pip install apex'")
|
| 29 |
+
|
| 30 |
+
try:
|
| 31 |
+
import xformers.ops as xops
|
| 32 |
+
except ImportError:
|
| 33 |
+
xops = None
|
| 34 |
+
print("Please 'pip install xformers'")
|
| 35 |
+
|
| 36 |
+
@dataclass
|
| 37 |
+
class CLIPVisionCfg:
|
| 38 |
+
layers: Union[Tuple[int, int, int, int], int] = 12
|
| 39 |
+
width: int = 768
|
| 40 |
+
head_width: int = 64
|
| 41 |
+
mlp_ratio: float = 4.0
|
| 42 |
+
patch_size: int = 16
|
| 43 |
+
image_size: Union[Tuple[int, int], int] = 224
|
| 44 |
+
ls_init_value: Optional[float] = None # layer scale initial value
|
| 45 |
+
patch_dropout: float = 0. # what fraction of patches to dropout during training (0 would mean disabled and no patches dropped) - 0.5 to 0.75 recommended in the paper for optimal results
|
| 46 |
+
global_average_pool: bool = False # whether to global average pool the last embedding layer, instead of using CLS token (https://arxiv.org/abs/2205.01580)
|
| 47 |
+
drop_path_rate: Optional[float] = None # drop path rate
|
| 48 |
+
timm_model_name: str = None # a valid model name overrides layers, width, patch_size
|
| 49 |
+
timm_model_pretrained: bool = False # use (imagenet) pretrained weights for named model
|
| 50 |
+
timm_pool: str = 'avg' # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '')
|
| 51 |
+
timm_proj: str = 'linear' # linear projection for timm model output ('linear', 'mlp', '')
|
| 52 |
+
timm_proj_bias: bool = False # enable bias final projection
|
| 53 |
+
eva_model_name: str = None # a valid eva model name overrides layers, width, patch_size
|
| 54 |
+
qkv_bias: bool = True
|
| 55 |
+
fusedLN: bool = False
|
| 56 |
+
xattn: bool = False
|
| 57 |
+
postnorm: bool = False
|
| 58 |
+
rope: bool = False
|
| 59 |
+
pt_hw_seq_len: int = 16 # 224/14
|
| 60 |
+
intp_freq: bool = False
|
| 61 |
+
naiveswiglu: bool = False
|
| 62 |
+
subln: bool = False
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
@dataclass
|
| 66 |
+
class CLIPTextCfg:
|
| 67 |
+
context_length: int = 77
|
| 68 |
+
vocab_size: int = 49408
|
| 69 |
+
width: int = 512
|
| 70 |
+
heads: int = 8
|
| 71 |
+
layers: int = 12
|
| 72 |
+
ls_init_value: Optional[float] = None # layer scale initial value
|
| 73 |
+
hf_model_name: str = None
|
| 74 |
+
hf_tokenizer_name: str = None
|
| 75 |
+
hf_model_pretrained: bool = True
|
| 76 |
+
proj: str = 'mlp'
|
| 77 |
+
pooler_type: str = 'mean_pooler'
|
| 78 |
+
masked_language_modeling: bool = False
|
| 79 |
+
fusedLN: bool = False
|
| 80 |
+
xattn: bool = False
|
| 81 |
+
attn_mask: bool = True
|
| 82 |
+
|
| 83 |
+
def get_cast_dtype(precision: str):
|
| 84 |
+
cast_dtype = None
|
| 85 |
+
if precision == 'bf16':
|
| 86 |
+
cast_dtype = torch.bfloat16
|
| 87 |
+
elif precision == 'fp16':
|
| 88 |
+
cast_dtype = torch.float16
|
| 89 |
+
return cast_dtype
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def _build_vision_tower(
|
| 93 |
+
embed_dim: int,
|
| 94 |
+
vision_cfg: CLIPVisionCfg,
|
| 95 |
+
quick_gelu: bool = False,
|
| 96 |
+
cast_dtype: Optional[torch.dtype] = None
|
| 97 |
+
):
|
| 98 |
+
if isinstance(vision_cfg, dict):
|
| 99 |
+
vision_cfg = CLIPVisionCfg(**vision_cfg)
|
| 100 |
+
|
| 101 |
+
# OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more
|
| 102 |
+
# memory efficient in recent PyTorch releases (>= 1.10).
|
| 103 |
+
# NOTE: timm models always use native GELU regardless of quick_gelu flag.
|
| 104 |
+
act_layer = QuickGELU if quick_gelu else nn.GELU
|
| 105 |
+
|
| 106 |
+
if vision_cfg.eva_model_name:
|
| 107 |
+
vision_heads = vision_cfg.width // vision_cfg.head_width
|
| 108 |
+
norm_layer = LayerNorm
|
| 109 |
+
|
| 110 |
+
visual = EVAVisionTransformer(
|
| 111 |
+
img_size=vision_cfg.image_size,
|
| 112 |
+
patch_size=vision_cfg.patch_size,
|
| 113 |
+
num_classes=embed_dim,
|
| 114 |
+
use_mean_pooling=vision_cfg.global_average_pool, #False
|
| 115 |
+
init_values=vision_cfg.ls_init_value,
|
| 116 |
+
patch_dropout=vision_cfg.patch_dropout,
|
| 117 |
+
embed_dim=vision_cfg.width,
|
| 118 |
+
depth=vision_cfg.layers,
|
| 119 |
+
num_heads=vision_heads,
|
| 120 |
+
mlp_ratio=vision_cfg.mlp_ratio,
|
| 121 |
+
qkv_bias=vision_cfg.qkv_bias,
|
| 122 |
+
drop_path_rate=vision_cfg.drop_path_rate,
|
| 123 |
+
norm_layer= partial(FusedLayerNorm, eps=1e-6) if vision_cfg.fusedLN else partial(norm_layer, eps=1e-6),
|
| 124 |
+
xattn=vision_cfg.xattn,
|
| 125 |
+
rope=vision_cfg.rope,
|
| 126 |
+
postnorm=vision_cfg.postnorm,
|
| 127 |
+
pt_hw_seq_len= vision_cfg.pt_hw_seq_len, # 224/14
|
| 128 |
+
intp_freq= vision_cfg.intp_freq,
|
| 129 |
+
naiveswiglu= vision_cfg.naiveswiglu,
|
| 130 |
+
subln= vision_cfg.subln
|
| 131 |
+
)
|
| 132 |
+
elif vision_cfg.timm_model_name:
|
| 133 |
+
visual = TimmModel(
|
| 134 |
+
vision_cfg.timm_model_name,
|
| 135 |
+
pretrained=vision_cfg.timm_model_pretrained,
|
| 136 |
+
pool=vision_cfg.timm_pool,
|
| 137 |
+
proj=vision_cfg.timm_proj,
|
| 138 |
+
proj_bias=vision_cfg.timm_proj_bias,
|
| 139 |
+
embed_dim=embed_dim,
|
| 140 |
+
image_size=vision_cfg.image_size
|
| 141 |
+
)
|
| 142 |
+
act_layer = nn.GELU # so that text transformer doesn't use QuickGELU w/ timm models
|
| 143 |
+
elif isinstance(vision_cfg.layers, (tuple, list)):
|
| 144 |
+
vision_heads = vision_cfg.width * 32 // vision_cfg.head_width
|
| 145 |
+
visual = ModifiedResNet(
|
| 146 |
+
layers=vision_cfg.layers,
|
| 147 |
+
output_dim=embed_dim,
|
| 148 |
+
heads=vision_heads,
|
| 149 |
+
image_size=vision_cfg.image_size,
|
| 150 |
+
width=vision_cfg.width
|
| 151 |
+
)
|
| 152 |
+
else:
|
| 153 |
+
vision_heads = vision_cfg.width // vision_cfg.head_width
|
| 154 |
+
norm_layer = LayerNormFp32 if cast_dtype in (torch.float16, torch.bfloat16) else LayerNorm
|
| 155 |
+
visual = VisionTransformer(
|
| 156 |
+
image_size=vision_cfg.image_size,
|
| 157 |
+
patch_size=vision_cfg.patch_size,
|
| 158 |
+
width=vision_cfg.width,
|
| 159 |
+
layers=vision_cfg.layers,
|
| 160 |
+
heads=vision_heads,
|
| 161 |
+
mlp_ratio=vision_cfg.mlp_ratio,
|
| 162 |
+
ls_init_value=vision_cfg.ls_init_value,
|
| 163 |
+
patch_dropout=vision_cfg.patch_dropout,
|
| 164 |
+
global_average_pool=vision_cfg.global_average_pool,
|
| 165 |
+
output_dim=embed_dim,
|
| 166 |
+
act_layer=act_layer,
|
| 167 |
+
norm_layer=norm_layer,
|
| 168 |
+
)
|
| 169 |
+
|
| 170 |
+
return visual
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
def _build_text_tower(
|
| 174 |
+
embed_dim: int,
|
| 175 |
+
text_cfg: CLIPTextCfg,
|
| 176 |
+
quick_gelu: bool = False,
|
| 177 |
+
cast_dtype: Optional[torch.dtype] = None,
|
| 178 |
+
):
|
| 179 |
+
if isinstance(text_cfg, dict):
|
| 180 |
+
text_cfg = CLIPTextCfg(**text_cfg)
|
| 181 |
+
|
| 182 |
+
if text_cfg.hf_model_name:
|
| 183 |
+
text = HFTextEncoder(
|
| 184 |
+
text_cfg.hf_model_name,
|
| 185 |
+
output_dim=embed_dim,
|
| 186 |
+
tokenizer_name=text_cfg.hf_tokenizer_name,
|
| 187 |
+
proj=text_cfg.proj,
|
| 188 |
+
pooler_type=text_cfg.pooler_type,
|
| 189 |
+
masked_language_modeling=text_cfg.masked_language_modeling
|
| 190 |
+
)
|
| 191 |
+
else:
|
| 192 |
+
act_layer = QuickGELU if quick_gelu else nn.GELU
|
| 193 |
+
norm_layer = LayerNorm
|
| 194 |
+
|
| 195 |
+
text = TextTransformer(
|
| 196 |
+
context_length=text_cfg.context_length,
|
| 197 |
+
vocab_size=text_cfg.vocab_size,
|
| 198 |
+
width=text_cfg.width,
|
| 199 |
+
heads=text_cfg.heads,
|
| 200 |
+
layers=text_cfg.layers,
|
| 201 |
+
ls_init_value=text_cfg.ls_init_value,
|
| 202 |
+
output_dim=embed_dim,
|
| 203 |
+
act_layer=act_layer,
|
| 204 |
+
norm_layer= FusedLayerNorm if text_cfg.fusedLN else norm_layer,
|
| 205 |
+
xattn=text_cfg.xattn,
|
| 206 |
+
attn_mask=text_cfg.attn_mask,
|
| 207 |
+
)
|
| 208 |
+
return text
|
| 209 |
+
|
| 210 |
+
class CLIP(nn.Module):
|
| 211 |
+
def __init__(
|
| 212 |
+
self,
|
| 213 |
+
embed_dim: int,
|
| 214 |
+
vision_cfg: CLIPVisionCfg,
|
| 215 |
+
text_cfg: CLIPTextCfg,
|
| 216 |
+
quick_gelu: bool = False,
|
| 217 |
+
cast_dtype: Optional[torch.dtype] = None,
|
| 218 |
+
):
|
| 219 |
+
super().__init__()
|
| 220 |
+
self.visual = _build_vision_tower(embed_dim, vision_cfg, quick_gelu, cast_dtype)
|
| 221 |
+
|
| 222 |
+
text = _build_text_tower(embed_dim, text_cfg, quick_gelu, cast_dtype)
|
| 223 |
+
self.transformer = text.transformer
|
| 224 |
+
self.vocab_size = text.vocab_size
|
| 225 |
+
self.token_embedding = text.token_embedding
|
| 226 |
+
self.positional_embedding = text.positional_embedding
|
| 227 |
+
self.ln_final = text.ln_final
|
| 228 |
+
self.text_projection = text.text_projection
|
| 229 |
+
self.register_buffer('attn_mask', text.attn_mask, persistent=False)
|
| 230 |
+
|
| 231 |
+
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
|
| 232 |
+
|
| 233 |
+
def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False):
|
| 234 |
+
# lock image tower as per LiT - https://arxiv.org/abs/2111.07991
|
| 235 |
+
self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats)
|
| 236 |
+
|
| 237 |
+
@torch.jit.ignore
|
| 238 |
+
def set_grad_checkpointing(self, enable=True):
|
| 239 |
+
self.visual.set_grad_checkpointing(enable)
|
| 240 |
+
self.transformer.grad_checkpointing = enable
|
| 241 |
+
|
| 242 |
+
@torch.jit.ignore
|
| 243 |
+
def no_weight_decay(self):
|
| 244 |
+
return {'logit_scale'}
|
| 245 |
+
|
| 246 |
+
def encode_image(self, image, normalize: bool = False):
|
| 247 |
+
features = self.visual(image)
|
| 248 |
+
return F.normalize(features, dim=-1) if normalize else features
|
| 249 |
+
|
| 250 |
+
def encode_text(self, text, normalize: bool = False):
|
| 251 |
+
cast_dtype = self.transformer.get_cast_dtype()
|
| 252 |
+
|
| 253 |
+
x = self.token_embedding(text).to(cast_dtype) # [batch_size, n_ctx, d_model]
|
| 254 |
+
|
| 255 |
+
x = x + self.positional_embedding.to(cast_dtype)
|
| 256 |
+
x = x.permute(1, 0, 2) # NLD -> LND
|
| 257 |
+
x = self.transformer(x, attn_mask=self.attn_mask)
|
| 258 |
+
x = x.permute(1, 0, 2) # LND -> NLD
|
| 259 |
+
x = self.ln_final(x) # [batch_size, n_ctx, transformer.width]
|
| 260 |
+
# take features from the eot embedding (eot_token is the highest number in each sequence)
|
| 261 |
+
x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
|
| 262 |
+
return F.normalize(x, dim=-1) if normalize else x
|
| 263 |
+
|
| 264 |
+
def forward(self, image, text):
|
| 265 |
+
image_features = self.encode_image(image, normalize=True)
|
| 266 |
+
text_features = self.encode_text(text, normalize=True)
|
| 267 |
+
return image_features, text_features, self.logit_scale.exp()
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
class CustomCLIP(nn.Module):
|
| 271 |
+
def __init__(
|
| 272 |
+
self,
|
| 273 |
+
embed_dim: int,
|
| 274 |
+
vision_cfg: CLIPVisionCfg,
|
| 275 |
+
text_cfg: CLIPTextCfg,
|
| 276 |
+
quick_gelu: bool = False,
|
| 277 |
+
cast_dtype: Optional[torch.dtype] = None,
|
| 278 |
+
itm_task: bool = False,
|
| 279 |
+
):
|
| 280 |
+
super().__init__()
|
| 281 |
+
self.visual = _build_vision_tower(embed_dim, vision_cfg, quick_gelu, cast_dtype)
|
| 282 |
+
self.text = _build_text_tower(embed_dim, text_cfg, quick_gelu, cast_dtype)
|
| 283 |
+
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
|
| 284 |
+
|
| 285 |
+
def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False):
|
| 286 |
+
# lock image tower as per LiT - https://arxiv.org/abs/2111.07991
|
| 287 |
+
self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats)
|
| 288 |
+
|
| 289 |
+
def lock_text_tower(self, unlocked_layers:int=0, freeze_layer_norm:bool=True):
|
| 290 |
+
self.text.lock(unlocked_layers, freeze_layer_norm)
|
| 291 |
+
|
| 292 |
+
@torch.jit.ignore
|
| 293 |
+
def set_grad_checkpointing(self, enable=True):
|
| 294 |
+
self.visual.set_grad_checkpointing(enable)
|
| 295 |
+
self.text.set_grad_checkpointing(enable)
|
| 296 |
+
|
| 297 |
+
@torch.jit.ignore
|
| 298 |
+
def no_weight_decay(self):
|
| 299 |
+
return {'logit_scale'}
|
| 300 |
+
|
| 301 |
+
def encode_image(self, image, normalize: bool = False):
|
| 302 |
+
features = self.visual(image)
|
| 303 |
+
return F.normalize(features, dim=-1) if normalize else features
|
| 304 |
+
|
| 305 |
+
def encode_text(self, text, normalize: bool = False):
|
| 306 |
+
features = self.text(text)
|
| 307 |
+
return F.normalize(features, dim=-1) if normalize else features
|
| 308 |
+
|
| 309 |
+
def forward(self, image, text):
|
| 310 |
+
image_features = self.encode_image(image, normalize=True)
|
| 311 |
+
text_features = self.encode_text(text, normalize=True)
|
| 312 |
+
return image_features, text_features, self.logit_scale.exp()
|
| 313 |
+
|
| 314 |
+
|
| 315 |
+
def convert_weights_to_lp(model: nn.Module, dtype=torch.float16):
|
| 316 |
+
"""Convert applicable model parameters to low-precision (bf16 or fp16)"""
|
| 317 |
+
|
| 318 |
+
def _convert_weights(l):
|
| 319 |
+
|
| 320 |
+
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
|
| 321 |
+
l.weight.data = l.weight.data.to(dtype)
|
| 322 |
+
if l.bias is not None:
|
| 323 |
+
l.bias.data = l.bias.data.to(dtype)
|
| 324 |
+
|
| 325 |
+
if isinstance(l, (nn.MultiheadAttention, Attention)):
|
| 326 |
+
for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
|
| 327 |
+
tensor = getattr(l, attr, None)
|
| 328 |
+
if tensor is not None:
|
| 329 |
+
tensor.data = tensor.data.to(dtype)
|
| 330 |
+
|
| 331 |
+
if isinstance(l, nn.Parameter):
|
| 332 |
+
l.data = l.data.to(dtype)
|
| 333 |
+
|
| 334 |
+
for name in ["text_projection", "proj"]:
|
| 335 |
+
if hasattr(l, name) and isinstance(l, nn.Parameter):
|
| 336 |
+
attr = getattr(l, name, None)
|
| 337 |
+
if attr is not None:
|
| 338 |
+
attr.data = attr.data.to(dtype)
|
| 339 |
+
|
| 340 |
+
model.apply(_convert_weights)
|
| 341 |
+
|
| 342 |
+
|
| 343 |
+
convert_weights_to_fp16 = convert_weights_to_lp # backwards compat
|
| 344 |
+
|
| 345 |
+
|
| 346 |
+
# used to maintain checkpoint compatibility
|
| 347 |
+
def convert_to_custom_text_state_dict(state_dict: dict):
|
| 348 |
+
if 'text_projection' in state_dict:
|
| 349 |
+
# old format state_dict, move text tower -> .text
|
| 350 |
+
new_state_dict = {}
|
| 351 |
+
for k, v in state_dict.items():
|
| 352 |
+
if any(k.startswith(p) for p in (
|
| 353 |
+
'text_projection',
|
| 354 |
+
'positional_embedding',
|
| 355 |
+
'token_embedding',
|
| 356 |
+
'transformer',
|
| 357 |
+
'ln_final',
|
| 358 |
+
'logit_scale'
|
| 359 |
+
)):
|
| 360 |
+
k = 'text.' + k
|
| 361 |
+
new_state_dict[k] = v
|
| 362 |
+
return new_state_dict
|
| 363 |
+
return state_dict
|
| 364 |
+
|
| 365 |
+
|
| 366 |
+
def build_model_from_openai_state_dict(
|
| 367 |
+
state_dict: dict,
|
| 368 |
+
quick_gelu=True,
|
| 369 |
+
cast_dtype=torch.float16,
|
| 370 |
+
):
|
| 371 |
+
vit = "visual.proj" in state_dict
|
| 372 |
+
|
| 373 |
+
if vit:
|
| 374 |
+
vision_width = state_dict["visual.conv1.weight"].shape[0]
|
| 375 |
+
vision_layers = len(
|
| 376 |
+
[k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
|
| 377 |
+
vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
|
| 378 |
+
grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
|
| 379 |
+
image_size = vision_patch_size * grid_size
|
| 380 |
+
else:
|
| 381 |
+
counts: list = [
|
| 382 |
+
len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
|
| 383 |
+
vision_layers = tuple(counts)
|
| 384 |
+
vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
|
| 385 |
+
output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
|
| 386 |
+
vision_patch_size = None
|
| 387 |
+
assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
|
| 388 |
+
image_size = output_width * 32
|
| 389 |
+
|
| 390 |
+
embed_dim = state_dict["text_projection"].shape[1]
|
| 391 |
+
context_length = state_dict["positional_embedding"].shape[0]
|
| 392 |
+
vocab_size = state_dict["token_embedding.weight"].shape[0]
|
| 393 |
+
transformer_width = state_dict["ln_final.weight"].shape[0]
|
| 394 |
+
transformer_heads = transformer_width // 64
|
| 395 |
+
transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))
|
| 396 |
+
|
| 397 |
+
vision_cfg = CLIPVisionCfg(
|
| 398 |
+
layers=vision_layers,
|
| 399 |
+
width=vision_width,
|
| 400 |
+
patch_size=vision_patch_size,
|
| 401 |
+
image_size=image_size,
|
| 402 |
+
)
|
| 403 |
+
text_cfg = CLIPTextCfg(
|
| 404 |
+
context_length=context_length,
|
| 405 |
+
vocab_size=vocab_size,
|
| 406 |
+
width=transformer_width,
|
| 407 |
+
heads=transformer_heads,
|
| 408 |
+
layers=transformer_layers
|
| 409 |
+
)
|
| 410 |
+
model = CLIP(
|
| 411 |
+
embed_dim,
|
| 412 |
+
vision_cfg=vision_cfg,
|
| 413 |
+
text_cfg=text_cfg,
|
| 414 |
+
quick_gelu=quick_gelu, # OpenAI models were trained with QuickGELU
|
| 415 |
+
cast_dtype=cast_dtype,
|
| 416 |
+
)
|
| 417 |
+
|
| 418 |
+
for key in ["input_resolution", "context_length", "vocab_size"]:
|
| 419 |
+
state_dict.pop(key, None)
|
| 420 |
+
|
| 421 |
+
convert_weights_to_fp16(model) # OpenAI state dicts are partially converted to float16
|
| 422 |
+
model.load_state_dict(state_dict)
|
| 423 |
+
return model.eval()
|
| 424 |
+
|
| 425 |
+
|
| 426 |
+
def trace_model(model, batch_size=256, device=torch.device('cpu')):
|
| 427 |
+
model.eval()
|
| 428 |
+
image_size = model.visual.image_size
|
| 429 |
+
example_images = torch.ones((batch_size, 3, image_size, image_size), device=device)
|
| 430 |
+
example_text = torch.zeros((batch_size, model.context_length), dtype=torch.int, device=device)
|
| 431 |
+
model = torch.jit.trace_module(
|
| 432 |
+
model,
|
| 433 |
+
inputs=dict(
|
| 434 |
+
forward=(example_images, example_text),
|
| 435 |
+
encode_text=(example_text,),
|
| 436 |
+
encode_image=(example_images,)
|
| 437 |
+
))
|
| 438 |
+
model.visual.image_size = image_size
|
| 439 |
+
return model
|
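A sketch of how the config dataclasses above feed the CLIP class, assuming the vendored `transformer` module provides the referenced VisionTransformer/TextTransformer and the package is importable as `eva_clip`; the config values simply mirror the dataclass defaults and are not taken from a shipped checkpoint.

```python
import torch

# Assumption: package importable as `eva_clip`; fp32 (cast_dtype=None) is used here.
from eva_clip.model import CLIP, CLIPVisionCfg, CLIPTextCfg

vision_cfg = CLIPVisionCfg(image_size=224, layers=12, width=768, patch_size=16)
text_cfg = CLIPTextCfg(context_length=77, vocab_size=49408, width=512, heads=8, layers=12)

model = CLIP(embed_dim=512, vision_cfg=vision_cfg, text_cfg=text_cfg).eval()

image = torch.randn(1, 3, 224, 224)
text = torch.randint(0, 49408, (1, 77))
with torch.no_grad():
    image_features, text_features, logit_scale = model(image, text)
print(image_features.shape, text_features.shape, logit_scale.item())
```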
SDXL_EcomID_ComfyUI/eva_clip/model_configs/.DS_Store
ADDED
|
Binary file (6.15 kB)
|
|
|
SDXL_EcomID_ComfyUI/eva_clip/model_configs/EVA01-CLIP-B-16.json
ADDED
|
@@ -0,0 +1,19 @@
{
    "embed_dim": 512,
    "vision_cfg": {
        "image_size": 224,
        "layers": 12,
        "width": 768,
        "patch_size": 16,
        "eva_model_name": "eva-clip-b-16",
        "ls_init_value": 0.1,
        "drop_path_rate": 0.0
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 512,
        "heads": 8,
        "layers": 12
    }
}
SDXL_EcomID_ComfyUI/eva_clip/model_configs/EVA01-CLIP-g-14-plus.json
ADDED
|
@@ -0,0 +1,24 @@
{
    "embed_dim": 1024,
    "vision_cfg": {
        "image_size": 224,
        "layers": 40,
        "width": 1408,
        "head_width": 88,
        "mlp_ratio": 4.3637,
        "patch_size": 14,
        "eva_model_name": "eva-clip-g-14-x",
        "drop_path_rate": 0,
        "xattn": true,
        "fusedLN": true
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 1024,
        "heads": 16,
        "layers": 24,
        "xattn": false,
        "fusedLN": true
    }
}
SDXL_EcomID_ComfyUI/eva_clip/model_configs/EVA01-CLIP-g-14.json
ADDED
|
@@ -0,0 +1,24 @@
{
    "embed_dim": 1024,
    "vision_cfg": {
        "image_size": 224,
        "layers": 40,
        "width": 1408,
        "head_width": 88,
        "mlp_ratio": 4.3637,
        "patch_size": 14,
        "eva_model_name": "eva-clip-g-14-x",
        "drop_path_rate": 0.4,
        "xattn": true,
        "fusedLN": true
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 768,
        "heads": 12,
        "layers": 12,
        "xattn": false,
        "fusedLN": true
    }
}
SDXL_EcomID_ComfyUI/eva_clip/model_configs/EVA02-CLIP-B-16.json
ADDED
|
@@ -0,0 +1,29 @@
{
    "embed_dim": 512,
    "vision_cfg": {
        "image_size": 224,
        "layers": 12,
        "width": 768,
        "head_width": 64,
        "patch_size": 16,
        "mlp_ratio": 2.6667,
        "eva_model_name": "eva-clip-b-16-X",
        "drop_path_rate": 0.0,
        "xattn": true,
        "fusedLN": true,
        "rope": true,
        "pt_hw_seq_len": 16,
        "intp_freq": true,
        "naiveswiglu": true,
        "subln": true
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 512,
        "heads": 8,
        "layers": 12,
        "xattn": true,
        "fusedLN": true
    }
}
SDXL_EcomID_ComfyUI/eva_clip/model_configs/EVA02-CLIP-L-14-336.json
ADDED
|
@@ -0,0 +1,29 @@
{
    "embed_dim": 768,
    "vision_cfg": {
        "image_size": 336,
        "layers": 24,
        "width": 1024,
        "drop_path_rate": 0,
        "head_width": 64,
        "mlp_ratio": 2.6667,
        "patch_size": 14,
        "eva_model_name": "eva-clip-l-14-336",
        "xattn": true,
        "fusedLN": true,
        "rope": true,
        "pt_hw_seq_len": 16,
        "intp_freq": true,
        "naiveswiglu": true,
        "subln": true
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 768,
        "heads": 12,
        "layers": 12,
        "xattn": false,
        "fusedLN": true
    }
}
SDXL_EcomID_ComfyUI/eva_clip/model_configs/EVA02-CLIP-L-14.json
ADDED
|
@@ -0,0 +1,29 @@
{
    "embed_dim": 768,
    "vision_cfg": {
        "image_size": 224,
        "layers": 24,
        "width": 1024,
        "drop_path_rate": 0,
        "head_width": 64,
        "mlp_ratio": 2.6667,
        "patch_size": 14,
        "eva_model_name": "eva-clip-l-14",
        "xattn": true,
        "fusedLN": true,
        "rope": true,
        "pt_hw_seq_len": 16,
        "intp_freq": true,
        "naiveswiglu": true,
        "subln": true
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 768,
        "heads": 12,
        "layers": 12,
        "xattn": false,
        "fusedLN": true
    }
}
SDXL_EcomID_ComfyUI/eva_clip/model_configs/EVA02-CLIP-bigE-14-plus.json
ADDED
|
@@ -0,0 +1,25 @@
{
    "embed_dim": 1024,
    "vision_cfg": {
        "image_size": 224,
        "layers": 64,
        "width": 1792,
        "head_width": 112,
        "mlp_ratio": 8.571428571428571,
        "patch_size": 14,
        "eva_model_name": "eva-clip-4b-14-x",
        "drop_path_rate": 0,
        "xattn": true,
        "postnorm": true,
        "fusedLN": true
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 1280,
        "heads": 20,
        "layers": 32,
        "xattn": false,
        "fusedLN": true
    }
}
SDXL_EcomID_ComfyUI/eva_clip/model_configs/EVA02-CLIP-bigE-14.json
ADDED
|
@@ -0,0 +1,25 @@
{
    "embed_dim": 1024,
    "vision_cfg": {
        "image_size": 224,
        "layers": 64,
        "width": 1792,
        "head_width": 112,
        "mlp_ratio": 8.571428571428571,
        "patch_size": 14,
        "eva_model_name": "eva-clip-4b-14-x",
        "drop_path_rate": 0,
        "xattn": true,
        "postnorm": true,
        "fusedLN": true
    },
    "text_cfg": {
        "context_length": 77,
        "vocab_size": 49408,
        "width": 1024,
        "heads": 16,
        "layers": 24,
        "xattn": false,
        "fusedLN": true
    }
}
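The JSON files above map one-to-one onto the CLIPVisionCfg / CLIPTextCfg dataclasses in model.py; a minimal loading sketch, where the relative path is an assumption about the working directory rather than something the code enforces.

```python
import json

# Assumption: package importable as `eva_clip`, run from the SDXL_EcomID_ComfyUI directory.
from eva_clip.model import CLIPVisionCfg, CLIPTextCfg

with open("eva_clip/model_configs/EVA02-CLIP-L-14-336.json") as f:
    cfg = json.load(f)

# _build_vision_tower / _build_text_tower accept plain dicts and wrap them like this:
vision_cfg = CLIPVisionCfg(**cfg["vision_cfg"])
text_cfg = CLIPTextCfg(**cfg["text_cfg"])
print(cfg["embed_dim"], vision_cfg.eva_model_name, text_cfg.width)
```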
SDXL_EcomID_ComfyUI/eva_clip/modified_resnet.py
ADDED
|
@@ -0,0 +1,181 @@
| 1 |
+
from collections import OrderedDict
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
from torch import nn
|
| 5 |
+
from torch.nn import functional as F
|
| 6 |
+
|
| 7 |
+
from .utils import freeze_batch_norm_2d
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class Bottleneck(nn.Module):
|
| 11 |
+
expansion = 4
|
| 12 |
+
|
| 13 |
+
def __init__(self, inplanes, planes, stride=1):
|
| 14 |
+
super().__init__()
|
| 15 |
+
|
| 16 |
+
# all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
|
| 17 |
+
self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
|
| 18 |
+
self.bn1 = nn.BatchNorm2d(planes)
|
| 19 |
+
self.act1 = nn.ReLU(inplace=True)
|
| 20 |
+
|
| 21 |
+
self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
|
| 22 |
+
self.bn2 = nn.BatchNorm2d(planes)
|
| 23 |
+
self.act2 = nn.ReLU(inplace=True)
|
| 24 |
+
|
| 25 |
+
self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
|
| 26 |
+
|
| 27 |
+
self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
|
| 28 |
+
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
|
| 29 |
+
self.act3 = nn.ReLU(inplace=True)
|
| 30 |
+
|
| 31 |
+
self.downsample = None
|
| 32 |
+
self.stride = stride
|
| 33 |
+
|
| 34 |
+
if stride > 1 or inplanes != planes * Bottleneck.expansion:
|
| 35 |
+
# downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
|
| 36 |
+
self.downsample = nn.Sequential(OrderedDict([
|
| 37 |
+
("-1", nn.AvgPool2d(stride)),
|
| 38 |
+
("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
|
| 39 |
+
("1", nn.BatchNorm2d(planes * self.expansion))
|
| 40 |
+
]))
|
| 41 |
+
|
| 42 |
+
def forward(self, x: torch.Tensor):
|
| 43 |
+
identity = x
|
| 44 |
+
|
| 45 |
+
out = self.act1(self.bn1(self.conv1(x)))
|
| 46 |
+
out = self.act2(self.bn2(self.conv2(out)))
|
| 47 |
+
out = self.avgpool(out)
|
| 48 |
+
out = self.bn3(self.conv3(out))
|
| 49 |
+
|
| 50 |
+
if self.downsample is not None:
|
| 51 |
+
identity = self.downsample(x)
|
| 52 |
+
|
| 53 |
+
out += identity
|
| 54 |
+
out = self.act3(out)
|
| 55 |
+
return out
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
class AttentionPool2d(nn.Module):
|
| 59 |
+
def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
|
| 60 |
+
super().__init__()
|
| 61 |
+
self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
|
| 62 |
+
self.k_proj = nn.Linear(embed_dim, embed_dim)
|
| 63 |
+
self.q_proj = nn.Linear(embed_dim, embed_dim)
|
| 64 |
+
self.v_proj = nn.Linear(embed_dim, embed_dim)
|
| 65 |
+
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
|
| 66 |
+
self.num_heads = num_heads
|
| 67 |
+
|
| 68 |
+
def forward(self, x):
|
| 69 |
+
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC
|
| 70 |
+
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
|
| 71 |
+
x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
|
| 72 |
+
x, _ = F.multi_head_attention_forward(
|
| 73 |
+
query=x, key=x, value=x,
|
| 74 |
+
embed_dim_to_check=x.shape[-1],
|
| 75 |
+
num_heads=self.num_heads,
|
| 76 |
+
q_proj_weight=self.q_proj.weight,
|
| 77 |
+
k_proj_weight=self.k_proj.weight,
|
| 78 |
+
v_proj_weight=self.v_proj.weight,
|
| 79 |
+
in_proj_weight=None,
|
| 80 |
+
in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
|
| 81 |
+
bias_k=None,
|
| 82 |
+
bias_v=None,
|
| 83 |
+
add_zero_attn=False,
|
| 84 |
+
dropout_p=0.,
|
| 85 |
+
out_proj_weight=self.c_proj.weight,
|
| 86 |
+
out_proj_bias=self.c_proj.bias,
|
| 87 |
+
use_separate_proj_weight=True,
|
| 88 |
+
training=self.training,
|
| 89 |
+
need_weights=False
|
| 90 |
+
)
|
| 91 |
+
|
| 92 |
+
return x[0]
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
class ModifiedResNet(nn.Module):
|
| 96 |
+
"""
|
| 97 |
+
A ResNet class that is similar to torchvision's but contains the following changes:
|
| 98 |
+
- There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
|
| 99 |
+
- Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
|
| 100 |
+
- The final pooling layer is a QKV attention instead of an average pool
|
| 101 |
+
"""
|
| 102 |
+
|
| 103 |
+
def __init__(self, layers, output_dim, heads, image_size=224, width=64):
|
| 104 |
+
super().__init__()
|
| 105 |
+
self.output_dim = output_dim
|
| 106 |
+
self.image_size = image_size
|
| 107 |
+
|
| 108 |
+
# the 3-layer stem
|
| 109 |
+
self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
|
| 110 |
+
self.bn1 = nn.BatchNorm2d(width // 2)
|
| 111 |
+
self.act1 = nn.ReLU(inplace=True)
|
| 112 |
+
self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
|
| 113 |
+
self.bn2 = nn.BatchNorm2d(width // 2)
|
| 114 |
+
self.act2 = nn.ReLU(inplace=True)
|
| 115 |
+
self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
|
| 116 |
+
self.bn3 = nn.BatchNorm2d(width)
|
| 117 |
+
self.act3 = nn.ReLU(inplace=True)
|
| 118 |
+
self.avgpool = nn.AvgPool2d(2)
|
| 119 |
+
|
| 120 |
+
# residual layers
|
| 121 |
+
self._inplanes = width # this is a *mutable* variable used during construction
|
| 122 |
+
self.layer1 = self._make_layer(width, layers[0])
|
| 123 |
+
self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
|
| 124 |
+
self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
|
| 125 |
+
self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
|
| 126 |
+
|
| 127 |
+
embed_dim = width * 32 # the ResNet feature dimension
|
| 128 |
+
self.attnpool = AttentionPool2d(image_size // 32, embed_dim, heads, output_dim)
|
| 129 |
+
|
| 130 |
+
self.init_parameters()
|
| 131 |
+
|
| 132 |
+
def _make_layer(self, planes, blocks, stride=1):
|
| 133 |
+
layers = [Bottleneck(self._inplanes, planes, stride)]
|
| 134 |
+
|
| 135 |
+
self._inplanes = planes * Bottleneck.expansion
|
| 136 |
+
for _ in range(1, blocks):
|
| 137 |
+
layers.append(Bottleneck(self._inplanes, planes))
|
| 138 |
+
|
| 139 |
+
return nn.Sequential(*layers)
|
| 140 |
+
|
| 141 |
+
def init_parameters(self):
|
| 142 |
+
if self.attnpool is not None:
|
| 143 |
+
std = self.attnpool.c_proj.in_features ** -0.5
|
| 144 |
+
nn.init.normal_(self.attnpool.q_proj.weight, std=std)
|
| 145 |
+
nn.init.normal_(self.attnpool.k_proj.weight, std=std)
|
| 146 |
+
nn.init.normal_(self.attnpool.v_proj.weight, std=std)
|
| 147 |
+
nn.init.normal_(self.attnpool.c_proj.weight, std=std)
|
| 148 |
+
|
| 149 |
+
for resnet_block in [self.layer1, self.layer2, self.layer3, self.layer4]:
|
| 150 |
+
for name, param in resnet_block.named_parameters():
|
| 151 |
+
if name.endswith("bn3.weight"):
|
| 152 |
+
nn.init.zeros_(param)
|
| 153 |
+
|
| 154 |
+
def lock(self, unlocked_groups=0, freeze_bn_stats=False):
|
| 155 |
+
assert unlocked_groups == 0, 'partial locking not currently supported for this model'
|
| 156 |
+
for param in self.parameters():
|
| 157 |
+
param.requires_grad = False
|
| 158 |
+
if freeze_bn_stats:
|
| 159 |
+
freeze_batch_norm_2d(self)
|
| 160 |
+
|
| 161 |
+
@torch.jit.ignore
|
| 162 |
+
def set_grad_checkpointing(self, enable=True):
|
| 163 |
+
# FIXME support for non-transformer
|
| 164 |
+
pass
|
| 165 |
+
|
| 166 |
+
def stem(self, x):
|
| 167 |
+
x = self.act1(self.bn1(self.conv1(x)))
|
| 168 |
+
x = self.act2(self.bn2(self.conv2(x)))
|
| 169 |
+
x = self.act3(self.bn3(self.conv3(x)))
|
| 170 |
+
x = self.avgpool(x)
|
| 171 |
+
return x
|
| 172 |
+
|
| 173 |
+
def forward(self, x):
|
| 174 |
+
x = self.stem(x)
|
| 175 |
+
x = self.layer1(x)
|
| 176 |
+
x = self.layer2(x)
|
| 177 |
+
x = self.layer3(x)
|
| 178 |
+
x = self.layer4(x)
|
| 179 |
+
x = self.attnpool(x)
|
| 180 |
+
|
| 181 |
+
return x
|
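To make the pooled output shape concrete, a small shape-check sketch for the ModifiedResNet above; the RN50-style (3, 4, 6, 3) block layout and the head/width numbers are illustrative assumptions matching OpenAI's RN50 configuration, not values fixed by this file.

```python
import torch

# Assumption: package importable as `eva_clip`.
from eva_clip.modified_resnet import ModifiedResNet

# width=64 gives embed_dim = 64 * 32 = 2048 inside the attention pool.
model = ModifiedResNet(layers=(3, 4, 6, 3), output_dim=1024, heads=32,
                       image_size=224, width=64).eval()
with torch.no_grad():
    features = model(torch.randn(2, 3, 224, 224))
print(features.shape)  # expected: torch.Size([2, 1024]) after the QKV attention pool
```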
SDXL_EcomID_ComfyUI/eva_clip/openai.py
ADDED
|
@@ -0,0 +1,144 @@
| 1 |
+
""" OpenAI pretrained model functions
|
| 2 |
+
|
| 3 |
+
Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import os
|
| 7 |
+
import warnings
|
| 8 |
+
from typing import List, Optional, Union
|
| 9 |
+
|
| 10 |
+
import torch
|
| 11 |
+
|
| 12 |
+
from .model import build_model_from_openai_state_dict, convert_weights_to_lp, get_cast_dtype
|
| 13 |
+
from .pretrained import get_pretrained_url, list_pretrained_models_by_tag, download_pretrained_from_url
|
| 14 |
+
|
| 15 |
+
__all__ = ["list_openai_models", "load_openai_model"]
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def list_openai_models() -> List[str]:
|
| 19 |
+
"""Returns the names of available CLIP models"""
|
| 20 |
+
return list_pretrained_models_by_tag('openai')
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def load_openai_model(
|
| 24 |
+
name: str,
|
| 25 |
+
precision: Optional[str] = None,
|
| 26 |
+
device: Optional[Union[str, torch.device]] = None,
|
| 27 |
+
jit: bool = True,
|
| 28 |
+
cache_dir: Optional[str] = None,
|
| 29 |
+
):
|
| 30 |
+
"""Load a CLIP model
|
| 31 |
+
|
| 32 |
+
Parameters
|
| 33 |
+
----------
|
| 34 |
+
name : str
|
| 35 |
+
A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
|
| 36 |
+
precision: str
|
| 37 |
+
Model precision, if None defaults to 'fp32' if device == 'cpu' else 'fp16'.
|
| 38 |
+
device : Union[str, torch.device]
|
| 39 |
+
The device to put the loaded model
|
| 40 |
+
jit : bool
|
| 41 |
+
Whether to load the optimized JIT model (default) or more hackable non-JIT model.
|
| 42 |
+
cache_dir : Optional[str]
|
| 43 |
+
The directory to cache the downloaded model weights
|
| 44 |
+
|
| 45 |
+
Returns
|
| 46 |
+
-------
|
| 47 |
+
model : torch.nn.Module
|
| 48 |
+
The CLIP model
|
| 49 |
+
preprocess : Callable[[PIL.Image], torch.Tensor]
|
| 50 |
+
A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
|
| 51 |
+
"""
|
| 52 |
+
if device is None:
|
| 53 |
+
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 54 |
+
if precision is None:
|
| 55 |
+
precision = 'fp32' if device == 'cpu' else 'fp16'
|
| 56 |
+
|
| 57 |
+
if get_pretrained_url(name, 'openai'):
|
| 58 |
+
model_path = download_pretrained_from_url(get_pretrained_url(name, 'openai'), cache_dir=cache_dir)
|
| 59 |
+
elif os.path.isfile(name):
|
| 60 |
+
model_path = name
|
| 61 |
+
else:
|
| 62 |
+
raise RuntimeError(f"Model {name} not found; available models = {list_openai_models()}")
|
| 63 |
+
|
| 64 |
+
try:
|
| 65 |
+
# loading JIT archive
|
| 66 |
+
model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
|
| 67 |
+
state_dict = None
|
| 68 |
+
except RuntimeError:
|
| 69 |
+
# loading saved state dict
|
| 70 |
+
if jit:
|
| 71 |
+
warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
|
| 72 |
+
jit = False
|
| 73 |
+
state_dict = torch.load(model_path, map_location="cpu")
|
| 74 |
+
|
| 75 |
+
if not jit:
|
| 76 |
+
# Build a non-jit model from the OpenAI jitted model state dict
|
| 77 |
+
cast_dtype = get_cast_dtype(precision)
|
| 78 |
+
try:
|
| 79 |
+
model = build_model_from_openai_state_dict(state_dict or model.state_dict(), cast_dtype=cast_dtype)
|
| 80 |
+
except KeyError:
|
| 81 |
+
sd = {k[7:]: v for k, v in state_dict["state_dict"].items()}
|
| 82 |
+
model = build_model_from_openai_state_dict(sd, cast_dtype=cast_dtype)
|
| 83 |
+
|
| 84 |
+
# model from OpenAI state dict is in manually cast fp16 mode, must be converted for AMP/fp32/bf16 use
|
| 85 |
+
model = model.to(device)
|
| 86 |
+
if precision.startswith('amp') or precision == 'fp32':
|
| 87 |
+
model.float()
|
| 88 |
+
elif precision == 'bf16':
|
| 89 |
+
convert_weights_to_lp(model, dtype=torch.bfloat16)
|
| 90 |
+
|
| 91 |
+
return model
|
| 92 |
+
|
| 93 |
+
# patch the device names
|
| 94 |
+
device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
|
| 95 |
+
device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
|
| 96 |
+
|
| 97 |
+
def patch_device(module):
|
| 98 |
+
try:
|
| 99 |
+
graphs = [module.graph] if hasattr(module, "graph") else []
|
| 100 |
+
except RuntimeError:
|
| 101 |
+
graphs = []
|
| 102 |
+
|
| 103 |
+
if hasattr(module, "forward1"):
|
| 104 |
+
graphs.append(module.forward1.graph)
|
| 105 |
+
|
| 106 |
+
for graph in graphs:
|
| 107 |
+
for node in graph.findAllNodes("prim::Constant"):
|
| 108 |
+
if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
|
| 109 |
+
node.copyAttributes(device_node)
|
| 110 |
+
|
| 111 |
+
model.apply(patch_device)
|
| 112 |
+
patch_device(model.encode_image)
|
| 113 |
+
patch_device(model.encode_text)
|
| 114 |
+
|
| 115 |
+
# patch dtype to float32 (typically for CPU)
|
| 116 |
+
if precision == 'fp32':
|
| 117 |
+
float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
|
| 118 |
+
float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
|
| 119 |
+
float_node = float_input.node()
|
| 120 |
+
|
| 121 |
+
def patch_float(module):
|
| 122 |
+
try:
|
| 123 |
+
graphs = [module.graph] if hasattr(module, "graph") else []
|
| 124 |
+
except RuntimeError:
|
| 125 |
+
graphs = []
|
| 126 |
+
|
| 127 |
+
if hasattr(module, "forward1"):
|
| 128 |
+
graphs.append(module.forward1.graph)
|
| 129 |
+
|
| 130 |
+
for graph in graphs:
|
| 131 |
+
for node in graph.findAllNodes("aten::to"):
|
| 132 |
+
inputs = list(node.inputs())
|
| 133 |
+
for i in [1, 2]: # dtype can be the second or third argument to aten::to()
|
| 134 |
+
if inputs[i].node()["value"] == 5:
|
| 135 |
+
inputs[i].node().copyAttributes(float_node)
|
| 136 |
+
|
| 137 |
+
model.apply(patch_float)
|
| 138 |
+
patch_float(model.encode_image)
|
| 139 |
+
patch_float(model.encode_text)
|
| 140 |
+
model.float()
|
| 141 |
+
|
| 142 |
+
# ensure image_size attr available at consistent location for both jit and non-jit
|
| 143 |
+
model.visual.image_size = model.input_resolution.item()
|
| 144 |
+
return model
|
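A hedged usage sketch for the loader above: the available names come from the _PRETRAINED table in pretrained.py (next file), the call downloads the OpenAI checkpoint on first use, and fp32/CPU is chosen here only to keep the example self-contained.

```python
import torch

# Assumption: package importable as `eva_clip`; network access is needed for the download.
from eva_clip.openai import list_openai_models, load_openai_model

print(list_openai_models())  # names that carry an 'openai' pretrain tag
model = load_openai_model("OpenaiCLIP-B-16", precision="fp32", device="cpu", jit=False)
with torch.no_grad():
    feats = model.encode_image(torch.randn(1, 3, 224, 224))
print(feats.shape)
```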
SDXL_EcomID_ComfyUI/eva_clip/pretrained.py
ADDED
|
@@ -0,0 +1,332 @@
| 1 |
+
import hashlib
|
| 2 |
+
import os
|
| 3 |
+
import urllib
|
| 4 |
+
import warnings
|
| 5 |
+
from functools import partial
|
| 6 |
+
from typing import Dict, Union
|
| 7 |
+
|
| 8 |
+
from tqdm import tqdm
|
| 9 |
+
|
| 10 |
+
try:
|
| 11 |
+
from huggingface_hub import hf_hub_download
|
| 12 |
+
_has_hf_hub = True
|
| 13 |
+
except ImportError:
|
| 14 |
+
hf_hub_download = None
|
| 15 |
+
_has_hf_hub = False
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def _pcfg(url='', hf_hub='', filename='', mean=None, std=None):
|
| 19 |
+
return dict(
|
| 20 |
+
url=url,
|
| 21 |
+
hf_hub=hf_hub,
|
| 22 |
+
mean=mean,
|
| 23 |
+
std=std,
|
| 24 |
+
)
|
| 25 |
+
|
| 26 |
+
_VITB32 = dict(
|
| 27 |
+
openai=_pcfg(
|
| 28 |
+
"https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt"),
|
| 29 |
+
laion400m_e31=_pcfg(
|
| 30 |
+
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e31-d867053b.pt"),
|
| 31 |
+
laion400m_e32=_pcfg(
|
| 32 |
+
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e32-46683a32.pt"),
|
| 33 |
+
laion2b_e16=_pcfg(
|
| 34 |
+
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-laion2b_e16-af8dbd0c.pth"),
|
| 35 |
+
laion2b_s34b_b79k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-laion2B-s34B-b79K/')
|
| 36 |
+
)
|
| 37 |
+
|
| 38 |
+
_VITB32_quickgelu = dict(
|
| 39 |
+
openai=_pcfg(
|
| 40 |
+
"https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt"),
|
| 41 |
+
laion400m_e31=_pcfg(
|
| 42 |
+
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e31-d867053b.pt"),
|
| 43 |
+
laion400m_e32=_pcfg(
|
| 44 |
+
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e32-46683a32.pt"),
|
| 45 |
+
)
|
| 46 |
+
|
| 47 |
+
_VITB16 = dict(
|
| 48 |
+
openai=_pcfg(
|
| 49 |
+
"https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt"),
|
| 50 |
+
laion400m_e31=_pcfg(
|
| 51 |
+
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16-laion400m_e31-00efa78f.pt"),
|
| 52 |
+
laion400m_e32=_pcfg(
|
| 53 |
+
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16-laion400m_e32-55e67d44.pt"),
|
| 54 |
+
laion2b_s34b_b88k=_pcfg(hf_hub='laion/CLIP-ViT-B-16-laion2B-s34B-b88K/'),
|
| 55 |
+
)
|
| 56 |
+
|
| 57 |
+
_EVAB16 = dict(
|
| 58 |
+
eva=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_B_psz14to16.pt'),
|
| 59 |
+
eva02=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_B_psz14to16.pt'),
|
| 60 |
+
eva_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_B_psz16_s8B.pt'),
|
| 61 |
+
eva02_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_B_psz16_s8B.pt'),
|
| 62 |
+
)
|
| 63 |
+
|
| 64 |
+
_VITB16_PLUS_240 = dict(
|
| 65 |
+
laion400m_e31=_pcfg(
|
| 66 |
+
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16_plus_240-laion400m_e31-8fb26589.pt"),
|
| 67 |
+
laion400m_e32=_pcfg(
|
| 68 |
+
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16_plus_240-laion400m_e32-699c4b84.pt"),
|
| 69 |
+
)
|
| 70 |
+
|
| 71 |
+
_VITL14 = dict(
|
| 72 |
+
openai=_pcfg(
|
| 73 |
+
"https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt"),
|
| 74 |
+
laion400m_e31=_pcfg(
|
| 75 |
+
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_l_14-laion400m_e31-69988bb6.pt"),
|
| 76 |
+
laion400m_e32=_pcfg(
|
| 77 |
+
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_l_14-laion400m_e32-3d133497.pt"),
|
| 78 |
+
laion2b_s32b_b82k=_pcfg(
|
| 79 |
+
hf_hub='laion/CLIP-ViT-L-14-laion2B-s32B-b82K/',
|
| 80 |
+
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
|
| 81 |
+
)
|
| 82 |
+
|
| 83 |
+
_EVAL14 = dict(
|
| 84 |
+
eva=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_L_psz14.pt'),
|
| 85 |
+
eva02=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_L_psz14.pt'),
|
| 86 |
+
eva_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_L_psz14_s4B.pt'),
|
| 87 |
+
eva02_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_L_psz14_s4B.pt'),
|
| 88 |
+
)
|
| 89 |
+
|
| 90 |
+
_VITL14_336 = dict(
|
| 91 |
+
openai=_pcfg(
|
| 92 |
+
"https://openaipublic.azureedge.net/clip/models/3035c92b350959924f9f00213499208652fc7ea050643e8b385c2dac08641f02/ViT-L-14-336px.pt"),
|
| 93 |
+
)
|
| 94 |
+
|
| 95 |
+
_EVAL14_336 = dict(
|
| 96 |
+
eva_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_L_336_psz14_s6B.pt'),
|
| 97 |
+
eva02_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_L_336_psz14_s6B.pt'),
|
| 98 |
+
eva_clip_224to336=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_L_psz14_224to336.pt'),
|
| 99 |
+
eva02_clip_224to336=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_L_psz14_224to336.pt'),
|
| 100 |
+
)
|
| 101 |
+
|
| 102 |
+
_VITH14 = dict(
|
| 103 |
+
laion2b_s32b_b79k=_pcfg(hf_hub='laion/CLIP-ViT-H-14-laion2B-s32B-b79K/'),
|
| 104 |
+
)
|
| 105 |
+
|
| 106 |
+
_VITg14 = dict(
|
| 107 |
+
laion2b_s12b_b42k=_pcfg(hf_hub='laion/CLIP-ViT-g-14-laion2B-s12B-b42K/'),
|
| 108 |
+
laion2b_s34b_b88k=_pcfg(hf_hub='laion/CLIP-ViT-g-14-laion2B-s34B-b88K/'),
|
| 109 |
+
)
|
| 110 |
+
|
| 111 |
+
_EVAg14 = dict(
|
| 112 |
+
eva=_pcfg(hf_hub='QuanSun/EVA-CLIP/'),
|
| 113 |
+
eva01=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA01_g_psz14.pt'),
|
| 114 |
+
    eva_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA01_CLIP_g_14_psz14_s11B.pt'),
    eva01_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA01_CLIP_g_14_psz14_s11B.pt'),
)

_EVAg14_PLUS = dict(
    eva=_pcfg(hf_hub='QuanSun/EVA-CLIP/'),
    eva01=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA01_g_psz14.pt'),
    eva_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA01_CLIP_g_14_plus_psz14_s11B.pt'),
    eva01_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA01_CLIP_g_14_plus_psz14_s11B.pt'),
)

_VITbigG14 = dict(
    laion2b_s39b_b160k=_pcfg(hf_hub='laion/CLIP-ViT-bigG-14-laion2B-39B-b160k/'),
)

_EVAbigE14 = dict(
    eva=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_E_psz14.pt'),
    eva02=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_E_psz14.pt'),
    eva_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_E_psz14_s4B.pt'),
    eva02_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_E_psz14_s4B.pt'),
)

_EVAbigE14_PLUS = dict(
    eva=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_E_psz14.pt'),
    eva02=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_E_psz14.pt'),
    eva_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_E_psz14_plus_s9B.pt'),
    eva02_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_E_psz14_plus_s9B.pt'),
)


_PRETRAINED = {
    # "ViT-B-32": _VITB32,
    "OpenaiCLIP-B-32": _VITB32,
    "OpenCLIP-B-32": _VITB32,

    # "ViT-B-32-quickgelu": _VITB32_quickgelu,
    "OpenaiCLIP-B-32-quickgelu": _VITB32_quickgelu,
    "OpenCLIP-B-32-quickgelu": _VITB32_quickgelu,

    # "ViT-B-16": _VITB16,
    "OpenaiCLIP-B-16": _VITB16,
    "OpenCLIP-B-16": _VITB16,

    "EVA02-B-16": _EVAB16,
    "EVA02-CLIP-B-16": _EVAB16,

    # "ViT-B-16-plus-240": _VITB16_PLUS_240,
    "OpenCLIP-B-16-plus-240": _VITB16_PLUS_240,

    # "ViT-L-14": _VITL14,
    "OpenaiCLIP-L-14": _VITL14,
    "OpenCLIP-L-14": _VITL14,

    "EVA02-L-14": _EVAL14,
    "EVA02-CLIP-L-14": _EVAL14,

    # "ViT-L-14-336": _VITL14_336,
    "OpenaiCLIP-L-14-336": _VITL14_336,

    "EVA02-CLIP-L-14-336": _EVAL14_336,

    # "ViT-H-14": _VITH14,
    # "ViT-g-14": _VITg14,
    "OpenCLIP-H-14": _VITH14,
    "OpenCLIP-g-14": _VITg14,

    "EVA01-CLIP-g-14": _EVAg14,
    "EVA01-CLIP-g-14-plus": _EVAg14_PLUS,

    # "ViT-bigG-14": _VITbigG14,
    "OpenCLIP-bigG-14": _VITbigG14,

    "EVA02-CLIP-bigE-14": _EVAbigE14,
    "EVA02-CLIP-bigE-14-plus": _EVAbigE14_PLUS,
}


def _clean_tag(tag: str):
    # normalize pretrained tags
    return tag.lower().replace('-', '_')


def list_pretrained(as_str: bool = False):
    """ returns list of pretrained models
    Returns a tuple (model_name, pretrain_tag) by default or 'name:tag' if as_str == True
    """
    return [':'.join([k, t]) if as_str else (k, t) for k in _PRETRAINED.keys() for t in _PRETRAINED[k].keys()]


def list_pretrained_models_by_tag(tag: str):
    """ return all models having the specified pretrain tag """
    models = []
    tag = _clean_tag(tag)
    for k in _PRETRAINED.keys():
        if tag in _PRETRAINED[k]:
            models.append(k)
    return models


def list_pretrained_tags_by_model(model: str):
    """ return all pretrain tags for the specified model architecture """
    tags = []
    if model in _PRETRAINED:
        tags.extend(_PRETRAINED[model].keys())
    return tags


def is_pretrained_cfg(model: str, tag: str):
    if model not in _PRETRAINED:
        return False
    return _clean_tag(tag) in _PRETRAINED[model]


def get_pretrained_cfg(model: str, tag: str):
    if model not in _PRETRAINED:
        return {}
    model_pretrained = _PRETRAINED[model]
    return model_pretrained.get(_clean_tag(tag), {})


def get_pretrained_url(model: str, tag: str):
    cfg = get_pretrained_cfg(model, _clean_tag(tag))
    return cfg.get('url', '')


def download_pretrained_from_url(
        url: str,
        cache_dir: Union[str, None] = None,
):
    if not cache_dir:
        cache_dir = os.path.expanduser("~/.cache/clip")
    os.makedirs(cache_dir, exist_ok=True)
    filename = os.path.basename(url)

    if 'openaipublic' in url:
        expected_sha256 = url.split("/")[-2]
    elif 'mlfoundations' in url:
        expected_sha256 = os.path.splitext(filename)[0].split("-")[-1]
    else:
        expected_sha256 = ''

    download_target = os.path.join(cache_dir, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        if expected_sha256:
            if hashlib.sha256(open(download_target, "rb").read()).hexdigest().startswith(expected_sha256):
                return download_target
            else:
                warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
        else:
            return download_target

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(total=int(source.headers.get("Content-Length")), ncols=80, unit='iB', unit_scale=True) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    if expected_sha256 and not hashlib.sha256(open(download_target, "rb").read()).hexdigest().startswith(expected_sha256):
        raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not match")

    return download_target


def has_hf_hub(necessary=False):
    if not _has_hf_hub and necessary:
        # if no HF Hub module installed, and it is necessary to continue, raise error
        raise RuntimeError(
            'Hugging Face hub model specified but package not installed. Run `pip install huggingface_hub`.')
    return _has_hf_hub


def download_pretrained_from_hf(
        model_id: str,
        filename: str = 'open_clip_pytorch_model.bin',
        revision=None,
        cache_dir: Union[str, None] = None,
):
    has_hf_hub(True)
    cached_file = hf_hub_download(model_id, filename, revision=revision, cache_dir=cache_dir)
    return cached_file


def download_pretrained(
        cfg: Dict,
        force_hf_hub: bool = False,
        cache_dir: Union[str, None] = None,
):
    target = ''
    if not cfg:
        return target

    download_url = cfg.get('url', '')
    download_hf_hub = cfg.get('hf_hub', '')
    if download_hf_hub and force_hf_hub:
        # use HF hub even if url exists
        download_url = ''

    if download_url:
        target = download_pretrained_from_url(download_url, cache_dir=cache_dir)
    elif download_hf_hub:
        has_hf_hub(True)
        # we assume the hf_hub entries in pretrained config combine model_id + filename in
        # 'org/model_name/filename.pt' form. To specify just the model id w/o filename and
        # use 'open_clip_pytorch_model.bin' default, there must be a trailing slash 'org/model_name/'.
        model_id, filename = os.path.split(download_hf_hub)
        if filename:
            target = download_pretrained_from_hf(model_id, filename=filename, cache_dir=cache_dir)
        else:
            target = download_pretrained_from_hf(model_id, cache_dir=cache_dir)

    return target
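A minimal usage sketch of the registry helpers above; the import path is an assumption based on this upload's layout, and the example model/tag names come from the `_PRETRAINED` table shown here:

# Hypothetical sketch (not part of the upload): querying the pretrained registry.
# Assumes `from eva_clip.pretrained import list_pretrained_tags_by_model, get_pretrained_cfg, download_pretrained`.
tags = list_pretrained_tags_by_model('EVA01-CLIP-g-14')    # includes 'eva_clip', 'eva01_clip', ...
cfg = get_pretrained_cfg('EVA01-CLIP-g-14', 'eva_clip')    # -> _pcfg dict with an 'hf_hub' entry
ckpt_path = download_pretrained(cfg)                       # resolves the hf_hub (or url) entry to a local file; downloads on first call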
SDXL_EcomID_ComfyUI/eva_clip/rope.py
ADDED
@@ -0,0 +1,137 @@
from math import pi
import torch
from torch import nn
from einops import rearrange, repeat
import logging

def broadcat(tensors, dim = -1):
    num_tensors = len(tensors)
    shape_lens = set(list(map(lambda t: len(t.shape), tensors)))
    assert len(shape_lens) == 1, 'tensors must all have the same number of dimensions'
    shape_len = list(shape_lens)[0]
    dim = (dim + shape_len) if dim < 0 else dim
    dims = list(zip(*map(lambda t: list(t.shape), tensors)))
    expandable_dims = [(i, val) for i, val in enumerate(dims) if i != dim]
    assert all([*map(lambda t: len(set(t[1])) <= 2, expandable_dims)]), 'invalid dimensions for broadcastable concatentation'
    max_dims = list(map(lambda t: (t[0], max(t[1])), expandable_dims))
    expanded_dims = list(map(lambda t: (t[0], (t[1],) * num_tensors), max_dims))
    expanded_dims.insert(dim, (dim, dims[dim]))
    expandable_shapes = list(zip(*map(lambda t: t[1], expanded_dims)))
    tensors = list(map(lambda t: t[0].expand(*t[1]), zip(tensors, expandable_shapes)))
    return torch.cat(tensors, dim = dim)

def rotate_half(x):
    x = rearrange(x, '... (d r) -> ... d r', r = 2)
    x1, x2 = x.unbind(dim = -1)
    x = torch.stack((-x2, x1), dim = -1)
    return rearrange(x, '... d r -> ... (d r)')


class VisionRotaryEmbedding(nn.Module):
    def __init__(
        self,
        dim,
        pt_seq_len,
        ft_seq_len=None,
        custom_freqs = None,
        freqs_for = 'lang',
        theta = 10000,
        max_freq = 10,
        num_freqs = 1,
    ):
        super().__init__()
        if custom_freqs:
            freqs = custom_freqs
        elif freqs_for == 'lang':
            freqs = 1. / (theta ** (torch.arange(0, dim, 2)[:(dim // 2)].float() / dim))
        elif freqs_for == 'pixel':
            freqs = torch.linspace(1., max_freq / 2, dim // 2) * pi
        elif freqs_for == 'constant':
            freqs = torch.ones(num_freqs).float()
        else:
            raise ValueError(f'unknown modality {freqs_for}')

        if ft_seq_len is None: ft_seq_len = pt_seq_len
        t = torch.arange(ft_seq_len) / ft_seq_len * pt_seq_len

        freqs_h = torch.einsum('..., f -> ... f', t, freqs)
        freqs_h = repeat(freqs_h, '... n -> ... (n r)', r = 2)

        freqs_w = torch.einsum('..., f -> ... f', t, freqs)
        freqs_w = repeat(freqs_w, '... n -> ... (n r)', r = 2)

        freqs = broadcat((freqs_h[:, None, :], freqs_w[None, :, :]), dim = -1)

        self.register_buffer("freqs_cos", freqs.cos())
        self.register_buffer("freqs_sin", freqs.sin())

        logging.info(f'Shape of rope freq: {self.freqs_cos.shape}')

    def forward(self, t, start_index = 0):
        rot_dim = self.freqs_cos.shape[-1]
        end_index = start_index + rot_dim
        assert rot_dim <= t.shape[-1], f'feature dimension {t.shape[-1]} is not of sufficient size to rotate in all the positions {rot_dim}'
        t_left, t, t_right = t[..., :start_index], t[..., start_index:end_index], t[..., end_index:]
        t = (t * self.freqs_cos) + (rotate_half(t) * self.freqs_sin)

        return torch.cat((t_left, t, t_right), dim = -1)

class VisionRotaryEmbeddingFast(nn.Module):
    def __init__(
        self,
        dim,
        pt_seq_len,
        ft_seq_len=None,
        custom_freqs = None,
        freqs_for = 'lang',
        theta = 10000,
        max_freq = 10,
        num_freqs = 1,
        patch_dropout = 0.
    ):
        super().__init__()
        if custom_freqs:
            freqs = custom_freqs
        elif freqs_for == 'lang':
            freqs = 1. / (theta ** (torch.arange(0, dim, 2)[:(dim // 2)].float() / dim))
        elif freqs_for == 'pixel':
            freqs = torch.linspace(1., max_freq / 2, dim // 2) * pi
        elif freqs_for == 'constant':
            freqs = torch.ones(num_freqs).float()
        else:
            raise ValueError(f'unknown modality {freqs_for}')

        if ft_seq_len is None: ft_seq_len = pt_seq_len
        t = torch.arange(ft_seq_len) / ft_seq_len * pt_seq_len

        freqs = torch.einsum('..., f -> ... f', t, freqs)
        freqs = repeat(freqs, '... n -> ... (n r)', r = 2)
        freqs = broadcat((freqs[:, None, :], freqs[None, :, :]), dim = -1)

        freqs_cos = freqs.cos().view(-1, freqs.shape[-1])
        freqs_sin = freqs.sin().view(-1, freqs.shape[-1])

        self.patch_dropout = patch_dropout

        self.register_buffer("freqs_cos", freqs_cos)
        self.register_buffer("freqs_sin", freqs_sin)

        logging.info(f'Shape of rope freq: {self.freqs_cos.shape}')

    def forward(self, t, patch_indices_keep=None):
        if patch_indices_keep is not None:
            batch = t.size()[0]
            batch_indices = torch.arange(batch)
            batch_indices = batch_indices[..., None]

            freqs_cos = repeat(self.freqs_cos, 'i j -> n i m j', n=t.shape[0], m=t.shape[1])
            freqs_sin = repeat(self.freqs_sin, 'i j -> n i m j', n=t.shape[0], m=t.shape[1])

            freqs_cos = freqs_cos[batch_indices, patch_indices_keep]
            freqs_cos = rearrange(freqs_cos, 'n i m j -> n m i j')
            freqs_sin = freqs_sin[batch_indices, patch_indices_keep]
            freqs_sin = rearrange(freqs_sin, 'n i m j -> n m i j')

            return t * freqs_cos + rotate_half(t) * freqs_sin

        return t * self.freqs_cos + rotate_half(t) * self.freqs_sin
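A quick sanity-check sketch of the rotary embedding above; the grid and head sizes are illustrative assumptions, not values taken from this repo:

# Hypothetical sketch (assumes `from eva_clip.rope import VisionRotaryEmbeddingFast` and einops installed).
import torch

rope = VisionRotaryEmbeddingFast(dim=32, pt_seq_len=16)   # dim is half the head dim; buffers end up shaped (256, 64)
tokens = torch.randn(2, 8, 16 * 16, 64)                   # (batch, heads, patch tokens, head_dim)
rotated = rope(tokens)                                     # freqs_cos/freqs_sin broadcast over batch and heads
assert rotated.shape == tokens.shape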
SDXL_EcomID_ComfyUI/eva_clip/timm_model.py
ADDED
@@ -0,0 +1,122 @@
""" timm model adapter

Wraps timm (https://github.com/rwightman/pytorch-image-models) models for use as a vision tower in CLIP model.
"""
import logging
from collections import OrderedDict

import torch
import torch.nn as nn

try:
    import timm
    from timm.models.layers import Mlp, to_2tuple
    try:
        # old timm imports < 0.8.1
        from timm.models.layers.attention_pool2d import RotAttentionPool2d
        from timm.models.layers.attention_pool2d import AttentionPool2d as AbsAttentionPool2d
    except ImportError:
        # new timm imports >= 0.8.1
        from timm.layers import RotAttentionPool2d
        from timm.layers import AttentionPool2d as AbsAttentionPool2d
except ImportError:
    timm = None

from .utils import freeze_batch_norm_2d


class TimmModel(nn.Module):
    """ timm model adapter
    # FIXME this adapter is a work in progress, may change in ways that break weight compat
    """

    def __init__(
            self,
            model_name,
            embed_dim,
            image_size=224,
            pool='avg',
            proj='linear',
            proj_bias=False,
            drop=0.,
            pretrained=False):
        super().__init__()
        if timm is None:
            raise RuntimeError("Please `pip install timm` to use timm models.")

        self.image_size = to_2tuple(image_size)
        self.trunk = timm.create_model(model_name, pretrained=pretrained)
        feat_size = self.trunk.default_cfg.get('pool_size', None)
        feature_ndim = 1 if not feat_size else 2
        if pool in ('abs_attn', 'rot_attn'):
            assert feature_ndim == 2
            # if attn pooling used, remove both classifier and default pool
            self.trunk.reset_classifier(0, global_pool='')
        else:
            # reset global pool if pool config set, otherwise leave as network default
            reset_kwargs = dict(global_pool=pool) if pool else {}
            self.trunk.reset_classifier(0, **reset_kwargs)
        prev_chs = self.trunk.num_features

        head_layers = OrderedDict()
        if pool == 'abs_attn':
            head_layers['pool'] = AbsAttentionPool2d(prev_chs, feat_size=feat_size, out_features=embed_dim)
            prev_chs = embed_dim
        elif pool == 'rot_attn':
            head_layers['pool'] = RotAttentionPool2d(prev_chs, out_features=embed_dim)
            prev_chs = embed_dim
        else:
            assert proj, 'projection layer needed if non-attention pooling is used.'

        # NOTE attention pool ends with a projection layer, so proj should usually be set to '' if such pooling is used
        if proj == 'linear':
            head_layers['drop'] = nn.Dropout(drop)
            head_layers['proj'] = nn.Linear(prev_chs, embed_dim, bias=proj_bias)
        elif proj == 'mlp':
            head_layers['mlp'] = Mlp(prev_chs, 2 * embed_dim, embed_dim, drop=drop, bias=(True, proj_bias))

        self.head = nn.Sequential(head_layers)

    def lock(self, unlocked_groups=0, freeze_bn_stats=False):
        """ lock modules
        Args:
            unlocked_groups (int): leave last n layer groups unlocked (default: 0)
        """
        if not unlocked_groups:
            # lock full model
            for param in self.trunk.parameters():
                param.requires_grad = False
            if freeze_bn_stats:
                freeze_batch_norm_2d(self.trunk)
        else:
            # NOTE: partial freeze requires latest timm (master) branch and is subject to change
            try:
                # FIXME import here until API stable and in an official release
                from timm.models.helpers import group_parameters, group_modules
            except ImportError:
                raise RuntimeError(
                    'Please install latest timm `pip install git+https://github.com/rwightman/pytorch-image-models`')
            matcher = self.trunk.group_matcher()
            gparams = group_parameters(self.trunk, matcher)
            max_layer_id = max(gparams.keys())
            max_layer_id = max_layer_id - unlocked_groups
            for group_idx in range(max_layer_id + 1):
                group = gparams[group_idx]
                for param in group:
                    self.trunk.get_parameter(param).requires_grad = False
            if freeze_bn_stats:
                gmodules = group_modules(self.trunk, matcher, reverse=True)
                gmodules = {k for k, v in gmodules.items() if v <= max_layer_id}
                freeze_batch_norm_2d(self.trunk, gmodules)

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        try:
            self.trunk.set_grad_checkpointing(enable)
        except Exception as e:
            logging.warning('grad checkpointing not supported for this timm image tower, continuing without...')

    def forward(self, x):
        x = self.trunk(x)
        x = self.head(x)
        return x
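A minimal, hypothetical sketch of the adapter above; the backbone name and sizes are illustrative assumptions and require timm to be installed:

# Hypothetical sketch (assumes `from eva_clip.timm_model import TimmModel`).
import torch

tower = TimmModel('resnet50', embed_dim=512, image_size=224, pool='avg', proj='linear')
feats = tower(torch.randn(2, 3, 224, 224))   # -> (2, 512) image embeddings from trunk + linear head
tower.lock()                                  # freeze the whole timm trunk for tower-frozen training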
SDXL_EcomID_ComfyUI/eva_clip/tokenizer.py
ADDED
@@ -0,0 +1,201 @@
""" CLIP tokenizer

Copied from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
"""
import gzip
import html
import os
from functools import lru_cache
from typing import Union, List

import ftfy
import regex as re
import torch

# https://stackoverflow.com/q/62691279
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"


@lru_cache()
def default_bpe():
    return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")


@lru_cache()
def bytes_to_unicode():
    """
    Returns list of utf-8 byte and a corresponding list of unicode strings.
    The reversible bpe codes work on unicode strings.
    This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
    When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a signficant percentage of your normal, say, 32K bpe vocab.
    To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
    And avoids mapping to whitespace/control characters the bpe code barfs on.
    """
    bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8+n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return set of symbol pairs in a word.
    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


def basic_clean(text):
    text = ftfy.fix_text(text)
    text = html.unescape(html.unescape(text))
    return text.strip()


def whitespace_clean(text):
    text = re.sub(r'\s+', ' ', text)
    text = text.strip()
    return text


class SimpleTokenizer(object):
    def __init__(self, bpe_path: str = default_bpe(), special_tokens=None):
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
        merges = merges[1:49152-256-2+1]
        merges = [tuple(merge.split()) for merge in merges]
        vocab = list(bytes_to_unicode().values())
        vocab = vocab + [v+'</w>' for v in vocab]
        for merge in merges:
            vocab.append(''.join(merge))
        if not special_tokens:
            special_tokens = ['<start_of_text>', '<end_of_text>']
        else:
            special_tokens = ['<start_of_text>', '<end_of_text>'] + special_tokens
        vocab.extend(special_tokens)
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {t:t for t in special_tokens}
        special = "|".join(special_tokens)
        self.pat = re.compile(special + r"""|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)

        self.vocab_size = len(self.encoder)
        self.all_special_ids = [self.encoder[t] for t in special_tokens]

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token[:-1]) + ( token[-1] + '</w>',)
        pairs = get_pairs(word)

        if not pairs:
            return token+'</w>'

        while True:
            bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except:
                    new_word.extend(word[i:])
                    break

                if word[i] == first and i < len(word)-1 and word[i+1] == second:
                    new_word.append(first+second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def encode(self, text):
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
            bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def decode(self, tokens):
        text = ''.join([self.decoder[token] for token in tokens])
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
        return text


_tokenizer = SimpleTokenizer()


def tokenize(texts: Union[str, List[str]], context_length: int = 77) -> torch.LongTensor:
    """
    Returns the tokenized representation of given input string(s)

    Parameters
    ----------
    texts : Union[str, List[str]]
        An input string or a list of input strings to tokenize
    context_length : int
        The context length to use; all CLIP models use 77 as the context length

    Returns
    -------
    A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
    """
    if isinstance(texts, str):
        texts = [texts]

    sot_token = _tokenizer.encoder["<start_of_text>"]
    eot_token = _tokenizer.encoder["<end_of_text>"]
    all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
    result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)

    for i, tokens in enumerate(all_tokens):
        if len(tokens) > context_length:
            tokens = tokens[:context_length]  # Truncate
            tokens[-1] = eot_token
        result[i, :len(tokens)] = torch.tensor(tokens)

    return result


class HFTokenizer:
    "HuggingFace tokenizer wrapper"
    def __init__(self, tokenizer_name:str):
        from transformers import AutoTokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)

    def __call__(self, texts:Union[str, List[str]], context_length:int=77) -> torch.Tensor:
        # same cleaning as for default tokenizer, except lowercasing
        # adding lower (for case-sensitive tokenizers) will make it more robust but less sensitive to nuance
        if isinstance(texts, str):
            texts = [texts]
        texts = [whitespace_clean(basic_clean(text)) for text in texts]
        input_ids = self.tokenizer(texts, return_tensors='pt', max_length=context_length, padding='max_length', truncation=True).input_ids
        return input_ids
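A hedged sketch of the two tokenizer paths above; the example prompts and the HF tokenizer name are illustrative assumptions (the HF path also needs `transformers` installed):

# Hypothetical sketch (assumes `from eva_clip.tokenizer import tokenize, HFTokenizer`).
ids = tokenize(["a photo of a handbag", "a studio product shot"])   # -> LongTensor of shape (2, 77)

hf_tok = HFTokenizer("bert-base-uncased")                            # any HF tokenizer name; illustrative choice
hf_ids = hf_tok(["a photo of a handbag"], context_length=77)         # -> (1, 77) padded/truncated input_ids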
SDXL_EcomID_ComfyUI/eva_clip/transform.py
ADDED
@@ -0,0 +1,103 @@
from typing import Optional, Sequence, Tuple

import torch
import torch.nn as nn
import torchvision.transforms.functional as F

from torchvision.transforms import Normalize, Compose, RandomResizedCrop, InterpolationMode, ToTensor, Resize, \
    CenterCrop

from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD


class ResizeMaxSize(nn.Module):

    def __init__(self, max_size, interpolation=InterpolationMode.BICUBIC, fn='max', fill=0):
        super().__init__()
        if not isinstance(max_size, int):
            raise TypeError(f"Size should be int. Got {type(max_size)}")
        self.max_size = max_size
        self.interpolation = interpolation
        self.fn = min if fn == 'min' else max
        self.fill = fill

    def forward(self, img):
        if isinstance(img, torch.Tensor):
            height, width = img.shape[:2]
        else:
            width, height = img.size
        scale = self.max_size / float(max(height, width))
        if scale != 1.0:
            new_size = tuple(round(dim * scale) for dim in (height, width))
            img = F.resize(img, new_size, self.interpolation)
            pad_h = self.max_size - new_size[0]
            pad_w = self.max_size - new_size[1]
            img = F.pad(img, padding=[pad_w//2, pad_h//2, pad_w - pad_w//2, pad_h - pad_h//2], fill=self.fill)
        return img


def _convert_to_rgb(image):
    return image.convert('RGB')


# class CatGen(nn.Module):
#     def __init__(self, num=4):
#         self.num = num
#     def mixgen_batch(image, text):
#         batch_size = image.shape[0]
#         index = np.random.permutation(batch_size)

#         cat_images = []
#         for i in range(batch_size):
#             # image mixup
#             image[i,:] = lam * image[i,:] + (1 - lam) * image[index[i],:]
#             # text concat
#             text[i] = tokenizer((str(text[i]) + " " + str(text[index[i]])))[0]
#         text = torch.stack(text)
#         return image, text


def image_transform(
        image_size: int,
        is_train: bool,
        mean: Optional[Tuple[float, ...]] = None,
        std: Optional[Tuple[float, ...]] = None,
        resize_longest_max: bool = False,
        fill_color: int = 0,
):
    mean = mean or OPENAI_DATASET_MEAN
    if not isinstance(mean, (list, tuple)):
        mean = (mean,) * 3

    std = std or OPENAI_DATASET_STD
    if not isinstance(std, (list, tuple)):
        std = (std,) * 3

    if isinstance(image_size, (list, tuple)) and image_size[0] == image_size[1]:
        # for square size, pass size as int so that Resize() uses aspect preserving shortest edge
        image_size = image_size[0]

    normalize = Normalize(mean=mean, std=std)
    if is_train:
        return Compose([
            RandomResizedCrop(image_size, scale=(0.9, 1.0), interpolation=InterpolationMode.BICUBIC),
            _convert_to_rgb,
            ToTensor(),
            normalize,
        ])
    else:
        if resize_longest_max:
            transforms = [
                ResizeMaxSize(image_size, fill=fill_color)
            ]
        else:
            transforms = [
                Resize(image_size, interpolation=InterpolationMode.BICUBIC),
                CenterCrop(image_size),
            ]
        transforms.extend([
            _convert_to_rgb,
            ToTensor(),
            normalize,
        ])
        return Compose(transforms)
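A short, hypothetical sketch of the eval-time preprocessing built by `image_transform` above; the placeholder image and sizes are assumptions:

# Hypothetical sketch (assumes `from eva_clip.transform import image_transform` and Pillow installed).
from PIL import Image

preprocess = image_transform(image_size=224, is_train=False)   # Resize + CenterCrop + ToTensor + OpenAI normalization
img = Image.new('RGB', (640, 480))                              # stand-in for a real reference image
x = preprocess(img)                                             # -> tensor of shape (3, 224, 224)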
SDXL_EcomID_ComfyUI/eva_clip/transformer.py
ADDED
@@ -0,0 +1,737 @@
import os
import logging
from collections import OrderedDict
import math
from typing import Callable, Optional, Sequence
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F

try:
    from timm.models.layers import trunc_normal_
except:
    from timm.layers import trunc_normal_

from .rope import VisionRotaryEmbedding, VisionRotaryEmbeddingFast
from .utils import to_2tuple

if os.getenv('ENV_TYPE') == 'deepspeed':
    try:
        import deepspeed
        from deepspeed.runtime.activation_checkpointing.checkpointing import checkpoint
    except:
        print("Please 'pip install deepspeed'")
        deepspeed = None
        from torch.utils.checkpoint import checkpoint
else:
    from torch.utils.checkpoint import checkpoint

try:
    import xformers.ops as xops
except ImportError:
    xops = None
    print("Please 'pip install xformers'")

class LayerNormFp32(nn.LayerNorm):
    """Subclass torch's LayerNorm to handle fp16 (by casting to float32 and back)."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def forward(self, x: torch.Tensor):
        output = F.layer_norm(
            x.float(),
            self.normalized_shape,
            self.weight.float() if self.weight is not None else None,
            self.bias.float() if self.bias is not None else None,
            self.eps,
        )
        return output.type_as(x)


class LayerNorm(nn.LayerNorm):
    """Subclass torch's LayerNorm (with cast back to input dtype)."""

    def forward(self, x: torch.Tensor):
        orig_type = x.dtype
        x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
        return x.to(orig_type)

class QuickGELU(nn.Module):
    # NOTE This is slower than nn.GELU or nn.SiLU and uses more GPU memory
    def forward(self, x: torch.Tensor):
        return x * torch.sigmoid(1.702 * x)


class LayerScale(nn.Module):
    def __init__(self, dim, init_values=1e-5, inplace=False):
        super().__init__()
        self.inplace = inplace
        self.gamma = nn.Parameter(init_values * torch.ones(dim))

    def forward(self, x):
        return x.mul_(self.gamma) if self.inplace else x * self.gamma

class PatchDropout(nn.Module):
    """
    https://arxiv.org/abs/2212.00794
    """

    def __init__(self, prob, exclude_first_token=True):
        super().__init__()
        assert 0 <= prob < 1.
        self.prob = prob
        self.exclude_first_token = exclude_first_token  # exclude CLS token
        logging.info(f"os.getenv('RoPE')={os.getenv('RoPE')}")

    def forward(self, x):
        if not self.training or self.prob == 0.:
            return x

        if self.exclude_first_token:
            cls_tokens, x = x[:, :1], x[:, 1:]
        else:
            cls_tokens = torch.jit.annotate(torch.Tensor, x[:, :1])

        batch = x.size()[0]
        num_tokens = x.size()[1]

        batch_indices = torch.arange(batch)
        batch_indices = batch_indices[..., None]

        keep_prob = 1 - self.prob
        num_patches_keep = max(1, int(num_tokens * keep_prob))

        rand = torch.randn(batch, num_tokens)
        patch_indices_keep = rand.topk(num_patches_keep, dim=-1).indices

        x = x[batch_indices, patch_indices_keep]

        if self.exclude_first_token:
            x = torch.cat((cls_tokens, x), dim=1)

        if self.training and os.getenv('RoPE') == '1':
            return x, patch_indices_keep

        return x


def _in_projection_packed(
    q: torch.Tensor,
    k: torch.Tensor,
    v: torch.Tensor,
    w: torch.Tensor,
    b: Optional[torch.Tensor] = None,
):
    """
    https://github.com/pytorch/pytorch/blob/db2a237763eb8693a20788be94f8c192e762baa8/torch/nn/functional.py#L4726
    """
    E = q.size(-1)
    if k is v:
        if q is k:
            # self-attention
            return F.linear(q, w, b).chunk(3, dim=-1)
        else:
            # encoder-decoder attention
            w_q, w_kv = w.split([E, E * 2])
            if b is None:
                b_q = b_kv = None
            else:
                b_q, b_kv = b.split([E, E * 2])
            return (F.linear(q, w_q, b_q),) + F.linear(k, w_kv, b_kv).chunk(2, dim=-1)
    else:
        w_q, w_k, w_v = w.chunk(3)
        if b is None:
            b_q = b_k = b_v = None
        else:
            b_q, b_k, b_v = b.chunk(3)
        return F.linear(q, w_q, b_q), F.linear(k, w_k, b_k), F.linear(v, w_v, b_v)

class Attention(nn.Module):
    def __init__(
            self,
            dim,
            num_heads=8,
            qkv_bias=True,
            scaled_cosine=False,
            scale_heads=False,
            logit_scale_max=math.log(1. / 0.01),
            attn_drop=0.,
            proj_drop=0.,
            xattn=False,
            rope=False
    ):
        super().__init__()
        self.scaled_cosine = scaled_cosine
        self.scale_heads = scale_heads
        assert dim % num_heads == 0, 'dim should be divisible by num_heads'
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.scale = self.head_dim ** -0.5
        self.logit_scale_max = logit_scale_max

        # keeping in_proj in this form (instead of nn.Linear) to match weight scheme of original
        self.in_proj_weight = nn.Parameter(torch.randn((dim * 3, dim)) * self.scale)
        if qkv_bias:
            self.in_proj_bias = nn.Parameter(torch.zeros(dim * 3))
        else:
            self.in_proj_bias = None

        if self.scaled_cosine:
            self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))))
        else:
            self.logit_scale = None
        self.attn_drop = nn.Dropout(attn_drop)
        if self.scale_heads:
            self.head_scale = nn.Parameter(torch.ones((num_heads, 1, 1)))
        else:
            self.head_scale = None
        self.out_proj = nn.Linear(dim, dim)
        self.out_drop = nn.Dropout(proj_drop)
        self.xattn = xattn
        self.xattn_drop = attn_drop
        self.rope = rope

    def forward(self, x, attn_mask: Optional[torch.Tensor] = None):
        L, N, C = x.shape
        q, k, v = F.linear(x, self.in_proj_weight, self.in_proj_bias).chunk(3, dim=-1)
        if self.xattn:
            q = q.contiguous().view(L, N, self.num_heads, -1).transpose(0, 1)
            k = k.contiguous().view(L, N, self.num_heads, -1).transpose(0, 1)
            v = v.contiguous().view(L, N, self.num_heads, -1).transpose(0, 1)

            x = xops.memory_efficient_attention(
                q, k, v,
                p=self.xattn_drop,
                scale=self.scale if self.logit_scale is None else None,
                attn_bias=xops.LowerTriangularMask() if attn_mask is not None else None,
            )
        else:
            q = q.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1)
            k = k.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1)
            v = v.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1)

            if self.logit_scale is not None:
                attn = torch.bmm(F.normalize(q, dim=-1), F.normalize(k, dim=-1).transpose(-1, -2))
                logit_scale = torch.clamp(self.logit_scale, max=self.logit_scale_max).exp()
                attn = attn.view(N, self.num_heads, L, L) * logit_scale
                attn = attn.view(-1, L, L)
            else:
                q = q * self.scale
                attn = torch.bmm(q, k.transpose(-1, -2))

            if attn_mask is not None:
                if attn_mask.dtype == torch.bool:
                    new_attn_mask = torch.zeros_like(attn_mask, dtype=q.dtype)
                    new_attn_mask.masked_fill_(attn_mask, float("-inf"))
                    attn_mask = new_attn_mask
                attn += attn_mask

            attn = attn.softmax(dim=-1)
            attn = self.attn_drop(attn)

            x = torch.bmm(attn, v)

        if self.head_scale is not None:
            x = x.view(N, self.num_heads, L, C) * self.head_scale
            x = x.view(-1, L, C)
        x = x.transpose(0, 1).reshape(L, N, C)
        x = self.out_proj(x)
        x = self.out_drop(x)
        return x

class CustomAttention(nn.Module):
    def __init__(
            self,
            dim,
            num_heads=8,
            qkv_bias=True,
            scaled_cosine=True,
            scale_heads=False,
            logit_scale_max=math.log(1. / 0.01),
            attn_drop=0.,
            proj_drop=0.,
            xattn=False
    ):
        super().__init__()
        self.scaled_cosine = scaled_cosine
        self.scale_heads = scale_heads
        assert dim % num_heads == 0, 'dim should be divisible by num_heads'
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.scale = self.head_dim ** -0.5
        self.logit_scale_max = logit_scale_max

        # keeping in_proj in this form (instead of nn.Linear) to match weight scheme of original
        self.in_proj_weight = nn.Parameter(torch.randn((dim * 3, dim)) * self.scale)
        if qkv_bias:
            self.in_proj_bias = nn.Parameter(torch.zeros(dim * 3))
        else:
            self.in_proj_bias = None

        if self.scaled_cosine:
            self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))))
        else:
            self.logit_scale = None
        self.attn_drop = nn.Dropout(attn_drop)
        if self.scale_heads:
            self.head_scale = nn.Parameter(torch.ones((num_heads, 1, 1)))
        else:
            self.head_scale = None
        self.out_proj = nn.Linear(dim, dim)
        self.out_drop = nn.Dropout(proj_drop)
        self.xattn = xattn
        self.xattn_drop = attn_drop

    def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
        q, k, v = _in_projection_packed(query, key, value, self.in_proj_weight, self.in_proj_bias)
        N_q, B_q, C_q = q.shape
        N_k, B_k, C_k = k.shape
        N_v, B_v, C_v = v.shape
        if self.xattn:
            # B, N, C -> B, N, num_heads, C
            q = q.permute(1, 0, 2).reshape(B_q, N_q, self.num_heads, -1)
            k = k.permute(1, 0, 2).reshape(B_k, N_k, self.num_heads, -1)
            v = v.permute(1, 0, 2).reshape(B_v, N_v, self.num_heads, -1)

            x = xops.memory_efficient_attention(
                q, k, v,
                p=self.xattn_drop,
                scale=self.scale if self.logit_scale is None else None,
                attn_bias=xops.LowerTriangularMask() if attn_mask is not None else None
            )
        else:
            # B*H, L, C
            q = q.contiguous().view(N_q, B_q * self.num_heads, -1).transpose(0, 1)
            k = k.contiguous().view(N_k, B_k * self.num_heads, -1).transpose(0, 1)
            v = v.contiguous().view(N_v, B_v * self.num_heads, -1).transpose(0, 1)

            if self.logit_scale is not None:
                # B*H, N_q, N_k
                attn = torch.bmm(F.normalize(q, dim=-1), F.normalize(k, dim=-1).transpose(-1, -2))
                logit_scale = torch.clamp(self.logit_scale, max=self.logit_scale_max).exp()
                attn = attn.view(B_q, self.num_heads, N_q, N_k) * logit_scale
                attn = attn.view(-1, N_q, N_k)
            else:
                q = q * self.scale
                attn = torch.bmm(q, k.transpose(-1, -2))

            if attn_mask is not None:
                if attn_mask.dtype == torch.bool:
                    new_attn_mask = torch.zeros_like(attn_mask, dtype=q.dtype)
                    new_attn_mask.masked_fill_(attn_mask, float("-inf"))
                    attn_mask = new_attn_mask
                attn += attn_mask

            attn = attn.softmax(dim=-1)
            attn = self.attn_drop(attn)

            x = torch.bmm(attn, v)

        if self.head_scale is not None:
            x = x.view(B_q, self.num_heads, N_q, C_q) * self.head_scale
            x = x.view(-1, N_q, C_q)
        x = x.transpose(0, 1).reshape(N_q, B_q, C_q)
        x = self.out_proj(x)
        x = self.out_drop(x)
        return x

class CustomResidualAttentionBlock(nn.Module):
    def __init__(
            self,
            d_model: int,
            n_head: int,
            mlp_ratio: float = 4.0,
            ls_init_value: float = None,
            act_layer: Callable = nn.GELU,
            norm_layer: Callable = LayerNorm,
            scale_cosine_attn: bool = False,
            scale_heads: bool = False,
            scale_attn: bool = False,
            scale_fc: bool = False,
            cross_attn: bool = False,
            xattn: bool = False,
    ):
        super().__init__()

        self.ln_1 = norm_layer(d_model)
        self.ln_1_k = norm_layer(d_model) if cross_attn else self.ln_1
        self.ln_1_v = norm_layer(d_model) if cross_attn else self.ln_1
        self.attn = CustomAttention(
            d_model, n_head,
            qkv_bias=True,
            attn_drop=0.,
            proj_drop=0.,
            scaled_cosine=scale_cosine_attn,
            scale_heads=scale_heads,
            xattn=xattn
        )

        self.ln_attn = norm_layer(d_model) if scale_attn else nn.Identity()
        self.ls_1 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()

        self.ln_2 = norm_layer(d_model)
        mlp_width = int(d_model * mlp_ratio)
        self.mlp = nn.Sequential(OrderedDict([
            ("c_fc", nn.Linear(d_model, mlp_width)),
            ('ln', norm_layer(mlp_width) if scale_fc else nn.Identity()),
            ("gelu", act_layer()),
            ("c_proj", nn.Linear(mlp_width, d_model))
        ]))

        self.ls_2 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()

    def forward(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
        q = q + self.ls_1(self.ln_attn(self.attn(self.ln_1(q), self.ln_1_k(k), self.ln_1_v(v), attn_mask=attn_mask)))
        q = q + self.ls_2(self.mlp(self.ln_2(q)))
        return q

class CustomTransformer(nn.Module):
    def __init__(
            self,
            width: int,
            layers: int,
            heads: int,
            mlp_ratio: float = 4.0,
            ls_init_value: float = None,
            act_layer: Callable = nn.GELU,
            norm_layer: Callable = LayerNorm,
            scale_cosine_attn: bool = True,
            scale_heads: bool = False,
            scale_attn: bool = False,
            scale_fc: bool = False,
            cross_attn: bool = False,
            xattn: bool = False,
    ):
        super().__init__()
        self.width = width
        self.layers = layers
        self.grad_checkpointing = False
        self.xattn = xattn

        self.resblocks = nn.ModuleList([
            CustomResidualAttentionBlock(
                width,
                heads,
                mlp_ratio,
                ls_init_value=ls_init_value,
                act_layer=act_layer,
                norm_layer=norm_layer,
                scale_cosine_attn=scale_cosine_attn,
                scale_heads=scale_heads,
                scale_attn=scale_attn,
                scale_fc=scale_fc,
                cross_attn=cross_attn,
                xattn=xattn)
            for _ in range(layers)
        ])

    def get_cast_dtype(self) -> torch.dtype:
        return self.resblocks[0].mlp.c_fc.weight.dtype

    def forward(self, q: torch.Tensor, k: torch.Tensor = None, v: torch.Tensor = None, attn_mask: Optional[torch.Tensor] = None):
        if k is None and v is None:
            k = v = q
        for r in self.resblocks:
            if self.grad_checkpointing and not torch.jit.is_scripting():
                q = checkpoint(r, q, k, v, attn_mask)
            else:
                q = r(q, k, v, attn_mask=attn_mask)
        return q


class ResidualAttentionBlock(nn.Module):
    def __init__(
            self,
            d_model: int,
            n_head: int,
            mlp_ratio: float = 4.0,
            ls_init_value: float = None,
            act_layer: Callable = nn.GELU,
            norm_layer: Callable = LayerNorm,
            xattn: bool = False,
    ):
        super().__init__()

        self.ln_1 = norm_layer(d_model)
        if xattn:
            self.attn = Attention(d_model, n_head, xattn=True)
        else:
            self.attn = nn.MultiheadAttention(d_model, n_head)
        self.ls_1 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()

        self.ln_2 = norm_layer(d_model)
        mlp_width = int(d_model * mlp_ratio)
        self.mlp = nn.Sequential(OrderedDict([
            ("c_fc", nn.Linear(d_model, mlp_width)),
            ("gelu", act_layer()),
            ("c_proj", nn.Linear(mlp_width, d_model))
        ]))

        self.ls_2 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()
        self.xattn = xattn

    def attention(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
        attn_mask = attn_mask.to(x.dtype) if attn_mask is not None else None
        if self.xattn:
            return self.attn(x, attn_mask=attn_mask)
        return self.attn(x, x, x, need_weights=False, attn_mask=attn_mask)[0]

    def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
        x = x + self.ls_1(self.attention(self.ln_1(x), attn_mask=attn_mask))
        x = x + self.ls_2(self.mlp(self.ln_2(x)))
        return x

class Transformer(nn.Module):
    def __init__(
            self,
            width: int,
            layers: int,
            heads: int,
            mlp_ratio: float = 4.0,
            ls_init_value: float = None,
            act_layer: Callable = nn.GELU,
            norm_layer: Callable = LayerNorm,
            xattn: bool = False,
    ):
        super().__init__()
        self.width = width
        self.layers = layers
        self.grad_checkpointing = False

        self.resblocks = nn.ModuleList([
            ResidualAttentionBlock(
                width, heads, mlp_ratio, ls_init_value=ls_init_value, act_layer=act_layer, norm_layer=norm_layer, xattn=xattn)
            for _ in range(layers)
        ])

    def get_cast_dtype(self) -> torch.dtype:
        return self.resblocks[0].mlp.c_fc.weight.dtype

    def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
        for r in self.resblocks:
            if self.grad_checkpointing and not torch.jit.is_scripting():
                x = checkpoint(r, x, attn_mask)
            else:
                x = r(x, attn_mask=attn_mask)
        return x


class VisionTransformer(nn.Module):
    def __init__(
            self,
            image_size: int,
            patch_size: int,
            width: int,
            layers: int,
            heads: int,
            mlp_ratio: float,
            ls_init_value: float = None,
            patch_dropout: float = 0.,
            global_average_pool: bool = False,
            output_dim: int = 512,
            act_layer: Callable = nn.GELU,
            norm_layer: Callable = LayerNorm,
            xattn: bool = False,
    ):
        super().__init__()
        self.image_size = to_2tuple(image_size)
        self.patch_size = to_2tuple(patch_size)
        self.grid_size = (self.image_size[0] // self.patch_size[0], self.image_size[1] // self.patch_size[1])
        self.output_dim = output_dim
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)

        scale = width ** -0.5
        self.class_embedding = nn.Parameter(scale * torch.randn(width))
        self.positional_embedding = nn.Parameter(scale * torch.randn(self.grid_size[0] * self.grid_size[1] + 1, width))

        # setting a patch_dropout of 0. would mean it is disabled and this function would be the identity fn
        self.patch_dropout = PatchDropout(patch_dropout) if patch_dropout > 0. else nn.Identity()
        self.ln_pre = norm_layer(width)

        self.transformer = Transformer(
            width,
            layers,
            heads,
            mlp_ratio,
            ls_init_value=ls_init_value,
            act_layer=act_layer,
            norm_layer=norm_layer,
            xattn=xattn
        )

        self.global_average_pool = global_average_pool
        self.ln_post = norm_layer(width)
        self.proj = nn.Parameter(scale * torch.randn(width, output_dim))

    def lock(self, unlocked_groups=0, freeze_bn_stats=False):
        for param in self.parameters():
            param.requires_grad = False

        if unlocked_groups != 0:
            groups = [
                [
                    self.conv1,
                    self.class_embedding,
                    self.positional_embedding,
                    self.ln_pre,
                ],
                *self.transformer.resblocks[:-1],
                [
                    self.transformer.resblocks[-1],
                    self.ln_post,
                ],
                self.proj,
            ]

            def _unlock(x):
                if isinstance(x, Sequence):
                    for g in x:
                        _unlock(g)
                else:
                    if isinstance(x, torch.nn.Parameter):
                        x.requires_grad = True
                    else:
                        for p in x.parameters():
                            p.requires_grad = True

            _unlock(groups[-unlocked_groups:])

    def get_num_layers(self):
        return self.transformer.layers

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        self.transformer.grad_checkpointing = enable

    @torch.jit.ignore
| 608 |
+
def no_weight_decay(self):
|
| 609 |
+
return {'positional_embedding', 'class_embedding'}
|
| 610 |
+
|
| 611 |
+
def forward(self, x: torch.Tensor, return_all_features: bool=False):
|
| 612 |
+
x = self.conv1(x) # shape = [*, width, grid, grid]
|
| 613 |
+
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
|
| 614 |
+
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
|
| 615 |
+
x = torch.cat(
|
| 616 |
+
[self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),
|
| 617 |
+
x], dim=1) # shape = [*, grid ** 2 + 1, width]
|
| 618 |
+
x = x + self.positional_embedding.to(x.dtype)
|
| 619 |
+
|
| 620 |
+
# a patch_dropout of 0. would mean it is disabled and this function would do nothing but return what was passed in
|
| 621 |
+
x = self.patch_dropout(x)
|
| 622 |
+
x = self.ln_pre(x)
|
| 623 |
+
|
| 624 |
+
x = x.permute(1, 0, 2) # NLD -> LND
|
| 625 |
+
x = self.transformer(x)
|
| 626 |
+
x = x.permute(1, 0, 2) # LND -> NLD
|
| 627 |
+
|
| 628 |
+
if not return_all_features:
|
| 629 |
+
if self.global_average_pool:
|
| 630 |
+
x = x.mean(dim=1) #x = x[:,1:,:].mean(dim=1)
|
| 631 |
+
else:
|
| 632 |
+
x = x[:, 0]
|
| 633 |
+
|
| 634 |
+
x = self.ln_post(x)
|
| 635 |
+
|
| 636 |
+
if self.proj is not None:
|
| 637 |
+
x = x @ self.proj
|
| 638 |
+
|
| 639 |
+
return x
|
| 640 |
+
|
| 641 |
+
|
| 642 |
+
class TextTransformer(nn.Module):
|
| 643 |
+
def __init__(
|
| 644 |
+
self,
|
| 645 |
+
context_length: int = 77,
|
| 646 |
+
vocab_size: int = 49408,
|
| 647 |
+
width: int = 512,
|
| 648 |
+
heads: int = 8,
|
| 649 |
+
layers: int = 12,
|
| 650 |
+
ls_init_value: float = None,
|
| 651 |
+
output_dim: int = 512,
|
| 652 |
+
act_layer: Callable = nn.GELU,
|
| 653 |
+
norm_layer: Callable = LayerNorm,
|
| 654 |
+
xattn: bool= False,
|
| 655 |
+
attn_mask: bool = True
|
| 656 |
+
):
|
| 657 |
+
super().__init__()
|
| 658 |
+
self.context_length = context_length
|
| 659 |
+
self.vocab_size = vocab_size
|
| 660 |
+
self.width = width
|
| 661 |
+
self.output_dim = output_dim
|
| 662 |
+
|
| 663 |
+
self.token_embedding = nn.Embedding(vocab_size, width)
|
| 664 |
+
self.positional_embedding = nn.Parameter(torch.empty(self.context_length, width))
|
| 665 |
+
self.transformer = Transformer(
|
| 666 |
+
width=width,
|
| 667 |
+
layers=layers,
|
| 668 |
+
heads=heads,
|
| 669 |
+
ls_init_value=ls_init_value,
|
| 670 |
+
act_layer=act_layer,
|
| 671 |
+
norm_layer=norm_layer,
|
| 672 |
+
xattn=xattn
|
| 673 |
+
)
|
| 674 |
+
|
| 675 |
+
self.xattn = xattn
|
| 676 |
+
self.ln_final = norm_layer(width)
|
| 677 |
+
self.text_projection = nn.Parameter(torch.empty(width, output_dim))
|
| 678 |
+
|
| 679 |
+
if attn_mask:
|
| 680 |
+
self.register_buffer('attn_mask', self.build_attention_mask(), persistent=False)
|
| 681 |
+
else:
|
| 682 |
+
self.attn_mask = None
|
| 683 |
+
|
| 684 |
+
self.init_parameters()
|
| 685 |
+
|
| 686 |
+
def init_parameters(self):
|
| 687 |
+
nn.init.normal_(self.token_embedding.weight, std=0.02)
|
| 688 |
+
nn.init.normal_(self.positional_embedding, std=0.01)
|
| 689 |
+
|
| 690 |
+
proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
|
| 691 |
+
attn_std = self.transformer.width ** -0.5
|
| 692 |
+
fc_std = (2 * self.transformer.width) ** -0.5
|
| 693 |
+
for block in self.transformer.resblocks:
|
| 694 |
+
nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
|
| 695 |
+
nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
|
| 696 |
+
nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
|
| 697 |
+
nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
|
| 698 |
+
|
| 699 |
+
if self.text_projection is not None:
|
| 700 |
+
nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)
|
| 701 |
+
|
| 702 |
+
@torch.jit.ignore
|
| 703 |
+
def set_grad_checkpointing(self, enable=True):
|
| 704 |
+
self.transformer.grad_checkpointing = enable
|
| 705 |
+
|
| 706 |
+
@torch.jit.ignore
|
| 707 |
+
def no_weight_decay(self):
|
| 708 |
+
# return {'positional_embedding', 'token_embedding'}
|
| 709 |
+
return {'positional_embedding'}
|
| 710 |
+
|
| 711 |
+
def get_num_layers(self):
|
| 712 |
+
return self.transformer.layers
|
| 713 |
+
|
| 714 |
+
def build_attention_mask(self):
|
| 715 |
+
# lazily create causal attention mask, with full attention between the vision tokens
|
| 716 |
+
# pytorch uses additive attention mask; fill with -inf
|
| 717 |
+
mask = torch.empty(self.context_length, self.context_length)
|
| 718 |
+
mask.fill_(float("-inf"))
|
| 719 |
+
mask.triu_(1) # zero out the lower diagonal
|
| 720 |
+
return mask
|
| 721 |
+
|
| 722 |
+
def forward(self, text, return_all_features: bool=False):
|
| 723 |
+
cast_dtype = self.transformer.get_cast_dtype()
|
| 724 |
+
x = self.token_embedding(text).to(cast_dtype) # [batch_size, n_ctx, d_model]
|
| 725 |
+
|
| 726 |
+
x = x + self.positional_embedding.to(cast_dtype)
|
| 727 |
+
x = x.permute(1, 0, 2) # NLD -> LND
|
| 728 |
+
x = self.transformer(x, attn_mask=self.attn_mask)
|
| 729 |
+
# x = self.transformer(x) # no attention mask is applied
|
| 730 |
+
x = x.permute(1, 0, 2) # LND -> NLD
|
| 731 |
+
x = self.ln_final(x)
|
| 732 |
+
|
| 733 |
+
if not return_all_features:
|
| 734 |
+
# x.shape = [batch_size, n_ctx, transformer.width]
|
| 735 |
+
# take features from the eot embedding (eot_token is the highest number in each sequence)
|
| 736 |
+
x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
|
| 737 |
+
return x
|
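A minimal, self-contained sketch (assuming only PyTorch is installed; the context length of 5 is an illustrative value, the class default above is 77) of the additive causal mask that TextTransformer.build_attention_mask constructs:

import torch

# Same construction as build_attention_mask above: position i may attend to
# positions <= i, while future positions receive -inf in the additive mask.
context_length = 5  # illustrative; the class default is 77

mask = torch.empty(context_length, context_length)
mask.fill_(float("-inf"))
mask.triu_(1)  # keep diagonal and lower triangle at 0, strictly-upper triangle at -inf

print(mask)
# tensor([[0., -inf, -inf, -inf, -inf],
#         [0.,   0., -inf, -inf, -inf],
#         [0.,   0.,   0., -inf, -inf],
#         [0.,   0.,   0.,   0., -inf],
#         [0.,   0.,   0.,   0.,   0.]])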
SDXL_EcomID_ComfyUI/eva_clip/utils.py
ADDED
@@ -0,0 +1,326 @@
from itertools import repeat
import collections.abc
import logging
import math
import numpy as np

import torch
from torch import nn as nn
from torchvision.ops.misc import FrozenBatchNorm2d
import torch.nn.functional as F

# open CLIP
def resize_clip_pos_embed(state_dict, model, interpolation: str = 'bicubic', seq_dim=1):
    # Rescale the grid of position embeddings when loading from state_dict
    old_pos_embed = state_dict.get('visual.positional_embedding', None)
    if old_pos_embed is None or not hasattr(model.visual, 'grid_size'):
        return
    grid_size = to_2tuple(model.visual.grid_size)
    extra_tokens = 1  # FIXME detect different token configs (ie no class token, or more)
    new_seq_len = grid_size[0] * grid_size[1] + extra_tokens
    if new_seq_len == old_pos_embed.shape[0]:
        return

    if extra_tokens:
        pos_emb_tok, pos_emb_img = old_pos_embed[:extra_tokens], old_pos_embed[extra_tokens:]
    else:
        pos_emb_tok, pos_emb_img = None, old_pos_embed
    old_grid_size = to_2tuple(int(math.sqrt(len(pos_emb_img))))

    logging.info('Resizing position embedding grid-size from %s to %s', old_grid_size, grid_size)
    pos_emb_img = pos_emb_img.reshape(1, old_grid_size[0], old_grid_size[1], -1).permute(0, 3, 1, 2)
    pos_emb_img = F.interpolate(
        pos_emb_img,
        size=grid_size,
        mode=interpolation,
        align_corners=True,
    )
    pos_emb_img = pos_emb_img.permute(0, 2, 3, 1).reshape(1, grid_size[0] * grid_size[1], -1)[0]
    if pos_emb_tok is not None:
        new_pos_embed = torch.cat([pos_emb_tok, pos_emb_img], dim=0)
    else:
        new_pos_embed = pos_emb_img
    state_dict['visual.positional_embedding'] = new_pos_embed


def resize_visual_pos_embed(state_dict, model, interpolation: str = 'bicubic', seq_dim=1):
    # Rescale the grid of position embeddings when loading from state_dict
    old_pos_embed = state_dict.get('positional_embedding', None)
    if old_pos_embed is None or not hasattr(model.visual, 'grid_size'):
        return
    grid_size = to_2tuple(model.visual.grid_size)
    extra_tokens = 1  # FIXME detect different token configs (ie no class token, or more)
    new_seq_len = grid_size[0] * grid_size[1] + extra_tokens
    if new_seq_len == old_pos_embed.shape[0]:
        return

    if extra_tokens:
        pos_emb_tok, pos_emb_img = old_pos_embed[:extra_tokens], old_pos_embed[extra_tokens:]
    else:
        pos_emb_tok, pos_emb_img = None, old_pos_embed
    old_grid_size = to_2tuple(int(math.sqrt(len(pos_emb_img))))

    logging.info('Resizing position embedding grid-size from %s to %s', old_grid_size, grid_size)
    pos_emb_img = pos_emb_img.reshape(1, old_grid_size[0], old_grid_size[1], -1).permute(0, 3, 1, 2)
    pos_emb_img = F.interpolate(
        pos_emb_img,
        size=grid_size,
        mode=interpolation,
        align_corners=True,
    )
    pos_emb_img = pos_emb_img.permute(0, 2, 3, 1).reshape(1, grid_size[0] * grid_size[1], -1)[0]
    if pos_emb_tok is not None:
        new_pos_embed = torch.cat([pos_emb_tok, pos_emb_img], dim=0)
    else:
        new_pos_embed = pos_emb_img
    state_dict['positional_embedding'] = new_pos_embed

def resize_evaclip_pos_embed(state_dict, model, interpolation: str = 'bicubic', seq_dim=1):
    all_keys = list(state_dict.keys())
    # interpolate position embedding
    if 'visual.pos_embed' in state_dict:
        pos_embed_checkpoint = state_dict['visual.pos_embed']
        embedding_size = pos_embed_checkpoint.shape[-1]
        num_patches = model.visual.patch_embed.num_patches
        num_extra_tokens = model.visual.pos_embed.shape[-2] - num_patches
        # height (== width) for the checkpoint position embedding
        orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
        # height (== width) for the new position embedding
        new_size = int(num_patches ** 0.5)
        # class_token and dist_token are kept unchanged
        if orig_size != new_size:
            print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
            extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
            # only the position tokens are interpolated
            pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
            pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
            pos_tokens = torch.nn.functional.interpolate(
                pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
            pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
            new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
            state_dict['visual.pos_embed'] = new_pos_embed

            patch_embed_proj = state_dict['visual.patch_embed.proj.weight']
            patch_size = model.visual.patch_embed.patch_size
            state_dict['visual.patch_embed.proj.weight'] = torch.nn.functional.interpolate(
                patch_embed_proj.float(), size=patch_size, mode='bicubic', align_corners=False)


def resize_eva_pos_embed(state_dict, model, interpolation: str = 'bicubic', seq_dim=1):
    all_keys = list(state_dict.keys())
    # interpolate position embedding
    if 'pos_embed' in state_dict:
        pos_embed_checkpoint = state_dict['pos_embed']
        embedding_size = pos_embed_checkpoint.shape[-1]
        num_patches = model.visual.patch_embed.num_patches
        num_extra_tokens = model.visual.pos_embed.shape[-2] - num_patches
        # height (== width) for the checkpoint position embedding
        orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
        # height (== width) for the new position embedding
        new_size = int(num_patches ** 0.5)
        # class_token and dist_token are kept unchanged
        if orig_size != new_size:
            print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
            extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
            # only the position tokens are interpolated
            pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
            pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
            pos_tokens = torch.nn.functional.interpolate(
                pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
            pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
            new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
            state_dict['pos_embed'] = new_pos_embed

            patch_embed_proj = state_dict['patch_embed.proj.weight']
            patch_size = model.visual.patch_embed.patch_size
            state_dict['patch_embed.proj.weight'] = torch.nn.functional.interpolate(
                patch_embed_proj.float(), size=patch_size, mode='bicubic', align_corners=False)


def resize_rel_pos_embed(state_dict, model, interpolation: str = 'bicubic', seq_dim=1):
    all_keys = list(state_dict.keys())
    for key in all_keys:
        if "relative_position_index" in key:
            state_dict.pop(key)

        if "relative_position_bias_table" in key:
            rel_pos_bias = state_dict[key]
            src_num_pos, num_attn_heads = rel_pos_bias.size()
            dst_num_pos, _ = model.visual.state_dict()[key].size()
            dst_patch_shape = model.visual.patch_embed.patch_shape
            if dst_patch_shape[0] != dst_patch_shape[1]:
                raise NotImplementedError()
            num_extra_tokens = dst_num_pos - (dst_patch_shape[0] * 2 - 1) * (dst_patch_shape[1] * 2 - 1)
            src_size = int((src_num_pos - num_extra_tokens) ** 0.5)
            dst_size = int((dst_num_pos - num_extra_tokens) ** 0.5)
            if src_size != dst_size:
                print("Position interpolate for %s from %dx%d to %dx%d" % (
                    key, src_size, src_size, dst_size, dst_size))
                extra_tokens = rel_pos_bias[-num_extra_tokens:, :]
                rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :]

                def geometric_progression(a, r, n):
                    return a * (1.0 - r ** n) / (1.0 - r)

                left, right = 1.01, 1.5
                while right - left > 1e-6:
                    q = (left + right) / 2.0
                    gp = geometric_progression(1, q, src_size // 2)
                    if gp > dst_size // 2:
                        right = q
                    else:
                        left = q

                # if q > 1.090307:
                #     q = 1.090307

                dis = []
                cur = 1
                for i in range(src_size // 2):
                    dis.append(cur)
                    cur += q ** (i + 1)

                r_ids = [-_ for _ in reversed(dis)]

                x = r_ids + [0] + dis
                y = r_ids + [0] + dis

                t = dst_size // 2.0
                dx = np.arange(-t, t + 0.1, 1.0)
                dy = np.arange(-t, t + 0.1, 1.0)

                print("Original positions = %s" % str(x))
                print("Target positions = %s" % str(dx))

                all_rel_pos_bias = []

                for i in range(num_attn_heads):
                    z = rel_pos_bias[:, i].view(src_size, src_size).float().numpy()
                    # NOTE: the uploaded file called `F.interpolate.interp2d`, which does not exist
                    # (torch.nn.functional.interpolate has no `interp2d` attribute). The upstream
                    # BEiT/EVA checkpoint-loading code performs this bicubic 2D fit with scipy,
                    # which is what is restored here.
                    from scipy import interpolate
                    f = interpolate.interp2d(x, y, z, kind='cubic')
                    all_rel_pos_bias.append(
                        torch.Tensor(f(dx, dy)).contiguous().view(-1, 1).to(rel_pos_bias.device))

                rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1)

                new_rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0)
                state_dict[key] = new_rel_pos_bias

    # interpolate position embedding
    if 'pos_embed' in state_dict:
        pos_embed_checkpoint = state_dict['pos_embed']
        embedding_size = pos_embed_checkpoint.shape[-1]
        num_patches = model.visual.patch_embed.num_patches
        num_extra_tokens = model.visual.pos_embed.shape[-2] - num_patches
        # height (== width) for the checkpoint position embedding
        orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
        # height (== width) for the new position embedding
        new_size = int(num_patches ** 0.5)
        # class_token and dist_token are kept unchanged
        if orig_size != new_size:
            print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
            extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
            # only the position tokens are interpolated
            pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
            pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
            pos_tokens = torch.nn.functional.interpolate(
                pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
            pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
            new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
            state_dict['pos_embed'] = new_pos_embed

            patch_embed_proj = state_dict['patch_embed.proj.weight']
            patch_size = model.visual.patch_embed.patch_size
            state_dict['patch_embed.proj.weight'] = torch.nn.functional.interpolate(
                patch_embed_proj.float(), size=patch_size, mode='bicubic', align_corners=False)


def freeze_batch_norm_2d(module, module_match={}, name=''):
    """
    Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`. If `module` is
    itself an instance of either `BatchNorm2d` or `SyncBatchNorm`, it is converted into `FrozenBatchNorm2d` and
    returned. Otherwise, the module is walked recursively and submodules are converted in place.

    Args:
        module (torch.nn.Module): Any PyTorch module.
        module_match (dict): Dictionary of full module names to freeze (all if empty)
        name (str): Full module name (prefix)

    Returns:
        torch.nn.Module: Resulting module

    Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762
    """
    res = module
    is_match = True
    if module_match:
        is_match = name in module_match
    if is_match and isinstance(module, (nn.modules.batchnorm.BatchNorm2d, nn.modules.batchnorm.SyncBatchNorm)):
        res = FrozenBatchNorm2d(module.num_features)
        res.num_features = module.num_features
        res.affine = module.affine
        if module.affine:
            res.weight.data = module.weight.data.clone().detach()
            res.bias.data = module.bias.data.clone().detach()
        res.running_mean.data = module.running_mean.data
        res.running_var.data = module.running_var.data
        res.eps = module.eps
    else:
        for child_name, child in module.named_children():
            full_child_name = '.'.join([name, child_name]) if name else child_name
            new_child = freeze_batch_norm_2d(child, module_match, full_child_name)
            if new_child is not child:
                res.add_module(child_name, new_child)
    return res


# From PyTorch internals
def _ntuple(n):
    def parse(x):
        if isinstance(x, collections.abc.Iterable):
            return x
        return tuple(repeat(x, n))
    return parse


to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)
to_ntuple = lambda n, x: _ntuple(n)(x)


def is_logging(args):
    def is_global_master(args):
        return args.rank == 0

    def is_local_master(args):
        return args.local_rank == 0

    def is_master(args, local=False):
        return is_local_master(args) if local else is_global_master(args)
    return is_master


class AllGather(torch.autograd.Function):
    """An autograd function that performs allgather on a tensor.
    Performs all_gather operation on the provided tensors.
    *** Warning ***: torch.distributed.all_gather has no gradient.
    """

    @staticmethod
    def forward(ctx, tensor, rank, world_size):
        tensors_gather = [torch.empty_like(tensor) for _ in range(world_size)]
        torch.distributed.all_gather(tensors_gather, tensor)
        ctx.rank = rank
        ctx.batch_size = tensor.shape[0]
        return torch.cat(tensors_gather, 0)

    @staticmethod
    def backward(ctx, grad_output):
        return (
            grad_output[ctx.batch_size * ctx.rank: ctx.batch_size * (ctx.rank + 1)],
            None,
            None
        )

allgather = AllGather.apply
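A small self-contained sketch (PyTorch only; the 16x16 -> 24x24 grid sizes and the width of 8 are made-up numbers) of the bicubic position-embedding interpolation that resize_clip_pos_embed above applies to visual.positional_embedding:

import torch
import torch.nn.functional as F

# Illustrative sizes only: a checkpoint trained on a 16x16 patch grid being
# loaded into a model that expects a 24x24 grid, with one extra class token.
old_grid, new_grid, width = 16, 24, 8
old_pos_embed = torch.randn(old_grid * old_grid + 1, width)

pos_emb_tok, pos_emb_img = old_pos_embed[:1], old_pos_embed[1:]  # split off the class token
pos_emb_img = pos_emb_img.reshape(1, old_grid, old_grid, -1).permute(0, 3, 1, 2)
pos_emb_img = F.interpolate(pos_emb_img, size=(new_grid, new_grid),
                            mode='bicubic', align_corners=True)  # same call as resize_clip_pos_embed
pos_emb_img = pos_emb_img.permute(0, 2, 3, 1).reshape(1, new_grid * new_grid, -1)[0]
new_pos_embed = torch.cat([pos_emb_tok, pos_emb_img], dim=0)

print(new_pos_embed.shape)  # torch.Size([577, 8]) -> 24*24 + 1 positions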
SDXL_EcomID_ComfyUI/examples/Multi-ControlNet.png
ADDED
Git LFS Details

SDXL_EcomID_ComfyUI/examples/ecomid_basic_workflow.json
ADDED
@@ -0,0 +1,800 @@
{
  "last_node_id": 15,
  "last_link_id": 18,
  "nodes": [
    {
      "id": 1,
      "type": "InstantIDModelLoader",
      "pos": [1009.44140625, 227.6875],
      "size": {"0": 315, "1": 58},
      "flags": {},
      "order": 0,
      "mode": 0,
      "outputs": [
        {"name": "INSTANTID", "type": "INSTANTID", "links": [3], "shape": 3, "label": "INSTANTID", "slot_index": 0}
      ],
      "properties": {"Node name for S&R": "InstantIDModelLoader"},
      "widgets_values": ["ip-adapter.bin"]
    },
    {
      "id": 9,
      "type": "EcomID_PulidModelLoader",
      "pos": [999, 320],
      "size": {"0": 315, "1": 58},
      "flags": {},
      "order": 1,
      "mode": 0,
      "outputs": [
        {"name": "PULID", "type": "PULID", "links": [4], "shape": 3, "label": "PULID", "slot_index": 0}
      ],
      "properties": {"Node name for S&R": "EcomID_PulidModelLoader"},
      "widgets_values": ["ip-adapter_pulid_sdxl_fp16.safetensors"]
    },
    {
      "id": 3,
      "type": "PulidEvaClipLoader",
      "pos": [1101, 421],
      "size": {"0": 210, "1": 26},
      "flags": {},
      "order": 2,
      "mode": 0,
      "outputs": [
        {"name": "EVA_CLIP", "type": "EVA_CLIP", "links": [5], "shape": 3, "label": "EVA_CLIP", "slot_index": 0}
      ],
      "properties": {"Node name for S&R": "PulidEvaClipLoader"}
    },
    {
      "id": 10,
      "type": "EcomIDFaceAnalysis",
      "pos": [999, 488],
      "size": {"0": 315, "1": 58},
      "flags": {},
      "order": 3,
      "mode": 0,
      "outputs": [
        {"name": "FACEANALYSIS", "type": "FACEANALYSIS", "links": [6], "shape": 3, "label": "FACEANALYSIS", "slot_index": 0}
      ],
      "properties": {"Node name for S&R": "EcomIDFaceAnalysis"},
      "widgets_values": ["CPU"]
    },
    {
      "id": 2,
      "type": "ControlNetLoader",
      "pos": [999, 589],
      "size": {"0": 315, "1": 58},
      "flags": {},
      "order": 4,
      "mode": 0,
      "outputs": [
        {"name": "CONTROL_NET", "type": "CONTROL_NET", "links": [7], "shape": 3, "label": "CONTROL_NET", "slot_index": 0}
      ],
      "properties": {"Node name for S&R": "ControlNetLoader"},
      "widgets_values": ["controlnet_ecomid_sdxl.safetensors"]
    },
    {
      "id": 5,
      "type": "CLIPTextEncode",
      "pos": [975, 689],
      "size": {"0": 380, "1": 160},
      "flags": {},
      "order": 8,
      "mode": 0,
      "inputs": [
        {"name": "clip", "type": "CLIP", "link": 1, "label": "clip"}
      ],
      "outputs": [
        {"name": "CONDITIONING", "type": "CONDITIONING", "links": [9], "slot_index": 0, "label": "CONDITIONING"}
      ],
      "properties": {"Node name for S&R": "CLIPTextEncode"},
      "widgets_values": ["1girl, solo, dress, jewelry, beach, pink_dress, realistic"]
    },
    {
      "id": 6,
      "type": "CLIPTextEncode",
      "pos": [975, 893],
      "size": {"0": 380, "1": 160},
      "flags": {},
      "order": 9,
      "mode": 0,
      "inputs": [
        {"name": "clip", "type": "CLIP", "link": 2, "label": "clip"}
      ],
      "outputs": [
        {"name": "CONDITIONING", "type": "CONDITIONING", "links": [10], "slot_index": 0, "label": "CONDITIONING"}
      ],
      "properties": {"Node name for S&R": "CLIPTextEncode"},
      "widgets_values": ["bad hand, (worst quality, low quality, nevus, normal quality:1.6), blur skin,nevus,signature, logo, watermark,username,text"]
    },
    {
      "id": 13,
      "type": "VAEDecode",
      "pos": [2113, 437],
      "size": {"0": 210, "1": 46},
      "flags": {},
      "order": 12,
      "mode": 0,
      "inputs": [
        {"name": "samples", "type": "LATENT", "link": 14, "label": "samples"},
        {"name": "vae", "type": "VAE", "link": 15, "label": "vae"}
      ],
      "outputs": [
        {"name": "IMAGE", "type": "IMAGE", "links": [16], "shape": 3, "label": "IMAGE", "slot_index": 0}
      ],
      "properties": {"Node name for S&R": "VAEDecode"}
    },
    {
      "id": 8,
      "type": "ApplyEcomIDAdvanced",
      "pos": [1411, 437],
      "size": {"0": 315, "1": 402},
      "flags": {},
      "order": 10,
      "mode": 0,
      "inputs": [
        {"name": "instantid_ipa", "type": "INSTANTID", "link": 3, "label": "instantid_ipa"},
        {"name": "pulid", "type": "PULID", "link": 4, "label": "pulid"},
        {"name": "eva_clip", "type": "EVA_CLIP", "link": 5, "label": "eva_clip"},
        {"name": "insightface", "type": "FACEANALYSIS", "link": 6, "label": "insightface"},
        {"name": "control_net", "type": "CONTROL_NET", "link": 7, "label": "control_net"},
        {"name": "image", "type": "IMAGE", "link": 8, "label": "image"},
        {"name": "model", "type": "MODEL", "link": 17, "label": "model"},
        {"name": "positive", "type": "CONDITIONING", "link": 9, "label": "positive"},
        {"name": "negative", "type": "CONDITIONING", "link": 10, "label": "negative"},
        {"name": "image_kps", "type": "IMAGE", "link": null, "label": "image_kps"},
        {"name": "mask", "type": "MASK", "link": null, "label": "mask"}
      ],
      "outputs": [
        {"name": "MODEL", "type": "MODEL", "links": [11], "shape": 3, "label": "MODEL", "slot_index": 0},
        {"name": "positive", "type": "CONDITIONING", "links": [12], "shape": 3, "label": "positive", "slot_index": 1},
        {"name": "negative", "type": "CONDITIONING", "links": [13], "shape": 3, "label": "negative", "slot_index": 2}
      ],
      "properties": {"Node name for S&R": "ApplyEcomIDAdvanced"},
      "widgets_values": ["fidelity", 0, 1, 0.3, 0.8, 0, "average"]
    },
    {
      "id": 15,
      "type": "EmptyLatentImage",
      "pos": [1407, 902],
      "size": {"0": 315, "1": 106},
      "flags": {},
      "order": 5,
      "mode": 0,
      "outputs": [
        {"name": "LATENT", "type": "LATENT", "links": [18], "shape": 3, "label": "LATENT", "slot_index": 0}
      ],
      "properties": {"Node name for S&R": "EmptyLatentImage"},
      "widgets_values": [1024, 1024, 1]
    },
    {
      "id": 11,
      "type": "LoadImage",
      "pos": [573, 329],
      "size": [315, 314],
      "flags": {},
      "order": 6,
      "mode": 0,
      "outputs": [
        {"name": "IMAGE", "type": "IMAGE", "links": [8], "shape": 3, "label": "IMAGE", "slot_index": 0},
        {"name": "MASK", "type": "MASK", "links": null, "shape": 3, "label": "MASK"}
      ],
      "properties": {"Node name for S&R": "LoadImage"},
      "widgets_values": ["4 (2).png", "image"]
    },
    {
      "id": 4,
      "type": "CheckpointLoaderSimple",
      "pos": [564, 739],
      "size": {"0": 315, "1": 98},
      "flags": {},
      "order": 7,
      "mode": 0,
      "outputs": [
        {"name": "MODEL", "type": "MODEL", "links": [17], "slot_index": 0, "label": "MODEL"},
        {"name": "CLIP", "type": "CLIP", "links": [1, 2], "slot_index": 1, "label": "CLIP"},
        {"name": "VAE", "type": "VAE", "links": [15], "slot_index": 2, "label": "VAE"}
      ],
      "properties": {"Node name for S&R": "CheckpointLoaderSimple"},
      "widgets_values": ["ecomxl.safetensors"]
    },
    {
      "id": 12,
      "type": "KSampler",
      "pos": [1763, 436],
      "size": {"0": 315, "1": 262},
      "flags": {},
      "order": 11,
      "mode": 0,
      "inputs": [
        {"name": "model", "type": "MODEL", "link": 11, "label": "model"},
        {"name": "positive", "type": "CONDITIONING", "link": 12, "label": "positive"},
        {"name": "negative", "type": "CONDITIONING", "link": 13, "label": "negative"},
        {"name": "latent_image", "type": "LATENT", "link": 18, "label": "latent_image"}
      ],
      "outputs": [
        {"name": "LATENT", "type": "LATENT", "links": [14], "shape": 3, "label": "LATENT", "slot_index": 0}
      ],
      "properties": {"Node name for S&R": "KSampler"},
      "widgets_values": [730401293336759, "randomize", 25, 3.5, "dpm_2", "karras", 1]
    },
    {
      "id": 14,
      "type": "PreviewImage",
      "pos": [2110, 532],
      "size": [428.19834675017046, 539.6653026265071],
      "flags": {},
      "order": 13,
      "mode": 0,
      "inputs": [
        {"name": "images", "type": "IMAGE", "link": 16, "label": "images"}
      ],
      "properties": {"Node name for S&R": "PreviewImage"},
      "widgets_values": ["0", "0", "0"]
    }
  ],
  "links": [
    [1, 4, 1, 5, 0, "CLIP"],
    [2, 4, 1, 6, 0, "CLIP"],
    [3, 1, 0, 8, 0, "INSTANTID"],
    [4, 9, 0, 8, 1, "PULID"],
    [5, 3, 0, 8, 2, "EVA_CLIP"],
    [6, 10, 0, 8, 3, "FACEANALYSIS"],
    [7, 2, 0, 8, 4, "CONTROL_NET"],
    [8, 11, 0, 8, 5, "IMAGE"],
    [9, 5, 0, 8, 7, "CONDITIONING"],
    [10, 6, 0, 8, 8, "CONDITIONING"],
    [11, 8, 0, 12, 0, "MODEL"],
    [12, 8, 1, 12, 1, "CONDITIONING"],
    [13, 8, 2, 12, 2, "CONDITIONING"],
    [14, 12, 0, 13, 0, "LATENT"],
    [15, 4, 2, 13, 1, "VAE"],
    [16, 13, 0, 14, 0, "IMAGE"],
    [17, 4, 0, 8, 6, "MODEL"],
    [18, 15, 0, 12, 3, "LATENT"]
  ],
  "groups": [],
  "config": {},
  "extra": {
    "ds": {
      "scale": 0.6830134553650709,
      "offset": [-260.1114479220462, -82.98489012650744]
    }
  },
  "version": 0.4
}
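A short sketch for sanity-checking the workflow outside ComfyUI (the file path is an assumption about where the JSON is saved locally); it loads the graph and prints the node types in execution order:

import json

# Path is illustrative; point it at your local copy of the workflow file.
with open("SDXL_EcomID_ComfyUI/examples/ecomid_basic_workflow.json") as f:
    workflow = json.load(f)

# Each node carries an "order" field, so sorting by it shows the execution sequence:
# the loaders first, then the text encoders, ApplyEcomIDAdvanced, KSampler,
# VAEDecode and finally PreviewImage.
for node in sorted(workflow["nodes"], key=lambda n: n["order"]):
    print(node["order"], node["type"])

print(len(workflow["links"]), "links")  # 18 links in this graph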
SDXL_EcomID_ComfyUI/examples/ecomid_basic_workflow.png
ADDED
Git LFS Details

SDXL_EcomID_ComfyUI/examples/keypoint.png
ADDED
Git LFS Details

SDXL_EcomID_ComfyUI/gitattributes.txt
ADDED
@@ -0,0 +1,13 @@
CrossAttentionPatch.py filter=lfs diff=lfs merge=lfs -text
EcomID.py filter=lfs diff=lfs merge=lfs -text
LICENSE filter=lfs diff=lfs merge=lfs -text
README.md filter=lfs diff=lfs merge=lfs -text
requirements.txt filter=lfs diff=lfs merge=lfs -text
__init__.py filter=lfs diff=lfs merge=lfs -text
encoders.py filter=lfs diff=lfs merge=lfs -text
eva_clip filter=lfs diff=lfs merge=lfs -text
examples filter=lfs diff=lfs merge=lfs -text
images filter=lfs diff=lfs merge=lfs -text
pyproject.toml filter=lfs diff=lfs merge=lfs -text
resampler.py filter=lfs diff=lfs merge=lfs -text
utils.py filter=lfs diff=lfs merge=lfs -text
SDXL_EcomID_ComfyUI/images/.DS_Store
ADDED
Binary file (6.15 kB)

SDXL_EcomID_ComfyUI/images/images_alibaba.png
ADDED

SDXL_EcomID_ComfyUI/images/images_alimama.png
ADDED

SDXL_EcomID_ComfyUI/images/overflow.png
ADDED
Git LFS Details

SDXL_EcomID_ComfyUI/images/show_case/.DS_Store
ADDED
Binary file (10.2 kB)

SDXL_EcomID_ComfyUI/images/show_case/1.png
ADDED
Git LFS Details

SDXL_EcomID_ComfyUI/images/show_case/10.png
ADDED
Git LFS Details

SDXL_EcomID_ComfyUI/images/show_case/11.png
ADDED
Git LFS Details