import os
import torch
import base64
import json
import zlib
import logging
import numpy as np
import safetensors.torch
from PIL import Image
class EmbeddingEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, torch.Tensor):
            return {'TORCHTENSOR': obj.cpu().detach().numpy().tolist()}
        return json.JSONEncoder.default(self, obj)
class EmbeddingDecoder(json.JSONDecoder):
    def __init__(self, *args, **kwargs):
        json.JSONDecoder.__init__(self, *args, object_hook=self.object_hook, **kwargs)

    def object_hook(self, d):
        if 'TORCHTENSOR' in d:
            return torch.from_numpy(np.array(d['TORCHTENSOR']))
        return d
def embedding_to_b64(data):
    d = json.dumps(data, cls=EmbeddingEncoder)
    return base64.b64encode(d.encode())


def embedding_from_b64(data):
    d = base64.b64decode(data)
    return json.loads(d, cls=EmbeddingDecoder)
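
# Illustrative sketch (not part of the original module): round-tripping a payload
# through the two b64 helpers above, which is the format stored in the
# 'sd-ti-embedding' PNG text chunk. The key name and tensor shape are arbitrary.
def _demo_b64_roundtrip():
    data = {'emb_params': torch.zeros(2, 4)}
    encoded = embedding_to_b64(data)        # base64-encoded JSON, returned as bytes
    decoded = embedding_from_b64(encoded)   # tensor restored via EmbeddingDecoder (as float64, through numpy)
    assert decoded['emb_params'].shape == data['emb_params'].shape
    return encoded, decoded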
def lcg(m=2 ** 32, a=1664525, c=1013904223, seed=0):
    while True:
        seed = (a * seed + c) % m
        yield seed % 255


def xor_block(block):
    g = lcg()
    randblock = np.array([next(g) for _ in range(np.prod(block.shape))]).astype(np.uint8).reshape(block.shape)
    return np.bitwise_xor(block.astype(np.uint8), randblock & 0x0F)
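
# Illustrative sketch (assumption, not from the original module): xor_block XORs the
# block with a deterministic LCG keystream masked to the low nibble, so applying it
# twice restores the input. extract_image_data_embed relies on this to recover the data.
def _demo_xor_involution():
    block = (np.arange(12, dtype=np.uint8) & 0x0F).reshape(2, 2, 3)
    assert np.array_equal(xor_block(xor_block(block)), block)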
def crop_black(img, tol=0):
    mask = (img > tol).all(2)
    mask0, mask1 = mask.any(0), mask.any(1)
    col_start, col_end = mask0.argmax(), mask.shape[1] - mask0[::-1].argmax()
    row_start, row_end = mask1.argmax(), mask.shape[0] - mask1[::-1].argmax()
    return img[row_start:row_end, col_start:col_end]
def extract_image_data_embed(image):
    d = 3
    outarr = crop_black(np.array(image.convert('RGB').getdata()).reshape(image.size[1], image.size[0], d).astype(np.uint8)) & 0x0F
    black_cols = np.where(np.sum(outarr, axis=(0, 2)) == 0)
    if black_cols[0].shape[0] < 2:
        print(f'{os.path.basename(getattr(image, "filename", "unknown image file"))}: no embedded information found.')
        return None

    data_block_lower = outarr[:, :black_cols[0].min(), :].astype(np.uint8)
    data_block_upper = outarr[:, black_cols[0].max() + 1:, :].astype(np.uint8)

    data_block_lower = xor_block(data_block_lower)
    data_block_upper = xor_block(data_block_upper)
    data_block = (data_block_upper << 4) | (data_block_lower)
    data_block = data_block.flatten().tobytes()

    data = zlib.decompress(data_block)
    return json.loads(data, cls=EmbeddingDecoder)
class Embedding:
    def __init__(self, vec, name, step=None):
        self.vec = vec
        self.name = name
        self.step = step
        self.shape = None
        self.vectors = 0
        self.sd_checkpoint = None
        self.sd_checkpoint_name = None
class DirWithTextualInversionEmbeddings:
    def __init__(self, path):
        self.path = path
        self.mtime = None

    def has_changed(self):
        if not os.path.isdir(self.path):
            return False

        mt = os.path.getmtime(self.path)
        if self.mtime is None or mt > self.mtime:
            return True

    def update(self):
        if not os.path.isdir(self.path):
            return

        self.mtime = os.path.getmtime(self.path)
class EmbeddingDatabase:
    def __init__(self, tokenizer, expected_shape=-1):
        self.ids_lookup = {}
        self.word_embeddings = {}
        self.embedding_dirs = {}
        self.skipped_embeddings = {}
        self.expected_shape = expected_shape
        self.tokenizer = tokenizer
        self.fixes = []

    def add_embedding_dir(self, path):
        self.embedding_dirs[path] = DirWithTextualInversionEmbeddings(path)

    def clear_embedding_dirs(self):
        self.embedding_dirs.clear()

    def register_embedding(self, embedding):
        return self.register_embedding_by_name(embedding, embedding.name)
    def register_embedding_by_name(self, embedding, name):
        ids = self.tokenizer([name], truncation=False, add_special_tokens=False)["input_ids"][0]
        first_id = ids[0]
        if first_id not in self.ids_lookup:
            self.ids_lookup[first_id] = []
        if name in self.word_embeddings:
            lookup = [x for x in self.ids_lookup[first_id] if x[1].name != name]
        else:
            lookup = self.ids_lookup[first_id]
        if embedding is not None:
            lookup += [(ids, embedding)]
        self.ids_lookup[first_id] = sorted(lookup, key=lambda x: len(x[0]), reverse=True)
        if embedding is None:
            if name in self.word_embeddings:
                del self.word_embeddings[name]
            if len(self.ids_lookup[first_id]) == 0:
                del self.ids_lookup[first_id]
            return None
        self.word_embeddings[name] = embedding
        return embedding
    def load_from_file(self, path, filename):
        name, ext = os.path.splitext(filename)
        ext = ext.upper()

        if ext in ['.PNG', '.WEBP', '.JXL', '.AVIF']:
            _, second_ext = os.path.splitext(name)
            if second_ext.upper() == '.PREVIEW':
                return

            embed_image = Image.open(path)
            if hasattr(embed_image, 'text') and 'sd-ti-embedding' in embed_image.text:
                data = embedding_from_b64(embed_image.text['sd-ti-embedding'])
                name = data.get('name', name)
            else:
                data = extract_image_data_embed(embed_image)
                if data:
                    name = data.get('name', name)
                else:
                    return
        elif ext in ['.BIN', '.PT']:
            data = torch.load(path, map_location="cpu")
        elif ext in ['.SAFETENSORS']:
            data = safetensors.torch.load_file(path, device="cpu")
        else:
            return

        emb_out = None
        if data is not None:
            embedding = create_embedding_from_data(data, name, filename=filename, filepath=path)
            # if self.expected_shape == -1 or self.expected_shape == embedding.shape:
            emb_out = self.register_embedding(embedding)
            # else:
            #     emb_out = self.skipped_embeddings[name] = embedding
        else:
            print(f"Unable to load Textual inversion embedding due to data issue: '{name}'.")
        return emb_out
    def load_from_dir(self, embdir):
        if not os.path.isdir(embdir.path):
            return

        for root, _, fns in os.walk(embdir.path, followlinks=True):
            for fn in fns:
                try:
                    fullfn = os.path.join(root, fn)
                    if os.stat(fullfn).st_size == 0:
                        continue
                    self.load_from_file(fullfn, fn)
                except Exception:
                    print(f"Error loading embedding {fn}")
                    continue

    def load_textual_inversion_embeddings(self):
        self.ids_lookup.clear()
        self.word_embeddings.clear()
        self.skipped_embeddings.clear()

        for embdir in self.embedding_dirs.values():
            self.load_from_dir(embdir)
            embdir.update()
        return
    def find_embedding_at_position(self, tokens, offset):
        token = tokens[offset]
        possible_matches = self.ids_lookup.get(token, None)

        if possible_matches is None:
            return None, None

        for ids, embedding in possible_matches:
            if tokens[offset:offset + len(ids)] == ids:
                return embedding, len(ids)

        return None, None
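
# Usage sketch (assumption, not part of the original module): EmbeddingDatabase is
# driven by a CLIP-style tokenizer, since register_embedding_by_name calls it with
# truncation/add_special_tokens keyword arguments. The 'embeddings' folder name and
# prompt word below are illustrative.
def _demo_embedding_database(tokenizer, prompt_word='my-style'):
    db = EmbeddingDatabase(tokenizer)
    db.add_embedding_dir('embeddings')   # hypothetical folder of .pt/.safetensors/.bin/.png files
    db.load_textual_inversion_embeddings()
    tokens = tokenizer([prompt_word], truncation=False, add_special_tokens=False)["input_ids"][0]
    return db.find_embedding_at_position(tokens, 0)   # (Embedding, token span length) or (None, None)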
def create_embedding_from_data(data, name, filename='unknown embedding file', filepath=None):
    if 'string_to_param' in data:  # textual inversion embeddings
        param_dict = data['string_to_param']
        param_dict = getattr(param_dict, '_parameters', param_dict)  # fix for torch 1.12.1 loading saved file from torch 1.11
        assert len(param_dict) == 1, 'embedding file has multiple terms in it'
        emb = next(iter(param_dict.items()))[1]
        vec = emb.detach().to(dtype=torch.float32)
        shape = vec.shape[-1]
        vectors = vec.shape[0]
    elif type(data) == dict and 'clip_g' in data and 'clip_l' in data:  # SDXL embedding
        vec = {k: v.detach().to(dtype=torch.float32) for k, v in data.items()}
        shape = data['clip_g'].shape[-1] + data['clip_l'].shape[-1]
        vectors = data['clip_g'].shape[0]
    elif type(data) == dict and type(next(iter(data.values()))) == torch.Tensor:  # diffuser concepts
        assert len(data.keys()) == 1, 'embedding file has multiple terms in it'

        emb = next(iter(data.values()))
        if len(emb.shape) == 1:
            emb = emb.unsqueeze(0)

        vec = emb.detach().to(dtype=torch.float32)
        shape = vec.shape[-1]
        vectors = vec.shape[0]
    else:
        raise Exception(f"Couldn't identify {filename} as either a textual inversion embedding or a diffuser concept.")

    embedding = Embedding(vec, name)
    embedding.step = data.get('step', None)
    embedding.sd_checkpoint = data.get('sd_checkpoint', None)
    embedding.sd_checkpoint_name = data.get('sd_checkpoint_name', None)
    embedding.vectors = vectors
    embedding.shape = shape

    return embedding
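
# Sketch of the payload layouts the function above accepts (assumption: the '*' and
# '<demo>' keys are placeholders; real files use their own token names):
def _demo_create_embedding_from_data():
    a1111_style = {'string_to_param': {'*': torch.zeros(4, 768)}, 'step': 1000}
    concept_style = {'<demo>': torch.zeros(768)}   # diffusers "concept": one token -> one vector
    emb_a = create_embedding_from_data(a1111_style, 'demo-a')
    emb_c = create_embedding_from_data(concept_style, 'demo-c')
    return (emb_a.vectors, emb_a.shape), (emb_c.vectors, emb_c.shape)   # ((4, 768), (1, 768))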
from comfy.sd1_clip import expand_directory_list
def get_embed_file_path(embedding_name, embedding_directory):
    if isinstance(embedding_directory, str):
        embedding_directory = [embedding_directory]

    embedding_directory = expand_directory_list(embedding_directory)

    valid_file = None
    for embed_dir in embedding_directory:
        embed_path = os.path.abspath(os.path.join(embed_dir, embedding_name))
        embed_dir = os.path.abspath(embed_dir)
        try:
            if os.path.commonpath((embed_dir, embed_path)) != embed_dir:
                continue
        except Exception:
            continue
        if not os.path.isfile(embed_path):
            extensions = ['.safetensors', '.pt', '.bin']
            for x in extensions:
                t = embed_path + x
                if os.path.isfile(t):
                    valid_file = t
                    break
        else:
            valid_file = embed_path
        if valid_file is not None:
            break

    if valid_file is None:
        return None

    return valid_file
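
# Usage sketch (assumption; 'my-style' and 'embeddings' are illustrative): names are
# resolved only inside the given directories, and the file extension may be omitted.
def _demo_resolve_embedding_path():
    return get_embed_file_path('my-style', 'embeddings')   # absolute path or None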
import re
from ..shared import logger
emb_re_ = r"(embedding:)?(?:({}[\w\.\-\!\$\/\\]+(\.safetensors|\.pt|\.bin)|(?(1)[\w\.\-\!\$\/\\]+|(?!)))(\.safetensors|\.pt|\.bin)?)(?:(:)(\d+\.?\d*|\d*\.\d+))?"
def get_valid_embeddings(embedding_directories):
    from builtins import any as b_any
    from collections import OrderedDict, namedtuple

    exts = ['.safetensors', '.pt', '.bin']
    if isinstance(embedding_directories, str):
        embedding_directories = [embedding_directories]
    embedding_directories = expand_directory_list(embedding_directories)

    EmbedInfo = namedtuple('EmbedInfo', ['basename', 'filename', 'filepath'])
    store = OrderedDict()
    for embd in embedding_directories:
        for root, dirs, files in os.walk(embd, followlinks=True, topdown=False):
            for name in files:
                if not b_any(x in os.path.splitext(name)[1] for x in exts):
                    continue
                basename = os.path.basename(name)
                for ext in exts:
                    basename = basename.removesuffix(ext)
                relpath_basename = os.path.normpath(os.path.join(os.path.relpath(root, embd), basename))
                k = os.path.normpath(os.path.join(os.path.relpath(root, embd), name))
                store[k] = EmbedInfo(basename, name, os.path.join(root, name))
                # add its counterpart with the other path-separator style
                if '/' in k:
                    store[k.replace('/', '\\')] = EmbedInfo(basename, name, os.path.join(root, name))
                elif '\\' in relpath_basename:
                    store[k.replace('\\', '/')] = EmbedInfo(basename, name, os.path.join(root, name))
    embs = OrderedDict(sorted(store.items(), key=lambda item: len(item[0]), reverse=True))
    return embs
class EmbbeddingRegex:
    STR_PATTERN = r"(embedding:)?(?:({}[\w\.\-\!\$\/\\]+(\.safetensors|\.pt|\.bin)|(?(1)[\w\.\-\!\$\/\\]+|(?!)))(\.safetensors|\.pt|\.bin)?)(?:(:)(\d+\.?\d*|\d*\.\d+))?"

    def __init__(self, embedding_directory) -> None:
        self.embedding_directory = embedding_directory
        self.embeddings = get_valid_embeddings(self.embedding_directory) if self.embedding_directory is not None else {}
        joined_keys = '|'.join([re.escape(os.path.splitext(k)[0]) for k in self.embeddings.keys()])
        emb_re = self.STR_PATTERN.format(joined_keys + '|' if joined_keys else '')
        self.pattern = re.compile(emb_re, flags=re.MULTILINE | re.UNICODE | re.IGNORECASE)
def parse_and_register_embeddings(self, text: str):
    embr = EmbbeddingRegex(self.embedding_directory)
    embs = embr.embeddings
    matches = embr.pattern.finditer(text)
    exts = ['.pt', '.safetensors', '.bin']
    for matchNum, match in enumerate(matches, start=1):
        found = False
        ext = (match.group(4) or (match.group(3) or ''))
        embedding_sname = (match.group(2) or '').removesuffix(ext)
        embedding_name = embedding_sname + ext
        if embedding_name:
            embed = None
            if ext:
                embed_info = embs.get(embedding_name + ext, None)
            else:
                for _ext in exts:
                    embed_info = embs.get(embedding_name + _ext, None)
                    if embed_info is not None:
                        break
            if embed_info is not None:
                found = True
                try:
                    embed = self.embeddings.load_from_file(embed_info.filepath, embed_info.filename)
                except Exception as e:
                    logging.warning(f'\033[33mWarning\033[0m loading embedding `{embedding_name + ext}`: {e}')
            if embed is not None:
                found = True
                logger.debug(f'using embedding:{embedding_name}')
        if not found:
            logging.warning(f"\033[33mwarning\033[0m, embedding:{embedding_name} does not exist, ignoring")
    # ComfyUI trims non-existent embedding names while A1111 doesn't.
    # Here we keep groups 2, 5 and 6 (group 2 minus its file extension).
    out = embr.pattern.sub(lambda m: (m.group(2) or '').removesuffix(m.group(4) or (m.group(3) or '')) + (m.group(5) or '') + (m.group(6) or ''), text)
    return out
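
# Sketch of the prompt syntax the pattern recognizes (assumption, not part of the
# original module). With no embedding directory, only the explicit "embedding:" form
# matches; group 2 is the name and group 6 the optional strength.
def _demo_embedding_pattern():
    embr = EmbbeddingRegex(None)
    m = embr.pattern.search('a photo of embedding:my-style:0.8')
    return m.group(2), m.group(6)   # -> ('my-style', '0.8')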