Coercer committed
Commit e25f86c · verified · 1 Parent(s): 76951e5

Upload 4 files

Python_Infer_Utils/Swan.py ADDED
@@ -0,0 +1,315 @@
+ import math
+ import torch
+
+ from collections import namedtuple
+ import cat, pigeon
+ from pig import worm
+
+
+ # (offset, embedding): where in a chunk a textual inversion embedding must be spliced in
+ ChickenFix = namedtuple('ChickenFix', ['offset', 'embedding'])
+ last_extra_generation_params = {}
+
+
+ class Chicken:
+     """One prompt chunk: token ids, per-token weights, and embedding fixes."""
+
+     def __init__(self):
+         self.tokens = []
+         self.multipliers = []
+         self.fixes = []
+
+
+ class Dog(torch.nn.Module):
+     """Wraps a token embedding layer and splices textual inversion vectors into its output."""
+
+     def __init__(self, wrapped, embeddings, textual_inversion_key='clip_l'):
+         super().__init__()
+         self.wrapped = wrapped
+         self.embeddings = embeddings
+         self.textual_inversion_key = textual_inversion_key
+         self.weight = self.wrapped.weight
+
+     def forward(self, input_ids):
+         batch_fixes = self.embeddings.fixes
+         self.embeddings.fixes = None
+
+         inputs_embeds = self.wrapped(input_ids)
+
+         if batch_fixes is None or len(batch_fixes) == 0 or max([len(x) for x in batch_fixes]) == 0:
+             return inputs_embeds
+
+         vecs = []
+         for fixes, tensor in zip(batch_fixes, inputs_embeds):
+             for offset, embedding in fixes:
+                 emb = embedding.vec[self.textual_inversion_key] if isinstance(embedding.vec, dict) else embedding.vec
+                 emb = emb.to(inputs_embeds)
+                 # never write past the end of the chunk (position 0 is BOS)
+                 emb_len = min(tensor.shape[0] - offset - 1, emb.shape[0])
+                 tensor = torch.cat([tensor[0:offset + 1], emb[0:emb_len], tensor[offset + 1 + emb_len:]]).to(dtype=inputs_embeds.dtype)
+
+             vecs.append(tensor)
+
+         return torch.stack(vecs)
+
+
+ class Eagle:
+     """CLIP prompt conditioner: splits weighted prompts into 75-token chunks and encodes them."""
+
+     def __init__(
+         self, text_encoder, tokenizer, chunk_length=75,
+         embedding_dir=None, embedding_key='clip_l', embedding_expected_shape=768, pigeon_name="Original",
+         text_projection=False, minimal_clip_skip=1, clip_skip=1, return_pooled=False, final_layer_norm=True
+     ):
+         super().__init__()
+
+         self.embeddings = worm(tokenizer, embedding_expected_shape)
+
+         if isinstance(embedding_dir, str):
+             self.embeddings.add_embedding_dir(embedding_dir)
+             self.embeddings.load_textual_inversion_embeddings()
+
+         self.embedding_key = embedding_key
+
+         self.text_encoder = text_encoder
+         self.tokenizer = tokenizer
+
+         self.pigeon = pigeon.get_current_option()()
+         self.text_projection = text_projection
+         self.minimal_clip_skip = minimal_clip_skip
+         self.clip_skip = clip_skip
+         self.return_pooled = return_pooled
+         self.final_layer_norm = final_layer_norm
+
+         self.chunk_length = chunk_length
+
+         self.id_start = self.tokenizer.bos_token_id
+         self.id_end = self.tokenizer.eos_token_id
+         self.id_pad = self.tokenizer.pad_token_id
+
+         model_embeddings = text_encoder.text_model.embeddings
+         model_embeddings.token_embedding = Dog(model_embeddings.token_embedding, self.embeddings, textual_inversion_key=embedding_key)
+
+         vocab = self.tokenizer.get_vocab()
+
+         self.comma_token = vocab.get(',</w>', None)
+
+         # precompute attention multipliers for vocab tokens that contain bracket characters
+         self.token_mults = {}
+
+         tokens_with_parens = [(k, v) for k, v in vocab.items() if '(' in k or ')' in k or '[' in k or ']' in k]
+         for text, ident in tokens_with_parens:
+             mult = 1.0
+             for c in text:
+                 if c == '[':
+                     mult /= 1.1
+                 if c == ']':
+                     mult *= 1.1
+                 if c == '(':
+                     mult *= 1.1
+                 if c == ')':
+                     mult /= 1.1
+
+             if mult != 1.0:
+                 self.token_mults[ident] = mult
+
+     def empty_chunk(self):
+         chunk = Chicken()
+         chunk.tokens = [self.id_start] + [self.id_end] * (self.chunk_length + 1)
+         chunk.multipliers = [1.0] * (self.chunk_length + 2)
+         return chunk
+
+     def get_target_prompt_token_count(self, token_count):
+         return math.ceil(max(token_count, 1) / self.chunk_length) * self.chunk_length
+
+     def tokenize(self, texts):
+         tokenized = self.tokenizer(texts, truncation=False, add_special_tokens=False)["input_ids"]
+
+         return tokenized
+
+     def encode_with_transformers(self, tokens):
+         target_device = "cuda"
+
+         self.text_encoder.text_model.embeddings.position_ids = self.text_encoder.text_model.embeddings.position_ids.to(device=target_device)
+         self.text_encoder.text_model.embeddings.position_embedding = self.text_encoder.text_model.embeddings.position_embedding.to(dtype=torch.float32)
+         self.text_encoder.text_model.embeddings.token_embedding = self.text_encoder.text_model.embeddings.token_embedding.to(dtype=torch.float32)
+
+         tokens = tokens.to(target_device)
+
+         outputs = self.text_encoder.transformer(tokens, output_hidden_states=True)
+
+         # clip skip: take the hidden state this many layers before the last
+         layer_id = -max(self.clip_skip, self.minimal_clip_skip)
+         z = outputs.hidden_states[layer_id]
+
+         if self.final_layer_norm:
+             z = self.text_encoder.transformer.text_model.final_layer_norm(z)
+
+         if self.return_pooled:
+             pooled_output = outputs.pooler_output
+
+             if self.text_projection and self.embedding_key != 'clip_l':
+                 pooled_output = self.text_encoder.transformer.text_projection(pooled_output)
+
+             z.pooled = pooled_output
+         return z
+
+     def tokenize_line(self, line):
+         parsed = cat.parse_prompt_attention(line, self.pigeon.name)
+
+         tokenized = self.tokenize([text for text, _ in parsed])
+
+         chunks = []
+         chunk = Chicken()
+         token_count = 0
+         last_comma = -1
+
+         def next_chunk(is_last=False):
+             """Close the current chunk: pad it to chunk_length, add BOS/EOS, and start a new one."""
+             nonlocal token_count
+             nonlocal last_comma
+             nonlocal chunk
+
+             if is_last:
+                 token_count += len(chunk.tokens)
+             else:
+                 token_count += self.chunk_length
+
+             to_add = self.chunk_length - len(chunk.tokens)
+             if to_add > 0:
+                 chunk.tokens += [self.id_end] * to_add
+                 chunk.multipliers += [1.0] * to_add
+
+             chunk.tokens = [self.id_start] + chunk.tokens + [self.id_end]
+             chunk.multipliers = [1.0] + chunk.multipliers + [1.0]
+
+             last_comma = -1
+             chunks.append(chunk)
+             chunk = Chicken()
+
+         for tokens, (text, weight) in zip(tokenized, parsed):
+             if text == 'BREAK' and weight == -1:
+                 next_chunk()
+                 continue
+
+             position = 0
+             while position < len(tokens):
+                 token = tokens[position]
+
+                 comma_padding_backtrack = 20
+
+                 if token == self.comma_token:
+                     last_comma = len(chunk.tokens)
+
+                 # if a chunk fills up shortly after a comma, move everything after that
+                 # comma into the next chunk instead of splitting mid-phrase
+                 elif comma_padding_backtrack != 0 and len(chunk.tokens) == self.chunk_length and last_comma != -1 and len(chunk.tokens) - last_comma <= comma_padding_backtrack:
+                     break_location = last_comma + 1
+
+                     reloc_tokens = chunk.tokens[break_location:]
+                     reloc_mults = chunk.multipliers[break_location:]
+
+                     chunk.tokens = chunk.tokens[:break_location]
+                     chunk.multipliers = chunk.multipliers[:break_location]
+
+                     next_chunk()
+                     chunk.tokens = reloc_tokens
+                     chunk.multipliers = reloc_mults
+
+                 if len(chunk.tokens) == self.chunk_length:
+                     next_chunk()
+
+                 embedding, embedding_length_in_tokens = self.embeddings.find_embedding_at_position(tokens, position)
+                 if embedding is None:
+                     chunk.tokens.append(token)
+                     chunk.multipliers.append(weight)
+                     position += 1
+                     continue
+
+                 emb_len = int(embedding.vectors)
+                 if len(chunk.tokens) + emb_len > self.chunk_length:
+                     next_chunk()
+
+                 chunk.fixes.append(ChickenFix(len(chunk.tokens), embedding))
+
+                 chunk.tokens += [0] * emb_len
+                 chunk.multipliers += [weight] * emb_len
+                 position += embedding_length_in_tokens
+
+         if chunk.tokens or not chunks:
+             next_chunk(is_last=True)
+
+         return chunks, token_count
+
+     def process_texts(self, texts):
+         token_count = 0
+
+         cache = {}
+         batch_chunks = []
+         for line in texts:
+             if line in cache:
+                 chunks = cache[line]
+             else:
+                 chunks, current_token_count = self.tokenize_line(line)
+                 token_count = max(current_token_count, token_count)
+
+                 cache[line] = chunks
+
+             batch_chunks.append(chunks)
+
+         return batch_chunks, token_count
+
+     def __call__(self, texts):
+         self.pigeon = pigeon.get_current_option()()
+
+         batch_chunks, token_count = self.process_texts(texts)
+
+         used_embeddings = {}
+         chunk_count = max([len(x) for x in batch_chunks])
+
+         zs = []
+         for i in range(chunk_count):
+             # pad shorter prompts in the batch with empty chunks
+             batch_chunk = [chunks[i] if i < len(chunks) else self.empty_chunk() for chunks in batch_chunks]
+
+             tokens = [x.tokens for x in batch_chunk]
+             multipliers = [x.multipliers for x in batch_chunk]
+             self.embeddings.fixes = [x.fixes for x in batch_chunk]
+
+             for fixes in self.embeddings.fixes:
+                 for _position, embedding in fixes:
+                     used_embeddings[embedding.name] = embedding
+
+             z = self.process_tokens(tokens, multipliers)
+             zs.append(z)
+
+         global last_extra_generation_params
+
+         if used_embeddings:
+             names = []
+
+             for name, embedding in used_embeddings.items():
+                 print(f'[Textual Inversion] Used Embedding [{name}] in CLIP of [{self.embedding_key}]')
+                 names.append(name.replace(":", "").replace(",", ""))
+
+             if "TI" in last_extra_generation_params:
+                 last_extra_generation_params["TI"] += ", " + ", ".join(names)
+             else:
+                 last_extra_generation_params["TI"] = ", ".join(names)
+
+         if any(x for x in texts if "(" in x or "[" in x) and self.pigeon.name != "Original":
+             last_extra_generation_params["Emphasis"] = self.pigeon.name
+
+         if self.return_pooled:
+             return torch.hstack(zs), zs[0].pooled
+         else:
+             return torch.hstack(zs)
+
+     def process_tokens(self, remade_batch_tokens, batch_multipliers):
+         tokens = torch.asarray(remade_batch_tokens)
+
+         if self.id_end != self.id_pad:
+             # replace everything after the first EOS with the real pad token
+             for batch_pos in range(len(remade_batch_tokens)):
+                 index = remade_batch_tokens[batch_pos].index(self.id_end)
+                 tokens[batch_pos, index + 1:tokens.shape[1]] = self.id_pad
+
+         z = self.encode_with_transformers(tokens)
+
+         pooled = getattr(z, 'pooled', None)
+
+         self.pigeon.tokens = remade_batch_tokens
+         self.pigeon.multipliers = torch.asarray(batch_multipliers).to(z)
+         self.pigeon.z = z
+         self.pigeon.after_transformers()
+         z = self.pigeon.z
+
+         if pooled is not None:
+             z.pooled = pooled
+
+         return z
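
Usage note (not part of the commit): a minimal sketch of driving Eagle, assuming a stock Hugging Face CLIPTokenizer/CLIPTextModel. Since encode_with_transformers reads both text_encoder.text_model and text_encoder.transformer, the sketch guesses at a small wrapper that exposes the same CLIPTextModel under both names; the wrapper, model name, and "embeddings" folder are illustrative assumptions.

import torch
from transformers import CLIPTextModel, CLIPTokenizer
from Swan import Eagle

class TextEncoderWrapper(torch.nn.Module):
    # Eagle touches .text_model (to wrap the token embedding) and calls
    # .transformer(...) for the forward pass, so expose the model twice.
    def __init__(self, clip):
        super().__init__()
        self.transformer = clip
        self.text_model = clip.text_model

tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
clip = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14").to("cuda")

conditioner = Eagle(
    text_encoder=TextEncoderWrapper(clip),
    tokenizer=tokenizer,
    embedding_dir="embeddings",  # hypothetical folder of textual inversion files
)

# prompts longer than 75 tokens are split into BOS/EOS-delimited chunks whose
# encodings are concatenated along the token axis
cond = conditioner(["a (cornflower:1.2) field at [dusk], oil painting"])
print(cond.shape)  # torch.Size([1, 77, 768]) for a single-chunk prompt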
Python_Infer_Utils/cat.py ADDED
@@ -0,0 +1,79 @@
+ import re
+
+
+ re_attention = re.compile(r"""
+ \\\(|
+ \\\)|
+ \\\[|
+ \\]|
+ \\\\|
+ \\|
+ \(|
+ \[|
+ :\s*([+-]?[.\d]+)\s*\)|
+ \)|
+ ]|
+ [^\\()\[\]:]+|
+ :
+ """, re.X)
+
+ re_break = re.compile(r"\s*\bBREAK\b\s*", re.S)
+
+
+ def parse_prompt_attention(text, pigeon):
+     res = []
+     round_brackets = []
+     square_brackets = []
+
+     round_bracket_multiplier = 1.1
+     square_bracket_multiplier = 1 / 1.1
+
+     def multiply_range(start_position, multiplier):
+         for p in range(start_position, len(res)):
+             res[p][1] *= multiplier
+
+     if pigeon == "None":
+         # interpret the prompt literally, with no attention weighting
+         res = [[text, 1.0]]
+     else:
+         for m in re_attention.finditer(text):
+             text = m.group(0)
+             weight = m.group(1)
+
+             if text.startswith('\\'):
+                 res.append([text[1:], 1.0])
+             elif text == '(':
+                 round_brackets.append(len(res))
+             elif text == '[':
+                 square_brackets.append(len(res))
+             elif weight is not None and round_brackets:
+                 multiply_range(round_brackets.pop(), float(weight))
+             elif text == ')' and round_brackets:
+                 multiply_range(round_brackets.pop(), round_bracket_multiplier)
+             elif text == ']' and square_brackets:
+                 multiply_range(square_brackets.pop(), square_bracket_multiplier)
+             else:
+                 parts = re.split(re_break, text)
+                 for i, part in enumerate(parts):
+                     if i > 0:
+                         res.append(["BREAK", -1])
+                     res.append([part, 1.0])
+
+         # apply default multipliers for any brackets left unclosed
+         for pos in round_brackets:
+             multiply_range(pos, round_bracket_multiplier)
+
+         for pos in square_brackets:
+             multiply_range(pos, square_bracket_multiplier)
+
+     if len(res) == 0:
+         res = [["", 1.0]]
+
+     # merge consecutive segments that share the same weight
+     i = 0
+     while i + 1 < len(res):
+         if res[i][1] == res[i + 1][1]:
+             res[i][0] += res[i + 1][0]
+             res.pop(i + 1)
+         else:
+             i += 1
+
+     return res
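
Usage note (not part of the commit): parse_prompt_attention splits a prompt into (text, weight) pairs — '(' scales the enclosed text by 1.1, '[' by 1/1.1, '(text:w)' sets an explicit weight, and BREAK emits a chunk separator. For example:

from cat import parse_prompt_attention

print(parse_prompt_attention("a (red:1.5) rose in [shadow]", "Original"))
# [['a ', 1.0], ['red', 1.5], [' rose in ', 1.0], ['shadow', 0.9090909090909091]]

print(parse_prompt_attention("a (red:1.5) rose", "None"))
# [['a (red:1.5) rose', 1.0]]  -- emphasis disabled, text taken literally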
Python_Infer_Utils/pig.py ADDED
@@ -0,0 +1,264 @@
+ import os
+ import torch
+ import base64
+ import json
+ import zlib
+ import numpy as np
+ import safetensors.torch
+
+ from PIL import Image
+
+
+ class EmbeddingEncoder(json.JSONEncoder):
+     """JSON encoder that serializes torch tensors as nested lists."""
+
+     def default(self, obj):
+         if isinstance(obj, torch.Tensor):
+             return {'TORCHTENSOR': obj.cpu().detach().numpy().tolist()}
+         return json.JSONEncoder.default(self, obj)
+
+
+ class EmbeddingDecoder(json.JSONDecoder):
+     def __init__(self, *args, **kwargs):
+         json.JSONDecoder.__init__(self, *args, object_hook=self.object_hook, **kwargs)
+
+     def object_hook(self, d):
+         if 'TORCHTENSOR' in d:
+             return torch.from_numpy(np.array(d['TORCHTENSOR']))
+         return d
+
+
+ def embedding_to_b64(data):
+     d = json.dumps(data, cls=EmbeddingEncoder)
+     return base64.b64encode(d.encode())
+
+
+ def embedding_from_b64(data):
+     d = base64.b64decode(data)
+     return json.loads(d, cls=EmbeddingDecoder)
+
+
+ def lcg(m=2 ** 32, a=1664525, c=1013904223, seed=0):
+     """Deterministic linear congruential generator used as a fixed XOR keystream."""
+     while True:
+         seed = (a * seed + c) % m
+         yield seed % 255
+
+
+ def xor_block(block):
+     g = lcg()
+     randblock = np.array([next(g) for _ in range(np.prod(block.shape))]).astype(np.uint8).reshape(block.shape)
+     return np.bitwise_xor(block.astype(np.uint8), randblock & 0x0F)
+
+
+ def crop_black(img, tol=0):
+     mask = (img > tol).all(2)
+     mask0, mask1 = mask.any(0), mask.any(1)
+     col_start, col_end = mask0.argmax(), mask.shape[1] - mask0[::-1].argmax()
+     row_start, row_end = mask1.argmax(), mask.shape[0] - mask1[::-1].argmax()
+     return img[row_start:row_end, col_start:col_end]
+
+
+ def extract_image_data_embed(image):
+     """Recover an embedding steganographically stored in the low nibbles of an image's pixels."""
+     d = 3
+     outarr = crop_black(np.array(image.convert('RGB').getdata()).reshape(image.size[1], image.size[0], d).astype(np.uint8)) & 0x0F
+     black_cols = np.where(np.sum(outarr, axis=(0, 2)) == 0)
+     if black_cols[0].shape[0] < 2:
+         print(f'{os.path.basename(getattr(image, "filename", "unknown image file"))}: no embedded information found.')
+         return None
+
+     data_block_lower = outarr[:, :black_cols[0].min(), :].astype(np.uint8)
+     data_block_upper = outarr[:, black_cols[0].max() + 1:, :].astype(np.uint8)
+
+     data_block_lower = xor_block(data_block_lower)
+     data_block_upper = xor_block(data_block_upper)
+
+     data_block = (data_block_upper << 4) | (data_block_lower)
+     data_block = data_block.flatten().tobytes()
+
+     data = zlib.decompress(data_block)
+     return json.loads(data, cls=EmbeddingDecoder)
+
+
+ class Embedding:
+     def __init__(self, vec, name, step=None):
+         self.vec = vec
+         self.name = name
+         self.step = step
+         self.shape = None
+         self.vectors = 0
+         self.sd_checkpoint = None
+         self.sd_checkpoint_name = None
+
+
+ class DirWithTextualInversionEmbeddings:
+     def __init__(self, path):
+         self.path = path
+         self.mtime = None
+
+     def has_changed(self):
+         if not os.path.isdir(self.path):
+             return False
+
+         mt = os.path.getmtime(self.path)
+         if self.mtime is None or mt > self.mtime:
+             return True
+
+         return False
+
+     def update(self):
+         if not os.path.isdir(self.path):
+             return
+
+         self.mtime = os.path.getmtime(self.path)
+
+
+ class worm:
+     """Database of textual inversion embeddings, keyed by the first token id of each trigger word."""
+
+     def __init__(self, tokenizer, expected_shape=-1):
+         self.ids_lookup = {}
+         self.word_embeddings = {}
+         self.embedding_dirs = {}
+         self.skipped_embeddings = {}
+         self.expected_shape = expected_shape
+         self.tokenizer = tokenizer
+         self.fixes = []
+
+     def add_embedding_dir(self, path):
+         self.embedding_dirs[path] = DirWithTextualInversionEmbeddings(path)
+
+     def clear_embedding_dirs(self):
+         self.embedding_dirs.clear()
+
+     def register_embedding(self, embedding):
+         return self.register_embedding_by_name(embedding, embedding.name)
+
+     def register_embedding_by_name(self, embedding, name):
+         ids = self.tokenizer([name], truncation=False, add_special_tokens=False)["input_ids"][0]
+         first_id = ids[0]
+         if first_id not in self.ids_lookup:
+             self.ids_lookup[first_id] = []
+         if name in self.word_embeddings:
+             # drop any existing entry with the same name before re-registering
+             lookup = [x for x in self.ids_lookup[first_id] if x[1].name != name]
+         else:
+             lookup = self.ids_lookup[first_id]
+         if embedding is not None:
+             lookup += [(ids, embedding)]
+         # longest token sequences first, so the most specific trigger word wins
+         self.ids_lookup[first_id] = sorted(lookup, key=lambda x: len(x[0]), reverse=True)
+         if embedding is None:
+             # unregister only: remove the name and prune empty lookup entries
+             if name in self.word_embeddings:
+                 del self.word_embeddings[name]
+             if len(self.ids_lookup[first_id]) == 0:
+                 del self.ids_lookup[first_id]
+             return None
+         self.word_embeddings[name] = embedding
+         return embedding
+
+     def load_from_file(self, path, filename):
+         name, ext = os.path.splitext(filename)
+         ext = ext.upper()
+
+         if ext in ['.PNG', '.WEBP', '.JXL', '.AVIF']:
+             _, second_ext = os.path.splitext(name)
+             if second_ext.upper() == '.PREVIEW':
+                 return
+
+             embed_image = Image.open(path)
+             if hasattr(embed_image, 'text') and 'sd-ti-embedding' in embed_image.text:
+                 data = embedding_from_b64(embed_image.text['sd-ti-embedding'])
+                 name = data.get('name', name)
+             else:
+                 data = extract_image_data_embed(embed_image)
+                 if data:
+                     name = data.get('name', name)
+                 else:
+                     return
+         elif ext in ['.BIN', '.PT']:
+             data = torch.load(path, map_location="cpu")
+         elif ext in ['.SAFETENSORS']:
+             data = safetensors.torch.load_file(path, device="cpu")
+         else:
+             return
+
+         if data is not None:
+             embedding = create_embedding_from_data(data, name, filename=filename, filepath=path)
+
+             if self.expected_shape == -1 or self.expected_shape == embedding.shape:
+                 self.register_embedding(embedding)
+             else:
+                 self.skipped_embeddings[name] = embedding
+         else:
+             print(f"Unable to load textual inversion embedding due to a data issue: '{name}'.")
+
+     def load_from_dir(self, embdir):
+         if not os.path.isdir(embdir.path):
+             return
+
+         for root, _, fns in os.walk(embdir.path, followlinks=True):
+             for fn in fns:
+                 try:
+                     fullfn = os.path.join(root, fn)
+
+                     if os.stat(fullfn).st_size == 0:
+                         continue
+
+                     self.load_from_file(fullfn, fn)
+                 except Exception:
+                     print(f"Error loading embedding {fn}")
+                     continue
+
+     def load_textual_inversion_embeddings(self):
+         self.ids_lookup.clear()
+         self.word_embeddings.clear()
+         self.skipped_embeddings.clear()
+
+         for embdir in self.embedding_dirs.values():
+             self.load_from_dir(embdir)
+             embdir.update()
+
+     def find_embedding_at_position(self, tokens, offset):
+         token = tokens[offset]
+         possible_matches = self.ids_lookup.get(token, None)
+
+         if possible_matches is None:
+             return None, None
+
+         for ids, embedding in possible_matches:
+             if tokens[offset:offset + len(ids)] == ids:
+                 return embedding, len(ids)
+
+         return None, None
+
+
+ def create_embedding_from_data(data, name, filename='unknown embedding file', filepath=None):
+     if 'string_to_param' in data:  # textual inversion embeddings
+         param_dict = data['string_to_param']
+         param_dict = getattr(param_dict, '_parameters', param_dict)  # fix for torch 1.12.1 loading saved file from torch 1.11
+         assert len(param_dict) == 1, 'embedding file has multiple terms in it'
+         emb = next(iter(param_dict.items()))[1]
+         vec = emb.detach().to(dtype=torch.float32)
+         shape = vec.shape[-1]
+         vectors = vec.shape[0]
+     elif type(data) == dict and 'clip_g' in data and 'clip_l' in data:  # SDXL embedding
+         vec = {k: v.detach().to(dtype=torch.float32) for k, v in data.items()}
+         shape = data['clip_g'].shape[-1] + data['clip_l'].shape[-1]
+         vectors = data['clip_g'].shape[0]
+     elif type(data) == dict and type(next(iter(data.values()))) == torch.Tensor:  # diffuser concepts
+         assert len(data.keys()) == 1, 'embedding file has multiple terms in it'
+
+         emb = next(iter(data.values()))
+         if len(emb.shape) == 1:
+             emb = emb.unsqueeze(0)
+         vec = emb.detach().to(dtype=torch.float32)
+         shape = vec.shape[-1]
+         vectors = vec.shape[0]
+     else:
+         raise Exception(f"Couldn't identify {filename} as either a textual inversion embedding or a diffuser concept.")
+
+     embedding = Embedding(vec, name)
+     embedding.step = data.get('step', None)
+     embedding.sd_checkpoint = data.get('sd_checkpoint', None)
+     embedding.sd_checkpoint_name = data.get('sd_checkpoint_name', None)
+     embedding.vectors = vectors
+     embedding.shape = shape
+
+     if filepath:
+         embedding.filename = filepath
+
+     return embedding
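
Usage note (not part of the commit): a sketch of using the embedding database directly, assuming a stock CLIPTokenizer and a hypothetical "embeddings" folder of .pt/.safetensors/.png files. Trigger words are matched against raw token ids, longest candidate first.

from transformers import CLIPTokenizer
from pig import worm

tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")

db = worm(tokenizer, expected_shape=768)  # embeddings of any other width are skipped
db.add_embedding_dir("embeddings")
db.load_textual_inversion_embeddings()

ids = tokenizer(["a photo of my-style"], add_special_tokens=False)["input_ids"][0]
for pos in range(len(ids)):
    emb, length = db.find_embedding_at_position(ids, pos)
    if emb is not None:
        print(f"'{emb.name}' covers {length} token(s) at position {pos}")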
Python_Infer_Utils/pigeon.py ADDED
@@ -0,0 +1,57 @@
+ import torch
+
+
+ class Emphasis:
+     """Base class for prompt emphasis: rescales hidden states by per-token weights."""
+
+     name: str = "Base"
+     description: str = ""
+
+     tokens: list[list[int]]
+     multipliers: torch.Tensor
+     z: torch.Tensor
+
+     def after_transformers(self):
+         pass
+
+
+ class EmphasisNone(Emphasis):
+     name = "None"
+     description = "disable the mechanism entirely and treat (:1.1) as literal characters"
+
+
+ class EmphasisIgnore(Emphasis):
+     name = "Ignore"
+     description = "treat all emphasised words as if they have no emphasis"
+
+
+ class EmphasisOriginal(Emphasis):
+     name = "Original"
+     description = "the original emphasis implementation"
+
+     def after_transformers(self):
+         original_mean = self.z.mean()
+         self.z = self.z * self.multipliers.reshape(self.multipliers.shape + (1,)).expand(self.z.shape)
+         # restore the original mean so the overall conditioning magnitude is unchanged
+         new_mean = self.z.mean()
+         self.z = self.z * (original_mean / new_mean)
+
+
+ class EmphasisOriginalNoNorm(EmphasisOriginal):
+     name = "No norm"
+     description = "same as original, but without normalization (seems to work better for SDXL)"
+
+     def after_transformers(self):
+         self.z = self.z * self.multipliers.reshape(self.multipliers.shape + (1,)).expand(self.z.shape)
+
+
+ def get_current_option():
+     return EmphasisOriginal
+
+
+ def get_options_descriptions():
+     return ", ".join(f"{x.name}: {x.description}" for x in options)
+
+
+ options = [
+     EmphasisNone,
+     EmphasisIgnore,
+     EmphasisOriginal,
+     EmphasisOriginalNoNorm,
+ ]
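
Usage note (not part of the commit): a toy check of the "Original" emphasis math — hidden states are scaled per token, then the whole tensor is rescaled so its mean matches the pre-emphasis mean.

import torch
from pigeon import EmphasisOriginal

emphasis = EmphasisOriginal()
emphasis.z = torch.ones(1, 3, 4)                        # (batch, tokens, features)
emphasis.multipliers = torch.tensor([[1.0, 2.0, 1.0]])  # per-token weights
emphasis.after_transformers()

print(emphasis.z[0, :, 0])  # tensor([0.7500, 1.5000, 0.7500])
print(emphasis.z.mean())    # tensor(1.) -- original mean restored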