Coercer committed on
Commit 40361e4 · verified · 1 Parent(s): fcee33c

Upload Swan.py

Files changed (1)
  1. Python_Infer_Utils/Swan.py +318 -0
Python_Infer_Utils/Swan.py ADDED
@@ -0,0 +1,318 @@
+ import math
+ import torch
+
+ from collections import namedtuple
+ from backend.text_processing import parsing, emphasis
+ from backend.text_processing.textual_inversion import EmbeddingDatabase
+ from backend import memory_management
+
+ from modules.shared import opts
+
+
+ ChickenFix = namedtuple('ChickenFix', ['offset', 'embedding'])
+ last_extra_generation_params = {}
+
+
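+ # Chicken holds one prompt chunk: a list of token ids, matching per-token weight
+ # multipliers, and 'fixes' recording where textual inversion embeddings are spliced in.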
+ class Chicken:
+     def __init__(self):
+         self.tokens = []
+         self.multipliers = []
+         self.fixes = []
+
+
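+ # Dog wraps the text model's token embedding layer so that textual inversion vectors
+ # can be substituted into its output at the offsets recorded in the fixes.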
+ class Dog(torch.nn.Module):
+     def __init__(self, wrapped, embeddings, textual_inversion_key='clip_l'):
+         super().__init__()
+         self.wrapped = wrapped
+         self.embeddings = embeddings
+         self.textual_inversion_key = textual_inversion_key
+         self.weight = self.wrapped.weight
+
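+     # Embed the input ids, then overwrite the positions flagged by self.embeddings.fixes
+     # with the corresponding textual inversion vectors (cast to the embedding dtype).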
+     def forward(self, input_ids):
+         batch_fixes = self.embeddings.fixes
+         self.embeddings.fixes = None
+
+         inputs_embeds = self.wrapped(input_ids)
+
+         if batch_fixes is None or len(batch_fixes) == 0 or max([len(x) for x in batch_fixes]) == 0:
+             return inputs_embeds
+
+         vecs = []
+         for fixes, tensor in zip(batch_fixes, inputs_embeds):
+             for offset, embedding in fixes:
+                 emb = embedding.vec[self.textual_inversion_key] if isinstance(embedding.vec, dict) else embedding.vec
+                 emb = emb.to(inputs_embeds)
+                 emb_len = min(tensor.shape[0] - offset - 1, emb.shape[0])
+                 tensor = torch.cat([tensor[0:offset + 1], emb[0:emb_len], tensor[offset + 1 + emb_len:]]).to(dtype=inputs_embeds.dtype)
+
+             vecs.append(tensor)
+
+         return torch.stack(vecs)
+
+
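+ # Eagle is the prompt processing engine: it tokenizes prompts, splits them into
+ # fixed-length chunks wrapped in BOS/EOS, resolves textual inversion embeddings,
+ # and encodes each chunk with the CLIP text encoder, applying emphasis weights.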
+ class Eagle:
+     def __init__(
+             self, text_encoder, tokenizer, chunk_length=75,
+             embedding_dir=None, embedding_key='clip_l', embedding_expected_shape=768, emphasis_name="Original",
+             text_projection=False, minimal_clip_skip=1, clip_skip=1, return_pooled=False, final_layer_norm=True
+     ):
+         super().__init__()
+
+         self.embeddings = EmbeddingDatabase(tokenizer, embedding_expected_shape)
+
+         if isinstance(embedding_dir, str):
+             self.embeddings.add_embedding_dir(embedding_dir)
+             self.embeddings.load_textual_inversion_embeddings()
+
+         self.embedding_key = embedding_key
+
+         self.text_encoder = text_encoder
+         self.tokenizer = tokenizer
+
+         self.emphasis = emphasis.get_current_option(opts.emphasis)()
+         self.text_projection = text_projection
+         self.minimal_clip_skip = minimal_clip_skip
+         self.clip_skip = clip_skip
+         self.return_pooled = return_pooled
+         self.final_layer_norm = final_layer_norm
+
+         self.chunk_length = chunk_length
+
+         self.id_start = self.tokenizer.bos_token_id
+         self.id_end = self.tokenizer.eos_token_id
+         self.id_pad = self.tokenizer.pad_token_id
+
+         model_embeddings = text_encoder.transformer.text_model.embeddings
+         model_embeddings.token_embedding = Dog(model_embeddings.token_embedding, self.embeddings, textual_inversion_key=embedding_key)
+
+         vocab = self.tokenizer.get_vocab()
+
+         self.comma_token = vocab.get(',</w>', None)
+
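+         # Build a table of emphasis multipliers for tokens that contain () or []:
+         # each '(' scales the weight by 1.1, each '[' by 1/1.1, and ')' / ']' undo them.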
+         self.token_mults = {}
+
+         tokens_with_parens = [(k, v) for k, v in vocab.items() if '(' in k or ')' in k or '[' in k or ']' in k]
+         for text, ident in tokens_with_parens:
+             mult = 1.0
+             for c in text:
+                 if c == '[':
+                     mult /= 1.1
+                 if c == ']':
+                     mult *= 1.1
+                 if c == '(':
+                     mult *= 1.1
+                 if c == ')':
+                     mult /= 1.1
+
+             if mult != 1.0:
+                 self.token_mults[ident] = mult
+
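+     # An empty chunk is a BOS token followed by EOS padding, all with neutral weight 1.0,
+     # used to pad batch entries whose prompts produced fewer chunks than the longest one.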
+     def empty_chunk(self):
+         chunk = Chicken()
+         chunk.tokens = [self.id_start] + [self.id_end] * (self.chunk_length + 1)
+         chunk.multipliers = [1.0] * (self.chunk_length + 2)
+         return chunk
+
+     def get_target_prompt_token_count(self, token_count):
+         return math.ceil(max(token_count, 1) / self.chunk_length) * self.chunk_length
+
+     def tokenize(self, texts):
+         tokenized = self.tokenizer(texts, truncation=False, add_special_tokens=False)["input_ids"]
+
+         return tokenized
+
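+     # Run the CLIP transformer and pick the hidden state selected by clip skip
+     # (counting layers from the end), optionally applying the final layer norm and
+     # attaching the (optionally projected) pooled output.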
+     def encode_with_transformers(self, tokens):
+         target_device = memory_management.text_encoder_device()
+
+         self.text_encoder.transformer.text_model.embeddings.position_ids = self.text_encoder.transformer.text_model.embeddings.position_ids.to(device=target_device)
+         self.text_encoder.transformer.text_model.embeddings.position_embedding = self.text_encoder.transformer.text_model.embeddings.position_embedding.to(dtype=torch.float32)
+         self.text_encoder.transformer.text_model.embeddings.token_embedding = self.text_encoder.transformer.text_model.embeddings.token_embedding.to(dtype=torch.float32)
+
+         tokens = tokens.to(target_device)
+
+         outputs = self.text_encoder.transformer(tokens, output_hidden_states=True)
+
+         layer_id = -max(self.clip_skip, self.minimal_clip_skip)
+         z = outputs.hidden_states[layer_id]
+
+         if self.final_layer_norm:
+             z = self.text_encoder.transformer.text_model.final_layer_norm(z)
+
+         if self.return_pooled:
+             pooled_output = outputs.pooler_output
+
+             if self.text_projection and self.embedding_key != 'clip_l':
+                 pooled_output = self.text_encoder.transformer.text_projection(pooled_output)
+
+             z.pooled = pooled_output
+         return z
+
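+     # Convert one prompt line into a list of Chicken chunks of exactly chunk_length
+     # tokens (plus BOS/EOS), honoring 'BREAK' keywords, backtracking to the last comma
+     # when a chunk overflows, and reserving slots for textual inversion embeddings.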
+     def tokenize_line(self, line):
+         parsed = parsing.parse_prompt_attention(line, self.emphasis.name)
+
+         tokenized = self.tokenize([text for text, _ in parsed])
+
+         chunks = []
+         chunk = Chicken()
+         token_count = 0
+         last_comma = -1
+
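+         # Close the current chunk: pad it to chunk_length with EOS, wrap it in BOS/EOS
+         # with neutral weights, append it to chunks, and start a fresh Chicken.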
+         def next_chunk(is_last=False):
+             nonlocal token_count
+             nonlocal last_comma
+             nonlocal chunk
+
+             if is_last:
+                 token_count += len(chunk.tokens)
+             else:
+                 token_count += self.chunk_length
+
+             to_add = self.chunk_length - len(chunk.tokens)
+             if to_add > 0:
+                 chunk.tokens += [self.id_end] * to_add
+                 chunk.multipliers += [1.0] * to_add
+
+             chunk.tokens = [self.id_start] + chunk.tokens + [self.id_end]
+             chunk.multipliers = [1.0] + chunk.multipliers + [1.0]
+
+             last_comma = -1
+             chunks.append(chunk)
+             chunk = Chicken()
+
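+         # Walk the tokenized segments in parallel with their parsed weights, starting a
+         # new chunk on 'BREAK', on comma backtracking, on chunk overflow, or when an
+         # embedding would not fit in the remaining space.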
+         for tokens, (text, weight) in zip(tokenized, parsed):
+             if text == 'BREAK' and weight == -1:
+                 next_chunk()
+                 continue
+
+             position = 0
+             while position < len(tokens):
+                 token = tokens[position]
+
+                 comma_padding_backtrack = 20
+
+                 if token == self.comma_token:
+                     last_comma = len(chunk.tokens)
+
+                 elif comma_padding_backtrack != 0 and len(chunk.tokens) == self.chunk_length and last_comma != -1 and len(chunk.tokens) - last_comma <= comma_padding_backtrack:
+                     break_location = last_comma + 1
+
+                     reloc_tokens = chunk.tokens[break_location:]
+                     reloc_mults = chunk.multipliers[break_location:]
+
+                     chunk.tokens = chunk.tokens[:break_location]
+                     chunk.multipliers = chunk.multipliers[:break_location]
+
+                     next_chunk()
+                     chunk.tokens = reloc_tokens
+                     chunk.multipliers = reloc_mults
+
+                 if len(chunk.tokens) == self.chunk_length:
+                     next_chunk()
+
+                 embedding, embedding_length_in_tokens = self.embeddings.find_embedding_at_position(tokens, position)
+                 if embedding is None:
+                     chunk.tokens.append(token)
+                     chunk.multipliers.append(weight)
+                     position += 1
+                     continue
+
+                 emb_len = int(embedding.vectors)
+                 if len(chunk.tokens) + emb_len > self.chunk_length:
+                     next_chunk()
+
+                 chunk.fixes.append(ChickenFix(len(chunk.tokens), embedding))
+
+                 chunk.tokens += [0] * emb_len
+                 chunk.multipliers += [weight] * emb_len
+                 position += embedding_length_in_tokens
+
+         if chunk.tokens or not chunks:
+             next_chunk(is_last=True)
+
+         return chunks, token_count
+
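+     # Tokenize a batch of prompt lines into chunk lists, caching repeated lines and
+     # tracking the largest token count seen across the batch.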
+     def process_texts(self, texts):
+         token_count = 0
+
+         cache = {}
+         batch_chunks = []
+         for line in texts:
+             if line in cache:
+                 chunks = cache[line]
+             else:
+                 chunks, current_token_count = self.tokenize_line(line)
+                 token_count = max(current_token_count, token_count)
+
+                 cache[line] = chunks
+
+             batch_chunks.append(chunks)
+
+         return batch_chunks, token_count
+
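+     # Encode a batch of prompts: process each chunk column across the batch, record
+     # which textual inversion embeddings were used, and concatenate the per-chunk
+     # encodings along the sequence dimension.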
+     def __call__(self, texts):
+         self.emphasis = emphasis.get_current_option(opts.emphasis)()
+
+         batch_chunks, token_count = self.process_texts(texts)
+
+         used_embeddings = {}
+         chunk_count = max([len(x) for x in batch_chunks])
+
+         zs = []
+         for i in range(chunk_count):
+             batch_chunk = [chunks[i] if i < len(chunks) else self.empty_chunk() for chunks in batch_chunks]
+
+             tokens = [x.tokens for x in batch_chunk]
+             multipliers = [x.multipliers for x in batch_chunk]
+             self.embeddings.fixes = [x.fixes for x in batch_chunk]
+
+             for fixes in self.embeddings.fixes:
+                 for _position, embedding in fixes:
+                     used_embeddings[embedding.name] = embedding
+
+             z = self.process_tokens(tokens, multipliers)
+             zs.append(z)
+
+         global last_extra_generation_params
+
+         if used_embeddings:
+             names = []
+
+             for name, embedding in used_embeddings.items():
+                 print(f'[Textual Inversion] Used Embedding [{name}] in CLIP of [{self.embedding_key}]')
+                 names.append(name.replace(":", "").replace(",", ""))
+
+             if "TI" in last_extra_generation_params:
+                 last_extra_generation_params["TI"] += ", " + ", ".join(names)
+             else:
+                 last_extra_generation_params["TI"] = ", ".join(names)
+
+         if any(x for x in texts if "(" in x or "[" in x) and self.emphasis.name != "Original":
+             last_extra_generation_params["Emphasis"] = self.emphasis.name
+
+         if self.return_pooled:
+             return torch.hstack(zs), zs[0].pooled
+         else:
+             return torch.hstack(zs)
+
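+     # Encode one batch of already-chunked token ids: replace everything after the first
+     # EOS with the pad token when EOS and pad differ, run the text encoder, then let the
+     # emphasis implementation rescale the hidden states by the per-token multipliers.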
+     def process_tokens(self, remade_batch_tokens, batch_multipliers):
+         tokens = torch.asarray(remade_batch_tokens)
+
+         if self.id_end != self.id_pad:
+             for batch_pos in range(len(remade_batch_tokens)):
+                 index = remade_batch_tokens[batch_pos].index(self.id_end)
+                 tokens[batch_pos, index + 1:tokens.shape[1]] = self.id_pad
+
+         z = self.encode_with_transformers(tokens)
+
+         pooled = getattr(z, 'pooled', None)
+
+         self.emphasis.tokens = remade_batch_tokens
+         self.emphasis.multipliers = torch.asarray(batch_multipliers).to(z)
+         self.emphasis.z = z
+         self.emphasis.after_transformers()
+         z = self.emphasis.z
+
+         if pooled is not None:
+             z.pooled = pooled
+
+         return z