XL version of generate-allid-embeddings.py
generate-allid-embeddingsXL.py  +108 -0
generate-allid-embeddingsXL.py
ADDED
@@ -0,0 +1,108 @@
#!/bin/env python

"""
Take a CLIPTextModelWithProjection-compatible text encoder.
Go through the official range of token IDs (0-49405).
Generate the official "embedding" tensor for each one.
Save the result set to "cliptextmodel.tempXL.allids.safetensors".

The processor/tokenizer is always loaded from openai/clip-vit-large-patch14
on the huggingface hub. The text encoder itself must be given as a pair of
arguments: a .safetensors model file and its config file.
RULES of the loader:
1. The model file must appear to be either in the current directory or one down. So,
   badpath1=some/directory/tree/file.here
   badpath2=/absolutepath
2. Yes, you MUST have a matching config.json file.
3. If you have no alternative, you can get away with using pytorch_model.bin.

Sample location for such things that you can download:
https://huggingface.co/stablediffusionapi/edge-of-realism/tree/main/text_encoder/
If there is a .safetensors AND a .bin file, ignore the .bin file.

You can also convert a single-file model, such as one downloaded from civitai,
by using the utility at
https://github.com/huggingface/diffusers/blob/main/scripts/convert_original_stable_diffusion_to_diffusers.py
Args should look like
    convert_original_stable_diffusion_to_diffusers.py --checkpoint_file somemodel.safetensors \
        --dump_path extractdir --to_safetensors --from_safetensors
"""

import sys
import json
import torch
from safetensors.torch import save_file
from transformers import CLIPProcessor, CLIPTextModel, CLIPTextModelWithProjection

processor = None
tmodel2 = None
model_path2 = None
model_config2 = None

if len(sys.argv) == 3:
    model_path2 = sys.argv[1]
    model_config2 = sys.argv[2]
else:
    print("You have to give the name of the model file and the config file")
    sys.exit(1)

device = torch.device("cuda")


def initXLCLIPmodel(model_path, model_config):
    global tmodel2, processor
    # Yes, oddly they all use the same processor, basically.
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")

    print("loading", model_path)
    tmodel2 = CLIPTextModelWithProjection.from_pretrained(
        model_path, config=model_config, local_files_only=True, use_safetensors=True
    )
    tmodel2.to(device)


def embed_from_text2(text):
    global processor, tmodel2
    inputs = processor(text=text, return_tensors="pt")
    inputs.to(device)

    print("getting embeddings2")
    outputs = tmodel2(**inputs)
    embeddings = outputs.text_embeds
    return embeddings


# "inputs" == magic pre-embedding format (a tokenized BatchEncoding)
def embed_from_inputs(inputs):
    global processor, tmodel2
    with torch.no_grad():
        outputs = tmodel2(**inputs)
        embedding = outputs.text_embeds

    return embedding


initXLCLIPmodel(model_path2, model_config2)

# Tokenize a dummy prompt once, then overwrite the token in slot 1
# (between BOS and EOS) with each ID in turn.
inputs = processor(text="dummy", return_tensors="pt")
inputs.to(device)

all_embeddings = []

for id in range(49405):
    inputs.input_ids[0][1] = id

    emb = embed_from_inputs(inputs)
    all_embeddings.append(emb)
    if (id % 100) == 0:
        print(id)

embs = torch.cat(all_embeddings, dim=0)
print("Shape of result = ", embs.shape)

outputfile = "cliptextmodel.tempXL.allids.safetensors"
print(f"Saving the calculations to {outputfile}...")
save_file({"embeddings": embs}, outputfile)
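For reference, a minimal sketch of how the saved file might be consumed afterwards. The invocation line and the token id used here are only illustrative placeholders, not part of the script itself:

# Hypothetical usage, assuming the script was run as:
#   python generate-allid-embeddingsXL.py model.safetensors config.json
# (both file names above are placeholders)
from safetensors.torch import load_file

data = load_file("cliptextmodel.tempXL.allids.safetensors")  # name hard-coded in the script
embs = data["embeddings"]
print(embs.shape)        # one row per token id: [49405, projection_dim of the loaded encoder]

token_id = 320           # illustrative token id to inspect
print(embs[token_id])    # the text_embeds row generated for that id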