"""
Take a CLIPTextModel-compatible text encoder.
Step through the official range of token IDs (0-49405),
generate the embedding tensor for each one, and
save the result set to "cliptextmodel.tempXL.allids.safetensors".

The tokenizer/processor is always loaded from openai/clip-vit-large-patch14 on the
huggingface hub. The text encoder itself is loaded from two required arguments:
a .safetensors model file and its matching config file.

RULES of the loader:
    1. The model file must be in the current directory or one level below it, so these are bad:
           badpath1=some/directory/tree/file.here
           badpath2=/absolutepath
    2. Yes, you MUST have a matching config.json file.
    3. If you have no alternative, you can get away with using pytorch_model.bin.

Sample location for such things that you can download:
    https://huggingface.co/stablediffusionapi/edge-of-realism/tree/main/text_encoder/
If there is a .safetensors AND a .bin file, ignore the .bin file.

You can also convert a single-file model, such as one downloaded from civitai,
by using the utility at
    https://github.com/huggingface/diffusers/blob/main/scripts/convert_original_stable_diffusion_to_diffusers.py
Args should look like:
    convert_original_stable_diffusion_to_diffusers.py --checkpoint_file somemodel.safetensors \
        --dump_path extractdir --to_safetensors --from_safetensors
"""

import sys

import torch
from safetensors.torch import save_file
from transformers import CLIPProcessor, CLIPTextModelWithProjection

# Globals populated by initXLCLIPmodel()
processor = None
tmodel2 = None
model_path2 = None
model_config2 = None

if len(sys.argv) == 3:
    model_path2 = sys.argv[1]
    model_config2 = sys.argv[2]
else:
    print("You have to give the name of the model file and its config file")
    sys.exit(1)
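# Example invocation (paths are illustrative):
#   python <this_script>.py text_encoder/model.safetensors text_encoder/config.json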

# Use the GPU if available; fall back to CPU otherwise
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


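# Load the CLIP processor (tokenizer) from the reference openai model, and the
# text-encoder weights from the user-supplied .safetensors file + config.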
def initXLCLIPmodel(model_path, model_config):
    global tmodel2, processor
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")

    print("loading", model_path)
    tmodel2 = CLIPTextModelWithProjection.from_pretrained(
        model_path, config=model_config, local_files_only=True, use_safetensors=True
    )
    tmodel2.to(device)


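# Embed a raw text string with the loaded model. (Kept as a convenience helper;
# the main loop below feeds a pre-tokenized input instead.)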
def embed_from_text2(text):
    global processor, tmodel2
    inputs = processor(text=text, return_tensors="pt")
    inputs.to(device)

    print("getting embeddings2")
    with torch.no_grad():
        outputs = tmodel2(**inputs)
    embeddings = outputs.text_embeds
    return embeddings


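# Embed an already-tokenized input batch; inference only, so gradients are disabled.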
def embed_from_inputs(inputs):
    global processor, tmodel2
    with torch.no_grad():
        outputs = tmodel2(**inputs)
    embedding = outputs.text_embeds
    return embedding


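# Main flow: load the model, tokenize a one-word dummy prompt, then reuse that
# tokenized input for every vocabulary ID.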
initXLCLIPmodel(model_path2, model_config2)
inputs = processor(text="dummy", return_tensors="pt")
inputs.to(device)

all_embeddings = []

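# The tokenized "dummy" prompt has the form [BOS, token(s), EOS]; overwriting
# position 1 (the slot right after BOS) with each vocabulary ID yields that ID's
# projected text embedding.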
for token_id in range(49406):   # IDs 0-49405; 49406/49407 are the BOS/EOS special tokens
    inputs.input_ids[0][1] = token_id

    emb = embed_from_inputs(inputs)
    all_embeddings.append(emb)
    if (token_id % 100) == 0:
        print(token_id)

embs = torch.cat(all_embeddings, dim=0)
print("Shape of result =", embs.shape)

outputfile = "cliptextmodel.tempXL.allids.safetensors"
print(f"Saving the calculations to {outputfile}...")
save_file({"embeddings": embs}, outputfile)
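# A minimal sketch of how to read the output back later (the "embeddings" key
# matches the save_file() call above):
#   from safetensors.torch import load_file
#   embs = load_file("cliptextmodel.tempXL.allids.safetensors")["embeddings"]
#   print(embs.shape)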