Upload 3 files
Browse files
- embeddings.safetensors +3 -0
- fullword.json +0 -0
- generate-embeddings.py +73 -0
embeddings.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:617a2de31c505ca771ef354528371573d36d065a6fb9ba4b191f71f277162790
+size 101164120
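The block above is only the Git LFS pointer, not the tensor data itself; the actual ~101 MB file holds the embeddings written by generate-embeddings.py (below) under the key "embeddings". As a minimal sketch, assuming the LFS file has been pulled locally, it can be read back with safetensors:

from safetensors.torch import load_file

tensors = load_file("embeddings.safetensors")   # loads to CPU by default
embs = tensors["embeddings"]                    # one CLIP text embedding per word
print(embs.shape)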
fullword.json ADDED
The diff for this file is too large to render. See raw diff.
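The contents of fullword.json are not rendered in this view. Judging from how generate-embeddings.py consumes it (json.load into tokendict, then iterating tokendict.keys()), it is a JSON object whose top-level keys are the words to embed; the sketch below relies only on that, and the example entries are hypothetical:

import json

# Illustrative only -- the real file is too large to render in this diff.
with open("fullword.json", "r") as f:
    tokendict = json.load(f)

words = list(tokendict.keys())   # e.g. ["apple", "banana", ...] (hypothetical entries)
print(len(words), "words to embed")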
generate-embeddings.py ADDED
@@ -0,0 +1,73 @@
+#!/usr/bin/python3
+
+""" Work in progress
+Plan:
+   Read in fullword.json for the list of words and tokens
+   Generate "proper" embedding for each token, and store in tensor file
+   Generate a tensor array of distance to every other token/embedding
+   Save it out
+"""
+
+
+import sys
+import json
+import torch
+from safetensors.torch import save_file
+from transformers import CLIPProcessor, CLIPModel
+
+clipsrc = "openai/clip-vit-large-patch14"
+processor = None
+model = None
+
+device = torch.device("cuda")
+
+
+def init():
+    global processor
+    global model
+    # Load the processor and model
+    print("loading processor from " + clipsrc, file=sys.stderr)
+    processor = CLIPProcessor.from_pretrained(clipsrc)
+    print("done", file=sys.stderr)
+    print("loading model from " + clipsrc, file=sys.stderr)
+    model = CLIPModel.from_pretrained(clipsrc)
+    print("done", file=sys.stderr)
+
+    model = model.to(device)
+
+# Expect SINGLE WORD ONLY
+def standard_embed_calc(text):
+    inputs = processor(text=text, return_tensors="pt")
+    inputs = inputs.to(device)
+    with torch.no_grad():
+        text_features = model.get_text_features(**inputs)
+        embedding = text_features[0]
+        return embedding
+
+
+init()
+
+print("read in words from json now", file=sys.stderr)
+with open("fullword.json", "r") as f:
+    tokendict = json.load(f)
+
+print("generate embeddings for each now", file=sys.stderr)
+count = 1
+all_embeddings = []
+for word in tokendict.keys():
+    emb = standard_embed_calc(word)
+    emb = emb.unsqueeze(0)  # add a batch dimension so torch.cat can stack them
+    all_embeddings.append(emb)
+    count += 1
+    if (count % 100) == 0:
+        print(count)
+
+embs = torch.cat(all_embeddings, dim=0)
+print("Shape of result = ", embs.shape)
+print("Saving all the things...")
+save_file({"embeddings": embs}, "embeddings.safetensors")
+
+
+print("calculate distances now")
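The script ends at the "calculate distances now" marker, so the distance step from the plan is not implemented yet. Below is a minimal sketch of what it could look like, assuming embs is the (num_words, embed_dim) tensor saved above; the choice of torch.cdist (pairwise Euclidean distance), the move to CPU, and the output filename distances.safetensors are all assumptions, not part of the uploaded script:

# Hedged sketch of the planned distance computation -- not in the original file.
with torch.no_grad():
    emb_cpu = embs.to("cpu")                    # the full matrix can be several GB
    distances = torch.cdist(emb_cpu, emb_cpu)   # shape: (num_words, num_words)

print("Shape of distance matrix = ", distances.shape)
save_file({"distances": distances.contiguous()}, "distances.safetensors")

For tens of thousands of words the full float32 matrix runs to several gigabytes, so computing it in row chunks, or keeping only nearest neighbours per word, may be more practical.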