# Basic OpenCLIP usage: load a pretrained CLIP model and encode text.
import torch
from PIL import Image
import open_clip

model_name = 'ViT-B-32'
pretrained = 'laion2b_s34b_b79k'
print("Loading", model_name, pretrained)

# Returns the model plus the train/eval image preprocessing transforms.
model, _, preprocess = open_clip.create_model_and_transforms(model_name,
                                                             pretrained=pretrained)
model.eval()  # models are created in train mode by default
tokenizer = open_clip.get_tokenizer(model_name)
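# Optional sanity check (a sketch, not required for loading):
# open_clip.list_pretrained() returns the supported
# (architecture, pretrained_tag) pairs, so a typo in either
# name can be caught explicitly.
assert (model_name, pretrained) in open_clip.list_pretrained()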
# image = preprocess(Image.open("CLIP.png")).unsqueeze(0)

# Tokenize a batch of prompts into a (batch, context_length) int tensor.
text = tokenizer(["a diagram", "a dog", "a cat"])
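# The tokenizer pads/truncates every prompt to the model's context length
# (77 tokens for CLIP models), so `text` here has shape (3, 77).
print(text.shape)  # torch.Size([3, 77])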
with torch.no_grad(), torch.cuda.amp.autocast():
    # image_features = model.encode_image(image)
    text_features = model.encode_text(text)  # shape: (3, embed_dim)

embedding = text_features[0]  # embedding of the first prompt, "a diagram"
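# A minimal follow-up sketch: CLIP features are compared via cosine
# similarity, so they are L2-normalized first. With the image lines commented
# out above, this scores the three text prompts against each other; uncomment
# them to compute image-text similarities instead.
text_features = text_features / text_features.norm(dim=-1, keepdim=True)
similarity = text_features @ text_features.T  # (3, 3) cosine-similarity matrix
print(similarity)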