---
license: apache-2.0
---

CLIP model post-trained on 80M human face images.
```python
from PIL import Image
import requests

from transformers import CLIPProcessor, CLIPModel

model = CLIPModel.from_pretrained("P01son/FaceCLIP-base-32")
processor = CLIPProcessor.from_pretrained("P01son/FaceCLIP-base-32")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True)

outputs = model(**inputs)
logits_per_image = outputs.logits_per_image  # this is the image-text similarity score
probs = logits_per_image.softmax(dim=1)  # we can take the softmax to get the label probabilities
```
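
Since the model is post-trained on face images, embedding extraction is another common use. Below is a minimal sketch using the standard `get_image_features` / `get_text_features` API of `CLIPModel`; the example image URL and face-attribute prompts are illustrative assumptions, not part of this model card.

```python
import torch
import requests
from PIL import Image
from transformers import CLIPProcessor, CLIPModel

model = CLIPModel.from_pretrained("P01son/FaceCLIP-base-32")
processor = CLIPProcessor.from_pretrained("P01son/FaceCLIP-base-32")

# Illustrative inputs (assumptions): any face image URL and candidate prompts.
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
texts = ["a photo of a smiling face", "a photo of a neutral face"]

image_inputs = processor(images=image, return_tensors="pt")
text_inputs = processor(text=texts, return_tensors="pt", padding=True)

with torch.no_grad():
    # Project image and text into CLIP's shared embedding space.
    image_emb = model.get_image_features(**image_inputs)
    text_emb = model.get_text_features(**text_inputs)

# L2-normalize so the dot product is cosine similarity.
image_emb = image_emb / image_emb.norm(dim=-1, keepdim=True)
text_emb = text_emb / text_emb.norm(dim=-1, keepdim=True)

similarity = image_emb @ text_emb.T  # shape (1, len(texts))
print(similarity)
```

The cosine similarities can be ranked (or softmaxed, as in the first snippet) to pick the best-matching prompt; the raw embeddings are also usable directly for retrieval or clustering over face images.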