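The edit_image() function below relies on an InstructPix2Pix pipeline and a seeded generator created earlier in the guide. A minimal sketch of that setup (the checkpoint name and seed here are illustrative assumptions):

import torch
from diffusers import StableDiffusionInstructPix2PixPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

# Assumed setup: an InstructPix2Pix checkpoint and a fixed seed for reproducibility.
instruct_pix2pix_pipeline = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix"
).to(device)
generator = torch.Generator(device=device).manual_seed(0)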
def edit_image(input_image, instruction):
    # Run one edit and return the edited image as a NumPy array.
    image = instruct_pix2pix_pipeline(
        instruction,
        image=input_image,
        output_type="np",
        generator=generator,
    ).images[0]
    return image
input_images = []
original_captions = []
modified_captions = []
edited_images = []

for idx in range(len(dataset)):
    input_image = dataset[idx]["image"]
    edit_instruction = dataset[idx]["edit"]
    edited_image = edit_image(input_image, edit_instruction)

    input_images.append(np.array(input_image))
    original_captions.append(dataset[idx]["input"])
    modified_captions.append(dataset[idx]["output"])
    edited_images.append(edited_image)

To measure the directional similarity, we first load CLIP’s image and text encoders:

from transformers import (
    CLIPTokenizer,
    CLIPTextModelWithProjection,
    CLIPVisionModelWithProjection,
    CLIPImageProcessor,
)
clip_id = "openai/clip-vit-large-patch14"
tokenizer = CLIPTokenizer.from_pretrained(clip_id)
text_encoder = CLIPTextModelWithProjection.from_pretrained(clip_id).to(device)
image_processor = CLIPImageProcessor.from_pretrained(clip_id)
image_encoder = CLIPVisionModelWithProjection.from_pretrained(clip_id).to(device)

Notice that we are using a particular CLIP checkpoint, openai/clip-vit-large-patch14, because the Stable Diffusion pre-training was performed with this CLIP variant. For more details, refer to the documentation.

Next, we prepare a PyTorch nn.Module to compute the directional similarity:
import torch.nn as nn
import torch.nn.functional as F


class DirectionalSimilarity(nn.Module):
    def __init__(self, tokenizer, text_encoder, image_processor, image_encoder):
        super().__init__()
        self.tokenizer = tokenizer
        self.text_encoder = text_encoder
        self.image_processor = image_processor
        self.image_encoder = image_encoder

    def preprocess_image(self, image):
        image = self.image_processor(image, return_tensors="pt")["pixel_values"]
        return {"pixel_values": image.to(device)}

    def tokenize_text(self, text):
        inputs = self.tokenizer(
            text,
            max_length=self.tokenizer.model_max_length,
            padding="max_length",
            truncation=True,
            return_tensors="pt",
        )
        return {"input_ids": inputs.input_ids.to(device)}

    def encode_image(self, image):
        preprocessed_image = self.preprocess_image(image)
        image_features = self.image_encoder(**preprocessed_image).image_embeds
        # Normalize so cosine similarity reduces to a dot product.
        image_features = image_features / image_features.norm(dim=1, keepdim=True)
        return image_features

    def encode_text(self, text):
        tokenized_text = self.tokenize_text(text)
        text_features = self.text_encoder(**tokenized_text).text_embeds
        text_features = text_features / text_features.norm(dim=1, keepdim=True)
        return text_features
    def compute_directional_similarity(self, img_feat_one, img_feat_two, text_feat_one, text_feat_two):
        sim_direction = F.cosine_similarity(img_feat_two - img_feat_one, text_feat_two - text_feat_one)
        return sim_direction

    def forward(self, image_one, image_two, caption_one, caption_two):
        img_feat_one = self.encode_image(image_one)
        img_feat_two = self.encode_image(image_two)
        text_feat_one = self.encode_text(caption_one)
        text_feat_two = self.encode_text(caption_two)
        directional_similarity = self.compute_directional_similarity(
            img_feat_one, img_feat_two, text_feat_one, text_feat_two
        )
        return directional_similarity
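To build intuition for what compute_directional_similarity() measures, here is a toy sketch on made-up 2-D "embeddings" (purely illustrative, not real CLIP features):

import torch
import torch.nn.functional as F

# Hypothetical unit vectors standing in for CLIP embeddings.
img_feat_one = torch.tensor([[1.0, 0.0]])   # original image
img_feat_two = torch.tensor([[0.0, 1.0]])   # edited image
text_feat_one = torch.tensor([[1.0, 0.0]])  # original caption
text_feat_two = torch.tensor([[0.0, 1.0]])  # modified caption

# The change in image space points in exactly the same direction as the
# change in text space, so the directional similarity is 1.0.
print(F.cosine_similarity(img_feat_two - img_feat_one, text_feat_two - text_feat_one))
# tensor([1.])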
Let’s put DirectionalSimilarity to use now.

dir_similarity = DirectionalSimilarity(tokenizer, text_encoder, image_processor, image_encoder)

scores = []
for i in range(len(input_images)):
    original_image = input_images[i]
    original_caption = original_captions[i]
    edited_image = edited_images[i]
    modified_caption = modified_captions[i]

    similarity_score = dir_similarity(original_image, edited_image, original_caption, modified_caption)
    scores.append(float(similarity_score.detach().cpu()))
print(f"CLIP directional similarity: {np.mean(scores)}") |
# CLIP directional similarity: 0.0797976553440094 Like the CLIP Score, the higher the CLIP directional similarity, the better it is. It should be noted that the StableDiffusionInstructPix2PixPipeline exposes two arguments, namely, image_guidance_scale and guidance_scale that let you control the quality of the final edi... |
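For example, one could vary these two knobs when generating the edits (a sketch; the values below are illustrative, not tuned recommendations):

# image_guidance_scale pushes the edit to stay close to the input image,
# while guidance_scale pushes it to follow the text instruction.
edited_image = instruct_pix2pix_pipeline(
    edit_instruction,
    image=input_image,
    image_guidance_scale=1.5,  # assumption: a moderate value
    guidance_scale=7.5,        # assumption: a common default
    generator=generator,
).images[0]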
import requests