from PIL import Image
import requests
import torch
from torchvision import transforms
import os
from torchvision.transforms.functional import InterpolationMode
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
from models.blip import blip_decoder
image_size = 384
transform = transforms.Compose([
    transforms.Resize((image_size,image_size),interpolation=InterpolationMode.BICUBIC),
    transforms.ToTensor(),
    transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))
])
model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_large_caption.pth'
model = blip_decoder(pretrained=model_url, image_size=384, vit='large')
model.eval()
model = model.to(device)
from models.blip_vqa import blip_vqa
image_size_vq = 480
transform_vq = transforms.Compose([
    transforms.Resize((image_size_vq,image_size_vq),interpolation=InterpolationMode.BICUBIC),
    transforms.ToTensor(),
    transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))
])
model_url_vq = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model*_vqa.pth'
model_vq = blip_vqa(pretrained=model_url_vq, image_size=480, vit='base')
model_vq.eval()
model_vq = model_vq.to(device)
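# The `requests` import above is otherwise unused; the sketch below shows one way
# to fetch an image from a URL and hand it to the models. `load_image_from_url`
# is an illustrative helper added here, not part of the original script.
def load_image_from_url(url):
    from io import BytesIO
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    return Image.open(BytesIO(response.content)).convert('RGB')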
def inference(raw_image, model_n, question="", strategy=""):
    if model_n == 'Image Captioning':
        # caption the image with the BLIP captioning model
        image = transform(raw_image).unsqueeze(0).to(device)
        with torch.no_grad():
            if strategy == "Beam search":
                caption = model.generate(image, sample=False, num_beams=3, max_length=20, min_length=5)
            else:
                # nucleus sampling
                caption = model.generate(image, sample=True, top_p=0.9, max_length=20, min_length=5)
            return 'caption: ' + caption[0]
    else:
        # visual question answering with the BLIP VQA model
        image_vq = transform_vq(raw_image).unsqueeze(0).to(device)
        with torch.no_grad():
            answer = model_vq(image_vq, question, train=False, inference='generate')
        return 'answer: ' + answer[0]
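# Nothing below exercises the VQA branch of inference(), so here is a small
# illustrative wrapper (an addition, not original code) mirroring get_caption():
def get_answer(image_path, question):
    img = Image.open(image_path)
    # drop the leading 'answer: ' prefix (8 characters) returned by inference()
    return inference(img, "Visual Question Answering", question=question)[8:]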
# get caption for a single image
def get_caption(image_path):
    img = Image.open(image_path)
    # drop the leading 'caption: ' prefix (9 characters) returned by inference()
    return inference(img, "Image Captioning")[9:]
def display(image_path):
    img = mpimg.imread(image_path)
    plt.imshow(img)
    print("Caption: " + get_caption(image_path))
# returns a dictionary with key -> img_path and value -> caption
def get_captions(img_directory, print_status=True):
    # key is the image path, value is the generated caption
    captions = {}
    length = len(os.listdir(img_directory))
    count = 0
    for file in os.listdir(img_directory):
        f = os.path.join(img_directory, file)
        captions[f] = inference(Image.open(f), "Image Captioning")
        count += 1
        if print_status:
            print("Images complete:", str(count) + "/" + str(length))
            print("Caption:", captions[f])
    return captions
# writes dictionary to file, key and value separated by ':', one entry per line
def write_to_file(filename, caption_dict):
    # the with-block closes the file automatically
    with open(filename, "w") as file:
        for i in caption_dict:
            file.write(i + ":" + caption_dict[i] + "\n")
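# Example usage (a sketch; "images/" and "captions.txt" are placeholder paths,
# not assets that ship with this script):
if __name__ == "__main__":
    captions = get_captions("images/", print_status=True)
    write_to_file("captions.txt", captions)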