import os
os.system('git clone https://github.com/pytorch/fairseq.git; cd fairseq;'
          'pip install --use-feature=in-tree-build ./; cd ..')
os.system('ls -l')
import torch
import numpy as np
import re
from fairseq import checkpoint_utils, distributed_utils, options, tasks, utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
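# The three imports below come from the OFA repository code (utils/, tasks/, models/),
# which is assumed to sit in the working directory alongside this script
# (as in a Hugging Face Space built on top of the OFA codebase).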
from utils.zero_shot_utils import zero_shot_step
from tasks.mm_tasks.vqa_gen import VqaGenTask
from models.ofa import OFAModel
from PIL import Image
from torchvision import transforms
import gradio as gr
# Register VQA task
tasks.register_task('vqa_gen', VqaGenTask)
# turn on cuda if GPU is available
use_cuda = torch.cuda.is_available()
# use fp16 only when GPU is available
use_fp16 = False
os.system('wget https://www.dropbox.com/s/5al62v0pumbfch7/checkpoint_best_25_004_13_4_480.pt; '
          'mkdir -p checkpoints; mv checkpoint_best_25_004_13_4_480.pt checkpoints/checkpoint_best_25_004_13_4_480.pt')
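# The downloaded file is presumably an OFA checkpoint fine-tuned for this demo;
# it is moved under checkpoints/ so that the --path option below can find it.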
# specify some options for evaluation
parser = options.get_generation_parser()
input_args = ["", "--task=vqa_gen",
              "--beam=100",
              "--unnormalized",
              "--path=checkpoints/checkpoint_best_25_004_13_4_480.pt",
              "--bpe-dir=utils/BPE",
              "--ans2label-file=dataset/trainval_ans2label.pkl"
              ]
args = options.parse_args_and_arch(parser, input_args)
cfg = convert_namespace_to_omegaconf(args)
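# parse_args_and_arch builds an argparse Namespace from the generation parser, and
# convert_namespace_to_omegaconf turns it into fairseq's hierarchical config
# (cfg.task, cfg.generation, cfg.common_eval, ...), which the rest of the script reads.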
# Load pretrained ckpt & config
use_fp16 = cfg.common.fp16
use_cuda = torch.cuda.is_available() and not cfg.common.cpu
if use_cuda:
    torch.cuda.set_device(cfg.distributed_training.device_id)
overrides = eval(cfg.common_eval.model_overrides)
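# --model-overrides is a dict literal stored as a string (default "{}"); eval() turns it
# into a dict so extra fields can be injected into the checkpoint config before loading.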
task = tasks.setup_task(cfg.task)
if cfg.task._name == "vqa_gen":
    overrides['val_inference_type'] = "allcand"
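# "allcand" makes the VQA-gen task score every candidate answer from the ans2label file
# (rather than free-form beam-search decoding), which is why --ans2label-file is passed above.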
models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
    utils.split_paths(cfg.common_eval.path),
    arg_overrides=overrides,
    suffix=cfg.checkpoint.checkpoint_suffix,
    strict=(cfg.checkpoint.checkpoint_shard_count == 1),
    num_shards=cfg.checkpoint.checkpoint_shard_count,
)
# Move models to GPU
for model in models:
    model.eval()
    if use_fp16:
        model.half()
    if use_cuda and not cfg.distributed_training.pipeline_model_parallel:
        model.cuda()
    model.prepare_for_inference_(cfg)
# Initialize generator
generator = task.build_generator(models, cfg.generation)
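# The generator is built from cfg.generation, so it picks up the --beam=100 and
# --unnormalized options set in input_args above.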
for model, ckpt_path in zip(models, utils.split_paths(cfg.common_eval.path)):
    model.load_state_dict(checkpoint_utils.load_ema_from_checkpoint(ckpt_path)['model'])
    model.eval()
    if use_fp16:
        model.half()
    if use_cuda and not cfg.distributed_training.pipeline_model_parallel:
        model.cuda()
    model.prepare_for_inference_(cfg)
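# This second pass swaps in the EMA (exponential moving average) weights stored in the
# checkpoint in place of the regular weights, then repeats the eval/fp16/cuda setup.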
# Image transform
mean = [0.5, 0.5, 0.5]
std = [0.5, 0.5, 0.5]
patch_resize_transform = transforms.Compose([
    lambda image: image.convert("RGB"),
    transforms.Resize((cfg.task.patch_image_size, cfg.task.patch_image_size), interpolation=Image.BICUBIC),
    transforms.ToTensor(),
    transforms.Normalize(mean=mean, std=std),
])
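# With mean = std = 0.5, Normalize maps pixel values from [0, 1] to [-1, 1], and the image
# is resized to cfg.task.patch_image_size on both sides before being fed to the model.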
# Text preprocess
bos_item = torch.LongTensor([task.src_dict.bos()])
eos_item = torch.LongTensor([task.src_dict.eos()])
pad_idx = task.src_dict.pad()
# Normalize the question
def pre_question(question, max_ques_words):
    question = question.lower().lstrip(",.!?*#:;~").replace('-', ' ').replace('/', ' ')
    question = re.sub(
        r"\s{2,}",
        ' ',
        question,
    )
    question = question.rstrip('\n')
    question = question.strip(' ')
    # truncate question
    question_words = question.split(' ')
    if len(question_words) > max_ques_words:
        question = ' '.join(question_words[:max_ques_words])
    return question
def encode_text(text, length=None, append_bos=False, append_eos=False):
    s = task.tgt_dict.encode_line(
        line=task.bpe.encode(text),
        add_if_not_exist=False,
        append_eos=False
    ).long()
    if length is not None:
        s = s[:length]
    if append_bos:
        s = torch.cat([bos_item, s])
    if append_eos:
        s = torch.cat([s, eos_item])
    return s
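# encode_text BPE-encodes the string with the task's BPE, maps the tokens to ids in the
# target dictionary, and optionally prepends BOS / appends EOS.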
# Construct input for open-domain VQA task
def construct_sample(image: Image.Image, question: str):
    patch_image = patch_resize_transform(image).unsqueeze(0)
    patch_mask = torch.tensor([True])
    question = pre_question(question, task.cfg.max_src_length)
    question = question + '?' if not question.endswith('?') else question
    src_text = encode_text(' {}'.format(question), append_bos=True, append_eos=True).unsqueeze(0)
    src_length = torch.LongTensor([s.ne(pad_idx).long().sum() for s in src_text])
    ref_dict = np.array([{'yes': 1.0}])  # just a placeholder
    sample = {
        "id": np.array(['42']),
        "net_input": {
            "src_tokens": src_text,
            "src_lengths": src_length,
            "patch_images": patch_image,
            "patch_masks": patch_mask,
        },
        "ref_dict": ref_dict,
    }
    return sample
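# The sample mimics a single-element batch in the format the VQA-gen task expects:
# "net_input" feeds the model, while "ref_dict" is only placeholder ground truth
# required by the evaluation code path.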
# Function to turn FP32 to FP16
def apply_half(t):
    if t.dtype is torch.float32:
        return t.to(dtype=torch.half)
    return t
# Function for open-domain VQA
def open_domain_vqa(Image, Question):
    sample = construct_sample(Image, Question)
    sample = utils.move_to_cuda(sample) if use_cuda else sample
    sample = utils.apply_to_sample(apply_half, sample) if use_fp16 else sample
    # Run eval step for open-domain VQA
    with torch.no_grad():
        result, scores = zero_shot_step(task, generator, models, sample)
    return result[0]['answer']
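# Quick local sanity check (uses one of the example images listed below); left commented
# out so the Gradio app remains the only entry point when this script is run.
# answer = open_domain_vqa(Image.open('test5.jpg'),
#                          "Which side of the two images has building under construction?")
# print(answer)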
title = "TimeMachine-Visual_Question_Answering"
description = "TimeMachine-Visual_Question_Answering. Upload your own image (a single image containing a pair of photos to compare, as in the examples) " \
              "or click any one of the examples, then click \"Submit\" and wait for OFA's answer. "
article = "<p style='text-align: center'><a href='https://github.com/OFA-Sys/OFA' target='_blank'>OFA Github " \
          "Repo</a></p> "
examples = [['test5.jpg', "Which side of the two images has building under construction?"],
            ['test2.jpg', "Which side of the two images has building under construction?"],
            ['test.jpg', "Which side of the two images has better pedestrian crossing?"],
            ['test4.jpg', "Which side of the two images has more building?"]]
io = gr.Interface(fn=open_domain_vqa, inputs=[gr.inputs.Image(type='pil'), "textbox"], outputs=gr.outputs.Textbox(label="Answer"),
                  title=title, description=description, article=article, examples=examples,
                  allow_flagging=False, allow_screenshot=False)
io.launch(cache_examples=True)