import gradio as gr
import torch
from BLIP.models.blip import blip_decoder
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
|
|
# Run inference on GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
|
image_size = 384
# Preprocessing following BLIP: resize the PIL image first, then convert to a
# tensor and normalize with the CLIP mean/std used during training.
transform = transforms.Compose([
    transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
    transforms.ToTensor(),
    transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
|
|
# Download and load the FuseCap-trained BLIP captioning model (ViT-B backbone).
model_url = "https://technionmail-my.sharepoint.com/personal/snoamr_campus_technion_ac_il/_layouts/15/download.aspx?share=EZxgXQaBXGREgDsQiaTcwAAB0z8jQA_hgAnwwPQDt8Dgew"
model = blip_decoder(pretrained=model_url, image_size=image_size, vit='base')
model.eval()
model = model.to(device)
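# Note: blip_decoder also accepts a local checkpoint path in place of a URL
# (per the BLIP repo's load_checkpoint); a hypothetical local fallback:
#   model = blip_decoder(pretrained="fusecap_base.pth", image_size=image_size, vit='base')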
|
|
def inference(raw_image):
    """Generate a caption for a single PIL image."""
    # Add a batch dimension and move the tensor to the model's device.
    image = transform(raw_image).unsqueeze(0).to(device)
    with torch.no_grad():
        # Greedy decoding (no sampling, single beam), with length bounds on the caption.
        caption = model.generate(image, sample=False, num_beams=1, max_length=60, min_length=5)
    return caption[0]
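# Quick sanity check outside Gradio (hypothetical; assumes one of the example
# images below, e.g. "surfer.jpg", sits next to this script):
#   from PIL import Image
#   print(inference(Image.open("surfer.jpg").convert("RGB")))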
|
|
|
|
inputs = gr.Image(type='pil', label="Image")
outputs = gr.Textbox(label="Caption")
|
|
description = "Gradio demo for FuseCap: Leveraging Large Language Models to Fuse Visual Data into Enriched Image Captions. This demo features a BLIP-based captioning model trained using FuseCap."
| examples = [["birthday_dog.jpeg"], ["surfer.jpg"], ["bike.jpg"]] |
article = "<p style='text-align: center'><a href='google.com' target='_blank'>place holder</a></p>"
|
|
|
|
iface = gr.Interface(fn=inference,
                     inputs=inputs,
                     outputs=outputs,
                     title="FuseCap",
                     description=description,
                     article=article,
                     examples=examples)
# Queue requests so concurrent users don't collide on the single model instance.
iface.queue()
iface.launch()
|
|