import streamlit as st

from PIL import Image
from transformers import VisionEncoderDecoderModel, ViTImageProcessor, GPT2TokenizerFast

# Load the pretrained ViT-GPT2 image-captioning model, its image processor, and tokenizer.
model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
feature_extractor = ViTImageProcessor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
tokenizer = GPT2TokenizerFast.from_pretrained("nlpconnect/vit-gpt2-image-captioning")

# Two alternative decoding settings: a very short caption with a narrow beam,
# and a longer caption with a wider, more thorough beam search.
gen_kwargs1 = {"max_length": 4, "num_beams": 2}
gen_kwargs2 = {"max_length": 32, "num_beams": 16}

def predict_step(images):
  # Preprocess the PIL image(s) into pixel tensors for the ViT encoder.
  pixel_values = feature_extractor(images=images, return_tensors="pt").pixel_values
  # Generate captions with three decoding strategies: the model defaults,
  # the short/narrow settings, and the long/wide beam search.
  output_ids1 = model.generate(pixel_values)
  output_ids2 = model.generate(pixel_values, **gen_kwargs1)
  output_ids3 = model.generate(pixel_values, **gen_kwargs2)
  # Decode token IDs back to text, dropping special tokens and surrounding whitespace.
  preds1 = [pred.strip() for pred in tokenizer.batch_decode(output_ids1, skip_special_tokens=True)]
  preds2 = [pred.strip() for pred in tokenizer.batch_decode(output_ids2, skip_special_tokens=True)]
  preds3 = [pred.strip() for pred in tokenizer.batch_decode(output_ids3, skip_special_tokens=True)]
  # Return the first (only) caption from each strategy.
  return preds1[0], preds2[0], preds3[0]
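
# Usage sketch (hypothetical file name): predict_step([Image.open("photo.jpg")])
# returns a 3-tuple of captions: default decoding, short beam, and long beam.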

st.title("Image Caption Generator")
upload_image = st.file_uploader(label="Upload image", type=["png", "jpg", "jpeg"], accept_multiple_files=False)
if upload_image is not None:
  image = Image.open(upload_image)
  # The model expects RGB input; convert grayscale/RGBA/palette uploads first.
  if image.mode != "RGB":
    image = image.convert(mode="RGB")
  # Caption the image and display all three candidate captions.
  output = predict_step([image])
  st.header("Captions:")
  st.text(output[0])
  st.text(output[1])
  st.text(output[2])
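
# To run locally (assuming this file is saved as app.py):
#   pip install streamlit transformers torch pillow
#   streamlit run app.py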