Update app.py
app.py CHANGED
@@ -1,89 +1,44 @@
+import torch
+import re
 import gradio as gr
-from transformers import (
-    …
-)
-…
-    if tokenizer is not None:
-        generated_caption = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
-    else:
-        generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
-
-    return generated_caption
-
-
-def generate_caption_coca(model, transform, image):
-    im = transform(image).unsqueeze(0).to(device)
-    with torch.no_grad(), torch.cuda.amp.autocast():
-        generated = model.generate(im, seq_len=20)
-    return open_clip.decode(generated[0].detach()).split("<end_of_text>")[0].replace("<start_of_text>", "")
-
-
-def generate_captions(image):
-    caption_git_large_coco = generate_caption(git_processor_large_coco, git_model_large_coco, image)
-    caption_git_large_textcaps = generate_caption(git_processor_large_textcaps, git_model_large_textcaps, image)
-    caption_blip_large = generate_caption(blip_processor_large, blip_model_large, image)
-    caption_coca = generate_caption_coca(coca_model, coca_transform, image)
-    caption_blip2_8_bit = generate_caption(blip2_processor_8_bit, blip2_model_8_bit, image, use_float_16=True).strip()
-    return caption_git_large_coco, caption_git_large_textcaps, caption_blip_large, caption_coca, caption_blip2_8_bit
-
-
-examples = [["Image1.jpg"], ["Image2.jpg"], ["Image3.jpg"]]
-outputs = [gr.outputs.Textbox(label="Caption generated - 1"), gr.outputs.Textbox(label="Caption generated - 2"), gr.outputs.Textbox(label="Caption generated - 3"), gr.outputs.Textbox(label="Caption generated - 4"), gr.outputs.Textbox(label="Caption generated - 5")]
-
-title = "Interactive demo: comparing image captioning models"
-description = "Image Caption Generator by Sravanth Kurmala"
-article = "Assignment for Listed Inc"
-
-interface = gr.Interface(fn=generate_captions,
-                         inputs=gr.inputs.Image(type="pil"),
-                         outputs=outputs,
-                         examples=examples,
-                         title=title,
-                         description=description,
-                         article=article,
-                         enable_queue=True)
+from transformers import AutoTokenizer, ViTFeatureExtractor, VisionEncoderDecoderModel
+
+device = 'cpu'
+encoder_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
+decoder_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
+model_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
+feature_extractor = ViTFeatureExtractor.from_pretrained(encoder_checkpoint)
+tokenizer = AutoTokenizer.from_pretrained(decoder_checkpoint)
+model = VisionEncoderDecoderModel.from_pretrained(model_checkpoint).to(device)
+
+
+def predict(image, max_length=64, num_beams=4):
+    image = image.convert('RGB')
+    image = feature_extractor(image, return_tensors="pt").pixel_values.to(device)
+    clean_text = lambda x: x.replace('<|endoftext|>', '').split('\n')[0]
+    caption_ids = model.generate(image, max_length=max_length)[0]
+    caption_text = clean_text(tokenizer.decode(caption_ids))
+    return caption_text
+
+
+
+input = gr.inputs.Image(label="Upload your Image", type='pil', optional=True)
+output = gr.outputs.Textbox(type="auto", label="Captions")
+examples = [f"example{i}.jpg" for i in range(1, 5)]
+
+description = "Image caption Generator"
+title = "Image Captioning 🖼️"
+
+article = "Created By : Sravanth Kurmala"
+
+interface = gr.Interface(
+    fn=predict,
+    inputs=input,
+    theme="grass",
+    outputs=output,
+    examples=examples,
+    title=title,
+    description=description,
+    article=article,
+)
 interface.launch(debug=True)
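
One review note on the new predict: it accepts a num_beams argument but never forwards it to model.generate, so the beam-search setting is silently ignored (and import re is unused). A minimal sketch of the fix, keeping the names from the diff:

def predict(image, max_length=64, num_beams=4):
    image = image.convert('RGB')
    pixel_values = feature_extractor(image, return_tensors="pt").pixel_values.to(device)
    # Forward num_beams so the argument actually controls decoding.
    caption_ids = model.generate(pixel_values, max_length=max_length, num_beams=num_beams)[0]
    # skip_special_tokens drops <|endoftext|>, replacing the manual string cleanup.
    return tokenizer.decode(caption_ids, skip_special_tokens=True).split('\n')[0]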
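
Separately, gr.inputs, gr.outputs, and string themes such as "grass" belong to the legacy Gradio 2.x API and were removed in later releases. A sketch of equivalent wiring against a current Gradio (assumptions: the optional flag and the "grass" theme have no direct counterparts here, so both are dropped):

import gradio as gr

interface = gr.Interface(
    fn=predict,
    inputs=gr.Image(label="Upload your Image", type="pil"),
    outputs=gr.Textbox(label="Captions"),
    examples=[f"example{i}.jpg" for i in range(1, 5)],
    title=title,
    description=description,
    article=article,
)
interface.launch(debug=True)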