disabled models except blip and blip2_8bit models
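This commit comments out the GIT-large (COCO and TextCaps) and CoCa models, leaving only BLIP-large and the 8-bit BLIP-2 model active. For context, here is a minimal sketch of how those two remaining models are typically loaded; the checkpoint ids and the 8-bit loading arguments are assumptions inferred from the variable names and output labels in the diff, not copied from app.py:

```python
# Sketch under assumptions: the checkpoint ids below are inferred from the
# output labels ("BLIP-large", "BLIP-2 OPT 6.7b") and may differ from app.py;
# load_in_8bit requires the bitsandbytes package.
import torch
from transformers import (
    AutoProcessor,
    BlipForConditionalGeneration,
    Blip2ForConditionalGeneration,
)

blip_processor_large = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
blip_model_large = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")

blip2_processor_8_bit = AutoProcessor.from_pretrained("Salesforce/blip2-opt-6.7b")
blip2_model_8_bit = Blip2ForConditionalGeneration.from_pretrained(
    "Salesforce/blip2-opt-6.7b",
    device_map="auto",   # let accelerate place the sharded weights
    load_in_8bit=True,   # quantize the 6.7B OPT decoder to int8
)

device = "cuda" if torch.cuda.is_available() else "cpu"
blip_model_large.to(device)  # the 8-bit model is already placed by device_map
```

Loading BLIP-2 OPT 6.7b in 8-bit substantially cuts its GPU memory footprint, which is presumably why it is the only BLIP-2 variant kept enabled.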
app.py CHANGED
@@ -12,11 +12,11 @@ torch.hub.download_url_to_file('https://cdn.openai.com/dall-e-2/demos/text2im/as
 # git_processor_base = AutoProcessor.from_pretrained("microsoft/git-base-coco")
 # git_model_base = AutoModelForCausalLM.from_pretrained("microsoft/git-base-coco")
 
-git_processor_large_coco = AutoProcessor.from_pretrained("microsoft/git-large-coco")
-git_model_large_coco = AutoModelForCausalLM.from_pretrained("microsoft/git-large-coco")
+# git_processor_large_coco = AutoProcessor.from_pretrained("microsoft/git-large-coco")
+# git_model_large_coco = AutoModelForCausalLM.from_pretrained("microsoft/git-large-coco")
 
-git_processor_large_textcaps = AutoProcessor.from_pretrained("microsoft/git-large-r-textcaps")
-git_model_large_textcaps = AutoModelForCausalLM.from_pretrained("microsoft/git-large-r-textcaps")
+# git_processor_large_textcaps = AutoProcessor.from_pretrained("microsoft/git-large-r-textcaps")
+# git_model_large_textcaps = AutoModelForCausalLM.from_pretrained("microsoft/git-large-r-textcaps")
 
 # blip_processor_base = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
 # blip_model_base = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
@@ -34,20 +34,20 @@ blip2_model_8_bit = Blip2ForConditionalGeneration.from_pretrained("Salesforce/bl
 # vitgpt_model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
 # vitgpt_tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
 
-coca_model, _, coca_transform = open_clip.create_model_and_transforms(
-    model_name="coca_ViT-L-14",
-    pretrained="mscoco_finetuned_laion2B-s13B-b90k"
-)
+# coca_model, _, coca_transform = open_clip.create_model_and_transforms(
+#     model_name="coca_ViT-L-14",
+#     pretrained="mscoco_finetuned_laion2B-s13B-b90k"
+# )
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
 # git_model_base.to(device)
 # blip_model_base.to(device)
-git_model_large_coco.to(device)
-git_model_large_textcaps.to(device)
+# git_model_large_coco.to(device)
+# git_model_large_textcaps.to(device)
 blip_model_large.to(device)
 # vitgpt_model.to(device)
-coca_model.to(device)
+# coca_model.to(device)
 # blip2_model.to(device)
 
 def generate_caption(processor, model, image, tokenizer=None, use_float_16=False):
@@ -76,9 +76,9 @@ def generate_caption_coca(model, transform, image):
 def generate_captions(image):
     # caption_git_base = generate_caption(git_processor_base, git_model_base, image)
 
-    caption_git_large_coco = generate_caption(git_processor_large_coco, git_model_large_coco, image)
+    # caption_git_large_coco = generate_caption(git_processor_large_coco, git_model_large_coco, image)
 
-    caption_git_large_textcaps = generate_caption(git_processor_large_textcaps, git_model_large_textcaps, image)
+    # caption_git_large_textcaps = generate_caption(git_processor_large_textcaps, git_model_large_textcaps, image)
 
     # caption_blip_base = generate_caption(blip_processor_base, blip_model_base, image)
 
@@ -86,17 +86,20 @@ def generate_captions(image):
 
     # caption_vitgpt = generate_caption(vitgpt_processor, vitgpt_model, image, vitgpt_tokenizer)
 
-    caption_coca = generate_caption_coca(coca_model, coca_transform, image)
+    # caption_coca = generate_caption_coca(coca_model, coca_transform, image)
 
     # caption_blip2 = generate_caption(blip2_processor, blip2_model, image, use_float_16=True).strip()
 
     caption_blip2_8_bit = generate_caption(blip2_processor_8_bit, blip2_model_8_bit, image, use_float_16=True).strip()
 
-    return caption_git_large_coco, caption_git_large_textcaps, caption_blip_large, caption_coca, caption_blip2_8_bit
+    # return caption_git_large_coco, caption_git_large_textcaps, caption_blip_large, caption_coca, caption_blip2_8_bit
+    return caption_blip_large, caption_blip2_8_bit
+
 
 examples = [["cats.jpg"], ["stop_sign.png"], ["astronaut.jpg"]]
-outputs = [gr.outputs.Textbox(label="Caption generated by GIT-large fine-tuned on COCO"), gr.outputs.Textbox(label="Caption generated by GIT-large fine-tuned on TextCaps"), gr.outputs.Textbox(label="Caption generated by BLIP-large"), gr.outputs.Textbox(label="Caption generated by CoCa"), gr.outputs.Textbox(label="Caption generated by BLIP-2 OPT 6.7b")]
+# outputs = [gr.outputs.Textbox(label="Caption generated by GIT-large fine-tuned on COCO"), gr.outputs.Textbox(label="Caption generated by GIT-large fine-tuned on TextCaps"), gr.outputs.Textbox(label="Caption generated by BLIP-large"), gr.outputs.Textbox(label="Caption generated by CoCa"), gr.outputs.Textbox(label="Caption generated by BLIP-2 OPT 6.7b")]
+outputs = [gr.outputs.Textbox(label="Caption generated by BLIP-large"), gr.outputs.Textbox(label="Caption generated by BLIP-2 OPT 6.7b")]
 
 title = "Interactive demo: comparing image captioning models"
 description = "Gradio Demo to compare GIT, BLIP, CoCa, and BLIP-2, 4 state-of-the-art vision+language models. To use it, simply upload your image and click 'submit', or click one of the examples to load them. Read more at the links below."
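With this change, generate_captions returns just two captions, matching the two-element outputs list above. The generate_caption helper and the gr.Interface wiring are not part of this diff; the sketch below shows one plausible way they fit together (only the signature and the variable names come from app.py, the bodies are assumptions):

```python
# Sketch under assumptions: only generate_caption's signature and the variable
# names (blip_*_large, blip2_*_8_bit, outputs, examples, title, description)
# appear in the diff; the bodies below are a plausible reconstruction, not the
# actual implementation in app.py.
import gradio as gr
import torch


def generate_caption(processor, model, image, tokenizer=None, use_float_16=False):
    inputs = processor(images=image, return_tensors="pt").to(model.device)
    if use_float_16:
        # The 8-bit BLIP-2 recipe usually casts pixel values to float16.
        inputs = inputs.to(torch.float16)
    generated_ids = model.generate(**inputs, max_length=50)
    if tokenizer is not None:
        # Some models (e.g. ViT-GPT2) decode with a separate tokenizer.
        return tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return processor.batch_decode(generated_ids, skip_special_tokens=True)[0]


def generate_captions(image):
    # Only the two models left enabled by this commit are called.
    caption_blip_large = generate_caption(blip_processor_large, blip_model_large, image).strip()
    caption_blip2_8_bit = generate_caption(blip2_processor_8_bit, blip2_model_8_bit, image, use_float_16=True).strip()
    return caption_blip_large, caption_blip2_8_bit


# Wiring matching the two-element `outputs` list defined in the diff above.
interface = gr.Interface(
    fn=generate_captions,
    inputs=gr.inputs.Image(type="pil"),  # legacy API, consistent with gr.outputs.Textbox above
    outputs=outputs,
    examples=examples,
    title=title,
    description=description,
)
interface.launch()
```

Note that the title and description strings left unchanged by the diff still advertise four models (GIT, BLIP, CoCa, BLIP-2), even though only BLIP-large and BLIP-2 remain wired up after this commit.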