import pathlib

import gradio as gr
import open_clip
import torch

# Run on the GPU when available; CPU inference also works, just more slowly.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the CoCa ViT-B/32 checkpoint pretrained on LAION-2B, together with its
# inference preprocessing transform (the training transform is unused here).
model, _, transform = open_clip.create_model_and_transforms(
    "coca_ViT-B-32",
    pretrained="laion2b_s13b_b90k",
)
model.to(device)
model.eval()

def _decode_caption(tokens):
    """Decode generated token ids and strip the special tokens."""
    text = open_clip.decode(tokens.detach())
    return text.split("<end_of_text>")[0].replace("<start_of_text>", "")

def output_generate(image):
    """Caption an image with the default short generation (up to 20 tokens)."""
    im = transform(image).unsqueeze(0).to(device)
    with torch.no_grad(), torch.autocast(device_type=device.type, enabled=device.type == "cuda"):
        generated = model.generate(im, seq_len=20)
    return _decode_caption(generated[0])

def inference_caption(image):
    """Caption an image with beam search, which tends to give fuller captions."""
    im = transform(image).unsqueeze(0).to(device)
    with torch.no_grad(), torch.autocast(device_type=device.type, enabled=device.type == "cuda"):
        generated = model.generate(
            im,
            generation_type="beam_search",
            top_p=1.0,
            min_seq_len=1,
            seq_len=100,
            repetition_penalty=1.2,
        )
    return _decode_caption(generated[0])

image_input = gr.Image(type="pil")
caption_output = gr.Textbox(label="Caption Output")

# Gradio 4.x API: components live at the top level (the gr.inputs/gr.outputs
# namespaces and capture_session were removed), and flagging is disabled with
# allow_flagging="never" rather than a boolean.
caption_interface = gr.Interface(
    fn=inference_caption,
    inputs=image_input,
    outputs=caption_output,
    title="CoCa: Contrastive Captioners",
    description=(
        "An open source implementation of CoCa: Contrastive Captioners are "
        "Image-Text Foundation Models."
    ),
    examples=[path.as_posix() for path in sorted(pathlib.Path("images").glob("*.jpg"))],
    allow_flagging="never",
)

caption_interface.launch()
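
# A minimal sketch of exercising the captioner without the UI, e.g. as a quick
# smoke test. "images/example.jpg" is a hypothetical placeholder path; point it
# at any local JPEG before uncommenting.
#
#   from PIL import Image
#   print(inference_caption(Image.open("images/example.jpg").convert("RGB")))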