import gradio as gr
from core.inference import infer
def build_ui(model, processor):
    """Build and return the Gradio UI for zero-shot image classification.

    Args:
        model: CLIP-style model (openai/clip-vit-base-patch32 per the page
            copy) forwarded to ``infer``.
        processor: Matching processor forwarded to ``infer``.

    Returns:
        gr.Blocks: The assembled demo, ready for ``.launch()``.
    """

    # One shared callback for both the Examples widget and the Run button,
    # replacing the two duplicated lambdas so they cannot drift apart.
    def run_inference(img, txt):
        return infer(model, processor, img, txt)

    # NOTE(review): original browser-tab title was "AI Document Summarizer",
    # which contradicted the page content (zero-shot image classification).
    with gr.Blocks(title="Open AI Zero-Shot Classification") as demo:
        with gr.Column(elem_id="container"):
            gr.Markdown("# **Open AI Zero-Shot Classification**", elem_id="title")
            gr.Markdown(
                "This is the demo of model **openai/clip-vit-base-patch32** "
                "for zero-shot image classification."
            )
            with gr.Row(equal_height=True):
                with gr.Column():
                    image_input = gr.Image(type="pil", label="Upload Image", height=310)
                    text_input = gr.Textbox(label="Input labels (comma separated)")
                    run_button = gr.Button("Run", variant="primary")
                with gr.Column():
                    output = gr.Label(
                        label="Open AI Zero-Shot Classification Output",
                        num_top_classes=5,
                    )
            with gr.Row(equal_height=True):
                # Example prompts kept byte-identical to the originals
                # (including "a photo of a engagement" — presumably a typo,
                # but it is a runtime label string; confirm before changing).
                gr.Examples(
                    examples=[
                        ["./assets/zebra.jpg", "a photo of a zebra, a photo of a horse, a photo of a donkey"],
                        ["./assets/cat.jpg", "a photo of a cat, a photo of two cats, a photo of three cats"],
                        ["./assets/fridge.jpg", "a photo of a fridge, a photo of a cupboard, a photo of a wardrobe"],
                        ["./assets/marriage.jpg", "a photo of a birthday, a photo of a marriage, a photo of a engagement"],
                        ["./assets/giraffe.jpg", "Giraffe looking at same direction, Giraffe looking at opposite direction"]
                    ],
                    inputs=[image_input, text_input],
                    outputs=[output],
                    fn=run_inference,
                )
        run_button.click(
            fn=run_inference,
            inputs=[image_input, text_input],
            outputs=[output],
        )
    return demo