| import string |
| import gradio as gr |
| import requests |
| import io |
|
|
url = ""


def inference_chat(input_image, input_text):
    """Send a VQA request (image + question) to the remote inference endpoint.

    Args:
        input_image: PIL image; encoded to JPEG in memory and uploaded.
        input_text: question string passed to the model.

    Returns:
        The "answer" field of the endpoint's JSON response.

    Raises:
        requests.HTTPError: if the server returns an error status.
    """
    # Encode the PIL image to JPEG bytes without touching the filesystem.
    with io.BytesIO() as buf:
        input_image.save(buf, "jpeg")
        image_bytes = buf.getvalue()
    # Fix: the payload is JPEG, so declare 'image/jpeg' (the original sent
    # 'image/png' for JPEG bytes) and correct the 'input_imge' filename typo.
    files = {"img": ("input_image.jpg", image_bytes, "image/jpeg", {})}
    res = requests.post(
        url,
        data={"input_text": input_text},
        files=files,
        timeout=60,  # don't hang forever on a stalled server
    )
    res.raise_for_status()  # surface HTTP errors instead of a cryptic JSON decode failure
    return res.json()["answer"]
| |
# Build the Gradio UI: image + question inputs on the left, answer output on
# the right. NOTE(review): nesting below is reconstructed from a
# whitespace-mangled source — confirm layout against the running app.
with gr.Blocks(
    css="""
    .message.svelte-w6rprc.svelte-w6rprc.svelte-w6rprc {font-size: 20px; margin-top: 20px}
    #component-21 > div.wrap.svelte-w6rprc {height: 600px;}
    """
) as iface:
    # Per-session state; currently cleared by the handlers but never read
    # (kept for future chat-history support).
    state = gr.State([])

    with gr.Row():
        with gr.Column(scale=1):
            image_input = gr.Image(type="pil")
            with gr.Row():
                with gr.Column(scale=1):
                    chat_input = gr.Textbox(lines=1, label="VQA Input(问题输入)")
                    with gr.Row():
                        clear_button = gr.Button(value="Clear", interactive=True)
                        submit_button = gr.Button(
                            value="Submit", interactive=True, variant="primary"
                        )
        with gr.Column():
            # Fix: lines=0 is invalid for a Textbox (minimum is one line).
            caption_output = gr.Textbox(lines=1, label="VQA Output(模型答案输出)")

    # Selecting a new image resets the previous answer and the session state.
    # Fix: the lambda must return exactly one value per output component
    # (2 outputs here; the original returned 3 values).
    image_input.change(
        lambda: ("", []),
        [],
        [caption_output, state],
        queue=False,
    )
    # Pressing Enter in the question box runs inference.
    chat_input.submit(
        inference_chat,
        [
            image_input,
            chat_input,
        ],
        [caption_output],
    )
    # Clear wipes the question box and the session state.
    # Fix: 2 outputs, so return 2 values (the original returned 3).
    clear_button.click(
        lambda: ("", []),
        [],
        [chat_input, state],
        queue=False,
    )
    # Submit button mirrors the Enter-key behaviour.
    submit_button.click(
        inference_chat,
        [
            image_input,
            chat_input,
        ],
        [caption_output],
    )
|
|
| |
| |
| |
| |
|
|
# Serve the app with a bounded request queue (gradio 3.x API):
# one request processed at a time, at most 10 waiting, and the queue's
# REST endpoints kept private.
iface.queue(concurrency_count=1, api_open=False, max_size=10)
# NOTE(review): enable_queue is redundant once iface.queue() was called
# (and deprecated in later gradio versions) — confirm target gradio version.
iface.launch(enable_queue=True)