import gradio as gr
import random
# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.
# Badge color for each harm classification produced by the demo bot.
color_map = {
    "harmful": "crimson",
    "neutral": "gray",
    "beneficial": "green",
}


def html_src(harm_level):
    """Return an HTML snippet rendering *harm_level* as a small colored badge.

    *harm_level* must be one of the keys of ``color_map``; an unknown level
    raises ``KeyError``.
    """
    badge_color = color_map[harm_level]
    return f"""
<div style="display: flex; gap: 5px;padding: 2px 4px;margin-top: -40px">
    <div style="background-color: {badge_color}; padding: 2px; border-radius: 5px;">
    {harm_level}
    </div>
</div>
"""
def print_like_dislike(x: gr.LikeData):
    """Log a like/dislike reaction: message index, message content, liked flag."""
    reaction = (x.index, x.value, x.liked)
    print(*reaction)
def add_message(history, message):
    """Push the user's uploaded files and typed text onto the chat history.

    ``message`` is the MultimodalTextbox value: a dict with ``files`` (list of
    paths) and ``text``. Returns the updated history plus a cleared,
    temporarily non-interactive textbox (re-enabled after the bot replies).
    """
    history.extend(
        {"role": "user", "content": {"path": file_path}}
        for file_path in message["files"]
    )
    text = message["text"]
    if text is not None:
        history.append({"role": "user", "content": text})
    return history, gr.MultimodalTextbox(value=None, interactive=False)
def bot(history, response_type):
    """Append a canned assistant reply of the requested media type to *history*.

    ``response_type`` is one of the Radio choices; any unrecognized value
    (including "text") falls back to a plain-text "Cool!" reply. Returns the
    mutated history list.
    """
    bus_png = "https://github.com/gradio-app/gradio/raw/main/gradio/media_assets/images/bus.png"
    # Lazy builders: only the component for the selected type is constructed.
    builders = {
        "gallery": lambda: gr.Gallery([bus_png, bus_png]),
        "image": lambda: gr.Image(bus_png),
        "video": lambda: gr.Video(
            "https://github.com/gradio-app/gradio/raw/main/gradio/media_assets/videos/world.mp4",
            label="test",
        ),
        "audio": lambda: gr.Audio(
            "https://github.com/gradio-app/gradio/raw/main/gradio/media_assets/audio/audio_sample.wav"
        ),
        "html": lambda: gr.HTML(
            html_src(random.choice(["harmful", "neutral", "beneficial"]))
        ),
        "model3d": lambda: gr.Model3D(
            "https://github.com/gradio-app/gradio/raw/main/gradio/media_assets/models3d/Fox.gltf"
        ),
    }
    build = builders.get(response_type)
    content = build() if build is not None else "Cool!"
    history.append({"role": "assistant", "content": content})
    return history
with gr.Blocks(fill_height=True) as demo:
    # Conversation display; scale=1 lets it fill the available vertical space.
    chatbot = gr.Chatbot(
        elem_id="chatbot",
        scale=1,
    )
    # Lets the user pick which media type the demo bot replies with.
    response_type = gr.Radio(
        [
            "image",
            "text",
            "gallery",
            "video",
            "audio",
            "html",
            "model3d",
        ],
        value="text",
        label="Response Type",
    )
    # Single input accepting typed text and/or file uploads.
    chat_input = gr.MultimodalTextbox(
        interactive=True,
        placeholder="Enter message or upload file...",
        show_label=False,
    )
    # Event chain: submit -> add the user message (and disable the input),
    # then produce the bot reply, then re-enable the input box.
    chat_msg = chat_input.submit(
        add_message, [chatbot, chat_input], [chatbot, chat_input]
    )
    bot_msg = chat_msg.then(
        bot, [chatbot, response_type], chatbot, api_name="bot_response"
    )
    bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])
    # Log like/dislike reactions to stdout.
    chatbot.like(print_like_dislike, None, None)
if __name__ == "__main__":
    demo.launch()