# update: use gemini flash thinking 2.0 for understanding video
import gradio as gr
from chatbot_agent import *
from dotenv import load_dotenv
import os
import time
from tiktok_downloader import *
# Load variables from a local .env file into the process environment.
load_dotenv()

# Re-export the key explicitly so downstream libraries that read os.environ
# directly can see it. Guard against a missing key: assigning None into
# os.environ would raise an opaque "str expected, not NoneType" TypeError,
# so fail fast with an actionable message instead.
_gemini_key = os.getenv("GEMINI_API_KEY")
if _gemini_key is None:
    raise RuntimeError(
        "GEMINI_API_KEY is not set; add it to your environment or .env file."
    )
os.environ["GEMINI_API_KEY"] = _gemini_key
def message_and_history(user_input, history):
    """Send *user_input* to the chatbot and stream the reply into the UI.

    Generator used as a Gradio event handler.

    Args:
        user_input: The text the user typed (or, via the video button, a
            file path — see the UI wiring below).
        history: Chat history as a list of ``(user, bot)`` tuples, or
            ``None`` on the first turn (gr.State starts empty).

    Yields:
        ``(chatbot_history, state, textbox_update)`` triples — one per
        streamed character, plus a final one with the completed turn —
        matching the ``outputs=[chatbot, state, message]`` wiring.
    """
    history = history or []

    # Flatten all prior (user, bot) turns plus the new message into a single
    # space-joined prompt string for the agent.
    turns = list(sum(history, ()))
    turns.append(user_input)
    prompt = ' '.join(turns)

    output = chatbot_llm.ask_chatbot(user_message=prompt)
    print(output)

    # Stream the reply one character at a time for a typewriter effect.
    partial_reply = ""
    for char in output:
        partial_reply += char
        temp_history = history.copy()
        temp_history.append((user_input, partial_reply))
        yield temp_history, temp_history, gr.update(value="")
        time.sleep(0.008)

    # Persist the finished turn and emit it. NOTE: this must be a `yield`,
    # not a `return` — a generator's return value becomes StopIteration and
    # is ignored by Gradio; and if `output` were empty the loop above would
    # never yield, leaving the UI outputs unset without this final yield.
    history.append((user_input, output))
    yield history, history, gr.update(value="")
# UI shell: a Gradio Blocks app with the built-in Soft theme.
block = gr.Blocks(theme=gr.themes.Soft())

# Backend agent (star-imported from chatbot_agent above). A single
# hard-coded user id is registered at startup — presumably a demo/test
# account shared by all visitors; TODO confirm multi-user behavior.
chatbot_llm = ChatbotAgent()
chatbot_llm.create_user(user_id="test")
with block:
    gr.Markdown("<h1><center>ChatBot for KOLs performance prediction</center></h1>")
    with gr.Row():
        # Column for the Chatbot
        with gr.Column(scale=1,):
            chatbot = gr.Chatbot(elem_id="chatbot", height=585)
            message = gr.Textbox(placeholder="Enter Your Query Here")
            # Per-session chat history; message_and_history receives and
            # re-emits it so each browser session keeps its own state.
            state = gr.State()
            submit = gr.Button("SEND")
            # Button: On click — stream handler updates the chat window,
            # the session state, and clears the textbox.
            submit.click(
                fn=message_and_history,
                inputs=[message, state],
                outputs=[chatbot, state, message],
                queue=True,
            )
            # Press ENTER to submit (same handler/wiring as the button).
            message.submit(
                fn=message_and_history,
                inputs=[message, state],
                outputs=[chatbot, state, message],
                queue=True,
            )
        # Column for the Video component
        with gr.Column(scale=1):
            video_input = gr.Video(label="Upload or record a video",
                                   height=690,
                                   width=600,
                                   interactive=True,
                                   )
            send_video_btn = gr.Button("Send Video")
            # NOTE(review): this feeds the uploaded video's file path into the
            # same text-chat handler as user_input — presumably the agent
            # detects paths and routes to video understanding; verify, since
            # otherwise the path is just concatenated into the text prompt.
            send_video_btn.click(
                fn=message_and_history,
                inputs=[video_input, state],
                outputs=[chatbot, state, message],
                queue=True,)

block.launch()