ArseniyPerchik's picture
more
fbd53e3
raw
history blame
1.78 kB
import gradio as gr
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import tempfile
def create_animation():
    """Render a sine wave being traced over [0, 2*pi] and save it as an MP4.

    Returns:
        str: Path to a temporary ``.mp4`` file. The file is created with
        ``delete=False``, so the caller is responsible for removing it.
    """
    fig, ax = plt.subplots(figsize=(7, 7))
    xdata, ydata = [], []
    # animated=True lets FuncAnimation blit just the line artist each frame.
    ln, = ax.plot([], [], 'b-', animated=True)

    def init():
        ax.set_xlim(0, 2 * np.pi)
        ax.set_ylim(-1.1, 1.1)
        return ln,

    def update(frame):
        # Accumulate points so the curve is drawn progressively.
        xdata.append(frame)
        ydata.append(np.sin(frame))
        ln.set_data(xdata, ydata)
        return ln,

    ani = animation.FuncAnimation(
        fig, update, frames=np.linspace(0, 2 * np.pi, 100),
        init_func=init, blit=True, repeat=False
    )

    # Fix: the original left the NamedTemporaryFile handle open (fd leak;
    # on Windows the open handle can also prevent ffmpeg from writing to
    # the path). Use a context manager to grab the name with the handle
    # closed, then let ffmpeg write to it.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as tmp:
        video_path = tmp.name
    ani.save(video_path, writer='ffmpeg', fps=20)
    plt.close(fig)
    return video_path
def load_image_on_start():
    """Produce a placeholder image: a 700x700 array of uniform samples in [0, 1)."""
    side = 700
    noise = np.random.rand(side, side)
    return noise
# UI: a two-column layout — request input/status on the left, the rendered
# environment video on the right. On page load, a sine-wave animation is
# generated and shown in the video component.
with gr.Blocks() as demo:
    gr.Markdown("## Agent Control with Language")
    gr.Markdown('## Say the agent where to go and what to do')
    with gr.Row():
        with gr.Column():
            # Request input and (read-only) transcription / status fields.
            request_audio = gr.Audio()
            send_btn = gr.Button(value='Send Request')
            request_text = gr.Textbox(label="Request:", lines=2, interactive=False)
            request_target = gr.Textbox(label='Target:', lines=2)
            request_plan = gr.Textbox(label='Plan status:', lines=2)
        with gr.Column():
            # Environment view; autoplay so the generated clip starts immediately.
            output_env = gr.Video(label="Env:", autoplay=True)
    # EVENTS: populate the environment video when the page loads.
    demo.load(fn=create_animation, outputs=output_env)

# Fix: the original called demo.launch() twice; the duplicate call was
# dead code (launch() blocks / the server is already running) and removed.
demo.launch()