|
|
""" |
|
|
File: app.py |
|
|
Author: Elena Ryumina and Dmitry Ryumin |
|
|
Description: Main application file for Facial_Expression_Recognition.
|
|
The file defines the Gradio interface, sets up the main blocks, |
|
|
and includes event handlers for various components. |
|
|
License: MIT License |
|
|
""" |
|
|
|
|
|
import gradio as gr |
|
|
import my_uie |
|
|
|
|
|
from app.description import DESCRIPTION_STATIC, DESCRIPTION_DYNAMIC |
|
|
from app.authors import AUTHORS |
|
|
from app.app_utils import preprocess_image_and_predict, preprocess_video_and_predict |
|
|
|
|
|
def text_emo_analysize(text):
    """Run text emotion analysis through the ``my_uie`` backend.

    Args:
        text: Raw input text to analyse.

    Returns:
        A pair ``(emotion, probability)`` exactly as produced by
        ``my_uie.text_emo_analysize`` — presumably an emotion label and
        its confidence; confirm against the backend implementation.
    """
    emotion, probability = my_uie.text_emo_analysize(text)
    return emotion, probability
|
|
|
|
|
def clear_static_info():
    """Build blank replacements for the static-image widgets.

    Returns fresh components with ``value=None`` (input image, two
    intermediate images, and the top-3 label output) so that wiring this
    function to a clear button resets all four at once.

    NOTE(review): not connected to any button in this file's visible UI —
    confirm whether a static tab elsewhere uses it.
    """
    image_in = gr.Image(value=None, type="pil")
    image_a = gr.Image(value=None, scale=1, elem_classes="dl5")
    image_b = gr.Image(value=None, scale=1, elem_classes="dl2")
    label_out = gr.Label(value=None, num_top_classes=3, scale=1, elem_classes="dl3")
    return image_in, image_a, image_b, label_out
|
|
|
|
|
def clear_dynamic_info():
    """Build blank replacements for the video-tab widgets.

    Returns four empty ``gr.Video`` components followed by an empty
    ``gr.Plot``, matching the ``outputs`` list of the clear button so
    every dynamic widget is reset in one click.
    """
    blank_videos = tuple(gr.Video(value=None) for _ in range(4))
    return (*blank_videos, gr.Plot(value=None))
|
|
|
|
|
# Build the Gradio UI. The Chinese labels below were restored from
# mojibake in the previous revision (UTF-8 text mis-decoded as TIS-620,
# which dropped the 0x80-0x9F continuation bytes and split the literals
# across lines, leaving invalid Python).
with gr.Blocks(css="app.css") as demo:
    # --- Video emotion analysis tab ("Emotion analysis system") ---------
    with gr.Tab("情感分析系统"):
        gr.Markdown(value=DESCRIPTION_DYNAMIC)
        with gr.Row():
            with gr.Column(scale=2):
                input_video = gr.Video(elem_classes="video1")
                with gr.Row():
                    # "清除" = clear, "提交" = submit
                    clear_btn_dynamic = gr.Button(
                        value="清除", interactive=True, scale=1
                    )
                    submit_dynamic = gr.Button(
                        value="提交", interactive=True, scale=1, elem_classes="submit"
                    )
            with gr.Column(scale=2, elem_classes="dl4"):
                with gr.Row():
                    # Hidden until prediction fills them in.
                    output_video = gr.Video(label="Original video", scale=1, elem_classes="video2", visible=False)
                    output_face = gr.Video(label="Pre-processed video", scale=1, elem_classes="video3", visible=False)
                    output_heatmaps = gr.Video(label="Heatmaps", scale=1, elem_classes="video4", visible=False)
                # "情感数据" = emotion data
                output_statistics = gr.Plot(label="情感数据", elem_classes="stat")
        gr.Examples(
            ["videos/video1.mp4",
             "videos/video2.mp4",
             ],
            [input_video],
        )
    # --- Text emotion analysis section ("文本情感分析") ------------------
    # NOTE(review): gr.Row does not document a positional label argument —
    # confirm the installed gradio version accepts this string.
    with gr.Row("文本情感分析"):
        with gr.Column():
            gr.Markdown("文本情感分析")
            # placeholder: "enter text here"
            text_input = gr.Textbox(lines=2, placeholder='在这里输入文本')
            # "提交文本情感分析" = submit text emotion analysis
            text_submit_button = gr.Button("提交文本情感分析")
        with gr.Column():
            # "文本情感" = text emotion; "情感概率" = emotion probability
            text_output_1 = gr.Textbox(label="文本情感")
            text_output_2 = gr.Textbox(label="情感概率")

    # Wire the text pipeline: one input box, two result boxes.
    text_submit_button.click(text_emo_analysize, inputs=text_input, outputs=[text_output_1, text_output_2])

    # Wire the video pipeline: one uploaded video in, three videos and a
    # statistics plot out.
    submit_dynamic.click(
        fn=preprocess_video_and_predict,
        inputs=input_video,
        outputs=[
            output_video,
            output_face,
            output_heatmaps,
            output_statistics
        ],
        queue=True,
    )
    # Clear button resets the input video plus all four outputs; the
    # outputs list must stay in sync with clear_dynamic_info's return.
    clear_btn_dynamic.click(
        fn=clear_dynamic_info,
        inputs=[],
        outputs=[
            input_video,
            output_video,
            output_face,
            output_heatmaps,
            output_statistics
        ],
        queue=True,
    )
|
|
|
|
|
if __name__ == "__main__":
    # Enable request queuing (api_open=False — see gradio queue docs for
    # what this restricts), then serve locally without a public share link.
    app = demo.queue(api_open=False)
    app.launch(share=False)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|