import os

import gradio as gr


def process_inputs(image, audio, emotion=None, cfg_scale=1.2):
    """Return the path of the video to display for the given inputs.

    NOTE(review): this is currently a placeholder — it ignores every
    argument and always returns the bundled demo clip at
    ``src/prevideo/obama.mp4``. Presumably the real generation pipeline
    is plugged in here later; confirm before shipping.

    Args:
        image: filepath of the uploaded reference portrait.
        audio: filepath of the uploaded driving audio.
        emotion: emotion label selected in the dropdown (may be None).
        cfg_scale: guidance strength from the slider (1.0-3.0).

    Returns:
        Absolute path to the video file shown in the output player.
    """
    # Resolve the demo clip relative to this file so the app works
    # regardless of the current working directory at launch time.
    current_dir = os.path.dirname(os.path.abspath(__file__))
    video_path = os.path.join(current_dir, "src", "prevideo", "obama.mp4")
    return video_path


# Build the UI: a two-column layout — inputs on the left, the generated
# video plus usage notes on the right/below.
with gr.Blocks(
    theme=gr.themes.Soft(),
    css=".gradio-container {max-width: 960px !important; margin: 0 auto !important}",
) as demo:
    gr.HTML(
        """

AI数字人视频生成

"""
    )
    with gr.Row(variant="panel"):
        with gr.Column(scale=1):
            with gr.Row():
                source_image = gr.Image(
                    label="上传参考图片",
                    type="filepath",
                )
            with gr.Row():
                driving_audio = gr.Audio(
                    label="上传音频",
                    type="filepath",
                )
            with gr.Row():
                emotion_dropdown = gr.Dropdown(
                    label="选择情感",
                    choices=["开心", "悲伤", "愤怒", "平静"],
                    value="平静",
                )
            with gr.Row():
                cfg_slider = gr.Slider(
                    label="生成强度",
                    minimum=1.0,
                    maximum=3.0,
                    step=0.05,
                    value=1.2,
                )
            submit_button = gr.Button("生成视频", variant="primary")
        with gr.Column(scale=1):
            output_video = gr.Video(
                label="生成的视频",
                height=480,
                width=640,
                autoplay=True,
            )
    # Usage notes and disclaimer; a multiline literal is required for the
    # markdown heading and numbered list to render correctly.
    gr.Markdown(
        """
---
### **使用说明**
1. 上传一张清晰的正面照片
2. 上传想要数字人说的音频
3. 选择期望的情感表现
4. 调整生成强度(数值越大,表情越夸张)
5. 点击"生成视频"按钮

### **免责声明**
本项目仅用于学术研究,不对用户生成的内容负责。用户在使用此生成模型时需要对自己的行为负责。
"""
    )

    # Wire the button: all four inputs feed process_inputs, whose return
    # value (a filepath) is rendered in the output video player.
    submit_button.click(
        fn=process_inputs,
        inputs=[
            source_image,
            driving_audio,
            emotion_dropdown,
            cfg_slider,
        ],
        outputs=output_video,
    )

# Guarded so importing this module (e.g. for testing) does not start the
# web server as a side effect.
if __name__ == "__main__":
    demo.launch()