"""Gradio demo: generate a short video clip from a text prompt using the
ModelScope DAMO text-to-video synthesis model."""

import pathlib

import gradio as gr
from huggingface_hub import snapshot_download
from modelscope.outputs import OutputKeys
from modelscope.pipelines import pipeline

# Fetch the model weights from the Hugging Face Hub into ./weights.
# snapshot_download resumes/skips files that are already present locally.
weights_dir = pathlib.Path('weights')
snapshot_download('damo-vilab/modelscope-damo-text-to-video-synthesis',
                  repo_type='model', local_dir=weights_dir)

# Build the text-to-video inference pipeline backed by the local weights.
pipe = pipeline('text-to-video-synthesis', weights_dir.as_posix())
| |
|
| | |
def generate_video(text_prompt):
    """Run the text-to-video pipeline on *text_prompt*.

    Returns the output produced by the ModelScope pipeline under
    ``OutputKeys.OUTPUT_VIDEO`` (the generated video, as the pipeline
    reports it — presumably a file path; Gradio's Video output accepts it).
    """
    result = pipe({'text': text_prompt})
    return result[OutputKeys.OUTPUT_VIDEO]
| |
|
| | |
# Wire the generator into a simple one-input / one-output web UI.
prompt_box = gr.Textbox(label="Enter a text prompt",
                        placeholder="Describe the scene...")
video_output = gr.Video(label="Generated Video")

demo = gr.Interface(
    fn=generate_video,
    inputs=prompt_box,
    outputs=video_output,
    title="Text-to-Video Generator",
    description="Enter a text description, and the model will generate a video based on your input.",
)
| |
|
| | |
def main() -> None:
    """Entry point: start the Gradio server (blocks until interrupted)."""
    demo.launch()


if __name__ == "__main__":
    main()