Geek7 committed on
Commit
f2464b2
·
verified ·
1 Parent(s): 5c6d697

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -38
app.py CHANGED
@@ -1,46 +1,32 @@
1
- import torch
2
- from diffusers import AnimateDiffPipeline, LCMScheduler, MotionAdapter
3
- from diffusers.utils import export_to_gif
4
- from peft import PeftModel # Import the PEFT library
5
  import gradio as gr
6
 
7
- # Load AnimateLCM for video generation
8
- adapter = MotionAdapter.from_pretrained("Binarybardakshat/RCNA_MINI")
9
- pipe = AnimateDiffPipeline.from_pretrained("emilianJR/epiCRealism", motion_adapter=adapter, torch_dtype=torch.float16)
10
- pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config, beta_schedule="linear")
11
 
12
- # Load LoRA weights using PEFT
13
- pipe = PeftModel.from_pretrained(pipe, "Binarybardakshat/RCNA_MINI", adapter_name="lcm-lora") # PEFT for LoRA
14
 
15
- pipe.set_adapters(["lcm-lora"], [0.8])
16
- pipe.enable_vae_slicing()
17
- pipe.enable_model_cpu_offload()
 
 
18
 
19
- # Function to generate video from user prompt
20
- def generate_video(prompt):
21
- # Generate video using RCNA MINI
22
- output = pipe(
23
- prompt=prompt,
24
- negative_prompt="bad quality, worse quality, low resolution",
25
- num_frames=16,
26
- guidance_scale=2.0,
27
- num_inference_steps=6,
28
- generator=torch.Generator("cpu").manual_seed(0),
29
- )
30
- frames = output.frames[0]
31
- export_to_gif(frames, "animatelcm.gif")
32
-
33
- # Return the generated video
34
- return "animatelcm.gif"
35
-
36
- # Create a Gradio interface
37
- interface = gr.Interface(
38
  fn=generate_video,
39
- inputs="text", # Accept text prompt
40
- outputs="file", # Output the generated GIF file
41
- title="AnimateLCM Video Generator",
42
- description="Generate videos with a simple prompt using AnimateLCM and RCNA MINI",
43
  )
44
 
45
- # Launch the Gradio app
46
- interface.launch()
 
 
1
+ from huggingface_hub import snapshot_download
2
+ from modelscope.pipelines import pipeline
3
+ from modelscope.outputs import OutputKeys
4
+ import pathlib
5
  import gradio as gr
6
 
7
# Fetch the text-to-video model weights into a local "weights" directory,
# then build the ModelScope synthesis pipeline from that snapshot.
weights_dir = pathlib.Path('weights')
snapshot_download(
    'damo-vilab/modelscope-damo-text-to-video-synthesis',
    repo_type='model',
    local_dir=weights_dir,
)

# `pipe` is used by generate_video() below; keep the name stable.
pipe = pipeline('text-to-video-synthesis', weights_dir.as_posix())
14
 
15
def generate_video(text_prompt):
    """Run the text-to-video pipeline on *text_prompt* and return the
    path of the generated video file."""
    # The ModelScope pipeline expects its input wrapped in a dict under 'text'.
    request = {'text': text_prompt}
    result = pipe(request)
    # OutputKeys.OUTPUT_VIDEO indexes the generated video path in the result.
    return result[OutputKeys.OUTPUT_VIDEO]
20
 
21
# Wire the generator function into a simple Gradio UI.
prompt_box = gr.Textbox(label="Enter a text prompt", placeholder="Describe the scene...")
video_out = gr.Video(label="Generated Video")

demo = gr.Interface(
    fn=generate_video,
    inputs=prompt_box,
    outputs=video_out,
    title="Text-to-Video Generator",
    description="Enter a text description, and the model will generate a video based on your input.",
)

# Start the web app only when executed as a script (not on import).
if __name__ == "__main__":
    demo.launch()