sibel10 committed on
Commit
8ea6377
·
verified ·
1 Parent(s): e522bef

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -17
app.py CHANGED
@@ -1,26 +1,23 @@
1
- import os
2
  import torch
3
  import gradio as gr
4
  from diffusers import AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler
5
- from huggingface_hub import login
6
-
7
- # HuggingFace auth (Space secret)
8
- login(token=os.environ["HF_TOKEN"])
9
 
10
  device = "cuda" if torch.cuda.is_available() else "cpu"
11
  dtype = torch.float16 if device == "cuda" else torch.float32
12
 
13
  print("Running on:", device)
14
 
 
15
  adapter = MotionAdapter.from_pretrained(
16
  "guoyww/animatediff-motion-adapter-v1-5-2",
17
  torch_dtype=dtype
18
  )
19
 
 
20
  pipe = AnimateDiffPipeline.from_pretrained(
21
- "black-forest-labs/FLUX.1-schnell",
22
  motion_adapter=adapter,
23
- torch_dtype=dtype,
24
  )
25
 
26
  pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
@@ -30,12 +27,13 @@ pipe.enable_vae_slicing()
30
  pipe.enable_attention_slicing()
31
 
32
  def generate(prompt, steps, guidance, frames, fps, seed):
33
- generator = torch.Generator(device).manual_seed(int(seed))
 
34
  video = pipe(
35
- prompt,
36
- num_inference_steps=steps,
37
- guidance_scale=guidance,
38
- num_frames=frames,
39
  generator=generator
40
  ).frames[0]
41
 
@@ -45,14 +43,15 @@ demo = gr.Interface(
45
  fn=generate,
46
  inputs=[
47
  gr.Textbox(label="Prompt"),
48
- gr.Slider(5, 30, value=15, label="Steps"),
49
- gr.Slider(1, 10, value=7.5, label="Guidance"),
50
- gr.Slider(8, 32, value=16, label="Frames"),
51
- gr.Slider(4, 24, value=8, label="FPS"),
52
  gr.Number(value=42, label="Seed"),
53
  ],
54
  outputs=gr.Video(),
55
- title="FLUX AnimateDiff – ZeroGPU Video Generator"
 
56
  )
57
 
58
  demo.launch()
 
 
1
  import torch
2
  import gradio as gr
3
  from diffusers import AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler
 
 
 
 
4
 
5
# Select the compute backend once at startup: prefer CUDA with half precision,
# otherwise fall back to CPU with full precision (fp16 is unreliable on CPU).
if torch.cuda.is_available():
    device = "cuda"
    dtype = torch.float16
else:
    device = "cpu"
    dtype = torch.float32

print("Running on:", device)
9
 
10
# AnimateDiff pairs a frozen Stable Diffusion image backbone with a motion
# adapter that injects temporal layers, turning the pipeline into a short-clip
# video generator.

# Load motion adapter (video motion)
motion_adapter_id = "guoyww/animatediff-motion-adapter-v1-5-2"
adapter = MotionAdapter.from_pretrained(motion_adapter_id, torch_dtype=dtype)

# Load base Stable Diffusion model
base_model_id = "runwayml/stable-diffusion-v1-5"
pipe = AnimateDiffPipeline.from_pretrained(
    base_model_id,
    motion_adapter=adapter,
    torch_dtype=dtype,
)

# Rebuild the scheduler from the pipeline's own config so every sampling
# hyper-parameter carries over unchanged.
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
 
27
# Slice the attention computation into chunks to reduce peak memory usage
# (small speed cost) — helpful on the constrained hardware of a Space.
pipe.enable_attention_slicing()
28
 
29
def generate(prompt, steps, guidance, frames, fps, seed):
    """Sample a frame sequence from the AnimateDiff pipeline.

    NOTE(review): the tail of this function (video export / return value) is
    not visible in this diff view; only the sampling portion is documented.

    Args:
        prompt: text description of the clip to generate.
        steps: number of denoising steps (cast to int below).
        guidance: classifier-free guidance scale (cast to float below).
        frames: number of video frames to sample (cast to int below).
        fps: unused in the visible portion — presumably consumed by the
            hidden export step; TODO confirm against the full file.
        seed: RNG seed; a seeded generator makes output reproducible.
    """
    # Device-local generator seeded explicitly so identical seeds reproduce
    # identical clips.
    generator = torch.Generator(device=device).manual_seed(int(seed))

    # Explicit casts guard against Gradio delivering floats/strings from the
    # numeric widgets; .frames[0] takes the first (only) batch entry.
    video = pipe(
        prompt=prompt,
        num_inference_steps=int(steps),
        guidance_scale=float(guidance),
        num_frames=int(frames),
        generator=generator
    ).frames[0]
39
 
 
43
    # Interface wiring — the opening `demo = gr.Interface(` line sits just
    # above this span (visible only in the hunk header). Input order must
    # match generate()'s signature: (prompt, steps, guidance, frames, fps, seed).
    fn=generate,
    inputs=[
        gr.Textbox(label="Prompt"),
        gr.Slider(10, 40, value=20, step=1, label="Steps"),       # denoising steps
        gr.Slider(1, 12, value=7.5, step=0.5, label="Guidance"),  # CFG scale
        gr.Slider(8, 32, value=16, step=1, label="Frames"),       # clip length
        gr.Slider(4, 24, value=8, step=1, label="FPS"),           # playback rate
        gr.Number(value=42, label="Seed"),                        # reproducibility
    ],
    outputs=gr.Video(),
    title="AnimateDiff Video Generator",
    description="Text to Video using Stable Diffusion + AnimateDiff"
)

# Start the Gradio server (blocking call).
demo.launch()