ciditel committed on
Commit
eea9420
·
verified ·
1 Parent(s): f6b864e

feat img2video

Browse files
Files changed (1) hide show
  1. app.py +36 -4
app.py CHANGED
@@ -2,13 +2,35 @@ from PIL import Image
2
  import torch
3
  import re
4
  import gradio as gr
 
5
  from diffusers import AutoPipelineForText2Image
6
  from diffusers import AutoPipelineForImage2Image
7
- from diffusers.utils import load_image, make_image_grid
 
 
 
 
8
  pipeline_text2image = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16")
9
  pipeline_image2image = AutoPipelineForImage2Image.from_pipe(pipeline_text2image).to("cuda")
10
  pipeline_text2image = pipeline_text2image.to("cuda")
11
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
 
13
  def text2img(prompt = "A cinematic shot of a baby racoon wearing an intricate italian priest robe.",guidance_scale=0.0, num_inference_steps=1):
14
  image = pipeline_text2image(prompt=prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images[0]
@@ -36,13 +58,23 @@ gradio_app_img2img = gr.Interface(
36
  gr.Image(type='filepath'),
37
  gr.Text(),
38
  gr.Slider(0.0, 10.0, value=1,step=0.1),
39
- gr.Slider(0.0, 100.0, value=1,step=1),
40
- gr.Slider(0.0, 1.0, value=0.5,step=0.05)
41
  ],
42
  outputs="image",
43
  )
44
 
45
- demo = gr.TabbedInterface([gradio_app_text2img,gradio_app_img2img], ["text2img","img2img"])
 
 
 
 
 
 
 
 
 
 
 
46
 
47
  if __name__ == "__main__":
48
  demo.launch()
 
import torch
import re
import gradio as gr
import random  # FIX: was "import ramdon" (typo) — random.randint is used for seed fallback

from diffusers import AutoPipelineForText2Image
from diffusers import AutoPipelineForImage2Image
from diffusers.utils import load_image, export_to_video
from diffusers import StableVideoDiffusionPipeline


# Image-to-video pipeline (Stable Video Diffusion).
# NOTE(review): loaded in full precision, unlike the fp16 SDXL pipeline below —
# consider torch_dtype=torch.float16, variant="fp16"; confirm VRAM budget first.
pipelineVideo = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt",
).to("cuda")

# SDXL-Turbo text-to-image pipeline (fp16); the image-to-image pipeline shares
# the same loaded weights via from_pipe, so only one copy lives on the GPU.
pipeline_text2image = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16")
pipeline_image2image = AutoPipelineForImage2Image.from_pipe(pipeline_text2image).to("cuda")
pipeline_text2image = pipeline_text2image.to("cuda")
16
 
17
def image2video(image, seed="", fps=7, outfile=""):
    """Generate a short video from a single input image with Stable Video Diffusion.

    Parameters:
        image: path (or URL) to the source image, loaded via diffusers' load_image.
        seed: string seed from the UI; blank or non-numeric falls back to a
            random seed in [0, 5000].
        fps: frames per second of the exported video.
        outfile: output filename; blank defaults to "<seed>.mp4".

    Returns:
        The path of the written .mp4 file.
    """
    # FIX: the def line was missing its trailing colon.
    if seed == "":
        seed = random.randint(0, 5000)
    else:
        try:
            seed = int(seed)
        # FIX: bare "except:" also swallowed KeyboardInterrupt/SystemExit;
        # only a failed int() conversion should trigger the random fallback.
        except (ValueError, TypeError):
            seed = random.randint(0, 5000)
    if outfile == "":
        outfile = str(seed) + ".mp4"
    image = load_image(image)
    # SVD-XT expects a 1024x576 conditioning image.
    image = image.resize((1024, 576))
    generator = torch.manual_seed(seed)
    # FIX: was "pipeline(...)", an undefined name — the video pipeline created
    # at module level is "pipelineVideo".
    frames = pipelineVideo(image, decode_chunk_size=8, generator=generator).frames[0]
    export_to_video(frames, outfile, fps=fps)
    return outfile
34
 
35
  def text2img(prompt = "A cinematic shot of a baby racoon wearing an intricate italian priest robe.",guidance_scale=0.0, num_inference_steps=1):
36
  image = pipeline_text2image(prompt=prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images[0]
 
58
  gr.Image(type='filepath'),
59
  gr.Text(),
60
  gr.Slider(0.0, 10.0, value=1,step=0.1),
61
+ gr.Text()
 
62
  ],
63
  outputs="image",
64
  )
65
 
66
# Gradio tab for image-to-video generation.
# FIX: fn was wired to img2img, which matches neither these inputs
# (image, seed, fps, outfile) nor the "video" output — it must call image2video.
gradio_app_img2video = gr.Interface(
    fn=image2video,
    inputs=[
        gr.Image(type='filepath'),              # source image path
        gr.Text(),                              # seed (blank = random)
        gr.Slider(0.0, 40.0, value=9, step=1),  # frames per second
        gr.Text(),                              # output filename (blank = "<seed>.mp4")
    ],
    outputs="video",
)

# Three tabs: text->image, image->image, image->video.
demo = gr.TabbedInterface(
    [gradio_app_text2img, gradio_app_img2img, gradio_app_img2video],
    ["text2img", "img2img", "img2video"],
)
78
 
79
# Launch the Gradio app only when executed as a script (not on import).
if __name__ == "__main__":
    demo.launch()