fffiloni commited on
Commit
470d745
·
1 Parent(s): 6e2019a

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +87 -0
app.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from diffusers import DiffusionPipeline
import gradio as gr
import torch
import cv2
import os

# Auth token for the gated Stable Diffusion v1.4 weights; provided to the
# Space as the HF_TOKEN_SD secret (None if unset).
MY_SECRET_TOKEN = os.environ.get('HF_TOKEN_SD')

device = "cuda"

# Stable Diffusion v1.4 loaded in fp16 with the community
# "interpolate_stable_diffusion" pipeline, which adds a `walk()` method for
# latent-space interpolation between prompts.
# safety_checker=None is deliberate: interpolated in-between frames trigger
# lots of NSFW false positives.
pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    use_auth_token=MY_SECRET_TOKEN,
    revision='fp16',
    torch_dtype=torch.float16,
    safety_checker=None,  # Very important for videos...lots of false positives while interpolating
    custom_pipeline="interpolate_stable_diffusion",
).to(device)
# Slice the attention computation to lower peak VRAM usage.
pipe.enable_attention_slicing()
20
+
21
def run(prompt1, seed1, prompt2, seed2, prompt3, seed3):
    """Interpolate between three prompts and assemble the frames into a video.

    Parameters
    ----------
    prompt1, prompt2, prompt3 : str
        Text prompts to interpolate between, in order.
    seed1, seed2, seed3 : int | float
        Seeds for each prompt's latent (Gradio sliders may deliver floats).

    Returns
    -------
    tuple[str, list[str]]
        Path to the assembled MP4 ("out.mp4") and the list of frame file
        paths — video first, gallery second.
    """
    # BUG FIX: the original called `pipe3.walk(...)`, but `pipe3` is never
    # defined anywhere in the file — the module-level pipeline is `pipe`,
    # so the first click raised NameError.
    frame_filepaths = pipe.walk(
        prompts=[prompt1, prompt2, prompt3],
        # Cast to int: torch generators reject float seeds from the sliders.
        seeds=[int(seed1), int(seed2), int(seed3)],
        num_interpolation_steps=16,
        output_dir='./dreams',
        batch_size=4,
        height=512,
        width=512,
        guidance_scale=8.5,
        num_inference_steps=50,
    )
    print(frame_filepaths)

    # Use the first frame to size the video; all frames share its dimensions.
    frame = cv2.imread(frame_filepaths[0])
    height, width, layers = frame.shape
    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
    video = cv2.VideoWriter("out.mp4", fourcc, 24, (width, height))
    for image in frame_filepaths:
        video.write(cv2.imread(image))

    video.release()
    cv2.destroyAllWindows()

    return "out.mp4", frame_filepaths
49
+
50
# Gradio UI: three prompt/seed pairs in, one interpolation video plus the
# individual frames out.
with gr.Blocks() as demo:
    with gr.Column():
        gr.HTML('''
            <h1>
                Stable Diffusion Interpolation &bull; Community pipeline
            </h1>
            <p style='text-align: center;'>
                This community pipeline returns a list of images saved under the folder as defined in output_dir. <br />
                You can use these images to create videos of stable diffusion.
            </p>

            <p style='text-align: center;'>
                This demo can be run on a GPU of at least 8GB VRAM and should take approximately 5 minutes.<br />
            </p>
        ''')
        with gr.Row():
            with gr.Column():
                with gr.Column():
                    with gr.Row():
                        intpol_prompt_1 = gr.Textbox(lines=1, label="prompt 1")
                        seed1 = gr.Slider(label="Seed 1", minimum=0, maximum=2147483647, step=1, randomize=True)
                    with gr.Row():
                        intpol_prompt_2 = gr.Textbox(lines=1, label="prompt 2")
                        seed2 = gr.Slider(label="Seed 2", minimum=0, maximum=2147483647, step=1, randomize=True)
                    with gr.Row():
                        intpol_prompt_3 = gr.Textbox(lines=1, label="prompt 3")
                        seed3 = gr.Slider(label="Seed 3", minimum=0, maximum=2147483647, step=1, randomize=True)
                intpol_run = gr.Button("Run Interpolation")

            with gr.Column():
                video_output = gr.Video(label="Generated video", show_label=True)
                gallery_output = gr.Gallery(label="Generated images", show_label=False).style(grid=2, height="auto")

    # BUG FIX: `run` returns ("out.mp4", frame_filepaths) — video path first,
    # image list second — but the original wired outputs=[gallery_output,
    # video_output], feeding the MP4 path to the gallery and the image list to
    # the video player. The outputs order must match the return order.
    intpol_run.click(
        run,
        inputs=[intpol_prompt_1, seed1, intpol_prompt_2, seed2, intpol_prompt_3, seed3],
        outputs=[video_output, gallery_output],
    )

demo.launch()