bugfix
Browse files
app.py
CHANGED
|
@@ -8,14 +8,15 @@ from diffusers import AutoPipelineForText2Image
|
|
| 8 |
from diffusers import AutoPipelineForImage2Image
|
| 9 |
from diffusers.utils import load_image, export_to_video
|
| 10 |
from diffusers import StableVideoDiffusionPipeline
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
|
|
|
| 14 |
|
| 15 |
|
| 16 |
|
| 17 |
def img2video(image,seed="",fps=7,outfile=""):
|
| 18 |
-
|
| 19 |
if seed=="":
|
| 20 |
seed=random.randint(0, 5000)
|
| 21 |
|
|
@@ -29,19 +30,18 @@ def img2video(image,seed="",fps=7,outfile=""):
|
|
| 29 |
image = load_image(image)
|
| 30 |
image = image.resize((1024, 576))
|
| 31 |
generator = torch.manual_seed(seed)
|
| 32 |
-
frames =
|
| 33 |
export_to_video(frames, outfile, fps=fps)
|
| 34 |
time.time(30)
|
| 35 |
return outfile
|
| 36 |
|
| 37 |
def text2img(prompt = "A cinematic shot of a baby racoon wearing an intricate italian priest robe.",guidance_scale=0.0, num_inference_steps=1):
|
| 38 |
-
|
| 39 |
image = pipeline_text2image(prompt=prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images[0]
|
| 40 |
return image
|
| 41 |
|
| 42 |
def img2img(image,prompt="A cinematic shot of a baby racoon wearing an intricate italian priest robe.", guidance_scale=0.0, num_inference_steps=1,strength=0.5):
|
| 43 |
-
|
| 44 |
-
pipeline_image2image = AutoPipelineForImage2Image.from_pipe(pipeline_text2image).to("cuda")
|
| 45 |
init_image = load_image(image)
|
| 46 |
init_image = init_image.resize((512, 512))
|
| 47 |
image = pipeline_image2image(prompt, image=init_image, strength=strength, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images[0]
|
|
|
|
| 8 |
from diffusers import AutoPipelineForImage2Image
|
| 9 |
from diffusers.utils import load_image, export_to_video
|
| 10 |
from diffusers import StableVideoDiffusionPipeline
|
| 11 |
+
# ---------------------------------------------------------------------------
# Model setup (module level): build the three diffusers pipelines shared by
# img2video / text2img / img2img below.
#
# BUGFIX: do NOT call .to("cuda") on the SVD pipeline when
# enable_model_cpu_offload() is used — offload installs accelerate hooks that
# manage device placement themselves, and per the diffusers documentation the
# pipeline must stay on CPU for offload to work (newer versions warn/error on
# this combination, and moving it first defeats the memory saving).
# ---------------------------------------------------------------------------

# Stable Video Diffusion (image -> video); kept on CPU, offloaded on demand.
pipeline_image2video = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt",
)
# SDXL-Turbo text-to-image backbone, resident on the GPU.
pipeline_text2image = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo").to("cuda")
# Image-to-image pipeline re-uses the SDXL-Turbo weights via from_pipe
# (no extra VRAM for a second copy of the model).
pipeline_image2image = AutoPipelineForImage2Image.from_pipe(pipeline_text2image).to("cuda")
# Stream SVD sub-models between CPU and GPU per forward pass to fit memory.
pipeline_image2video.enable_model_cpu_offload()
|
| 15 |
|
| 16 |
|
| 17 |
|
| 18 |
def img2video(image,seed="",fps=7,outfile=""):
|
| 19 |
+
|
| 20 |
if seed=="":
|
| 21 |
seed=random.randint(0, 5000)
|
| 22 |
|
|
|
|
| 30 |
image = load_image(image)
|
| 31 |
image = image.resize((1024, 576))
|
| 32 |
generator = torch.manual_seed(seed)
|
| 33 |
+
frames = pipeline_image2video(image, decode_chunk_size=8, generator=generator).frames[0]
|
| 34 |
export_to_video(frames, outfile, fps=fps)
|
| 35 |
time.time(30)
|
| 36 |
return outfile
|
| 37 |
|
| 38 |
def text2img(prompt = "A cinematic shot of a baby racoon wearing an intricate italian priest robe.",guidance_scale=0.0, num_inference_steps=1):
    """Generate one image from *prompt* using the SDXL-Turbo pipeline.

    Args:
        prompt: text description of the desired image.
        guidance_scale: classifier-free guidance weight (0.0 disables CFG,
            the SDXL-Turbo recommended setting).
        num_inference_steps: number of denoising steps (1 for turbo models).

    Returns:
        The first PIL image produced by the pipeline.
    """
    # Run the shared module-level text-to-image pipeline and keep only the
    # first generated frame.
    output = pipeline_text2image(
        prompt=prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
    )
    return output.images[0]
|
| 42 |
|
| 43 |
def img2img(image,prompt="A cinematic shot of a baby racoon wearing an intricate italian priest robe.", guidance_scale=0.0, num_inference_steps=1,strength=0.5):
|
| 44 |
+
|
|
|
|
| 45 |
init_image = load_image(image)
|
| 46 |
init_image = init_image.resize((512, 512))
|
| 47 |
image = pipeline_image2image(prompt, image=init_image, strength=strength, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images[0]
|