import torch
from diffusers import DiffusionPipeline

# Load the pipeline on CUDA; switch device_map to "mps" for Apple devices.
pipe = DiffusionPipeline.from_pretrained("YiYiXu/image_inputs", dtype=torch.bfloat16, device_map="cuda")

prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
image = pipe(prompt).images[0]

# NOTE(review): the text below was fused onto the code line in the original page;
# it describes this repo: a simple block to prepare all your default image inputs —
# takes an image or url, size, or processor_id in controlnet_aux.
Used for testing/demo.
To create the pipeline:
from diffusers import ModularPipeline

# Pipeline step that prepares default image inputs: it fetches an image from a URL,
# optionally resizes it (size=...), and optionally runs a controlnet_aux processor
# (processor_id=...), returning the processed image via output="image".
get_image_step = ModularPipeline.from_pretrained("YiYiXu/image_inputs")

# Common image inputs.
# NOTE(review): `url`, `inpaint_img_url`, `inpaint_mask_url` and
# `ip_adapter_image_url` are not defined in this snippet — the caller must
# supply them before running it.
init_image = get_image_step(image_url=url, output="image")
control_image = get_image_step(image_url=url, processor_id="canny", output="image")
controlnet_union_image = get_image_step(image_url=url, processor_id="lineart_anime", output="image")
inpaint_image = get_image_step(image_url=inpaint_img_url, size=(1024, 1024), output="image")
inpaint_mask = get_image_step(image_url=inpaint_mask_url, size=(1024, 1024), output="image")
ip_adapter_image = get_image_step(image_url=ip_adapter_image_url, output="image")
- Downloads last month
- -
Inference Providers NEW
This model isn't deployed by any Inference Provider. 🙋 Ask for provider support