text stringlengths 0 5.54k |
|---|
source_concept = "bowl"
target_concept = "basket"
source_text = (
    f"Provide a caption for images containing a {source_concept}. "
    "The captions should be in English and should be no longer than 150 characters."
)
target_text = (
    f"Provide a caption for images containing a {target_concept}. "
    "The captions should be in English and should be no longer than 150 characters."
)

Next, create a utility function to generate the prompts:

@torch.no_grad()
def generate_prompts(input_prompt):
    """Sample 16 candidate prompts from the language model for one input.

    Uses top-k sampling (k=10, temperature 0.8) and decodes the generated
    token sequences back to strings. Relies on the module-level `tokenizer`
    and `model` (already on CUDA) defined earlier in the guide.
    """
    encoded = tokenizer(input_prompt, return_tensors="pt").input_ids.to("cuda")
    generated = model.generate(
        encoded,
        temperature=0.8,
        num_return_sequences=16,
        do_sample=True,
        max_new_tokens=128,
        top_k=10,
    )
    return tokenizer.batch_decode(generated, skip_special_tokens=True)
source_prompts = generate_prompts(source_text) |
target_prompts = generate_prompts(target_text) |
print(source_prompts) |
print(target_prompts) Check out the generation strategy guide if you’re interested in learning more about strategies for generating different quality text. Load the text encoder model used by the StableDiffusionDiffEditPipeline to encode the text. You’ll use the text encoder to compute the text embeddings: Copied import torch |
from diffusers import StableDiffusionDiffEditPipeline |
pipeline = StableDiffusionDiffEditPipeline.from_pretrained( |
"stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16, use_safetensors=True |
) |
pipeline.enable_model_cpu_offload() |
pipeline.enable_vae_slicing() |
@torch.no_grad()
def embed_prompts(sentences, tokenizer, text_encoder, device="cuda"):
    """Encode a list of prompts and average them into a single embedding.

    Each sentence is tokenized (padded/truncated to the tokenizer's maximum
    length), run through the text encoder, and the per-sentence embeddings
    are averaged into one (1, seq_len, dim) tensor.
    """
    encoded = []
    for sentence in sentences:
        tokens = tokenizer(
            sentence,
            padding="max_length",
            max_length=tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        # The encoder output is tuple-like; index 0 holds the hidden states.
        hidden = text_encoder(tokens.input_ids.to(device), attention_mask=None)[0]
        encoded.append(hidden)
    # Stack along the batch axis, then reduce to a single averaged embedding.
    return torch.cat(encoded, dim=0).mean(dim=0, keepdim=True)
source_embeds = embed_prompts(source_prompts, pipeline.tokenizer, pipeline.text_encoder) |
target_embeds = embed_prompts(target_prompts, pipeline.tokenizer, pipeline.text_encoder) Finally, pass the embeddings to the generate_mask() and invert() functions, and pipeline to generate the image: Copied from diffusers import DDIMInverseScheduler, DDIMScheduler |
from diffusers.utils import load_image, make_image_grid |
from PIL import Image |
pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config) |
pipeline.inverse_scheduler = DDIMInverseScheduler.from_config(pipeline.scheduler.config) |
img_url = "https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png" |
raw_image = load_image(img_url).resize((768, 768)) |
mask_image = pipeline.generate_mask( |
image=raw_image, |
- source_prompt=source_prompt, |
- target_prompt=target_prompt, |
+ source_prompt_embeds=source_embeds, |
+ target_prompt_embeds=target_embeds, |
) |
inv_latents = pipeline.invert( |
- prompt=source_prompt, |
+ prompt_embeds=source_embeds, |
image=raw_image, |
).latents |
output_image = pipeline( |
mask_image=mask_image, |
image_latents=inv_latents, |
- prompt=target_prompt, |
- negative_prompt=source_prompt, |
+ prompt_embeds=target_embeds, |
+ negative_prompt_embeds=source_embeds, |
).images[0] |
mask_image = Image.fromarray((mask_image.squeeze()*255).astype("uint8"), "L") |
make_image_grid([raw_image, mask_image, output_image], rows=1, cols=3) Generate a caption for inversion While you can use the source_prompt as a caption to help generate the partially inverted latents, you can also use the BLIP model to automatically generate a caption. Load the BLIP model and processor from the 🤗 Transformers library: Copied import torch |
from transformers import BlipForConditionalGeneration, BlipProcessor |
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base") |
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base", torch_dtype=torch.float16, low_cpu_mem_usage=True) Create a utility function to generate a caption from the input image: Copied @torch.no_grad() |
def generate_caption(images, caption_generator, caption_processor, device="cuda"):
    """Generate a short caption for `images` with a BLIP-style captioner.

    Args:
        images: Image(s) in any format accepted by `caption_processor`.
        caption_generator: Conditional-generation model exposing `.dtype`,
            `.to()`, and `.generate()`.
        caption_processor: Processor that prepares model inputs and decodes
            output token ids back to text.
        device: Device to run generation on. Defaults to "cuda" (matching
            the original behavior); pass "cpu" on hosts without a GPU.

    Returns:
        The decoded caption string for the first generated sequence.
    """
    # Prime the caption with a prefix so the model completes the sentence.
    text = "a photograph of"
    inputs = caption_processor(images, text, return_tensors="pt").to(
        device=device, dtype=caption_generator.dtype
    )
    caption_generator.to(device)
    outputs = caption_generator.generate(**inputs, max_new_tokens=128)
    # Offload the caption model back to CPU to free accelerator memory.
    caption_generator.to("cpu")
    return caption_processor.batch_decode(outputs, skip_special_tokens=True)[0]
img_url = "https://github.com/Xiang-cd/DiffEdit-stable-diffusion/raw/main/assets/origin.png" |
raw_image = load_image(img_url).resize((768, 768)) |
caption = generate_caption(raw_image, model, processor) generated caption: "a photograph of a bowl of fruit on a table" Now you can drop the caption into the invert() function to generate the partially inverted latents! |
Shap-E Shap-E is a conditional model for generating 3D assets which could be used for video game development, interior design, and architecture. It is trained on a large dataset of 3D assets, and post-processed to render more views of each object and produce 16K instead of 4K point clouds. The Shap-E model is trained in two steps: an encoder accepts the point clouds and rendered views of a 3D asset and outputs the parameters of implicit functions that represent the asset a diffusion model is trained on the latents produced by the encoder to generate either neural radiance fields (NeRFs) or a textured 3D mesh, making it easier to render and use the 3D asset in downstream applications This guide will show you how to use Shap-E to start generating your own 3D assets! Before you begin, make sure you have the following libraries installed: Copied # uncomment to install the necessary libraries in Colab |
#!pip install -q diffusers transformers accelerate trimesh Text-to-3D To generate a gif of a 3D object, pass a text prompt to the ShapEPipeline. The pipeline generates a list of image frames which are used to create the 3D object. Copied import torch |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.