diff --git a/OmniGen/.gitattributes b/OmniGen/.gitattributes new file mode 100644 index 0000000000000000000000000000000000000000..5404b57770ddbdf18df340d520ff1bb9b609e697 --- /dev/null +++ b/OmniGen/.gitattributes @@ -0,0 +1,38 @@ +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ckpt filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.mlmodel filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text +*.rar filter=lfs diff=lfs merge=lfs -text +*.safetensors filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text +*.tar.* filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text +*tfevents* filter=lfs diff=lfs merge=lfs -text +imgs/** filter=lfs diff=lfs merge=lfs -text +*.jpg filter=lfs diff=lfs merge=lfs -text +*.png filter=lfs diff=lfs merge=lfs -text diff --git a/OmniGen/.gitignore b/OmniGen/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..fa99720a638423bd79a3df78d39719751362714a --- /dev/null +++ b/OmniGen/.gitignore @@ -0,0 +1,2 @@ +*.ipynb +**/__pycache__/ diff --git a/OmniGen/LICENSE b/OmniGen/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..802385928e847d0877470c24abb736abd7031994 --- /dev/null +++ b/OmniGen/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 VectorSpaceLab + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
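The `.gitattributes` above routes the large binary artifacts (model checkpoints such as `*.safetensors`, `*.pt`, `*.ckpt`, archives, and the `imgs/**`, `*.jpg`, `*.png` assets) through Git LFS, so the regular git history only stores lightweight pointer files. Below is a minimal sketch of how such rules can be inspected programmatically; the helper names (`lfs_patterns`, `is_lfs_tracked`) and the simplified glob matching are illustrative assumptions, not part of the repository.

```python
from fnmatch import fnmatch
from pathlib import Path

def lfs_patterns(gitattributes=".gitattributes"):
    """Return the path patterns that .gitattributes sends through Git LFS."""
    patterns = []
    for line in Path(gitattributes).read_text().splitlines():
        fields = line.split()
        # A pattern is LFS-tracked when its attributes include `filter=lfs`.
        if len(fields) > 1 and "filter=lfs" in fields[1:]:
            patterns.append(fields[0])
    return patterns

def is_lfs_tracked(path, patterns):
    """Rough check of whether `path` matches any LFS pattern.

    Git's attribute matching has extra rules (last match wins, `**` semantics),
    so fnmatch is only an approximation for illustration.
    """
    name = Path(path).name
    return any(fnmatch(path, pat) or fnmatch(name, pat) for pat in patterns)

if __name__ == "__main__":
    pats = lfs_patterns()
    for candidate in ["model.safetensors", "imgs/demo.png", "OmniGen/model.py"]:
        print(candidate, "->", "LFS" if is_lfs_tracked(candidate, pats) else "plain git")
```

Under these rules the model weights and demo images stay out of ordinary git object storage, which keeps clones small while the diff below can still reference them.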
diff --git a/OmniGen/OmniGen/__init__.py b/OmniGen/OmniGen/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..39d1121f6a3f24bff2c01cc7156f6ba351846cb4 --- /dev/null +++ b/OmniGen/OmniGen/__init__.py @@ -0,0 +1,4 @@ +from .model import OmniGen +from .processor import OmniGenProcessor +from .scheduler import OmniGenScheduler +from .pipeline import OmniGenPipeline \ No newline at end of file diff --git a/OmniGen/OmniGen/__pycache__/__init__.cpython-310.pyc b/OmniGen/OmniGen/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0a69caaaeba57296cf52c4517d3e9440416aab4 Binary files /dev/null and b/OmniGen/OmniGen/__pycache__/__init__.cpython-310.pyc differ diff --git a/OmniGen/OmniGen/__pycache__/model.cpython-310.pyc b/OmniGen/OmniGen/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb8fb360f0b9e97748a855696b724859b30f57d5 Binary files /dev/null and b/OmniGen/OmniGen/__pycache__/model.cpython-310.pyc differ diff --git a/OmniGen/OmniGen/__pycache__/pipeline.cpython-310.pyc b/OmniGen/OmniGen/__pycache__/pipeline.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6bbcdf6b2630cf8dbe293cda757d233383f15d2c Binary files /dev/null and b/OmniGen/OmniGen/__pycache__/pipeline.cpython-310.pyc differ diff --git a/OmniGen/OmniGen/__pycache__/processor.cpython-310.pyc b/OmniGen/OmniGen/__pycache__/processor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..21a4b67abe8c8666ced13884e0ced13059cde76b Binary files /dev/null and b/OmniGen/OmniGen/__pycache__/processor.cpython-310.pyc differ diff --git a/OmniGen/OmniGen/__pycache__/scheduler.cpython-310.pyc b/OmniGen/OmniGen/__pycache__/scheduler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a53acb6b721e6ba7dd7d74ba336d710d04532db Binary files /dev/null and b/OmniGen/OmniGen/__pycache__/scheduler.cpython-310.pyc differ diff --git a/OmniGen/OmniGen/__pycache__/transformer.cpython-310.pyc b/OmniGen/OmniGen/__pycache__/transformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2efe8d17510e8b544162c6c72f25534e9ef9dbf2 Binary files /dev/null and b/OmniGen/OmniGen/__pycache__/transformer.cpython-310.pyc differ diff --git a/OmniGen/OmniGen/__pycache__/utils.cpython-310.pyc b/OmniGen/OmniGen/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7f02270f60c459f1b1010c29f5bff2cb9661ed9 Binary files /dev/null and b/OmniGen/OmniGen/__pycache__/utils.cpython-310.pyc differ diff --git a/OmniGen/OmniGen/model.py b/OmniGen/OmniGen/model.py new file mode 100644 index 0000000000000000000000000000000000000000..e280fceadf9264fe6852fa11bfe95b984bcf8bbb --- /dev/null +++ b/OmniGen/OmniGen/model.py @@ -0,0 +1,406 @@ +# The code is revised from DiT +import os +import torch +import torch.nn as nn +import numpy as np +import math +from typing import Dict + +from diffusers.loaders import PeftAdapterMixin +from timm.models.vision_transformer import PatchEmbed, Attention, Mlp +from huggingface_hub import snapshot_download +from safetensors.torch import load_file + +from OmniGen.transformer import Phi3Config, Phi3Transformer + + +def modulate(x, shift, scale): + return x * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1) + + +class TimestepEmbedder(nn.Module): + """ + Embeds scalar timesteps into vector representations. 
+ """ + def __init__(self, hidden_size, frequency_embedding_size=256): + super().__init__() + self.mlp = nn.Sequential( + nn.Linear(frequency_embedding_size, hidden_size, bias=True), + nn.SiLU(), + nn.Linear(hidden_size, hidden_size, bias=True), + ) + self.frequency_embedding_size = frequency_embedding_size + + @staticmethod + def timestep_embedding(t, dim, max_period=10000): + """ + Create sinusoidal timestep embeddings. + :param t: a 1-D Tensor of N indices, one per batch element. + These may be fractional. + :param dim: the dimension of the output. + :param max_period: controls the minimum frequency of the embeddings. + :return: an (N, D) Tensor of positional embeddings. + """ + # https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py + half = dim // 2 + freqs = torch.exp( + -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half + ).to(device=t.device) + args = t[:, None].float() * freqs[None] + embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) + if dim % 2: + embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1) + return embedding + + def forward(self, t, dtype=torch.float32): + t_freq = self.timestep_embedding(t, self.frequency_embedding_size).to(dtype) + t_emb = self.mlp(t_freq) + return t_emb + + +class FinalLayer(nn.Module): + """ + The final layer of DiT. + """ + def __init__(self, hidden_size, patch_size, out_channels): + super().__init__() + self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) + self.linear = nn.Linear(hidden_size, patch_size * patch_size * out_channels, bias=True) + self.adaLN_modulation = nn.Sequential( + nn.SiLU(), + nn.Linear(hidden_size, 2 * hidden_size, bias=True) + ) + + def forward(self, x, c): + shift, scale = self.adaLN_modulation(c).chunk(2, dim=1) + x = modulate(self.norm_final(x), shift, scale) + x = self.linear(x) + return x + + +def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False, extra_tokens=0, interpolation_scale=1.0, base_size=1): + """ + grid_size: int of the grid height and width return: pos_embed: [grid_size*grid_size, embed_dim] or + [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token) + """ + if isinstance(grid_size, int): + grid_size = (grid_size, grid_size) + + grid_h = np.arange(grid_size[0], dtype=np.float32) / (grid_size[0] / base_size) / interpolation_scale + grid_w = np.arange(grid_size[1], dtype=np.float32) / (grid_size[1] / base_size) / interpolation_scale + grid = np.meshgrid(grid_w, grid_h) # here w goes first + grid = np.stack(grid, axis=0) + + grid = grid.reshape([2, 1, grid_size[1], grid_size[0]]) + pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid) + if cls_token and extra_tokens > 0: + pos_embed = np.concatenate([np.zeros([extra_tokens, embed_dim]), pos_embed], axis=0) + return pos_embed + + +def get_2d_sincos_pos_embed_from_grid(embed_dim, grid): + assert embed_dim % 2 == 0 + + # use half of dimensions to encode grid_h + emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2) + emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2) + + emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D) + return emb + + +def get_1d_sincos_pos_embed_from_grid(embed_dim, pos): + """ + embed_dim: output dimension for each position + pos: a list of positions to be encoded: size (M,) + out: (M, D) + """ + assert embed_dim % 2 == 0 + omega = np.arange(embed_dim // 2, dtype=np.float64) + omega /= embed_dim / 2. + omega = 1. 
/ 10000**omega # (D/2,) + + pos = pos.reshape(-1) # (M,) + out = np.einsum('m,d->md', pos, omega) # (M, D/2), outer product + + emb_sin = np.sin(out) # (M, D/2) + emb_cos = np.cos(out) # (M, D/2) + + emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D) + return emb + + +class PatchEmbedMR(nn.Module): + """ 2D Image to Patch Embedding + """ + def __init__( + self, + patch_size: int = 2, + in_chans: int = 4, + embed_dim: int = 768, + bias: bool = True, + ): + super().__init__() + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size, bias=bias) + + def forward(self, x): + x = self.proj(x) + x = x.flatten(2).transpose(1, 2) # NCHW -> NLC + return x + + +class OmniGen(nn.Module, PeftAdapterMixin): + """ + Diffusion model with a Transformer backbone. + """ + def __init__( + self, + transformer_config: Phi3Config, + patch_size=2, + in_channels=4, + pe_interpolation: float = 1.0, + pos_embed_max_size: int = 192, + ): + super().__init__() + self.in_channels = in_channels + self.out_channels = in_channels + self.patch_size = patch_size + self.pos_embed_max_size = pos_embed_max_size + + hidden_size = transformer_config.hidden_size + + self.x_embedder = PatchEmbedMR(patch_size, in_channels, hidden_size, bias=True) + self.input_x_embedder = PatchEmbedMR(patch_size, in_channels, hidden_size, bias=True) + + self.time_token = TimestepEmbedder(hidden_size) + self.t_embedder = TimestepEmbedder(hidden_size) + + self.pe_interpolation = pe_interpolation + pos_embed = get_2d_sincos_pos_embed(hidden_size, pos_embed_max_size, interpolation_scale=self.pe_interpolation, base_size=64) + self.register_buffer("pos_embed", torch.from_numpy(pos_embed).float().unsqueeze(0), persistent=True) + + self.final_layer = FinalLayer(hidden_size, patch_size, self.out_channels) + + self.initialize_weights() + + self.llm = Phi3Transformer(config=transformer_config) + self.llm.config.use_cache = False + + @classmethod + def from_pretrained(cls, model_name): + if not os.path.exists(model_name): + cache_folder = os.getenv('HF_HUB_CACHE') + model_name = snapshot_download(repo_id=model_name, + cache_dir=cache_folder, + ignore_patterns=['flax_model.msgpack', 'rust_model.ot', 'tf_model.h5']) + config = Phi3Config.from_pretrained(model_name) + model = cls(config) + if os.path.exists(os.path.join(model_name, 'model.safetensors')): + print("Loading safetensors") + ckpt = load_file(os.path.join(model_name, 'model.safetensors')) + else: + ckpt = torch.load(os.path.join(model_name, 'model.pt'), map_location='cpu') + model.load_state_dict(ckpt) + return model + + def initialize_weights(self): + assert not hasattr(self, "llama") + + # Initialize transformer layers: + def _basic_init(module): + if isinstance(module, nn.Linear): + torch.nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + nn.init.constant_(module.bias, 0) + self.apply(_basic_init) + + # Initialize patch_embed like nn.Linear (instead of nn.Conv2d): + w = self.x_embedder.proj.weight.data + nn.init.xavier_uniform_(w.view([w.shape[0], -1])) + nn.init.constant_(self.x_embedder.proj.bias, 0) + + w = self.input_x_embedder.proj.weight.data + nn.init.xavier_uniform_(w.view([w.shape[0], -1])) + nn.init.constant_(self.x_embedder.proj.bias, 0) + + + # Initialize timestep embedding MLP: + nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02) + nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02) + nn.init.normal_(self.time_token.mlp[0].weight, std=0.02) + nn.init.normal_(self.time_token.mlp[2].weight, std=0.02) + + # Zero-out output 
layers: + nn.init.constant_(self.final_layer.adaLN_modulation[-1].weight, 0) + nn.init.constant_(self.final_layer.adaLN_modulation[-1].bias, 0) + nn.init.constant_(self.final_layer.linear.weight, 0) + nn.init.constant_(self.final_layer.linear.bias, 0) + + def unpatchify(self, x, h, w): + """ + x: (N, T, patch_size**2 * C) + imgs: (N, H, W, C) + """ + c = self.out_channels + + x = x.reshape(shape=(x.shape[0], h//self.patch_size, w//self.patch_size, self.patch_size, self.patch_size, c)) + x = torch.einsum('nhwpqc->nchpwq', x) + imgs = x.reshape(shape=(x.shape[0], c, h, w)) + return imgs + + + def cropped_pos_embed(self, height, width): + """Crops positional embeddings for SD3 compatibility.""" + if self.pos_embed_max_size is None: + raise ValueError("`pos_embed_max_size` must be set for cropping.") + + height = height // self.patch_size + width = width // self.patch_size + if height > self.pos_embed_max_size: + raise ValueError( + f"Height ({height}) cannot be greater than `pos_embed_max_size`: {self.pos_embed_max_size}." + ) + if width > self.pos_embed_max_size: + raise ValueError( + f"Width ({width}) cannot be greater than `pos_embed_max_size`: {self.pos_embed_max_size}." + ) + + top = (self.pos_embed_max_size - height) // 2 + left = (self.pos_embed_max_size - width) // 2 + spatial_pos_embed = self.pos_embed.reshape(1, self.pos_embed_max_size, self.pos_embed_max_size, -1) + spatial_pos_embed = spatial_pos_embed[:, top : top + height, left : left + width, :] + # print(top, top + height, left, left + width, spatial_pos_embed.size()) + spatial_pos_embed = spatial_pos_embed.reshape(1, -1, spatial_pos_embed.shape[-1]) + return spatial_pos_embed + + + def patch_multiple_resolutions(self, latents, padding_latent=None, is_input_images:bool=False): + if isinstance(latents, list): + return_list = False + if padding_latent is None: + padding_latent = [None] * len(latents) + return_list = True + patched_latents, num_tokens, shapes = [], [], [] + for latent, padding in zip(latents, padding_latent): + height, width = latent.shape[-2:] + if is_input_images: + latent = self.input_x_embedder(latent) + else: + latent = self.x_embedder(latent) + pos_embed = self.cropped_pos_embed(height, width) + latent = latent + pos_embed + if padding is not None: + latent = torch.cat([latent, padding], dim=-2) + patched_latents.append(latent) + + num_tokens.append(pos_embed.size(1)) + shapes.append([height, width]) + if not return_list: + latents = torch.cat(patched_latents, dim=0) + else: + latents = patched_latents + else: + height, width = latents.shape[-2:] + if is_input_images: + latents = self.input_x_embedder(latents) + else: + latents = self.x_embedder(latents) + pos_embed = self.cropped_pos_embed(height, width) + latents = latents + pos_embed + num_tokens = latents.size(1) + shapes = [height, width] + return latents, num_tokens, shapes + + + def forward(self, x, timestep, input_ids, input_img_latents, input_image_sizes, attention_mask, position_ids, padding_latent=None, past_key_values=None, return_past_key_values=True, offload_model:bool=False): + """ + + """ + input_is_list = isinstance(x, list) + x, num_tokens, shapes = self.patch_multiple_resolutions(x, padding_latent) + time_token = self.time_token(timestep, dtype=x[0].dtype).unsqueeze(1) + + if input_img_latents is not None: + input_latents, _, _ = self.patch_multiple_resolutions(input_img_latents, is_input_images=True) + if input_ids is not None: + condition_embeds = self.llm.embed_tokens(input_ids).clone() + input_img_inx = 0 + for b_inx in 
input_image_sizes.keys(): + for start_inx, end_inx in input_image_sizes[b_inx]: + condition_embeds[b_inx, start_inx: end_inx] = input_latents[input_img_inx] + input_img_inx += 1 + if input_img_latents is not None: + assert input_img_inx == len(input_latents) + + input_emb = torch.cat([condition_embeds, time_token, x], dim=1) + else: + input_emb = torch.cat([time_token, x], dim=1) + output = self.llm(inputs_embeds=input_emb, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, offload_model=offload_model) + output, past_key_values = output.last_hidden_state, output.past_key_values + if input_is_list: + image_embedding = output[:, -max(num_tokens):] + time_emb = self.t_embedder(timestep, dtype=x.dtype) + x = self.final_layer(image_embedding, time_emb) + latents = [] + for i in range(x.size(0)): + latent = x[i:i+1, :num_tokens[i]] + latent = self.unpatchify(latent, shapes[i][0], shapes[i][1]) + latents.append(latent) + else: + image_embedding = output[:, -num_tokens:] + time_emb = self.t_embedder(timestep, dtype=x.dtype) + x = self.final_layer(image_embedding, time_emb) + latents = self.unpatchify(x, shapes[0], shapes[1]) + + if return_past_key_values: + return latents, past_key_values + return latents + + @torch.no_grad() + def forward_with_cfg(self, x, timestep, input_ids, input_img_latents, input_image_sizes, attention_mask, position_ids, cfg_scale, use_img_cfg, img_cfg_scale, past_key_values, use_kv_cache, offload_model): + self.llm.config.use_cache = use_kv_cache + model_out, past_key_values = self.forward(x, timestep, input_ids, input_img_latents, input_image_sizes, attention_mask, position_ids, past_key_values=past_key_values, return_past_key_values=True, offload_model=offload_model) + if use_img_cfg: + cond, uncond, img_cond = torch.split(model_out, len(model_out) // 3, dim=0) + cond = uncond + img_cfg_scale * (img_cond - uncond) + cfg_scale * (cond - img_cond) + model_out = [cond, cond, cond] + else: + cond, uncond = torch.split(model_out, len(model_out) // 2, dim=0) + cond = uncond + cfg_scale * (cond - uncond) + model_out = [cond, cond] + + return torch.cat(model_out, dim=0), past_key_values + + + @torch.no_grad() + def forward_with_separate_cfg(self, x, timestep, input_ids, input_img_latents, input_image_sizes, attention_mask, position_ids, cfg_scale, use_img_cfg, img_cfg_scale, past_key_values, use_kv_cache, offload_model): + self.llm.config.use_cache = use_kv_cache + if past_key_values is None: + past_key_values = [None] * len(attention_mask) + + x = torch.split(x, len(x) // len(attention_mask), dim=0) + timestep = timestep.to(x[0].dtype) + timestep = torch.split(timestep, len(timestep) // len(input_ids), dim=0) + + model_out, pask_key_values = [], [] + for i in range(len(input_ids)): + temp_out, temp_pask_key_values = self.forward(x[i], timestep[i], input_ids[i], input_img_latents[i], input_image_sizes[i], attention_mask[i], position_ids[i], past_key_values=past_key_values[i], return_past_key_values=True, offload_model=offload_model) + model_out.append(temp_out) + pask_key_values.append(temp_pask_key_values) + + if len(model_out) == 3: + cond, uncond, img_cond = model_out + cond = uncond + img_cfg_scale * (img_cond - uncond) + cfg_scale * (cond - img_cond) + model_out = [cond, cond, cond] + elif len(model_out) == 2: + cond, uncond = model_out + cond = uncond + cfg_scale * (cond - uncond) + model_out = [cond, cond] + else: + return model_out[0] + + return torch.cat(model_out, dim=0), pask_key_values + + + + diff --git 
a/OmniGen/OmniGen/pipeline.py b/OmniGen/OmniGen/pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..c1b6d6bfa97b2a14e533f6943d5028aa5dbcbf0d --- /dev/null +++ b/OmniGen/OmniGen/pipeline.py @@ -0,0 +1,307 @@ +import os +import inspect +from typing import Any, Callable, Dict, List, Optional, Union +import gc + +from PIL import Image +import numpy as np +import torch +from huggingface_hub import snapshot_download +from peft import LoraConfig, PeftModel +from diffusers.models import AutoencoderKL +from diffusers.utils import ( + USE_PEFT_BACKEND, + is_torch_xla_available, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from safetensors.torch import load_file + +from OmniGen import OmniGen, OmniGenProcessor, OmniGenScheduler + + +logger = logging.get_logger(__name__) + +EXAMPLE_DOC_STRING = """ + Examples: + ```py + >>> from OmniGen import OmniGenPipeline + >>> pipe = OmniGenPipeline.from_pretrained( + ... "Shitao/OmniGen-v1" + ... ) + >>> prompt = "A woman holds a bouquet of flowers and faces the camera" + >>> image = pipe( + ... prompt, + ... guidance_scale=2.5, + ... num_inference_steps=50, + ... ).images[0] + >>> image.save("t2i.png") + ``` +""" + + +class OmniGenPipeline: + def __init__( + self, + vae: AutoencoderKL, + model: OmniGen, + processor: OmniGenProcessor, + ): + self.vae = vae + self.model = model + self.processor = processor + + if torch.cuda.is_available(): + self.device = torch.device("cuda") + elif torch.backends.mps.is_available(): + self.device = torch.device("mps") + else: + logger.info("No CUDA or MPS device detected, using CPU instead") + self.device = torch.device("cpu") + + self.model.to(torch.bfloat16) + self.model.eval() + self.vae.eval() + + self.model_cpu_offload = False + + @classmethod + def from_pretrained(cls, model_name, vae_path: str=None): + if not os.path.exists(model_name) or (not os.path.exists(os.path.join(model_name, 'model.safetensors')) and model_name == "Shitao/OmniGen-v1"): + logger.info("Model not found, downloading...") + cache_folder = os.getenv('HF_HUB_CACHE') + model_name = snapshot_download(repo_id=model_name, + cache_dir=cache_folder, + ignore_patterns=['flax_model.msgpack', 'rust_model.ot', 'tf_model.h5', 'model.pt']) + logger.info(f"Downloaded model to {model_name}") + model = OmniGen.from_pretrained(model_name) + processor = OmniGenProcessor.from_pretrained(model_name) + + if os.path.exists(os.path.join(model_name, "vae")): + vae = AutoencoderKL.from_pretrained(os.path.join(model_name, "vae")) + elif vae_path is not None: + vae = AutoencoderKL.from_pretrained(vae_path) + else: + logger.info(f"No VAE found in {model_name}, downloading stabilityai/sdxl-vae from HF") + vae = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae") + + return cls(vae, model, processor) + + def merge_lora(self, lora_path: str): + model = PeftModel.from_pretrained(self.model, lora_path) + model.merge_and_unload() + + self.model = model + + def to(self, device: Union[str, torch.device]): + if isinstance(device, str): + device = torch.device(device) + self.model.to(device) + self.vae.to(device) + self.device = device + + def vae_encode(self, x, dtype): + if self.vae.config.shift_factor is not None: + x = self.vae.encode(x).latent_dist.sample() + x = (x - self.vae.config.shift_factor) * self.vae.config.scaling_factor + else: + x = self.vae.encode(x).latent_dist.sample().mul_(self.vae.config.scaling_factor) + x = x.to(dtype) + return x + + def move_to_device(self, data): +
if isinstance(data, list): + return [x.to(self.device) for x in data] + return data.to(self.device) + + def enable_model_cpu_offload(self): + self.model_cpu_offload = True + self.model.to("cpu") + self.vae.to("cpu") + torch.cuda.empty_cache() # Clear VRAM + gc.collect() # Run garbage collection to free system RAM + + def disable_model_cpu_offload(self): + self.model_cpu_offload = False + self.model.to(self.device) + self.vae.to(self.device) + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]], + input_images: Union[List[str], List[List[str]]] = None, + height: int = 1024, + width: int = 1024, + num_inference_steps: int = 50, + guidance_scale: float = 3, + use_img_guidance: bool = True, + img_guidance_scale: float = 1.6, + max_input_image_size: int = 1024, + separate_cfg_infer: bool = True, + offload_model: bool = False, + use_kv_cache: bool = True, + offload_kv_cache: bool = True, + use_input_image_size_as_output: bool = False, + dtype: torch.dtype = torch.bfloat16, + seed: int = None, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + input_images (`List[str]` or `List[List[str]]`, *optional*): + The list of input images. We will replace the "<|image_i|>" in prompt with the 1-th image in list. + height (`int`, *optional*, defaults to 1024): + The height in pixels of the generated image. The number must be a multiple of 16. + width (`int`, *optional*, defaults to 1024): + The width in pixels of the generated image. The number must be a multiple of 16. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 4.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + use_img_guidance (`bool`, *optional*, defaults to True): + Defined as equation 3 in [Instrucpix2pix](https://arxiv.org/pdf/2211.09800). + img_guidance_scale (`float`, *optional*, defaults to 1.6): + Defined as equation 3 in [Instrucpix2pix](https://arxiv.org/pdf/2211.09800). + max_input_image_size (`int`, *optional*, defaults to 1024): the maximum size of input image, which will be used to crop the input image to the maximum size + separate_cfg_infer (`bool`, *optional*, defaults to False): + Perform inference on images with different guidance separately; this can save memory when generating images of large size at the expense of slower inference. 
+ use_kv_cache (`bool`, *optional*, defaults to True): enable kv cache to speed up the inference + offload_kv_cache (`bool`, *optional*, defaults to True): offload the cached key and value to cpu, which can save memory but slow down the generation silightly + offload_model (`bool`, *optional*, defaults to False): offload the model to cpu, which can save memory but slow down the generation + use_input_image_size_as_output (bool, defaults to False): whether to use the input image size as the output image size, which can be used for single-image input, e.g., image editing task + seed (`int`, *optional*): + A random seed for generating output. + dtype (`torch.dtype`, *optional*, defaults to `torch.bfloat16`): + data type for the model + Examples: + + Returns: + A list with the generated images. + """ + # check inputs: + if use_input_image_size_as_output: + assert isinstance(prompt, str) and len(input_images) == 1, "if you want to make sure the output image have the same size as the input image, please only input one image instead of multiple input images" + else: + assert height%16 == 0 and width%16 == 0, "The height and width must be a multiple of 16." + if input_images is None: + use_img_guidance = False + if isinstance(prompt, str): + prompt = [prompt] + input_images = [input_images] if input_images is not None else None + + # set model and processor + if max_input_image_size != self.processor.max_image_size: + self.processor = OmniGenProcessor(self.processor.text_tokenizer, max_image_size=max_input_image_size) + if offload_model: + self.enable_model_cpu_offload() + else: + self.disable_model_cpu_offload() + + input_data = self.processor(prompt, input_images, height=height, width=width, use_img_cfg=use_img_guidance, separate_cfg_input=separate_cfg_infer, use_input_image_size_as_output=use_input_image_size_as_output) + + num_prompt = len(prompt) + num_cfg = 2 if use_img_guidance else 1 + if use_input_image_size_as_output: + if separate_cfg_infer: + height, width = input_data['input_pixel_values'][0][0].shape[-2:] + else: + height, width = input_data['input_pixel_values'][0].shape[-2:] + latent_size_h, latent_size_w = height//8, width//8 + + if seed is not None: + generator = torch.Generator(device=self.device).manual_seed(seed) + else: + generator = None + latents = torch.randn(num_prompt, 4, latent_size_h, latent_size_w, device=self.device, generator=generator) + latents = torch.cat([latents]*(1+num_cfg), 0).to(dtype) + + if input_images is not None and self.model_cpu_offload: self.vae.to(self.device) + input_img_latents = [] + if separate_cfg_infer: + for temp_pixel_values in input_data['input_pixel_values']: + temp_input_latents = [] + for img in temp_pixel_values: + img = self.vae_encode(img.to(self.device), dtype) + temp_input_latents.append(img) + input_img_latents.append(temp_input_latents) + else: + for img in input_data['input_pixel_values']: + img = self.vae_encode(img.to(self.device), dtype) + input_img_latents.append(img) + if input_images is not None and self.model_cpu_offload: + self.vae.to('cpu') + torch.cuda.empty_cache() # Clear VRAM + gc.collect() # Run garbage collection to free system RAM + + model_kwargs = dict(input_ids=self.move_to_device(input_data['input_ids']), + input_img_latents=input_img_latents, + input_image_sizes=input_data['input_image_sizes'], + attention_mask=self.move_to_device(input_data["attention_mask"]), + position_ids=self.move_to_device(input_data["position_ids"]), + cfg_scale=guidance_scale, + img_cfg_scale=img_guidance_scale, + 
use_img_cfg=use_img_guidance, + use_kv_cache=use_kv_cache, + offload_model=offload_model, + ) + + if separate_cfg_infer: + func = self.model.forward_with_separate_cfg + else: + func = self.model.forward_with_cfg + self.model.to(dtype) + + if self.model_cpu_offload: + for name, param in self.model.named_parameters(): + if 'layers' in name and 'layers.0' not in name: + param.data = param.data.cpu() + else: + param.data = param.data.to(self.device) + for buffer_name, buffer in self.model.named_buffers(): + setattr(self.model, buffer_name, buffer.to(self.device)) + # else: + # self.model.to(self.device) + + scheduler = OmniGenScheduler(num_steps=num_inference_steps) + samples = scheduler(latents, func, model_kwargs, use_kv_cache=use_kv_cache, offload_kv_cache=offload_kv_cache) + samples = samples.chunk((1+num_cfg), dim=0)[0] + + if self.model_cpu_offload: + self.model.to('cpu') + torch.cuda.empty_cache() + gc.collect() + + self.vae.to(self.device) + samples = samples.to(torch.float32) + if self.vae.config.shift_factor is not None: + samples = samples / self.vae.config.scaling_factor + self.vae.config.shift_factor + else: + samples = samples / self.vae.config.scaling_factor + samples = self.vae.decode(samples).sample + + if self.model_cpu_offload: + self.vae.to('cpu') + torch.cuda.empty_cache() + gc.collect() + + output_samples = (samples * 0.5 + 0.5).clamp(0, 1)*255 + output_samples = output_samples.permute(0, 2, 3, 1).to("cpu", dtype=torch.uint8).numpy() + output_images = [] + for i, sample in enumerate(output_samples): + output_images.append(Image.fromarray(sample)) + + torch.cuda.empty_cache() # Clear VRAM + gc.collect() # Run garbage collection to free system RAM + return output_images \ No newline at end of file diff --git a/OmniGen/OmniGen/processor.py b/OmniGen/OmniGen/processor.py new file mode 100644 index 0000000000000000000000000000000000000000..9b3dbb65f7cc9c76869b59a22f9f42c5647e0691 --- /dev/null +++ b/OmniGen/OmniGen/processor.py @@ -0,0 +1,338 @@ +import os +import re +from typing import Dict, List +import json + +import torch +import numpy as np +import random +from PIL import Image +from torchvision import transforms +from transformers import AutoTokenizer +from huggingface_hub import snapshot_download + +from OmniGen.utils import ( + create_logger, + update_ema, + requires_grad, + center_crop_arr, + crop_arr, +) + + + + +class OmniGenProcessor: + def __init__(self, + text_tokenizer, + max_image_size: int=1024): + self.text_tokenizer = text_tokenizer + self.max_image_size = max_image_size + + self.image_transform = transforms.Compose([ + transforms.Lambda(lambda pil_image: crop_arr(pil_image, max_image_size)), + transforms.ToTensor(), + transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True) + ]) + + self.collator = OmniGenCollator() + self.separate_collator = OmniGenSeparateCollator() + + @classmethod + def from_pretrained(cls, model_name): + if not os.path.exists(model_name): + cache_folder = os.getenv('HF_HUB_CACHE') + model_name = snapshot_download(repo_id=model_name, + cache_dir=cache_folder, + allow_patterns="*.json") + text_tokenizer = AutoTokenizer.from_pretrained(model_name) + + return cls(text_tokenizer) + + + def process_image(self, image): + image = Image.open(image).convert('RGB') + return self.image_transform(image) + + def process_multi_modal_prompt(self, text, input_images): + text = self.add_prefix_instruction(text) + if input_images is None or len(input_images) == 0: + model_inputs = self.text_tokenizer(text) + return {"input_ids": 
model_inputs.input_ids, "pixel_values": None, "image_sizes": None} + + pattern = r"<\|image_\d+\|>" + prompt_chunks = [self.text_tokenizer(chunk).input_ids for chunk in re.split(pattern, text)] + + for i in range(1, len(prompt_chunks)): + if prompt_chunks[i][0] == 1: + prompt_chunks[i] = prompt_chunks[i][1:] + + image_tags = re.findall(pattern, text) + image_ids = [int(s.split("|")[1].split("_")[-1]) for s in image_tags] + + unique_image_ids = sorted(list(set(image_ids))) + assert unique_image_ids == list(range(1, len(unique_image_ids)+1)), f"image_ids must start from 1, and must be continuous int, e.g. [1, 2, 3], cannot be {unique_image_ids}" + # total images must be the same as the number of image tags + assert len(unique_image_ids) == len(input_images), f"total images must be the same as the number of image tags, got {len(unique_image_ids)} image tags and {len(input_images)} images" + + input_images = [input_images[x-1] for x in image_ids] + + all_input_ids = [] + img_inx = [] + idx = 0 + for i in range(len(prompt_chunks)): + all_input_ids.extend(prompt_chunks[i]) + if i != len(prompt_chunks) -1: + start_inx = len(all_input_ids) + size = input_images[i].size(-2) * input_images[i].size(-1) // 16 // 16 + img_inx.append([start_inx, start_inx+size]) + all_input_ids.extend([0]*size) + + return {"input_ids": all_input_ids, "pixel_values": input_images, "image_sizes": img_inx} + + + def add_prefix_instruction(self, prompt): + user_prompt = '<|user|>\n' + generation_prompt = 'Generate an image according to the following instructions\n' + assistant_prompt = '<|assistant|>\n<|diffusion|>' + prompt_suffix = "<|end|>\n" + prompt = f"{user_prompt}{generation_prompt}{prompt}{prompt_suffix}{assistant_prompt}" + return prompt + + + def __call__(self, + instructions: List[str], + input_images: List[List[str]] = None, + height: int = 1024, + width: int = 1024, + negative_prompt: str = "low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers.", + use_img_cfg: bool = True, + separate_cfg_input: bool = False, + use_input_image_size_as_output: bool=False, + ) -> Dict: + + if input_images is None: + use_img_cfg = False + if isinstance(instructions, str): + instructions = [instructions] + input_images = [input_images] + + input_data = [] + for i in range(len(instructions)): + cur_instruction = instructions[i] + cur_input_images = None if input_images is None else input_images[i] + if cur_input_images is not None and len(cur_input_images) > 0: + cur_input_images = [self.process_image(x) for x in cur_input_images] + else: + cur_input_images = None + assert "<|image_1|>" not in cur_instruction + + mllm_input = self.process_multi_modal_prompt(cur_instruction, cur_input_images) + + + neg_mllm_input, img_cfg_mllm_input = None, None + neg_mllm_input = self.process_multi_modal_prompt(negative_prompt, None) + if use_img_cfg: + if cur_input_images is not None and len(cur_input_images) >= 1: + img_cfg_prompt = [f"<|image_{i+1}|>" for i in range(len(cur_input_images))] + img_cfg_mllm_input = self.process_multi_modal_prompt(" ".join(img_cfg_prompt), cur_input_images) + else: + img_cfg_mllm_input = neg_mllm_input + + if use_input_image_size_as_output: + input_data.append((mllm_input, neg_mllm_input, img_cfg_mllm_input, 
[mllm_input['pixel_values'][0].size(-2), mllm_input['pixel_values'][0].size(-1)])) + else: + input_data.append((mllm_input, neg_mllm_input, img_cfg_mllm_input, [height, width])) + + if separate_cfg_input: + return self.separate_collator(input_data) + return self.collator(input_data) + + + + +class OmniGenCollator: + def __init__(self, pad_token_id=2, hidden_size=3072): + self.pad_token_id = pad_token_id + self.hidden_size = hidden_size + + def create_position(self, attention_mask, num_tokens_for_output_images): + position_ids = [] + text_length = attention_mask.size(-1) + img_length = max(num_tokens_for_output_images) + for mask in attention_mask: + temp_l = torch.sum(mask) + temp_position = [0]*(text_length-temp_l) + [i for i in range(temp_l+img_length+1)] # we add a time embedding into the sequence, so add one more token + position_ids.append(temp_position) + return torch.LongTensor(position_ids) + + def create_mask(self, attention_mask, num_tokens_for_output_images): + extended_mask = [] + padding_images = [] + text_length = attention_mask.size(-1) + img_length = max(num_tokens_for_output_images) + seq_len = text_length + img_length + 1 # we add a time embedding into the sequence, so add one more token + inx = 0 + for mask in attention_mask: + temp_l = torch.sum(mask) + pad_l = text_length - temp_l + + temp_mask = torch.tril(torch.ones(size=(temp_l+1, temp_l+1))) + + image_mask = torch.zeros(size=(temp_l+1, img_length)) + temp_mask = torch.cat([temp_mask, image_mask], dim=-1) + + image_mask = torch.ones(size=(img_length, temp_l+img_length+1)) + temp_mask = torch.cat([temp_mask, image_mask], dim=0) + + if pad_l > 0: + pad_mask = torch.zeros(size=(temp_l+1+img_length, pad_l)) + temp_mask = torch.cat([pad_mask, temp_mask], dim=-1) + + pad_mask = torch.ones(size=(pad_l, seq_len)) + temp_mask = torch.cat([pad_mask, temp_mask], dim=0) + + true_img_length = num_tokens_for_output_images[inx] + pad_img_length = img_length - true_img_length + if pad_img_length > 0: + temp_mask[:, -pad_img_length:] = 0 + temp_padding_imgs = torch.zeros(size=(1, pad_img_length, self.hidden_size)) + else: + temp_padding_imgs = None + + extended_mask.append(temp_mask.unsqueeze(0)) + padding_images.append(temp_padding_imgs) + inx += 1 + return torch.cat(extended_mask, dim=0), padding_images + + def adjust_attention_for_input_images(self, attention_mask, image_sizes): + for b_inx in image_sizes.keys(): + for start_inx, end_inx in image_sizes[b_inx]: + attention_mask[b_inx][start_inx:end_inx, start_inx:end_inx] = 1 + + return attention_mask + + def pad_input_ids(self, input_ids, image_sizes): + max_l = max([len(x) for x in input_ids]) + padded_ids = [] + attention_mask = [] + new_image_sizes = [] + + for i in range(len(input_ids)): + temp_ids = input_ids[i] + temp_l = len(temp_ids) + pad_l = max_l - temp_l + if pad_l == 0: + attention_mask.append([1]*max_l) + padded_ids.append(temp_ids) + else: + attention_mask.append([0]*pad_l+[1]*temp_l) + padded_ids.append([self.pad_token_id]*pad_l+temp_ids) + + if i in image_sizes: + new_inx = [] + for old_inx in image_sizes[i]: + new_inx.append([x+pad_l for x in old_inx]) + image_sizes[i] = new_inx + + return torch.LongTensor(padded_ids), torch.LongTensor(attention_mask), image_sizes + + + def process_mllm_input(self, mllm_inputs, target_img_size): + num_tokens_for_output_images = [] + for img_size in target_img_size: + num_tokens_for_output_images.append(img_size[0]*img_size[1]//16//16) + + pixel_values, image_sizes = [], {} + b_inx = 0 + for x in mllm_inputs: + if 
x['pixel_values'] is not None: + pixel_values.extend(x['pixel_values']) + for size in x['image_sizes']: + if b_inx not in image_sizes: + image_sizes[b_inx] = [size] + else: + image_sizes[b_inx].append(size) + b_inx += 1 + pixel_values = [x.unsqueeze(0) for x in pixel_values] + + + input_ids = [x['input_ids'] for x in mllm_inputs] + padded_input_ids, attention_mask, image_sizes = self.pad_input_ids(input_ids, image_sizes) + position_ids = self.create_position(attention_mask, num_tokens_for_output_images) + attention_mask, padding_images = self.create_mask(attention_mask, num_tokens_for_output_images) + attention_mask = self.adjust_attention_for_input_images(attention_mask, image_sizes) + + return padded_input_ids, position_ids, attention_mask, padding_images, pixel_values, image_sizes + + + def __call__(self, features): + mllm_inputs = [f[0] for f in features] + cfg_mllm_inputs = [f[1] for f in features] + img_cfg_mllm_input = [f[2] for f in features] + target_img_size = [f[3] for f in features] + + + if img_cfg_mllm_input[0] is not None: + mllm_inputs = mllm_inputs + cfg_mllm_inputs + img_cfg_mllm_input + target_img_size = target_img_size + target_img_size + target_img_size + else: + mllm_inputs = mllm_inputs + cfg_mllm_inputs + target_img_size = target_img_size + target_img_size + + + all_padded_input_ids, all_position_ids, all_attention_mask, all_padding_images, all_pixel_values, all_image_sizes = self.process_mllm_input(mllm_inputs, target_img_size) + + data = {"input_ids": all_padded_input_ids, + "attention_mask": all_attention_mask, + "position_ids": all_position_ids, + "input_pixel_values": all_pixel_values, + "input_image_sizes": all_image_sizes, + "padding_images": all_padding_images, + } + return data + + +class OmniGenSeparateCollator(OmniGenCollator): + def __call__(self, features): + mllm_inputs = [f[0] for f in features] + cfg_mllm_inputs = [f[1] for f in features] + img_cfg_mllm_input = [f[2] for f in features] + target_img_size = [f[3] for f in features] + + all_padded_input_ids, all_attention_mask, all_position_ids, all_pixel_values, all_image_sizes, all_padding_images = [], [], [], [], [], [] + + + padded_input_ids, position_ids, attention_mask, padding_images, pixel_values, image_sizes = self.process_mllm_input(mllm_inputs, target_img_size) + all_padded_input_ids.append(padded_input_ids) + all_attention_mask.append(attention_mask) + all_position_ids.append(position_ids) + all_pixel_values.append(pixel_values) + all_image_sizes.append(image_sizes) + all_padding_images.append(padding_images) + + if cfg_mllm_inputs[0] is not None: + padded_input_ids, position_ids, attention_mask, padding_images, pixel_values, image_sizes = self.process_mllm_input(cfg_mllm_inputs, target_img_size) + all_padded_input_ids.append(padded_input_ids) + all_attention_mask.append(attention_mask) + all_position_ids.append(position_ids) + all_pixel_values.append(pixel_values) + all_image_sizes.append(image_sizes) + all_padding_images.append(padding_images) + if img_cfg_mllm_input[0] is not None: + padded_input_ids, position_ids, attention_mask, padding_images, pixel_values, image_sizes = self.process_mllm_input(img_cfg_mllm_input, target_img_size) + all_padded_input_ids.append(padded_input_ids) + all_attention_mask.append(attention_mask) + all_position_ids.append(position_ids) + all_pixel_values.append(pixel_values) + all_image_sizes.append(image_sizes) + all_padding_images.append(padding_images) + + data = {"input_ids": all_padded_input_ids, + "attention_mask": all_attention_mask, + "position_ids": 
all_position_ids, + "input_pixel_values": all_pixel_values, + "input_image_sizes": all_image_sizes, + "padding_images": all_padding_images, + } + return data diff --git a/OmniGen/OmniGen/scheduler.py b/OmniGen/OmniGen/scheduler.py new file mode 100644 index 0000000000000000000000000000000000000000..77247e11650ef8feb1d06460bf29049f0178a763 --- /dev/null +++ b/OmniGen/OmniGen/scheduler.py @@ -0,0 +1,181 @@ +from tqdm import tqdm +from typing import Optional, Dict, Any, Tuple, List +import gc + +import torch +from transformers.cache_utils import Cache, DynamicCache, OffloadedCache + + + +class OmniGenCache(DynamicCache): + def __init__(self, + num_tokens_for_img: int, offload_kv_cache: bool=False) -> None: + if not torch.cuda.is_available(): + raise RuntimeError("OffloadedCache can only be used with a GPU") + super().__init__() + self.original_device = [] + self.prefetch_stream = torch.cuda.Stream() + self.num_tokens_for_img = num_tokens_for_img + self.offload_kv_cache = offload_kv_cache + + def prefetch_layer(self, layer_idx: int): + "Starts prefetching the next layer cache" + if layer_idx < len(self): + with torch.cuda.stream(self.prefetch_stream): + # Prefetch next layer tensors to GPU + device = self.original_device[layer_idx] + self.key_cache[layer_idx] = self.key_cache[layer_idx].to(device, non_blocking=True) + self.value_cache[layer_idx] = self.value_cache[layer_idx].to(device, non_blocking=True) + + + def evict_previous_layer(self, layer_idx: int): + "Moves the previous layer cache to the CPU" + if len(self) > 2: + # We do it on the default stream so it occurs after all earlier computations on these tensors are done + if layer_idx == 0: + prev_layer_idx = -1 + else: + prev_layer_idx = (layer_idx - 1) % len(self) + self.key_cache[prev_layer_idx] = self.key_cache[prev_layer_idx].to("cpu", non_blocking=True) + self.value_cache[prev_layer_idx] = self.value_cache[prev_layer_idx].to("cpu", non_blocking=True) + + + def __getitem__(self, layer_idx: int) -> List[Tuple[torch.Tensor]]: + "Gets the cache for this layer to the device. Prefetches the next and evicts the previous layer." + if layer_idx < len(self): + if self.offload_kv_cache: + # Evict the previous layer if necessary + torch.cuda.current_stream().synchronize() + self.evict_previous_layer(layer_idx) + # Load current layer cache to its original device if not already there + original_device = self.original_device[layer_idx] + # self.prefetch_stream.synchronize(original_device) + torch.cuda.synchronize(self.prefetch_stream) + key_tensor = self.key_cache[layer_idx] + value_tensor = self.value_cache[layer_idx] + + # Prefetch the next layer + self.prefetch_layer((layer_idx + 1) % len(self)) + else: + key_tensor = self.key_cache[layer_idx] + value_tensor = self.value_cache[layer_idx] + return (key_tensor, value_tensor) + else: + raise KeyError(f"Cache only has {len(self)} layers, attempted to access layer with index {layer_idx}") + + + def update( + self, + key_states: torch.Tensor, + value_states: torch.Tensor, + layer_idx: int, + cache_kwargs: Optional[Dict[str, Any]] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`. + Parameters: + key_states (`torch.Tensor`): + The new key states to cache. + value_states (`torch.Tensor`): + The new value states to cache. + layer_idx (`int`): + The index of the layer to cache the states for. + cache_kwargs (`Dict[str, Any]`, `optional`): + Additional arguments for the cache subclass. 
No additional arguments are used in `OffloadedCache`. + Return: + A tuple containing the updated key and value states. + """ + # Update the cache + if len(self.key_cache) < layer_idx: + raise ValueError("OffloadedCache does not support model usage where layers are skipped. Use DynamicCache.") + elif len(self.key_cache) == layer_idx: + # only cache the states for condition tokens + key_states = key_states[..., :-(self.num_tokens_for_img+1), :] + value_states = value_states[..., :-(self.num_tokens_for_img+1), :] + + # Update the number of seen tokens + if layer_idx == 0: + self._seen_tokens += key_states.shape[-2] + + self.key_cache.append(key_states) + self.value_cache.append(value_states) + self.original_device.append(key_states.device) + if self.offload_kv_cache: + self.evict_previous_layer(layer_idx) + return self.key_cache[layer_idx], self.value_cache[layer_idx] + else: + # only cache the states for condition tokens + key_tensor, value_tensor = self[layer_idx] + k = torch.cat([key_tensor, key_states], dim=-2) + v = torch.cat([value_tensor, value_states], dim=-2) + return k, v + + + +class OmniGenScheduler: + def __init__(self, num_steps: int=50, time_shifting_factor: int=1): + self.num_steps = num_steps + self.time_shift = time_shifting_factor + + t = torch.linspace(0, 1, num_steps+1) + t = t / (t + time_shifting_factor - time_shifting_factor * t) + self.sigma = t + + def crop_kv_cache(self, past_key_values, num_tokens_for_img): + # return + crop_past_key_values = () + for layer_idx in range(len(past_key_values)): + key_states, value_states = past_key_values[layer_idx][:2] + crop_past_key_values += ((key_states[..., :-(num_tokens_for_img+1), :], value_states[..., :-(num_tokens_for_img+1), :], ),) + # return crop_past_key_values + return DynamicCache.from_legacy_cache(crop_past_key_values) + + def crop_position_ids_for_cache(self, position_ids, num_tokens_for_img): + if isinstance(position_ids, list): + for i in range(len(position_ids)): + position_ids[i] = position_ids[i][:, -(num_tokens_for_img+1):] + else: + position_ids = position_ids[:, -(num_tokens_for_img+1):] + return position_ids + + def crop_attention_mask_for_cache(self, attention_mask, num_tokens_for_img): + if isinstance(attention_mask, list): + return [x[..., -(num_tokens_for_img+1):, :] for x in attention_mask] + return attention_mask[..., -(num_tokens_for_img+1):, :] + + def crop_cache(self, cache, num_tokens_for_img): + for i in range(len(cache.key_cache)): + cache.key_cache[i] = cache.key_cache[i][..., :-(num_tokens_for_img+1), :] + cache.value_cache[i] = cache.value_cache[i][..., :-(num_tokens_for_img+1), :] + + return cache + + def __call__(self, z, func, model_kwargs, use_kv_cache: bool=True, offload_kv_cache: bool=True): + num_tokens_for_img = z.size(-1)*z.size(-2) // 4 + if isinstance(model_kwargs['input_ids'], list): + cache = [OmniGenCache(num_tokens_for_img, offload_kv_cache) for _ in range(len(model_kwargs['input_ids']))] if use_kv_cache else None + else: + cache = OmniGenCache(num_tokens_for_img, offload_kv_cache) if use_kv_cache else None + results = {} + for i in tqdm(range(self.num_steps)): + timesteps = torch.zeros(size=(len(z), )).to(z.device) + self.sigma[i] + pred, cache = func(z, timesteps, past_key_values=cache, **model_kwargs) + sigma_next = self.sigma[i+1] + sigma = self.sigma[i] + z = z + (sigma_next - sigma) * pred + if i == 0 and use_kv_cache: + num_tokens_for_img = z.size(-1)*z.size(-2) // 4 + if isinstance(cache, list): + model_kwargs['input_ids'] = [None] * len(cache) + else: + 
model_kwargs['input_ids'] = None + + model_kwargs['position_ids'] = self.crop_position_ids_for_cache(model_kwargs['position_ids'], num_tokens_for_img) + model_kwargs['attention_mask'] = self.crop_attention_mask_for_cache(model_kwargs['attention_mask'], num_tokens_for_img) + + del cache + torch.cuda.empty_cache() + gc.collect() + return z + + \ No newline at end of file diff --git a/OmniGen/OmniGen/train.py b/OmniGen/OmniGen/train.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/OmniGen/OmniGen/train_helper/__init__.py b/OmniGen/OmniGen/train_helper/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7463de3235eadeb2f0398cd3d53028da3a3db736 --- /dev/null +++ b/OmniGen/OmniGen/train_helper/__init__.py @@ -0,0 +1,2 @@ +from .data import DatasetFromJson, TrainDataCollator +from .loss import training_losses \ No newline at end of file diff --git a/OmniGen/OmniGen/train_helper/data.py b/OmniGen/OmniGen/train_helper/data.py new file mode 100644 index 0000000000000000000000000000000000000000..fa0bfb11a2ac22f679e6330880833aa6798d1e8f --- /dev/null +++ b/OmniGen/OmniGen/train_helper/data.py @@ -0,0 +1,116 @@ +import os +import datasets +from datasets import load_dataset, ClassLabel, concatenate_datasets +import torch +import numpy as np +import random +from PIL import Image +import json +import copy +# import torchvision.transforms as T +from torchvision import transforms +import pickle +import re + +from OmniGen import OmniGenProcessor +from OmniGen.processor import OmniGenCollator + + +class DatasetFromJson(torch.utils.data.Dataset): + def __init__( + self, + json_file: str, + image_path: str, + processer: OmniGenProcessor, + image_transform, + max_input_length_limit: int = 18000, + condition_dropout_prob: float = 0.1, + keep_raw_resolution: bool = True, + ): + + self.image_transform = image_transform + self.processer = processer + self.condition_dropout_prob = condition_dropout_prob + self.max_input_length_limit = max_input_length_limit + self.keep_raw_resolution = keep_raw_resolution + + self.data = load_dataset('json', data_files=json_file)['train'] + self.image_path = image_path + + def process_image(self, image_file): + if self.image_path is not None: + image_file = os.path.join(self.image_path, image_file) + image = Image.open(image_file).convert('RGB') + return self.image_transform(image) + + def get_example(self, index): + example = self.data[index] + + instruction, input_images, output_image = example['instruction'], example['input_images'], example['output_image'] + if random.random() < self.condition_dropout_prob: + instruction = '' + input_images = None + if input_images is not None: + input_images = [self.process_image(x) for x in input_images] + mllm_input = self.processer.process_multi_modal_prompt(instruction, input_images) + + output_image = self.process_image(output_image) + + return (mllm_input, output_image) + + + def __getitem__(self, index): + return self.get_example(index) + for _ in range(8): + try: + mllm_input, output_image = self.get_example(index) + if len(mllm_input['input_ids']) > self.max_input_length_limit: + raise RuntimeError(f"cur number of tokens={len(mllm_input['input_ids'])}, larger than max_input_length_limit={self.max_input_length_limit}") + return mllm_input, output_image + except Exception as e: + print("error when loading data: ", e) + print(self.data[index]) + index = random.randint(0, len(self.data)-1) + raise RuntimeError("Too many bad data.") + + 
+ def __len__(self): + return len(self.data) + + + +class TrainDataCollator(OmniGenCollator): + def __init__(self, pad_token_id: int, hidden_size: int, keep_raw_resolution: bool): + self.pad_token_id = pad_token_id + self.hidden_size = hidden_size + self.keep_raw_resolution = keep_raw_resolution + + def __call__(self, features): + mllm_inputs = [f[0] for f in features] + + output_images = [f[1].unsqueeze(0) for f in features] + target_img_size = [[x.size(-2), x.size(-1)] for x in output_images] + + all_padded_input_ids, all_position_ids, all_attention_mask, all_padding_images, all_pixel_values, all_image_sizes = self.process_mllm_input(mllm_inputs, target_img_size) + + if not self.keep_raw_resolution: + output_images = torch.cat(output_images, dim=0) + if len(all_pixel_values) > 0: + all_pixel_values = torch.cat(all_pixel_values, dim=0) + else: + all_pixel_values = None + + data = {"input_ids": all_padded_input_ids, + "attention_mask": all_attention_mask, + "position_ids": all_position_ids, + "input_pixel_values": all_pixel_values, + "input_image_sizes": all_image_sizes, + "padding_images": all_padding_images, + "output_images": output_images, + } + return data + + + + + diff --git a/OmniGen/OmniGen/train_helper/loss.py b/OmniGen/OmniGen/train_helper/loss.py new file mode 100644 index 0000000000000000000000000000000000000000..42be91885d6d1d410d631a681a5eab5b2b5f650d --- /dev/null +++ b/OmniGen/OmniGen/train_helper/loss.py @@ -0,0 +1,68 @@ +import torch + + +def sample_x0(x1): + """Sampling x0 & t based on shape of x1 (if needed) + Args: + x1 - data point; [batch, *dim] + """ + if isinstance(x1, (list, tuple)): + x0 = [torch.randn_like(img_start) for img_start in x1] + else: + x0 = torch.randn_like(x1) + + return x0 + +def sample_timestep(x1): + u = torch.normal(mean=0.0, std=1.0, size=(len(x1),)) + t = 1 / (1 + torch.exp(-u)) + t = t.to(x1[0]) + return t + + +def training_losses(model, x1, model_kwargs=None, snr_type='uniform'): + """Loss for training the score model + Args: + - model: backbone model; could be score, noise, or velocity + - x1: datapoint + - model_kwargs: additional arguments for the model + """ + if model_kwargs is None: + model_kwargs = {} + + B = len(x1) + + x0 = sample_x0(x1) + t = sample_timestep(x1) + + if isinstance(x1, (list, tuple)): + xt = [t[i] * x1[i] + (1 - t[i]) * x0[i] for i in range(B)] + ut = [x1[i] - x0[i] for i in range(B)] + else: + dims = [1] * (len(x1.size()) - 1) + t_ = t.view(t.size(0), *dims) + xt = t_ * x1 + (1 - t_) * x0 + ut = x1 - x0 + + model_output = model(xt, t, **model_kwargs) + + terms = {} + + if isinstance(x1, (list, tuple)): + assert len(model_output) == len(ut) == len(x1) + terms["loss"] = torch.stack( + [((ut[i] - model_output[i]) ** 2).mean() for i in range(B)], + dim=0, + ) + else: + terms["loss"] = mean_flat(((model_output - ut) ** 2)) + + return terms + + +def mean_flat(x): + """ + Take the mean over all non-batch dimensions.
+ """ + return torch.mean(x, dim=list(range(1, len(x.size())))) diff --git a/OmniGen/OmniGen/transformer.py b/OmniGen/OmniGen/transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..e105acbd59ca9279dde2667f1a177193413be252 --- /dev/null +++ b/OmniGen/OmniGen/transformer.py @@ -0,0 +1,194 @@ +import math +import warnings +from typing import List, Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss +from huggingface_hub import snapshot_download + +from transformers.modeling_outputs import ( + BaseModelOutputWithPast, + CausalLMOutputWithPast, + SequenceClassifierOutputWithPast, + TokenClassifierOutput, +) +from transformers.modeling_utils import PreTrainedModel +from transformers import Phi3Config, Phi3Model +from transformers.cache_utils import Cache, DynamicCache, StaticCache +from transformers.utils import logging + +logger = logging.get_logger(__name__) + + +class Phi3Transformer(Phi3Model): + """ + Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Phi3DecoderLayer`] + We only modified the attention mask + Args: + config: Phi3Config + """ + def prefetch_layer(self, layer_idx: int, device: torch.device): + "Starts prefetching the next layer cache" + with torch.cuda.stream(self.prefetch_stream): + # Prefetch next layer tensors to GPU + for name, param in self.layers[layer_idx].named_parameters(): + param.data = param.data.to(device, non_blocking=True) + + def evict_previous_layer(self, layer_idx: int): + "Moves the previous layer cache to the CPU" + prev_layer_idx = layer_idx - 1 + for name, param in self.layers[prev_layer_idx].named_parameters(): + param.data = param.data.to("cpu", non_blocking=True) + + def get_offlaod_layer(self, layer_idx: int, device: torch.device): + # init stream + if not hasattr(self, "prefetch_stream"): + self.prefetch_stream = torch.cuda.Stream() + + # delete previous layer + torch.cuda.current_stream().synchronize() + self.evict_previous_layer(layer_idx) + + # make sure the current layer is ready + torch.cuda.synchronize(self.prefetch_stream) + + # load next layer + self.prefetch_layer((layer_idx + 1) % len(self.layers), device) + + + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + cache_position: Optional[torch.LongTensor] = None, + offload_model: Optional[bool] = False, + ) -> Union[Tuple, BaseModelOutputWithPast]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if (input_ids is None) ^ (inputs_embeds is not None): + raise ValueError("You must specify exactly one of input_ids or inputs_embeds") + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`..." + ) + use_cache = False + + # kept for BC (non `Cache` `past_key_values` inputs) + return_legacy_cache = False + if use_cache and not isinstance(past_key_values, Cache): + return_legacy_cache = True + if past_key_values is None: + past_key_values = DynamicCache() + else: + past_key_values = DynamicCache.from_legacy_cache(past_key_values) + logger.warning_once( + "We detected that you are passing `past_key_values` as a tuple of tuples. This is deprecated and " + "will be removed in v4.47. Please convert your cache or use an appropriate `Cache` class " + "(https://huggingface.co/docs/transformers/kv_cache#legacy-cache-format)" + ) + + # if inputs_embeds is None: + # inputs_embeds = self.embed_tokens(input_ids) + + # if cache_position is None: + # past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 + # cache_position = torch.arange( + # past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device + # ) + # if position_ids is None: + # position_ids = cache_position.unsqueeze(0) + + if attention_mask is not None and attention_mask.dim() == 3: + dtype = inputs_embeds.dtype + min_dtype = torch.finfo(dtype).min + attention_mask = (1 - attention_mask) * min_dtype + attention_mask = attention_mask.unsqueeze(1).to(inputs_embeds.dtype) + else: + raise + # causal_mask = self._update_causal_mask( + # attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions + # ) + + hidden_states = inputs_embeds + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + next_decoder_cache = None + + layer_idx = -1 + for decoder_layer in self.layers: + layer_idx += 1 + + if output_hidden_states: + all_hidden_states += (hidden_states,) + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + decoder_layer.__call__, + hidden_states, + attention_mask, + position_ids, + past_key_values, + output_attentions, + use_cache, + cache_position, + ) + else: + if offload_model and not self.training: + self.get_offlaod_layer(layer_idx, device=inputs_embeds.device) + layer_outputs = decoder_layer( + hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_values, + output_attentions=output_attentions, + use_cache=use_cache, + cache_position=cache_position, + ) + + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache = layer_outputs[2 if output_attentions else 1] + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + hidden_states = self.norm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + print('************') + all_hidden_states += (hidden_states,) + + next_cache = next_decoder_cache if use_cache else None + if return_legacy_cache: + next_cache = next_cache.to_legacy_cache() + + if not return_dict: + return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) + return BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + ) + diff --git a/OmniGen/OmniGen/utils.py b/OmniGen/OmniGen/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..67a64e8d67357515167b47f0bb2b3955f0aab384 --- /dev/null +++ b/OmniGen/OmniGen/utils.py @@ -0,0 +1,110 @@ +import logging + +from PIL import Image +import torch +import 
numpy as np + +def create_logger(logging_dir): + """ + Create a logger that writes to a log file and stdout. + """ + logging.basicConfig( + level=logging.INFO, + format='[\033[34m%(asctime)s\033[0m] %(message)s', + datefmt='%Y-%m-%d %H:%M:%S', + handlers=[logging.StreamHandler(), logging.FileHandler(f"{logging_dir}/log.txt")] + ) + logger = logging.getLogger(__name__) + return logger + + +@torch.no_grad() +def update_ema(ema_model, model, decay=0.9999): + """ + Step the EMA model towards the current model. + """ + ema_params = dict(ema_model.named_parameters()) + for name, param in model.named_parameters(): + # TODO: Consider applying only to params that require_grad to avoid small numerical changes of pos_embed + ema_params[name].mul_(decay).add_(param.data, alpha=1 - decay) + + + + +def requires_grad(model, flag=True): + """ + Set requires_grad flag for all parameters in a model. + """ + for p in model.parameters(): + p.requires_grad = flag + + +def center_crop_arr(pil_image, image_size): + """ + Center cropping implementation from ADM. + https://github.com/openai/guided-diffusion/blob/8fb3ad9197f16bbc40620447b2742e13458d2831/guided_diffusion/image_datasets.py#L126 + """ + while min(*pil_image.size) >= 2 * image_size: + pil_image = pil_image.resize( + tuple(x // 2 for x in pil_image.size), resample=Image.BOX + ) + + scale = image_size / min(*pil_image.size) + pil_image = pil_image.resize( + tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC + ) + + arr = np.array(pil_image) + crop_y = (arr.shape[0] - image_size) // 2 + crop_x = (arr.shape[1] - image_size) // 2 + return Image.fromarray(arr[crop_y: crop_y + image_size, crop_x: crop_x + image_size]) + + + +def crop_arr(pil_image, max_image_size): + while min(*pil_image.size) >= 2 * max_image_size: + pil_image = pil_image.resize( + tuple(x // 2 for x in pil_image.size), resample=Image.BOX + ) + + if max(*pil_image.size) > max_image_size: + scale = max_image_size / max(*pil_image.size) + pil_image = pil_image.resize( + tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC + ) + + if min(*pil_image.size) < 16: + scale = 16 / min(*pil_image.size) + pil_image = pil_image.resize( + tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC + ) + + arr = np.array(pil_image) + crop_y1 = (arr.shape[0] % 16) // 2 + crop_y2 = arr.shape[0] % 16 - crop_y1 + + crop_x1 = (arr.shape[1] % 16) // 2 + crop_x2 = arr.shape[1] % 16 - crop_x1 + + arr = arr[crop_y1:arr.shape[0]-crop_y2, crop_x1:arr.shape[1]-crop_x2] + return Image.fromarray(arr) + + + +def vae_encode(vae, x, weight_dtype): + if x is not None: + if vae.config.shift_factor is not None: + x = vae.encode(x).latent_dist.sample() + x = (x - vae.config.shift_factor) * vae.config.scaling_factor + else: + x = vae.encode(x).latent_dist.sample().mul_(vae.config.scaling_factor) + x = x.to(weight_dtype) + return x + +def vae_encode_list(vae, x, weight_dtype): + latents = [] + for img in x: + img = vae_encode(vae, img, weight_dtype) + latents.append(img) + return latents + diff --git a/OmniGen/README.md b/OmniGen/README.md new file mode 100644 index 0000000000000000000000000000000000000000..31a505f8b87906cc936be0ca4976a8b50cff0928 --- /dev/null +++ b/OmniGen/README.md @@ -0,0 +1,20 @@ +--- +title: OmniGen +emoji: 🖼 +colorFrom: purple +colorTo: red +sdk: gradio +sdk_version: 5.4.0 +app_file: app.py +pinned: false +license: mit +tags: + - dwpose + - pose + - Text-to-Image + - Image-to-Image + - language models + - LLMs +short_description: Image 
generator/identifier/reposer +--- +Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/OmniGen/app.py b/OmniGen/app.py new file mode 100644 index 0000000000000000000000000000000000000000..ee1741f00a013258150f29bb4804bbaa5c3908d2 --- /dev/null +++ b/OmniGen/app.py @@ -0,0 +1,408 @@ +import gradio as gr +from PIL import Image +import os +import random +import spaces + +from OmniGen import OmniGenPipeline + +pipe = OmniGenPipeline.from_pretrained( + "Shitao/OmniGen-v1" +) + +@spaces.GPU(duration=180) +def generate_image(text, img1, img2, img3, height, width, guidance_scale, img_guidance_scale, inference_steps, seed, separate_cfg_infer, offload_model, + use_input_image_size_as_output, max_input_image_size, randomize_seed): + input_images = [img1, img2, img3] + # Delete None + input_images = [img for img in input_images if img is not None] + if len(input_images) == 0: + input_images = None + + if randomize_seed: + seed = random.randint(0, 10000000) + + output = pipe( + prompt=text, + input_images=input_images, + height=height, + width=width, + guidance_scale=guidance_scale, + img_guidance_scale=img_guidance_scale, + num_inference_steps=inference_steps, + separate_cfg_infer=separate_cfg_infer, + use_kv_cache=True, + offload_kv_cache=True, + offload_model=offload_model, + use_input_image_size_as_output=use_input_image_size_as_output, + seed=seed, + max_input_image_size=max_input_image_size, + ) + img = output[0] + return img + + + +def get_example(): + case = [ + [ + "A curly-haired man in a red shirt is drinking tea.", + None, + None, + None, + 1024, + 1024, + 2.5, + 1.6, + 0, + 1024, + False, + False, + ], + [ + "The woman in <|image_1|> waves her hand happily in the crowd", + "./imgs/test_cases/zhang.png", + None, + None, + 1024, + 1024, + 2.5, + 1.9, + 128, + 1024, + False, + False, + ], + [ + "A man in a black shirt is reading a book. The man is the right man in <|image_1|>.", + "./imgs/test_cases/two_man.jpg", + None, + None, + 1024, + 1024, + 2.5, + 1.6, + 0, + 1024, + False, + False, + ], + [ + "Two woman are raising fried chicken legs in a bar. A woman is <|image_1|>. Another woman is <|image_2|>.", + "./imgs/test_cases/mckenna.jpg", + "./imgs/test_cases/Amanda.jpg", + None, + 1024, + 1024, + 2.5, + 1.8, + 65, + 1024, + False, + False, + ], + [ + "A man and a short-haired woman with a wrinkled face are standing in front of a bookshelf in a library. The man is the man in the middle of <|image_1|>, and the woman is oldest woman in <|image_2|>", + "./imgs/test_cases/1.jpg", + "./imgs/test_cases/2.jpg", + None, + 1024, + 1024, + 2.5, + 1.6, + 60, + 1024, + False, + False, + ], + [ + "A man and a woman are sitting at a classroom desk. The man is the man with yellow hair in <|image_1|>. The woman is the woman on the left of <|image_2|>", + "./imgs/test_cases/3.jpg", + "./imgs/test_cases/4.jpg", + None, + 1024, + 1024, + 2.5, + 1.8, + 66, + 1024, + False, + False, + ], + [ + "The flower <|image_1|> is placed in the vase which is in the middle of <|image_2|> on a wooden table of a living room", + "./imgs/test_cases/rose.jpg", + "./imgs/test_cases/vase.jpg", + None, + 1024, + 1024, + 2.5, + 1.6, + 0, + 1024, + False, + False, + ], + [ + "<|image_1|>\n Remove the woman's earrings. 
Replace the mug with a clear glass filled with sparkling iced cola.", + "./imgs/demo_cases/t2i_woman_with_book.png", + None, + None, + None, + None, + 2.5, + 1.6, + 222, + 1024, + False, + True, + ], + [ + "Detect the skeleton of human in this image: <|image_1|>.", + "./imgs/test_cases/control.jpg", + None, + None, + 1024, + 1024, + 2.0, + 1.6, + 0, + 1024, + False, + True, + ], + [ + "Generate a new photo using the following picture and text as conditions: <|image_1|>\n A young boy is sitting on a sofa in the library, holding a book. His hair is neatly combed, and a faint smile plays on his lips, with a few freckles scattered across his cheeks. The library is quiet, with rows of shelves filled with books stretching out behind him.", + "./imgs/demo_cases/skeletal.png", + None, + None, + 1024, + 1024, + 2, + 1.6, + 999, + 1024, + False, + True, + ], + [ + "Following the pose of this image <|image_1|>, generate a new photo: A young boy is sitting on a sofa in the library, holding a book. His hair is neatly combed, and a faint smile plays on his lips, with a few freckles scattered across his cheeks. The library is quiet, with rows of shelves filled with books stretching out behind him.", + "./imgs/demo_cases/edit.png", + None, + None, + 1024, + 1024, + 2.0, + 1.6, + 123, + 1024, + False, + True, + ], + [ + "Following the depth mapping of this image <|image_1|>, generate a new photo: A young girl is sitting on a sofa in the library, holding a book. His hair is neatly combed, and a faint smile plays on his lips, with a few freckles scattered across his cheeks. The library is quiet, with rows of shelves filled with books stretching out behind him.", + "./imgs/demo_cases/edit.png", + None, + None, + 1024, + 1024, + 2.0, + 1.6, + 1, + 1024, + False, + True, + ], + [ + "<|image_1|><\/img> What item can be used to see the current time? Please highlight it in blue.", + "./imgs/test_cases/watch.jpg", + None, + None, + 1024, + 1024, + 2.5, + 1.6, + 666, + 1024, + False, + True, + ], + [ + "According to the following examples, generate an output for the input.\nInput: <|image_1|>\nOutput: <|image_2|>\n\nInput: <|image_3|>\nOutput: ", + "./imgs/test_cases/icl1.jpg", + "./imgs/test_cases/icl2.jpg", + "./imgs/test_cases/icl3.jpg", + 224, + 224, + 2.5, + 1.6, + 1, + 768, + False, + False, + ], + ] + return case + +def run_for_examples(text, img1, img2, img3, height, width, guidance_scale, img_guidance_scale, seed, max_input_image_size, randomize_seed, use_input_image_size_as_output): + # 在函数内部设置默认值 + inference_steps = 50 + separate_cfg_infer = True + offload_model = False + + return generate_image( + text, img1, img2, img3, height, width, guidance_scale, img_guidance_scale, + inference_steps, seed, separate_cfg_infer, offload_model, + use_input_image_size_as_output, max_input_image_size, randomize_seed + ) + +description = """ +OmniGen is a unified image generation model that you can use to perform various tasks, including but not limited to text-to-image generation, subject-driven generation, Identity-Preserving Generation, and image-conditioned generation. +For multi-modal to image generation, you should pass a string as `prompt`, and a list of image paths as `input_images`. The placeholder in the prompt should be in the format of `<|image_*|>` (for the first image, the placeholder is <|image_1|>. for the second image, the the placeholder is <|image_2|>). +For example, use an image of a woman to generate a new image: +prompt = "A woman holds a bouquet of flowers and faces the camera. 
The woman is \<|image_1|\>."
+Tips:
+- For image editing and ControlNet tasks, we recommend setting the height and width of the output image to be the same as the input image. For example, if you want to edit a 512x512 image, you should set the height and width of the output image to 512x512. You can also set `use_input_image_size_as_output` to automatically match the output size to the input image.
+- For out-of-memory issues or long generation times, you can set `offload_model=True` or refer to [./docs/inference.md#requiremented-resources](https://github.com/VectorSpaceLab/OmniGen/blob/main/docs/inference.md#requiremented-resources) to select an appropriate setting.
+- If inference takes too long when inputting multiple images, please try to reduce the `max_input_image_size`. For more details, please refer to [./docs/inference.md#requiremented-resources](https://github.com/VectorSpaceLab/OmniGen/blob/main/docs/inference.md#requiremented-resources).
+- Oversaturated: If the image appears oversaturated, please reduce the `guidance_scale`.
+- Low quality: More detailed prompts will lead to better results.
+- Anime style: If the generated images are in an anime style, you can try adding `photo` to the prompt.
+- Editing a generated image: If you generate an image with OmniGen and then want to edit it, you cannot use the same seed to edit the image. For example, if you used seed=0 to generate the image, use seed=1 to edit it.
+- For image editing tasks, we recommend placing the image before the editing instruction. For example, use `<|image_1|> remove suit`, rather than `remove suit <|image_1|>`.
+
+
+**HF Spaces often encounter errors due to quota limitations, so we recommend running it locally.**
+"""
+
+article = """
+---
+**Citation**
+
+If you find this repository useful, please consider giving a star ⭐ and a citation +``` +@article{xiao2024omnigen, + title={Omnigen: Unified image generation}, + author={Xiao, Shitao and Wang, Yueze and Zhou, Junjie and Yuan, Huaying and Xing, Xingrun and Yan, Ruiran and Wang, Shuting and Huang, Tiejun and Liu, Zheng}, + journal={arXiv preprint arXiv:2409.11340}, + year={2024} +} +``` +**Contact** +
+If you have any questions, please feel free to open an issue or directly reach us out via email. +""" + + +# Gradio +with gr.Blocks() as demo: + gr.Markdown("# OmniGen: Unified Image Generation [paper](https://arxiv.org/abs/2409.11340) [code](https://github.com/VectorSpaceLab/OmniGen)") + gr.Markdown(description) + with gr.Row(): + with gr.Column(): + # text prompt + prompt_input = gr.Textbox( + label="Enter your prompt, use <|image_i|> to represent i-th input image", placeholder="Type your prompt here..." + ) + + with gr.Row(equal_height=True): + # input images + image_input_1 = gr.Image(label="<|image_1|>", type="filepath") + image_input_2 = gr.Image(label="<|image_2|>", type="filepath") + image_input_3 = gr.Image(label="<|image_3|>", type="filepath") + + # slider + height_input = gr.Slider( + label="Height", minimum=128, maximum=2048, value=1024, step=16 + ) + width_input = gr.Slider( + label="Width", minimum=128, maximum=2048, value=1024, step=16 + ) + + guidance_scale_input = gr.Slider( + label="Guidance Scale", minimum=1.0, maximum=5.0, value=2.5, step=0.1 + ) + + img_guidance_scale_input = gr.Slider( + label="img_guidance_scale", minimum=1.0, maximum=2.0, value=1.6, step=0.1 + ) + + num_inference_steps = gr.Slider( + label="Inference Steps", minimum=1, maximum=100, value=50, step=1 + ) + + seed_input = gr.Slider( + label="Seed", minimum=0, maximum=2147483647, value=42, step=1 + ) + randomize_seed = gr.Checkbox(label="Randomize seed", value=True) + + max_input_image_size = gr.Slider( + label="max_input_image_size", minimum=128, maximum=2048, value=1024, step=16 + ) + + separate_cfg_infer = gr.Checkbox( + label="separate_cfg_infer", info="Whether to use separate inference process for different guidance. This will reduce the memory cost.", value=True, + ) + offload_model = gr.Checkbox( + label="offload_model", info="Offload model to CPU, which will significantly reduce the memory cost but slow down the generation speed. You can cancel separate_cfg_infer and set offload_model=True. If both separate_cfg_infer and offload_model are True, further reduce the memory, but slowest generation", value=False, + ) + use_input_image_size_as_output = gr.Checkbox( + label="use_input_image_size_as_output", info="Automatically adjust the output image size to be same as input image size. 
For editing and controlnet task, it can make sure the output image has the same size as input image leading to better performance", value=False, + ) + + # generate + generate_button = gr.Button("Generate Image") + + + with gr.Column(): + # output image + output_image = gr.Image(label="Output Image") + + # click + generate_button.click( + generate_image, + inputs=[ + prompt_input, + image_input_1, + image_input_2, + image_input_3, + height_input, + width_input, + guidance_scale_input, + img_guidance_scale_input, + num_inference_steps, + seed_input, + separate_cfg_infer, + offload_model, + use_input_image_size_as_output, + max_input_image_size, + randomize_seed, + ], + outputs=output_image, + ) + + gr.Examples( + examples=get_example(), + fn=run_for_examples, + inputs=[ + prompt_input, + image_input_1, + image_input_2, + image_input_3, + height_input, + width_input, + guidance_scale_input, + img_guidance_scale_input, + seed_input, + max_input_image_size, + randomize_seed, + use_input_image_size_as_output, + ], + outputs=output_image, + ) + + gr.Markdown(article) + +# launch +demo.launch() + diff --git a/OmniGen/docs/fine-tuning.md b/OmniGen/docs/fine-tuning.md new file mode 100644 index 0000000000000000000000000000000000000000..0dccf4373dfbdbcebc22d7a96dff89d59edcd8fd --- /dev/null +++ b/OmniGen/docs/fine-tuning.md @@ -0,0 +1,172 @@ +# Fine-tuning OmniGen + +Fine-tuning Omnigen can better help you handle specific image generation tasks. For example, by fine-tuning on a person's images, you can generate multiple pictures of that person while maintaining task consistency. + +A lot of previous work focused on designing new networks to facilitate specific tasks. For instance, ControlNet was proposed to handle image conditions, and IP-Adapter was constructed to maintain ID features. If you want to perform new tasks, you need to build new architectures and repeatedly debug them. Adding and adjusting extra network parameters is usually time-consuming and labor-intensive, which is not user-friendly and cost-efficient enough. However, with Omnigen, all of this becomes very simple. + +By comparison, Omnigen can accept multi-modal conditional inputs and has been pre-trained on various tasks. You can fine-tune it on any task without designing specialized networks like ControlNet or IP-Adapter for a specific task. + +**All you need to do is prepare the data and start training. You can break the limitations of previous models, allowing Omnigen to accomplish a variety of interesting tasks, even those that have never been done before.** + + +## Installation + +```bash +git clone https://github.com/VectorSpaceLab/OmniGen.git +cd OmniGen +pip install -e . 
+``` + + +## Full fine-tuning + +### Fine-tuning command + +```bash +accelerate launch \ + --num_processes=1 \ + --use_fsdp \ + --fsdp_offload_params false \ + --fsdp_sharding_strategy SHARD_GRAD_OP \ + --fsdp_auto_wrap_policy TRANSFORMER_BASED_WRAP \ + --fsdp_transformer_layer_cls_to_wrap Phi3DecoderLayer \ + --fsdp_state_dict_type FULL_STATE_DICT \ + --fsdp_forward_prefetch false \ + --fsdp_use_orig_params True \ + --fsdp_cpu_ram_efficient_loading false \ + --fsdp_sync_module_states True \ + train.py \ + --model_name_or_path Shitao/OmniGen-v1 \ + --json_file ./toy_data/toy_data.jsonl \ + --image_path ./toy_data/images \ + --batch_size_per_device 1 \ + --lr 2e-5 \ + --keep_raw_resolution \ + --max_image_size 1024 \ + --gradient_accumulation_steps 1 \ + --ckpt_every 100 \ + --epochs 100 \ + --log_every 1 \ + --results_dir ./results/toy_finetune +``` + +Some important arguments: +- `num_processes`: number of GPU to use for training +- `model_name_or_path`: path to the pretrained model +- `json_file`: path to the json file containing the training data, e.g., ./toy_data/toy_data.jsonl +- `image_path`: path to the image folder, e.g., ./toy_data/images +- `batch_size_per_device`: batch size per device +- `lr`: learning rate +- `keep_raw_resolution`: whether to keep the original resolution of the image, if not, all images will be resized to (max_image_size, max_image_size) +- `max_image_size`: max image size +- `gradient_accumulation_steps`: number of steps to accumulate gradients +- `ckpt_every`: number of steps to save checkpoint +- `epochs`: number of epochs +- `log_every`: number of steps to log +- `results_dir`: path to the results folder + +The data format of json_file is as follows: +``` +{ + "instruction": str, + "input_images": [str, str, ...], + "output_images": str +} +``` +You can see a toy example in `./toy_data/toy_data.jsonl`. + +If an OOM(Out of Memory) issue occurs, you can try to decrease the `batch_size_per_device` or `max_image_size`. You can also try to use LoRA instead of full fine-tuning. + + +### Inference + +The checkpoint can be found at `{results_dir}/checkpoints/*`. You can use the following command to load saved checkpoint: +```python +from OmniGen import OmniGenPipeline + +pipe = OmniGenPipeline.from_pretrained("checkpoint_path") # e.g., ./results/toy_finetune/checkpoints/0000200 +``` + + + + + +## LoRA fine-tuning +LoRA fine-tuning is a simple way to fine-tune OmniGen with less GPU memory. To use lora, you should add `--use_lora` and `--lora_rank` to the command. + +```bash +accelerate launch \ + --num_processes=1 \ + train.py \ + --model_name_or_path Shitao/OmniGen-v1 \ + --batch_size_per_device 2 \ + --condition_dropout_prob 0.01 \ + --lr 3e-4 \ + --use_lora \ + --lora_rank 8 \ + --json_file ./toy_data/toy_data.jsonl \ + --image_path ./toy_data/images \ + --max_input_length_limit 18000 \ + --keep_raw_resolution \ + --max_image_size 1024 \ + --gradient_accumulation_steps 1 \ + --ckpt_every 100 \ + --epochs 100 \ + --log_every 1 \ + --results_dir ./results/toy_finetune_lora +``` + +### Inference + +The checkpoint can be found at `{results_dir}/checkpoints/*`. You can use the following command to load checkpoint: +```python +from OmniGen import OmniGenPipeline + +pipe = OmniGenPipeline.from_pretrained("Shitao/OmniGen-v1") +pipe.merge_lora("checkpoint_path") # e.g., ./results/toy_finetune_lora/checkpoints/0000100 +``` + + +## A simple example + +Here is an example for learning new concepts: "sks dog". 
We use five images of one dog from [dog-example](https://huggingface.co/datasets/diffusers/dog-example). + +The json file is `./toy_data/toy_subject_data.jsonl`, and the images have been saved in `./toy_data/images`. + +```bash +accelerate launch \ + --num_processes=1 \ + train.py \ + --model_name_or_path Shitao/OmniGen-v1 \ + --batch_size_per_device 2 \ + --condition_dropout_prob 0.01 \ + --lr 1e-3 \ + --use_lora \ + --lora_rank 8 \ + --json_file ./toy_data/toy_subject_data.jsonl \ + --image_path ./toy_data/images \ + --max_input_length_limit 18000 \ + --keep_raw_resolution \ + --max_image_size 1024 \ + --gradient_accumulation_steps 1 \ + --ckpt_every 100 \ + --epochs 200 \ + --log_every 1 \ + --results_dir ./results/toy_finetune_lora +``` + +After training, you can use the following command to generate images: +```python +from OmniGen import OmniGenPipeline + +pipe = OmniGenPipeline.from_pretrained("Shitao/OmniGen-v1") +pipe.merge_lora("checkpoint_path") # e.g., ./results/toy_finetune_lora/checkpoints/0000200 + +images = pipe( + prompt="a photo of sks dog running in the snow", + height=1024, + width=1024, + guidance_scale=3 +) +images[0].save("example_sks_dog_snow.png") +``` diff --git a/OmniGen/docs/inference.md b/OmniGen/docs/inference.md new file mode 100644 index 0000000000000000000000000000000000000000..5fdeccfdbaa6cec7c3ab1b7e757db7cde8439438 --- /dev/null +++ b/OmniGen/docs/inference.md @@ -0,0 +1,167 @@ +# Inference with OmniGen + +To handle some complex tasks, image generation models are becoming increasingly sophisticated, leading to more and more cumbersome workflows. Existing image generation models like SD and Flux require loading many additional network modules (such as ControlNet, IP-Adapter, Reference-Net) and extra preprocessing steps (e.g., face detection, pose detection, image cropping) to generate a satisfactory image. This complex workflow is not user-friendly. We believe that future image generation models should be simpler, generating various images directly through instructions, similar to how GPT works in language generation. + +Therefore, we propose OmniGen, a model capable of handling various image generation tasks within a single framework. The goal of OmniGen is to complete various image generation tasks without relying on any additional components or image preprocessing steps. OmniGen supports tasks including text-to-image generation, image editing, subject-driven image generation, and classical vision tasks, among others. More capabilities can be found in our examples. We provide inference code so you can explore more unknown functionalities yourself. + + + +## Install +```bash +git clone https://github.com/staoxiao/OmniGen.git +cd OmniGen +pip install -e . +``` + + + +## Generate Images +You can use the following code to generate images: +```python +from OmniGen import OmniGenPipeline + +pipe = OmniGenPipeline.from_pretrained("Shitao/OmniGen-v1") + +# Text to Image +images = pipe( + prompt="A curly-haired man in a red shirt is drinking tea.", + height=1024, + width=1024, + guidance_scale=2.5, + seed=0, +) +images[0].save("example_t2i.png") # save output PIL Image + +# Multi-modal to Image +# In prompt, we use the placeholder to represent the image. The image placeholder should be in the format of <|image_*|> +# You can add multiple images in the input_images. Please ensure that each image has its placeholder. For example, for the list input_images [img1_path, img2_path], the prompt needs to have two placeholders: <|image_1|>, <|image_2|>. 
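+# Note: placeholders are matched to input_images by position, so <|image_1|> refers to
+# input_images[0], <|image_2|> to input_images[1], and so on.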
+images = pipe(
+    prompt="A man in a black shirt is reading a book. The man is the right man in <|image_1|>.",
+    input_images=["./imgs/test_cases/two_man.jpg"],
+    height=1024,
+    width=1024,
+    guidance_scale=2.5,
+    img_guidance_scale=1.6,
+    max_input_image_size=1024,
+    separate_cfg_infer=True,
+    use_kv_cache=True,
+    offload_kv_cache=True,
+    offload_model=False,
+    use_input_image_size_as_output=False,
+    seed=0,
+)
+images[0].save("example_ti2i.png")  # save output PIL image
+```
+
+Some important arguments:
+- `guidance_scale`: The strength of the guidance. Based on our experience, it is usually best to set it between 2 and 3. The higher the value, the more similar the generated image will be to the prompt. If the image appears oversaturated, please reduce the scale.
+- `height` and `width`: The height and width of the generated image. The default value is 1024x1024. OmniGen supports any size, but these numbers must be divisible by 16.
+- `num_inference_steps`: The number of steps to take in the diffusion process. The higher the value, the more detailed the generated image will be.
+- `max_input_image_size`: The maximum size of the input image; larger input images will be cropped to this size. A smaller number will result in faster generation speed and lower memory cost.
+- `separate_cfg_infer`: Whether to use a separate inference process for CFG guidance. If set to True, memory cost will be lower. Default is True.
+- `use_kv_cache`: Whether to use the key-value cache. Default is True.
+- `offload_kv_cache`: Offload the cached keys and values to CPU, which can save memory but slows down generation slightly. Default is True.
+- `offload_model`: Offload the model to CPU, which can save memory but slows down generation. Default is False.
+- `use_input_image_size_as_output`: Whether to use the input image size as the output image size, which can be used for single-image input, e.g., the image editing task. Default is False.
+- `seed`: The seed for the random number generator.
+
+**For more examples, please refer to [inference.ipynb](../inference.ipynb)**
+
+
+#### Input data
+OmniGen can accept multi-modal input data. Specifically, you should pass two arguments: `prompt` and `input_images`.
+For text-to-image generation, you can pass a string as `prompt`, or pass a list of strings as `prompt` to generate multiple images.
+
+For multi-modal to image generation, you should pass a string as `prompt` and a list of image paths as `input_images`. The placeholders in the prompt should be in the format of `<|image_*|>`.
+For example, if you want to generate an image with a person holding a bouquet of flowers, you can pass the following prompt:
+```
+prompt = "A woman holds a bouquet of flowers and faces the camera. The woman is <|image_1|>."
+input_images = ["./imgs/test_cases/liuyifei.png"]
+```
+The placeholder `<|image_1|>` will be replaced by the image at `input_images[0]`, i.e., `./imgs/test_cases/liuyifei.png`.
+
+If you want to generate multiple images, you can pass a list of prompts and a list of image path lists (a complete runnable sketch is given below). For example:
+```
+prompt = ["A woman holds a bouquet of flowers and faces the camera.", "A woman holds a bouquet of flowers and faces the camera. The woman is <|image_1|>."]
+input_images = [[], ["./imgs/test_cases/liuyifei.png"]]
+```
+
+
+#### Gradio Demo
+We have constructed an online demo on [Huggingface](https://huggingface.co/spaces/Shitao/OmniGen).
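+Before trying the hosted demo, you can also sanity-check a local installation with a short script. The sketch below is only illustrative: it combines the arguments documented above with the multi-prompt input format from the Input data section (the image path is one of the repository's test images; output filenames are placeholders).
+
+```python
+from OmniGen import OmniGenPipeline
+
+pipe = OmniGenPipeline.from_pretrained("Shitao/OmniGen-v1")
+
+# One text-only prompt and one multi-modal prompt in a single call;
+# the empty list means the first prompt uses no input images.
+prompts = [
+    "A woman holds a bouquet of flowers and faces the camera.",
+    "A woman holds a bouquet of flowers and faces the camera. The woman is <|image_1|>.",
+]
+input_images = [[], ["./imgs/test_cases/liuyifei.png"]]
+
+images = pipe(
+    prompt=prompts,
+    input_images=input_images,
+    height=1024,
+    width=1024,
+    guidance_scale=2.5,
+    img_guidance_scale=1.6,
+    seed=0,
+)
+
+# One PIL image is returned per prompt.
+images[0].save("batch_t2i.png")
+images[1].save("batch_ti2i.png")
+```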
+
+For the local gradio demo, you can run it with the following command:
+```bash
+python app.py
+```
+
+
+## Tips
+- For out-of-memory issues or long generation times, you can refer to [./docs/inference.md#requiremented-resources](https://github.com/VectorSpaceLab/OmniGen/blob/main/docs/inference.md#requiremented-resources) to select an appropriate setting.
+- Oversaturated: If the image appears oversaturated, please reduce the `guidance_scale`.
+- Does not match the prompt: If the image does not match the prompt, please try to increase the `guidance_scale`.
+- Low quality: More detailed prompts will lead to better results.
+- Anime style: If the generated images are in an anime style, you can try adding `photo` to the prompt.
+- Editing a generated image: If you generate an image with OmniGen and then want to edit it, you cannot use the same seed to edit the image. For example, if you used seed=0 to generate the image, use seed=1 to edit it.
+- For image editing tasks, we recommend placing the image before the editing instruction. For example, use `<|image_1|> remove suit`, rather than `remove suit <|image_1|>`.
+- For image editing and ControlNet tasks, we recommend setting the height and width of the output image to be the same
+ +Different image size (`max_input_image_size` is the max size of input image, `height` and `width` are the size of output image) with the default inference settings (`use_kv_cache=True,offload_kv_cache=True,separate_cfg_infer=True`) + +For A800 GPU: +| Settings | Only Text | Text + Single Image | Text + Two Images | +|:-------------|:----------:|:-------------------:|:---------------------:| +| max_input_image_size=1024,height=1024,width=1024 | 9G, 31s | 12G, 1m6s | 13G, 1m20s | +| max_input_image_size=512,height=1024,width=1024 | 9G, 31s | 10G, 50s | 10G, 54s | +| max_input_image_size=768,height=768,width=768 | 9G, 16s | 10G, 32s | 10G, 37s | +| max_input_image_size=512,height=512,width=512 | 9G, 7s | 9G, 14s | 9G, 15s | + +For RTX 3090 GPU(24G): +| Settings | Only Text | Text + Single Image | Text + Two Images | +|:-------------|:----------:|:-------------------:|:---------------------:| +| max_input_image_size=1024,height=1024,width=1024 | 9G, 1m17s | 12G, 2m46s | 13G, 3m23s | +| max_input_image_size=512,height=1024,width=1024 | 9G, 1m18s | 10G, 2m8s | 10G, 2m18s | +| max_input_image_size=768,height=768,width=768 | 9G, 41s | 10G, 1m22s | 10G, 1m38s | +| max_input_image_size=512,height=512,width=512 | 9G, 19s | 9G, 36s | 9G, 43s | + + +You can set smaller `max_input_image_size` to reduce memory usage, but note that the generation quality may be lower. +And please set the `height` and `width` the same as the size of input image for image editing task. + + +- Different inference settings + +Default image size: height=1024, width=1024, max_input_image_size=1024 + +For A800 GPU: +| Settings | Only Text | Text + Single Image | Text + Two Images | +|:-------------|:----------:|:-------------------:|:---------------------:| +| use_kv_cache | 18G, 30s | 36G, 1m | 48G, 1m13s | +| use_kv_cache,offload_kv_cache | 10G, 30s | 14G, 1m10s | 17G, 1m30s | +| use_kv_cache,offload_kv_cache,separate_cfg_infer | 9G, 31s | 12G, 1m6s | 13G, 1m20s | +| use_kv_cache,offload_kv_cache,offload_model | 4G, 55s | 7G, 1m30s | 11G, 1m48s | +| use_kv_cache,offload_kv_cache,separate_cfg_infer,offload_model | 3G, 1m23s | 5G, 2m19s | 6G, 2m30s | + +For RTX 3090 GPU(24G): +| Settings | Only Text | Text + Single Image | Text + Two Images | +|:-------------|:----------:|:-------------------:|:---------------------:| +| use_kv_cache | 18G, 1m14s | OOM | OOM | +| use_kv_cache,offload_kv_cache | 10G, 1m17s | 14G, 3m11s | 17G, 4m3s | +| use_kv_cache,offload_kv_cache,separate_cfg_infer | 9G, 1m18s | 12G, 2m46s | 13G, 3m21s | +| use_kv_cache,offload_kv_cache,offload_model | 4G,3m1s | 7G, 4m14s | 11G, 5m4s | +| use_kv_cache,offload_kv_cache,separate_cfg_infer,offload_model | 3G, 4m56s | 5G, 7m49s | 6G, 8m6s | diff --git a/OmniGen/imgs/.DS_Store b/OmniGen/imgs/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..5008ddfcf53c02e82d7eee2e57c38e5672ef89f6 Binary files /dev/null and b/OmniGen/imgs/.DS_Store differ diff --git a/OmniGen/imgs/demo_cases.png b/OmniGen/imgs/demo_cases.png new file mode 100644 index 0000000000000000000000000000000000000000..ade55ef17d226244176febdaf15ec62b48ee0a20 --- /dev/null +++ b/OmniGen/imgs/demo_cases.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0517c97c947f8226f0f39b4ca2ac61b058e52faa59ec5085668062d0162dd21e +size 3416173 diff --git a/OmniGen/imgs/demo_cases/AI_Pioneers.jpg b/OmniGen/imgs/demo_cases/AI_Pioneers.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b22291a4dae766e69c1e8846ec108fe5feda120f --- /dev/null +++ 
b/OmniGen/imgs/demo_cases/AI_Pioneers.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b7f51ae11a11781027d1f9e1e8d566438f937508e16c627bfb60acca5b1d7c0 +size 101471 diff --git a/OmniGen/imgs/demo_cases/edit.png b/OmniGen/imgs/demo_cases/edit.png new file mode 100644 index 0000000000000000000000000000000000000000..83a728529943269cb70fe26156e1b58f90ae13ca --- /dev/null +++ b/OmniGen/imgs/demo_cases/edit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2fac5461b2c06a99664ba1299fd9fcebd781a26afa5ebc07aa07cb678ebae2af +size 1252514 diff --git a/OmniGen/imgs/demo_cases/entity.png b/OmniGen/imgs/demo_cases/entity.png new file mode 100644 index 0000000000000000000000000000000000000000..98cf1482df78a03fc88aad3b8a3f13e418534190 --- /dev/null +++ b/OmniGen/imgs/demo_cases/entity.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7c622ebecd3210c80e8d913158ee3564168c77c576f04b56e34d2d28bfea9e06 +size 1284254 diff --git a/OmniGen/imgs/demo_cases/reasoning.png b/OmniGen/imgs/demo_cases/reasoning.png new file mode 100644 index 0000000000000000000000000000000000000000..cf842eecfbfb25ce2a242a8037f0ed2c9f814cd8 --- /dev/null +++ b/OmniGen/imgs/demo_cases/reasoning.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb510edcb5628c0def3871cef2e0351acc578a1ceef445ebbd72f8b6eb92fc9d +size 1243263 diff --git a/OmniGen/imgs/demo_cases/same_pose.png b/OmniGen/imgs/demo_cases/same_pose.png new file mode 100644 index 0000000000000000000000000000000000000000..2be56eedd6d401d122a260bb51357c45febc1a19 --- /dev/null +++ b/OmniGen/imgs/demo_cases/same_pose.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:beccbeabfc408f319661d9af1063005cbc21c977ba50b910491611ca3babd876 +size 1358837 diff --git a/OmniGen/imgs/demo_cases/skeletal.png b/OmniGen/imgs/demo_cases/skeletal.png new file mode 100644 index 0000000000000000000000000000000000000000..ac1e38619a521c888dc9515a7ca243a30d3ba2cc --- /dev/null +++ b/OmniGen/imgs/demo_cases/skeletal.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:30c7937855228adec69da7d9bc3170c9f434a6b159feaf02d362033c1901a671 +size 106093 diff --git a/OmniGen/imgs/demo_cases/skeletal2img.png b/OmniGen/imgs/demo_cases/skeletal2img.png new file mode 100644 index 0000000000000000000000000000000000000000..b5524b2388b6b65810262a7e8a8154c4d0f74713 --- /dev/null +++ b/OmniGen/imgs/demo_cases/skeletal2img.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:86c21341018bb633f364d40afbf361b5e5690bf1e6539b99150e4aea0ed695b6 +size 1282673 diff --git a/OmniGen/imgs/demo_cases/t2i_woman_with_book.png b/OmniGen/imgs/demo_cases/t2i_woman_with_book.png new file mode 100644 index 0000000000000000000000000000000000000000..e1c746b4c5ad46bce6a9d792bc5ce6cc1973ff2a --- /dev/null +++ b/OmniGen/imgs/demo_cases/t2i_woman_with_book.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe258160193adeaff960a838de01d7f7294ab09899de534f2dee99043b0c747a +size 1270715 diff --git a/OmniGen/imgs/overall.jpg b/OmniGen/imgs/overall.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bd93b233deb28790a95bd3dd02568b85df0d15b6 --- /dev/null +++ b/OmniGen/imgs/overall.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ffa229632ac0bb248eee87cf823a0dc18c22c0a81a57d4c639e7fb1986d4e029 +size 1127850 diff --git a/OmniGen/imgs/referring.png b/OmniGen/imgs/referring.png new file mode 100644 index 
0000000000000000000000000000000000000000..4be22b2c7019bbffa4ad35c03b379853abbb8c3c --- /dev/null +++ b/OmniGen/imgs/referring.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:393fab6a4d51e84555f75162430e35a64a49670d9e6c3986cd80bca318a4fb3e +size 4088507 diff --git a/OmniGen/imgs/test_cases/1.jpg b/OmniGen/imgs/test_cases/1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..87b8abe7751eabb04c0a76a82d9f32b5d8cdec0d --- /dev/null +++ b/OmniGen/imgs/test_cases/1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d2dad7a81a5c609d136fbcccc2a71007c20474103d301ae5564fa63258b4a492 +size 1866247 diff --git a/OmniGen/imgs/test_cases/2.jpg b/OmniGen/imgs/test_cases/2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..db416ca5bfa76b500f485eff7e4b61a37b1aefbd --- /dev/null +++ b/OmniGen/imgs/test_cases/2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:919ec1a20515ce921d04a5a0f6dcbe5aa4288f41c04cc62a0bd59103957b45db +size 1149154 diff --git a/OmniGen/imgs/test_cases/3.jpg b/OmniGen/imgs/test_cases/3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0d0bed469cd8db5300c9a8592d4ffc429c2cdaaf --- /dev/null +++ b/OmniGen/imgs/test_cases/3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c8fef6b304efc3fc189991ec28b83bbe15c391af55b2bfd85276eb19d49194c9 +size 1202687 diff --git a/OmniGen/imgs/test_cases/4.jpg b/OmniGen/imgs/test_cases/4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ca284d262eb85d115e0e0d7216af3cfc0762d68e --- /dev/null +++ b/OmniGen/imgs/test_cases/4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:222e844198656a13facbf0f0afe327b074641a7f20d4120418fa1302e61db538 +size 1742551 diff --git a/OmniGen/imgs/test_cases/Amanda.jpg b/OmniGen/imgs/test_cases/Amanda.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ebcde3b59bfab39c9176305494b53c84281fc1f2 --- /dev/null +++ b/OmniGen/imgs/test_cases/Amanda.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c20a508b8619fca4d963f574bca51c7460f274218507c97c2853fa6eaea6d0cb +size 1654477 diff --git a/OmniGen/imgs/test_cases/cat.jpeg b/OmniGen/imgs/test_cases/cat.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..374bff929b2e46f6e3628023130a76878b683e1e --- /dev/null +++ b/OmniGen/imgs/test_cases/cat.jpeg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:076d8d520f68d1cf2f6a66366721e03ad20cf9d385839f5c26b3d2060bb6d789 +size 6110 diff --git a/OmniGen/imgs/test_cases/control.jpg b/OmniGen/imgs/test_cases/control.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1c331c2c3e03d8f8abdd868a3d1c56a5ad623e09 --- /dev/null +++ b/OmniGen/imgs/test_cases/control.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ca485995cb5f4b1b792e39a99e9647745291f92689eb40f1da925f19dfdc1b5 +size 1315895 diff --git a/OmniGen/imgs/test_cases/guitar1.png b/OmniGen/imgs/test_cases/guitar1.png new file mode 100644 index 0000000000000000000000000000000000000000..9ebc6a9bba6429ce7e34451fbc3c13ee66972425 --- /dev/null +++ b/OmniGen/imgs/test_cases/guitar1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1c405aaaaa2e6660157aca7d908a4374cd5ca78585f32202b323c24d4fab756 +size 835961 diff --git a/OmniGen/imgs/test_cases/icl1.jpg b/OmniGen/imgs/test_cases/icl1.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..d085ba4017d7c7cc8074cdf9af77c34af2f4a8d0 --- /dev/null +++ b/OmniGen/imgs/test_cases/icl1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e2b2086ad903c43aee1cc98902b7c53864765c6c99758acf39618ac1ad54b0e +size 76585 diff --git a/OmniGen/imgs/test_cases/icl2.jpg b/OmniGen/imgs/test_cases/icl2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..69d26099f9bb6244c6ff27e97a1e85d510703b9b --- /dev/null +++ b/OmniGen/imgs/test_cases/icl2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:48bafc52d6721c636e1aec9ebcd1a76c017cc926909bf03270993dd423bc49f9 +size 86311 diff --git a/OmniGen/imgs/test_cases/icl3.jpg b/OmniGen/imgs/test_cases/icl3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b63f9e8c345e89ccf9638320d887cc575c808ab6 --- /dev/null +++ b/OmniGen/imgs/test_cases/icl3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:077a7c69f7ca24808922e5acc7762a62182d51031f4f9e0d035ce80b09a81d5e +size 77805 diff --git a/OmniGen/imgs/test_cases/img1.jpg b/OmniGen/imgs/test_cases/img1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bd0cf08a451401d7ab2d4f223d500764648e61b6 --- /dev/null +++ b/OmniGen/imgs/test_cases/img1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f0036a7c89f60de3366afc6fffedf52be91cf91da36b2fb9b1722e641debfeb7 +size 176056 diff --git a/OmniGen/imgs/test_cases/img2.jpg b/OmniGen/imgs/test_cases/img2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..be99950338cc7527247d5ba3e666b9886b55d79e --- /dev/null +++ b/OmniGen/imgs/test_cases/img2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:999845fb0e781cc8b9db0e9407ad6d4a528dc08677caec3fcc9f6ad6afc718bc +size 174576 diff --git a/OmniGen/imgs/test_cases/img3.jpg b/OmniGen/imgs/test_cases/img3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..84f35c00cab791718b4afb147d607b7493c7106d --- /dev/null +++ b/OmniGen/imgs/test_cases/img3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1d2ba59e30efb043f77644c2fc9f59082899dcfe6a26cbf803de32c9eafff18 +size 165570 diff --git a/OmniGen/imgs/test_cases/lecun.png b/OmniGen/imgs/test_cases/lecun.png new file mode 100644 index 0000000000000000000000000000000000000000..275702e1fca736d0f185ab9145e45d873bb4beee --- /dev/null +++ b/OmniGen/imgs/test_cases/lecun.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:99d66323fe33f81d561c481fce08eb43f03d110fdbe54b237f7f985f54d9102a +size 293568 diff --git a/OmniGen/imgs/test_cases/liuyifei.png b/OmniGen/imgs/test_cases/liuyifei.png new file mode 100644 index 0000000000000000000000000000000000000000..cc54f557ed67627af113ef7c5a37c531180bd14b --- /dev/null +++ b/OmniGen/imgs/test_cases/liuyifei.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f8f3758202cb1bd7a844b5cacdb368de61392ed989151d42b555389592ec62bb +size 39127 diff --git a/OmniGen/imgs/test_cases/mckenna.jpg b/OmniGen/imgs/test_cases/mckenna.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6d56457a7fec4aad9e70984d048cf145ff7c2380 --- /dev/null +++ b/OmniGen/imgs/test_cases/mckenna.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd20a5841f84114859e46c4000d9b8035a40378b5d40fbb2b559864813cd402f +size 1781438 diff --git a/OmniGen/imgs/test_cases/pose.png b/OmniGen/imgs/test_cases/pose.png new file mode 
100644 index 0000000000000000000000000000000000000000..71fdd50c730c6d5b7d478afd00285fb876b4b967 --- /dev/null +++ b/OmniGen/imgs/test_cases/pose.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ef21d4a7117fd14a2057f653fa48e4a31f3fc849787fa188a8c1656657706d0 +size 97407 diff --git a/OmniGen/imgs/test_cases/rose.jpg b/OmniGen/imgs/test_cases/rose.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7651e8b2baf561fba4300f0efc7667d973c8380f --- /dev/null +++ b/OmniGen/imgs/test_cases/rose.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2578fcc252f8b3240f9bc621f9c22f7b186a959ee1b2a037d2c9c31be99fae91 +size 68844 diff --git a/OmniGen/imgs/test_cases/taylor.png b/OmniGen/imgs/test_cases/taylor.png new file mode 100644 index 0000000000000000000000000000000000000000..97ff767320d236206ca5ab0015c98ad97630bc6f --- /dev/null +++ b/OmniGen/imgs/test_cases/taylor.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7459d17f9b77a5d52ae1026b1a41b25645e64a915b0bf663f6bda3d1edcf138 +size 45702 diff --git a/OmniGen/imgs/test_cases/trump.png b/OmniGen/imgs/test_cases/trump.png new file mode 100644 index 0000000000000000000000000000000000000000..945cc477168216e938596f441f2202ae0c8bc7dc --- /dev/null +++ b/OmniGen/imgs/test_cases/trump.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:075e742aecba02d0c0b1f4480e62ec2e1f33592960eda8859930bb307acc26a4 +size 26238 diff --git a/OmniGen/imgs/test_cases/turing.png b/OmniGen/imgs/test_cases/turing.png new file mode 100644 index 0000000000000000000000000000000000000000..b22291a4dae766e69c1e8846ec108fe5feda120f --- /dev/null +++ b/OmniGen/imgs/test_cases/turing.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b7f51ae11a11781027d1f9e1e8d566438f937508e16c627bfb60acca5b1d7c0 +size 101471 diff --git a/OmniGen/imgs/test_cases/two_man.jpg b/OmniGen/imgs/test_cases/two_man.jpg new file mode 100644 index 0000000000000000000000000000000000000000..927aa91a42a5349c4e55e3ab9657577f142cca5a --- /dev/null +++ b/OmniGen/imgs/test_cases/two_man.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7c940253b06b1b32d472375b445474571bd7838a01fcd069b2ae34fb3e5d8d1f +size 1276640 diff --git a/OmniGen/imgs/test_cases/vase.jpg b/OmniGen/imgs/test_cases/vase.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bdc0674d417c23dbe397362b3fdbccdd741d16a4 --- /dev/null +++ b/OmniGen/imgs/test_cases/vase.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab4e3e4b1228d85e7a9c4979bb9d825817d88799fe187f12216a22e2c3ceaa93 +size 31482 diff --git a/OmniGen/imgs/test_cases/watch.jpg b/OmniGen/imgs/test_cases/watch.jpg new file mode 100644 index 0000000000000000000000000000000000000000..31666818f40f3c0ee77b53bf20e18e315921d56e --- /dev/null +++ b/OmniGen/imgs/test_cases/watch.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f930542af14fc7816e32cc429cbd7855d4420ad797e1ff72aa523b61778ee9f4 +size 492845 diff --git a/OmniGen/imgs/test_cases/woman.png b/OmniGen/imgs/test_cases/woman.png new file mode 100644 index 0000000000000000000000000000000000000000..bc7b7f853385b5f9aa2140867b4c32916d48ae8d --- /dev/null +++ b/OmniGen/imgs/test_cases/woman.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:911f596d617d89bde63ecb8aaf1fde71d1fa183d4ac0cdac770a9fc8627a3b50 +size 1475294 diff --git a/OmniGen/imgs/test_cases/yifei2.png 
b/OmniGen/imgs/test_cases/yifei2.png new file mode 100644 index 0000000000000000000000000000000000000000..f0bbbdffefff5b63f7e2a0eeb5f0d7e83c4a0416 --- /dev/null +++ b/OmniGen/imgs/test_cases/yifei2.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a0087233e61184829ab345d6be2991918e85727076e0d4f8c7bbfc865ca4d6d0 +size 101292 diff --git a/OmniGen/imgs/test_cases/young_musk.jpg b/OmniGen/imgs/test_cases/young_musk.jpg new file mode 100644 index 0000000000000000000000000000000000000000..118032a8202b802db6ed029d117e7a1ac8ed8569 --- /dev/null +++ b/OmniGen/imgs/test_cases/young_musk.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91290c822c51d51302ae2d4e67b924d329b9b7faa767ca238f9b58736c0610f7 +size 43281 diff --git a/OmniGen/imgs/test_cases/young_trump.jpeg b/OmniGen/imgs/test_cases/young_trump.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..1c8d85c387ac17a0442c695f26b1950e426f95aa --- /dev/null +++ b/OmniGen/imgs/test_cases/young_trump.jpeg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4329fdb452774bf0517e19c91065ea7e87e4d2e874c6e3da2d5ee8a64093b4a0 +size 319674 diff --git a/OmniGen/imgs/test_cases/zhang.png b/OmniGen/imgs/test_cases/zhang.png new file mode 100644 index 0000000000000000000000000000000000000000..0665dd2a4500cc98b6d6efe9403ef931bae47b43 --- /dev/null +++ b/OmniGen/imgs/test_cases/zhang.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:020925b411e9e053354876116e92722e7a5ee003d45070a2cf58b1902f2162cd +size 158138 diff --git a/OmniGen/requirements.txt b/OmniGen/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..0c34784cdf494938cdf4ada635fc3a59936d353b --- /dev/null +++ b/OmniGen/requirements.txt @@ -0,0 +1,9 @@ +accelerate +diffusers +invisible_watermark +torch +transformers +xformers +timm +peft +safetensors \ No newline at end of file diff --git a/OmniGen/setup.py b/OmniGen/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..6cb3b40d25ae7ae42d42a2ecd00c8b8a965c1d6f --- /dev/null +++ b/OmniGen/setup.py @@ -0,0 +1,26 @@ +from setuptools import setup, find_packages + +with open("README.md", mode="r", encoding="utf-8") as readme_file: + readme = readme_file.read() + +setup( + name='OmniGen', + version='1.0.3', + description='OmniGen', + long_description=readme, + long_description_content_type="text/markdown", + author_email='2906698981@qq.com', + url='https://github.com/VectorSpaceLab/OmniGen', + packages=find_packages(), + include_package_data=True, + install_requires=[ + 'torch==2.3.1', + 'transformers==4.45.2', + 'datasets', + 'accelerate==0.26.1', + 'diffusers==0.30.3', + "timm", + "peft==0.9.0", + "safetensors" + ], +) diff --git a/OmniGen/train.py b/OmniGen/train.py new file mode 100644 index 0000000000000000000000000000000000000000..c76eacec3d833f3d1a0f19f16d76f38effa30c63 --- /dev/null +++ b/OmniGen/train.py @@ -0,0 +1,373 @@ +import json +from time import time +import argparse +import logging +import os +from pathlib import Path +import math + +import numpy as np +from PIL import Image +from copy import deepcopy + +import torch +import torch.distributed as dist +from torch.utils.data import Dataset, DataLoader +from torch.utils.data.distributed import DistributedSampler +from torchvision import transforms + +from accelerate import Accelerator +from accelerate.utils import ProjectConfiguration, set_seed +from diffusers.optimization import get_scheduler +from accelerate.utils import 
diff --git a/OmniGen/train.py b/OmniGen/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..c76eacec3d833f3d1a0f19f16d76f38effa30c63
--- /dev/null
+++ b/OmniGen/train.py
@@ -0,0 +1,373 @@
+import json
+from time import time
+import argparse
+import logging
+import os
+from pathlib import Path
+import math
+
+import numpy as np
+from PIL import Image
+from copy import deepcopy
+
+import torch
+import torch.distributed as dist
+from torch.utils.data import Dataset, DataLoader
+from torch.utils.data.distributed import DistributedSampler
+from torchvision import transforms
+
+from accelerate import Accelerator
+from accelerate.utils import ProjectConfiguration, set_seed
+from diffusers.optimization import get_scheduler
+from accelerate.utils import DistributedType
+from peft import LoraConfig, set_peft_model_state_dict, PeftModel, get_peft_model
+from peft.utils import get_peft_model_state_dict
+from huggingface_hub import snapshot_download
+from safetensors.torch import save_file
+
+from diffusers.models import AutoencoderKL
+
+from OmniGen import OmniGen, OmniGenProcessor
+from OmniGen.train_helper import DatasetFromJson, TrainDataCollator
+from OmniGen.train_helper import training_losses
+from OmniGen.utils import (
+    create_logger,
+    update_ema,
+    requires_grad,
+    center_crop_arr,
+    crop_arr,
+    vae_encode,
+    vae_encode_list
+)
+
+def main(args):
+    # Setup accelerator:
+    from accelerate import DistributedDataParallelKwargs as DDPK
+    kwargs = DDPK(find_unused_parameters=False)
+    accelerator = Accelerator(
+        gradient_accumulation_steps=args.gradient_accumulation_steps,
+        mixed_precision=args.mixed_precision,
+        log_with=args.report_to,
+        project_dir=args.results_dir,
+        kwargs_handlers=[kwargs],
+    )
+    device = accelerator.device
+    accelerator.init_trackers("tensorboard_log", config=args.__dict__)
+
+    # Setup an experiment folder:
+    checkpoint_dir = f"{args.results_dir}/checkpoints"  # Stores saved model checkpoints
+    logger = create_logger(args.results_dir)
+    if accelerator.is_main_process:
+        os.makedirs(checkpoint_dir, exist_ok=True)
+        logger.info(f"Experiment directory created at {args.results_dir}")
+        json.dump(args.__dict__, open(os.path.join(args.results_dir, 'train_args.json'), 'w'))
+
+    # Create model:
+    if not os.path.exists(args.model_name_or_path):
+        cache_folder = os.getenv('HF_HUB_CACHE')
+        args.model_name_or_path = snapshot_download(repo_id=args.model_name_or_path,
+                                                    cache_dir=cache_folder,
+                                                    ignore_patterns=['flax_model.msgpack', 'rust_model.ot', 'tf_model.h5'])
+        logger.info(f"Downloaded model to {args.model_name_or_path}")
+    model = OmniGen.from_pretrained(args.model_name_or_path)
+    model.llm.config.use_cache = False
+    model.llm.gradient_checkpointing_enable()
+    model = model.to(device)
+
+    if args.vae_path is None:
+        print(args.model_name_or_path)
+        vae_path = os.path.join(args.model_name_or_path, "vae")
+        if os.path.exists(vae_path):
+            vae = AutoencoderKL.from_pretrained(vae_path).to(device)
+        else:
+            logger.info("No VAE found in model, downloading stabilityai/sdxl-vae from HF")
+            logger.info("If you have VAE in local folder, please specify the path with --vae_path")
+            vae = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae").to(device)
+    else:
+        vae = AutoencoderKL.from_pretrained(args.vae_path).to(device)
+
+    weight_dtype = torch.float32
+    if accelerator.mixed_precision == "fp16":
+        weight_dtype = torch.float16
+    elif accelerator.mixed_precision == "bf16":
+        weight_dtype = torch.bfloat16
+    vae.to(dtype=torch.float32)
+    model.to(weight_dtype)
+
+    processor = OmniGenProcessor.from_pretrained(args.model_name_or_path)
+
+    requires_grad(vae, False)
+    if args.use_lora:
+        if accelerator.distributed_type == DistributedType.FSDP:
+            raise NotImplementedError("FSDP does not support LoRA")
+        requires_grad(model, False)
+        transformer_lora_config = LoraConfig(
+            r=args.lora_rank,
+            lora_alpha=args.lora_rank,
+            init_lora_weights="gaussian",
+            target_modules=["qkv_proj", "o_proj"],
+        )
+        model.llm.enable_input_require_grads()
+        model = get_peft_model(model, transformer_lora_config)
+        model.to(weight_dtype)
+        transformer_lora_parameters = list(filter(lambda p: p.requires_grad, model.parameters()))
+        for n, p in model.named_parameters():
+            print(n, p.requires_grad)
+        opt = torch.optim.AdamW(transformer_lora_parameters, lr=args.lr, weight_decay=args.adam_weight_decay)
+    else:
+        opt = torch.optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.adam_weight_decay)
+
+    ema = None
+    if args.use_ema:
+        ema = deepcopy(model).to(device)  # Create an EMA of the model for use after training
+        requires_grad(ema, False)
+
+    # Setup data:
+    crop_func = crop_arr
+    if not args.keep_raw_resolution:
+        crop_func = center_crop_arr
+    image_transform = transforms.Compose([
+        transforms.Lambda(lambda pil_image: crop_func(pil_image, args.max_image_size)),
+        transforms.ToTensor(),
+        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
+    ])
+
+    dataset = DatasetFromJson(json_file=args.json_file,
+                              image_path=args.image_path,
+                              processer=processor,
+                              image_transform=image_transform,
+                              max_input_length_limit=args.max_input_length_limit,
+                              condition_dropout_prob=args.condition_dropout_prob,
+                              keep_raw_resolution=args.keep_raw_resolution
+                              )
+    collate_fn = TrainDataCollator(pad_token_id=processor.text_tokenizer.eos_token_id, hidden_size=model.llm.config.hidden_size, keep_raw_resolution=args.keep_raw_resolution)
+
+    loader = DataLoader(
+        dataset,
+        collate_fn=collate_fn,
+        batch_size=args.batch_size_per_device,
+        shuffle=True,
+        num_workers=args.num_workers,
+        pin_memory=True,
+        drop_last=True,
+        prefetch_factor=2,
+    )
+
+    if accelerator.is_main_process:
+        logger.info(f"Dataset contains {len(dataset):,} samples")
+
+    num_update_steps_per_epoch = math.ceil(len(loader) / args.gradient_accumulation_steps)
+    max_train_steps = args.epochs * num_update_steps_per_epoch
+    lr_scheduler = get_scheduler(
+        args.lr_scheduler,
+        optimizer=opt,
+        num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps,
+        num_training_steps=max_train_steps * args.gradient_accumulation_steps,
+    )
+
+    # Prepare models for training:
+    model.train()  # important! This enables embedding dropout for classifier-free guidance
+
+    if ema is not None:
+        update_ema(ema, model, decay=0)  # Ensure EMA is initialized with synced weights
+        ema.eval()  # EMA model should always be in eval mode
+
+    if ema is not None:
+        model, ema = accelerator.prepare(model, ema)
+    else:
+        model = accelerator.prepare(model)
+
+    opt, loader, lr_scheduler = accelerator.prepare(opt, loader, lr_scheduler)
+
+    # Variables for monitoring/logging purposes:
+    train_steps, log_steps = 0, 0
+    running_loss = 0
+    start_time = time()
+
+    if accelerator.is_main_process:
+        logger.info(f"Training for {args.epochs} epochs...")
+    for epoch in range(args.epochs):
+        if accelerator.is_main_process:
+            logger.info(f"Beginning epoch {epoch}...")
+
+        for data in loader:
+            with accelerator.accumulate(model):
+                with torch.no_grad():
+                    output_images = data['output_images']
+                    input_pixel_values = data['input_pixel_values']
+                    if isinstance(output_images, list):
+                        output_images = vae_encode_list(vae, output_images, weight_dtype)
+                        if input_pixel_values is not None:
+                            input_pixel_values = vae_encode_list(vae, input_pixel_values, weight_dtype)
+                    else:
+                        output_images = vae_encode(vae, output_images, weight_dtype)
+                        if input_pixel_values is not None:
+                            input_pixel_values = vae_encode(vae, input_pixel_values, weight_dtype)
+
+                model_kwargs = dict(input_ids=data['input_ids'], input_img_latents=input_pixel_values, input_image_sizes=data['input_image_sizes'], attention_mask=data['attention_mask'], position_ids=data['position_ids'], padding_latent=data['padding_images'], past_key_values=None, return_past_key_values=False)
+
+                loss_dict = training_losses(model, output_images, model_kwargs)
+                loss = loss_dict["loss"].mean()
+
+                running_loss += loss.item()
+                accelerator.backward(loss)
+                if args.max_grad_norm is not None and accelerator.sync_gradients:
+                    accelerator.clip_grad_norm_(model.parameters(), args.max_grad_norm)
+                opt.step()
+                lr_scheduler.step()
+                opt.zero_grad()
+
+                log_steps += 1
+                train_steps += 1
+
+                accelerator.log({"training_loss": loss.item()}, step=train_steps)
+                if train_steps % args.gradient_accumulation_steps == 0:
+                    if accelerator.sync_gradients and ema is not None:
+                        update_ema(ema, model)
+
+                if train_steps % (args.log_every * args.gradient_accumulation_steps) == 0 and train_steps > 0:
+                    torch.cuda.synchronize()
+                    end_time = time()
+                    steps_per_sec = log_steps / args.gradient_accumulation_steps / (end_time - start_time)
+                    # Reduce loss history over all processes:
+                    avg_loss = torch.tensor(running_loss / log_steps, device=device)
+                    dist.all_reduce(avg_loss, op=dist.ReduceOp.SUM)
+                    avg_loss = avg_loss.item() / accelerator.num_processes
+
+                    if accelerator.is_main_process:
+                        cur_lr = opt.param_groups[0]["lr"]
+                        logger.info(f"(step={int(train_steps/args.gradient_accumulation_steps):07d}) Train Loss: {avg_loss:.4f}, Train Steps/Sec: {steps_per_sec:.2f}, Epoch: {train_steps/len(loader)}, LR: {cur_lr}")
+
+                    # Reset monitoring variables:
+                    running_loss = 0
+                    log_steps = 0
+                    start_time = time()
+
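+                # Periodically write checkpoints: with --use_lora only the adapter is saved via
+                # save_pretrained; otherwise the full state dict is saved, plus an EMA copy when --use_ema is set.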
+                if train_steps % (args.ckpt_every * args.gradient_accumulation_steps) == 0 and train_steps > 0:
+                    if accelerator.distributed_type == DistributedType.FSDP:
+                        state_dict = accelerator.get_state_dict(model)
+                        ema_state_dict = accelerator.get_state_dict(ema) if ema is not None else None
+                    else:
+                        if not args.use_lora:
+                            state_dict = model.module.state_dict()
+                            ema_state_dict = accelerator.get_state_dict(ema) if ema is not None else None
+
+                    if accelerator.is_main_process:
+                        if args.use_lora:
+                            checkpoint_path = f"{checkpoint_dir}/{int(train_steps/args.gradient_accumulation_steps):07d}/"
+                            os.makedirs(checkpoint_path, exist_ok=True)
+                            model.module.save_pretrained(checkpoint_path)
+                        else:
+                            checkpoint_path = f"{checkpoint_dir}/{int(train_steps/args.gradient_accumulation_steps):07d}/"
+                            os.makedirs(checkpoint_path, exist_ok=True)
+                            torch.save(state_dict, os.path.join(checkpoint_path, "model.pt"))
+                            processor.text_tokenizer.save_pretrained(checkpoint_path)
+                            model.llm.config.save_pretrained(checkpoint_path)
+                            if ema_state_dict is not None:
+                                checkpoint_path = f"{checkpoint_dir}/{int(train_steps/args.gradient_accumulation_steps):07d}_ema"
+                                os.makedirs(checkpoint_path, exist_ok=True)
+                                torch.save(ema_state_dict, os.path.join(checkpoint_path, "model.pt"))
+                                processor.text_tokenizer.save_pretrained(checkpoint_path)
+                                model.llm.config.save_pretrained(checkpoint_path)
+                        logger.info(f"Saved checkpoint to {checkpoint_path}")
+
+                    dist.barrier()
+
+    accelerator.end_training()
+    model.eval()
+
+    if accelerator.is_main_process:
+        logger.info("Done!")
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--results_dir", type=str, default="results")
+    parser.add_argument("--model_name_or_path", type=str, default="OmniGen")
+    parser.add_argument("--json_file", type=str)
+    parser.add_argument("--image_path", type=str, default=None)
+    parser.add_argument("--epochs", type=int, default=1400)
+    parser.add_argument("--batch_size_per_device", type=int, default=1)
+    parser.add_argument("--vae_path", type=str, default=None)
+    parser.add_argument("--num_workers", type=int, default=4)
+    parser.add_argument("--log_every", type=int, default=100)
+    parser.add_argument("--ckpt_every", type=int, default=20000)
+    parser.add_argument("--max_grad_norm", type=float, default=1.0)
+    parser.add_argument("--lr", type=float, default=1e-4)
+    parser.add_argument("--max_input_length_limit", type=int, default=1024)
+    parser.add_argument("--condition_dropout_prob", type=float, default=0.1)
+    parser.add_argument("--adam_weight_decay", type=float, default=0.0)
+    parser.add_argument(
+        "--keep_raw_resolution",
+        action="store_true",
+        help="Keep the raw resolution of training images (supports multiple resolutions) instead of center-cropping them.",
+    )
+    parser.add_argument("--max_image_size", type=int, default=1344)
+
+    parser.add_argument(
+        "--use_lora",
+        action="store_true",
+    )
+    parser.add_argument(
+        "--lora_rank",
+        type=int,
+        default=8
+    )
+
+    parser.add_argument(
+        "--use_ema",
+        action="store_true",
+        help="Whether or not to use ema.",
+    )
+    parser.add_argument(
+        "--lr_scheduler",
+        type=str,
+        default="constant",
+        help=(
+            'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
+            ' "constant", "constant_with_warmup"]'
+        ),
+    )
+    parser.add_argument(
+        "--lr_warmup_steps", type=int, default=1000, help="Number of steps for the warmup in the lr scheduler."
+    )
+    parser.add_argument(
+        "--report_to",
+        type=str,
+        default="tensorboard",
+        help=(
+            'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
+            ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
+        ),
+    )
+    parser.add_argument(
+        "--mixed_precision",
+        type=str,
+        default="bf16",
+        choices=["no", "fp16", "bf16"],
+        help=(
+            "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
+            " 1.10 and an Nvidia Ampere GPU. Defaults to the value of the accelerate config of the current system or the"
+            " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
+        ),
+    )
+    parser.add_argument(
+        "--gradient_accumulation_steps",
+        type=int,
+        default=1,
+        help="Number of update steps to accumulate before performing a backward/update pass.",
+    )
+
+    args = parser.parse_args()
+    assert args.max_image_size % 16 == 0, "Image size must be divisible by 16."
+
+    main(args)
+
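
As a follow-up to the LoRA branch of train.py, the adapter directory written by the checkpoint block above can, in principle, be re-attached to a freshly loaded base model for inference. The sketch below uses peft's standard loading path; the checkpoint path is illustrative, and this re-loading step is an assumption about downstream use rather than something train.py itself performs.

    # Sketch: re-attach a LoRA adapter saved by train.py (paths are illustrative)
    from OmniGen import OmniGen
    from peft import PeftModel

    base = OmniGen.from_pretrained("OmniGen")  # same default as --model_name_or_path
    model = PeftModel.from_pretrained(base, "results/checkpoints/0020000/")
    model = model.merge_and_unload()  # fold the LoRA weights back into the base modules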