|
|
from diffusers import FluxPipeline, AutoencoderKL, AutoencoderTiny |
|
|
from diffusers.image_processor import VaeImageProcessor |
|
|
from diffusers.schedulers import FlowMatchEulerDiscreteScheduler |
|
|
from huggingface_hub.constants import HF_HUB_CACHE |
|
|
from transformers import T5EncoderModel, T5TokenizerFast, CLIPTokenizer, CLIPTextModel |
|
|
import torch |
|
|
import torch._dynamo |
|
|
import gc |
|
|
from PIL import Image as img |
|
|
from PIL.Image import Image |
|
|
from pipelines.models import TextToImageRequest |
|
|
from torch import Generator |
|
|
import time |
|
|
|
|
|
from torchao.quantization import quantize_, int8_weight_only, fpx_weight_only |
|
|
import os |
|
|
# Let the CUDA caching allocator grow segments on demand to reduce fragmentation.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
|
|
|
|
|
|
|
import math |
|
|
from typing import Type, Dict, Any, Tuple, Callable, Optional, Union, List |
|
|
import ghanta |
|
|
import numpy as np |
|
|
|
|
import torch.nn as nn |
|
|
import torch.nn.functional as F |
|
|
|
|
|
from diffusers.configuration_utils import ConfigMixin, register_to_config |
|
|
from diffusers.loaders import FromOriginalModelMixin, PeftAdapterMixin |
|
|
from diffusers.models.attention import FeedForward |
|
|
from diffusers.models.attention_processor import ( |
|
|
Attention, |
|
|
AttentionProcessor, |
|
|
FluxAttnProcessor2_0, |
|
|
FusedFluxAttnProcessor2_0, |
|
|
) |
|
|
from diffusers.models.modeling_utils import ModelMixin |
|
|
from diffusers.models.normalization import AdaLayerNormContinuous, AdaLayerNormZero, AdaLayerNormZeroSingle |
|
|
from diffusers.utils import USE_PEFT_BACKEND, is_torch_version, logging, scale_lora_layers, unscale_lora_layers |
|
|
from diffusers.utils.import_utils import is_torch_npu_available |
|
|
from diffusers.utils.torch_utils import maybe_allow_in_graph |
|
|
from diffusers.models.embeddings import CombinedTimestepGuidanceTextProjEmbeddings, CombinedTimestepTextProjEmbeddings, FluxPosEmbed |
|
|
from diffusers.models.modeling_outputs import Transformer2DModelOutput |
|
|
|
|
|
|
|
from diffusers.loaders import FluxLoraLoaderMixin, FromSingleFileMixin, TextualInversionLoaderMixin |
|
|
from diffusers.models.autoencoders import AutoencoderKL |
|
|
|
|
|
from diffusers.utils import ( |
|
|
USE_PEFT_BACKEND, |
|
|
is_torch_xla_available, |
|
|
logging, |
|
|
replace_example_docstring, |
|
|
scale_lora_layers, |
|
|
unscale_lora_layers, |
|
|
) |
|
|
from diffusers.utils.torch_utils import randn_tensor |
|
|
from diffusers.pipelines.pipeline_utils import DiffusionPipeline |
|
|
from diffusers.pipelines.flux.pipeline_output import FluxPipelineOutput |
|
|
|
|
|
if is_torch_xla_available(): |
|
|
import torch_xla.core.xla_model as xm |
|
|
|
|
|
XLA_AVAILABLE = True |
|
|
else: |
|
|
XLA_AVAILABLE = False |
|
|
import inspect

logger = logging.get_logger(__name__)  # required by the `logger.warning(...)` calls below
|
|
|
|
|
|
|
|
def calculate_shift( |
|
|
image_seq_len, |
|
|
base_seq_len: int = 256, |
|
|
max_seq_len: int = 4096, |
|
|
base_shift: float = 0.5, |
|
|
max_shift: float = 1.16, |
|
|
): |
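    # Linearly interpolate the flow-matching shift `mu` between `base_shift` and
    # `max_shift` as the image token count grows from `base_seq_len` to `max_seq_len`.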
|
|
m = (max_shift - base_shift) / (max_seq_len - base_seq_len) |
|
|
b = base_shift - m * base_seq_len |
|
|
mu = image_seq_len * m + b |
|
|
return mu |
|
|
|
|
|
|
|
|
|
|
|
def retrieve_timesteps( |
|
|
scheduler, |
|
|
num_inference_steps: Optional[int] = None, |
|
|
device: Optional[Union[str, torch.device]] = None, |
|
|
timesteps: Optional[List[int]] = None, |
|
|
sigmas: Optional[List[float]] = None, |
|
|
**kwargs, |
|
|
): |
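    # Hand custom `timesteps` or `sigmas` to `scheduler.set_timesteps` when the scheduler
    # supports them, otherwise fall back to `num_inference_steps`; returns the resulting
    # schedule and its length.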
|
|
if timesteps is not None and sigmas is not None: |
|
|
raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") |
|
|
if timesteps is not None: |
|
|
accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) |
|
|
if not accepts_timesteps: |
|
|
raise ValueError( |
|
|
f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" |
|
|
f" timestep schedules. Please check whether you are using the correct scheduler." |
|
|
) |
|
|
scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) |
|
|
timesteps = scheduler.timesteps |
|
|
num_inference_steps = len(timesteps) |
|
|
elif sigmas is not None: |
|
|
accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) |
|
|
if not accept_sigmas: |
|
|
raise ValueError( |
|
|
f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" |
|
|
f" sigmas schedules. Please check whether you are using the correct scheduler." |
|
|
) |
|
|
scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) |
|
|
timesteps = scheduler.timesteps |
|
|
num_inference_steps = len(timesteps) |
|
|
else: |
|
|
scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) |
|
|
timesteps = scheduler.timesteps |
|
|
return timesteps, num_inference_steps |
|
|
|
|
|
|
|
|
def inicializar_generador(dispositivo: torch.device, respaldo: torch.Generator = None): |
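    # Return a torch.Generator for the given device ("dispositivo"), seeded from that
    # device's current RNG state; "respaldo" is a fallback generator for other device types.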
|
|
if dispositivo.type == "cpu": |
|
|
return torch.Generator(device="cpu").set_state(torch.get_rng_state()) |
|
|
elif dispositivo.type == "cuda": |
|
|
return torch.Generator(device=dispositivo).set_state(torch.cuda.get_rng_state()) |
|
|
else: |
|
|
if respaldo is None: |
|
|
return inicializar_generador(torch.device("cpu")) |
|
|
else: |
|
|
return respaldo |
|
|
|
|
|
def calcular_fusion(x: torch.Tensor, info_tome: Dict[str, Any]) -> Tuple[Callable, ...]: |
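    # Build ToMe-style merge/unmerge callables for the token sequence `x`, using the
    # external `ghanta` helper (which appears to implement 2D random soft matching).
    # Returns separate (merge, unmerge) pairs for the attention, context, and MLP paths,
    # gated by the m1/m2/m3 flags; when the computed downsample level exceeds `down`,
    # no-op callables are returned instead.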
|
|
alto_original, ancho_original = info_tome["size"] |
|
|
tokens_originales = alto_original * ancho_original |
|
|
submuestreo = int(math.ceil(math.sqrt(tokens_originales // x.shape[1]))) |
|
|
argumentos = info_tome["args"] |
|
|
if submuestreo <= argumentos["down"]: |
|
|
ancho = int(math.ceil(ancho_original / submuestreo)) |
|
|
alto = int(math.ceil(alto_original / submuestreo)) |
|
|
radio = int(x.shape[1] * argumentos["ratio"]) |
|
|
|
|
|
if argumentos["generator"] is None: |
|
|
argumentos["generator"] = inicializar_generador(x.device) |
|
|
elif argumentos["generator"].device != x.device: |
|
|
argumentos["generator"] = inicializar_generador(x.device, respaldo=argumentos["generator"]) |
|
|
|
|
|
usar_aleatoriedad = argumentos["rando"] |
|
|
fusion, desfusion = ghanta.emparejamiento_suave_aleatorio_2d( |
|
|
x, ancho, alto, argumentos["sx"], argumentos["sy"], radio, |
|
|
sin_aleatoriedad=not usar_aleatoriedad, generador=argumentos["generator"] |
|
|
) |
|
|
else: |
|
|
fusion, desfusion = (ghanta.hacer_nada, ghanta.hacer_nada) |
|
|
fusion_a, desfusion_a = (fusion, desfusion) if argumentos["m1"] else (ghanta.hacer_nada, ghanta.hacer_nada) |
|
|
fusion_c, desfusion_c = (fusion, desfusion) if argumentos["m2"] else (ghanta.hacer_nada, ghanta.hacer_nada) |
|
|
fusion_m, desfusion_m = (fusion, desfusion) if argumentos["m3"] else (ghanta.hacer_nada, ghanta.hacer_nada) |
|
|
return fusion_a, fusion_c, fusion_m, desfusion_a, desfusion_c, desfusion_m |
|
|
|
|
|
@maybe_allow_in_graph |
|
|
class FluxSingleTransformerBlock(nn.Module): |
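    # Single-stream Flux block (parallel attention + MLP on one token stream); when `tinfo`
    # is provided, token merging shrinks the sequence before attention/MLP and un-merges it
    # on the residual afterwards.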
|
|
|
|
|
def __init__(self, dim, num_attention_heads, attention_head_dim, mlp_ratio=4.0): |
|
|
super().__init__() |
|
|
self.mlp_hidden_dim = int(dim * mlp_ratio) |
|
|
|
|
|
self.norm = AdaLayerNormZeroSingle(dim) |
|
|
self.proj_mlp = nn.Linear(dim, self.mlp_hidden_dim) |
|
|
self.act_mlp = nn.GELU(approximate="tanh") |
|
|
self.proj_out = nn.Linear(dim + self.mlp_hidden_dim, dim) |
|
|
|
|
|
processor = FluxAttnProcessor2_0() |
|
|
self.attn = Attention( |
|
|
query_dim=dim, |
|
|
cross_attention_dim=None, |
|
|
dim_head=attention_head_dim, |
|
|
heads=num_attention_heads, |
|
|
out_dim=dim, |
|
|
bias=True, |
|
|
processor=processor, |
|
|
qk_norm="rms_norm", |
|
|
eps=1e-6, |
|
|
pre_only=True, |
|
|
) |
|
|
|
|
|
def forward( |
|
|
self, |
|
|
hidden_states: torch.FloatTensor, |
|
|
temb: torch.FloatTensor, |
|
|
image_rotary_emb=None, |
|
|
joint_attention_kwargs=None, |
|
|
tinfo: Dict[str, Any] = None, |
|
|
): |
|
|
if tinfo is not None: |
|
|
m_a, m_c, mom, u_a, u_c, u_m = calcular_fusion(hidden_states, tinfo) |
|
|
else: |
|
|
m_a, m_c, mom, u_a, u_c, u_m = (ghanta.hacer_nada, ghanta.hacer_nada, ghanta.hacer_nada, ghanta.hacer_nada, ghanta.hacer_nada, ghanta.hacer_nada) |
|
|
|
|
|
|
|
|
residual = hidden_states |
|
|
norm_hidden_states, gate = self.norm(hidden_states, emb=temb) |
|
|
norm_hidden_states = m_a(norm_hidden_states) |
|
|
mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) |
|
|
joint_attention_kwargs = joint_attention_kwargs or {} |
|
|
attn_output = self.attn( |
|
|
hidden_states=norm_hidden_states, |
|
|
image_rotary_emb=image_rotary_emb, |
|
|
**joint_attention_kwargs, |
|
|
) |
|
|
|
|
|
hidden_states = torch.cat([attn_output, mlp_hidden_states], dim=2) |
|
|
gate = gate.unsqueeze(1) |
|
|
hidden_states = gate * self.proj_out(hidden_states) |
|
|
hidden_states = u_a(residual + hidden_states) |
|
|
|
|
|
return hidden_states |
|
|
|
|
|
|
|
|
@maybe_allow_in_graph |
|
|
class FluxTransformerBlock(nn.Module): |
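    # Dual-stream Flux block processing image and text tokens jointly; token merging can be
    # applied independently to the image, context, and MLP paths via `tinfo`.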
|
|
|
|
|
def __init__(self, dim, num_attention_heads, attention_head_dim, qk_norm="rms_norm", eps=1e-6): |
|
|
super().__init__() |
|
|
|
|
|
self.norm1 = AdaLayerNormZero(dim) |
|
|
|
|
|
self.norm1_context = AdaLayerNormZero(dim) |
|
|
|
|
|
if hasattr(F, "scaled_dot_product_attention"): |
|
|
processor = FluxAttnProcessor2_0() |
|
|
else: |
|
|
raise ValueError( |
|
|
"The current PyTorch version does not support the `scaled_dot_product_attention` function." |
|
|
) |
|
|
self.attn = Attention( |
|
|
query_dim=dim, |
|
|
cross_attention_dim=None, |
|
|
added_kv_proj_dim=dim, |
|
|
dim_head=attention_head_dim, |
|
|
heads=num_attention_heads, |
|
|
out_dim=dim, |
|
|
context_pre_only=False, |
|
|
bias=True, |
|
|
processor=processor, |
|
|
qk_norm=qk_norm, |
|
|
eps=eps, |
|
|
) |
|
|
|
|
|
self.norm2 = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6) |
|
|
self.ff = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate") |
|
|
|
|
|
self.norm2_context = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6) |
|
|
self.ff_context = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate") |
|
|
self._chunk_size = None |
|
|
self._chunk_dim = 0 |
|
|
|
|
|
def forward( |
|
|
self, |
|
|
hidden_states: torch.FloatTensor, |
|
|
encoder_hidden_states: torch.FloatTensor, |
|
|
temb: torch.FloatTensor, |
|
|
image_rotary_emb=None, |
|
|
joint_attention_kwargs=None, |
|
|
tinfo: Dict[str, Any] = None, |
|
|
): |
|
|
|
|
|
if tinfo is not None: |
|
|
m_a, m_c, mom, u_a, u_c, u_m = calcular_fusion(hidden_states, tinfo) |
|
|
else: |
|
|
m_a, m_c, mom, u_a, u_c, u_m = (ghanta.hacer_nada, ghanta.hacer_nada, ghanta.hacer_nada, ghanta.hacer_nada, ghanta.hacer_nada, ghanta.hacer_nada) |
|
|
|
|
|
|
|
|
norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) |
|
|
|
|
|
norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( |
|
|
encoder_hidden_states, emb=temb |
|
|
) |
|
|
joint_attention_kwargs = joint_attention_kwargs or {} |
|
|
norm_hidden_states = m_a(norm_hidden_states) |
|
|
norm_encoder_hidden_states = m_c(norm_encoder_hidden_states) |
|
|
|
|
|
attn_output, context_attn_output = self.attn( |
|
|
hidden_states=norm_hidden_states, |
|
|
encoder_hidden_states=norm_encoder_hidden_states, |
|
|
image_rotary_emb=image_rotary_emb, |
|
|
**joint_attention_kwargs, |
|
|
) |
|
|
|
|
|
attn_output = gate_msa.unsqueeze(1) * attn_output |
|
|
hidden_states = u_a(attn_output) + hidden_states |
|
|
|
|
|
norm_hidden_states = self.norm2(hidden_states) |
|
|
norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] |
|
|
|
|
|
norm_hidden_states = mom(norm_hidden_states) |
|
|
|
|
|
ff_output = self.ff(norm_hidden_states) |
|
|
ff_output = gate_mlp.unsqueeze(1) * ff_output |
|
|
|
|
|
hidden_states = u_m(ff_output) + hidden_states |
|
|
context_attn_output = c_gate_msa.unsqueeze(1) * context_attn_output |
|
|
encoder_hidden_states = u_c(context_attn_output) + encoder_hidden_states |
|
|
|
|
|
norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) |
|
|
norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None] |
|
|
|
|
|
context_ff_output = self.ff_context(norm_encoder_hidden_states) |
|
|
encoder_hidden_states = encoder_hidden_states + c_gate_mlp.unsqueeze(1) * context_ff_output |
|
|
|
|
|
return encoder_hidden_states, hidden_states |
|
|
|
|
|
|
|
|
class FluxTransformer2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin): |
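    # Flux transformer adapted from the diffusers implementation, extended with token-merging
    # support: the `tinfo` dict built in __init__ is forwarded to every block during `forward`.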
|
|
|
|
|
_supports_gradient_checkpointing = True |
|
|
_no_split_modules = ["FluxTransformerBlock", "FluxSingleTransformerBlock"] |
|
|
|
|
|
@register_to_config |
|
|
def __init__( |
|
|
self, |
|
|
patch_size: int = 1, |
|
|
in_channels: int = 64, |
|
|
out_channels: Optional[int] = None, |
|
|
num_layers: int = 19, |
|
|
num_single_layers: int = 38, |
|
|
attention_head_dim: int = 128, |
|
|
num_attention_heads: int = 24, |
|
|
joint_attention_dim: int = 4096, |
|
|
pooled_projection_dim: int = 768, |
|
|
guidance_embeds: bool = False, |
|
|
axes_dims_rope: Tuple[int] = (16, 56, 56), |
|
|
generator: Optional[torch.Generator] = None, |
|
|
): |
|
|
super().__init__() |
|
|
self.out_channels = out_channels or in_channels |
|
|
self.inner_dim = self.config.num_attention_heads * self.config.attention_head_dim |
|
|
|
|
|
self.pos_embed = FluxPosEmbed(theta=10000, axes_dim=axes_dims_rope) |
|
|
|
|
|
text_time_guidance_cls = ( |
|
|
CombinedTimestepGuidanceTextProjEmbeddings if guidance_embeds else CombinedTimestepTextProjEmbeddings |
|
|
) |
|
|
self.time_text_embed = text_time_guidance_cls( |
|
|
embedding_dim=self.inner_dim, pooled_projection_dim=self.config.pooled_projection_dim |
|
|
) |
|
|
|
|
|
self.context_embedder = nn.Linear(self.config.joint_attention_dim, self.inner_dim) |
|
|
self.x_embedder = nn.Linear(self.config.in_channels, self.inner_dim) |
|
|
|
|
|
self.transformer_blocks = nn.ModuleList( |
|
|
[ |
|
|
FluxTransformerBlock( |
|
|
dim=self.inner_dim, |
|
|
num_attention_heads=self.config.num_attention_heads, |
|
|
attention_head_dim=self.config.attention_head_dim, |
|
|
) |
|
|
for i in range(self.config.num_layers) |
|
|
] |
|
|
) |
|
|
|
|
|
self.single_transformer_blocks = nn.ModuleList( |
|
|
[ |
|
|
FluxSingleTransformerBlock( |
|
|
dim=self.inner_dim, |
|
|
num_attention_heads=self.config.num_attention_heads, |
|
|
attention_head_dim=self.config.attention_head_dim, |
|
|
) |
|
|
for i in range(self.config.num_single_layers) |
|
|
] |
|
|
) |
|
|
|
|
|
self.norm_out = AdaLayerNormContinuous(self.inner_dim, self.inner_dim, elementwise_affine=False, eps=1e-6) |
|
|
self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True) |
|
|
        # Token-merging (ToMe) hyperparameters passed to every block through `self.tinfo`:
        # merge ratio, maximum downsample level, 2D stride, randomness, and which paths
        # (attention / context / MLP) get merged.
        ratio: float = 0.5
|
|
down: int = 1 |
|
|
sx: int = 2 |
|
|
sy: int = 2 |
|
|
rando: bool = False |
|
|
m1: bool = True |
|
|
m2: bool = True |
|
|
m3: bool = False |
|
|
|
|
|
self.tinfo = { |
|
|
"size": None, |
|
|
"args": { |
|
|
"ratio": ratio, |
|
|
"down": down, |
|
|
"sx": sx, |
|
|
"sy": sy, |
|
|
"rando": rando, |
|
|
"m1": m1, |
|
|
"m2": m2, |
|
|
"m3": m3, |
|
|
"generator": generator |
|
|
} |
|
|
} |
|
|
|
|
|
self.gradient_checkpointing = False |
|
|
|
|
|
@property |
|
|
def attn_processors(self) -> Dict[str, AttentionProcessor]: |
|
|
r""" |
|
|
Returns: |
|
|
`dict` of attention processors: A dictionary containing all attention processors used in the model with |
|
|
indexed by its weight name. |
|
|
""" |
|
|
processors = {} |
|
|
|
|
|
def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): |
|
|
if hasattr(module, "get_processor"): |
|
|
processors[f"{name}.processor"] = module.get_processor() |
|
|
|
|
|
for sub_name, child in module.named_children(): |
|
|
fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) |
|
|
|
|
|
return processors |
|
|
|
|
|
for name, module in self.named_children(): |
|
|
fn_recursive_add_processors(name, module, processors) |
|
|
|
|
|
return processors |
|
|
|
|
|
def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): |
|
|
count = len(self.attn_processors.keys()) |
|
|
|
|
|
if isinstance(processor, dict) and len(processor) != count: |
|
|
raise ValueError( |
|
|
f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" |
|
|
f" number of attention layers: {count}. Please make sure to pass {count} processor classes." |
|
|
) |
|
|
|
|
|
def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): |
|
|
if hasattr(module, "set_processor"): |
|
|
if not isinstance(processor, dict): |
|
|
module.set_processor(processor) |
|
|
else: |
|
|
module.set_processor(processor.pop(f"{name}.processor")) |
|
|
|
|
|
for sub_name, child in module.named_children(): |
|
|
fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) |
|
|
|
|
|
for name, module in self.named_children(): |
|
|
fn_recursive_attn_processor(name, module, processor) |
|
|
|
|
|
|
|
|
def fuse_qkv_projections(self): |
|
|
self.original_attn_processors = None |
|
|
|
|
|
for _, attn_processor in self.attn_processors.items(): |
|
|
if "Added" in str(attn_processor.__class__.__name__): |
|
|
raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.") |
|
|
|
|
|
self.original_attn_processors = self.attn_processors |
|
|
|
|
|
for module in self.modules(): |
|
|
if isinstance(module, Attention): |
|
|
module.fuse_projections(fuse=True) |
|
|
|
|
|
self.set_attn_processor(FusedFluxAttnProcessor2_0()) |
|
|
|
|
|
|
|
|
def unfuse_qkv_projections(self): |
|
|
if self.original_attn_processors is not None: |
|
|
self.set_attn_processor(self.original_attn_processors) |
|
|
|
|
|
def _set_gradient_checkpointing(self, module, value=False): |
|
|
if hasattr(module, "gradient_checkpointing"): |
|
|
module.gradient_checkpointing = value |
|
|
|
|
|
def forward( |
|
|
self, |
|
|
hidden_states: torch.Tensor, |
|
|
encoder_hidden_states: torch.Tensor = None, |
|
|
pooled_projections: torch.Tensor = None, |
|
|
timestep: torch.LongTensor = None, |
|
|
img_ids: torch.Tensor = None, |
|
|
txt_ids: torch.Tensor = None, |
|
|
guidance: torch.Tensor = None, |
|
|
joint_attention_kwargs: Optional[Dict[str, Any]] = None, |
|
|
controlnet_block_samples=None, |
|
|
controlnet_single_block_samples=None, |
|
|
return_dict: bool = True, |
|
|
controlnet_blocks_repeat: bool = False, |
|
|
) -> Union[torch.FloatTensor, Transformer2DModelOutput]: |
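        # Refresh the token-merging grid size from the incoming latents, then run the
        # dual-stream and single-stream blocks (with optional ControlNet residuals).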
|
|
|
|
|
if len(hidden_states.shape) == 4: |
|
|
self.tinfo["size"] = (hidden_states.shape[2], hidden_states.shape[3]) |
|
|
if len(hidden_states.shape) == 3: |
|
|
self.tinfo["size"] = (hidden_states.shape[1], hidden_states.shape[2]) |
|
|
|
|
|
if joint_attention_kwargs is not None: |
|
|
joint_attention_kwargs = joint_attention_kwargs.copy() |
|
|
lora_scale = joint_attention_kwargs.pop("scale", 1.0) |
|
|
else: |
|
|
lora_scale = 1.0 |
|
|
|
|
|
if USE_PEFT_BACKEND: |
|
|
|
|
|
scale_lora_layers(self, lora_scale) |
|
|
else: |
|
|
if joint_attention_kwargs is not None and joint_attention_kwargs.get("scale", None) is not None: |
|
|
logger.warning( |
|
|
"Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective." |
|
|
) |
|
|
|
|
|
hidden_states = self.x_embedder(hidden_states) |
|
|
|
|
|
|
|
|
timestep = timestep.to(hidden_states.dtype) * 1000 |
|
|
if guidance is not None: |
|
|
guidance = guidance.to(hidden_states.dtype) * 1000 |
|
|
else: |
|
|
guidance = None |
|
|
|
|
|
temb = ( |
|
|
self.time_text_embed(timestep, pooled_projections) |
|
|
if guidance is None |
|
|
else self.time_text_embed(timestep, guidance, pooled_projections) |
|
|
) |
|
|
encoder_hidden_states = self.context_embedder(encoder_hidden_states) |
|
|
|
|
|
if txt_ids.ndim == 3: |
|
|
logger.warning( |
|
|
"Passing `txt_ids` 3d torch.Tensor is deprecated." |
|
|
"Please remove the batch dimension and pass it as a 2d torch Tensor" |
|
|
) |
|
|
txt_ids = txt_ids[0] |
|
|
if img_ids.ndim == 3: |
|
|
logger.warning( |
|
|
"Passing `img_ids` 3d torch.Tensor is deprecated." |
|
|
"Please remove the batch dimension and pass it as a 2d torch Tensor" |
|
|
) |
|
|
img_ids = img_ids[0] |
|
|
|
|
|
ids = torch.cat((txt_ids, img_ids), dim=0) |
|
|
image_rotary_emb = self.pos_embed(ids) |
|
|
|
|
|
for index_block, block in enumerate(self.transformer_blocks): |
|
|
if torch.is_grad_enabled() and self.gradient_checkpointing: |
|
|
|
|
|
def create_custom_forward(module, return_dict=None): |
|
|
def custom_forward(*inputs): |
|
|
if return_dict is not None: |
|
|
return module(*inputs, return_dict=return_dict) |
|
|
else: |
|
|
return module(*inputs) |
|
|
|
|
|
return custom_forward |
|
|
|
|
|
ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} |
|
|
encoder_hidden_states, hidden_states = torch.utils.checkpoint.checkpoint( |
|
|
create_custom_forward(block), |
|
|
hidden_states, |
|
|
encoder_hidden_states, |
|
|
temb, |
|
|
image_rotary_emb, |
|
|
**ckpt_kwargs, |
|
|
) |
|
|
|
|
|
else: |
|
|
encoder_hidden_states, hidden_states = block( |
|
|
hidden_states=hidden_states, |
|
|
encoder_hidden_states=encoder_hidden_states, |
|
|
temb=temb, |
|
|
image_rotary_emb=image_rotary_emb, |
|
|
joint_attention_kwargs=joint_attention_kwargs, |
|
|
tinfo=self.tinfo |
|
|
) |
|
|
|
|
|
if controlnet_block_samples is not None: |
|
|
interval_control = len(self.transformer_blocks) / len(controlnet_block_samples) |
|
|
interval_control = int(np.ceil(interval_control)) |
|
|
if controlnet_blocks_repeat: |
|
|
hidden_states = ( |
|
|
hidden_states + controlnet_block_samples[index_block % len(controlnet_block_samples)] |
|
|
) |
|
|
else: |
|
|
hidden_states = hidden_states + controlnet_block_samples[index_block // interval_control] |
|
|
|
|
|
hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) |
|
|
|
|
|
for index_block, block in enumerate(self.single_transformer_blocks): |
|
|
if torch.is_grad_enabled() and self.gradient_checkpointing: |
|
|
|
|
|
def create_custom_forward(module, return_dict=None): |
|
|
def custom_forward(*inputs): |
|
|
if return_dict is not None: |
|
|
return module(*inputs, return_dict=return_dict) |
|
|
else: |
|
|
return module(*inputs) |
|
|
|
|
|
return custom_forward |
|
|
|
|
|
ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} |
|
|
hidden_states = torch.utils.checkpoint.checkpoint( |
|
|
create_custom_forward(block), |
|
|
hidden_states, |
|
|
temb, |
|
|
image_rotary_emb, |
|
|
**ckpt_kwargs, |
|
|
) |
|
|
|
|
|
else: |
|
|
hidden_states = block( |
|
|
hidden_states=hidden_states, |
|
|
temb=temb, |
|
|
image_rotary_emb=image_rotary_emb, |
|
|
joint_attention_kwargs=joint_attention_kwargs, |
|
|
tinfo=self.tinfo |
|
|
) |
|
|
|
|
|
if controlnet_single_block_samples is not None: |
|
|
interval_control = len(self.single_transformer_blocks) / len(controlnet_single_block_samples) |
|
|
interval_control = int(np.ceil(interval_control)) |
|
|
hidden_states[:, encoder_hidden_states.shape[1] :, ...] = ( |
|
|
hidden_states[:, encoder_hidden_states.shape[1] :, ...] |
|
|
+ controlnet_single_block_samples[index_block // interval_control] |
|
|
) |
|
|
|
|
|
hidden_states = hidden_states[:, encoder_hidden_states.shape[1] :, ...] |
|
|
|
|
|
hidden_states = self.norm_out(hidden_states, temb) |
|
|
output = self.proj_out(hidden_states) |
|
|
|
|
|
if USE_PEFT_BACKEND: |
|
|
unscale_lora_layers(self, lora_scale) |
|
|
|
|
|
if not return_dict: |
|
|
return (output,) |
|
|
|
|
|
return Transformer2DModelOutput(sample=output) |
|
|
|
|
|
|
|
|
class FluxPipeline( |
|
|
DiffusionPipeline, |
|
|
FluxLoraLoaderMixin, |
|
|
FromSingleFileMixin, |
|
|
TextualInversionLoaderMixin, |
|
|
): |
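    # Flux text-to-image pipeline; it follows the stock diffusers FluxPipeline closely but
    # is wired to the token-merging FluxTransformer2DModel defined above.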
|
|
model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae" |
|
|
_optional_components = [] |
|
|
_callback_tensor_inputs = ["latents", "prompt_embeds"] |
|
|
|
|
|
def __init__( |
|
|
self, |
|
|
scheduler: FlowMatchEulerDiscreteScheduler, |
|
|
vae: AutoencoderKL, |
|
|
text_encoder: CLIPTextModel, |
|
|
tokenizer: CLIPTokenizer, |
|
|
text_encoder_2: T5EncoderModel, |
|
|
tokenizer_2: T5TokenizerFast, |
|
|
transformer: FluxTransformer2DModel, |
|
|
): |
|
|
super().__init__() |
|
|
|
|
|
self.register_modules( |
|
|
vae=vae, |
|
|
text_encoder=text_encoder, |
|
|
text_encoder_2=text_encoder_2, |
|
|
tokenizer=tokenizer, |
|
|
tokenizer_2=tokenizer_2, |
|
|
transformer=transformer, |
|
|
scheduler=scheduler, |
|
|
) |
|
|
self.vae_scale_factor = ( |
|
|
2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8 |
|
|
) |
|
|
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor * 2) |
|
|
self.tokenizer_max_length = ( |
|
|
self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77 |
|
|
) |
|
|
self.default_sample_size = 128 |
|
|
|
|
|
def _get_t5_prompt_embeds( |
|
|
self, |
|
|
prompt: Union[str, List[str]] = None, |
|
|
num_images_per_prompt: int = 1, |
|
|
max_sequence_length: int = 512, |
|
|
device: Optional[torch.device] = None, |
|
|
dtype: Optional[torch.dtype] = None, |
|
|
): |
|
|
device = device or self._execution_device |
|
|
dtype = dtype or self.text_encoder.dtype |
|
|
|
|
|
prompt = [prompt] if isinstance(prompt, str) else prompt |
|
|
batch_size = len(prompt) |
|
|
|
|
|
if isinstance(self, TextualInversionLoaderMixin): |
|
|
prompt = self.maybe_convert_prompt(prompt, self.tokenizer_2) |
|
|
|
|
|
text_inputs = self.tokenizer_2( |
|
|
prompt, |
|
|
padding="max_length", |
|
|
max_length=max_sequence_length, |
|
|
truncation=True, |
|
|
return_length=False, |
|
|
return_overflowing_tokens=False, |
|
|
return_tensors="pt", |
|
|
) |
|
|
text_input_ids = text_inputs.input_ids |
|
|
untruncated_ids = self.tokenizer_2(prompt, padding="longest", return_tensors="pt").input_ids |
|
|
|
|
|
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): |
|
|
removed_text = self.tokenizer_2.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1]) |
|
|
logger.warning( |
|
|
"The following part of your input was truncated because `max_sequence_length` is set to " |
|
|
f" {max_sequence_length} tokens: {removed_text}" |
|
|
) |
|
|
|
|
|
prompt_embeds = self.text_encoder_2(text_input_ids.to(device), output_hidden_states=False)[0] |
|
|
|
|
|
dtype = self.text_encoder_2.dtype |
|
|
prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) |
|
|
|
|
|
_, seq_len, _ = prompt_embeds.shape |
|
|
|
|
|
|
|
|
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) |
|
|
prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) |
|
|
|
|
|
return prompt_embeds |
|
|
|
|
|
def _get_clip_prompt_embeds( |
|
|
self, |
|
|
prompt: Union[str, List[str]], |
|
|
num_images_per_prompt: int = 1, |
|
|
device: Optional[torch.device] = None, |
|
|
): |
|
|
device = device or self._execution_device |
|
|
|
|
|
prompt = [prompt] if isinstance(prompt, str) else prompt |
|
|
batch_size = len(prompt) |
|
|
|
|
|
if isinstance(self, TextualInversionLoaderMixin): |
|
|
prompt = self.maybe_convert_prompt(prompt, self.tokenizer) |
|
|
|
|
|
text_inputs = self.tokenizer( |
|
|
prompt, |
|
|
padding="max_length", |
|
|
max_length=self.tokenizer_max_length, |
|
|
truncation=True, |
|
|
return_overflowing_tokens=False, |
|
|
return_length=False, |
|
|
return_tensors="pt", |
|
|
) |
|
|
|
|
|
text_input_ids = text_inputs.input_ids |
|
|
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids |
|
|
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): |
|
|
removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1]) |
|
|
logger.warning( |
|
|
"The following part of your input was truncated because CLIP can only handle sequences up to" |
|
|
f" {self.tokenizer_max_length} tokens: {removed_text}" |
|
|
) |
|
|
prompt_embeds = self.text_encoder(text_input_ids.to(device), output_hidden_states=False) |
|
|
|
|
|
prompt_embeds = prompt_embeds.pooler_output |
|
|
prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) |
|
|
|
|
|
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt) |
|
|
prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, -1) |
|
|
|
|
|
return prompt_embeds |
|
|
|
|
|
def encode_prompt( |
|
|
self, |
|
|
prompt: Union[str, List[str]], |
|
|
prompt_2: Union[str, List[str]], |
|
|
device: Optional[torch.device] = None, |
|
|
num_images_per_prompt: int = 1, |
|
|
prompt_embeds: Optional[torch.FloatTensor] = None, |
|
|
pooled_prompt_embeds: Optional[torch.FloatTensor] = None, |
|
|
max_sequence_length: int = 512, |
|
|
lora_scale: Optional[float] = None, |
|
|
): |
|
|
device = device or self._execution_device |
|
|
|
|
|
if lora_scale is not None and isinstance(self, FluxLoraLoaderMixin): |
|
|
self._lora_scale = lora_scale |
|
|
if self.text_encoder is not None and USE_PEFT_BACKEND: |
|
|
scale_lora_layers(self.text_encoder, lora_scale) |
|
|
if self.text_encoder_2 is not None and USE_PEFT_BACKEND: |
|
|
scale_lora_layers(self.text_encoder_2, lora_scale) |
|
|
|
|
|
prompt = [prompt] if isinstance(prompt, str) else prompt |
|
|
|
|
|
if prompt_embeds is None: |
|
|
prompt_2 = prompt_2 or prompt |
|
|
prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2 |
|
|
|
|
|
|
|
|
pooled_prompt_embeds = self._get_clip_prompt_embeds( |
|
|
prompt=prompt, |
|
|
device=device, |
|
|
num_images_per_prompt=num_images_per_prompt, |
|
|
) |
|
|
prompt_embeds = self._get_t5_prompt_embeds( |
|
|
prompt=prompt_2, |
|
|
num_images_per_prompt=num_images_per_prompt, |
|
|
max_sequence_length=max_sequence_length, |
|
|
device=device, |
|
|
) |
|
|
|
|
|
if self.text_encoder is not None: |
|
|
if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND: |
|
|
|
|
|
unscale_lora_layers(self.text_encoder, lora_scale) |
|
|
|
|
|
if self.text_encoder_2 is not None: |
|
|
if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND: |
|
|
|
|
|
unscale_lora_layers(self.text_encoder_2, lora_scale) |
|
|
|
|
|
dtype = self.text_encoder.dtype if self.text_encoder is not None else self.transformer.dtype |
|
|
text_ids = torch.zeros(prompt_embeds.shape[1], 3).to(device=device, dtype=dtype) |
|
|
|
|
|
return prompt_embeds, pooled_prompt_embeds, text_ids |
|
|
|
|
|
def check_inputs( |
|
|
self, |
|
|
prompt, |
|
|
prompt_2, |
|
|
height, |
|
|
width, |
|
|
prompt_embeds=None, |
|
|
pooled_prompt_embeds=None, |
|
|
callback_on_step_end_tensor_inputs=None, |
|
|
max_sequence_length=None, |
|
|
): |
|
|
if height % (self.vae_scale_factor * 2) != 0 or width % (self.vae_scale_factor * 2) != 0: |
|
|
logger.warning( |
|
|
f"`height` and `width` have to be divisible by {self.vae_scale_factor * 2} but are {height} and {width}. Dimensions will be resized accordingly" |
|
|
) |
|
|
|
|
|
if callback_on_step_end_tensor_inputs is not None and not all( |
|
|
k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs |
|
|
): |
|
|
raise ValueError( |
|
|
f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" |
|
|
) |
|
|
|
|
|
if prompt is not None and prompt_embeds is not None: |
|
|
raise ValueError( |
|
|
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" |
|
|
" only forward one of the two." |
|
|
) |
|
|
elif prompt_2 is not None and prompt_embeds is not None: |
|
|
raise ValueError( |
|
|
f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" |
|
|
" only forward one of the two." |
|
|
) |
|
|
elif prompt is None and prompt_embeds is None: |
|
|
raise ValueError( |
|
|
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." |
|
|
) |
|
|
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): |
|
|
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") |
|
|
elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): |
|
|
raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") |
|
|
|
|
|
if prompt_embeds is not None and pooled_prompt_embeds is None: |
|
|
raise ValueError( |
|
|
"If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." |
|
|
) |
|
|
|
|
|
if max_sequence_length is not None and max_sequence_length > 512: |
|
|
raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}") |
|
|
|
|
|
@staticmethod |
|
|
def _prepare_latent_image_ids(batch_size, height, width, device, dtype): |
|
|
latent_image_ids = torch.zeros(height, width, 3) |
|
|
latent_image_ids[..., 1] = latent_image_ids[..., 1] + torch.arange(height)[:, None] |
|
|
latent_image_ids[..., 2] = latent_image_ids[..., 2] + torch.arange(width)[None, :] |
|
|
|
|
|
latent_image_id_height, latent_image_id_width, latent_image_id_channels = latent_image_ids.shape |
|
|
|
|
|
latent_image_ids = latent_image_ids.reshape( |
|
|
latent_image_id_height * latent_image_id_width, latent_image_id_channels |
|
|
) |
|
|
|
|
|
return latent_image_ids.to(device=device, dtype=dtype) |
|
|
|
|
|
@staticmethod |
|
|
def _pack_latents(latents, batch_size, num_channels_latents, height, width): |
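        # Pack (B, C, H, W) latents into a (B, H/2 * W/2, C * 4) sequence of 2x2 patches.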
|
|
latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2) |
|
|
latents = latents.permute(0, 2, 4, 1, 3, 5) |
|
|
latents = latents.reshape(batch_size, (height // 2) * (width // 2), num_channels_latents * 4) |
|
|
|
|
|
return latents |
|
|
|
|
|
@staticmethod |
|
|
def _unpack_latents(latents, height, width, vae_scale_factor): |
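        # Inverse of `_pack_latents`: restore the (B, C, H, W) layout expected by the VAE.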
|
|
batch_size, num_patches, channels = latents.shape |
|
|
|
|
|
|
|
|
|
|
|
height = 2 * (int(height) // (vae_scale_factor * 2)) |
|
|
width = 2 * (int(width) // (vae_scale_factor * 2)) |
|
|
|
|
|
latents = latents.view(batch_size, height // 2, width // 2, channels // 4, 2, 2) |
|
|
latents = latents.permute(0, 3, 1, 4, 2, 5) |
|
|
|
|
|
latents = latents.reshape(batch_size, channels // (2 * 2), height, width) |
|
|
|
|
|
return latents |
|
|
|
|
|
def prepare_latents( |
|
|
self, |
|
|
batch_size, |
|
|
num_channels_latents, |
|
|
height, |
|
|
width, |
|
|
dtype, |
|
|
device, |
|
|
generator, |
|
|
latents=None, |
|
|
): |
|
|
|
|
|
|
|
|
height = 2 * (int(height) // (self.vae_scale_factor * 2)) |
|
|
width = 2 * (int(width) // (self.vae_scale_factor * 2)) |
|
|
|
|
|
shape = (batch_size, num_channels_latents, height, width) |
|
|
|
|
|
if latents is not None: |
|
|
latent_image_ids = self._prepare_latent_image_ids(batch_size, height // 2, width // 2, device, dtype) |
|
|
return latents.to(device=device, dtype=dtype), latent_image_ids |
|
|
|
|
|
if isinstance(generator, list) and len(generator) != batch_size: |
|
|
raise ValueError( |
|
|
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" |
|
|
f" size of {batch_size}. Make sure the batch size matches the length of the generators." |
|
|
) |
|
|
|
|
|
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) |
|
|
latents = self._pack_latents(latents, batch_size, num_channels_latents, height, width) |
|
|
|
|
|
latent_image_ids = self._prepare_latent_image_ids(batch_size, height // 2, width // 2, device, dtype) |
|
|
|
|
|
return latents, latent_image_ids |
|
|
|
|
|
@property |
|
|
def guidance_scale(self): |
|
|
return self._guidance_scale |
|
|
|
|
|
@property |
|
|
def joint_attention_kwargs(self): |
|
|
return self._joint_attention_kwargs |
|
|
|
|
|
@property |
|
|
def num_timesteps(self): |
|
|
return self._num_timesteps |
|
|
|
|
|
@property |
|
|
def interrupt(self): |
|
|
return self._interrupt |
|
|
|
|
|
|
|
|
@torch.no_grad() |
|
|
def __call__( |
|
|
self, |
|
|
prompt: Union[str, List[str]] = None, |
|
|
prompt_2: Optional[Union[str, List[str]]] = None, |
|
|
height: Optional[int] = None, |
|
|
width: Optional[int] = None, |
|
|
num_inference_steps: int = 28, |
|
|
sigmas: Optional[List[float]] = None, |
|
|
guidance_scale: float = 3.5, |
|
|
num_images_per_prompt: Optional[int] = 1, |
|
|
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, |
|
|
latents: Optional[torch.FloatTensor] = None, |
|
|
prompt_embeds: Optional[torch.FloatTensor] = None, |
|
|
pooled_prompt_embeds: Optional[torch.FloatTensor] = None, |
|
|
output_type: Optional[str] = "pil", |
|
|
return_dict: bool = True, |
|
|
joint_attention_kwargs: Optional[Dict[str, Any]] = None, |
|
|
callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, |
|
|
callback_on_step_end_tensor_inputs: List[str] = ["latents"], |
|
|
max_sequence_length: int = 512, |
|
|
): |
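        # Standard Flux sampling loop: encode prompts, prepare packed latents, build the
        # shifted sigma schedule, denoise step by step, then decode with the VAE.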
|
|
|
|
|
height = height or self.default_sample_size * self.vae_scale_factor |
|
|
width = width or self.default_sample_size * self.vae_scale_factor |
|
|
|
|
|
|
|
|
self.check_inputs( |
|
|
prompt, |
|
|
prompt_2, |
|
|
height, |
|
|
width, |
|
|
prompt_embeds=prompt_embeds, |
|
|
pooled_prompt_embeds=pooled_prompt_embeds, |
|
|
callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, |
|
|
max_sequence_length=max_sequence_length, |
|
|
) |
|
|
|
|
|
self._guidance_scale = guidance_scale |
|
|
self._joint_attention_kwargs = joint_attention_kwargs |
|
|
self._interrupt = False |
|
|
|
|
|
|
|
|
if prompt is not None and isinstance(prompt, str): |
|
|
batch_size = 1 |
|
|
elif prompt is not None and isinstance(prompt, list): |
|
|
batch_size = len(prompt) |
|
|
else: |
|
|
batch_size = prompt_embeds.shape[0] |
|
|
|
|
|
device = self._execution_device |
|
|
|
|
|
lora_scale = ( |
|
|
self.joint_attention_kwargs.get("scale", None) if self.joint_attention_kwargs is not None else None |
|
|
) |
|
|
( |
|
|
prompt_embeds, |
|
|
pooled_prompt_embeds, |
|
|
text_ids, |
|
|
) = self.encode_prompt( |
|
|
prompt=prompt, |
|
|
prompt_2=prompt_2, |
|
|
prompt_embeds=prompt_embeds, |
|
|
pooled_prompt_embeds=pooled_prompt_embeds, |
|
|
device=device, |
|
|
num_images_per_prompt=num_images_per_prompt, |
|
|
max_sequence_length=max_sequence_length, |
|
|
lora_scale=lora_scale, |
|
|
) |
|
|
|
|
|
|
|
|
num_channels_latents = self.transformer.config.in_channels // 4 |
|
|
latents, latent_image_ids = self.prepare_latents( |
|
|
batch_size * num_images_per_prompt, |
|
|
num_channels_latents, |
|
|
height, |
|
|
width, |
|
|
prompt_embeds.dtype, |
|
|
device, |
|
|
generator, |
|
|
latents, |
|
|
) |
|
|
|
|
|
|
|
|
sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) if sigmas is None else sigmas |
|
|
image_seq_len = latents.shape[1] |
|
|
mu = calculate_shift( |
|
|
image_seq_len, |
|
|
self.scheduler.config.base_image_seq_len, |
|
|
self.scheduler.config.max_image_seq_len, |
|
|
self.scheduler.config.base_shift, |
|
|
self.scheduler.config.max_shift, |
|
|
) |
|
|
timesteps, num_inference_steps = retrieve_timesteps( |
|
|
self.scheduler, |
|
|
num_inference_steps, |
|
|
device, |
|
|
sigmas=sigmas, |
|
|
mu=mu, |
|
|
) |
|
|
num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) |
|
|
self._num_timesteps = len(timesteps) |
|
|
|
|
|
|
|
|
if self.transformer.config.guidance_embeds: |
|
|
guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32) |
|
|
guidance = guidance.expand(latents.shape[0]) |
|
|
else: |
|
|
guidance = None |
|
|
|
|
|
|
|
|
for i, t in enumerate(timesteps): |
|
|
if self.interrupt: |
|
|
continue |
|
|
|
|
|
|
|
|
timestep = t.expand(latents.shape[0]).to(latents.dtype) |
|
|
|
|
|
noise_pred = self.transformer( |
|
|
hidden_states=latents, |
|
|
timestep=timestep / 1000, |
|
|
guidance=guidance, |
|
|
pooled_projections=pooled_prompt_embeds, |
|
|
encoder_hidden_states=prompt_embeds, |
|
|
txt_ids=text_ids, |
|
|
img_ids=latent_image_ids, |
|
|
joint_attention_kwargs=self.joint_attention_kwargs, |
|
|
return_dict=False, |
|
|
)[0] |
|
|
|
|
|
|
|
|
latents_dtype = latents.dtype |
|
|
latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] |
|
|
|
|
|
if latents.dtype != latents_dtype: |
|
|
if torch.backends.mps.is_available(): |
|
|
|
|
|
latents = latents.to(latents_dtype) |
|
|
|
|
|
if callback_on_step_end is not None: |
|
|
callback_kwargs = {} |
|
|
for k in callback_on_step_end_tensor_inputs: |
|
|
callback_kwargs[k] = locals()[k] |
|
|
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) |
|
|
|
|
|
latents = callback_outputs.pop("latents", latents) |
|
|
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) |
|
|
|
|
|
if XLA_AVAILABLE: |
|
|
xm.mark_step() |
|
|
|
|
|
if output_type == "latent": |
|
|
image = latents |
|
|
|
|
|
else: |
|
|
latents = self._unpack_latents(latents, height, width, self.vae_scale_factor) |
|
|
latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor |
|
|
image = self.vae.decode(latents, return_dict=False)[0] |
|
|
image = self.image_processor.postprocess(image, output_type=output_type) |
|
|
|
|
|
|
|
|
self.maybe_free_model_hooks() |
|
|
|
|
|
if not return_dict: |
|
|
return (image,) |
|
|
|
|
|
return FluxPipelineOutput(images=image) |
|
|
|
|
|
# Annotation-only placeholder; `load_pipeline` below returns a FluxPipeline instance.
Pipeline = None
|
|
# Enable TF32 matmuls and cuDNN autotuning for faster inference on recent NVIDIA GPUs.
torch.backends.cuda.matmul.allow_tf32 = True
|
|
torch.backends.cudnn.enabled = True |
|
|
torch.backends.cudnn.benchmark = True |
|
|
|
|
|
|
|
|
|
|
|
ckpt_id = "silentdriver/4b68f38c0b" |
|
|
ckpt_revision = "36a3cf4a9f733fc5f31257099b56b304fb2eceab" |
|
|
def empty_cache(): |
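    # Release Python and CUDA memory and reset CUDA memory statistics between loads.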
|
|
gc.collect() |
|
|
torch.cuda.empty_cache() |
|
|
torch.cuda.reset_max_memory_allocated() |
|
|
torch.cuda.reset_peak_memory_stats() |
|
|
|
|
|
def load_pipeline() -> Pipeline: |
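    # Load the custom T5 text encoder and the token-merging transformer from the local
    # Hugging Face cache, assemble the pipeline on GPU in bfloat16, then warm it up.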
|
|
empty_cache() |
|
|
|
|
|
dtype, device = torch.bfloat16, "cuda" |
|
|
|
|
|
text_encoder_2 = T5EncoderModel.from_pretrained( |
|
|
"silentdriver/aadb864af9", revision = "060dabc7fa271c26dfa3fd43c16e7c5bf3ac7892", torch_dtype=torch.bfloat16 |
|
|
).to(memory_format=torch.channels_last) |
|
|
|
|
|
path = os.path.join(HF_HUB_CACHE, "models--silentdriver--7d92df966a/snapshots/add1b8d9a84c728c1209448c4a695759240bad3c") |
|
|
generator = torch.Generator(device=device) |
|
|
    model = FluxTransformer2DModel.from_pretrained(
        path, torch_dtype=dtype, use_safetensors=False, generator=generator
    ).to(memory_format=torch.channels_last)
|
|
pipeline = FluxPipeline.from_pretrained( |
|
|
ckpt_id, |
|
|
revision=ckpt_revision, |
|
|
transformer=model, |
|
|
text_encoder_2=text_encoder_2, |
|
|
torch_dtype=dtype, |
|
|
).to(device) |
|
|
|
|
|
    # A few warm-up generations so CUDA kernels and caches are primed before real requests.
    for _ in range(3):
|
|
pipeline(prompt="blah blah waah waah oneshot oneshot gang gang", width=1024, height=1024, guidance_scale=0.0, num_inference_steps=4, max_sequence_length=256) |
|
|
|
|
|
empty_cache() |
|
|
return pipeline |
|
|
|
|
|
|
|
|
@torch.no_grad() |
|
|
def infer(request: TextToImageRequest, pipeline: Pipeline, generator: Generator) -> Image: |
|
|
    image = pipeline(
        request.prompt,
        generator=generator,
        guidance_scale=0.0,
        num_inference_steps=4,
        max_sequence_length=256,
        height=request.height,
        width=request.width,
        output_type="pil",
    ).images[0]
|
|
return image |