from dataclasses import dataclass

import torch
from torch import Tensor, nn

from .modules.layers import (
    DoubleStreamBlock,
    EmbedND,
    LastLayer,
    MLPEmbedder,
    SigLIPMultiFeatProjModel,
    SingleStreamBlock,
    timestep_embedding,
)

@dataclass
class FluxParams:
    in_channels: int
    vec_in_dim: int
    context_in_dim: int
    hidden_size: int
    mlp_ratio: float
    num_heads: int
    depth: int
    depth_single_blocks: int
    axes_dim: list[int]
    theta: int
    qkv_bias: bool
    guidance_embed: bool

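# Example configuration (illustrative values in the style of a FLUX.1-dev-sized
# model, not a verified checkpoint config). Note that sum(axes_dim) must equal
# hidden_size // num_heads (here 16 + 56 + 56 == 3072 // 24 == 128):
#
#   params = FluxParams(
#       in_channels=64,
#       vec_in_dim=768,
#       context_in_dim=4096,
#       hidden_size=3072,
#       mlp_ratio=4.0,
#       num_heads=24,
#       depth=19,
#       depth_single_blocks=38,
#       axes_dim=[16, 56, 56],
#       theta=10_000,
#       qkv_bias=True,
#       guidance_embed=True,
#   )
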
class Flux(nn.Module):
    """
    Transformer model for flow matching on sequences.
    """

    _supports_gradient_checkpointing = True

    def __init__(self, params: FluxParams):
        super().__init__()

        self.params = params
        self.in_channels = params.in_channels
        self.out_channels = self.in_channels
        if params.hidden_size % params.num_heads != 0:
            raise ValueError(
                f"Hidden size {params.hidden_size} must be divisible by num_heads {params.num_heads}"
            )
        pe_dim = params.hidden_size // params.num_heads
        if sum(params.axes_dim) != pe_dim:
            raise ValueError(
                f"Got axes_dim {params.axes_dim}, which must sum to the positional embedding dim {pe_dim}"
            )
        self.hidden_size = params.hidden_size
        self.num_heads = params.num_heads
        self.pe_embedder = EmbedND(
            dim=pe_dim, theta=params.theta, axes_dim=params.axes_dim
        )
        self.img_in = nn.Linear(self.in_channels, self.hidden_size, bias=True)
        self.time_in = MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size)
        self.vector_in = MLPEmbedder(params.vec_in_dim, self.hidden_size)
        self.guidance_in = (
            MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size)
            if params.guidance_embed
            else nn.Identity()
        )
        self.txt_in = nn.Linear(params.context_in_dim, self.hidden_size)

        self.double_blocks = nn.ModuleList(
            [
                DoubleStreamBlock(
                    self.hidden_size,
                    self.num_heads,
                    mlp_ratio=params.mlp_ratio,
                    qkv_bias=params.qkv_bias,
                )
                for _ in range(params.depth)
            ]
        )

        self.single_blocks = nn.ModuleList(
            [
                SingleStreamBlock(
                    self.hidden_size, self.num_heads, mlp_ratio=params.mlp_ratio
                )
                for _ in range(params.depth_single_blocks)
            ]
        )

        self.final_layer = LastLayer(self.hidden_size, 1, self.out_channels)
        self.gradient_checkpointing = False

        # Projects multi-level SigLIP features of a style image into text-token space.
        self.feature_embedder = SigLIPMultiFeatProjModel(
            siglip_token_nums=729,
            style_token_nums=64,
            siglip_token_dims=1152,
            hidden_size=self.hidden_size,
            context_layer_norm=True,
        )
        print("Using the SigLIP multi-feature semantic encoder to encode the style image")

        # Expected to be assigned externally (e.g. a SigLIP vision tower) before
        # calling forward() with siglip_inputs.
        self.vision_encoder = None

    def _set_gradient_checkpointing(self, module, value=False):
        if hasattr(module, "gradient_checkpointing"):
            module.gradient_checkpointing = value
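    # Sketch of enabling activation checkpointing: the flag set in __init__ is
    # only honored by forward() while the module is in training mode.
    #
    #   model.gradient_checkpointing = True
    #   model.train()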
    @property
    def attn_processors(self):
        r"""
        Returns:
            `dict` of attention processors: all attention processors used in the model,
            indexed by their weight name.
        """
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
    def set_attn_processor(self, processor):
        r"""
        Sets the attention processor to use to compute attention.

        Parameters:
            processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
                The instantiated processor class or a dictionary of processor classes that will be set as the
                processor for **all** `Attention` layers.

                If `processor` is a dict, the key needs to define the path to the corresponding cross attention
                processor. This is strongly recommended when setting trainable attention processors.
        """
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
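    # Usage sketch (`MyAttnProcessor` is hypothetical): a single instance is applied
    # to every attention layer, while a dict must supply one entry per key of
    # `self.attn_processors`:
    #
    #   model.set_attn_processor(MyAttnProcessor())
    #   model.set_attn_processor({name: MyAttnProcessor() for name in model.attn_processors})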
    def forward(
        self,
        img: Tensor,
        img_ids: Tensor,
        txt: Tensor,
        txt_ids: Tensor,
        timesteps: Tensor,
        y: Tensor,
        guidance: Tensor | None = None,
        ref_img: Tensor | None = None,
        ref_img_ids: Tensor | None = None,
        siglip_inputs: list[Tensor] | None = None,
    ) -> Tensor:
        if img.ndim != 3 or txt.ndim != 3:
            raise ValueError("Input img and txt tensors must have 3 dimensions.")

        # Embed the image and text tokens and build the modulation vector.
        img = self.img_in(img)
        vec = self.time_in(timestep_embedding(timesteps, 256))
        if self.params.guidance_embed:
            if guidance is None:
                raise ValueError(
                    "Didn't get guidance strength for guidance distilled model."
                )
            vec = vec + self.guidance_in(timestep_embedding(guidance, 256))
        vec = vec + self.vector_in(y)
        txt = self.txt_in(txt)

        if (
            self.feature_embedder is not None
            and self.vision_encoder is not None
            and siglip_inputs is not None
            and len(siglip_inputs) > 0
        ):
            # Encode each style image with the external vision encoder, project the
            # multi-level SigLIP features into text-token space, and prepend them to
            # the text stream (with all-zero positional ids).
            siglip_embedding = [
                self.vision_encoder(**emb, output_hidden_states=True)
                for emb in siglip_inputs
            ]
            siglip_embedding = torch.cat(
                [self.feature_embedder(emb) for emb in siglip_embedding], dim=1
            )
            txt = torch.cat((siglip_embedding, txt), dim=1)
            siglip_embedding_ids = torch.zeros(
                siglip_embedding.shape[0],
                siglip_embedding.shape[1],
                3,
                device=txt_ids.device,
            )
            txt_ids = torch.cat((siglip_embedding_ids, txt_ids), dim=1)

        ids = torch.cat((txt_ids, img_ids), dim=1)

        # Remember where the noised image tokens end so any reference-image tokens
        # appended below can be dropped again after the transformer blocks.
        img_end = img.shape[1]
        if ref_img is not None:
            if isinstance(ref_img, (tuple, list)):
                img_in = [img] + [self.img_in(ref) for ref in ref_img]
                id_list = [ids] + list(ref_img_ids)
                img = torch.cat(img_in, dim=1)
                ids = torch.cat(id_list, dim=1)
            else:
                img = torch.cat((img, self.img_in(ref_img)), dim=1)
                ids = torch.cat((ids, ref_img_ids), dim=1)
        pe = self.pe_embedder(ids)

        for block in self.double_blocks:
            if self.training and self.gradient_checkpointing:
                img, txt = torch.utils.checkpoint.checkpoint(
                    block,
                    img=img,
                    txt=txt,
                    vec=vec,
                    pe=pe,
                    use_reentrant=False,
                )
            else:
                img, txt = block(img=img, txt=txt, vec=vec, pe=pe)

        # Single-stream blocks operate on the concatenated text + image sequence.
        img = torch.cat((txt, img), dim=1)
        for block in self.single_blocks:
            if self.training and self.gradient_checkpointing:
                img = torch.utils.checkpoint.checkpoint(
                    block, img, vec=vec, pe=pe, use_reentrant=False
                )
            else:
                img = block(img, vec=vec, pe=pe)
        # Drop the text tokens, then any reference-image tokens.
        img = img[:, txt.shape[1] :, ...]
        img = img[:, :img_end, ...]

        img = self.final_layer(img, vec)
        return img
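if __name__ == "__main__":
    # Smoke-test sketch with a deliberately tiny, illustrative configuration (not a
    # real checkpoint config); assumes the layer modules imported above behave as
    # used in Flux.forward. sum(axes_dim) == hidden_size // num_heads (4+6+6 == 16).
    params = FluxParams(
        in_channels=16,
        vec_in_dim=32,
        context_in_dim=48,
        hidden_size=64,
        mlp_ratio=4.0,
        num_heads=4,
        depth=2,
        depth_single_blocks=2,
        axes_dim=[4, 6, 6],
        theta=10_000,
        qkv_bias=True,
        guidance_embed=True,
    )
    model = Flux(params)
    bsz, img_len, txt_len = 2, 32, 8
    pred = model(
        img=torch.randn(bsz, img_len, params.in_channels),
        img_ids=torch.zeros(bsz, img_len, 3),
        txt=torch.randn(bsz, txt_len, params.context_in_dim),
        txt_ids=torch.zeros(bsz, txt_len, 3),
        timesteps=torch.rand(bsz),
        y=torch.randn(bsz, params.vec_in_dim),
        guidance=torch.full((bsz,), 4.0),  # required because guidance_embed=True
    )
    # Output has one token per noised image token, projected back to in_channels.
    assert pred.shape == (bsz, img_len, params.in_channels)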