from dataclasses import dataclass
import math
from typing import Dict, List, Optional, Union

from .device_utils import init_ipex
from .custom_offloading_utils import ModelOffloader

init_ipex()

import torch
from einops import rearrange
from torch import Tensor, nn
from torch.utils.checkpoint import checkpoint


@dataclass
class FluxParams:
    in_channels: int
    vec_in_dim: int
    context_in_dim: int
    hidden_size: int
    mlp_ratio: float
    num_heads: int
    depth: int
    depth_single_blocks: int
    axes_dim: list[int]
    theta: int
    qkv_bias: bool
    guidance_embed: bool


@dataclass
class AutoEncoderParams:
    resolution: int
    in_channels: int
    ch: int
    out_ch: int
    ch_mult: list[int]
    num_res_blocks: int
    z_channels: int
    scale_factor: float
    shift_factor: float


def swish(x: Tensor) -> Tensor:
    return x * torch.sigmoid(x)


class AttnBlock(nn.Module):
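    """Single-head self-attention over the flattened spatial grid (h * w), used inside the autoencoder."""
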
    def __init__(self, in_channels: int):
        super().__init__()
        self.in_channels = in_channels

        self.norm = nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)

        self.q = nn.Conv2d(in_channels, in_channels, kernel_size=1)
        self.k = nn.Conv2d(in_channels, in_channels, kernel_size=1)
        self.v = nn.Conv2d(in_channels, in_channels, kernel_size=1)
        self.proj_out = nn.Conv2d(in_channels, in_channels, kernel_size=1)

    def attention(self, h_: Tensor) -> Tensor:
        h_ = self.norm(h_)
        q = self.q(h_)
        k = self.k(h_)
        v = self.v(h_)

        b, c, h, w = q.shape
        q = rearrange(q, "b c h w -> b 1 (h w) c").contiguous()
        k = rearrange(k, "b c h w -> b 1 (h w) c").contiguous()
        v = rearrange(v, "b c h w -> b 1 (h w) c").contiguous()
        h_ = nn.functional.scaled_dot_product_attention(q, k, v)

        return rearrange(h_, "b 1 (h w) c -> b c h w", h=h, w=w, c=c, b=b)

    def forward(self, x: Tensor) -> Tensor:
        return x + self.proj_out(self.attention(x))


class ResnetBlock(nn.Module):
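    """Two-conv residual block with GroupNorm and swish; a 1x1 shortcut matches channels when they differ."""
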
    def __init__(self, in_channels: int, out_channels: Optional[int] = None):
        super().__init__()
        self.in_channels = in_channels
        out_channels = in_channels if out_channels is None else out_channels
        self.out_channels = out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
        self.norm2 = nn.GroupNorm(num_groups=32, num_channels=out_channels, eps=1e-6, affine=True)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
        if self.in_channels != self.out_channels:
            self.nin_shortcut = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        h = x
        h = self.norm1(h)
        h = swish(h)
        h = self.conv1(h)

        h = self.norm2(h)
        h = swish(h)
        h = self.conv2(h)

        if self.in_channels != self.out_channels:
            x = self.nin_shortcut(x)

        return x + h


class Downsample(nn.Module):
    def __init__(self, in_channels: int):
        super().__init__()
        # no asymmetric padding in torch conv, must do it ourselves
        self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0)

    def forward(self, x: Tensor):
        pad = (0, 1, 0, 1)
        x = nn.functional.pad(x, pad, mode="constant", value=0)
        x = self.conv(x)
        return x


class Upsample(nn.Module):
    def __init__(self, in_channels: int):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)

    def forward(self, x: Tensor):
        x = nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
        x = self.conv(x)
        return x


class Encoder(nn.Module):
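    """VAE encoder: conv stem, len(ch_mult) downsampling stages of ResnetBlocks,
    a middle block with attention, and a head producing 2 * z_channels channels
    (mean and logvar for DiagonalGaussian)."""
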
    def __init__(
        self,
        resolution: int,
        in_channels: int,
        ch: int,
        ch_mult: list[int],
        num_res_blocks: int,
        z_channels: int,
    ):
        super().__init__()
        self.ch = ch
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels
        # downsampling
        self.conv_in = nn.Conv2d(in_channels, self.ch, kernel_size=3, stride=1, padding=1)

        curr_res = resolution
        in_ch_mult = (1,) + tuple(ch_mult)
        self.in_ch_mult = in_ch_mult
        self.down = nn.ModuleList()
        block_in = self.ch
        for i_level in range(self.num_resolutions):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_in = ch * in_ch_mult[i_level]
            block_out = ch * ch_mult[i_level]
            for _ in range(self.num_res_blocks):
                block.append(ResnetBlock(in_channels=block_in, out_channels=block_out))
                block_in = block_out
            down = nn.Module()
            down.block = block
            down.attn = attn
            if i_level != self.num_resolutions - 1:
                down.downsample = Downsample(block_in)
                curr_res = curr_res // 2
            self.down.append(down)

        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in)
        self.mid.attn_1 = AttnBlock(block_in)
        self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in)

        # end
        self.norm_out = nn.GroupNorm(num_groups=32, num_channels=block_in, eps=1e-6, affine=True)
        self.conv_out = nn.Conv2d(block_in, 2 * z_channels, kernel_size=3, stride=1, padding=1)

    def forward(self, x: Tensor) -> Tensor:
        # downsampling
        hs = [self.conv_in(x)]
        for i_level in range(self.num_resolutions):
            for i_block in range(self.num_res_blocks):
                h = self.down[i_level].block[i_block](hs[-1])
                if len(self.down[i_level].attn) > 0:
                    h = self.down[i_level].attn[i_block](h)
                hs.append(h)
            if i_level != self.num_resolutions - 1:
                hs.append(self.down[i_level].downsample(hs[-1]))

        # middle
        h = hs[-1]
        h = self.mid.block_1(h)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h)
        # end
        h = self.norm_out(h)
        h = swish(h)
        h = self.conv_out(h)
        return h


class Decoder(nn.Module):
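    """VAE decoder: maps latents back to image space, mirroring the Encoder and
    upsampling num_resolutions - 1 times (the ffactor)."""
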
    def __init__(
        self,
        ch: int,
        out_ch: int,
        ch_mult: list[int],
        num_res_blocks: int,
        in_channels: int,
        resolution: int,
        z_channels: int,
    ):
        super().__init__()
        self.ch = ch
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels
        self.ffactor = 2 ** (self.num_resolutions - 1)

        # compute block_in and curr_res at lowest res
        block_in = ch * ch_mult[self.num_resolutions - 1]
        curr_res = resolution // 2 ** (self.num_resolutions - 1)
        self.z_shape = (1, z_channels, curr_res, curr_res)

        # z to block_in
        self.conv_in = nn.Conv2d(z_channels, block_in, kernel_size=3, stride=1, padding=1)

        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in)
        self.mid.attn_1 = AttnBlock(block_in)
        self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in)

        # upsampling
        self.up = nn.ModuleList()
        for i_level in reversed(range(self.num_resolutions)):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_out = ch * ch_mult[i_level]
            for _ in range(self.num_res_blocks + 1):
                block.append(ResnetBlock(in_channels=block_in, out_channels=block_out))
                block_in = block_out
            up = nn.Module()
            up.block = block
            up.attn = attn
            if i_level != 0:
                up.upsample = Upsample(block_in)
                curr_res = curr_res * 2
            self.up.insert(0, up)  # prepend to keep index order consistent

        # end
        self.norm_out = nn.GroupNorm(num_groups=32, num_channels=block_in, eps=1e-6, affine=True)
        self.conv_out = nn.Conv2d(block_in, out_ch, kernel_size=3, stride=1, padding=1)

    def forward(self, z: Tensor) -> Tensor:
        # z to block_in
        h = self.conv_in(z)

        # middle
        h = self.mid.block_1(h)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h)

        # upsampling
        for i_level in reversed(range(self.num_resolutions)):
            for i_block in range(self.num_res_blocks + 1):
                h = self.up[i_level].block[i_block](h)
                if len(self.up[i_level].attn) > 0:
                    h = self.up[i_level].attn[i_block](h)
            if i_level != 0:
                h = self.up[i_level].upsample(h)

        # end
        h = self.norm_out(h)
        h = swish(h)
        h = self.conv_out(h)
        return h


class DiagonalGaussian(nn.Module):
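    """Splits the input into mean and logvar along chunk_dim and, when sample=True,
    draws a latent via the reparameterization trick; otherwise returns the mean."""
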
    def __init__(self, sample: bool = True, chunk_dim: int = 1):
        super().__init__()
        self.sample = sample
        self.chunk_dim = chunk_dim

    def forward(self, z: Tensor) -> Tensor:
        mean, logvar = torch.chunk(z, 2, dim=self.chunk_dim)
        if self.sample:
            std = torch.exp(0.5 * logvar)
            return mean + std * torch.randn_like(mean)
        else:
            return mean


class AutoEncoder(nn.Module):
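    """Complete VAE wrapping Encoder, Decoder, and DiagonalGaussian; latents are
    shifted and scaled so the diffusion model sees a normalized latent space."""
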
    def __init__(self, params: AutoEncoderParams):
        super().__init__()
        self.encoder = Encoder(
            resolution=params.resolution,
            in_channels=params.in_channels,
            ch=params.ch,
            ch_mult=params.ch_mult,
            num_res_blocks=params.num_res_blocks,
            z_channels=params.z_channels,
        )
        self.decoder = Decoder(
            resolution=params.resolution,
            in_channels=params.in_channels,
            ch=params.ch,
            out_ch=params.out_ch,
            ch_mult=params.ch_mult,
            num_res_blocks=params.num_res_blocks,
            z_channels=params.z_channels,
        )
        self.reg = DiagonalGaussian()

        self.scale_factor = params.scale_factor
        self.shift_factor = params.shift_factor

    @property
    def device(self) -> torch.device:
        return next(self.parameters()).device

    @property
    def dtype(self) -> torch.dtype:
        return next(self.parameters()).dtype

    def encode(self, x: Tensor) -> Tensor:
        z = self.reg(self.encoder(x))
        z = self.scale_factor * (z - self.shift_factor)
        return z

    def decode(self, z: Tensor) -> Tensor:
        z = z / self.scale_factor + self.shift_factor
        return self.decoder(z)

    def forward(self, x: Tensor) -> Tensor:
        return self.decode(self.encode(x))


@dataclass
class ModelSpec:
    params: FluxParams
    ae_params: AutoEncoderParams
    ckpt_path: str | None
    ae_path: str | None


configs = {
    "dev": ModelSpec(
        ckpt_path=None,
        params=FluxParams(
            in_channels=64,
            vec_in_dim=768,
            context_in_dim=4096,
            hidden_size=3072,
            mlp_ratio=4.0,
            num_heads=24,
            depth=19,
            depth_single_blocks=38,
            axes_dim=[16, 56, 56],
            theta=10_000,
            qkv_bias=True,
            guidance_embed=True,
        ),
        ae_path=None,
        ae_params=AutoEncoderParams(
            resolution=256,
            in_channels=3,
            ch=128,
            out_ch=3,
            ch_mult=[1, 2, 4, 4],
            num_res_blocks=2,
            z_channels=16,
            scale_factor=0.3611,
            shift_factor=0.1159,
        ),
    ),
    "schnell": ModelSpec(
        ckpt_path=None,
        params=FluxParams(
            in_channels=64,
            vec_in_dim=768,
            context_in_dim=4096,
            hidden_size=3072,
            mlp_ratio=4.0,
            num_heads=24,
            depth=19,
            depth_single_blocks=38,
            axes_dim=[16, 56, 56],
            theta=10_000,
            qkv_bias=True,
            guidance_embed=False,
        ),
        ae_path=None,
        ae_params=AutoEncoderParams(
            resolution=256,
            in_channels=3,
            ch=128,
            out_ch=3,
            ch_mult=[1, 2, 4, 4],
            num_res_blocks=2,
            z_channels=16,
            scale_factor=0.3611,
            shift_factor=0.1159,
        ),
    ),
}
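
# Usage sketch: both entries share the same architecture and differ only in
# `guidance_embed` (the guidance-distilled "dev" model embeds a guidance scale,
# "schnell" does not). Weight loading happens elsewhere; this only builds the modules:
#   spec = configs["dev"]
#   model = Flux(spec.params)
#   ae = AutoEncoder(spec.ae_params)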


def attention(q: Tensor, k: Tensor, v: Tensor, pe: Tensor, attn_mask: Optional[Tensor] = None) -> Tensor:
    q, k = apply_rope(q, k, pe)

    x = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=attn_mask)
    x = rearrange(x, "B H L D -> B L (H D)")

    return x


def rope(pos: Tensor, dim: int, theta: int) -> Tensor:
    assert dim % 2 == 0
    scale = torch.arange(0, dim, 2, dtype=torch.float64, device=pos.device) / dim
    omega = 1.0 / (theta**scale)
    out = torch.einsum("...n,d->...nd", pos, omega)
    out = torch.stack([torch.cos(out), -torch.sin(out), torch.sin(out), torch.cos(out)], dim=-1)
    out = rearrange(out, "b n d (i j) -> b n d i j", i=2, j=2)
    return out.float()
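

# `rope` returns a 2x2 rotation matrix per (position, frequency) pair, i.e. a
# real-valued encoding of the complex rotary embedding e^{i * pos * omega}.
# `apply_rope` views the last dim of q/k as dim/2 two-component pairs and
# multiplies each pair by its rotation matrix before attention.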
def apply_rope(xq: Tensor, xk: Tensor, freqs_cis: Tensor) -> tuple[Tensor, Tensor]:
    xq_ = xq.float().reshape(*xq.shape[:-1], -1, 1, 2)
    xk_ = xk.float().reshape(*xk.shape[:-1], -1, 1, 2)
    xq_out = freqs_cis[..., 0] * xq_[..., 0] + freqs_cis[..., 1] * xq_[..., 1]
    xk_out = freqs_cis[..., 0] * xk_[..., 0] + freqs_cis[..., 1] * xk_[..., 1]
    return xq_out.reshape(*xq.shape).type_as(xq), xk_out.reshape(*xk.shape).type_as(xk)


def to_cuda(x):
    if isinstance(x, torch.Tensor):
        return x.cuda()
    elif isinstance(x, (list, tuple)):
        return [to_cuda(elem) for elem in x]
    elif isinstance(x, dict):
        return {k: to_cuda(v) for k, v in x.items()}
    else:
        return x


def to_cpu(x):
    if isinstance(x, torch.Tensor):
        return x.cpu()
    elif isinstance(x, (list, tuple)):
        return [to_cpu(elem) for elem in x]
    elif isinstance(x, dict):
        return {k: to_cpu(v) for k, v in x.items()}
    else:
        return x


class EmbedND(nn.Module):
    def __init__(self, dim: int, theta: int, axes_dim: list[int]):
        super().__init__()
        self.dim = dim
        self.theta = theta
        self.axes_dim = axes_dim

    def forward(self, ids: Tensor) -> Tensor:
        n_axes = ids.shape[-1]
        emb = torch.cat(
            [rope(ids[..., i], self.axes_dim[i], self.theta) for i in range(n_axes)],
            dim=-3,
        )

        return emb.unsqueeze(1)


def timestep_embedding(t: Tensor, dim: int, max_period=10000, time_factor: float = 1000.0):
    """
    Create sinusoidal timestep embeddings.
    :param t: a 1-D Tensor of N indices, one per batch element.
        These may be fractional.
    :param dim: the dimension of the output.
    :param max_period: controls the minimum frequency of the embeddings.
    :param time_factor: multiplier applied to t before embedding.
    :return: an (N, D) Tensor of positional embeddings.
    """
    t = time_factor * t
    half = dim // 2
    freqs = torch.exp(-math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half).to(t.device)

    args = t[:, None].float() * freqs[None]
    embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
    if dim % 2:
        embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
    if torch.is_floating_point(t):
        embedding = embedding.to(t)
    return embedding
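
# e.g. timestep_embedding(torch.tensor([0.25, 1.0]), dim=256) -> shape (2, 256);
# Flux.forward below uses this to embed both timesteps and the guidance scale.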


class MLPEmbedder(nn.Module):
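    """Two-layer MLP (Linear -> SiLU -> Linear) embedding timestep, guidance, and pooled vector inputs."""
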
    def __init__(self, in_dim: int, hidden_dim: int):
        super().__init__()
        self.in_layer = nn.Linear(in_dim, hidden_dim, bias=True)
        self.silu = nn.SiLU()
        self.out_layer = nn.Linear(hidden_dim, hidden_dim, bias=True)

        self.gradient_checkpointing = False

    def enable_gradient_checkpointing(self):
        self.gradient_checkpointing = True

    def disable_gradient_checkpointing(self):
        self.gradient_checkpointing = False

    def _forward(self, x: Tensor) -> Tensor:
        return self.out_layer(self.silu(self.in_layer(x)))

    def forward(self, *args, **kwargs):
        if self.training and self.gradient_checkpointing:
            return checkpoint(self._forward, *args, use_reentrant=False, **kwargs)
        else:
            return self._forward(*args, **kwargs)


class RMSNorm(torch.nn.Module):
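    """Root-mean-square normalization with a learned scale, computed in float32 for stability."""
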
    def __init__(self, dim: int):
        super().__init__()
        self.scale = nn.Parameter(torch.ones(dim))

    def forward(self, x: Tensor):
        x_dtype = x.dtype
        x = x.float()
        rrms = torch.rsqrt(torch.mean(x**2, dim=-1, keepdim=True) + 1e-6)
        return ((x * rrms) * self.scale.float()).to(dtype=x_dtype)


class QKNorm(torch.nn.Module):
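    """Applies RMSNorm to queries and keys independently, casting back to v's dtype."""
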
    def __init__(self, dim: int):
        super().__init__()
        self.query_norm = RMSNorm(dim)
        self.key_norm = RMSNorm(dim)

    def forward(self, q: Tensor, k: Tensor, v: Tensor) -> tuple[Tensor, Tensor]:
        q = self.query_norm(q)
        k = self.key_norm(k)
        return q.to(v), k.to(v)


class SelfAttention(nn.Module):
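    """Multi-head self-attention with a fused QKV projection and QK normalization; RoPE is applied inside `attention`."""
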
    def __init__(self, dim: int, num_heads: int = 8, qkv_bias: bool = False):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.norm = QKNorm(head_dim)
        self.proj = nn.Linear(dim, dim)

    def forward(self, x: Tensor, pe: Tensor) -> Tensor:
        qkv = self.qkv(x)
        q, k, v = rearrange(qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
        q, k = self.norm(q, k, v)
        x = attention(q, k, v, pe=pe)
        x = self.proj(x)
        return x


@dataclass
class ModulationOut:
    shift: Tensor
    scale: Tensor
    gate: Tensor


class Modulation(nn.Module):
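    """Projects the conditioning vector to (shift, scale, gate) triplets:
    one triplet for single-stream blocks, two for double-stream blocks."""
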
    def __init__(self, dim: int, double: bool):
        super().__init__()
        self.is_double = double
        self.multiplier = 6 if double else 3
        self.lin = nn.Linear(dim, self.multiplier * dim, bias=True)

    def forward(self, vec: Tensor) -> tuple[ModulationOut, ModulationOut | None]:
        out = self.lin(nn.functional.silu(vec))[:, None, :].chunk(self.multiplier, dim=-1)

        return (
            ModulationOut(*out[:3]),
            ModulationOut(*out[3:]) if self.is_double else None,
        )


class DoubleStreamBlock(nn.Module):
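    """MMDiT-style block that processes image and text tokens with separate
    weights but joint attention over the concatenated (txt, img) sequence."""
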
    def __init__(self, hidden_size: int, num_heads: int, mlp_ratio: float, qkv_bias: bool = False):
        super().__init__()

        mlp_hidden_dim = int(hidden_size * mlp_ratio)
        self.num_heads = num_heads
        self.hidden_size = hidden_size
        self.img_mod = Modulation(hidden_size, double=True)
        self.img_norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.img_attn = SelfAttention(dim=hidden_size, num_heads=num_heads, qkv_bias=qkv_bias)

        self.img_norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.img_mlp = nn.Sequential(
            nn.Linear(hidden_size, mlp_hidden_dim, bias=True),
            nn.GELU(approximate="tanh"),
            nn.Linear(mlp_hidden_dim, hidden_size, bias=True),
        )

        self.txt_mod = Modulation(hidden_size, double=True)
        self.txt_norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.txt_attn = SelfAttention(dim=hidden_size, num_heads=num_heads, qkv_bias=qkv_bias)

        self.txt_norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.txt_mlp = nn.Sequential(
            nn.Linear(hidden_size, mlp_hidden_dim, bias=True),
            nn.GELU(approximate="tanh"),
            nn.Linear(mlp_hidden_dim, hidden_size, bias=True),
        )

        self.gradient_checkpointing = False
        self.cpu_offload_checkpointing = False

    def enable_gradient_checkpointing(self, cpu_offload: bool = False):
        self.gradient_checkpointing = True
        self.cpu_offload_checkpointing = cpu_offload

    def disable_gradient_checkpointing(self):
        self.gradient_checkpointing = False
        self.cpu_offload_checkpointing = False

    def _forward(
        self, img: Tensor, txt: Tensor, vec: Tensor, pe: Tensor, txt_attention_mask: Optional[Tensor] = None
    ) -> tuple[Tensor, Tensor]:
        img_mod1, img_mod2 = self.img_mod(vec)
        txt_mod1, txt_mod2 = self.txt_mod(vec)

        # prepare image for attention
        img_modulated = self.img_norm1(img)
        img_modulated = (1 + img_mod1.scale) * img_modulated + img_mod1.shift
        img_qkv = self.img_attn.qkv(img_modulated)
        img_q, img_k, img_v = rearrange(img_qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
        img_q, img_k = self.img_attn.norm(img_q, img_k, img_v)

        # prepare txt for attention
        txt_modulated = self.txt_norm1(txt)
        txt_modulated = (1 + txt_mod1.scale) * txt_modulated + txt_mod1.shift
        txt_qkv = self.txt_attn.qkv(txt_modulated)
        txt_q, txt_k, txt_v = rearrange(txt_qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
        txt_q, txt_k = self.txt_attn.norm(txt_q, txt_k, txt_v)

        # run attention over the concatenated (txt, img) sequence
        q = torch.cat((txt_q, img_q), dim=2)
        k = torch.cat((txt_k, img_k), dim=2)
        v = torch.cat((txt_v, img_v), dim=2)

        # build the attention mask: txt tokens use the given mask, img tokens are always attended
        attn_mask = None
        if txt_attention_mask is not None:
            attn_mask = txt_attention_mask.to(torch.bool)  # b, txt_seq_len
            attn_mask = torch.cat(
                (attn_mask, torch.ones(attn_mask.shape[0], img.shape[1], device=attn_mask.device, dtype=torch.bool)), dim=1
            )

            # broadcast to (b, num_heads, seq_len, seq_len)
            attn_mask = attn_mask[:, None, None, :].expand(-1, q.shape[1], q.shape[2], -1)

        attn = attention(q, k, v, pe=pe, attn_mask=attn_mask)
        txt_attn, img_attn = attn[:, : txt.shape[1]], attn[:, txt.shape[1] :]

        # calculate the img blocks
        img = img + img_mod1.gate * self.img_attn.proj(img_attn)
        img = img + img_mod2.gate * self.img_mlp((1 + img_mod2.scale) * self.img_norm2(img) + img_mod2.shift)

        # calculate the txt blocks
        txt = txt + txt_mod1.gate * self.txt_attn.proj(txt_attn)
        txt = txt + txt_mod2.gate * self.txt_mlp((1 + txt_mod2.scale) * self.txt_norm2(txt) + txt_mod2.shift)
        return img, txt

    def forward(
        self, img: Tensor, txt: Tensor, vec: Tensor, pe: Tensor, txt_attention_mask: Optional[Tensor] = None
    ) -> tuple[Tensor, Tensor]:
        if self.training and self.gradient_checkpointing:
            if not self.cpu_offload_checkpointing:
                return checkpoint(self._forward, img, txt, vec, pe, txt_attention_mask, use_reentrant=False)

            # cpu offload checkpointing: move inputs to GPU for recomputation and outputs back to CPU
            def create_custom_forward(func):
                def custom_forward(*inputs):
                    cuda_inputs = to_cuda(inputs)
                    outputs = func(*cuda_inputs)
                    return to_cpu(outputs)

                return custom_forward

            return torch.utils.checkpoint.checkpoint(
                create_custom_forward(self._forward), img, txt, vec, pe, txt_attention_mask, use_reentrant=False
            )
        else:
            return self._forward(img, txt, vec, pe, txt_attention_mask)


class SingleStreamBlock(nn.Module):
    """
    A DiT block with parallel linear layers as described in
    https://arxiv.org/abs/2302.05442 and adapted modulation interface.
    """

    def __init__(
        self,
        hidden_size: int,
        num_heads: int,
        mlp_ratio: float = 4.0,
        qk_scale: float | None = None,
    ):
        super().__init__()
        self.hidden_dim = hidden_size
        self.num_heads = num_heads
        head_dim = hidden_size // num_heads
        self.scale = qk_scale or head_dim**-0.5

        self.mlp_hidden_dim = int(hidden_size * mlp_ratio)
        # qkv and mlp_in
        self.linear1 = nn.Linear(hidden_size, hidden_size * 3 + self.mlp_hidden_dim)
        # proj and mlp_out
        self.linear2 = nn.Linear(hidden_size + self.mlp_hidden_dim, hidden_size)

        self.norm = QKNorm(head_dim)

        self.hidden_size = hidden_size
        self.pre_norm = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)

        self.mlp_act = nn.GELU(approximate="tanh")
        self.modulation = Modulation(hidden_size, double=False)

        self.gradient_checkpointing = False
        self.cpu_offload_checkpointing = False

    def enable_gradient_checkpointing(self, cpu_offload: bool = False):
        self.gradient_checkpointing = True
        self.cpu_offload_checkpointing = cpu_offload

    def disable_gradient_checkpointing(self):
        self.gradient_checkpointing = False
        self.cpu_offload_checkpointing = False

    def _forward(self, x: Tensor, vec: Tensor, pe: Tensor, txt_attention_mask: Optional[Tensor] = None) -> Tensor:
        mod, _ = self.modulation(vec)
        x_mod = (1 + mod.scale) * self.pre_norm(x) + mod.shift
        qkv, mlp = torch.split(self.linear1(x_mod), [3 * self.hidden_size, self.mlp_hidden_dim], dim=-1)

        q, k, v = rearrange(qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
        q, k = self.norm(q, k, v)

        # build the attention mask: txt tokens use the given mask, the remaining (img) tokens are always attended
        attn_mask = None
        if txt_attention_mask is not None:
            attn_mask = txt_attention_mask.to(torch.bool)  # b, txt_seq_len
            attn_mask = torch.cat(
                (
                    attn_mask,
                    torch.ones(
                        attn_mask.shape[0], x.shape[1] - txt_attention_mask.shape[1], device=attn_mask.device, dtype=torch.bool
                    ),
                ),
                dim=1,
            )

            # broadcast to (b, num_heads, seq_len, seq_len)
            attn_mask = attn_mask[:, None, None, :].expand(-1, q.shape[1], q.shape[2], -1)

        # compute attention
        attn = attention(q, k, v, pe=pe, attn_mask=attn_mask)

        # compute activation in mlp stream, cat again and run second linear layer
        output = self.linear2(torch.cat((attn, self.mlp_act(mlp)), 2))
        return x + mod.gate * output

    def forward(self, x: Tensor, vec: Tensor, pe: Tensor, txt_attention_mask: Optional[Tensor] = None) -> Tensor:
        if self.training and self.gradient_checkpointing:
            if not self.cpu_offload_checkpointing:
                return checkpoint(self._forward, x, vec, pe, txt_attention_mask, use_reentrant=False)

            # cpu offload checkpointing: move inputs to GPU for recomputation and outputs back to CPU
            def create_custom_forward(func):
                def custom_forward(*inputs):
                    cuda_inputs = to_cuda(inputs)
                    outputs = func(*cuda_inputs)
                    return to_cpu(outputs)

                return custom_forward

            return torch.utils.checkpoint.checkpoint(
                create_custom_forward(self._forward), x, vec, pe, txt_attention_mask, use_reentrant=False
            )
        else:
            return self._forward(x, vec, pe, txt_attention_mask)


class LastLayer(nn.Module):
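    """Final adaLN-modulated projection from hidden_size to patch_size**2 * out_channels."""
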
    def __init__(self, hidden_size: int, patch_size: int, out_channels: int):
        super().__init__()
        self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.linear = nn.Linear(hidden_size, patch_size * patch_size * out_channels, bias=True)
        self.adaLN_modulation = nn.Sequential(nn.SiLU(), nn.Linear(hidden_size, 2 * hidden_size, bias=True))

    def forward(self, x: Tensor, vec: Tensor) -> Tensor:
        shift, scale = self.adaLN_modulation(vec).chunk(2, dim=1)
        x = (1 + scale[:, None, :]) * self.norm_final(x) + shift[:, None, :]
        x = self.linear(x)
        return x


class Flux(nn.Module):
    """
    Transformer model for flow matching on sequences.
    """

    def __init__(self, params: FluxParams):
        super().__init__()

        self.params = params
        self.in_channels = params.in_channels
        self.out_channels = self.in_channels
        if params.hidden_size % params.num_heads != 0:
            raise ValueError(f"Hidden size {params.hidden_size} must be divisible by num_heads {params.num_heads}")
        pe_dim = params.hidden_size // params.num_heads
        if sum(params.axes_dim) != pe_dim:
            raise ValueError(f"Got {params.axes_dim} but expected positional dim {pe_dim}")
        self.hidden_size = params.hidden_size
        self.num_heads = params.num_heads
        self.pe_embedder = EmbedND(dim=pe_dim, theta=params.theta, axes_dim=params.axes_dim)
        self.img_in = nn.Linear(self.in_channels, self.hidden_size, bias=True)
        self.time_in = MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size)
        self.vector_in = MLPEmbedder(params.vec_in_dim, self.hidden_size)
        self.guidance_in = MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size) if params.guidance_embed else nn.Identity()
        self.txt_in = nn.Linear(params.context_in_dim, self.hidden_size)

        self.double_blocks = nn.ModuleList(
            [
                DoubleStreamBlock(
                    self.hidden_size,
                    self.num_heads,
                    mlp_ratio=params.mlp_ratio,
                    qkv_bias=params.qkv_bias,
                )
                for _ in range(params.depth)
            ]
        )

        self.single_blocks = nn.ModuleList(
            [
                SingleStreamBlock(self.hidden_size, self.num_heads, mlp_ratio=params.mlp_ratio)
                for _ in range(params.depth_single_blocks)
            ]
        )

        self.final_layer = LastLayer(self.hidden_size, 1, self.out_channels)

        self.gradient_checkpointing = False
        self.cpu_offload_checkpointing = False
        self.blocks_to_swap = None

        self.offloader_double = None
        self.offloader_single = None
        self.num_double_blocks = len(self.double_blocks)
        self.num_single_blocks = len(self.single_blocks)

    @property
    def device(self):
        return next(self.parameters()).device

    @property
    def dtype(self):
        return next(self.parameters()).dtype

    def enable_gradient_checkpointing(self, cpu_offload: bool = False):
        self.gradient_checkpointing = True
        self.cpu_offload_checkpointing = cpu_offload

        self.time_in.enable_gradient_checkpointing()
        self.vector_in.enable_gradient_checkpointing()
        if not isinstance(self.guidance_in, nn.Identity):
            self.guidance_in.enable_gradient_checkpointing()

        for block in self.double_blocks + self.single_blocks:
            block.enable_gradient_checkpointing(cpu_offload=cpu_offload)

        print(f"FLUX: Gradient checkpointing enabled. CPU offload: {cpu_offload}")

    def disable_gradient_checkpointing(self):
        self.gradient_checkpointing = False
        self.cpu_offload_checkpointing = False

        self.time_in.disable_gradient_checkpointing()
        self.vector_in.disable_gradient_checkpointing()
        if not isinstance(self.guidance_in, nn.Identity):
            self.guidance_in.disable_gradient_checkpointing()

        for block in self.double_blocks + self.single_blocks:
            block.disable_gradient_checkpointing()

        print("FLUX: Gradient checkpointing disabled.")

    def enable_block_swap(self, num_blocks: int, device: torch.device):
        self.blocks_to_swap = num_blocks
        double_blocks_to_swap = num_blocks // 2
        # single blocks are roughly half the size of double blocks, so swap twice as many
        single_blocks_to_swap = (num_blocks - double_blocks_to_swap) * 2

        assert double_blocks_to_swap <= self.num_double_blocks - 2 and single_blocks_to_swap <= self.num_single_blocks - 2, (
            f"Cannot swap more than {self.num_double_blocks - 2} double blocks and {self.num_single_blocks - 2} single blocks. "
            f"Requested {double_blocks_to_swap} double blocks and {single_blocks_to_swap} single blocks."
        )

        self.offloader_double = ModelOffloader(
            self.double_blocks, self.num_double_blocks, double_blocks_to_swap, device
        )
        self.offloader_single = ModelOffloader(
            self.single_blocks, self.num_single_blocks, single_blocks_to_swap, device
        )
        print(
            f"FLUX: Block swap enabled. Swapping {num_blocks} blocks, double blocks: {double_blocks_to_swap}, single blocks: {single_blocks_to_swap}."
        )

    def move_to_device_except_swap_blocks(self, device: torch.device):
        # detach the swapped blocks first so `.to(device)` does not move them
        if self.blocks_to_swap:
            save_double_blocks = self.double_blocks
            save_single_blocks = self.single_blocks
            self.double_blocks = None
            self.single_blocks = None

        self.to(device)

        if self.blocks_to_swap:
            self.double_blocks = save_double_blocks
            self.single_blocks = save_single_blocks

    def prepare_block_swap_before_forward(self):
        if self.blocks_to_swap is None or self.blocks_to_swap == 0:
            return
        self.offloader_double.prepare_block_devices_before_forward(self.double_blocks)
        self.offloader_single.prepare_block_devices_before_forward(self.single_blocks)

    def forward(
        self,
        img: Tensor,
        img_ids: Tensor,
        txt: Tensor,
        txt_ids: Tensor,
        timesteps: Tensor,
        y: Tensor,
        guidance: Tensor | None = None,
        txt_attention_mask: Tensor | None = None,
    ) -> Tensor:
        if img.ndim != 3 or txt.ndim != 3:
            raise ValueError("Input img and txt tensors must have 3 dimensions.")

        # running on sequences img
        img = self.img_in(img)
        vec = self.time_in(timestep_embedding(timesteps, 256))
        if self.params.guidance_embed:
            if guidance is None:
                raise ValueError("Didn't get guidance strength for guidance distilled model.")
            vec = vec + self.guidance_in(timestep_embedding(guidance, 256))
        vec = vec + self.vector_in(y)
        txt = self.txt_in(txt)

        ids = torch.cat((txt_ids, img_ids), dim=1)
        pe = self.pe_embedder(ids)

        if not self.blocks_to_swap:
            for block in self.double_blocks:
                img, txt = block(img=img, txt=txt, vec=vec, pe=pe, txt_attention_mask=txt_attention_mask)
            img = torch.cat((txt, img), 1)
            for block in self.single_blocks:
                img = block(img, vec=vec, pe=pe, txt_attention_mask=txt_attention_mask)
        else:
            for block_idx, block in enumerate(self.double_blocks):
                self.offloader_double.wait_for_block(block_idx)

                img, txt = block(img=img, txt=txt, vec=vec, pe=pe, txt_attention_mask=txt_attention_mask)

                self.offloader_double.submit_move_blocks(self.double_blocks, block_idx)

            img = torch.cat((txt, img), 1)

            for block_idx, block in enumerate(self.single_blocks):
                self.offloader_single.wait_for_block(block_idx)

                img = block(img, vec=vec, pe=pe, txt_attention_mask=txt_attention_mask)

                self.offloader_single.submit_move_blocks(self.single_blocks, block_idx)

        img = img[:, txt.shape[1] :, ...]  # drop the txt tokens

        if self.training and self.cpu_offload_checkpointing:
            img = img.to(self.device)
            vec = vec.to(self.device)

        img = self.final_layer(img, vec)

        return img
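

# Expected input shapes for Flux.forward, inferred from the config above:
#   img: (B, L_img, 64) packed latent patches; img_ids/txt_ids: (B, L, 3) position ids
#   for the three RoPE axes; txt: (B, L_txt, 4096) text-encoder features;
#   y: (B, 768) pooled text vector; timesteps and guidance: (B,).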