Upload 3 files
Browse files- basic.py +583 -0
- generate_infinity_images.py +104 -0
- infinity.py +800 -0
basic.py
ADDED
|
@@ -0,0 +1,583 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Definitions of blocks of VAR transformer model.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import math
|
| 6 |
+
import os
|
| 7 |
+
from functools import partial
|
| 8 |
+
from typing import Optional, Tuple, Union
|
| 9 |
+
|
| 10 |
+
import torch
|
| 11 |
+
import torch.nn as nn
|
| 12 |
+
import torch.nn.functional as F
|
| 13 |
+
import numpy as np
|
| 14 |
+
from timm.models.layers import DropPath, drop_path
|
| 15 |
+
from torch.utils.checkpoint import checkpoint
|
| 16 |
+
|
| 17 |
+
# Import flash_attn's attention
|
| 18 |
+
flash_attn_func = None
|
| 19 |
+
flash_attn_varlen_kvpacked_func = None
|
| 20 |
+
|
| 21 |
+
from torch.nn.functional import scaled_dot_product_attention as slow_attn # q, k, v: BHLc
|
| 22 |
+
|
| 23 |
+
# Import flash_attn's fused ops; when flash_attn is not installed, fall back to
# plain-PyTorch equivalents (fused dense/norm ops become None and callers must
# check `flash_fused_op_installed` / the individual names before using them).
try:
    from flash_attn.ops.layer_norm import dropout_add_layer_norm
    from flash_attn.ops.rms_norm import dropout_add_rms_norm
    from flash_attn.ops.rms_norm import rms_norm as rms_norm_impl
    from flash_attn.ops.fused_dense import fused_mlp_func
    flash_fused_op_installed = True
except ImportError:
    dropout_add_layer_norm = dropout_add_rms_norm = fused_mlp_func = None
    flash_fused_op_installed = False

    def rms_norm_impl(x, weight, epsilon):
        # Pure-PyTorch RMSNorm fallback: x / sqrt(mean(x^2) + eps) * weight.
        # Matches the flash_attn `rms_norm` signature (x, weight, epsilon).
        return (x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True).add_(epsilon))) * weight
| 37 |
+
|
| 38 |
+
def precompute_rope2d_freqs_grid(dim, dynamic_resolution_h_w, rope2d_normalized_by_hw, pad_to_multiplier=1, max_height=2048 // 16, max_width=2048 // 16, base=10000.0, device=None, scaling_factor=1.0):
    """Precompute 2D rotary-embedding cos/sin caches for every scale schedule.

    For each aspect ratio in `dynamic_resolution_h_w`, builds a (cos, sin) table
    covering the concatenation of all scales in the schedule, and stores it in a
    dict keyed by `str(tuple(scale_schedule))` (with the first element of each
    scale normalized to 1) — the same key format `apply_rotary_emb` looks up.

    :param dim: per-head rotary dimension; split in half between y (height) and x (width)
    :param dynamic_resolution_h_w: nested dict h_div_w -> pn -> {'scales': [(t, h, w), ...]}
    :param rope2d_normalized_by_hw: 0 = raw grid crop, 1 = bilinear downsample of the
        full-resolution grid, 2 = nearest-index resample ("star style")
    :param pad_to_multiplier: pad the concatenated sequence length up to this multiple
    :return: dict mapping stringified scale schedules to (2, 1, 1, 1, seq_len, dim//2) caches
    """
    # split the dimension into half, one for x and one for y
    half_dim = dim // 2
    inv_freq = 1.0 / (base ** (torch.arange(0, half_dim, 2, dtype=torch.int64).float().to(device) / half_dim))  # namely theta, 1 / (10000^(i/half_dim)), i=0,2,..., half_dim-2
    t_height = torch.arange(max_height, device=device, dtype=torch.int64).type_as(inv_freq)
    t_width = torch.arange(max_width, device=device, dtype=torch.int64).type_as(inv_freq)
    t_height = t_height / scaling_factor
    freqs_height = torch.outer(t_height, inv_freq)  # (max_height, half_dim/2), namely y*theta
    t_width = t_width / scaling_factor
    freqs_width = torch.outer(t_width, inv_freq)  # (max_width, half_dim/2), namely x*theta
    # Full-resolution angle grid: height angles in the first half of the channel dim,
    # width angles in the second half.
    freqs_grid_map = torch.concat([
        freqs_height[:, None, :].expand(-1, max_width, -1),  # (max_height, max_width, half_dim/2)
        freqs_width[None, :, :].expand(max_height, -1, -1),  # (max_height, max_width, half_dim/2)
    ], dim=-1)  # (max_height, max_width, half_dim)
    freqs_grid_map = torch.stack([torch.cos(freqs_grid_map), torch.sin(freqs_grid_map)], dim=0)
    # (2, max_height, max_width, half_dim): index 0 is cos, index 1 is sin

    rope2d_freqs_grid = {}
    for h_div_w in dynamic_resolution_h_w:
        scale_schedule = dynamic_resolution_h_w[h_div_w]['1M']['scales']
        _, ph, pw = scale_schedule[-1]
        max_edge_length = freqs_grid_map.shape[1]
        # (uph, upw): largest grid with this aspect ratio that fits inside the precomputed map
        if ph >= pw:
            uph, upw = max_edge_length, int(max_edge_length / ph * pw)
        else:
            uph, upw = int(max_edge_length / pw * ph), max_edge_length
        rope_cache_list = []
        for (_, ph, pw) in scale_schedule:
            ph_mul_pw = ph * pw
            if rope2d_normalized_by_hw == 1:  # downsample
                # Bilinear-resample the aspect-matched crop down to (ph, pw)
                rope_cache = F.interpolate(freqs_grid_map[:, :uph, :upw, :].permute([0,3,1,2]), size=(ph, pw), mode='bilinear', align_corners=True)
                rope_cache = rope_cache.permute([0,2,3,1])  # (2, ph, pw, half_head_dim)
            elif rope2d_normalized_by_hw == 2:  # star stylee
                # Nearest-index resample from the final scale's resolution
                _, uph, upw = scale_schedule[-1]
                indices = torch.stack([
                    (torch.arange(ph) * (uph / ph)).reshape(ph, 1).expand(ph, pw),
                    (torch.arange(pw) * (upw / pw)).reshape(1, pw).expand(ph, pw),
                ], dim=-1).round().int()  # (ph, pw, 2)
                indices = indices.reshape(-1, 2)  # (ph*pw, 2)
                rope_cache = freqs_grid_map[:, indices[:,0], indices[:,1], :]  # (2, ph*pw, half_head_dim)
                rope_cache = rope_cache.reshape(2, ph, pw, -1)
            elif rope2d_normalized_by_hw == 0:
                # Plain top-left crop of the full-resolution grid
                rope_cache = freqs_grid_map[:, :ph, :pw, :]  # (2, ph, pw, half_head_dim)
            else:
                raise ValueError(f'Unknown rope2d_normalized_by_hw: {rope2d_normalized_by_hw}')
            rope_cache_list.append(rope_cache.reshape(2, ph_mul_pw, -1))
        cat_rope_cache = torch.cat(rope_cache_list, 1)  # (2, seq_len, half_head_dim)
        if cat_rope_cache.shape[1] % pad_to_multiplier:
            # NOTE(review): the pad is allocated on the default device, not `device` —
            # fine when everything is on CPU here; confirm if `device` is ever non-None.
            pad = torch.zeros(2, pad_to_multiplier - cat_rope_cache.shape[1] % pad_to_multiplier, half_dim)
            cat_rope_cache = torch.cat([cat_rope_cache, pad], dim=1)
        cat_rope_cache = cat_rope_cache[:,None,None,None]  # (2, 1, 1, 1, seq_len, half_dim)
        # Every pixel-number variant of this aspect ratio shares the same cache object;
        # keys normalize the temporal component of each scale to 1.
        for pn in dynamic_resolution_h_w[h_div_w]:
            scale_schedule = dynamic_resolution_h_w[h_div_w][pn]['scales']
            tmp_scale_schedule = [(1, h, w) for _, h, w in scale_schedule]
            rope2d_freqs_grid[str(tuple(tmp_scale_schedule))] = cat_rope_cache
    return rope2d_freqs_grid
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def apply_rotary_emb(q, k, scale_schedule, rope2d_freqs_grid, pad_to_multiplier, rope2d_normalized_by_hw, scale_ind):
    """Rotate q and k by the precomputed 2D RoPE cache for `scale_schedule`.

    q, k: (batch, heads, seq_len, head_dim). The cos/sin cache is fetched from
    `rope2d_freqs_grid` under the key `str(tuple(scale_schedule))`, moved to
    q/k's device in place (the dict entry is updated), and sliced starting at
    the flattened offset of scale `scale_ind`. The rotation runs with autocast
    disabled. Returns the rotated (q, k) with unchanged shapes.
    """
    stacked = torch.stack((q, k), dim=0)  # (2, B, H, L, head_dim)
    dev_type = stacked.device.type
    if not (isinstance(dev_type, str) and dev_type != "mps"):
        dev_type = "cpu"  # autocast has no mps backend; fall back to cpu
    with torch.autocast(device_type=dev_type, enabled=False):
        seq_len = stacked.shape[3]
        key = str(tuple(scale_schedule))
        offset = 0
        if scale_ind >= 1:
            assert len(scale_schedule[0]) == 3
            # tokens of all earlier scales precede this one in the flattened sequence
            offset = np.sum([s[0] * s[1] * s[2] for s in scale_schedule[:scale_ind]])
        # keep the cached table on the same device as q/k (persisted in the dict)
        rope2d_freqs_grid[key] = rope2d_freqs_grid[key].to(stacked.device)
        assert offset + seq_len <= rope2d_freqs_grid[key].shape[4]
        cos_sin = rope2d_freqs_grid[key][:, :, :, :, offset:offset + seq_len]  # (2, 1, 1, 1, L, head_dim//2)
        cos, sin = cos_sin[0], cos_sin[1]
        pairs = stacked.reshape(*stacked.shape[:-1], -1, 2)  # (..., head_dim//2, 2)
        real, imag = pairs[..., 0], pairs[..., 1]
        # complex rotation (real + i*imag) * (cos + i*sin); stack (not concat) to
        # keep the interleaved pair layout
        rotated = torch.stack([
            cos * real - sin * imag,
            sin * real + cos * imag,
        ], dim=-1)
        merged = rotated.reshape(*rotated.shape[:-2], -1)  # back to (2, B, H, L, head_dim)
        q, k = merged.unbind(dim=0)
    return q, k
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
class FastRMSNorm(nn.Module):
    """RMSNorm computed in fp32 and cast back to the input dtype.

    Delegates to `rms_norm_impl` (flash-attn's fused kernel when installed,
    otherwise the pure-PyTorch fallback). With ``elementwise_affine=False`` the
    weight is a constant all-ones buffer rather than a learnable parameter.
    """

    def __init__(self, C, eps=1e-6, elementwise_affine=True):
        super().__init__()
        self.C = C
        self.eps = eps
        self.elementwise_affine = elementwise_affine
        ones = torch.ones(C)
        if elementwise_affine:
            self.weight = nn.Parameter(ones)
        else:
            self.register_buffer('weight', ones)

    def forward(self, x):
        in_dtype = x.dtype
        normed = rms_norm_impl(x.float(), self.weight, epsilon=self.eps)
        return normed.to(in_dtype)

    def extra_repr(self) -> str:
        return f'C={self.C}, eps={self.eps:g}, elementwise_affine={self.elementwise_affine}'
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
def get_dropout_layer(p):
    """Return an in-place Dropout for p > 0, otherwise a no-op Identity."""
    if p > 0:
        return nn.Dropout(p, inplace=True)
    return nn.Identity()
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
class FFN(nn.Module):
    """Standard transformer feed-forward block: Linear -> tanh-GELU -> Linear -> Dropout.

    When ``fused_mlp`` is set (and flash-attn is installed), the forward pass
    dispatches to flash-attn's fused MLP kernel using the same fc1/fc2 weights.
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, drop=0., fused_mlp=False):
        super().__init__()
        self.fused_mlp_func = fused_mlp_func if fused_mlp else None
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = nn.GELU(approximate='tanh')
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = get_dropout_layer(drop)
        self.heuristic = -1  # kernel-selection heuristic forwarded to the fused op

    def forward(self, x):
        if self.fused_mlp_func is None:
            hidden = self.act(self.fc1(x))
            return self.drop(self.fc2(hidden))
        out = self.fused_mlp_func(
            x=x,
            weight1=self.fc1.weight,
            bias1=self.fc1.bias,
            weight2=self.fc2.weight,
            bias2=self.fc2.bias,
            activation='gelu_approx',
            save_pre_act=self.training,
            return_residual=False,
            checkpoint_lvl=0,
            heuristic=self.heuristic,
            process_group=None,
        )
        return self.drop(out)

    def extra_repr(self) -> str:
        return f'fused_mlp={self.fused_mlp_func is not None}'
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
class FFNSwiGLU(nn.Module):
    """SwiGLU feed-forward block: fc2( SiLU(fcg(x)) * fc1(x) ), all bias-free.

    The hidden width is rescaled by 2/3 (to keep parameter count comparable to a
    plain 4x MLP) and rounded to a multiple of 256.
    """

    def __init__(self, in_features, hidden_features, out_features=None, drop=0., fused_mlp=False):
        super().__init__()
        self.fused_mlp_func = None  # no fused kernel for SwiGLU
        hidden_features = round(2 * hidden_features / 3 / 256) * 256

        out_features = out_features or in_features
        self.fcg = nn.Linear(in_features, hidden_features, bias=False)  # gate projection
        self.fc1 = nn.Linear(in_features, hidden_features, bias=False)  # value projection
        self.fc2 = nn.Linear(hidden_features, out_features, bias=False)
        self.drop = get_dropout_layer(drop)

    def forward(self, x):
        gate = F.silu(self.fcg(x), inplace=True)
        value = self.fc1(x)
        return self.drop(self.fc2(gate.mul_(value)))

    def extra_repr(self) -> str:
        return f'fused_mlp={self.fused_mlp_func is not None}'
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
class SelfAttention(nn.Module):
    def __init__(
        self, embed_dim=768, num_heads=12,
        proj_drop=0., tau=1, cos_attn=False, customized_flash_attn=True, use_flex_attn=False,
        batch_size=2, pad_to_multiplier=1, rope2d_normalized_by_hw=0,
    ):
        """
        Multi-head self-attention with optional cosine attention, 2D RoPE, and kv caching.

        :param embed_dim: model's width
        :param num_heads: num heads of multi-head attention
        :param proj_drop: always 0 for testing
        :param tau: always 1
        :param cos_attn: always True: during attention, q and k will be L2-normalized and scaled by a head-wise learnable parameter self.scale_mul_1H11
        :param customized_flash_attn: accepted but unused here — `self.using_flash` is hard-coded False below, so the SDPA/flex path is always taken
        :param use_flex_attn: allow dispatching to a caller-provided `attn_fn` in forward
        :param pad_to_multiplier: forwarded to apply_rotary_emb
        :param rope2d_normalized_by_hw: forwarded to apply_rotary_emb
        """
        super().__init__()
        assert embed_dim % num_heads == 0
        self.using_flash = False  # flash path disabled in this build (flash_attn_func is None at module level)

        self.num_heads, self.head_dim = num_heads, embed_dim // num_heads
        self.tau, self.cos_attn = tau, cos_attn
        if self.cos_attn:
            self.scale = 1
            size = (1, 1, self.num_heads, 1) if self.using_flash else (1, self.num_heads, 1, 1)
            # size: 11H1 or 1H11
            # learnable per-head logit scale, stored in log space, init exp() = 4.0
            self.scale_mul_1H11 = nn.Parameter(torch.full(size=size, fill_value=4.0).log(), requires_grad=True)
            self.max_scale_mul = torch.log(torch.tensor(100)).item()  # cap exp() at 100
        else:
            self.scale = 1 / math.sqrt(self.head_dim) / self.tau

        # single qkv projection; k gets a fixed zero bias while q/v biases are learnable
        self.mat_qkv = nn.Linear(embed_dim, embed_dim * 3, bias=False)
        self.q_bias, self.v_bias = nn.Parameter(torch.zeros(embed_dim)), nn.Parameter(torch.zeros(embed_dim))
        self.register_buffer('zero_k_bias', torch.zeros(embed_dim))

        self.proj = nn.Linear(embed_dim, embed_dim)
        self.proj_drop = get_dropout_layer(proj_drop)

        self.caching = False  # kv caching: only used during inference
        self.cached_k = None  # kv caching: only used during inference
        self.cached_v = None  # kv caching: only used during inference

        self.batch_size = batch_size
        self.use_flex_attn = use_flex_attn
        self.pad_to_multiplier = pad_to_multiplier

        self.rope2d_normalized_by_hw = rope2d_normalized_by_hw

    def kv_caching(self, enable: bool):  # kv caching: only used during inference
        # Toggling always clears the cache, whether enabling or disabling.
        self.caching = enable
        self.cached_k = None
        self.cached_v = None

    # NOTE: attn_bias_or_two_vector is None during inference
    def forward(self, x, attn_bias_or_two_vector: Union[torch.Tensor, Tuple[torch.IntTensor, torch.IntTensor]], attn_fn=None, scale_schedule=None, rope2d_freqs_grid=None, scale_ind=0):
        """
        :param (fp32) x: shaped (B or batch_size, L or seq_length, C or hidden_dim); if seq-parallel is used, the `L` dim would be shared
        :param (fp32) attn_bias_or_two_vector:
            if not using_flash:
                a block-wise, lower-triangle matrix, like:
                [[[[0, -, -, -, -, -, -, -, -, -, -, -, -, -],
                   [0, 0, 0, 0, 0, -, -, -, -, -, -, -, -, -],
                   [0, 0, 0, 0, 0, -, -, -, -, -, -, -, -, -],
                   [0, 0, 0, 0, 0, -, -, -, -, -, -, -, -, -],
                   [0, 0, 0, 0, 0, -, -, -, -, -, -, -, -, -],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]]]
                where 0 means visible and - means invisible (-inf)
            else:
                a tuple of two 1-dim int vector (VAR_visible_kvlen, VAR_invisible_qlen)
        :param attn_fn: optional attention callable (e.g. compiled flex-attention); used only when self.use_flex_attn
        :param scale_schedule / rope2d_freqs_grid / scale_ind: RoPE lookup inputs; RoPE is skipped when rope2d_freqs_grid is None
        :return: shaped (B or batch_size, L or seq_length, C or hidden_dim); if seq-parallel is used, the `L` dim would be shared
        """
        # x: fp32
        B, L, C = x.shape

        # qkv: amp, bf16
        qkv = F.linear(input=x, weight=self.mat_qkv.weight, bias=torch.cat((self.q_bias, self.zero_k_bias, self.v_bias))).view(B, L, 3, self.num_heads, self.head_dim)  # BL3Hc
        if self.using_flash: q, k, v = qkv.unbind(dim=2); L_dim = 1  # q or k or v: all are shaped in (B:batch_size, L:seq_len, H:heads, c:head_dim)
        else: q, k, v = qkv.permute(2, 0, 3, 1, 4).unbind(dim=0); L_dim = 2  # q or k or v: all are shaped in (B:batch_size, H:heads, L:seq_len, c:head_dim)

        if self.cos_attn:  # always True
            scale_mul = self.scale_mul_1H11.clamp_max(self.max_scale_mul).exp()  # 11H1 (flash), or 1H11 (not flash)
            q = F.normalize(q, dim=-1, eps=1e-12).mul(scale_mul).contiguous()  # fp32
            k = F.normalize(k, dim=-1, eps=1e-12).contiguous()  # fp32
            v = v.contiguous()  # bf16
        else:  # be contiguous, to make kernel happy
            q = q.contiguous()  # bf16
            k = k.contiguous()  # bf16
            v = v.contiguous()  # bf16
        if rope2d_freqs_grid is not None:
            # rotate q/k with the precomputed 2D RoPE cache for this scale schedule
            q, k = apply_rotary_emb(q, k, scale_schedule, rope2d_freqs_grid, self.pad_to_multiplier, self.rope2d_normalized_by_hw, scale_ind)  # , freqs_cis=freqs_cis)
        if self.caching:  # kv caching: only used during inference
            # append this step's k/v along the sequence dim and attend over the full cache
            if self.cached_k is None: self.cached_k = k; self.cached_v = v
            else: k = self.cached_k = torch.cat((self.cached_k, k), dim=L_dim); v = self.cached_v = torch.cat((self.cached_v, v), dim=L_dim)

        if self.using_flash:
            if attn_bias_or_two_vector is not None:  # training
                kw = dict(VAR_visible_kvlen=attn_bias_or_two_vector[0], VAR_invisible_qlen=attn_bias_or_two_vector[1])
            else:  # inference (autoregressive sampling)
                kw = dict()
            oup = flash_attn_func(q.to(v.dtype), k.to(v.dtype), v, dropout_p=0, softmax_scale=self.scale, **kw).view(B, L, C)
        else:
            # if self.cos_attn: q, k are in fp32; v is in bf16
            # else: q, k, v are in bf16
            if self.use_flex_attn and attn_fn is not None:
                oup = attn_fn(q, k, v, scale=self.scale).transpose(1, 2).reshape(B, L, C)
            else:
                oup = slow_attn(query=q, key=k, value=v, scale=self.scale, attn_mask=attn_bias_or_two_vector, dropout_p=0).transpose(1, 2).reshape(B, L, C)
        # oup: bf16

        return self.proj_drop(self.proj(oup))

    def extra_repr(self) -> str:
        tail = ''
        return f'using_flash={self.using_flash}, tau={self.tau}, cos_attn={self.cos_attn}{tail}'
|
| 316 |
+
|
| 317 |
+
|
| 318 |
+
class CrossAttention(nn.Module):
    def __init__(
        self, for_attn_pool=False, embed_dim=768, kv_dim=4096, num_heads=12,
        proj_drop=0., cos_attn=False,
    ):
        """
        Cross-attention from model tokens (Q) to text-encoder features (KV).

        :param for_attn_pool: only used in VAR.text_proj_for_sos; replaces the Q
            projection with a single learned query per head (attention pooling)
        :param embed_dim: Q's dim
        :param kv_dim: K's and V's dim
        :param num_heads: num heads of multi-head attention
        :param proj_drop: proj drop out
        :param cos_attn: during attention, q and k will be L2-normalized and scaled by a head-wise learnable parameter self.scale_mul_1H11
        """
        cos_attn = False  # TODO: never use cos attn in cross attention with T5 kv
        super().__init__()
        self.for_attn_pool = for_attn_pool
        self.embed_dim = embed_dim
        self.kv_dim = kv_dim
        assert embed_dim % num_heads == 0
        self.num_heads, self.head_dim = num_heads, embed_dim // num_heads  # =64
        self.cos_attn = cos_attn
        if self.cos_attn:
            self.scale = 1
            self.scale_mul_1H1 = nn.Parameter(torch.full(size=(1, self.num_heads, 1, 1), fill_value=4.0).log(), requires_grad=True)
            self.max_scale_mul = torch.log(torch.tensor(100)).item()
        else:
            self.scale = 1 / math.sqrt(self.head_dim)

        if for_attn_pool:
            # a single learned query shared across the batch (one per head)
            q = torch.empty(1, self.num_heads, self.head_dim)
            nn.init.trunc_normal_(q, mean=0, std=math.sqrt(1 / embed_dim / 3))
            self.mat_q = nn.Parameter(q)
        else:
            self.mat_q = nn.Linear(embed_dim, embed_dim, bias=True)
        # joint K/V projection; k gets a fixed zero bias while v's bias is learnable
        self.mat_kv = nn.Linear(kv_dim, embed_dim*2, bias=False)
        self.v_bias = nn.Parameter(torch.zeros(embed_dim))
        self.register_buffer('zero_k_bias', torch.zeros(embed_dim))

        self.proj = nn.Linear(embed_dim, embed_dim)
        self.proj_drop = get_dropout_layer(proj_drop)

    def forward(self, q, ca_kv):
        """
        :param q: shaped as (batch, seq_len, Q_dim)
        :param ca_kv: contains several vectors, each of which is shaped as (len_i, KV_dim). We have [len_1xKV_dim, len_2xKV_dim, len_3xKV_dim, ...] and lens == [len_1, len_2, len_3, ...]
            - kv_compact: shaped as (sum(lens), KV_dim)
            - cu_seqlens_k: cumulated sum of lens
            - max_seqlen_k: int, max(lens)
            NOTE: seq_len (num of Qs) can reach 10k; but len_i (num of KVs) must <= 256

        :return: shaped as (batch, seq_len, Q_dim)
        """
        kv_compact, cu_seqlens_k, max_seqlen_k = ca_kv
        N = kv_compact.shape[0]

        kv_compact = F.linear(kv_compact, weight=self.mat_kv.weight, bias=torch.cat((self.zero_k_bias, self.v_bias))).view(N, 2, self.num_heads, self.head_dim)  # NC => N2Hc
        # attn_bias = xformers.ops.fmha.BlockDiagonalMask.from_seqlens

        if not self.for_attn_pool:
            B, Lq = q.shape[:2]
            q_compact = self.mat_q(q).view(-1, self.num_heads, self.head_dim)
        else:
            # attention pooling: batch size comes from the kv sequence boundaries,
            # and the single learned query is broadcast to every sample
            B = cu_seqlens_k.shape[0] - 1
            Lq = 1
            q_compact = self.mat_q.repeat(B, 1, 1).to(dtype=kv_compact.dtype)

        if self.cos_attn:  # always False
            scale_mul = self.scale_mul_1H1.clamp_max(self.max_scale_mul).exp()
            k, v = kv_compact.unbind(dim=1)
            q_compact = F.normalize(q_compact, dim=-1).mul(scale_mul)
            k = F.normalize(k, dim=-1)
            kv_compact = torch.stack((k, v), dim=1)

        q_compact = q_compact.contiguous()
        kv_compact = kv_compact.contiguous()

        # cu_seqlens_q is computed for the varlen-attention API but unused by the
        # dense slow_attn path below
        cu_seqlens_q = torch.arange(0, Lq * (B+1), Lq, dtype=torch.int32, device=q_compact.device)
        k, v = kv_compact.unbind(dim=1)

        # reshape to BHLc
        # NOTE(review): this dense reshape assumes every sample's kv length equals
        # N // B (cu_seqlens_k / max_seqlen_k are otherwise ignored) — confirm the
        # caller pads text features to equal lengths per batch.
        q_ = q_compact.view(B, Lq, self.num_heads, self.head_dim).transpose(1, 2)
        k_ = k.view(B, -1, self.num_heads, self.head_dim).transpose(1, 2)
        v_ = v.view(B, -1, self.num_heads, self.head_dim).transpose(1, 2)

        oup = slow_attn(
            query=q_,
            key=k_,
            value=v_,
            scale=self.scale,
            dropout_p=0,
        ).transpose(1, 2).reshape(B, Lq, -1)
        return self.proj_drop(self.proj(oup))

    def extra_repr(self) -> str:
        return f'Cq={self.embed_dim}, Ckv={self.kv_dim}, cos_attn={self.cos_attn}'
|
| 413 |
+
|
| 414 |
+
|
| 415 |
+
class SelfAttnBlock(nn.Module):
    """Pre-norm transformer block (self-attention + FFN) with AdaLN conditioning.

    Mirrors CrossAttnBlock minus the cross-attention branch. `kv_dim` and
    `cross_attn_layer_scale` are accepted for signature parity but unused here.
    """

    def __init__(
        self, embed_dim, kv_dim, cross_attn_layer_scale, cond_dim, act: bool, shared_aln: bool, norm_layer: partial,
        num_heads, mlp_ratio=4., drop=0., drop_path=0., tau=1, cos_attn=False,
        swiglu=False, customized_flash_attn=False, fused_mlp=False, fused_norm_func=None, checkpointing_sa_only=False,
    ):
        super(SelfAttnBlock, self).__init__()
        self.C, self.D = embed_dim, cond_dim
        self.drop_path_rate = drop_path
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        # BUGFIX: the original passed `attn_fn=attn_fn` here, but `attn_fn` is not
        # defined in __init__ (and SelfAttention.__init__ accepts no such argument),
        # so constructing this block raised NameError. Dropped the bogus kwarg.
        self.attn = SelfAttention(
            embed_dim=embed_dim, num_heads=num_heads, proj_drop=drop, tau=tau, cos_attn=cos_attn, customized_flash_attn=customized_flash_attn,
        )
        self.using_swiglu = swiglu
        self.ffn = (FFNSwiGLU if swiglu else FFN)(in_features=embed_dim, hidden_features=round(embed_dim * mlp_ratio / 256) * 256, drop=drop, fused_mlp=fused_mlp)

        self.ln_wo_grad = norm_layer(embed_dim, elementwise_affine=False)
        self.fused_norm_func = fused_norm_func
        self.norm_eps = norm_layer.keywords.get('eps', 1e-6)

        self.shared_aln = shared_aln
        if self.shared_aln:
            # learned global AdaLN offsets (1, 1, 6, C), added to the per-sample cond
            self.ada_gss = nn.Parameter(torch.randn(1, 1, 6, embed_dim) / embed_dim**0.5)
        else:
            lin = nn.Linear(cond_dim, 6*embed_dim)
            self.ada_lin = nn.Sequential(nn.SiLU(inplace=False), lin) if act else nn.Sequential(lin)

    # NOTE: attn_bias_or_two_vector is None during inference
    def forward(self, x, cond_BD, ca_kv, attn_bias_or_two_vector):  # todo: minGPT and vqgan also uses pre-norm, just like this, while MaskGiT uses post-norm
        """Apply AdaLN-modulated self-attention and FFN with residual connections.

        :param x: (B, L, C) token features
        :param cond_BD: (B, 1, 6, C) conditioning that yields the six AdaLN terms
        :param ca_kv: unused (kept for interface parity with CrossAttnBlock)
        :param attn_bias_or_two_vector: attention mask forwarded to SelfAttention
        """
        with torch.cuda.amp.autocast(enabled=False):  # compute AdaLN terms in fp32
            if self.shared_aln:  # always True; (1, 1, 6, C) + (B, 1, 6, C)
                gamma1, gamma2, scale1, scale2, shift1, shift2 = (self.ada_gss + cond_BD).unbind(2)  # 116C + B16C =unbind(2)=> 6 B1C
            else:
                gamma1, gamma2, scale1, scale2, shift1, shift2 = self.ada_lin(cond_BD).view(-1, 1, 6, self.C).unbind(2)

        # BUGFIX: the original tested/called the nonexistent attribute
        # `self.fused_ada_norm`; the attribute set in __init__ (and used by the
        # sibling CrossAttnBlock) is `self.fused_norm_func`.
        if self.fused_norm_func is None:
            x = x + self.drop_path(self.attn( self.ln_wo_grad(x.float()).mul(scale1.add(1)).add_(shift1), attn_bias_or_two_vector=attn_bias_or_two_vector ).mul_(gamma1))
            x = x + self.drop_path(self.ffn( self.ln_wo_grad(x.float()).mul(scale2.add(1)).add_(shift2) ).mul(gamma2))  # this mul(gamma2) cannot be in-placed cuz we possibly use FusedMLP
        else:
            x = x + self.drop_path(self.attn(self.fused_norm_func(C=self.C, eps=self.norm_eps, x=x, scale=scale1, shift=shift1), attn_bias_or_two_vector=attn_bias_or_two_vector).mul_(gamma1))
            x = x + self.drop_path(self.ffn(self.fused_norm_func(C=self.C, eps=self.norm_eps, x=x, scale=scale2, shift=shift2)).mul(gamma2))  # this mul(gamma2) cannot be in-placed cuz we possibly use FusedMLP
        return x

    def extra_repr(self) -> str:
        return f'shared_aln={self.shared_aln}, fused_norm={self.fused_norm_func is not None}'
|
| 460 |
+
|
| 461 |
+
|
| 462 |
+
class CrossAttnBlock(nn.Module):
|
| 463 |
+
def __init__(
|
| 464 |
+
self,
|
| 465 |
+
embed_dim, kv_dim, cross_attn_layer_scale, cond_dim, act: bool, shared_aln: bool, norm_layer: partial,
|
| 466 |
+
num_heads, mlp_ratio=4., drop=0., drop_path=0., tau=1, cos_attn=False,
|
| 467 |
+
swiglu=False, customized_flash_attn=False, fused_mlp=False, fused_norm_func=None, checkpointing_sa_only=False,
|
| 468 |
+
use_flex_attn=False, batch_size=2, pad_to_multiplier=1, apply_rope2d=False, rope2d_normalized_by_hw=False,
|
| 469 |
+
):
|
| 470 |
+
super(CrossAttnBlock, self).__init__()
|
| 471 |
+
self.C, self.D = embed_dim, cond_dim
|
| 472 |
+
self.drop_path_rate = drop_path
|
| 473 |
+
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
|
| 474 |
+
self.sa = SelfAttention(
|
| 475 |
+
embed_dim=embed_dim, num_heads=num_heads, proj_drop=drop, tau=tau, cos_attn=cos_attn, customized_flash_attn=customized_flash_attn,
|
| 476 |
+
use_flex_attn=use_flex_attn, batch_size=batch_size, pad_to_multiplier=pad_to_multiplier, rope2d_normalized_by_hw=rope2d_normalized_by_hw,
|
| 477 |
+
)
|
| 478 |
+
self.ca = CrossAttention(embed_dim=embed_dim, kv_dim=kv_dim, num_heads=num_heads, proj_drop=drop, cos_attn=cos_attn)
|
| 479 |
+
self.using_swiglu = swiglu
|
| 480 |
+
self.ffn = (FFNSwiGLU if swiglu else FFN)(in_features=embed_dim, hidden_features=round(embed_dim * mlp_ratio / 256) * 256, drop=drop, fused_mlp=fused_mlp)
|
| 481 |
+
|
| 482 |
+
self.ln_wo_grad = norm_layer(embed_dim, elementwise_affine=False)
|
| 483 |
+
self.fused_norm_func = fused_norm_func
|
| 484 |
+
self.norm_eps = norm_layer.keywords.get('eps', 1e-6)
|
| 485 |
+
self.ca_norm = norm_layer(embed_dim, elementwise_affine=True)
|
| 486 |
+
|
| 487 |
+
self.shared_aln = shared_aln
|
| 488 |
+
if self.shared_aln: # always True
|
| 489 |
+
self.ada_gss = nn.Parameter(torch.randn(1, 1, 6, embed_dim) / embed_dim**0.5)
|
| 490 |
+
else:
|
| 491 |
+
lin = nn.Linear(cond_dim, 6*embed_dim)
|
| 492 |
+
self.ada_lin = nn.Sequential(nn.SiLU(inplace=False), lin) if act else nn.Sequential(lin)
|
| 493 |
+
|
| 494 |
+
if cross_attn_layer_scale >= 0:
|
| 495 |
+
self.ca_gamma = nn.Parameter(cross_attn_layer_scale * torch.ones(embed_dim), requires_grad=True)
|
| 496 |
+
else:
|
| 497 |
+
self.ca_gamma = 1
|
| 498 |
+
|
| 499 |
+
self.checkpointing_sa_only = checkpointing_sa_only
|
| 500 |
+
|
| 501 |
+
# NOTE: attn_bias_or_two_vector is None during inference
|
| 502 |
+
def forward(self, x, cond_BD, ca_kv, attn_bias_or_two_vector, attn_fn=None, scale_schedule=None, rope2d_freqs_grid=None, scale_ind=0):  # todo: minGPT and vqgan also uses pre-norm, just like this, while MaskGiT uses post-norm
    """
    Pre-norm transformer block: AdaLN-modulated self-attention, cross-attention
    to the condition tokens, then an AdaLN-modulated FFN, each wrapped in a residual.

    :param x: (B, L, C) token features
    :param cond_BD: AdaLN condition; when shared_aln it is added to self.ada_gss (116C + B16C)
    :param ca_kv: packed key/value input for cross-attention
    :param attn_bias_or_two_vector: self-attention bias (None during inference)
    :param attn_fn: optional pre-compiled attention function (e.g. FlexAttn) forwarded to self-attention
    :param scale_schedule: list of (t, h, w) scales forwarded to self-attention
    :param rope2d_freqs_grid: precomputed 2D RoPE frequency grid forwarded to self-attention
    :param scale_ind: index of the current scale (presumably selects the RoPE slice during
        KV-cached inference — confirm against SelfAttention)
    :return: (B, L, C) updated token features
    """
    with torch.amp.autocast('cuda', enabled=False):  # compute AdaLN params in fp32 (modernized from deprecated torch.cuda.amp.autocast)
        if self.shared_aln:  # always True; (1, 1, 6, C) + (B, 1, 6, C)
            gamma1, gamma2, scale1, scale2, shift1, shift2 = (self.ada_gss + cond_BD).unbind(2)  # 116C + B16C =unbind(2)=> 6 B1C
        else:
            gamma1, gamma2, scale1, scale2, shift1, shift2 = self.ada_lin(cond_BD).view(-1, 1, 6, self.C).unbind(2)

    if self.fused_norm_func is None:
        x_sa = self.ln_wo_grad(x.float()).mul(scale1.add(1)).add_(shift1)
        if self.checkpointing_sa_only and self.training:
            # NOTE(review): checkpointed path does not forward scale_ind (kept as in the
            # fused branch for parity); only relevant if checkpointing is used with scale_ind != 0.
            x_sa = checkpoint(self.sa, x_sa, attn_bias_or_two_vector, attn_fn, scale_schedule, rope2d_freqs_grid, use_reentrant=False)
        else:
            # BUGFIX: pass scale_ind here as well, consistently with the fused-norm branch below,
            # so the non-fused path sees the same scale index during inference.
            x_sa = self.sa(x_sa, attn_bias_or_two_vector, attn_fn, scale_schedule, rope2d_freqs_grid, scale_ind=scale_ind)
        x = x + self.drop_path(x_sa.mul_(gamma1))
        x = x + self.ca(self.ca_norm(x), ca_kv).float().mul_(self.ca_gamma)
        x = x + self.drop_path(self.ffn( self.ln_wo_grad(x.float()).mul(scale2.add(1)).add_(shift2) ).mul(gamma2))  # this mul(gamma2) cannot be in-placed cuz we possibly use FusedMLP
    else:
        x_sa = self.fused_norm_func(C=self.C, eps=self.norm_eps, x=x, scale=scale1, shift=shift1)
        if self.checkpointing_sa_only and self.training:
            x_sa = checkpoint(self.sa, x_sa, attn_bias_or_two_vector, attn_fn, scale_schedule, rope2d_freqs_grid, use_reentrant=False)
        else:
            x_sa = self.sa(x_sa, attn_bias_or_two_vector, attn_fn, scale_schedule, rope2d_freqs_grid, scale_ind=scale_ind)
        x = x + self.drop_path(x_sa.mul_(gamma1))
        x = x + self.ca(self.ca_norm(x), ca_kv).float().mul_(self.ca_gamma)
        x = x + self.drop_path(self.ffn(self.fused_norm_func(C=self.C, eps=self.norm_eps, x=x, scale=scale2, shift=shift2)).mul(gamma2))  # this mul(gamma2) cannot be in-placed cuz we possibly use FusedMLP
    return x
|
| 528 |
+
|
| 529 |
+
def extra_repr(self) -> str:
    """Summarize the block's configuration for nn.Module's repr output."""
    ca_gamma_desc = "<learnable>" if isinstance(self.ca_gamma, nn.Parameter) else self.ca_gamma
    uses_fused_norm = self.fused_norm_func is not None
    return f'shared_aln={self.shared_aln}, fused_norm={uses_fused_norm}, ca_gamma={ca_gamma_desc}'
|
| 531 |
+
|
| 532 |
+
|
| 533 |
+
class AdaLNBeforeHead(nn.Module):
    """Adaptive layer-norm applied right before the output head.

    Projects the condition vector to per-channel (scale, shift) and applies them
    on top of an affine-free normalization of the input.
    """
    def __init__(self, C, D, act: bool, norm_layer: partial, fused_norm_func=None):  # C: embed_dim, D: cond_dim
        super().__init__()
        self.C, self.D = C, D
        self.ln_wo_grad = norm_layer(C, elementwise_affine=False)   # normalization without learned affine
        self.fused_norm_func = fused_norm_func                       # optional fused (norm+scale+shift) kernel
        self.norm_eps = norm_layer.keywords.get('eps', 1e-6)
        proj = nn.Linear(D, 2*C)
        modules = [nn.SiLU(inplace=False), proj] if act else [proj]
        self.ada_lin = nn.Sequential(*modules)

    def forward(self, x_BLC: torch.Tensor, cond_BD: Optional[torch.Tensor]):
        """Return AdaLN(x_BLC) modulated by scale/shift derived from cond_BD."""
        scale, shift = self.ada_lin(cond_BD).view(-1, 1, 2, self.C).unbind(2)
        if self.fused_norm_func is not None:
            return self.fused_norm_func(C=self.C, eps=self.norm_eps, x=x_BLC, scale=scale, shift=shift)
        normed = self.ln_wo_grad(x_BLC)
        return normed.mul(scale.add(1)).add_(shift)
|
| 549 |
+
|
| 550 |
+
|
| 551 |
+
def main():
    """Smoke-test CrossAttention: build a varlen (compact) KV batch and run forward+backward on CPU."""
    dev = 'cpu'  # 'cuda' if torch.cuda.is_available() else 'cpu'
    rng = torch.Generator(device=dev)
    rng.manual_seed(0)
    B, H, cq, ckv = 4, 8, 64, 96   # batch, heads, query head-dim, kv head-dim
    Cq = H*cq
    Ckv = H*ckv

    Li = [5, 4, 7, 6]   # per-sample KV lengths (variable-length batch)
    Lq = 10             # query length
    L = max(Li)
    # additive attention bias masking the padded KV tail of each sample
    # (kept for reference; the compact/varlen path below encodes lengths via cu_seqlens_k instead)
    attn_bias = torch.zeros(B, 1, Lq, L, device=dev)
    for i, x in enumerate(Li):
        attn_bias[i, 0, :, x:] = -torch.inf

    q = torch.randn(B, Lq, H, cq, generator=rng, device=dev)
    k = torch.randn(B, L, H, ckv, generator=rng, device=dev)
    v = torch.randn(B, L, H, ckv, generator=rng, device=dev)

    # varlen layout: concatenate each sample's valid KV rows, with cumulative offsets.
    # BUGFIX: was `torch.torch.int32` (worked by accident since `torch.torch` aliases the module).
    seqlen_k = torch.tensor(Li, dtype=torch.int32, device=dev)
    cu_seqlens_k = F.pad(torch.cumsum(seqlen_k, dim=0, dtype=torch.int32), (1, 0))
    kv = torch.stack([k, v], dim=2)
    kv_compact = torch.cat([kv[i, :Li[i]] for i in range(B)], dim=0)

    ca = CrossAttention(for_attn_pool=False, embed_dim=Cq, kv_dim=Ckv, num_heads=H)
    # removed stray no-op statement `CrossAttention.forward` and unused tq/tk/tv transposes
    ca(q, (kv_compact, cu_seqlens_k, max(Li))).mean().backward()
|
| 580 |
+
|
| 581 |
+
|
| 582 |
+
if __name__ == '__main__':
|
| 583 |
+
main()
|
generate_infinity_images.py
ADDED
|
@@ -0,0 +1,104 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Driver script: load the Infinity text-to-image stack and render one image per prompt.
import random
import torch
torch.cuda.set_device(0)  # pin all CUDA work to GPU 0
import cv2
import numpy as np

import sys
import os

# make the repo root importable when running from the repo directory
sys.path.insert(0, os.path.abspath("."))
from tools.run_infinity import *  # brings in argparse, osp, load_* and gen_one_img helpers (star import)

import infinity.models.basic as basic

import csv
from torch.utils.data import Dataset

# Force-disable every flash-attention / fused-op fast path so the model falls back
# to the plain PyTorch implementations (e.g. when flash-attn is not installed).
basic.flash_attn_func = None
basic.flash_attn_varlen_kvpacked_func = None
basic.flash_attn_varlen_qkvpacked_func = None
basic.flash_attn_varlen_func = None
basic.flash_fused_op_installed = False

# checkpoints and text encoder
model_path='weights/infinity_2b_reg.pth'
vae_path='weights/infinity_vae_d32reg.pth'
text_encoder_ckpt = 'google/flan-t5-xl'
# full inference configuration for the 2B model (mirrors the repo's reference settings)
args=argparse.Namespace(
    pn='1M',                             # target pixel budget / resolution preset
    model_path=model_path,
    cfg_insertion_layer=0,
    vae_type=32,
    vae_path=vae_path,
    add_lvl_embeding_only_first_block=1,
    use_bit_label=1,
    model_type='infinity_2b',
    rope2d_each_sa_layer=1,
    rope2d_normalized_by_hw=2,
    use_scale_schedule_embedding=0,
    sampling_per_bits=1,
    text_encoder_ckpt=text_encoder_ckpt,
    text_channels=2048,
    apply_spatial_patchify=0,
    h_div_w_template=1.000,
    use_flex_attn=0,
    cache_dir='/dev/shm',
    checkpoint_type='torch',
    seed=0,
    bf16=1,
    save_file='tmp.jpg',
    enable_model_cache=0,
)

# load text encoder
text_tokenizer, text_encoder = load_tokenizer(t5_path=args.text_encoder_ckpt)
# load vae
vae = load_visual_tokenizer(args)
# load infinity
infinity = load_transformer(vae, args)

# PROMPT: one output image per (category, prompt) pair
prompts = {
    "stockholm": "A panorama photo of the beautiful city of Stockholm.",
    "hackathon": "A photorealistic image of a room full of energetic and motivated people working on programming tasks."
}

# OUTPUT
output_dir = "outputs"
os.makedirs(output_dir, exist_ok=True)

# GEN IMG
for category, prompt in prompts.items():
    cfg = 3        # classifier-free guidance strength
    tau = 0.5      # sampling temperature
    h_div_w = 1/1 # Aspect Ratio
    seed = random.randint(0, 10000)  # fresh random seed per image
    enable_positive_prompt = 0

    # snap the requested aspect ratio to the nearest supported template,
    # then take its scale schedule and force single-frame (t=1) scales
    h_div_w_template_ = h_div_w_templates[np.argmin(np.abs(h_div_w_templates-h_div_w))]
    scale_schedule = dynamic_resolution_h_w[h_div_w_template_][args.pn]['scales']
    scale_schedule = [(1, h, w) for (_, h, w) in scale_schedule]

    # GEN
    generated_image = gen_one_img(
        infinity,
        vae,
        text_tokenizer,
        text_encoder,
        prompt,
        g_seed=seed,
        gt_leak=0,
        gt_ls_Bl=None,
        cfg_list=cfg,
        tau_list=tau,
        scale_schedule=scale_schedule,
        cfg_insertion_layer=[args.cfg_insertion_layer],
        vae_type=args.vae_type,
        sampling_per_bits=args.sampling_per_bits,
        enable_positive_prompt=enable_positive_prompt,
    )

    # SAVE (presumably gen_one_img returns an HWC uint8-compatible tensor in BGR order
    # suitable for cv2.imwrite — TODO confirm against tools.run_infinity)
    save_path = osp.join(output_dir, f"{category}.jpg")
    cv2.imwrite(save_path, generated_image.cpu().numpy())
    print(f"{category} image saved to {save_path}")
|
infinity.py
ADDED
|
@@ -0,0 +1,800 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Definition of Infinity transformer model.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import math
|
| 6 |
+
import random
|
| 7 |
+
import time
|
| 8 |
+
from contextlib import nullcontext
|
| 9 |
+
from functools import partial
|
| 10 |
+
from typing import List, Optional, Tuple, Union, Dict, Any
|
| 11 |
+
|
| 12 |
+
import torch
|
| 13 |
+
import torch.nn as nn
|
| 14 |
+
import torch.nn.functional as F
|
| 15 |
+
from timm.models import register_model
|
| 16 |
+
from torch.utils.checkpoint import checkpoint
|
| 17 |
+
from PIL import Image
|
| 18 |
+
import numpy as np
|
| 19 |
+
|
| 20 |
+
import infinity.utils.dist as dist
|
| 21 |
+
from infinity.utils.dist import for_visualize
|
| 22 |
+
from infinity.models.basic import flash_attn_func, flash_fused_op_installed, AdaLNBeforeHead, CrossAttnBlock, SelfAttnBlock, CrossAttention, FastRMSNorm, precompute_rope2d_freqs_grid
|
| 23 |
+
from infinity.utils import misc
|
| 24 |
+
from infinity.models.flex_attn import FlexAttn
|
| 25 |
+
from infinity.utils.dynamic_resolution import dynamic_resolution_h_w, h_div_w_templates
|
| 26 |
+
|
| 27 |
+
try:
|
| 28 |
+
from infinity.models.fused_op import fused_ada_layer_norm, fused_ada_rms_norm
|
| 29 |
+
except:
|
| 30 |
+
fused_ada_layer_norm, fused_ada_rms_norm = None, None
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class MultiInpIdentity(nn.Module):
    """Identity module that returns its first input unchanged, ignoring any extra
    positional or keyword arguments (drop-in stand-in for multi-input blocks)."""
    def forward(self, x, *args, **kwargs):
        return x
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class TextAttentivePool(nn.Module):
    # Pools a variable-length text-token sequence (channel dim Ct5) into a single
    # D-dimensional vector via a learned cross-attention query (for_attn_pool=True).
    def __init__(self, Ct5: int, D: int):
        super().__init__()
        self.Ct5, self.D = Ct5, D
        # NOTE(review): larger D selects the SMALLER head_dim (64) — looks inverted,
        # but kept as-is; confirm against the trained checkpoints' head configuration.
        if D > 4096:
            self.head_dim = 64
        else:
            self.head_dim = 128

        # number of heads is derived from the text channel width, not from D
        self.num_heads = Ct5 // self.head_dim
        self.ca = CrossAttention(for_attn_pool=True, embed_dim=self.D, kv_dim=Ct5, num_heads=self.num_heads)
    def forward(self, ca_kv):
        # ca_kv: packed (kv_compact, cu_seqlens, max_len) text K/V; query is the
        # module's internal pooling query (q=None), squeeze drops the length-1 axis.
        return self.ca(None, ca_kv).squeeze(1)
|
| 51 |
+
|
| 52 |
+
class SharedAdaLin(nn.Linear):
    """Linear layer producing the six shared AdaLN parameter groups.

    Output is reshaped to (B, 1, 6, C) so downstream blocks can unbind the six
    (gamma1, gamma2, scale1, scale2, shift1, shift2) chunks along dim 2.
    """
    def forward(self, cond_BD):
        chunk_width = self.weight.shape[0] // 6  # out_features == 6 * C
        projected = super().forward(cond_BD)
        return projected.reshape(-1, 1, 6, chunk_width)  # B16C
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
class MultipleLayers(nn.Module):
    """A registered chunk of consecutive transformer blocks.

    Wraps ``num_blocks_in_a_chunk`` blocks taken from ``ls`` starting at ``index``
    so a deep stack can be split into a few module chunks.
    """
    def __init__(self, ls, num_blocks_in_a_chunk, index):
        super().__init__()
        self.module = nn.ModuleList(ls[index:index + num_blocks_in_a_chunk])

    def forward(self, x, cond_BD, ca_kv, attn_bias_or_two_vector, attn_fn=None, scale_schedule=None, checkpointing_full_block=False, rope2d_freqs_grid=None):
        """Run the chunk sequentially; optionally gradient-checkpoint each block."""
        h = x
        for blk in self.module:
            if checkpointing_full_block:
                h = torch.utils.checkpoint.checkpoint(
                    blk, h, cond_BD, ca_kv, attn_bias_or_two_vector,
                    attn_fn, scale_schedule, rope2d_freqs_grid,
                    use_reentrant=False,
                )
            else:
                h = blk(h, cond_BD, ca_kv, attn_bias_or_two_vector, attn_fn, scale_schedule, rope2d_freqs_grid)
        return h
|
| 73 |
+
|
| 74 |
+
class Infinity(nn.Module):
|
| 75 |
+
def __init__(
    self, vae_local,
    text_channels=0, text_maxlen=0, # text-cond generation
    selecting_idx=None, # class-cond generation
    embed_dim=1024, depth=16, num_heads=16, mlp_ratio=4., # model's architecture
    drop_rate=0., drop_path_rate=0., # drop out and drop path
    norm_eps=1e-6, rms_norm=False, # norm layer
    shared_aln=False, head_aln=True, # adaptive norm
    cond_drop_rate=0.1, # for classifier-free guidance
    rand_uncond=False,
    cross_attn_layer_scale=-1., nm0=False, tau=1, cos_attn=True, swiglu=False,
    raw_scale_schedule=(1, 2, 3, 4, 5, 6, 8, 10, 13, 16),
    head_depth=1,
    top_p=0.0, top_k=0.0,
    customized_flash_attn=False, fused_mlp=False, fused_norm=False,
    block_chunks=1,
    checkpointing=None,
    pad_to_multiplier=0,
    use_flex_attn=False,
    batch_size=2,
    add_lvl_embeding_only_first_block=1,
    use_bit_label=1,
    rope2d_each_sa_layer=0,
    rope2d_normalized_by_hw=0,
    pn=None,
    train_h_div_w_list=None,
    video_frames=1,
    always_training_scales=20,
    apply_spatial_patchify = 0,
    inference_mode=False,
):
    """Build the Infinity transformer.

    Plain (non-Module) hyperparameter attributes are intentionally assigned BEFORE
    super().__init__(); only Parameters/Modules/buffers require nn.Module init first.
    """
    # set hyperparameters
    self.C = embed_dim
    self.inference_mode = inference_mode
    self.apply_spatial_patchify = apply_spatial_patchify
    # spatial patchify folds a 2x2 patch into channels, quadrupling the VAE dim
    if self.apply_spatial_patchify:
        self.d_vae = vae_local.embed_dim * 4
    else:
        self.d_vae = vae_local.embed_dim
    self.use_bit_label = use_bit_label
    self.codebook_dim = self.d_vae
    # with bit labels the "vocabulary" is 2 logits per codebook bit
    self.V = (self.codebook_dim * 2) if self.use_bit_label else vae_local.vocab_size
    self.bit_mask = vae_local.quantizer.lfq.mask if self.use_bit_label else None
    self.Ct5 = text_channels
    self.depth = depth
    self.num_heads = num_heads
    self.batch_size = batch_size
    self.mlp_ratio = mlp_ratio
    self.cond_drop_rate = cond_drop_rate
    self.norm_eps = norm_eps
    self.prog_si = -1
    self.pn = pn
    self.train_h_div_w_list = train_h_div_w_list if train_h_div_w_list else h_div_w_templates
    self.video_frames = video_frames
    self.always_training_scales = always_training_scales

    assert add_lvl_embeding_only_first_block in [0,1]
    self.add_lvl_embeding_only_first_block = add_lvl_embeding_only_first_block
    assert rope2d_each_sa_layer in [0,1]
    self.rope2d_each_sa_layer = rope2d_each_sa_layer
    self.rope2d_normalized_by_hw = rope2d_normalized_by_hw
    print(f'self.codebook_dim: {self.codebook_dim}, self.add_lvl_embeding_only_first_block: {self.add_lvl_embeding_only_first_block}, \
self.use_bit_label: {self.use_bit_label}, self.rope2d_each_sa_layer: {rope2d_each_sa_layer}, self.rope2d_normalized_by_hw: {self.rope2d_normalized_by_hw}')
    # head_up_method is hard-coded off, so word_patch_size is always 1 here
    head_up_method = ''
    word_patch_size = 1 if head_up_method in {'', 'no'} else 2
    if word_patch_size > 1:
        assert all(raw_pn % word_patch_size == 0 for raw_pn in raw_scale_schedule), f'raw_scale_schedule={raw_scale_schedule}, not compatible with word_patch_size={word_patch_size}'

    self.checkpointing = checkpointing
    self.pad_to_multiplier = max(1, pad_to_multiplier)

    # detect whether the installed flash_attn_func is the project's customized kernel
    # (heuristic: a parameter name containing 'Infinity' in its code object)
    if flash_attn_func is None:
        customized_kernel_installed = False
    else:
        customized_kernel_installed = any(
            'Infinity' in arg_name
            for arg_name in flash_attn_func.__code__.co_varnames
        )
    self.customized_flash_attn = customized_flash_attn and customized_kernel_installed
    if customized_flash_attn and not customized_kernel_installed:
        import inspect, warnings
        file_path = inspect.getsourcefile(flash_attn_func)
        line_number = inspect.getsourcelines(flash_attn_func)[1]
        info = (
            f'>>>>>> Customized FlashAttention2 is not installed or compiled, but specified in args by --flash=1. Set customized_flash_attn = False. <<<<<<\n'
            f'>>>>>> `flash_attn_func` is in [line {line_number}] [file {file_path}] <<<<<<\n'
            f'>>>>>> {flash_attn_func.__code__.co_varnames=} <<<<<<\n'
        )
        warnings.warn(info, ImportWarning)
        print(info, flush=True)

    self.raw_scale_schedule = raw_scale_schedule # 'raw' means before any patchifying
    self.first_l = 1
    # solve top-p top-k sampling hyperparameters
    # top_p clamped to [0, 1]; fractional top_k (0 < k < 1) is a fraction of V
    self.top_p, self.top_k = max(min(top_p, 1), 0), (round(top_k * self.V) if 0 < top_k < 1 else round(top_k))
    if self.top_p < 1e-5: self.top_p = 0
    if self.top_k >= self.V or self.top_k <= 0: self.top_k = 0

    # sanity check: flash_fused_op_installed must agree across all dist ranks
    t = torch.zeros(dist.get_world_size(), device=dist.get_device())
    t[dist.get_rank()] = float(flash_fused_op_installed)
    dist.barrier()
    dist.allreduce(t)
    assert round(t.sum().item()) in {0, dist.get_world_size()}, f'flash_fused_op_installed: {t}'

    super().__init__()
    self.rng = torch.Generator(device=dist.get_device())
    self.maybe_record_function = nullcontext
    self.text_maxlen = text_maxlen
    self.t2i = text_channels != 0  # text-to-image mode iff text channels are given

    # [inp & position embedding]
    init_std = math.sqrt(1 / self.C / 3)
    self.norm0_cond = nn.Identity()
    if self.t2i:
        self.selecting_idx = None
        self.num_classes = 0
        self.D = self.C

        # fixed-seed unconditional text embedding used for classifier-free guidance;
        # a buffer when rand_uncond else a learned Parameter
        cfg_uncond = torch.empty(self.text_maxlen, self.Ct5)
        rng = torch.Generator(device='cpu')
        rng.manual_seed(0)
        torch.nn.init.trunc_normal_(cfg_uncond, std=1.2, generator=rng)
        cfg_uncond /= self.Ct5 ** 0.5
        if rand_uncond:
            self.register_buffer('cfg_uncond', cfg_uncond)
        else:
            self.cfg_uncond = nn.Parameter(cfg_uncond)

        self.text_norm = FastRMSNorm(self.Ct5, elementwise_affine=True, eps=norm_eps)
        self.text_proj_for_sos = TextAttentivePool(self.Ct5, self.D)  # pools text tokens into the SOS token
        self.text_proj_for_ca = nn.Sequential(  # projects text tokens for cross-attention K/V
            nn.Linear(self.Ct5, self.D),
            nn.GELU(approximate='tanh'),
            nn.Linear(self.D, self.D),
        )
    else: # class-label cond
        if selecting_idx is None:
            num_classes = 1000
            print(f'======= WARNING: selecting_idx not specified, set to 1/{num_classes} @ {dist.get_device()} =======')
            selecting_idx = torch.full((1, num_classes), fill_value=1/num_classes, dtype=torch.float32, device=dist.get_device())
        self.selecting_idx = selecting_idx
        self.num_classes = selecting_idx.shape[-1]
        self.D = self.C
        self.class_emb = nn.Embedding(self.num_classes + 1, self.C)  # +1 for the uncond class
        nn.init.trunc_normal_(self.class_emb.weight.data, mean=0, std=init_std)

    self.pos_start = nn.Parameter(torch.empty(1, self.first_l, self.C))
    nn.init.trunc_normal_(self.pos_start.data, mean=0, std=init_std)
    if self.rope2d_each_sa_layer:
        rope2d_freqs_grid = precompute_rope2d_freqs_grid(dim=self.C//self.num_heads, dynamic_resolution_h_w=dynamic_resolution_h_w, pad_to_multiplier=self.pad_to_multiplier, rope2d_normalized_by_hw=self.rope2d_normalized_by_hw)
        self.rope2d_freqs_grid = rope2d_freqs_grid
    else:
        raise ValueError(f'self.rope2d_each_sa_layer={self.rope2d_each_sa_layer} not implemented')
    self.lvl_embed = nn.Embedding(15, self.C)  # per-scale level embedding (up to 15 scales)
    nn.init.trunc_normal_(self.lvl_embed.weight.data, mean=0, std=init_std)

    # [input layers] input norm && input embedding
    norm_layer = partial(FastRMSNorm if rms_norm else nn.LayerNorm, eps=norm_eps)
    self.norm0_ve = norm_layer(self.d_vae) if nm0 else nn.Identity()
    self.word_embed = nn.Linear(self.d_vae, self.C)

    # [shared adaptive layernorm mapping network]
    self.shared_ada_lin = nn.Sequential(nn.SiLU(inplace=False), SharedAdaLin(self.D, 6*self.C)) if shared_aln else nn.Identity()

    # fused norm
    if fused_norm:
        fused_norm_func = fused_ada_rms_norm if rms_norm else fused_ada_layer_norm
        if fused_norm_func is not None: # pre-compile
            B = 2
            x = torch.randn(B, 1, self.C).requires_grad_(True)
            scale = torch.randn(B, 1, self.C).mul_(0.01).requires_grad_(True)
            shift = torch.randn(B, 1, self.C).mul_(0.01).requires_grad_(True)
            # fused_norm_func(C=self.C, eps=self.norm_eps, x=x, scale=scale, shift=shift).mean().backward()
            del B, x, scale, shift
    else:
        fused_norm_func = None

    # [backbone and head]
    self.use_flex_attn = use_flex_attn
    self.attn_fn_compile_dict = {}
    self.batch_size = batch_size  # NOTE: re-assigned (already set above); harmless but redundant
    if self.use_flex_attn:
        self.attn_fn_compile_dict = self.compile_flex_attn()

    self.drop_path_rate = drop_path_rate
    dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # dpr means drop path rate (linearly increasing)
    self.unregistered_blocks = []
    for block_idx in range(depth):
        # CrossAttnBlock for text-conditioned models, SelfAttnBlock for class-conditioned
        block = (CrossAttnBlock if self.t2i else SelfAttnBlock)(
            embed_dim=self.C, kv_dim=self.D, cross_attn_layer_scale=cross_attn_layer_scale, cond_dim=self.D, act=True, shared_aln=shared_aln, norm_layer=norm_layer,
            num_heads=num_heads, mlp_ratio=mlp_ratio, drop=drop_rate, drop_path=dpr[block_idx], tau=tau, cos_attn=cos_attn,
            swiglu=swiglu, customized_flash_attn=self.customized_flash_attn, fused_mlp=fused_mlp, fused_norm_func=fused_norm_func,
            checkpointing_sa_only=self.checkpointing == 'self-attn',
            use_flex_attn=use_flex_attn, batch_size=batch_size, pad_to_multiplier=pad_to_multiplier, rope2d_normalized_by_hw=rope2d_normalized_by_hw,
        )
        self.unregistered_blocks.append(block)

    # [head]
    V = self.V
    if head_aln:
        self.head_nm = AdaLNBeforeHead(self.C, self.D, act=True, norm_layer=norm_layer, fused_norm_func=fused_norm_func)
        self.head = nn.Linear(self.C, V) if head_depth == 1 else nn.Sequential(nn.Linear(self.C, self.C, bias=True), nn.GELU(approximate='tanh'), nn.Linear(self.C, V))
    else:
        self.head_nm = MultiInpIdentity()
        self.head = nn.Sequential(norm_layer(self.C), nn.Linear(self.C, V)) if head_depth == 1 else nn.Sequential(norm_layer(self.C), nn.Linear(self.C, self.C, bias=True), nn.GELU(approximate='tanh'), nn.Linear(self.C, V))

    # register the blocks, either flat or grouped into chunks of MultipleLayers
    self.num_block_chunks = block_chunks or 1
    self.num_blocks_in_a_chunk = depth // block_chunks
    print(f"{self.num_blocks_in_a_chunk=}, {depth=}, {block_chunks=}")
    assert self.num_blocks_in_a_chunk * block_chunks == depth
    if self.num_block_chunks == 1:
        self.blocks = nn.ModuleList(self.unregistered_blocks)
    else:
        self.block_chunks = nn.ModuleList()
        for i in range(self.num_block_chunks):
            self.block_chunks.append(MultipleLayers(self.unregistered_blocks, self.num_blocks_in_a_chunk, i*self.num_blocks_in_a_chunk))
    print(
        f'\n[constructor] ==== customized_flash_attn={self.customized_flash_attn} (using_flash={sum((b.sa.using_flash if self.t2i else b.attn.using_flash) for b in self.unregistered_blocks)}/{self.depth}), fused_mlp={fused_mlp} (fused_mlp={sum(b.ffn.fused_mlp_func is not None for b in self.unregistered_blocks)}/{self.depth}) ==== \n'
        f' [Infinity config ] embed_dim={embed_dim}, num_heads={num_heads}, depth={depth}, mlp_ratio={mlp_ratio}, swiglu={swiglu} num_blocks_in_a_chunk={self.num_blocks_in_a_chunk}\n'
        f' [drop ratios] drop_rate={drop_rate}, drop_path_rate={drop_path_rate:g} ({torch.linspace(0, drop_path_rate, depth)})',
        end='\n\n', flush=True
    )
|
| 298 |
+
|
| 299 |
+
def compile_flex_attn(self):
    """Pre-build FlexAttn callables for every (aspect-ratio, scale-count) pattern.

    Returns a dict keyed by the tuple of (t, h, w) patch numbers; forward passes
    look up the matching pre-compiled attention function by that tuple.
    """
    attn_fn_compile_dict = {}
    for h_div_w in self.train_h_div_w_list:
        # snap to the nearest supported aspect-ratio template and get its scale list
        h_div_w_template = h_div_w_templates[np.argmin(np.abs(float(h_div_w) - h_div_w_templates))]
        full_scale_schedule = dynamic_resolution_h_w[h_div_w_template][self.pn]['scales']
        if self.inference_mode:
            # inference: one attn_fn per prefix length (KV-cache mask), with auto padding
            apply_flex_attn_scales = list(range(1, 1+len(full_scale_schedule)))
            mask_type = "infinity_infer_mask_with_kv_cache"
            auto_padding = True
        else:
            # training: a single attn_fn for the (possibly truncated) full schedule
            mask_type = 'var'
            auto_padding = False
            apply_flex_attn_scales = [min(self.always_training_scales, len(full_scale_schedule))]
        for scales_num in apply_flex_attn_scales:
            print(f'====== apply flex attn hdivw: {h_div_w} scales: {scales_num} ======')
            scale_schedule = full_scale_schedule[:scales_num]
            # cap the temporal extent by the number of video frame groups
            scale_schedule = [ (min(t, self.video_frames//4+1), h, w) for (t,h, w) in scale_schedule]
            patchs_nums_tuple = tuple(scale_schedule)
            SEQ_L = sum( pt * ph * pw for pt, ph, pw in patchs_nums_tuple)
            # round the sequence length up to pad_to_multiplier
            aligned_L = SEQ_L+ (self.pad_to_multiplier - SEQ_L % self.pad_to_multiplier) if SEQ_L % self.pad_to_multiplier != 0 else SEQ_L
            attn_fn = FlexAttn(block_scales = patchs_nums_tuple,
                                mask_type = mask_type,
                                B = self.batch_size,
                                H = self.num_heads,
                                L = aligned_L,
                                auto_padding=auto_padding)
            attn_fn_compile_dict[patchs_nums_tuple] = attn_fn

        if self.video_frames > 1: # append image attn_fn when self.video_frames > 1 (namely videos)
            # single-frame (t=1) variant of the last schedule, for image batches in a video model
            scale_schedule = [ (1, h, w) for (t,h, w) in scale_schedule]
            patchs_nums_tuple = tuple(scale_schedule)
            SEQ_L = sum( pt * ph * pw for pt, ph, pw in patchs_nums_tuple)
            aligned_L = SEQ_L+ (self.pad_to_multiplier - SEQ_L % self.pad_to_multiplier) if SEQ_L % self.pad_to_multiplier != 0 else SEQ_L
            attn_fn = FlexAttn(block_scales = patchs_nums_tuple,
                                mask_type = mask_type,
                                B = self.batch_size,
                                H = self.num_heads,
                                L = aligned_L)
            attn_fn_compile_dict[patchs_nums_tuple] = attn_fn
    return attn_fn_compile_dict
|
| 340 |
+
def get_logits(self, h: torch.Tensor, cond_BD: Optional[torch.Tensor]):
    """
    Project hidden states onto the vocabulary.

    :param h: hidden_state, shaped (B or batch_size, L or seq_len, C or hidden_dim)
    :param cond_BD: condition embedding, shaped (B or batch_size, D or cond_dim)
    :return: logits, shaped (B or batch_size, L or seq_len, V or vocabulary_size)
    """
    # The head runs in float32: autocast is disabled and both inputs are upcast.
    with torch.amp.autocast('cuda', enabled=False):
        normed = self.head_nm(h.float(), cond_BD.float())
        return self.head(normed)
|
| 349 |
+
|
| 350 |
+
def add_lvl_embeding(self, feature, scale_ind, scale_schedule, need_to_pad=0):
    """
    Add the level (scale-index) embedding, in place, to the un-padded part of `feature`.

    feature: (B, L, C) tokens of one scale; the last `need_to_pad` positions are padding.
    scale_ind: index into `scale_schedule` identifying the current scale.
    Returns the same tensor, modified in place.
    """
    bs, seq_len, _ = feature.shape
    pt, ph, pw = scale_schedule[scale_ind]
    valid_len = pt * ph * pw
    assert valid_len + need_to_pad == seq_len
    # Every valid position gets the same level id; pad positions stay untouched.
    lvl_ids = scale_ind * torch.ones((bs, valid_len), dtype=torch.int).to(feature.device)
    feature[:, :valid_len] += self.lvl_embed(lvl_ids)
    return feature
|
| 357 |
+
|
| 358 |
+
def add_lvl_embeding_for_x_BLC(self, x_BLC, scale_schedule, need_to_pad=0):
    """
    Apply the per-scale level embedding to every scale segment of a packed sequence.

    x_BLC: (B, L, C) with all scales concatenated along L, plus `need_to_pad` trailing pad tokens.
    Returns a new tensor with each segment level-embedded and the pad tail appended unchanged.
    """
    pieces, offset = [], 0
    for si, pthw in enumerate(scale_schedule):
        seg_len = np.array(pthw).prod()
        segment = x_BLC[:, offset:offset + seg_len]  # shape: [bs, patch_h*patch_w, c]
        offset += seg_len
        pieces.append(self.add_lvl_embeding(segment, si, scale_schedule))
    assert x_BLC.shape[1] == (offset + need_to_pad), f'{x_BLC.shape[1]} != {offset} + {need_to_pad}'
    pieces.append(x_BLC[:, offset:])  # keep the pad tail as-is
    return torch.cat(pieces, dim=1)
|
| 371 |
+
|
| 372 |
+
def forward(self, label_B_or_BLT: Union[torch.LongTensor, Tuple[torch.FloatTensor, torch.IntTensor, int]], x_BLC_wo_prefix: torch.Tensor, scale_schedule: List[Tuple[int]],
    cfg_infer=False,
    **kwargs,
) -> Union[torch.Tensor, List[torch.Tensor]]:    # returns logits_BLV
    """
    Teacher-forced training forward pass (or dispatch to CFG inference).

    label_B_or_BLT: label_B or (kv_compact, cu_seqlens_k, max_seqlen_k)
        NOTE(review): the unpacking below expects a 4-tuple
        (kv_compact, lens, cu_seqlens_k, max_seqlen_k); the 3-tuple above looks
        out of date — confirm against callers.
    x_BLC_wo_prefix: packed VAE features for all scales except the sos position.
    scale_schedule: list of (t, h, w) patch counts, one per scale.
    cfg_infer: when True, forwards all kwargs to autoregressive_infer_cfg.
    :return: logits BLV, V is vocab_size
    """
    if cfg_infer:
        return self.autoregressive_infer_cfg(label_B_or_BLT=label_B_or_BLT, scale_schedule=scale_schedule, **kwargs)
    
    x_BLC_wo_prefix = x_BLC_wo_prefix.float()   # input should be float32
    B = x_BLC_wo_prefix.shape[0]
    
    # [1. get input sequence x_BLC]
    with torch.amp.autocast('cuda', enabled=False):
        kv_compact, lens, cu_seqlens_k, max_seqlen_k = label_B_or_BLT
        # drop cond: randomly replace a prompt's tokens with the unconditional
        # embedding so the model learns the CFG unconditional branch
        total = 0
        for le in lens:
            if random.random() < self.cond_drop_rate:
                kv_compact[total:total+le] = self.cfg_uncond[:le]
            total += le
        # zero-valued read keeps cfg_uncond on the autograd graph even when no
        # prompt was dropped this step
        must_on_graph = self.cfg_uncond[0, 0] * 0
        kv_compact = self.text_norm(kv_compact).contiguous()
        sos = cond_BD = self.text_proj_for_sos((kv_compact, cu_seqlens_k, max_seqlen_k)).float().contiguous()    # cond_BD should be float32
        kv_compact = self.text_proj_for_ca(kv_compact).contiguous()
        kv_compact[0, 0] += must_on_graph
        ca_kv = kv_compact, cu_seqlens_k, max_seqlen_k
        
        cond_BD_or_gss = self.shared_ada_lin(cond_BD).contiguous()    # gss: gamma, scale, shift; cond_BD_or_gss should be float32
        
        sos = sos.unsqueeze(1).expand(B, 1, -1) + self.pos_start.expand(B, 1, -1)
        x_BLC = torch.cat((sos, self.word_embed(self.norm0_ve(x_BLC_wo_prefix))), dim=1)
    
    # [1.1. pad the seqlen dim]
    l_end = x_BLC.shape[1]
    need_to_pad = (l_end + self.pad_to_multiplier - 1) // self.pad_to_multiplier * self.pad_to_multiplier - l_end # 0
    
    if self.customized_flash_attn:
        Infinity_visible_kvlen = self.Infinity_visible_kvlen[:l_end]
        Infinity_invisible_qlen = self.Infinity_invisible_qlen[:l_end]
        attn_bias_or_two_vector = (Infinity_visible_kvlen, Infinity_invisible_qlen)
        # todo: solve need_to_pad here
    elif self.use_flex_attn:
        if need_to_pad:
            x_BLC = F.pad(x_BLC, (0, 0, 0, need_to_pad))
        assert x_BLC.shape[-1] % 128 == 0, 'x_BLC.shape[-1] % 128 != 0'
        attn_bias_or_two_vector = None
    else:
        # dense block-causal mask: a query may attend to every scale up to and
        # including its own (d holds each position's scale index)
        d: torch.Tensor = torch.cat([torch.full((pn[0]*pn[1]*pn[2],), i) for i, pn in enumerate(scale_schedule)]).view(1, l_end, 1)
        dT = d.transpose(1, 2)    # dT: 11L
        attn_bias_for_masking = torch.where(d >= dT, 0., -torch.inf).reshape(1, 1, l_end, l_end)
        attn_bias = attn_bias_for_masking[:, :, :l_end, :l_end].contiguous()    # attn_bias: 11LL
        if need_to_pad:
            attn_bias = F.pad(attn_bias, (0, need_to_pad, 0, need_to_pad), value=-torch.inf)
            attn_bias[0, 0, l_end:, 0] = 0    # give pad queries one visible key so their softmax stays finite
            x_BLC = F.pad(x_BLC, (0, 0, 0, need_to_pad))
        attn_bias_or_two_vector = attn_bias.type_as(x_BLC).to(x_BLC.device)
    
    if self.use_flex_attn:
        attn_fn = self.attn_fn_compile_dict[tuple(scale_schedule)]
    else:
        attn_fn = None
    
    # [2. block loop]
    SelfAttnBlock.forward, CrossAttnBlock.forward    # no-op statement; kept as quick IDE jump targets
    checkpointing_full_block = self.checkpointing == 'full-block' and self.training
    if self.num_block_chunks == 1:
        for i, b in enumerate(self.blocks):
            # level embedding is added either once (before the first block) or
            # before every block, depending on the flag
            if self.add_lvl_embeding_only_first_block and i == 0:
                x_BLC = self.add_lvl_embeding_for_x_BLC(x_BLC, scale_schedule, need_to_pad)
            if not self.add_lvl_embeding_only_first_block:
                x_BLC = self.add_lvl_embeding_for_x_BLC(x_BLC, scale_schedule, need_to_pad)
            if checkpointing_full_block:
                x_BLC = torch.utils.checkpoint.checkpoint(b, x_BLC, cond_BD_or_gss, ca_kv, attn_bias_or_two_vector, attn_fn, scale_schedule, self.rope2d_freqs_grid, use_reentrant=False)
            else:
                x_BLC = b(x=x_BLC, cond_BD=cond_BD_or_gss, ca_kv=ca_kv, attn_bias_or_two_vector=attn_bias_or_two_vector, attn_fn=attn_fn, scale_schedule=scale_schedule, rope2d_freqs_grid=self.rope2d_freqs_grid)
    else:
        for i, chunk in enumerate(self.block_chunks): # this path
            if self.add_lvl_embeding_only_first_block and i == 0:
                x_BLC = self.add_lvl_embeding_for_x_BLC(x_BLC, scale_schedule, need_to_pad)
            if not self.add_lvl_embeding_only_first_block:
                x_BLC = self.add_lvl_embeding_for_x_BLC(x_BLC, scale_schedule, need_to_pad)
            x_BLC = chunk(x=x_BLC, cond_BD=cond_BD_or_gss, ca_kv=ca_kv, attn_bias_or_two_vector=attn_bias_or_two_vector, attn_fn=attn_fn, scale_schedule=scale_schedule, checkpointing_full_block=checkpointing_full_block, rope2d_freqs_grid=self.rope2d_freqs_grid)
    
    # [3. unpad the seqlen dim, and then get logits]
    return self.get_logits(x_BLC[:, :l_end], cond_BD)    # return logits BLV, V is vocab_size
|
| 460 |
+
|
| 461 |
+
@torch.no_grad()
def autoregressive_infer_cfg(
    self,
    vae=None,
    scale_schedule=None,
    label_B_or_BLT=None,
    B=1, negative_label_B_or_BLT=None, force_gt_Bhw=None,
    g_seed=None, cfg_list=[], tau_list=[], cfg_sc=3, top_k=0, top_p=0.0,
    returns_vemb=0, ratio_Bl1=None, gumbel=0, norm_cfg=False,
    cfg_exp_k: float=0.0, cfg_insertion_layer=[-5],
    vae_type=0, softmax_merge_topk=-1, ret_img=False,
    trunk_scale=1000,
    gt_leak=0, gt_ls_Bl=None,
    inference_mode=False,
    save_img_path=None,
    sampling_per_bits=1,
):    # returns List[idx_Bl]
    """
    Coarse-to-fine autoregressive sampling with classifier-free guidance (CFG).

    vae: tokenizer used to turn sampled indices back into codes / pixels.
    scale_schedule: list of (t, h, w) patch counts, one entry per scale.
    label_B_or_BLT: packed text condition (kv_compact, lens, cu_seqlens_k, max_seqlen_k).
    negative_label_B_or_BLT: optional packed negative prompt for the CFG branch.
    cfg_list / tau_list: per-scale guidance strengths and temperatures (>= one entry per scale).
    cfg_insertion_layer: 0 -> apply CFG on logits, 1 -> on probs, negative -> after that block (counted from the end).
    trunk_scale: stop sampling once this many scales are done (early exit).
    gt_leak / gt_ls_Bl: teacher-force the first `gt_leak` scales with ground-truth indices.
    ret_img: when True, also decode and return a uint8 image batch.
    :return: (ret, idx_Bl_list, img-or-empty-list)

    NOTE(review): several kwargs (cfg_sc, force_gt_Bhw, ratio_Bl1, gumbel,
    norm_cfg, cfg_exp_k, softmax_merge_topk, save_img_path, sampling_per_bits)
    are never read in this body — presumably vestigial; confirm. The mutable
    defaults ([], [-5]) are never mutated here, so they are harmless as-is.
    """
    if g_seed is None: rng = None
    else: self.rng.manual_seed(g_seed); rng = self.rng
    assert len(cfg_list) >= len(scale_schedule)
    assert len(tau_list) >= len(scale_schedule)
    
    # scale_schedule is used by infinity, vae_scale_schedule is used by vae if there exists a spatial patchify,
    # we need to convert scale_schedule to vae_scale_schedule by multiply 2 to h and w
    if self.apply_spatial_patchify:
        vae_scale_schedule = [(pt, 2*ph, 2*pw) for pt, ph, pw in scale_schedule]
    else:
        vae_scale_schedule = scale_schedule
    
    kv_compact, lens, cu_seqlens_k, max_seqlen_k = label_B_or_BLT
    if any(np.array(cfg_list) != 1):
        # CFG active: double the batch with an unconditional (or negative) branch
        bs = 2*B
        if not negative_label_B_or_BLT:
            kv_compact_un = kv_compact.clone()
            total = 0
            for le in lens:
                kv_compact_un[total:total+le] = (self.cfg_uncond)[:le]
                total += le
            kv_compact = torch.cat((kv_compact, kv_compact_un), dim=0)
            cu_seqlens_k = torch.cat((cu_seqlens_k, cu_seqlens_k[1:]+cu_seqlens_k[-1]), dim=0)
        else:
            kv_compact_un, lens_un, cu_seqlens_k_un, max_seqlen_k_un = negative_label_B_or_BLT
            kv_compact = torch.cat((kv_compact, kv_compact_un), dim=0)
            cu_seqlens_k = torch.cat((cu_seqlens_k, cu_seqlens_k_un[1:]+cu_seqlens_k[-1]), dim=0)
            max_seqlen_k = max(max_seqlen_k, max_seqlen_k_un)
    else:
        bs = B
    
    kv_compact = self.text_norm(kv_compact)
    sos = cond_BD = self.text_proj_for_sos((kv_compact, cu_seqlens_k, max_seqlen_k))    # sos shape: [2, 4096]
    kv_compact = self.text_proj_for_ca(kv_compact)    # kv_compact shape: [304, 4096]
    ca_kv = kv_compact, cu_seqlens_k, max_seqlen_k
    last_stage = sos.unsqueeze(1).expand(bs, 1, -1) + self.pos_start.expand(bs, 1, -1)
    
    with torch.amp.autocast('cuda', enabled=False):
        cond_BD_or_gss = self.shared_ada_lin(cond_BD.float()).float().contiguous()
    accu_BChw, cur_L, ret = None, 0, []    # current length, list of reconstructed images
    idx_Bl_list, idx_Bld_list = [], []
    
    # turn on KV caching for the duration of the generation loop
    if inference_mode:
        for b in self.unregistered_blocks: (b.sa if isinstance(b, CrossAttnBlock) else b.attn).kv_caching(True)
    else:
        assert self.num_block_chunks > 1
        for block_chunk_ in self.block_chunks:
            for module in block_chunk_.module.module:
                (module.sa if isinstance(module, CrossAttnBlock) else module.attn).kv_caching(True)
    
    # translate cfg_insertion_layer items into absolute layer indices / flags
    abs_cfg_insertion_layers = []
    add_cfg_on_logits, add_cfg_on_probs = False, False
    leng = len(self.unregistered_blocks)
    for item in cfg_insertion_layer:
        if item == 0: # add cfg on logits
            add_cfg_on_logits = True
        elif item == 1: # add cfg on probs
            add_cfg_on_probs = True # todo in the future, we may want to add cfg on logits and probs
        elif item < 0: # determine to add cfg at item-th layer's output
            assert leng+item > 0, f'cfg_insertion_layer: {item} is not valid since len(unregistered_blocks)={self.num_block_chunks}'
            abs_cfg_insertion_layers.append(leng+item)
        else:
            raise ValueError(f'cfg_insertion_layer: {item} is not valid')
    # NOTE(review): add_cfg_on_probs is computed but never used below — confirm.
    
    num_stages_minus_1 = len(scale_schedule)-1
    summed_codes = 0    # running sum of upsampled VAE codes (int 0 until the first scale is added)
    for si, pn in enumerate(scale_schedule):    # si: i-th segment
        cfg = cfg_list[si]
        if si >= trunk_scale:
            break
        cur_L += np.array(pn).prod()
        
        need_to_pad = 0
        attn_fn = None
        if self.use_flex_attn:
            # need_to_pad = (self.pad_to_multiplier - cur_L % self.pad_to_multiplier) % self.pad_to_multiplier
            # if need_to_pad:
            #     last_stage = F.pad(last_stage, (0, 0, 0, need_to_pad))
            attn_fn = self.attn_fn_compile_dict.get(tuple(scale_schedule[:(si+1)]), None)
        
        # assert self.attn_bias_for_masking[:, :, last_L:cur_L, :cur_L].sum() == 0, f'AR with {(self.attn_bias_for_masking[:, :, last_L:cur_L, :cur_L] != 0).sum()} / {self.attn_bias_for_masking[:, :, last_L:cur_L, :cur_L].numel()} mask item'
        layer_idx = 0
        for block_idx, b in enumerate(self.block_chunks):
            # last_stage shape: [4, 1, 2048], cond_BD_or_gss.shape: [4, 1, 6, 2048], ca_kv[0].shape: [64, 2048], ca_kv[1].shape [5], ca_kv[2]: int
            if self.add_lvl_embeding_only_first_block and block_idx == 0:
                last_stage = self.add_lvl_embeding(last_stage, si, scale_schedule, need_to_pad=need_to_pad)
            if not self.add_lvl_embeding_only_first_block:
                last_stage = self.add_lvl_embeding(last_stage, si, scale_schedule, need_to_pad=need_to_pad)
            
            for m in b.module:
                last_stage = m(x=last_stage, cond_BD=cond_BD_or_gss, ca_kv=ca_kv, attn_bias_or_two_vector=None, attn_fn=attn_fn, scale_schedule=scale_schedule, rope2d_freqs_grid=self.rope2d_freqs_grid, scale_ind=si)
                if (cfg != 1) and (layer_idx in abs_cfg_insertion_layers):
                    # print(f'add cfg={cfg} on {layer_idx}-th layer output')
                    # mix cond/uncond halves, then re-duplicate so both branches continue
                    last_stage = cfg * last_stage[:B] + (1-cfg) * last_stage[B:]
                    last_stage = torch.cat((last_stage, last_stage), 0)
                layer_idx += 1
        
        if (cfg != 1) and add_cfg_on_logits:
            # print(f'add cfg on add_cfg_on_logits')
            logits_BlV = self.get_logits(last_stage, cond_BD).mul(1/tau_list[si])
            logits_BlV = cfg * logits_BlV[:B] + (1-cfg) * logits_BlV[B:]
        else:
            logits_BlV = self.get_logits(last_stage[:B], cond_BD[:B]).mul(1/tau_list[si])
        
        if self.use_bit_label:
            # bit-label vocab: reshape to 2-way logits and sample each bit independently
            tmp_bs, tmp_seq_len = logits_BlV.shape[:2]
            logits_BlV = logits_BlV.reshape(tmp_bs, -1, 2)
            idx_Bld = sample_with_top_k_top_p_also_inplace_modifying_logits_(logits_BlV, rng=rng, top_k=top_k or self.top_k, top_p=top_p or self.top_p, num_samples=1)[:, :, 0]
            idx_Bld = idx_Bld.reshape(tmp_bs, tmp_seq_len, -1)
        else:
            idx_Bl = sample_with_top_k_top_p_also_inplace_modifying_logits_(logits_BlV, rng=rng, top_k=top_k or self.top_k, top_p=top_p or self.top_p, num_samples=1)[:, :, 0]
        if vae_type != 0:
            assert returns_vemb
            if si < gt_leak:    # teacher forcing for the first gt_leak scales
                idx_Bld = gt_ls_Bl[si]
            else:
                assert pn[0] == 1
                idx_Bld = idx_Bld.reshape(B, pn[1], pn[2], -1)    # shape: [B, h, w, d] or [B, h, w, 4d]
                if self.apply_spatial_patchify:    # unpatchify operation
                    idx_Bld = idx_Bld.permute(0,3,1,2)    # [B, 4d, h, w]
                    idx_Bld = torch.nn.functional.pixel_shuffle(idx_Bld, 2)    # [B, d, 2h, 2w]
                    idx_Bld = idx_Bld.permute(0,2,3,1)    # [B, 2h, 2w, d]
                idx_Bld = idx_Bld.unsqueeze(1)    # [B, 1, h, w, d] or [B, 1, 2h, 2w, d]
            
            idx_Bld_list.append(idx_Bld)
            codes = vae.quantizer.lfq.indices_to_codes(idx_Bld, label_type='bit_label')    # [B, d, 1, h, w] or [B, d, 1, 2h, 2w]
            if si != num_stages_minus_1:
                # accumulate at full resolution, then resample to the next scale
                # to build the next stage's input
                summed_codes += F.interpolate(codes, size=vae_scale_schedule[-1], mode=vae.quantizer.z_interplote_up)
                last_stage = F.interpolate(summed_codes, size=vae_scale_schedule[si+1], mode=vae.quantizer.z_interplote_up)    # [B, d, 1, h, w] or [B, d, 1, 2h, 2w]
                last_stage = last_stage.squeeze(-3)    # [B, d, h, w] or [B, d, 2h, 2w]
                if self.apply_spatial_patchify:    # patchify operation
                    last_stage = torch.nn.functional.pixel_unshuffle(last_stage, 2)    # [B, 4d, h, w]
                last_stage = last_stage.reshape(*last_stage.shape[:2], -1)    # [B, d, h*w] or [B, 4d, h*w]
                last_stage = torch.permute(last_stage, [0,2,1])    # [B, h*w, d] or [B, h*w, 4d]
            else:
                summed_codes += codes
        else:
            if si < gt_leak:    # teacher forcing
                idx_Bl = gt_ls_Bl[si]
            h_BChw = self.quant_only_used_in_inference[0].embedding(idx_Bl).float()    # BlC
            
            # h_BChw = h_BChw.float().transpose_(1, 2).reshape(B, self.d_vae, scale_schedule[si][0], scale_schedule[si][1])
            h_BChw = h_BChw.transpose_(1, 2).reshape(B, self.d_vae, scale_schedule[si][0], scale_schedule[si][1], scale_schedule[si][2])
            ret.append(h_BChw if returns_vemb != 0 else idx_Bl)
            idx_Bl_list.append(idx_Bl)
            if si != num_stages_minus_1:
                accu_BChw, last_stage = self.quant_only_used_in_inference[0].one_step_fuse(si, num_stages_minus_1+1, accu_BChw, h_BChw, scale_schedule)
        
        if si != num_stages_minus_1:
            # embed next scale's input tokens and duplicate for the CFG branch
            last_stage = self.word_embed(self.norm0_ve(last_stage))
            last_stage = last_stage.repeat(bs//B, 1, 1)
    
    # turn KV caching back off
    if inference_mode:
        for b in self.unregistered_blocks: (b.sa if isinstance(b, CrossAttnBlock) else b.attn).kv_caching(False)
    else:
        assert self.num_block_chunks > 1
        for block_chunk_ in self.block_chunks:
            for module in block_chunk_.module.module:
                (module.sa if isinstance(module, CrossAttnBlock) else module.attn).kv_caching(False)
    
    if not ret_img:
        return ret, idx_Bl_list, []
    
    if vae_type != 0:
        img = vae.decode(summed_codes.squeeze(-3))
    else:
        img = vae.viz_from_ms_h_BChw(ret, scale_schedule=scale_schedule, same_shape=True, last_one=True)
    
    # [-1, 1] -> [0, 255] uint8, NCHW -> NHWC; flip reverses the channel order
    img = (img + 1) / 2
    img = img.permute(0, 2, 3, 1).mul_(255).to(torch.uint8).flip(dims=(3,))
    return ret, idx_Bl_list, img
|
| 649 |
+
|
| 650 |
+
@for_visualize
def vis_key_params(self, ep):
    """Visualization hook for key parameters; intentionally a no-op."""
    return None
|
| 653 |
+
|
| 654 |
+
def load_state_dict(self, state_dict: Dict[str, Any], strict=False, assign=False):
    """
    Load a checkpoint, adapting shape-dependent entries first.

    - cfg_uncond entries are resized to this model's unconditional-embedding
      length (checkpoint rows are kept, missing rows come from the live tensor).
    - Geometry buffers are always taken from the live module, never the checkpoint.
    """
    for key in state_dict:
        if 'cfg_uncond' in key:
            ckpt_val, live_val = state_dict[key], self.cfg_uncond.data
            keep = min(ckpt_val.shape[0], live_val.shape[0])
            if keep == ckpt_val.shape[0]:
                # checkpoint is shorter (or equal): pad with the live tail
                state_dict[key] = torch.cat((ckpt_val.to(device=live_val.device, dtype=live_val.dtype), live_val[keep:]))
            else:
                # checkpoint is longer: truncate
                state_dict[key] = ckpt_val[:keep]
    
    # these buffers depend on the current resolution/schedule, so always use ours
    for buf_name in ('lvl_1L', 'attn_bias_for_masking', 'Infinity_visible_kvlen', 'Infinity_invisible_qlen'):
        state_dict.pop(buf_name, None)
        if hasattr(self, buf_name):
            state_dict[buf_name] = getattr(self, buf_name)
    
    return super().load_state_dict(state_dict=state_dict, strict=strict, assign=assign)
|
| 670 |
+
|
| 671 |
+
def special_init(
    self,
    aln_init: float,
    aln_gamma_init: float,
    scale_head: float,
    scale_proj: int,
):
    """
    Post-construction weight rescaling for stable training.

    aln_init: multiplier for AdaLN scale/shift rows (and the head's AdaLN).
    aln_gamma_init: multiplier for AdaLN gamma rows.
    scale_head: multiplier for the output head weights; skipped when negative.
    scale_proj: 1 -> depth-uniform 1/sqrt(2*depth) residual-projection scaling.
        NOTE(review): the per-block 1/sqrt(2*(1+block_idx)) alternative is
        computed but only applied when scale_proj == 1 — confirm the other
        schedule is intentionally unused.
    """
    # init head's norm
    if isinstance(self.head_nm, AdaLNBeforeHead):
        self.head_nm.ada_lin[-1].weight.data.mul_(aln_init)    # there's no gamma for head
        if hasattr(self.head_nm.ada_lin[-1], 'bias') and self.head_nm.ada_lin[-1].bias is not None:
            self.head_nm.ada_lin[-1].bias.data.zero_()
    
    # init head's proj
    if scale_head >= 0:
        if isinstance(self.head, nn.Linear):
            self.head.weight.data.mul_(scale_head)
            self.head.bias.data.zero_()
        elif isinstance(self.head, nn.Sequential):
            self.head[-1].weight.data.mul_(scale_head)
            self.head[-1].bias.data.zero_()
    
    depth = len(self.unregistered_blocks)
    for block_idx, sab in enumerate(self.unregistered_blocks):
        sab: Union[SelfAttnBlock, CrossAttnBlock]
        # init proj
        scale = 1 / math.sqrt(2*depth if scale_proj == 1 else 2*(1 + block_idx))
        if scale_proj == 1:
            if self.t2i:
                # text-to-image blocks have separate self- and cross-attention projections
                sab.sa.proj.weight.data.mul_(scale)
                sab.ca.proj.weight.data.mul_(scale)
            else:
                sab.attn.proj.weight.data.mul_(scale)
            sab.ffn.fc2.weight.data.mul_(scale)
        # if sab.using_swiglu:
        #     nn.init.ones_(sab.ffn.fcg.bias)
        #     nn.init.trunc_normal_(sab.ffn.fcg.weight, std=1e-5)
        
        # init ada_lin
        if hasattr(sab, 'ada_lin'):
            lin = sab.ada_lin[-1]
            lin.weight.data[:2*self.C].mul_(aln_gamma_init)    # init gamma
            lin.weight.data[2*self.C:].mul_(aln_init)    # init scale and shift
            if hasattr(lin, 'bias') and lin.bias is not None:
                lin.bias.data.zero_()
        elif hasattr(sab, 'ada_gss'):
            sab.ada_gss.data[:, :, :2, :].mul_(aln_gamma_init)    # init gamma
            sab.ada_gss.data[:, :, 2:, :].mul_(aln_init)    # init scale and shift
|
| 719 |
+
|
| 720 |
+
def extra_repr(self):
    """Extra info appended to this module's repr."""
    rate = self.drop_path_rate
    return f'drop_path_rate={rate}'
|
| 722 |
+
|
| 723 |
+
def get_layer_id_and_scale_exp(self, para_name: str):
    """Per-layer LR scaling lookup; not supported by this model."""
    raise NotImplementedError
|
| 725 |
+
|
| 726 |
+
|
| 727 |
+
def sample_with_top_k_top_p_also_inplace_modifying_logits_(logits_BlV: torch.Tensor, top_k: int = 0, top_p: float = 0.0, rng=None, num_samples=1) -> torch.Tensor:  # return idx, shaped (B, l)
    """
    Top-k / top-p (nucleus) filtering followed by multinomial sampling.

    Filters `logits_BlV` IN PLACE (masked entries become -inf).
    A negative `num_samples` requests sampling without replacement.
    Returns sampled indices shaped (B, l, |num_samples|).
    """
    B, l, V = logits_BlV.shape
    if top_k > 0:
        k = min(top_k, V)
        # mask everything strictly below the k-th largest logit
        kth_floor = logits_BlV.topk(k, largest=True, sorted=False, dim=-1)[0].amin(dim=-1, keepdim=True)
        logits_BlV.masked_fill_(logits_BlV < kth_floor, -torch.inf)
    if top_p > 0:
        asc_logits, asc_idx = logits_BlV.sort(dim=-1, descending=False)
        # drop the low-probability prefix whose cumulative mass is <= 1 - top_p
        drop_sorted = asc_logits.softmax(dim=-1).cumsum_(dim=-1) <= (1 - top_p)
        drop_sorted[..., -1:] = False  # never drop the most likely token
        logits_BlV.masked_fill_(drop_sorted.scatter(asc_idx.ndim - 1, asc_idx, drop_sorted), -torch.inf)
    # sample (multinomial only accepts 2D input, hence the view round-trip)
    replacement = num_samples >= 0
    num_samples = abs(num_samples)
    probs_2d = logits_BlV.softmax(dim=-1).view(-1, V)
    return torch.multinomial(probs_2d, num_samples=num_samples, replacement=replacement, generator=rng).view(B, l, num_samples)
|
| 742 |
+
|
| 743 |
+
def sampling_with_top_k_top_p_also_inplace_modifying_probs_(probs_BlV: torch.Tensor, top_k: int = 0, top_p: float = 0.0, rng=None, num_samples=1) -> torch.Tensor:  # return idx, shaped (B, l)
    """
    Like the logits variant, but operates on probabilities (masked entries become 0).

    Filters `probs_BlV` IN PLACE. A negative `num_samples` requests sampling
    without replacement. Returns indices shaped (B, l, |num_samples|).

    NOTE(review): the top-p branch applies softmax to the (already normalized)
    probabilities before the cumulative sum, mirroring the logits variant —
    confirm this double normalization is intended.
    """
    B, l, V = probs_BlV.shape
    if top_k > 0:
        k = min(top_k, V)
        # zero everything strictly below the k-th largest probability
        kth_floor = probs_BlV.topk(k, largest=True, sorted=False, dim=-1)[0].amin(dim=-1, keepdim=True)
        probs_BlV.masked_fill_(probs_BlV < kth_floor, 0)
    if top_p > 0:
        asc_probs, asc_idx = probs_BlV.sort(dim=-1, descending=False)
        drop_sorted = asc_probs.softmax(dim=-1).cumsum_(dim=-1) <= (1 - top_p)
        drop_sorted[..., -1:] = False  # never drop the most likely token
        probs_BlV.masked_fill_(drop_sorted.scatter(asc_idx.ndim - 1, asc_idx, drop_sorted), 0)
    # renormalize, then sample (multinomial only accepts 2D input)
    probs_BlV = probs_BlV / probs_BlV.sum(-1, keepdims=True)
    replacement = num_samples >= 0
    num_samples = abs(num_samples)
    return torch.multinomial(probs_BlV.view(-1, V), num_samples=num_samples, replacement=replacement, generator=rng).view(B, l, num_samples)
|
| 759 |
+
|
| 760 |
+
|
| 761 |
+
def get_params_num(d, w, mlp):
    """
    Rough parameter count for an Infinity transformer config, as a '<x>.yyB' string.

    d: depth (number of blocks); w: width (embed dim); mlp: MLP expansion ratio.
    """
    hidden = round(mlp * w / 256) * 256  # MLP hidden dim, rounded to a multiple of 256
    total = d * (w**2 * 8 + w * hidden * 2)  # per-block sa+ca, mlp
    total += w**2 * 6   # saln
    total += 4096 * w   # pred
    total += 32 * w     # we
    
    Ct5 = 4096          # text-encoder width
    total += Ct5 * w * 4       # T5 attn pool
    total += Ct5 * w + w * w   # T5 mlp
    return f'{total/1e9:.2f}B'
|
| 772 |
+
|
| 773 |
+
|
| 774 |
+
# kwargs injected by timm's create_model that the Infinity constructor does not accept;
# the factory functions below strip these before forwarding kwargs.
TIMM_KEYS = {'img_size', 'pretrained', 'pretrained_cfg', 'pretrained_cfg_overlay', 'global_pool'}
|
| 775 |
+
|
| 776 |
+
@register_model
def infinity_2b(depth=32, embed_dim=2048, num_heads=2048//128, drop_path_rate=0.1, **kwargs):
    """2B-parameter Infinity (32 blocks, width 2048)."""
    cleaned = {k: v for k, v in kwargs.items() if k not in TIMM_KEYS}
    return Infinity(depth=depth, embed_dim=embed_dim, num_heads=num_heads, mlp_ratio=4, drop_path_rate=drop_path_rate, **cleaned)
|
| 778 |
+
|
| 779 |
+
@register_model
def infinity_20b(depth=58, embed_dim=4608, num_heads=4608//128, drop_path_rate=0.25, **kwargs):
    """20B-parameter Infinity (58 blocks, width 4608)."""
    cleaned = {k: v for k, v in kwargs.items() if k not in TIMM_KEYS}
    return Infinity(depth=depth, embed_dim=embed_dim, num_heads=num_heads, mlp_ratio=4, drop_path_rate=drop_path_rate, **cleaned)
|
| 781 |
+
|
| 782 |
+
# model configuration for scaling Infinity transformer
|
| 783 |
+
@register_model
def infinity_layer12(depth=12, embed_dim=768, num_heads=8, drop_path_rate=0.1, **kwargs):
    """Scaling-study config: 12 blocks, width 768."""
    cleaned = {k: v for k, v in kwargs.items() if k not in TIMM_KEYS}
    return Infinity(depth=depth, embed_dim=embed_dim, num_heads=num_heads, mlp_ratio=4, drop_path_rate=drop_path_rate, **cleaned)
|
| 786 |
+
@register_model
def infinity_layer16(depth=16, embed_dim=1152, num_heads=12, drop_path_rate=0.1, **kwargs):
    """Scaling-study config: 16 blocks, width 1152."""
    cleaned = {k: v for k, v in kwargs.items() if k not in TIMM_KEYS}
    return Infinity(depth=depth, embed_dim=embed_dim, num_heads=num_heads, mlp_ratio=4, drop_path_rate=drop_path_rate, **cleaned)
|
| 789 |
+
@register_model
def infinity_layer24(depth=24, embed_dim=1536, num_heads=16, drop_path_rate=0.1, **kwargs):
    """Scaling-study config: 24 blocks, width 1536."""
    cleaned = {k: v for k, v in kwargs.items() if k not in TIMM_KEYS}
    return Infinity(depth=depth, embed_dim=embed_dim, num_heads=num_heads, mlp_ratio=4, drop_path_rate=drop_path_rate, **cleaned)
|
| 792 |
+
@register_model
def infinity_layer32(depth=32, embed_dim=2080, num_heads=20, drop_path_rate=0.1, **kwargs):
    """Scaling-study config: 32 blocks, width 2080."""
    cleaned = {k: v for k, v in kwargs.items() if k not in TIMM_KEYS}
    return Infinity(depth=depth, embed_dim=embed_dim, num_heads=num_heads, mlp_ratio=4, drop_path_rate=drop_path_rate, **cleaned)
|
| 795 |
+
@register_model
def infinity_layer40(depth=40, embed_dim=2688, num_heads=24, drop_path_rate=0.1, **kwargs):
    """Scaling-study config: 40 blocks, width 2688."""
    cleaned = {k: v for k, v in kwargs.items() if k not in TIMM_KEYS}
    return Infinity(depth=depth, embed_dim=embed_dim, num_heads=num_heads, mlp_ratio=4, drop_path_rate=drop_path_rate, **cleaned)
|
| 798 |
+
@register_model
def infinity_layer48(depth=48, embed_dim=3360, num_heads=28, drop_path_rate=0.1, **kwargs):
    """Scaling-study config: 48 blocks, width 3360."""
    cleaned = {k: v for k, v in kwargs.items() if k not in TIMM_KEYS}
    return Infinity(depth=depth, embed_dim=embed_dim, num_heads=num_heads, mlp_ratio=4, drop_path_rate=drop_path_rate, **cleaned)
|