# diffsynth_for_g3/diffsynth/models/qwen_image_extractor.py
# (uploaded via huggingface_hub)
import torch
import torch.nn as nn
from einops import rearrange
from timm.models.vision_transformer import PatchEmbed
# --- 1. RoPE helper functions borrowed from Qwen2.5-VL ---
def rotate_half(x):
    """Swap-and-negate halves of the last dim: (x1, x2) -> (-x2, x1)."""
    half = x.shape[-1] // 2
    return torch.cat((-x[..., half:], x[..., :half]), dim=-1)


def apply_multimodal_rotary_pos_emb(
    q: torch.Tensor,
    k: torch.Tensor,
    cos: torch.Tensor,
    sin: torch.Tensor,
    mrope_section: list[int],
    unsqueeze_dim: int = 2
) -> tuple[torch.Tensor, torch.Tensor]:
    """Apply 3D (T, H, W) multimodal RoPE to query/key, Qwen2.5-VL style.

    Args:
        q: query tensor, (B, N, H, Hc).
        k: key tensor, (B, N, H, Hc).
        cos: cosine table, (3, B, N, Hc); axis 0 stacks the T/H/W tables.
        sin: sine table, (3, B, N, Hc).
        mrope_section: per-axis channel widths, e.g. [16, 24, 24] for
            Qwen2.5-VL's head_dim 3584/28=128, or [8, 12, 12] for
            head_dim 1024/16=64 here. 2 * sum(mrope_section) must equal Hc.
        unsqueeze_dim: where to insert the broadcast dim on cos/sin.
            We use 2 because q/k are laid out (B, N, H, Hc), so cos/sin
            become (B, N, 1, Hc) and broadcast over the head axis.
            (Qwen-VL's original uses 1 for its (B, H, N, Hc) layout.)

    Returns:
        (q_embed, k_embed), same shapes as q and k.
    """
    # Each half of Hc is sliced into T/H/W sections, hence the doubled widths.
    widths = mrope_section * 2

    def _merge(table: torch.Tensor) -> torch.Tensor:
        # Select chunk i from rope axis (i mod 3), rebuild a (B, N, Hc) table,
        # then add the broadcast dim for the heads.
        chunks = table.split(widths, dim=-1)
        flat = torch.cat([c[i % 3] for i, c in enumerate(chunks)], dim=-1)
        return flat.unsqueeze(unsqueeze_dim)

    cos_emb = _merge(cos)
    sin_emb = _merge(sin)

    # (B, N, H, Hc) * (B, N, 1, Hc)
    q_embed = (q * cos_emb) + (rotate_half(q) * sin_emb)
    k_embed = (k * cos_emb) + (rotate_half(k) * sin_emb)
    return q_embed, k_embed
# --- 2. 3D RoPE module (based on the Qwen2.5-VL text RoPE) ---
class Multimodal3DRotaryEmbedding(nn.Module):
    """3D rotary embedding as used by the Qwen2.5-VL text model.

    Consumes (T, H, W) position ids and produces a cos/sin table per axis;
    a single shared inverse-frequency vector serves all three axes.
    """
    inv_freq: torch.Tensor

    def __init__(self, head_dim: int, theta: float = 10000.0, device=None):
        """head_dim: per-head channel count (Hc); theta: RoPE base frequency."""
        super().__init__()
        self.head_dim = head_dim
        self.theta = theta
        # inv_freq has shape [head_dim // 2] and is reused for T, H and W.
        exponents = torch.arange(0, self.head_dim, 2, dtype=torch.int64)
        exponents = exponents.to(device=device, dtype=torch.float) / self.head_dim
        self.register_buffer("inv_freq", 1.0 / (self.theta ** exponents), persistent=False)
        self.attention_scaling = 1.0

    def forward(self, x: torch.Tensor, position_ids: torch.Tensor):
        """
        x: any tensor — only its device and dtype are consulted.
        position_ids: [3, B, N] integer (T, H, W) coordinates.
        Returns (cos, sin), each [3, B, N, Hc], cast to x.dtype.
        """
        # [Hc/2] -> [3, B, Hc/2, 1]
        freq = self.inv_freq[None, None, :, None].float().expand(3, position_ids.shape[1], -1, 1)
        # [3, B, N] -> [3, B, 1, N]
        pos = position_ids.float().unsqueeze(2)
        dev = x.device.type
        dev = dev if isinstance(dev, str) and dev != "mps" else "cpu"
        with torch.autocast(device_type=dev, enabled=False):  # force float32 math
            # ([3, B, Hc/2, 1] @ [3, B, 1, N]).transpose -> [3, B, N, Hc/2]
            freqs = (freq @ pos).transpose(2, 3)
            # Duplicate to fill the full head dim: [3, B, N, Hc]
            table = torch.cat((freqs, freqs), dim=-1)
            cos = table.cos() * self.attention_scaling
            sin = table.sin() * self.attention_scaling
        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
# --- 3. Original building blocks (RMSNorm, FeedForward) ---
class RMSNorm(nn.Module):
    """Root-mean-square layer norm (no mean subtraction), computed in fp32."""

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        # Learnable per-channel gain, initialised to identity.
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        orig_dtype = hidden_states.dtype
        x = hidden_states.to(torch.float32)
        mean_sq = x.pow(2).mean(-1, keepdim=True)
        x = x * torch.rsqrt(mean_sq + self.variance_epsilon)
        return self.weight * x.to(orig_dtype)
class FeedForward(nn.Module):
    """Two-layer MLP: Linear -> GELU -> Linear, back to the input width."""

    def __init__(
        self,
        dim: int,
        hidden_dim: int,
    ):
        super().__init__()
        # Attribute names (fc1/act/fc2) are part of the state_dict layout.
        self.fc1 = nn.Linear(dim, hidden_dim)
        self.act = nn.GELU()
        self.fc2 = nn.Linear(hidden_dim, dim)

    def forward(self, x):
        return self.fc2(self.act(self.fc1(x)))
# --- 4. Modified Attention and Block modules ---
class Attention(nn.Module):
    """Multi-head self-attention with 3D multimodal RoPE (Qwen2.5-VL style).

    q/k receive per-head normalisation and 3D rotary embeddings before SDPA;
    v is left untouched.
    """

    def __init__(
        self,
        dim: int,
        num_heads: int = 8,
        qkv_bias: bool = False,
        qk_norm: bool = True,
        attn_drop: float = 0.,
        proj_drop: float = 0.,
        norm_layer: nn.Module = RMSNorm,
    ) -> None:
        super().__init__()
        assert dim % num_heads == 0, "dim should be divisible by num_heads"
        self.dim = dim
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.scale = self.head_dim ** -0.5
        # Hard-coded T/H/W channel split. Qwen2.5-VL uses head_dim 3584/28=128
        # with [16, 24, 24]; here head_dim is 1024/16=64, so [8, 12, 12]
        # (2 * sum must equal head_dim).
        self.mrope_section = [8, 12, 12]
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
        self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
        self.attn_drop = attn_drop
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x: torch.Tensor, pos: tuple[torch.Tensor, torch.Tensor], mask=None) -> torch.Tensor:
        """
        Args:
            x: [B, N_total, C] token sequence (N_total = image patches + query tokens).
            pos: (cos, sin) RoPE tables, each [3, B, N_total, head_dim],
                covering the whole sequence.
            mask: optional attention mask forwarded to SDPA.

        Returns:
            [B, N_total, C] attended features.
        """
        B, N_total, C = x.shape
        qkv = self.qkv(x).reshape(B, N_total, 3, self.num_heads, C // self.num_heads).permute(2, 0, 1, 3, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # each: B, N_total, H, Hc
        q = self.q_norm(q)
        k = self.k_norm(k)
        # Apply 3D RoPE to the full sequence (images + query) at once.
        cos, sin = pos
        q, k = apply_multimodal_rotary_pos_emb(
            q,
            k,
            cos=cos,
            sin=sin,
            mrope_section=self.mrope_section,
            unsqueeze_dim=2
        )
        # Reshape for SDPA: B, H, N_total, Hc
        q = q.transpose(1, 2)
        k = k.transpose(1, 2)
        v = v.transpose(1, 2)
        # NOTE: the flash-attention SDPA backend rejects a non-None attn_mask,
        # so we rely on PyTorch's default backend selection here.
        # FIX: apply attention dropout only while training; the original passed
        # dropout_p unconditionally, which would also drop at eval time.
        x = torch.nn.functional.scaled_dot_product_attention(
            q, k, v, attn_mask=mask,
            dropout_p=self.attn_drop if self.training else 0.0,
        )
        x = x.transpose(1, 2).reshape(B, N_total, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class Block(nn.Module):
    """Pre-norm transformer block: RMSNorm -> Attention -> RMSNorm -> MLP."""

    def __init__(self, hidden_size, num_heads, mlp_ratio=4.0):
        super().__init__()
        self.norm1 = RMSNorm(hidden_size, eps=1e-6)
        # Uses the RoPE-aware Attention class defined above.
        self.attn = Attention(hidden_size, num_heads=num_heads, qkv_bias=False)
        self.norm2 = RMSNorm(hidden_size, eps=1e-6)
        self.mlp = FeedForward(hidden_size, int(hidden_size * mlp_ratio))

    def forward(self, x, pos, mask=None):
        # pos = (cos, sin) RoPE tables, forwarded to the attention layer.
        x = x + self.attn(self.norm1(x), pos, mask=mask)
        x = x + self.mlp(self.norm2(x))
        return x
class QwenImageExtractor(nn.Module):
    """Extract a fixed-length bank of query-token features from an image pair.

    Pipeline: patch-embed two latent images, append `query_length` learnable
    query tokens, run the joint sequence through `num_layers` 3D-RoPE
    transformer blocks, then project the query tokens to `output_dim`.

    NOTE(review): output_dim defaults to 3584, presumably the Qwen-VL hidden
    size — confirm against the consumer of these features.
    """

    def __init__(self, hidden_size=1024,num_layers=16,num_heads=16,query_length=256,patch_size=2,in_chans=16,output_dim=3584):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.head_dim = hidden_size // num_heads  # 1024 // 16 = 64 with defaults
        self.output_dim=output_dim
        # img_size=None + strict_img_size=False let timm's PatchEmbed accept
        # variable input sizes.
        self.patch_embed = PatchEmbed(
            img_size = None,
            patch_size = patch_size,
            in_chans = in_chans,
            embed_dim = hidden_size,
            strict_img_size=False
        )
        self.query_length=query_length
        # Learnable query tokens appended after the image patch tokens.
        self.query = nn.Parameter(torch.randn(1, self.query_length, self.hidden_size))
        # Transformer blocks with 3D-RoPE attention.
        self.blocks = nn.ModuleList([Block(hidden_size, num_heads) for _ in range(self.num_layers)])
        self.norm2 = nn.LayerNorm(hidden_size)
        self.output_proj = nn.Linear(hidden_size, output_dim)
        # 3D RoPE table generator; takes the full head_dim (not head_dim // 2).
        self.rotary_pos_emb = Multimodal3DRotaryEmbedding(self.head_dim)
        # Cache for dynamically computed 3D position ids, keyed by patch-grid shape.
        self.cached_pos_ids_shape = None
        self.cached_pos_ids = None

    def _compute_dynamic_pos_ids(self, H: int, W: int, device: torch.device) -> torch.Tensor:
        """Compute (and cache) 3D RoPE position ids for an H x W input.

        Returns [3, L_total] with L_total = 2 * grid_h * grid_w + query_length;
        rows 0/1/2 are the T/H/W coordinates respectively.
        """
        patch_size_h, patch_size_w = self.patch_embed.patch_size
        grid_h = H // patch_size_h
        grid_w = W // patch_size_w
        current_shape = (grid_h, grid_w)
        # Cache hit: reuse, moving to the requested device if needed.
        if current_shape == self.cached_pos_ids_shape:
            if self.cached_pos_ids.device == device:
                return self.cached_pos_ids
            else:
                self.cached_pos_ids = self.cached_pos_ids.to(device)
                return self.cached_pos_ids
        # --- Cache miss: recompute ---
        num_patches_per_image = grid_h * grid_w
        # 2D spatial coordinates, flattened row-major: [num_patches_per_image] each.
        hpos_ids = torch.arange(grid_h, device=device).unsqueeze(1).expand(-1, grid_w).flatten()
        wpos_ids = torch.arange(grid_w, device=device).unsqueeze(0).expand(grid_h, -1).flatten()
        # Image 1 gets temporal coordinate T=0.
        tpos_ids_0 = torch.zeros(num_patches_per_image, device=device, dtype=torch.long)
        # [3, N_patches]
        pos_ids_0 = torch.stack([tpos_ids_0, hpos_ids, wpos_ids], dim=0)
        # Image 2 gets temporal coordinate T=1.
        tpos_ids_1 = torch.ones(num_patches_per_image, device=device, dtype=torch.long)
        # [3, N_patches]
        pos_ids_1 = torch.stack([tpos_ids_1, hpos_ids, wpos_ids], dim=0)
        # Concatenate both images: [3, 2 * N_patches]
        img_pos_ids = torch.cat([pos_ids_0, pos_ids_1], dim=1)
        # Query tokens continue past the image coordinates: query token i gets
        # the same id (start_idx + i) on all three axes.
        start_idx = max(grid_h, grid_w)
        query_indices = torch.arange(start_idx, start_idx + self.query_length, device=device, dtype=torch.long)
        # [3, query_length]
        query_pos_ids = query_indices.unsqueeze(0).expand(3, -1)
        all_pos_ids = torch.cat([img_pos_ids, query_pos_ids], dim=1)
        # Update the cache.
        self.cached_pos_ids_shape = current_shape
        self.cached_pos_ids = all_pos_ids
        return all_pos_ids

    def forward(self, x):
        """
        x: [B, 2, C, H, W] — exactly two images per sample.
        Returns [B, query_length, output_dim].
        """
        B, N, C, H, W = x.shape
        assert N == 2, "This model is hardcoded for N=2"
        # 3D position ids for the full sequence: [3, L_total].
        pos_ids = self._compute_dynamic_pos_ids(H, W, device=x.device)
        # Broadcast over the batch: [3, L_total] -> [3, B, L_total].
        pos_ids_batch = pos_ids.unsqueeze(1).expand(-1, B, -1)
        # (cos, sin) tuple; each table is [3, B, L_total, head_dim].
        pos_tuple = self.rotary_pos_emb(x, pos_ids_batch)
        x = rearrange(x, "B N C H W -> (B N) C H W")
        x = self.patch_embed(x)
        x = rearrange(x, "(B N) l d -> B (N l) d", B=B, N=N) # N=2
        # Sequence layout: [img1 patches, img2 patches, query tokens].
        x = torch.cat([x, self.query.repeat(B, 1, 1)], dim=1)
        # Sanity check: sequence length must match the RoPE table length.
        if x.shape[1] != pos_ids.shape[1]:
            raise ValueError(
                f"Total sequence length mismatch. "
                f"Input tensor 'x' length is {x.shape[1]}, "
                f"but calculated 3D RoPE length is {pos_ids.shape[1]}. "
                f"Check H/W ({H}/{W}) vs patch_size ({self.patch_embed.patch_size})."
            )
        for block in self.blocks:
            # Pass the shared (cos, sin) RoPE tables to every block.
            x = block(x, pos=pos_tuple)
        # Keep only the trailing query tokens, then normalise and project.
        x=x[:, -self.query_length:, :]
        x = self.norm2(x)
        x = self.output_proj(x)
        return x
# --- Scratch: build the model and export its weights (kept for reference) ---
# model = QwenImageExtractor()
# # print(model)
# # Only list layers that carry parameters:
# # for name, param in model.named_parameters():
# #     if param.requires_grad:
# #         print(name)
# # Save the model in safetensors format:
# import torch
# from safetensors.torch import save_file
# state_dict = model.state_dict()
# save_file(state_dict, "/root/workspace/lzh/my-DiffSynth-Studio/new_module/extractor.safetensors")