# titans_NPC/examples/qwen_mac_integration.py
"""
MAC (Memory-as-Context) 结构集成到 Qwen 的详细实现
=== MAC 结构核心原理 ===
1. 将长序列分成多个 segment(如每 128 个 token 一个 segment)
2. 在每个 segment 的【开头】插入 longterm_mem_tokens(如 16 个)
3. 这些 memory tokens 会参与 attention 计算
4. 使用 NeuralMemory 模块来动态更新这些 memory tokens 的内容
原始序列: [t1, t2, t3, ..., t128, t129, ..., t256, ...]
MAC 序列: [M1..M16, t1...t128, M1..M16, t129...t256, ...]
↑ ↑
memory tokens memory tokens
=== Qwen2 架构 ===
Qwen2DecoderLayer:
├── input_layernorm (RMSNorm)
├── self_attn (Qwen2Attention/Qwen2FlashAttention2)
│ ├── q_proj, k_proj, v_proj
│ ├── RoPE (rotary positional embedding)
│ └── o_proj
├── post_attention_layernorm (RMSNorm)
└── mlp (Qwen2MLP: gate_proj, up_proj, down_proj with SiLU)
我们需要在特定层添加 NeuralMemory 模块
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from typing import Optional, Tuple, List, Dict, Any
from copy import deepcopy
from functools import partial
from einops import rearrange, repeat, pack, unpack
# Titans components
from titans_pytorch import NeuralMemory, MemoryMLP
from titans_pytorch.neural_memory import NeuralMemState
# ============================================================================
# Helper functions
# ============================================================================
def exists(v):
return v is not None
def default(v, d):
return v if exists(v) else d
def divisible_by(num, den):
return (num % den) == 0
def round_up_multiple(seq, mult):
return ((seq + mult - 1) // mult) * mult
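
# A tiny, illustrative sanity check (not used by the model itself) of the MAC
# token-layout arithmetic described in the module docstring: with `num_persist`
# global tokens and `num_longterm` memory tokens prepended to every
# `segment_len`-token segment, the expanded sequence length is
# num_persist + num_segments * (num_longterm + segment_len).
def _mac_expanded_len(seq_len, segment_len=128, num_longterm=16, num_persist=4):
    num_segments = (seq_len + segment_len - 1) // segment_len
    return num_persist + num_segments * (num_longterm + segment_len)

# e.g. a 256-token sequence with the example values above becomes
# 4 + 2 * (16 + 128) = 292 tokens after memory-token insertion.
assert _mac_expanded_len(256) == 292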
# ============================================================================
# MAC-style Qwen implementation
# ============================================================================
class QwenMACTransformer(nn.Module):
"""
将 MAC 结构应用到 Qwen 模型的完整实现
架构图:
Input IDs
┌─────────────────┐
│ Token Embed │
└────────┬────────┘
┌─────────────────────────────────────────┐
│ 插入 Memory Tokens 到每个 Segment 开头 │
│ [M1..Mn, t1..t_seg, M1..Mn, ...] │
└────────┬────────────────────────────────┘
╔═════════════════════════════════════════╗
║ Qwen Decoder Layer 1 ║
║ ┌────────────────────────────────┐ ║
║ │ RMSNorm → Self-Attention → Add │ ║
║ └────────────────────────────────┘ ║
║ ┌────────────────────────────────┐ ║
║ │ RMSNorm → MLP → Add │ ║
║ └────────────────────────────────┘ ║
╚═════════════════════════════════════════╝
╔═════════════════════════════════════════╗
║ Qwen Decoder Layer 2 (with Memory) ║
║ ┌────────────────────────────────┐ ║
║ │ RMSNorm → Self-Attention → Add │ ║
║ └────────────────────────────────┘ ║
║ ┌──────────────────────────────────┐ ║
║ │ ★ NeuralMemory 记忆增强 ★ │ ║
║ │ retrieved = mem(hidden_states) │ ║
║ │ hidden += gate * retrieved │ ║
║ └──────────────────────────────────┘ ║
║ ┌────────────────────────────────┐ ║
║ │ RMSNorm → MLP → Add │ ║
║ └────────────────────────────────┘ ║
╚═════════════════════════════════════════╝
... 更多层 ...
┌─────────────────┐
│ Final RMSNorm │
└────────┬────────┘
┌─────────────────┐
│ LM Head │
└────────┬────────┘
Logits
"""
def __init__(
self,
qwen_model,
        # === Segment configuration ===
        segment_len: int = 128,                 # length of each segment
        num_longterm_mem_tokens: int = 16,      # number of memory tokens at the start of each segment
        num_persist_mem_tokens: int = 4,        # number of global persistent memory tokens
        # === NeuralMemory configuration ===
        neural_memory_layers: Tuple[int, ...] = (2, 4, 6),  # which layers get a memory module
        memory_chunk_size: int = 64,            # chunk size of the memory module
        memory_batch_size: int = 128,           # batch size for memory updates
        memory_depth: int = 2,                  # depth of the memory MLP
        # === Other configuration ===
        dim_head: int = 64,
        num_heads: int = None,                  # defaults to a value derived from the model config
        use_momentum: bool = True,
        gate_memory_output: bool = False,       # whether to gate the attention output with the memory
):
super().__init__()
        # Keep a reference to the original Qwen model
self.qwen = qwen_model
self.config = qwen_model.config
        # Read the model dimensions from the config
self.hidden_size = self.config.hidden_size
self.num_layers = self.config.num_hidden_layers
num_heads = default(num_heads, self.hidden_size // dim_head)
        # Segment configuration
self.segment_len = segment_len
self.num_longterm_mem_tokens = num_longterm_mem_tokens
self.num_persist_mem_tokens = num_persist_mem_tokens
self.total_segment_len = segment_len + num_longterm_mem_tokens
        # =====================================================================
        # Memory tokens (the core of MAC!)
        # =====================================================================
        # Persistent memory tokens - prepended to the whole sequence and shared
        # by all segments; they store global context information.
self.persist_mem_tokens = nn.Parameter(
torch.randn(num_persist_mem_tokens, self.hidden_size) * 0.02
)
        # Long-term memory tokens - inserted at the start of every segment.
        # Their contents are dynamically updated by the NeuralMemory modules.
self.longterm_mem_tokens = nn.Parameter(
torch.randn(num_longterm_mem_tokens, self.hidden_size) * 0.02
)
        # =====================================================================
        # NeuralMemory modules
        # =====================================================================
self.neural_memory_layers = neural_memory_layers
self.gate_memory_output = gate_memory_output
        # Create one NeuralMemory per selected layer
        self.neural_memories = nn.ModuleDict()
        self.memory_projections = nn.ModuleDict()  # projection layers (reserved, currently unused)
        self.memory_gates = nn.ModuleDict()        # gating layers
        # Template for the memory network (deep-copied per layer)
memory_model_template = MemoryMLP(
dim=dim_head,
depth=memory_depth,
expansion_factor=2.0
)
for layer_idx in neural_memory_layers:
layer_key = str(layer_idx)
            # NeuralMemory module for this layer
self.neural_memories[layer_key] = NeuralMemory(
dim=self.hidden_size,
chunk_size=memory_chunk_size,
batch_size=memory_batch_size,
dim_head=dim_head,
heads=num_heads,
model=deepcopy(memory_model_template),
momentum=use_momentum,
momentum_order=1,
qk_rmsnorm=True,
pre_rmsnorm=True,
default_step_transform_max_lr=0.1,
)
            # Gating layer - controls how strongly the memory influences the hidden states
self.memory_gates[layer_key] = nn.Sequential(
nn.Linear(self.hidden_size, self.hidden_size),
nn.Sigmoid()
)
print(f"[QwenMAC] 初始化完成:")
print(f" - 隐藏层大小: {self.hidden_size}")
print(f" - 层数: {self.num_layers}")
print(f" - Segment 长度: {segment_len}")
print(f" - Longterm Memory Tokens: {num_longterm_mem_tokens}")
print(f" - Persist Memory Tokens: {num_persist_mem_tokens}")
print(f" - 记忆层: {neural_memory_layers}")
def _insert_memory_tokens(
self,
hidden_states: Tensor, # [batch, seq_len, hidden]
batch_size: int,
seq_len: int,
) -> Tuple[Tensor, int]:
"""
在序列中插入 memory tokens
输入: [batch, seq_len, hidden]
输出: [batch, new_seq_len, hidden]
处理流程:
原始: [t1, t2, ..., t128, t129, ..., t256]
1. 分成 segments:
Seg1: [t1, ..., t128]
Seg2: [t129, ..., t256]
2. 每个 segment 前插入 longterm_mem:
Seg1: [M1, ..., M16, t1, ..., t128]
Seg2: [M1, ..., M16, t129, ..., t256]
3. 合并 + 前置 persist_mem:
[P1, ..., P4, M1..M16, t1..t128, M1..M16, t129..t256]
"""
segment_len = self.segment_len
num_longterm = self.num_longterm_mem_tokens
num_persist = self.num_persist_mem_tokens
        # How many segments are needed
        num_segments = (seq_len + segment_len - 1) // segment_len
        # Pad the sequence up to a multiple of segment_len
padded_len = num_segments * segment_len
if seq_len < padded_len:
padding = padded_len - seq_len
hidden_states = F.pad(hidden_states, (0, 0, 0, padding))
        # Reshape into segments: [batch, num_segments, segment_len, hidden]
hidden_states = rearrange(
hidden_states,
'b (s n) d -> b s n d',
s=num_segments,
n=segment_len
)
        # Expand the longterm memory tokens: [batch, num_segments, num_longterm, hidden]
longterm_mem = repeat(
self.longterm_mem_tokens,
'n d -> b s n d',
b=batch_size,
s=num_segments
)
        # Prepend the memory tokens to every segment
        # -> [batch, num_segments, num_longterm + segment_len, hidden]
hidden_states = torch.cat([longterm_mem, hidden_states], dim=2)
        # Flatten the segments: [batch, num_segments * (num_longterm + segment_len), hidden]
hidden_states = rearrange(hidden_states, 'b s n d -> b (s n) d')
        # Prepend the persistent memory tokens to the whole sequence
persist_mem = repeat(
self.persist_mem_tokens,
'n d -> b n d',
b=batch_size
)
hidden_states = torch.cat([persist_mem, hidden_states], dim=1)
new_seq_len = hidden_states.shape[1]
return hidden_states, new_seq_len
def _remove_memory_tokens(
self,
hidden_states: Tensor,
original_seq_len: int,
) -> Tensor:
"""
从输出中移除 memory tokens,恢复原始序列长度
"""
segment_len = self.segment_len
num_longterm = self.num_longterm_mem_tokens
num_persist = self.num_persist_mem_tokens
total_segment_len = segment_len + num_longterm
batch_size = hidden_states.shape[0]
        # Remove the persist tokens
hidden_states = hidden_states[:, num_persist:]
        # Number of segments
num_segments = (original_seq_len + segment_len - 1) // segment_len
        # Reshape into segments
hidden_states = rearrange(
hidden_states,
'b (s n) d -> b s n d',
s=num_segments,
n=total_segment_len
)
        # Remove the memory tokens at the start of each segment
hidden_states = hidden_states[:, :, num_longterm:, :]
        # Flatten and truncate to the original length
hidden_states = rearrange(hidden_states, 'b s n d -> b (s n) d')
hidden_states = hidden_states[:, :original_seq_len, :]
return hidden_states
def _create_mac_attention_mask(
self,
seq_len_with_mem: int,
device: torch.device,
dtype: torch.dtype,
) -> Tensor:
"""
创建 MAC 风格的 attention mask
MAC mask 的特点:
1. Persist memory tokens 对所有位置可见
2. 每个 segment 内部是 causal 的
3. Memory tokens 可以 attend 到之前的 segment
这是一个简化版本,完整版需要考虑更多细节
"""
# 创建基础 causal mask
mask = torch.ones(seq_len_with_mem, seq_len_with_mem, device=device, dtype=dtype)
mask = torch.tril(mask)
# Persist memory 对所有位置可见
num_persist = self.num_persist_mem_tokens
mask[:, :num_persist] = 1.0
return mask
def forward(
self,
input_ids: Tensor,
attention_mask: Optional[Tensor] = None,
position_ids: Optional[Tensor] = None,
memory_states: Optional[Dict[str, NeuralMemState]] = None,
return_memory_states: bool = True,
**kwargs
) -> Dict[str, Any]:
"""
前向传播
Args:
input_ids: [batch, seq_len]
attention_mask: [batch, seq_len]
memory_states: 各层的记忆状态(用于增量推理)
Returns:
dict with 'logits', 'hidden_states', 'memory_states'
"""
batch_size, seq_len = input_ids.shape
device = input_ids.device
# =====================================================================
# Step 1: Token Embedding
# =====================================================================
if hasattr(self.qwen.model, 'embed_tokens'):
hidden_states = self.qwen.model.embed_tokens(input_ids)
else:
hidden_states = self.qwen.get_input_embeddings()(input_ids)
# =====================================================================
        # Step 2: Insert the memory tokens
# =====================================================================
hidden_states, seq_len_with_mem = self._insert_memory_tokens(
hidden_states, batch_size, seq_len
)
# =====================================================================
        # Step 3: Build the attention mask (simplified)
        # =====================================================================
        # Note: a full implementation needs more elaborate mask handling
        # (_create_mac_attention_mask above sketches one possibility); here we
        # pass None so that each layer falls back to its default causal mask.
mac_attention_mask = None
# =====================================================================
        # Step 4: Process the layers one by one
# =====================================================================
if memory_states is None:
memory_states = {}
next_memory_states = {}
        # Iterate over all of Qwen's decoder layers
for layer_idx, layer in enumerate(self.qwen.model.layers):
layer_key = str(layer_idx)
# -----------------------------------------------------------------
            # 4.1 Standard Qwen decoder layer forward pass
            # -----------------------------------------------------------------
            # Simplified version of Qwen2DecoderLayer.forward()
residual = hidden_states
# Input LayerNorm
hidden_states = layer.input_layernorm(hidden_states)
# Self Attention
            # Note: this attention call is simplified; depending on the transformers
            # version it may require more arguments (e.g. rotary position embeddings).
attn_output = layer.self_attn(
hidden_states=hidden_states,
attention_mask=mac_attention_mask,
                position_ids=None,  # generated automatically
)
            # Handle the different return formats
if isinstance(attn_output, tuple):
attn_output = attn_output[0]
hidden_states = residual + attn_output
# -----------------------------------------------------------------
            # 4.2 NeuralMemory augmentation (only in the selected layers)
# -----------------------------------------------------------------
if layer_key in self.neural_memories:
neural_mem = self.neural_memories[layer_key]
gate_fn = self.memory_gates[layer_key]
                # Fetch this layer's memory state
mem_state = memory_states.get(layer_key)
                # Retrieve from, and update, the neural memory
retrieved, next_mem_state = neural_mem(
hidden_states,
state=mem_state
)
                # Gated fusion
                gate = gate_fn(hidden_states)
                if self.gate_memory_output:
                    # Option 1: modulate the hidden states with the gated, retrieved memory
                    hidden_states = hidden_states * (1 + gate * retrieved.sigmoid())
                else:
                    # Option 2: add the gated memory directly (the more common choice)
                    hidden_states = hidden_states + gate * retrieved
                # Store the updated memory state
next_memory_states[layer_key] = next_mem_state
# -----------------------------------------------------------------
# 4.3 Feed Forward Network
# -----------------------------------------------------------------
residual = hidden_states
hidden_states = layer.post_attention_layernorm(hidden_states)
hidden_states = layer.mlp(hidden_states)
hidden_states = residual + hidden_states
# =====================================================================
# Step 5: Final LayerNorm
# =====================================================================
hidden_states = self.qwen.model.norm(hidden_states)
# =====================================================================
        # Step 6: Remove the memory tokens
# =====================================================================
hidden_states = self._remove_memory_tokens(hidden_states, seq_len)
# =====================================================================
# Step 7: LM Head
# =====================================================================
logits = self.qwen.lm_head(hidden_states)
# =====================================================================
        # Assemble the result
# =====================================================================
result = {
'logits': logits,
'hidden_states': hidden_states,
}
if return_memory_states:
result['memory_states'] = next_memory_states
return result
def generate(
self,
input_ids: Tensor,
max_new_tokens: int = 100,
temperature: float = 1.0,
top_p: float = 0.9,
memory_states: Optional[Dict] = None,
**kwargs
) -> Tensor:
"""
简单的生成函数
"""
generated = input_ids.clone()
for _ in range(max_new_tokens):
outputs = self.forward(
generated,
memory_states=memory_states,
return_memory_states=True
)
logits = outputs['logits'][:, -1, :]
memory_states = outputs['memory_states']
            # Sampling
if temperature > 0:
probs = F.softmax(logits / temperature, dim=-1)
next_token = torch.multinomial(probs, num_samples=1)
else:
next_token = logits.argmax(dim=-1, keepdim=True)
generated = torch.cat([generated, next_token], dim=-1)
            # Check for EOS (the config attribute may be missing or None)
            eos_token_id = getattr(self.config, 'eos_token_id', None)
            if eos_token_id is not None:
                if (next_token == eos_token_id).all():
                    break
return generated
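
# ----------------------------------------------------------------------------
# Standalone sketch (illustrative only, not used by the class above) of the
# segment / memory-token round trip that _insert_memory_tokens and
# _remove_memory_tokens implement: inserting the memory tokens and stripping
# them again must return the original token positions unchanged. All sizes
# below are arbitrary toy values.
# ----------------------------------------------------------------------------
def _mac_round_trip_demo():
    batch, seq_len, dim = 2, 100, 32
    segment_len, num_longterm, num_persist = 16, 4, 2

    x = torch.randn(batch, seq_len, dim)
    longterm = torch.randn(num_longterm, dim)
    persist = torch.randn(num_persist, dim)

    # Insert: pad to a multiple of segment_len, prepend memory tokens per segment,
    # then prepend the persistent tokens to the whole sequence.
    num_segments = (seq_len + segment_len - 1) // segment_len
    padded = F.pad(x, (0, 0, 0, num_segments * segment_len - seq_len))
    segments = rearrange(padded, 'b (s n) d -> b s n d', n=segment_len)
    mem = repeat(longterm, 'n d -> b s n d', b=batch, s=num_segments)
    with_mem = rearrange(torch.cat([mem, segments], dim=2), 'b s n d -> b (s n) d')
    with_mem = torch.cat([repeat(persist, 'n d -> b n d', b=batch), with_mem], dim=1)

    # Remove: drop the persistent tokens, drop the per-segment memory tokens, un-pad.
    body = rearrange(with_mem[:, num_persist:], 'b (s n) d -> b s n d',
                     n=segment_len + num_longterm)
    recovered = rearrange(body[:, :, num_longterm:], 'b s n d -> b (s n) d')[:, :seq_len]

    assert torch.allclose(recovered, x)
    return with_mem.shape  # torch.Size([2, 142, 32]): 2 + 7 * (4 + 16) tokens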
# ============================================================================
# Usage example
# ============================================================================
def main():
"""
完整的使用示例
"""
print("=" * 70)
print("MAC (Memory-as-Context) 集成到 Qwen 的示例")
print("=" * 70)
# -------------------------------------------------------------------------
    # Option 1: use a Hugging Face Qwen model
# -------------------------------------------------------------------------
try:
from transformers import AutoModelForCausalLM, AutoTokenizer
print("\n[1] 加载 Qwen 模型...")
model_name = "Qwen/Qwen2-0.5B"
tokenizer = AutoTokenizer.from_pretrained(
model_name,
trust_remote_code=True
)
qwen_model = AutoModelForCausalLM.from_pretrained(
model_name,
torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
device_map="auto" if torch.cuda.is_available() else None,
trust_remote_code=True
)
print(f" 模型配置:")
print(f" - hidden_size: {qwen_model.config.hidden_size}")
print(f" - num_layers: {qwen_model.config.num_hidden_layers}")
print(f" - num_heads: {qwen_model.config.num_attention_heads}")
        # Build the MAC-augmented version
        print("\n[2] Building the QwenMAC model...")
mac_model = QwenMACTransformer(
qwen_model=qwen_model,
            segment_len=64,                  # one segment per 64 tokens
            num_longterm_mem_tokens=8,       # 8 memory tokens per segment
            num_persist_mem_tokens=4,        # 4 global memory tokens
            neural_memory_layers=(1, 3, 5),  # add memory at layers 1, 3, and 5
memory_chunk_size=32,
memory_batch_size=64,
)
if torch.cuda.is_available():
mac_model = mac_model.cuda()
        # Test the forward pass
        print("\n[3] Testing the forward pass...")
test_text = "人工智能正在改变世界,它可以"
inputs = tokenizer(test_text, return_tensors="pt")
device = next(mac_model.parameters()).device
input_ids = inputs.input_ids.to(device)
with torch.no_grad():
outputs = mac_model(input_ids)
print(f" 输入形状: {input_ids.shape}")
print(f" 输出 logits 形状: {outputs['logits'].shape}")
print(f" 记忆状态数量: {len(outputs['memory_states'])}")
        # Test generation
        print("\n[4] Testing text generation...")
with torch.no_grad():
generated = mac_model.generate(
input_ids,
max_new_tokens=50,
temperature=0.7
)
generated_text = tokenizer.decode(generated[0], skip_special_tokens=True)
print(f" 生成文本: {generated_text}")
except ImportError as e:
print(f"\n注意: 需要安装 transformers")
print(f"pip install transformers")
print(f"错误: {e}")
# -------------------------------------------------------------------------
    # Option 2: test the NeuralMemory component on its own
# -------------------------------------------------------------------------
print("\n" + "=" * 70)
print("[独立测试] NeuralMemory 组件")
print("=" * 70)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
    # Create a NeuralMemory
    mem = NeuralMemory(
        dim=512,         # hidden dimension
        chunk_size=32,   # chunk size
        batch_size=64,   # batch size
        dim_head=64,     # dimension per head
        heads=8,         # number of heads
model=MemoryMLP(dim=64, depth=2),
momentum=True,
qk_rmsnorm=True,
).to(device)
    # Simulated input
batch_size = 2
seq_len = 256
hidden_dim = 512
x = torch.randn(batch_size, seq_len, hidden_dim).to(device)
print(f"\n输入形状: {x.shape}")
# 第一次前向传播
retrieved, state = mem(x)
print(f"检索输出形状: {retrieved.shape}")
print(f"记忆状态 seq_index: {state.seq_index}")
    # Second forward pass (passing in the previous state)
    x2 = torch.randn(batch_size, seq_len, hidden_dim).to(device)
    retrieved2, state2 = mem(x2, state=state)
    print(f"Second retrieved output shape: {retrieved2.shape}")
    print(f"Updated seq_index: {state2.seq_index}")
print("\n" + "=" * 70)
print("完成!")
print("=" * 70)
if __name__ == "__main__":
main()