""" Titans Neural Memory 与 Qwen 模型集成示例 本文件展示了如何将 Titans 的 NeuralMemory 模块集成到 Qwen 模型中, 以增强其长期记忆能力。 主要集成方案: 1. 作为独立的记忆增强模块(Memory Augmented) 2. 替换/增强特定层的注意力机制 3. Memory-as-Context 方式(类似 MAC Transformer) """ import torch import torch.nn as nn from torch import Tensor from typing import Optional, Tuple from einops import rearrange, repeat from copy import deepcopy # 导入 Titans 的核心组件 from titans_pytorch import NeuralMemory, MemoryMLP, NeuralMemState # ============================================================================ # 方案 1: 简单的记忆增强包装器 (Memory Augmented Wrapper) # ============================================================================ class TitansMemoryWrapper(nn.Module): """ 最简单的集成方式:在 Qwen 模型外部添加 Titans 记忆模块 工作原理: 1. 使用 NeuralMemory 存储和检索长期信息 2. 将检索到的记忆与 Qwen 的输出融合 适用场景: - 不想修改 Qwen 内部结构 - 需要快速验证 Titans 记忆的效果 """ def __init__( self, qwen_model, hidden_size: int = 896, # Qwen2-0.5B 的隐藏层大小 chunk_size: int = 64, memory_batch_size: int = 128, num_heads: int = 4, dim_head: int = 64, memory_depth: int = 2, ): super().__init__() self.qwen = qwen_model # 投影层:将 Qwen 的隐藏状态投影到记忆维度 self.mem_dim = dim_head * num_heads self.to_mem_input = nn.Linear(hidden_size, self.mem_dim) self.from_mem_output = nn.Linear(self.mem_dim, hidden_size) # 创建 Titans 记忆模块 memory_model = MemoryMLP( dim=dim_head, depth=memory_depth, expansion_factor=2.0 ) self.neural_memory = NeuralMemory( dim=self.mem_dim, chunk_size=chunk_size, batch_size=memory_batch_size, dim_head=dim_head, heads=num_heads, model=memory_model, momentum=True, momentum_order=1, qk_rmsnorm=True, ) # 融合门控 self.fusion_gate = nn.Sequential( nn.Linear(hidden_size * 2, hidden_size), nn.Sigmoid() ) def forward( self, input_ids: Tensor, attention_mask: Optional[Tensor] = None, memory_state: Optional[NeuralMemState] = None, **kwargs ): # 获取 Qwen 的隐藏状态 qwen_outputs = self.qwen( input_ids=input_ids, attention_mask=attention_mask, output_hidden_states=True, **kwargs ) hidden_states = qwen_outputs.hidden_states[-1] # 最后一层隐藏状态 # 投影到记忆空间 mem_input = self.to_mem_input(hidden_states) # 使用 Titans 记忆模块存储和检索 retrieved, next_memory_state = self.neural_memory( mem_input, state=memory_state ) # 投影回原始维度 retrieved_hidden = self.from_mem_output(retrieved) # 门控融合 gate = self.fusion_gate(torch.cat([hidden_states, retrieved_hidden], dim=-1)) enhanced_hidden = hidden_states + gate * retrieved_hidden # 使用增强的隐藏状态计算 logits # 注意:这里需要访问 Qwen 的 lm_head if hasattr(self.qwen, 'lm_head'): logits = self.qwen.lm_head(enhanced_hidden) else: logits = qwen_outputs.logits return { 'logits': logits, 'hidden_states': enhanced_hidden, 'memory_state': next_memory_state, 'qwen_outputs': qwen_outputs } # ============================================================================ # 方案 2: 将 Titans 记忆嵌入到 Qwen 的特定层中 # ============================================================================ class QwenDecoderLayerWithMemory(nn.Module): """ 修改后的 Qwen Decoder 层,集成了 Titans 记忆模块 在每个 attention 层后添加记忆检索和更新 """ def __init__( self, original_layer, hidden_size: int, chunk_size: int = 64, memory_batch_size: int = 128, num_heads: int = 4, dim_head: int = 64, ): super().__init__() # 保留原始层的组件 self.self_attn = original_layer.self_attn self.mlp = original_layer.mlp self.input_layernorm = original_layer.input_layernorm self.post_attention_layernorm = original_layer.post_attention_layernorm # 添加 Titans 记忆模块 self.mem_dim = dim_head * num_heads self.to_mem = nn.Linear(hidden_size, self.mem_dim) self.from_mem = nn.Linear(self.mem_dim, hidden_size) memory_model = MemoryMLP(dim=dim_head, depth=2) self.neural_memory = 

# ============================================================================
# Approach 2: embed Titans memory inside specific Qwen decoder layers
# ============================================================================

class QwenDecoderLayerWithMemory(nn.Module):
    """
    A Qwen decoder layer modified to include a Titans memory module.

    Memory retrieval and update are inserted after the attention block of the layer.
    """

    def __init__(
        self,
        original_layer,
        hidden_size: int,
        chunk_size: int = 64,
        memory_batch_size: int = 128,
        num_heads: int = 4,
        dim_head: int = 64,
    ):
        super().__init__()

        # Keep the original layer's components
        self.self_attn = original_layer.self_attn
        self.mlp = original_layer.mlp
        self.input_layernorm = original_layer.input_layernorm
        self.post_attention_layernorm = original_layer.post_attention_layernorm

        # Add the Titans memory module
        self.mem_dim = dim_head * num_heads
        self.to_mem = nn.Linear(hidden_size, self.mem_dim)
        self.from_mem = nn.Linear(self.mem_dim, hidden_size)

        memory_model = MemoryMLP(dim=dim_head, depth=2)
        self.neural_memory = NeuralMemory(
            dim=self.mem_dim,
            chunk_size=chunk_size,
            batch_size=memory_batch_size,
            dim_head=dim_head,
            heads=num_heads,
            model=memory_model,
            momentum=True,
        )

        # Gate for the memory output
        self.mem_gate = nn.Sequential(
            nn.Linear(hidden_size, hidden_size),
            nn.Sigmoid()
        )

    def forward(
        self,
        hidden_states: Tensor,
        attention_mask: Optional[Tensor] = None,
        position_ids: Optional[Tensor] = None,
        memory_state: Optional[NeuralMemState] = None,
        **kwargs
    ):
        # Standard attention forward pass
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)

        attn_output, attn_weights, _ = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            **kwargs
        )
        hidden_states = residual + attn_output

        # === Titans memory augmentation ===
        mem_input = self.to_mem(hidden_states)
        retrieved, next_memory_state = self.neural_memory(
            mem_input,
            state=memory_state
        )
        mem_output = self.from_mem(retrieved)

        # Gated fusion of the retrieved memory
        gate = self.mem_gate(hidden_states)
        hidden_states = hidden_states + gate * mem_output
        # ==================================

        # Standard FFN forward pass
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        return hidden_states, next_memory_state
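
# A minimal sketch (hypothetical helper) of swapping the memory layer into a loaded Qwen
# model. It assumes a transformers Qwen2-style module layout with decoder layers under
# `qwen_model.model.layers`. Caveat: the stock Hugging Face forward loop does not pass
# `memory_state` into the layer, so each call starts from a fresh memory unless you drive
# the layers yourself (see the QwenWithMAC forward below for that pattern).
def replace_layers_with_memory(qwen_model, layer_indices=(2, 4)):
    hidden_size = qwen_model.config.hidden_size
    for idx in layer_indices:
        qwen_model.model.layers[idx] = QwenDecoderLayerWithMemory(
            original_layer=qwen_model.model.layers[idx],
            hidden_size=hidden_size,
        )
    return qwen_model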

# ============================================================================
# Approach 3: Memory-as-Context (closest to the original paper)
# ============================================================================

class QwenWithMAC(nn.Module):
    """
    Memory-as-Context integration of Titans with Qwen.

    Core idea:
    1. Split the long sequence into segments.
    2. Prepend long-term memory tokens to each segment.
    3. Use NeuralMemory to update these memory tokens.

    This is the closest match to the MAC configuration in the Titans paper.
    """

    def __init__(
        self,
        qwen_model,
        hidden_size: int = 896,
        segment_len: int = 128,
        num_longterm_mem_tokens: int = 16,
        num_persist_mem_tokens: int = 4,
        memory_layers: Tuple[int, ...] = (2, 4, 6),
        chunk_size: int = 64,
        memory_batch_size: int = 128,
    ):
        super().__init__()
        self.qwen = qwen_model
        self.hidden_size = hidden_size
        self.segment_len = segment_len
        self.num_longterm_mem_tokens = num_longterm_mem_tokens

        # Persistent memory tokens (shared globally)
        self.persist_mem = nn.Parameter(
            torch.randn(num_persist_mem_tokens, hidden_size) * 0.02
        )

        # Long-term memory tokens (one copy per segment)
        self.longterm_mem = nn.Parameter(
            torch.randn(num_longterm_mem_tokens, hidden_size) * 0.02
        )

        # Create a NeuralMemory module for each designated layer
        self.memory_layers = memory_layers
        self.neural_memories = nn.ModuleDict()

        memory_model = MemoryMLP(dim=64, depth=2)

        for layer_idx in memory_layers:
            self.neural_memories[str(layer_idx)] = NeuralMemory(
                dim=hidden_size,
                chunk_size=chunk_size,
                batch_size=memory_batch_size,
                dim_head=64,
                heads=hidden_size // 64,
                model=deepcopy(memory_model),
                momentum=True,
                qk_rmsnorm=True,
            )

    def prepare_inputs_with_memory(
        self,
        hidden_states: Tensor,
        batch_size: int,
    ) -> Tensor:
        """Insert memory tokens at the start of each segment."""
        seq_len = hidden_states.shape[1]
        num_segments = (seq_len + self.segment_len - 1) // self.segment_len

        # Expand the long-term memory tokens
        longterm = repeat(
            self.longterm_mem,
            'n d -> b s n d',
            b=batch_size,
            s=num_segments
        )

        # Split the sequence into segments (pad the last one if needed)
        padded_len = num_segments * self.segment_len
        if seq_len < padded_len:
            hidden_states = nn.functional.pad(
                hidden_states,
                (0, 0, 0, padded_len - seq_len)
            )

        hidden_states = rearrange(
            hidden_states,
            'b (s n) d -> b s n d',
            n=self.segment_len
        )

        # Prepend the memory tokens to every segment
        hidden_states = torch.cat([longterm, hidden_states], dim=2)

        # Merge back into one full sequence
        hidden_states = rearrange(hidden_states, 'b s n d -> b (s n) d')

        # Prepend the persistent memory tokens to the whole sequence
        persist = repeat(self.persist_mem, 'n d -> b n d', b=batch_size)
        hidden_states = torch.cat([persist, hidden_states], dim=1)

        return hidden_states

    def forward(
        self,
        input_ids: Tensor,
        attention_mask: Optional[Tensor] = None,
        memory_states: Optional[dict] = None,
        **kwargs
    ):
        batch_size = input_ids.shape[0]

        # Token embeddings
        if hasattr(self.qwen.model, 'embed_tokens'):
            hidden_states = self.qwen.model.embed_tokens(input_ids)
        else:
            hidden_states = self.qwen.get_input_embeddings()(input_ids)

        # Insert memory tokens
        hidden_states = self.prepare_inputs_with_memory(hidden_states, batch_size)

        # Initialize memory states
        if memory_states is None:
            memory_states = {}
        next_memory_states = {}

        # Iterate over Qwen's decoder layers
        for layer_idx, layer in enumerate(self.qwen.model.layers):
            # Standard transformer layer forward pass
            layer_outputs = layer(
                hidden_states,
                attention_mask=None,  # the attention mask must be adapted for the memory tokens
                **kwargs
            )
            hidden_states = layer_outputs[0]

            # Apply Titans memory at the designated layers
            if str(layer_idx) in self.neural_memories:
                neural_mem = self.neural_memories[str(layer_idx)]
                mem_state = memory_states.get(str(layer_idx))

                retrieved, next_state = neural_mem(
                    hidden_states,
                    state=mem_state
                )

                # Fuse the retrieved memory (fixed scale here; could be made a learnable weight)
                hidden_states = hidden_states + retrieved * 0.1
                next_memory_states[str(layer_idx)] = next_state

        # Final layer norm
        hidden_states = self.qwen.model.norm(hidden_states)

        # Compute logits
        logits = self.qwen.lm_head(hidden_states)

        return {
            'logits': logits,
            'hidden_states': hidden_states,
            'memory_states': next_memory_states
        }
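
# A minimal sketch (hypothetical helper) of running QwenWithMAC over two segments of a
# long input while reusing the per-layer `memory_states` dict between calls. It assumes
# `qwen_model` and `tokenizer` are the objects loaded in `example_usage()` below and that
# the wrapped Qwen layers accept the call signature used in QwenWithMAC.forward; the
# layer indices are illustrative only.
def mac_two_segment_demo(qwen_model, tokenizer, first_text, second_text):
    mac_model = QwenWithMAC(
        qwen_model=qwen_model,
        hidden_size=qwen_model.config.hidden_size,
        memory_layers=(2, 4, 6),
    )

    states = None
    out = None
    for text in (first_text, second_text):
        input_ids = tokenizer(text, return_tensors='pt').input_ids
        with torch.no_grad():
            out = mac_model(input_ids=input_ids, memory_states=states)
        states = out['memory_states']   # layer-wise memory carried into the next segment

    return out['logits'], states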

# ============================================================================
# Usage example
# ============================================================================

def example_usage():
    """Show how to use the integration approaches above."""

    print("=" * 60)
    print("Titans NeuralMemory + Qwen integration example")
    print("=" * 60)

    # Note: install transformers and the Qwen dependencies first
    # pip install transformers torch titans-pytorch

    try:
        from transformers import AutoModelForCausalLM, AutoTokenizer

        # Load a Qwen model (Qwen2-0.5B as an example)
        model_name = "Qwen/Qwen2-0.5B"
        print(f"\nLoading model: {model_name}")

        tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
        qwen_model = AutoModelForCausalLM.from_pretrained(
            model_name,
            torch_dtype=torch.float16,
            device_map="auto",
            trust_remote_code=True
        )

        # Read the hidden size from the config
        hidden_size = qwen_model.config.hidden_size
        print(f"Model hidden size: {hidden_size}")

        # Approach 1: simple wrapper
        print("\n--- Approach 1: TitansMemoryWrapper ---")
        wrapped_model = TitansMemoryWrapper(
            qwen_model=qwen_model,
            hidden_size=hidden_size,
            chunk_size=64,
            memory_batch_size=128,
        )

        # Test input
        text = "The history of artificial intelligence"
        inputs = tokenizer(text, return_tensors="pt")

        with torch.no_grad():
            outputs = wrapped_model(
                input_ids=inputs.input_ids.to(qwen_model.device),
            )

        print(f"Output logits shape: {outputs['logits'].shape}")
        print(f"Memory state: {type(outputs['memory_state'])}")

    except ImportError as e:
        print("\nNote: the required dependencies must be installed")
        print("pip install transformers torch titans-pytorch")
        print(f"Error: {e}")

    # Standalone test of NeuralMemory
    print("\n--- Standalone NeuralMemory test ---")

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    mem = NeuralMemory(
        dim=384,
        chunk_size=64,
        batch_size=128,
        dim_head=64,
        heads=4,
        model=MemoryMLP(dim=64, depth=2),
        momentum=True,
    ).to(device)

    # Simulated input
    seq = torch.randn(2, 256, 384).to(device)
    retrieved, mem_state = mem(seq)

    print(f"Input shape: {seq.shape}")
    print(f"Retrieved output shape: {retrieved.shape}")
    print(f"Memory state sequence index: {mem_state.seq_index}")

    print("\n" + "=" * 60)
    print("Integration complete!")
    print("=" * 60)


if __name__ == "__main__":
    example_usage()