# titans_NPC/examples/qwen_titans_streaming.py
"""
Qwen + Titans 流式处理超长序列
核心思想:
- Qwen 作为 Core(短期处理器),每次只处理一个 chunk
- Titans NeuralMemory 作为长期记忆,跨 chunk 保持状态
- 虽然 Core 窗口有限(如 4k/8k),但整体能处理任意长度的上下文
处理流程:
┌─────────────────────────────────────────────────────────────────────┐
│ 超长文档 (1M tokens) │
│ [chunk_0] [chunk_1] [chunk_2] ... [chunk_n-1] [chunk_n + question] │
└─────────────────────────────────────────────────────────────────────┘
│ │ │ │ │
▼ ▼ ▼ ▼ ▼
┌─────┐ ┌─────┐ ┌─────┐ ┌─────┐ ┌─────┐
│Qwen │ │Qwen │ │Qwen │ ... │Qwen │ │Qwen │
│Core │ │Core │ │Core │ │Core │ │Core │
└──┬──┘ └──┬──┘ └──┬──┘ └──┬──┘ └──┬──┘
│ │ │ │ │
▼ ▼ ▼ ▼ ▼
┌─────────────────────────────────────────────────────────┐
│ Titans Long-term Memory │
│ M_0 ──write──> M_1 ──write──> M_2 ... M_n-1 ──read──> │
│ │
│ 存储:关键事实、实体关系、重要信息 │
│ 检索:回答问题时取回相关记忆 │
└─────────────────────────────────────────────────────────┘
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from typing import Optional, List, Dict, Any, Tuple
from dataclasses import dataclass
from tqdm import tqdm
import math
from einops import rearrange, repeat
# Titans components
from titans_pytorch import NeuralMemory, MemoryMLP
from titans_pytorch.neural_memory import NeuralMemState
@dataclass
class StreamingConfig:
"""流式处理配置"""
chunk_size: int = 4096 # 每个 chunk 的 token 数
memory_chunk_size: int = 64 # NeuralMemory 内部的 chunk 大小
memory_batch_size: int = 128 # NeuralMemory 的 batch size
num_memory_tokens: int = 16 # 每次注入的 memory token 数量
overlap_size: int = 128 # chunk 之间的重叠(可选,帮助上下文连贯)
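
# A small helper (illustrative only; not used elsewhere in this file) that
# estimates how many Core forward passes a document needs under a given config.
# It mirrors the arithmetic in QwenTitansStreaming._split_into_chunks below.
def estimated_num_chunks(seq_len: int, cfg: StreamingConfig) -> int:
    stride = cfg.chunk_size - cfg.overlap_size
    # Each pass advances by `stride` tokens; the final, possibly short, chunk is padded
    return max(1, math.ceil(max(seq_len - cfg.overlap_size, 1) / stride))
# e.g. estimated_num_chunks(1_000_000, StreamingConfig()) == 252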
class TitansLongTermMemory(nn.Module):
"""
Titans 长期记忆模块
负责:
1. 将 chunk 的信息写入长期记忆
2. 从长期记忆中检索相关信息
3. 生成 memory tokens 注入到 Core 中
"""
def __init__(
self,
hidden_size: int,
chunk_size: int = 64,
batch_size: int = 128,
dim_head: int = 64,
        num_heads: Optional[int] = None,
memory_depth: int = 2,
):
super().__init__()
self.hidden_size = hidden_size
num_heads = num_heads or (hidden_size // dim_head)
        # Memory network: the MLP whose weights NeuralMemory adapts online
memory_model = MemoryMLP(
dim=dim_head,
depth=memory_depth,
expansion_factor=2.0
)
        # NeuralMemory -- the heart of the long-term memory
self.neural_memory = NeuralMemory(
dim=hidden_size,
chunk_size=chunk_size,
batch_size=batch_size,
dim_head=dim_head,
heads=num_heads,
model=memory_model,
momentum=True,
momentum_order=1,
qk_rmsnorm=True,
pre_rmsnorm=True,
default_step_transform_max_lr=0.1,
)
        # Memory tokens: learnable query vectors used to retrieve from
        # long-term memory (the 16 here caps num_tokens in read())
self.memory_query_tokens = nn.Parameter(
torch.randn(1, 16, hidden_size) * 0.02
)
        # Projection: maps retrievals into a form suitable for injection into the Core
self.memory_proj = nn.Sequential(
nn.LayerNorm(hidden_size),
nn.Linear(hidden_size, hidden_size),
nn.GELU(),
nn.Linear(hidden_size, hidden_size),
)
def write(
self,
hidden_states: Tensor, # [batch, seq_len, hidden]
state: Optional[NeuralMemState] = None,
) -> Tuple[Tensor, NeuralMemState]:
"""
将当前 chunk 的信息写入长期记忆
Args:
hidden_states: 当前 chunk 经过 Qwen 处理后的隐藏状态
state: 之前的记忆状态
Returns:
retrieved: 从记忆中检索到的信息
next_state: 更新后的记忆状态
"""
# NeuralMemory 同时执行 store(写入)和 retrieve(读取)
retrieved, next_state = self.neural_memory(
hidden_states,
state=state
)
return retrieved, next_state
def read(
self,
batch_size: int,
state: NeuralMemState,
num_tokens: int = 16,
) -> Tensor:
"""
从长期记忆中读取信息,生成 memory tokens
这些 tokens 会被注入到 Core 的输入中
"""
# 扩展 query tokens
queries = repeat(
self.memory_query_tokens[:, :num_tokens],
'1 n d -> b n d',
b=batch_size
)
# 使用 query tokens 从记忆中检索
retrieved, _ = self.neural_memory(
queries,
state=state
)
# 投影
memory_tokens = self.memory_proj(retrieved)
return memory_tokens
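
# Minimal write/read round trip (a sketch, shapes only; small CPU-sized dims assumed):
#   ltm = TitansLongTermMemory(hidden_size=512)
#   _, state = ltm.write(torch.randn(2, 256, 512))            # store one chunk
#   mem = ltm.read(batch_size=2, state=state, num_tokens=16)  # -> [2, 16, 512]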
class QwenTitansStreaming(nn.Module):
"""
Qwen + Titans 流式处理模型
能够处理任意长度的序列,通过:
1. 将序列分成 chunks
2. 每个 chunk 用 Qwen Core 处理
3. 用 Titans 长期记忆跨 chunk 传递信息
"""
def __init__(
self,
qwen_model,
        config: Optional[StreamingConfig] = None,
):
super().__init__()
self.qwen = qwen_model
self.config = config or StreamingConfig()
self.hidden_size = qwen_model.config.hidden_size
        # Long-term memory module
self.long_term_memory = TitansLongTermMemory(
hidden_size=self.hidden_size,
chunk_size=self.config.memory_chunk_size,
batch_size=self.config.memory_batch_size,
)
        # Memory-fusion gate (defined for completeness but unused below;
        # this example injects memory by concatenating memory tokens instead)
self.memory_gate = nn.Sequential(
nn.Linear(self.hidden_size * 2, self.hidden_size),
nn.Sigmoid()
)
print(f"[QwenTitansStreaming] 初始化完成:")
print(f" - Chunk size: {self.config.chunk_size}")
print(f" - Memory chunk size: {self.config.memory_chunk_size}")
print(f" - Memory batch size: {self.config.memory_batch_size}")
print(f" - Overlap size: {self.config.overlap_size}")
def _split_into_chunks(
self,
input_ids: Tensor,
chunk_size: int,
overlap: int = 0,
) -> List[Tensor]:
"""
将输入序列分成 chunks
Args:
input_ids: [batch, seq_len]
chunk_size: 每个 chunk 的大小
overlap: chunk 之间的重叠
Returns:
List of chunks, each [batch, chunk_size]
"""
batch_size, seq_len = input_ids.shape
chunks = []
        stride = chunk_size - overlap
        assert stride > 0, "overlap must be smaller than chunk_size"
for start in range(0, seq_len, stride):
end = min(start + chunk_size, seq_len)
chunk = input_ids[:, start:end]
            # Pad the last chunk up to chunk_size if needed (token id 0 as pad;
            # swap in the tokenizer's pad_token_id if it differs)
if chunk.shape[1] < chunk_size:
pad_len = chunk_size - chunk.shape[1]
chunk = F.pad(chunk, (0, pad_len), value=0)
chunks.append(chunk)
if end >= seq_len:
break
return chunks
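
    # Worked example of the split above (illustrative): with chunk_size=8 and
    # overlap=2, stride is 6, so a length-20 sequence yields chunks covering
    # [0:8], [6:14], [12:20] -- three Core passes, each re-seeing 2 tokens.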
def process_document(
self,
input_ids: Tensor,
attention_mask: Optional[Tensor] = None,
return_all_hidden_states: bool = False,
show_progress: bool = True,
) -> Dict[str, Any]:
"""
流式处理整个文档
这是核心方法:
1. 将文档分成 chunks
2. 逐个 chunk 处理
3. 每个 chunk 后更新长期记忆
Args:
input_ids: [batch, seq_len] - 可以是任意长度!
attention_mask: [batch, seq_len]
Returns:
包含最终隐藏状态、记忆状态等的字典
"""
batch_size, total_seq_len = input_ids.shape
device = input_ids.device
        # Split into chunks
chunks = self._split_into_chunks(
input_ids,
self.config.chunk_size,
self.config.overlap_size
)
num_chunks = len(chunks)
print(f"[process_document] 总长度: {total_seq_len}, 分成 {num_chunks} 个 chunks")
# 初始化记忆状态
memory_state = None
all_hidden_states = []
        # Process the chunks one by one
iterator = tqdm(enumerate(chunks), total=num_chunks, desc="Processing chunks") \
if show_progress else enumerate(chunks)
for chunk_idx, chunk_ids in iterator:
# =========================================================
            # Step 1: read memory tokens from long-term memory (skipped for the first chunk)
# =========================================================
memory_tokens = None
if memory_state is not None and chunk_idx > 0:
memory_tokens = self.long_term_memory.read(
batch_size=batch_size,
state=memory_state,
num_tokens=self.config.num_memory_tokens
)
# =========================================================
            # Step 2: process the current chunk with the Qwen Core
# =========================================================
chunk_hidden = self._process_chunk_with_memory(
chunk_ids,
memory_tokens=memory_tokens,
)
# =========================================================
            # Step 3: write the current chunk's information into long-term memory
# =========================================================
_, memory_state = self.long_term_memory.write(
chunk_hidden,
state=memory_state
)
if return_all_hidden_states:
all_hidden_states.append(chunk_hidden)
        # Assemble the results
result = {
'last_hidden_states': chunk_hidden,
'memory_state': memory_state,
'num_chunks_processed': num_chunks,
}
if return_all_hidden_states:
result['all_hidden_states'] = all_hidden_states
return result
def _process_chunk_with_memory(
self,
chunk_ids: Tensor,
memory_tokens: Optional[Tensor] = None,
) -> Tensor:
"""
处理单个 chunk,可选地注入 memory tokens
Args:
chunk_ids: [batch, chunk_size]
memory_tokens: [batch, num_mem_tokens, hidden] - 从长期记忆检索的
Returns:
hidden_states: [batch, chunk_size, hidden]
"""
batch_size = chunk_ids.shape[0]
        # Get token embeddings
if hasattr(self.qwen.model, 'embed_tokens'):
hidden_states = self.qwen.model.embed_tokens(chunk_ids)
else:
hidden_states = self.qwen.get_input_embeddings()(chunk_ids)
        # If memory tokens are present, prepend them to the input
if memory_tokens is not None:
# [batch, num_mem + chunk_size, hidden]
hidden_states = torch.cat([memory_tokens, hidden_states], dim=1)
        # Run through all of Qwen's decoder layers
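        # NOTE (API assumption): recent transformers releases have Qwen2 decoder
        # layers take precomputed rotary `position_embeddings`. If the bare
        # layer(...) call below raises, the extra plumbing would look roughly like:
        #   position_ids = torch.arange(hidden_states.shape[1], device=hidden_states.device)[None]
        #   pos_emb = self.qwen.model.rotary_emb(hidden_states, position_ids)
        #   layer(hidden_states, attention_mask=None, position_embeddings=pos_emb)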
for layer in self.qwen.model.layers:
layer_output = layer(hidden_states, attention_mask=None)
if isinstance(layer_output, tuple):
hidden_states = layer_output[0]
else:
hidden_states = layer_output
# Final norm
hidden_states = self.qwen.model.norm(hidden_states)
        # If memory tokens were prepended, strip them off again
if memory_tokens is not None:
num_mem = memory_tokens.shape[1]
hidden_states = hidden_states[:, num_mem:]
return hidden_states
def generate_answer(
self,
question_ids: Tensor,
memory_state: NeuralMemState,
max_new_tokens: int = 100,
temperature: float = 0.7,
) -> Tensor:
"""
基于长期记忆生成答案
关键:虽然 Core 只看到问题,但它能从长期记忆中
检索到之前 1M tokens 中的相关事实!
Args:
question_ids: [batch, question_len] - 问题的 token ids
memory_state: 处理完整个文档后的记忆状态
max_new_tokens: 最大生成长度
Returns:
generated_ids: [batch, question_len + generated_len]
"""
batch_size = question_ids.shape[0]
generated = question_ids.clone()
for _ in range(max_new_tokens):
            # Read relevant information from long-term memory
memory_tokens = self.long_term_memory.read(
batch_size=batch_size,
state=memory_state,
num_tokens=self.config.num_memory_tokens
)
            # Process the current sequence plus the memory tokens
hidden = self._process_chunk_with_memory(
generated,
memory_tokens=memory_tokens
)
            # Predict the next token
logits = self.qwen.lm_head(hidden[:, -1:, :])
if temperature > 0:
probs = F.softmax(logits.squeeze(1) / temperature, dim=-1)
next_token = torch.multinomial(probs, num_samples=1)
else:
next_token = logits.squeeze(1).argmax(dim=-1, keepdim=True)
generated = torch.cat([generated, next_token], dim=1)
            # Stop on EOS
if hasattr(self.qwen.config, 'eos_token_id'):
if (next_token == self.qwen.config.eos_token_id).all():
break
return generated
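
# Usage sketch for generate_answer (hypothetical tensors; tokenizer setup omitted):
#   result = model.process_document(doc_ids)                    # stream the document
#   q_ids = tokenizer(question, return_tensors="pt").input_ids  # tokenize the question
#   answer_ids = model.generate_answer(q_ids, result['memory_state'], max_new_tokens=50)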
# ============================================================================
# BABILong-style usage example
# ============================================================================
def babilong_style_example():
"""
演示如何用 Qwen + Titans 处理 BABILong 风格的超长序列任务
BABILong 任务结构:
- 前面是很长的背景文档(可能 1M tokens)
- 最后是一个问题
- 需要从文档中找到相关事实来回答
"""
print("=" * 70)
print("Qwen + Titans 流式处理超长序列示例")
print("=" * 70)
try:
from transformers import AutoModelForCausalLM, AutoTokenizer
        # Load the model
        print("\n[1] Loading the Qwen model...")
model_name = "Qwen/Qwen2-0.5B" # 或 Qwen/Qwen3-4B
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
qwen_model = AutoModelForCausalLM.from_pretrained(
model_name,
torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
device_map="auto" if torch.cuda.is_available() else None,
trust_remote_code=True
)
        # Build the streaming model
        print("\n[2] Building the QwenTitansStreaming model...")
        config = StreamingConfig(
            chunk_size=2048,        # process 2k tokens at a time
            memory_chunk_size=64,
            memory_batch_size=128,
            num_memory_tokens=16,
            overlap_size=64,        # 64-token overlap between chunks
        )
model = QwenTitansStreaming(qwen_model, config)
if torch.cuda.is_available():
model = model.cuda()
        # =====================================================================
        # Simulate a BABILong task
        # =====================================================================
        print("\n[3] Simulating a BABILong task...")
        # Simulated long document (real ones may run to hundreds of thousands of tokens)
long_document = """
这是一个关于人工智能发展历史的长篇文档。
第一章:早期发展
人工智能的概念最早可以追溯到 1950 年代。1956 年的达特茅斯会议
被认为是人工智能作为一门学科正式诞生的标志。
[这里假设有很多很多内容...]
重要事实:达特茅斯会议在 1956 年举行。
[更多内容...]
第五十章:现代发展
2022 年,大型语言模型取得了突破性进展。
重要事实:GPT-4 在 2023 年发布。
[更多内容...]
"""
        # Duplicate the document to simulate an ultra-long sequence
        # (in real use this would be a genuinely long document)
        very_long_document = long_document * 100
        question = "\nQuestion: In which year was the Dartmouth Conference held?"
        full_input = very_long_document + question
# Tokenize
print(f" 文档长度(字符): {len(full_input)}")
inputs = tokenizer(full_input, return_tensors="pt")
input_ids = inputs.input_ids
print(f" 文档长度(tokens): {input_ids.shape[1]}")
device = next(model.parameters()).device
input_ids = input_ids.to(device)
        # Stream-process the document
        print("\n[4] Stream-processing the document...")
with torch.no_grad():
result = model.process_document(
input_ids,
show_progress=True
)
print(f"\n 处理完成!")
print(f" - 处理了 {result['num_chunks_processed']} 个 chunks")
print(f" - 记忆状态 seq_index: {result['memory_state'].seq_index}")
        # Generate an answer (real scenario; question_ids would come from the tokenizer):
        # print("\n[5] Generating an answer from long-term memory...")
        # question_ids = tokenizer(question, return_tensors="pt").input_ids.to(device)
        # answer = model.generate_answer(
        #     question_ids,
        #     memory_state=result['memory_state'],
        #     max_new_tokens=50
        # )
except ImportError as e:
print(f"\n需要安装依赖: pip install transformers")
print(f"错误: {e}")
# =========================================================================
    # Standalone test
# =========================================================================
print("\n" + "=" * 70)
print("[独立测试] Titans 长期记忆模块")
print("=" * 70)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
    # Build the long-term memory module
ltm = TitansLongTermMemory(
hidden_size=512,
chunk_size=64,
batch_size=128,
dim_head=64,
num_heads=8,
).to(device)
    # Simulate processing several chunks
batch_size = 2
chunk_size = 256
hidden_dim = 512
num_chunks = 5
print(f"\n模拟处理 {num_chunks} 个 chunks:")
memory_state = None
for i in range(num_chunks):
        # Fake hidden states for the current chunk
chunk_hidden = torch.randn(batch_size, chunk_size, hidden_dim).to(device)
        # Write into long-term memory
retrieved, memory_state = ltm.write(chunk_hidden, state=memory_state)
print(f" Chunk {i}: 写入完成, seq_index = {memory_state.seq_index}")
    # Read from long-term memory
    print("\nReading from long-term memory:")
    memory_tokens = ltm.read(batch_size, memory_state, num_tokens=16)
    print(f"  Memory tokens shape: {memory_tokens.shape}")
print("\n" + "=" * 70)
print("完成!")
print("=" * 70)
if __name__ == "__main__":
babilong_style_example()