| | """ |
| | Qwen3 + Titans training on BABILong QA1 (32k). |
| | |
| | Key ideas: |
| | - Fixed-length 32k samples for DDP/FSDP stability. |
| | - Stream long sequences by chunk (default 8k). |
| | - Insert Titans memory modules into Qwen layers (stride configurable). |
| | """ |
| |
|
| | import os |
| | import json |
| | import math |
| | import argparse |
| | import logging |
| | import weakref |
| | from contextlib import nullcontext |
| | from dataclasses import dataclass, asdict |
| | from typing import Optional, Dict, Any, List, Tuple, Callable |
| |
|
| | import torch |
| | import torch.nn as nn |
| | import torch.nn.functional as F |
| | import torch.distributed as dist |
| | from torch.utils.data import Dataset, DataLoader |
| | from torch.optim import AdamW |
| | from torch.optim.lr_scheduler import CosineAnnealingLR |
| | from torch.nn.parallel import DistributedDataParallel as DDP |
| | from tqdm import tqdm |
| |
|
| | from einops import rearrange |
| |
|
| | |
| | import sys |
| | sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) |
| |
|
| | |
| | from titans_pytorch import NeuralMemory, MemoryMLP |
| | from titans_pytorch.neural_memory import NeuralMemState |
| |
|
# Root logging setup: INFO level, timestamped records; all modules in this
# script log through the module-level `logger` below.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)
| |
|
| |
|
@dataclass
class TrainingConfig:
    """Hyperparameters and paths for Qwen3 + Titans training on BABILong QA1 (32k).

    Fix: the original declared ``chunkwise_backward`` twice; the duplicate
    (dead) line is removed. All defaults are unchanged.
    """

    # --- paths ---
    model_path: str = "/data/huangyifei/huggingface_cache/hub/models--Qwen--Qwen3-4B-Instruct-2507/snapshots/cdbee75f17c01a7cc42f958dc650907174af0554"
    data_path: str = "/data/yty/BABILong/babilong-train-5k-samples/data/qa1/32k.json"
    output_dir: str = "./outputs/qwen_titans_babilong"

    # --- optimization schedule ---
    num_epochs: int = 10
    batch_size: int = 2
    gradient_accumulation_steps: int = 8
    max_grad_norm: float = 1.0

    # --- learning rates (separate groups for memory modules vs. pretrained weights) ---
    lr_memory: float = 1e-4
    lr_pretrained: float = 5e-6
    weight_decay: float = 0.01
    warmup_steps: int = 100

    # --- Titans memory configuration ---
    chunk_size: int = 8192              # streaming chunk length for long sequences
    use_memory: bool = True
    memory_chunk_size: int = 128
    memory_batch_size: int = 128
    memory_heads: int = 8
    memory_dim_head: int = 64
    memory_depth: int = 1
    memory_layer_stride: int = 8        # insert a memory module every N decoder layers
    memory_fp32: bool = True            # run the neural memory in float32 for stability
    detach_mem_state: bool = True
    freeze_base_model: bool = False

    # --- evaluation / logging / debugging ---
    eval_steps: int = 200
    eval_topk: int = 0
    logging_steps: int = 10
    log_every_batches: int = 80
    final_eval_print_examples: int = 10
    debug_data_samples: int = 0
    debug_label_batches: int = 0
    debug_eval_stats: bool = False
    debug_grad_norm: bool = False

    # --- mixed precision / memory savers ---
    bf16: bool = True
    fp16: bool = False
    use_tf32: bool = True
    gradient_checkpointing: bool = False
    chunkwise_backward: bool = True

    # --- data shaping ---
    max_length: int = 32768             # fixed sample length for DDP/FSDP stability
    answer_reserve_tokens: int = 64     # prompt truncation leaves room for the answer
    label_prefix_tokens: int = 0
    max_samples: Optional[int] = 500

    # --- distributed ---
    use_fsdp: bool = False
    fsdp_use_orig_params: bool = True
    ddp_find_unused_parameters: bool = False

    # --- checkpointing ---
    save_full_checkpoint: bool = True
    final_ckpt_name: str = "final_memory_checkpoint.pt"
    final_full_ckpt_name: str = "final_full_checkpoint.pt"

    seed: int = 42
| |
|
| |
|
class BABILongDataset(Dataset):
    """Fixed-length BABILong QA dataset.

    Each item is rendered as ``"<context>\n\nQuestion: <q>\nAnswer: <target>"``,
    tokenized, and right-padded (or truncated) to exactly ``max_length`` tokens
    so every DDP/FSDP rank sees identical shapes. Labels are -100 everywhere
    except the answer tokens (plus, optionally, the last ``label_prefix_tokens``
    prompt tokens).
    """

    def __init__(
        self,
        data_path: str,
        tokenizer,
        max_length: int = 32768,
        answer_reserve_tokens: int = 64,
        label_prefix_tokens: int = 0,
        max_samples: Optional[int] = None,
    ):
        # tokenizer: assumed to be a HuggingFace-style tokenizer exposing
        # `pad_token_id` and `__call__(..., return_tensors="pt")` — TODO confirm
        # against the caller that constructs this dataset.
        self.tokenizer = tokenizer
        self.max_length = max_length
        self.answer_reserve_tokens = answer_reserve_tokens
        self.label_prefix_tokens = int(label_prefix_tokens)

        logger.info(f"Loading dataset: {data_path}")
        with open(data_path, "r") as f:
            self.data = json.load(f)

        # Truthiness test: max_samples=0 (or None) means "no cap".
        if max_samples:
            self.data = self.data[:max_samples]

        logger.info(f"Dataset size: {len(self.data)}")

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        # Record schema (from the JSON file): presumably
        # {"input": ..., "question": ..., "target": ...} — verify against data.
        item = self.data[idx]
        text = f"{item['input']}\n\nQuestion: {item['question']}\nAnswer:"
        target = item["target"]

        # Falls back to 0 when pad_token_id is None (or 0 itself).
        pad_id = self.tokenizer.pad_token_id or 0
        reserve = int(self.answer_reserve_tokens)

        # Truncate the prompt so at least `reserve` positions stay free for
        # the answer tokens appended below.
        prompt_ids = self.tokenizer(
            text,
            max_length=max(self.max_length - reserve, 1),
            truncation=True,
            add_special_tokens=True,
            return_tensors="pt",
        ).input_ids.squeeze(0)

        # Leading space so the answer tokenizes as a word continuation.
        answer_ids = self.tokenizer(
            f" {target}",
            add_special_tokens=False,
            return_tensors="pt",
        ).input_ids.squeeze(0)

        # Clip the answer if the prompt already consumed the whole budget.
        available = max(self.max_length - prompt_ids.numel(), 0)
        answer_ids = answer_ids[:available]

        input_ids = torch.cat([prompt_ids, answer_ids], dim=0)[: self.max_length]

        # Supervise only the answer span (plus an optional prompt tail when
        # label_prefix_tokens > 0); everything else is ignored via -100.
        labels = torch.full_like(input_ids, fill_value=-100)
        if answer_ids.numel() > 0:
            start = prompt_ids.numel()
            end = min(start + answer_ids.numel(), labels.numel())
            labels[start:end] = input_ids[start:end]
            if self.label_prefix_tokens > 0:
                prefix = min(start, self.label_prefix_tokens)
                if prefix > 0:
                    labels[start - prefix:start] = input_ids[start - prefix:start]

        # Right-pad to the fixed length; padding is excluded from both the
        # attention mask and the loss.
        seq_len = input_ids.numel()
        if seq_len < self.max_length:
            pad_len = self.max_length - seq_len
            input_ids = F.pad(input_ids, (0, pad_len), value=int(pad_id))
            labels = F.pad(labels, (0, pad_len), value=-100)
            attention_mask = torch.cat(
                [torch.ones(seq_len, dtype=torch.long), torch.zeros(pad_len, dtype=torch.long)],
                dim=0,
            )
        else:
            attention_mask = torch.ones(self.max_length, dtype=torch.long)

        return {
            "input_ids": input_ids.to(dtype=torch.long),
            "labels": labels.to(dtype=torch.long),
            "attention_mask": attention_mask,
        }
| |
|
| |
|
def collate_fn(batch):
    """Batch a list of per-sample tensor dicts by stacking each key along dim 0."""
    stacked = {}
    for key in batch[0]:
        stacked[key] = torch.stack([sample[key] for sample in batch], dim=0)
    return stacked
| |
|
| |
|
def _get_raw_dataset_item(dataset, idx: int) -> Optional[Dict[str, Any]]:
    """Resolve *idx* through an optional ``Subset`` wrapper and return the raw
    JSON record from the underlying ``BABILongDataset``, or ``None`` when the
    dataset type is unknown or the index cannot be resolved."""
    if isinstance(dataset, torch.utils.data.Subset):
        base, true_idx = dataset.dataset, dataset.indices[idx]
    else:
        base, true_idx = dataset, idx

    if not (isinstance(base, BABILongDataset) and hasattr(base, "data")):
        return None
    try:
        return base.data[true_idx]
    except Exception:
        # Best-effort debug helper: swallow bad indices rather than crash.
        return None
| |
|
| |
|
def log_dataset_debug_stats(dataset, tokenizer, name: str, num_samples: int) -> None:
    """Log label/attention statistics for the first *num_samples* items of a dataset.

    For each sample it counts supervised label tokens, loss tokens (labels
    shifted by one for next-token prediction), and attended tokens; the first
    three samples additionally get their decoded answer span and raw-target
    token counts logged. A no-op when *num_samples* <= 0.
    """
    if num_samples <= 0:
        return
    total = len(dataset)
    if total <= 0:
        logger.warning(f"[DATA DEBUG] {name}: empty dataset")
        return

    n = min(int(num_samples), total)
    zero_label = 0          # samples with no supervised tokens at all
    total_label_tokens = 0
    total_loss_tokens = 0
    total_attn_tokens = 0

    for i in range(n):
        sample = dataset[i]
        labels = sample["labels"]
        attn = sample["attention_mask"]

        label_mask = labels != -100
        label_tokens = int(label_mask.sum().item())
        # Loss tokens: labels from position 1 on, matching the causal shift
        # applied in the model's loss computation.
        loss_tokens = int((labels[1:] != -100).sum().item()) if labels.numel() > 1 else 0
        attn_tokens = int(attn.sum().item())

        total_label_tokens += label_tokens
        total_loss_tokens += loss_tokens
        total_attn_tokens += attn_tokens
        if label_tokens == 0:
            zero_label += 1

        # Verbose per-sample dump only for the first few samples.
        if i < min(3, n):
            label_pos = label_mask.nonzero(as_tuple=False).view(-1)
            first_label = int(label_pos[0].item()) if label_pos.numel() > 0 else -1
            last_label = int(label_pos[-1].item()) if label_pos.numel() > 0 else -1

            decoded = ""
            if tokenizer is not None and label_pos.numel() > 0:
                answer_ids = labels[label_pos].tolist()
                decoded = tokenizer.decode(answer_ids, skip_special_tokens=True).strip()
                if len(decoded) > 200:
                    decoded = decoded[:200] + "..."

            # Cross-check against the raw JSON record, when reachable.
            raw_item = _get_raw_dataset_item(dataset, i)
            target_chars = None
            target_tokens = None
            if raw_item is not None and tokenizer is not None:
                target_text = str(raw_item.get("target", ""))
                target_chars = len(target_text)
                # Same " {target}" tokenization as BABILongDataset.__getitem__.
                target_ids = tokenizer(
                    f" {target_text}",
                    add_special_tokens=False,
                    return_tensors="pt",
                ).input_ids.squeeze(0)
                target_tokens = int(target_ids.numel())

            logger.info(
                f"[DATA DEBUG] {name} sample {i}: attn_tokens={attn_tokens}, "
                f"label_tokens={label_tokens}, loss_tokens={loss_tokens}, "
                f"label_span=[{first_label},{last_label}]"
            )
            if target_chars is not None or decoded:
                logger.info(
                    f"[DATA DEBUG] {name} target_chars={target_chars}, "
                    f"target_tokens={target_tokens}, decoded_answer={repr(decoded)}"
                )

    avg_label = total_label_tokens / max(n, 1)
    avg_loss = total_loss_tokens / max(n, 1)
    avg_attn = total_attn_tokens / max(n, 1)
    logger.info(
        f"[DATA DEBUG] {name} summary: samples={n}, zero_label_samples={zero_label}, "
        f"avg_label_tokens={avg_label:.2f}, avg_loss_tokens={avg_loss:.2f}, avg_attn_tokens={avg_attn:.2f}"
    )
| |
|
| |
|
class QwenDecoderLayerWithTitansMemory(nn.Module):
    """Wrap a Qwen decoder layer with a Titans ``NeuralMemory``.

    The wrapped layer's output is stored into / retrieved from the neural
    memory, and the retrieved signal is mixed back into the hidden states
    through a learned sigmoid gate. Memory state persists across forward
    calls (for streaming long sequences chunk by chunk) until
    ``reset_memory_state()`` is called.
    """

    def __init__(
        self,
        base_layer: nn.Module,
        *,
        hidden_size: int,
        chunk_size: int,
        batch_size: int,
        dim_head: int,
        num_heads: int,
        memory_depth: int,
        memory_fp32: bool,
        detach_mem_state: bool,
        parent_model: Optional[nn.Module] = None,
    ):
        super().__init__()
        self.layer = base_layer
        self.memory_fp32 = memory_fp32
        self.detach_mem_state = bool(detach_mem_state)
        # Carried across chunks of the same sequence; None means fresh memory.
        self.memory_state: Optional[NeuralMemState] = None
        # weakref avoids a reference cycle with the parent model, which is only
        # consulted for its `_mem_store_mask` attribute at forward time.
        self.parent_model_ref = weakref.ref(parent_model) if parent_model is not None else None

        # Per-head memory MLP used inside NeuralMemory.
        memory_model = MemoryMLP(
            dim=dim_head,
            depth=memory_depth,
            expansion_factor=2.0,
        )

        self.neural_memory = NeuralMemory(
            dim=hidden_size,
            chunk_size=chunk_size,
            batch_size=batch_size,
            dim_head=dim_head,
            heads=num_heads,
            model=memory_model,
            momentum=True,
            momentum_order=1,
            qk_rmsnorm=True,
            pre_rmsnorm=True,
            default_step_transform_max_lr=1e-2,
            init_adaptive_step_bias=-6.0,
            max_grad_norm=1.0,
            spectral_norm_surprises=True,
            use_accelerated_scan=False,
        )

        # Gate on [hidden, retrieved] deciding how much memory output to add.
        self.mem_gate = nn.Sequential(
            nn.Linear(hidden_size * 2, hidden_size),
            nn.Sigmoid(),
        )

        # Align the new modules with the wrapped layer's device/dtype.
        try:
            layer_device = next(base_layer.parameters()).device
            layer_dtype = next(base_layer.parameters()).dtype
        except StopIteration:
            layer_device = None
            layer_dtype = None

        if layer_device is not None:
            # Memory optionally stays in fp32 for numerical stability.
            mem_dtype = torch.float32 if memory_fp32 else layer_dtype
            self.neural_memory = self.neural_memory.to(device=layer_device, dtype=mem_dtype)
            if layer_dtype is not None:
                self.mem_gate = self.mem_gate.to(device=layer_device, dtype=layer_dtype)
            else:
                self.mem_gate = self.mem_gate.to(device=layer_device)

    def reset_memory_state(self):
        # Call between sequences so memory does not leak across samples.
        self.memory_state = None

    def _get_store_mask(self, hidden_states: torch.Tensor) -> Optional[torch.Tensor]:
        """Fetch the parent model's `_mem_store_mask` (set per chunk by
        QwenTitansForBABILong) as a bool tensor, or None if absent/mismatched."""
        parent_model = self.parent_model_ref() if self.parent_model_ref is not None else None
        if parent_model is None or not hasattr(parent_model, "_mem_store_mask"):
            return None
        store_mask = getattr(parent_model, "_mem_store_mask")
        if store_mask is None:
            return None
        store_mask = store_mask.to(device=hidden_states.device).bool()
        # Shape guard: the mask must match (batch, seq) of this layer's input.
        if store_mask.shape[:2] != hidden_states.shape[:2]:
            return None
        return store_mask

    def forward(self, *args, **kwargs):
        outputs = self.layer(*args, **kwargs)

        # HF decoder layers may return a tuple (hidden_states, ...) or a tensor.
        if isinstance(outputs, (tuple, list)):
            hidden_states = outputs[0]
            rest = outputs[1:]
        else:
            hidden_states = outputs
            rest = None

        full_store_mask = self._get_store_mask(hidden_states)
        mem_inp = hidden_states.float() if self.memory_fp32 else hidden_states

        # Build the sequence actually written into memory: drop the leading
        # overlap token (first chunks start one position early) and trim to a
        # multiple of the memory's store chunk size.
        store_seq = None
        store_mask = full_store_mask
        if store_mask is not None:
            store_seq = mem_inp
            if store_mask.shape[1] > 0 and not store_mask[:, 0].any():
                store_seq = store_seq[:, 1:]
                store_mask = store_mask[:, 1:]

            # NOTE(review): indentation reconstructed — this trim assumes
            # store_seq was set in this branch; confirm against original file.
            store_chunk = self.neural_memory.store_chunk_size
            remainder = store_seq.shape[1] % store_chunk
            if remainder != 0:
                store_seq = store_seq[:, :-remainder]
                store_mask = store_mask[:, :-remainder]

        if store_mask is not None and store_seq is not None:
            # Defensive re-alignment of mask and sequence lengths.
            if store_mask.shape[1] != store_seq.shape[1]:
                min_len = min(store_mask.shape[1], store_seq.shape[1])
                store_seq = store_seq[:, :min_len]
                store_mask = store_mask[:, :min_len]

            if store_seq.shape[1] == 0:
                store_seq = None
                store_mask = None

        # Disable autocast when the memory runs in fp32 so it is not downcast.
        mem_ctx = (
            torch.amp.autocast(device_type=hidden_states.device.type, enabled=False)
            if self.memory_fp32
            else nullcontext()
        )
        with mem_ctx:
            retrieved, next_state = self.neural_memory(
                mem_inp,
                store_seq=store_seq,
                state=self.memory_state,
                store_mask=store_mask,
                detach_mem_state=self.detach_mem_state,
            )
        self.memory_state = next_state

        if retrieved is not None:
            retrieved = retrieved.to(dtype=hidden_states.dtype)
            # Zero the memory contribution at masked-out (padding) positions.
            if full_store_mask is not None and full_store_mask.shape[:2] == retrieved.shape[:2]:
                retrieved = retrieved * full_store_mask.unsqueeze(-1).to(dtype=retrieved.dtype)
            gate = self.mem_gate(torch.cat([hidden_states, retrieved], dim=-1))
            hidden_states = hidden_states + gate * retrieved

        if rest is None:
            return hidden_states
        return (hidden_states, *rest)
| |
|
| |
|
class QwenTitansForBABILong(nn.Module):
    """Qwen causal LM with Titans neural-memory layers inserted at a fixed
    stride, plus chunked forward/loss computation for long sequences."""

    def __init__(self, qwen_model, config: TrainingConfig):
        super().__init__()
        self.qwen = qwen_model
        self.config = config
        self.hidden_size = qwen_model.config.hidden_size
        self.use_memory = bool(getattr(config, "use_memory", True))

        if self.use_memory:
            # Wrap every `memory_layer_stride`-th decoder layer (layer 0
            # included) with a Titans memory module.
            self.memory_layer_stride = int(getattr(config, "memory_layer_stride", 4))
            self.memory_layer_indices = [
                idx for idx in range(len(self.qwen.model.layers)) if idx % self.memory_layer_stride == 0
            ]

            for layer_idx in self.memory_layer_indices:
                base_layer = self.qwen.model.layers[layer_idx]
                wrapped = QwenDecoderLayerWithTitansMemory(
                    base_layer,
                    hidden_size=self.hidden_size,
                    chunk_size=config.memory_chunk_size,
                    batch_size=config.memory_batch_size,
                    dim_head=config.memory_dim_head,
                    num_heads=config.memory_heads,
                    memory_depth=config.memory_depth,
                    memory_fp32=config.memory_fp32,
                    detach_mem_state=config.detach_mem_state,
                    parent_model=self.qwen.model,
                )
                self.qwen.model.layers[layer_idx] = wrapped
        else:
            self.memory_layer_stride = 0
            self.memory_layer_indices = []

        if self.use_memory:
            logger.info("[QwenTitansForBABILong] Initialized")
            logger.info(f" - hidden_size: {self.hidden_size}")
            logger.info(f" - chunk_size: {config.chunk_size}")
            logger.info(f" - memory_layer_stride: {self.memory_layer_stride}")
            logger.info(f" - memory_layers: {self.memory_layer_indices}")
        else:
            logger.info("[QwenTitansForBABILong] Initialized (memory disabled)")
            logger.info(f" - hidden_size: {self.hidden_size}")
            logger.info(f" - chunk_size: {config.chunk_size}")

        # Cache the wrapped layers for fast state resets.
        self._memory_layers = [
            layer for layer in self.qwen.model.layers if isinstance(layer, QwenDecoderLayerWithTitansMemory)
        ]
        # Per-chunk store mask read by the memory layers (see _set_mem_store_mask).
        self.qwen.model._mem_store_mask = None

    def _split_into_chunks(self, tensor, chunk_size):
        """Return [(start, end, tensor[:, start:end]), ...] covering dim 1."""
        seq_len = tensor.shape[1]
        chunks = []
        for start in range(0, seq_len, chunk_size):
            end = min(start + chunk_size, seq_len)
            chunks.append((start, end, tensor[:, start:end]))
        return chunks

    def reset_memory_states(self):
        # Clear all Titans memory state; call at every new sequence.
        for layer in self._memory_layers:
            layer.reset_memory_state()

    def _set_mem_store_mask(
        self,
        chunk_ids: torch.Tensor,
        chunk_mask: Optional[torch.Tensor],
        chunk_start: int,
    ) -> None:
        """Publish a bool mask of positions the memory layers should store.

        Non-first chunks carry a one-token overlap at position 0 (for the
        causal label shift); that position is always excluded from storage.
        """
        if not self.use_memory:
            self.qwen.model._mem_store_mask = None
            return
        if chunk_mask is None:
            if chunk_start > 0:
                store_mask = torch.ones_like(chunk_ids, dtype=torch.bool)
                store_mask[:, 0] = False
            else:
                store_mask = None
        else:
            store_mask = chunk_mask.to(device=chunk_ids.device).bool()
            if chunk_start > 0:
                store_mask[:, 0] = False
        self.qwen.model._mem_store_mask = store_mask

    def get_memory_modules(self) -> List[nn.Module]:
        """Return the trainable memory modules (neural memory + gate) per layer."""
        if not self._memory_layers:
            return []
        modules = []
        for layer in self._memory_layers:
            modules.append(layer.neural_memory)
            modules.append(layer.mem_gate)
        return modules

    def forward(
        self,
        input_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        return_pred_tokens: bool = False,
        topk: int = 0,
        chunkwise_backward: bool = False,
        loss_scale: Optional[float] = None,
        backward_fn: Optional[Callable[[torch.Tensor], None]] = None,
        chunk_start: Optional[int] = None,
        chunk_end: Optional[int] = None,
        reset_mem_state: bool = False,
    ) -> Dict[str, torch.Tensor]:
        """Chunked forward over the full sequence, returning mean token loss.

        When `chunk_start`/`chunk_end` is given, delegates to
        `_forward_single_chunk` (used by the trainer's chunkwise-backward
        loop). NOTE(review): `chunkwise_backward`, `loss_scale` and
        `backward_fn` are accepted but unused in this code path — presumably
        consumed by the trainer; confirm before removing.
        """
        if chunk_start is not None or chunk_end is not None:
            start = 0 if chunk_start is None else int(chunk_start)
            end = int(chunk_end) if chunk_end is not None else None
            return self._forward_single_chunk(
                input_ids=input_ids,
                attention_mask=attention_mask,
                labels=labels,
                chunk_start=start,
                chunk_end=end,
                reset_mem_state=reset_mem_state,
            )
        batch_size, seq_len = input_ids.shape
        chunk_size = self.config.chunk_size
        chunks = self._split_into_chunks(input_ids, chunk_size)

        # Fresh memory per sequence; accumulate a sum-loss so the final mean
        # is exact across chunks of different sizes.
        self.reset_memory_states()
        loss_fct_sum = nn.CrossEntropyLoss(reduction="sum")
        total_loss_sum = None
        total_loss_tokens = 0
        topk_correct = None
        topk_total = None

        pred_tokens_by_sample: List[List[int]] = [[] for _ in range(batch_size)]
        target_tokens_by_sample: List[List[int]] = [[] for _ in range(batch_size)]
        if topk and topk > 0:
            device = input_ids.device
            topk_correct = torch.tensor(0.0, device=device, dtype=torch.float32)
            topk_total = torch.tensor(0.0, device=device, dtype=torch.float32)

        for start, end, _ in chunks:
            # Include one token of left overlap so the causal shift below has
            # a predecessor for the chunk's first label.
            proc_start = max(0, start - 1)
            chunk_ids = input_ids[:, proc_start:end]
            chunk_labels = labels[:, proc_start:end] if labels is not None else None
            chunk_mask = attention_mask[:, proc_start:end] if attention_mask is not None else None

            self._set_mem_store_mask(chunk_ids, chunk_mask, start)
            hidden_full = self._process_chunk(chunk_ids, chunk_mask)
            if self.use_memory:
                self.qwen.model._mem_store_mask = None

            if chunk_labels is not None and (chunk_labels != -100).any():
                chunk_labels_local = chunk_labels.to(device=hidden_full.device)
                # Standard causal shift: hidden[t] predicts label[t+1].
                shift_hidden = hidden_full[:, :-1, :].contiguous()
                shift_labels = chunk_labels_local[:, 1:].contiguous()

                valid = shift_labels != -100
                if valid.any():
                    # Only project the supervised positions through lm_head
                    # (saves memory vs. full-vocab logits for every token).
                    hs = shift_hidden[valid]
                    targets = shift_labels[valid]

                    # Guard against NaN/Inf leaking out of the memory path.
                    hs = torch.nan_to_num(hs.float(), nan=0.0, posinf=0.0, neginf=0.0)

                    logits = self.qwen.lm_head(hs)
                    logits = logits.float()
                    logits = torch.nan_to_num(logits, nan=0.0, posinf=0.0, neginf=0.0)
                    targets = targets.to(device=logits.device)

                    chunk_loss_sum = loss_fct_sum(logits, targets)
                    if total_loss_sum is None:
                        total_loss_sum = chunk_loss_sum
                    else:
                        total_loss_sum = total_loss_sum + chunk_loss_sum
                    total_loss_tokens += targets.numel()

                    if topk and topk > 0:
                        k = min(int(topk), logits.shape[-1])
                        topk_ids = torch.topk(logits, k=k, dim=-1).indices
                        correct = (topk_ids == targets.unsqueeze(-1)).any(dim=-1)
                        topk_correct = topk_correct + correct.float().sum()
                        topk_total = topk_total + torch.tensor(float(targets.numel()), device=topk_total.device)

                    if return_pred_tokens:
                        # Map flat predictions back to their sample index.
                        idx = valid.nonzero(as_tuple=False)
                        pred_flat = torch.argmax(logits, dim=-1).detach().to("cpu", dtype=torch.long).tolist()
                        tgt_flat = targets.detach().to("cpu", dtype=torch.long).tolist()
                        b_idx_flat = idx[:, 0].detach().to("cpu", dtype=torch.long).tolist()

                        for i, b_idx in enumerate(b_idx_flat):
                            pred_tokens_by_sample[b_idx].append(int(pred_flat[i]))
                            target_tokens_by_sample[b_idx].append(int(tgt_flat[i]))

        if total_loss_sum is None or total_loss_tokens == 0:
            device = next(self.qwen.parameters()).device
            loss = torch.zeros((), device=device, dtype=torch.float32)
        else:
            loss = total_loss_sum / total_loss_tokens

        out: Dict[str, torch.Tensor] = {"loss": loss}
        if return_pred_tokens:
            # Pack ragged per-sample token lists into -1-padded matrices.
            lengths = torch.tensor([len(x) for x in target_tokens_by_sample], dtype=torch.long)
            max_len = int(lengths.max().item()) if lengths.numel() > 0 else 0
            if max_len > 0:
                pred_mat = torch.full((batch_size, max_len), -1, dtype=torch.long)
                tgt_mat = torch.full((batch_size, max_len), -1, dtype=torch.long)
                for b in range(batch_size):
                    L = int(lengths[b].item())
                    if L > 0:
                        pred_mat[b, :L] = torch.tensor(pred_tokens_by_sample[b], dtype=torch.long)
                        tgt_mat[b, :L] = torch.tensor(target_tokens_by_sample[b], dtype=torch.long)
            else:
                pred_mat = torch.empty((batch_size, 0), dtype=torch.long)
                tgt_mat = torch.empty((batch_size, 0), dtype=torch.long)
            out["pred_ids"] = pred_mat
            out["target_ids"] = tgt_mat
            out["target_lengths"] = lengths
        if topk and topk > 0 and topk_correct is not None and topk_total is not None:
            out["topk_correct"] = topk_correct
            out["topk_total"] = topk_total
        return out

    def _forward_single_chunk(
        self,
        input_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor],
        labels: Optional[torch.Tensor],
        chunk_start: int,
        chunk_end: Optional[int],
        reset_mem_state: bool,
    ) -> Dict[str, torch.Tensor]:
        """Process one chunk and return its summed loss and token count.

        Used by the trainer's chunkwise-backward loop: the caller divides
        `loss_sum` by the total token count and calls backward per chunk.
        """
        if reset_mem_state:
            self.reset_memory_states()

        seq_len = input_ids.shape[1]
        end = chunk_end if chunk_end is not None else min(chunk_start + self.config.chunk_size, seq_len)
        end = min(int(end), seq_len)
        start = max(0, int(chunk_start))

        # One token of left overlap for the causal shift (as in forward()).
        proc_start = max(0, start - 1)
        chunk_ids = input_ids[:, proc_start:end]
        chunk_labels = labels[:, proc_start:end] if labels is not None else None
        chunk_mask = attention_mask[:, proc_start:end] if attention_mask is not None else None

        self._set_mem_store_mask(chunk_ids, chunk_mask, start)
        hidden_full = self._process_chunk(chunk_ids, chunk_mask)
        if self.use_memory:
            self.qwen.model._mem_store_mask = None

        loss_fct_sum = nn.CrossEntropyLoss(reduction="sum")
        total_loss_sum = None
        total_loss_tokens = 0

        if chunk_labels is not None and (chunk_labels != -100).any():
            chunk_labels_local = chunk_labels.to(device=hidden_full.device)
            shift_hidden = hidden_full[:, :-1, :].contiguous()
            shift_labels = chunk_labels_local[:, 1:].contiguous()

            valid = shift_labels != -100
            if valid.any():
                hs = shift_hidden[valid]
                targets = shift_labels[valid]

                hs = torch.nan_to_num(hs.float(), nan=0.0, posinf=0.0, neginf=0.0)
                logits = self.qwen.lm_head(hs)
                logits = logits.float()
                logits = torch.nan_to_num(logits, nan=0.0, posinf=0.0, neginf=0.0)
                targets = targets.to(device=logits.device)

                total_loss_sum = loss_fct_sum(logits, targets)
                total_loss_tokens = targets.numel()

        if total_loss_sum is None:
            # Zero loss that is still connected to the graph so backward()
            # (and DDP gradient sync) has something to traverse.
            total_loss_sum = (hidden_full.float().sum() * 0.0)

        return {
            "loss_sum": total_loss_sum,
            "loss_tokens": total_loss_tokens,
            "has_grad": True,
        }

    def _process_chunk(
        self,
        chunk_ids: torch.Tensor,
        chunk_attention_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Run one token chunk through the Qwen backbone (no KV cache) and
        return the final hidden states."""
        if hasattr(self.qwen.model, "embed_tokens"):
            token_embeds = self.qwen.model.embed_tokens(chunk_ids)
        else:
            token_embeds = self.qwen.get_input_embeddings()(chunk_ids)

        outputs = self.qwen.model(
            inputs_embeds=token_embeds,
            attention_mask=chunk_attention_mask,
            use_cache=False,
            output_hidden_states=False,
            return_dict=True,
        )
        return outputs.last_hidden_state

    def freeze_base_model(self):
        """
        Freeze most parameters of the Qwen base model, keeping the memory
        modules and an independent lm_head trainable.

        Trainable parameters:
        - neural_memory: Q/K/V projections, adaptive lr, etc. (the memory MLP
          itself is updated through the surprise-driven forward pass)
        - mem_gate: mixes the memory output with the original layer output
        - lm_head: independent output layer (tied weights are untied first)

        Frozen parameters:
        - qwen.model.embed_tokens (keeps the input distribution unchanged!)
        - qwen.model.layers (everything except neural_memory and mem_gate)
        - qwen.model.norm
        """
        frozen_count = 0
        trainable_count = 0
        lm_head_count = 0

        # If lm_head shares storage with embed_tokens (weight tying), clone it
        # into an independent Linear so training lm_head cannot drift the
        # (frozen) input embeddings.
        if hasattr(self.qwen, 'lm_head') and hasattr(self.qwen.model, 'embed_tokens'):
            lm_head_weight = self.qwen.lm_head.weight
            embed_weight = self.qwen.model.embed_tokens.weight
            has_tied_weights = lm_head_weight.data_ptr() == embed_weight.data_ptr()

            if has_tied_weights:
                logger.info("[freeze_base_model] Detected tied weights - untying lm_head from embed_tokens")

                new_lm_head = nn.Linear(
                    self.qwen.lm_head.in_features,
                    self.qwen.lm_head.out_features,
                    bias=self.qwen.lm_head.bias is not None,
                    device=lm_head_weight.device,
                    dtype=lm_head_weight.dtype,
                )

                with torch.no_grad():
                    new_lm_head.weight.copy_(lm_head_weight)
                    if self.qwen.lm_head.bias is not None and new_lm_head.bias is not None:
                        new_lm_head.bias.copy_(self.qwen.lm_head.bias)

                self.qwen.lm_head = new_lm_head
                logger.info(f"[freeze_base_model] Created independent lm_head: {new_lm_head.weight.shape}")

        for name, param in self.named_parameters():
            # Classification by parameter name (memory modules / lm_head / rest).
            is_memory = "neural_memory" in name or "mem_gate" in name
            is_lm_head = "lm_head" in name

            if is_memory:
                param.requires_grad = True
                trainable_count += 1
            elif is_lm_head:
                param.requires_grad = True
                trainable_count += 1
                lm_head_count += 1
                logger.info(f"[freeze_base_model] lm_head param: {name}")
            else:
                param.requires_grad = False
                frozen_count += 1

        logger.info(f"[freeze_base_model] Frozen: {frozen_count}, Trainable: {trainable_count} (lm_head: {lm_head_count})")
        return self

    def get_param_groups(self, lr_memory: float, lr_pretrained: float, weight_decay: float):
        """Split trainable parameters into memory vs. pretrained optimizer
        groups so each can use its own learning rate."""
        memory_params = []
        pretrained_params = []

        for name, param in self.named_parameters():
            if not param.requires_grad:
                continue
            if "neural_memory" in name or "mem_gate" in name:
                memory_params.append(param)
            else:
                pretrained_params.append(param)

        param_groups = []
        if len(memory_params) > 0:
            param_groups.append(
                {"params": memory_params, "lr": lr_memory, "weight_decay": weight_decay, "name": "memory_module"}
            )
        if len(pretrained_params) > 0:
            param_groups.append(
                {"params": pretrained_params, "lr": lr_pretrained, "weight_decay": weight_decay, "name": "pretrained"}
            )
        logger.info(f"Param groups: memory={len(memory_params)}, pretrained={len(pretrained_params)}")
        return param_groups
| |
|
| |
|
def init_distributed() -> tuple[bool, int, int, int]:
    """Initialize the NCCL process group from torchrun environment variables.

    Returns ``(is_distributed, rank, local_rank, world_size)``; the
    ``(False, 0, 0, 1)`` tuple means the script is running without a launcher.
    """
    env = os.environ
    if "RANK" not in env or "WORLD_SIZE" not in env:
        return False, 0, 0, 1

    rank = int(env["RANK"])
    world_size = int(env["WORLD_SIZE"])
    local_rank = int(env.get("LOCAL_RANK", 0))

    if not dist.is_available():
        raise RuntimeError("torch.distributed not available")
    if not dist.is_initialized():
        dist.init_process_group(backend="nccl", init_method="env://")

    # Pin this process to its local GPU before any CUDA work.
    torch.cuda.set_device(local_rank)
    return True, rank, local_rank, world_size
| |
|
| |
|
def cleanup_distributed():
    """Synchronize all ranks, then tear down the process group.

    No-op when torch.distributed is unavailable or was never initialized.
    """
    if not (dist.is_available() and dist.is_initialized()):
        return
    dist.barrier()
    dist.destroy_process_group()
| |
|
| |
|
def unwrap_model(model: nn.Module) -> nn.Module:
    """Return the module wrapped inside a DDP/FSDP container, else *model* itself."""
    # DDP (and FSDP, via its `module` property) expose the inner model here.
    if hasattr(model, "module"):
        return model.module
    # Fallback for an FSDP internal attribute layout.
    wrapped = getattr(model, "_fsdp_wrapped_module", None)
    if wrapped is not None and hasattr(wrapped, "module"):
        return wrapped.module
    return model
| |
|
| |
|
def is_fsdp_model(model: nn.Module) -> bool:
    """True when *model* is an FSDP wrapper; False also when FSDP cannot be imported."""
    try:
        from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
    except Exception:
        return False
    return isinstance(model, FSDP)
| |
|
| |
|
| | class Trainer: |
| | def __init__( |
| | self, |
| | model: QwenTitansForBABILong, |
| | train_dataloader: DataLoader, |
| | eval_dataloader: DataLoader, |
| | config: TrainingConfig, |
| | rank: int = 0, |
| | world_size: int = 1, |
| | is_distributed: bool = False, |
| | tokenizer=None, |
| | ): |
| | self.model = model |
| | self.train_dataloader = train_dataloader |
| | self.eval_dataloader = eval_dataloader |
| | self.config = config |
| | self.device = next(model.parameters()).device |
| | self.rank = rank |
| | self.world_size = world_size |
| | self.is_distributed = is_distributed |
| | self.is_main_process = (rank == 0) |
| | self.tokenizer = tokenizer |
| |
|
| | base_model = unwrap_model(self.model) |
| | param_groups = base_model.get_param_groups( |
| | lr_memory=config.lr_memory, |
| | lr_pretrained=config.lr_pretrained, |
| | weight_decay=config.weight_decay, |
| | ) |
| | self.optimizer = AdamW(param_groups) |
| |
|
| | total_steps = math.ceil( |
| | (len(train_dataloader) * config.num_epochs) / max(config.gradient_accumulation_steps, 1) |
| | ) |
| | self.scheduler = CosineAnnealingLR(self.optimizer, T_max=total_steps, eta_min=1e-7) |
| |
|
| | self.scaler = torch.cuda.amp.GradScaler(enabled=config.fp16) |
| | self.global_step = 0 |
| |
|
| | def _get_group_lr(self, group_name: str) -> Optional[float]: |
| | for group in self.optimizer.param_groups: |
| | if group.get("name") == group_name: |
| | return group.get("lr") |
| | return None |
| |
|
    def train(self):
        """Run the full training loop.

        Per epoch: iterate batches, run forward/backward either chunk-by-chunk
        (``config.chunkwise_backward``) or on the whole sequence, apply gradient
        accumulation + clipping + AMP scaling, step optimizer/scheduler, and do
        periodic and end-of-epoch evaluation. Finishes with a final evaluation
        and checkpoint save.
        """
        self.model.train()
        if self.is_main_process:
            logger.info("Start training")

        last_epoch_loss = None
        for epoch in range(self.config.num_epochs):
            # DistributedSampler must be told the epoch so each epoch reshuffles.
            sampler = getattr(self.train_dataloader, "sampler", None)
            if sampler is not None and hasattr(sampler, "set_epoch"):
                sampler.set_epoch(epoch)
            if self.is_main_process:
                logger.info(f"Epoch {epoch + 1}/{self.config.num_epochs}")

            epoch_loss = 0.0
            num_batches = 0

            # Only rank 0 wraps the dataloader in a tqdm progress bar.
            pbar = self.train_dataloader
            if self.is_main_process:
                pbar = tqdm(
                    self.train_dataloader,
                    desc=f"Epoch {epoch + 1}/{self.config.num_epochs}",
                    leave=False,
                    dynamic_ncols=True,
                )
            for step, batch in enumerate(pbar):
                batch = {k: v.to(self.device) for k, v in batch.items()}
                # Optional debug dump of token counts for the first few batches
                # (rank 0 only, controlled by config.debug_label_batches).
                if (
                    self.config.debug_label_batches > 0
                    and self.is_main_process
                    and step < int(self.config.debug_label_batches)
                ):
                    labels = batch.get("labels")
                    if labels is not None:
                        label_tokens = int((labels != -100).sum().item())
                        # Loss is computed on causally shifted targets, so the
                        # first position never contributes.
                        loss_tokens = int((labels[:, 1:] != -100).sum().item()) if labels.size(1) > 1 else 0
                        attn_tokens = int(batch["attention_mask"].sum().item())
                        logger.info(
                            f"[BATCH DEBUG] epoch={epoch + 1} step={step + 1}: "
                            f"attn_tokens={attn_tokens}, label_tokens={label_tokens}, loss_tokens={loss_tokens}"
                        )
                    else:
                        logger.info(f"[BATCH DEBUG] epoch={epoch + 1} step={step + 1}: labels missing")

                ga = max(self.config.gradient_accumulation_steps, 1)
                # Only every ga-th batch performs an optimizer step (and, under
                # DDP, a gradient all-reduce).
                sync_gradients = ((step + 1) % ga == 0)
                amp_enabled = self.config.fp16 or self.config.bf16
                amp_dtype = torch.float16 if self.config.fp16 else torch.bfloat16
                with torch.amp.autocast(device_type=self.device.type, enabled=amp_enabled, dtype=amp_dtype):
                    if self.config.chunkwise_backward:
                        # Chunkwise path: forward + backward one sequence chunk
                        # at a time so activations for the whole 32k sequence
                        # never coexist in memory.
                        labels = batch.get("labels")
                        if labels is not None:
                            # Supervised (non -100) target positions after the shift.
                            total_tokens = int((labels[:, 1:] != -100).sum().item())
                        else:
                            total_tokens = 0
                        # Each chunk backwards loss_sum * loss_scale; summed over
                        # chunks this equals (mean token loss) / ga.
                        loss_scale = 0.0 if total_tokens == 0 else (1.0 / total_tokens / ga)

                        seq_len = batch["input_ids"].shape[1]
                        chunk_size = int(self.config.chunk_size)
                        chunk_ranges = [
                            (start, min(start + chunk_size, seq_len))
                            for start in range(0, seq_len, chunk_size)
                        ]
                        raw_loss_sum = None

                        for idx, (start, end) in enumerate(chunk_ranges):
                            is_last_chunk = (idx == len(chunk_ranges) - 1)
                            # Under DDP, only the last chunk of an accumulation
                            # window is allowed to sync gradients.
                            sync_chunk = sync_gradients and is_last_chunk
                            chunk_ctx = (
                                self.model.no_sync
                                if (self.is_distributed and not sync_chunk)
                                else nullcontext
                            )
                            with chunk_ctx():
                                outputs = self.model(
                                    input_ids=batch["input_ids"],
                                    attention_mask=batch["attention_mask"],
                                    labels=labels,
                                    chunk_start=start,
                                    chunk_end=end,
                                    # Reset Titans memory state at the first chunk
                                    # of each sample.
                                    reset_mem_state=(idx == 0),
                                )
                                # "loss_sum" is presumably the summed (unreduced)
                                # token loss over this chunk — defined by the
                                # wrapped model, not visible here.
                                chunk_loss_sum = outputs["loss_sum"]
                                chunk_loss_tokens = int(outputs.get("loss_tokens", 0))
                                # Detached running sum, kept for logging only.
                                if raw_loss_sum is None:
                                    raw_loss_sum = chunk_loss_sum.detach()
                                else:
                                    raw_loss_sum = raw_loss_sum + chunk_loss_sum.detach()

                                scaled_loss = chunk_loss_sum * float(loss_scale)
                                if self.config.fp16:
                                    self.scaler.scale(scaled_loss).backward()
                                else:
                                    scaled_loss.backward()

                        # Reconstruct the mean loss for bookkeeping; gradients were
                        # already produced chunk by chunk above.
                        if raw_loss_sum is None or total_tokens == 0:
                            raw_loss = torch.zeros((), device=self.device, dtype=torch.float32)
                        else:
                            raw_loss = raw_loss_sum / total_tokens
                        loss = raw_loss / ga
                    else:
                        # Full-sequence path: one forward/backward; suppress DDP
                        # sync on non-accumulation batches.
                        ctx = self.model.no_sync if (self.is_distributed and not sync_gradients) else nullcontext
                        with ctx():
                            outputs = self.model(
                                input_ids=batch["input_ids"],
                                attention_mask=batch["attention_mask"],
                                labels=batch["labels"],
                            )
                            raw_loss = outputs["loss"]
                            loss = raw_loss / ga

                            if self.config.fp16:
                                self.scaler.scale(loss).backward()
                            else:
                                loss.backward()

                epoch_loss += raw_loss.detach().float().item()
                num_batches += 1

                if sync_gradients:
                    grad_norm = None
                    if self.config.fp16:
                        # Unscale before clipping so max_grad_norm applies to the
                        # true (unscaled) gradients.
                        self.scaler.unscale_(self.optimizer)
                        grad_norm = torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.max_grad_norm)
                        self.scaler.step(self.optimizer)
                        self.scaler.update()
                    else:
                        grad_norm = torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.max_grad_norm)
                        self.optimizer.step()

                    self.scheduler.step()
                    self.optimizer.zero_grad(set_to_none=True)
                    self.global_step += 1

                    if self.is_main_process:
                        avg_loss = epoch_loss / max(num_batches, 1)
                        pbar.set_postfix(
                            {
                                "gstep": self.global_step,
                                "loss": f"{avg_loss:.4f}",
                            }
                        )

                    if self.global_step % self.config.logging_steps == 0 and self.is_main_process:
                        lr_mem = self._get_group_lr("memory_module")
                        lr_pre = self._get_group_lr("pretrained")
                        # Fall back to the first param group when groups are unnamed.
                        if lr_pre is None and self.optimizer.param_groups:
                            lr_pre = self.optimizer.param_groups[0]["lr"]
                        grad_note = ""
                        if self.config.debug_grad_norm and grad_norm is not None:
                            grad_note = f" | grad_norm={float(grad_norm):.4f}"
                        if lr_mem is None:
                            lr_label = f"lr={lr_pre:.2e}" if lr_pre is not None else "lr=NA"
                            logger.info(
                                f"Step {self.global_step} | loss={epoch_loss / max(num_batches, 1):.4f} | "
                                f"{lr_label}{grad_note}"
                            )
                        else:
                            logger.info(
                                f"Step {self.global_step} | loss={epoch_loss / max(num_batches, 1):.4f} | "
                                f"lr_mem={lr_mem:.2e} | lr_pre={lr_pre:.2e}{grad_note}"
                            )

                    if self.global_step % self.config.eval_steps == 0:
                        # Mid-epoch eval runs on all ranks (evaluate() all-reduces).
                        eval_metrics = self.evaluate()
                        if self.is_main_process:
                            logger.info(
                                f"Step {self.global_step}: "
                                f"eval_loss={eval_metrics['loss']:.4f}, "
                                f"em_acc={eval_metrics['em_acc'] * 100:.2f}%, "
                                f"tok_acc={eval_metrics['tok_acc'] * 100:.2f}%"
                            )
                        self.model.train()

            # Average the per-rank epoch loss across processes for reporting.
            avg_epoch_loss = epoch_loss / max(num_batches, 1)
            if self.is_distributed:
                t = torch.tensor(avg_epoch_loss, device=self.device, dtype=torch.float32)
                dist.all_reduce(t, op=dist.ReduceOp.SUM)
                avg_epoch_loss = (t / self.world_size).item()

            if self.is_main_process:
                logger.info(f"Epoch {epoch + 1} done, avg loss={avg_epoch_loss:.4f}")
            last_epoch_loss = avg_epoch_loss

            # End-of-epoch evaluation (all ranks participate).
            eval_metrics = self.evaluate()
            if self.is_main_process:
                logger.info(
                    f"[EPOCH {epoch + 1} EVAL] "
                    f"eval_loss={eval_metrics['loss']:.4f}, "
                    f"em_acc={eval_metrics['em_acc'] * 100:.2f}%, "
                    f"tok_acc={eval_metrics['tok_acc'] * 100:.2f}%"
                )
                self._append_eval_metrics(
                    eval_metrics,
                    phase="epoch",
                    epoch=int(epoch + 1),
                    train_avg_loss=avg_epoch_loss,
                )
            self.model.train()

        if self.is_main_process:
            logger.info("Training done, final evaluation")

        final_eval = self.evaluate(print_examples=int(self.config.final_eval_print_examples))
        if self.is_main_process:
            # Clamp the exponent so a diverged loss cannot overflow exp().
            ppl = float(math.exp(min(20.0, final_eval["loss"])))
            logger.info(
                f"[FINAL EVAL] loss={final_eval['loss']:.4f}, ppl≈{ppl:.3f}, "
                f"em_acc={final_eval['em_acc'] * 100:.2f}%, "
                f"tok_acc={final_eval['tok_acc'] * 100:.2f}%"
            )
            logger.info("Saving final checkpoint")
            self._append_eval_metrics(
                final_eval,
                phase="final",
                epoch=int(self.config.num_epochs),
                train_avg_loss=last_epoch_loss,
            )
        # Must run on every rank: save_final_checkpoint() calls dist.barrier().
        self.save_final_checkpoint()
| |
|
    @torch.no_grad()
    def evaluate(self, print_examples: int = 0) -> Dict[str, float]:
        """Evaluate on ``self.eval_dataloader``.

        All counters are kept as device tensors so they can be all-reduced
        across ranks under DDP. Token / exact-match / top-k accuracies are
        computed over answer tokens only, as reported by the model's outputs.

        Args:
            print_examples: number of decoded pred/label pairs to log (rank 0).

        Returns:
            Dict with keys "loss", "tok_acc", "em_acc", "topk_acc".
        """
        self.model.eval()
        total_loss = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        total_batches = torch.tensor(0.0, device=self.device, dtype=torch.float32)

        # Answer-level accuracy accumulators (float tensors for all_reduce).
        total_tok_correct = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        total_tok_total = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        total_em_correct = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        total_em_total = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        total_topk_correct = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        total_topk_total = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        printed = 0

        for batch in self.eval_dataloader:
            batch = {k: v.to(self.device) for k, v in batch.items()}
            amp_enabled = self.config.fp16 or self.config.bf16
            amp_dtype = torch.float16 if self.config.fp16 else torch.bfloat16
            with torch.amp.autocast(device_type=self.device.type, enabled=amp_enabled, dtype=amp_dtype):
                outputs = self.model(
                    input_ids=batch["input_ids"],
                    attention_mask=batch["attention_mask"],],
                    labels=batch["labels"],
                    return_pred_tokens=True,
                    topk=int(self.config.eval_topk) if self.config.eval_topk else 0,
                )

            # Skip non-finite losses so one bad batch does not poison the average.
            if torch.isfinite(outputs["loss"]):
                total_loss += outputs["loss"].detach().float()
                total_batches += 1.0

            pred_ids = outputs.get("pred_ids", None)
            target_ids = outputs.get("target_ids", None)
            lengths = outputs.get("target_lengths", None)
            topk_correct = outputs.get("topk_correct", None)
            topk_total = outputs.get("topk_total", None)
            if topk_correct is not None and topk_total is not None:
                total_topk_correct += topk_correct.detach().float()
                total_topk_total += topk_total.detach().float()
            # Defensive shape checks before per-sample decoding. Assumes
            # pred/target are (batch, answer_len) and lengths is (batch,) —
            # produced by the wrapped model, not visible here; TODO confirm.
            if (
                pred_ids is not None
                and target_ids is not None
                and lengths is not None
                and pred_ids.ndim == 2
                and target_ids.ndim == 2
                and lengths.ndim == 1
                and pred_ids.shape == target_ids.shape
                and pred_ids.shape[0] == lengths.shape[0]
            ):
                pred_cpu = pred_ids.to("cpu", dtype=torch.long)
                tgt_cpu = target_ids.to("cpu", dtype=torch.long)
                len_cpu = lengths.to("cpu", dtype=torch.long)

                for i in range(int(len_cpu.shape[0])):
                    L = int(len_cpu[i].item())
                    if L <= 0:
                        continue
                    # Compare only the first L (valid) answer positions.
                    p = pred_cpu[i, :L]
                    t = tgt_cpu[i, :L]

                    total_tok_correct += torch.tensor(float((p == t).sum().item()), device=self.device, dtype=torch.float32)
                    total_tok_total += torch.tensor(float(L), device=self.device, dtype=torch.float32)

                    if self.tokenizer is not None:
                        # Exact match compares whitespace-stripped decoded strings.
                        pred_text = self.tokenizer.decode(p.tolist(), skip_special_tokens=True).strip()
                        tgt_text = self.tokenizer.decode(t.tolist(), skip_special_tokens=True).strip()
                        em = float(pred_text == tgt_text)
                        total_em_correct += torch.tensor(em, device=self.device, dtype=torch.float32)
                        total_em_total += torch.tensor(1.0, device=self.device, dtype=torch.float32)

                        if self.is_main_process and printed < print_examples:
                            logger.info(f"[EVAL SAMPLE] pred={repr(pred_text)} | label={repr(tgt_text)} | match={bool(em)}")
                            printed += 1

        # Aggregate counters across ranks before computing ratios.
        if self.is_distributed:
            dist.all_reduce(total_loss, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_batches, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_tok_correct, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_tok_total, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_em_correct, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_em_total, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_topk_correct, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_topk_total, op=dist.ReduceOp.SUM)

        # clamp(min=1) avoids division by zero when a denominator is empty.
        avg_loss = (total_loss / total_batches.clamp(min=1.0)).item()
        tok_acc = (total_tok_correct / total_tok_total.clamp(min=1.0)).item()
        em_acc = (total_em_correct / total_em_total.clamp(min=1.0)).item()
        topk_acc = (total_topk_correct / total_topk_total.clamp(min=1.0)).item()
        if self.is_main_process:
            if self.config.debug_eval_stats:
                logger.info(
                    "[EVAL DEBUG] total_batches="
                    f"{float(total_batches.item()):.0f}, total_tok_total={float(total_tok_total.item()):.0f}, "
                    f"total_em_total={float(total_em_total.item()):.0f}, "
                    f"total_topk_total={float(total_topk_total.item()):.0f}"
                )
                if total_tok_total.item() == 0:
                    logger.warning("[EVAL DEBUG] No answer tokens found in eval set; acc will be 0.")
            logger.info(f"[EVAL METRIC] token_acc(answer-only) = {tok_acc * 100:.2f}%")
            logger.info(f"[EVAL METRIC] EM/acc(answer-only) = {em_acc * 100:.2f}%")
            if self.config.eval_topk and self.config.eval_topk > 0:
                logger.info(f"[EVAL METRIC] top{int(self.config.eval_topk)}_acc(answer-only) = {topk_acc * 100:.2f}%")
        return {"loss": avg_loss, "tok_acc": tok_acc, "em_acc": em_acc, "topk_acc": topk_acc}
| |
|
| | def _append_eval_metrics( |
| | self, |
| | metrics: Dict[str, float], |
| | *, |
| | phase: str, |
| | epoch: Optional[int], |
| | train_avg_loss: Optional[float], |
| | ) -> None: |
| | if not self.is_main_process: |
| | return |
| | os.makedirs(self.config.output_dir, exist_ok=True) |
| | record = { |
| | "phase": phase, |
| | "epoch": epoch, |
| | "global_step": int(self.global_step), |
| | "train_avg_loss": None if train_avg_loss is None else float(train_avg_loss), |
| | "eval_loss": float(metrics.get("loss", 0.0)), |
| | "em_acc_pct": float(metrics.get("em_acc", 0.0) * 100.0), |
| | "tok_acc_pct": float(metrics.get("tok_acc", 0.0) * 100.0), |
| | } |
| | metrics_path = os.path.join(self.config.output_dir, "eval_metrics.jsonl") |
| | with open(metrics_path, "a") as f: |
| | f.write(json.dumps(record) + "\n") |
| |
|
    def save_final_checkpoint(self):
        """Save the memory-module checkpoint (and optionally the full model).

        Must be called on ALL ranks when distributed: it calls dist.barrier()
        after each save, and FSDP full-state-dict gathering is collective.
        Only rank 0 actually writes files.
        """
        ckpt_path = os.path.join(self.config.output_dir, self.config.final_ckpt_name)
        base_model = unwrap_model(self.model)
        # Memory-only state: Titans neural_memory weights and gate parameters.
        memory_sd = {
            name: p.detach().cpu()
            for name, p in base_model.named_parameters()
            if ("neural_memory" in name) or ("mem_gate" in name)
        }

        # Under FSDP the flat-parameter view may hide the memory params from
        # named_parameters(); fall back to gathering the full state dict
        # (rank0_only) and filtering it.
        if is_fsdp_model(self.model) and len(memory_sd) == 0:
            from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, StateDictType, FullStateDictConfig
            full_cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
            with FSDP.state_dict_type(self.model, StateDictType.FULL_STATE_DICT, full_cfg):
                full_sd = self.model.state_dict()
            memory_sd = {k: v for k, v in full_sd.items() if ("neural_memory" in k) or ("mem_gate" in k)}

        if self.is_main_process:
            torch.save(
                {"memory_state_dict": memory_sd, "global_step": self.global_step, "config": asdict(self.config)},
                ckpt_path,
            )
            logger.info(f"Saved memory checkpoint: {ckpt_path}")
        if self.is_distributed:
            dist.barrier()

        # Optionally also save the complete model state dict.
        if self.config.save_full_checkpoint:
            full_ckpt_path = os.path.join(self.config.output_dir, self.config.final_full_ckpt_name)
            if is_fsdp_model(self.model):
                from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, StateDictType, FullStateDictConfig
                full_cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
                with FSDP.state_dict_type(self.model, StateDictType.FULL_STATE_DICT, full_cfg):
                    full_sd = self.model.state_dict()
            else:
                full_sd = unwrap_model(self.model).state_dict()

            if self.is_main_process:
                torch.save(
                    {"model_state_dict": full_sd, "global_step": self.global_step, "config": asdict(self.config)},
                    full_ckpt_path,
                )
                logger.info(f"Saved full checkpoint: {full_ckpt_path}")
            if self.is_distributed:
                dist.barrier()
| |
|
| |
|
def main():
    """CLI entry point.

    Parses arguments, overlays them onto TrainingConfig, initializes optional
    distributed training, loads the Qwen model and BABILong data, wraps the
    model with Titans memory, then either evaluates a checkpoint (--eval_only)
    or runs the full training loop.
    """
    from transformers import AutoModelForCausalLM, AutoTokenizer

    parser = argparse.ArgumentParser()
    parser.add_argument("--fsdp", action="store_true")
    parser.add_argument("--eval_only", action="store_true")
    parser.add_argument("--ckpt_path", type=str, default=None)
    parser.add_argument("--max_eval_samples", type=int, default=None)
    parser.add_argument("--max_samples", type=int, default=None)
    parser.add_argument("--max_length", type=int, default=None)
    parser.add_argument("--output_dir", type=str, default=None)
    parser.add_argument("--num_epochs", type=int, default=None)
    parser.add_argument("--eval_steps", type=int, default=None)
    parser.add_argument("--eval_topk", type=int, default=0)
    parser.add_argument("--batch_size", type=int, default=None)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=None)
    parser.add_argument("--chunk_size", type=int, default=None)
    parser.add_argument("--memory_layer_stride", type=int, default=None)
    parser.add_argument("--no_memory", action="store_true")
    parser.add_argument("--gradient_checkpointing", action="store_true")
    parser.add_argument("--no_chunkwise_backward", action="store_true")
    parser.add_argument("--log_every_batches", type=int, default=80)
    parser.add_argument("--label_prefix_tokens", type=int, default=0)
    parser.add_argument(
        "--no_detach_mem_state",
        action="store_true",
        help="(ignored) kept for backward compatibility; detach_mem_state is forced True",
    )
    parser.add_argument("--debug_data_samples", type=int, default=0)
    parser.add_argument("--debug_label_batches", type=int, default=0)
    parser.add_argument("--debug_eval_stats", action="store_true")
    parser.add_argument("--debug_grad_norm", action="store_true")
    parser.add_argument(
        "--freeze_base_model",
        action="store_true",
        help="冻结 Qwen base 模型,只训练记忆模块 (neural_memory + mem_gate)",
    )
    args = parser.parse_args()

    # --- Overlay CLI overrides onto the dataclass defaults ---
    config = TrainingConfig()
    if args.fsdp:
        config.use_fsdp = True
    if args.no_memory:
        config.use_memory = False
    if args.max_samples is not None:
        config.max_samples = args.max_samples
    if args.max_length is not None:
        config.max_length = int(args.max_length)
    if args.output_dir is not None:
        config.output_dir = args.output_dir
    elif not config.use_memory:
        # Keep no-memory baselines in a separate default output dir.
        config.output_dir = "./outputs/qwen_babilong_no_memory"
    if args.num_epochs is not None:
        config.num_epochs = args.num_epochs
    if args.eval_steps is not None:
        config.eval_steps = args.eval_steps
    if args.eval_topk is not None:
        config.eval_topk = int(args.eval_topk)
    if args.batch_size is not None:
        config.batch_size = int(args.batch_size)
    if args.gradient_accumulation_steps is not None:
        config.gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    if args.chunk_size is not None:
        config.chunk_size = int(args.chunk_size)
    if args.memory_layer_stride is not None:
        config.memory_layer_stride = int(args.memory_layer_stride)
    if args.gradient_checkpointing:
        config.gradient_checkpointing = True
    if args.no_chunkwise_backward:
        config.chunkwise_backward = False
    # NOTE: these argparse defaults are 0/80, so the `is not None` guards
    # always pass; kept for symmetry with the other overrides.
    if args.label_prefix_tokens is not None:
        config.label_prefix_tokens = int(args.label_prefix_tokens)
    ignored_no_detach = bool(args.no_detach_mem_state)
    if args.log_every_batches is not None:
        config.log_every_batches = int(args.log_every_batches)
    # Convert "log every N batches" into "log every M optimizer steps".
    ga = max(int(config.gradient_accumulation_steps), 1)
    config.logging_steps = max(1, math.ceil(config.log_every_batches / ga))
    if args.debug_data_samples is not None:
        config.debug_data_samples = int(args.debug_data_samples)
    if args.debug_label_batches is not None:
        config.debug_label_batches = int(args.debug_label_batches)
    if args.debug_eval_stats:
        config.debug_eval_stats = True
    if args.debug_grad_norm:
        config.debug_grad_norm = True

    # --- Distributed setup & mode-compatibility guards ---
    is_distributed, rank, local_rank, world_size = init_distributed()
    is_main = (rank == 0)
    if ignored_no_detach and is_main:
        logger.warning("Ignoring --no_detach_mem_state; plan A keeps detach_mem_state=True.")

    if config.use_fsdp and config.chunkwise_backward:
        if is_main:
            logger.warning("chunkwise_backward is incompatible with FSDP; disabling it.")
        config.chunkwise_backward = False

    if is_distributed and (not config.use_fsdp) and config.gradient_checkpointing:
        config.gradient_checkpointing = False
        if is_main:
            logger.warning("gradient_checkpointing is unstable with DDP here; disabling it.")

    if is_distributed and (not config.use_fsdp) and config.chunkwise_backward:
        if is_main:
            logger.info("DDP chunkwise backward enabled via per-chunk forward/backward.")

    if is_distributed and (not config.use_fsdp):
        if not config.ddp_find_unused_parameters:
            config.ddp_find_unused_parameters = True
            if is_main:
                logger.warning("Enabling DDP find_unused_parameters to avoid unused grad errors.")

    # Per-rank seed offset so ranks don't generate identical randomness.
    torch.manual_seed(config.seed + rank)

    if torch.cuda.is_available():
        device = torch.device(f"cuda:{local_rank}" if is_distributed else "cuda")
    else:
        device = torch.device("cpu")

    # Fall back from bf16 to fp16 on GPUs/runtimes without bf16 support.
    if torch.cuda.is_available() and config.bf16:
        bf16_supported = False
        try:
            bf16_supported = torch.cuda.is_bf16_supported()
        except Exception:
            bf16_supported = False
        if not bf16_supported:
            if is_main:
                logger.warning("bf16 not supported on this GPU/runtime; falling back to fp16.")
            config.bf16 = False
            if not config.fp16:
                config.fp16 = True

    # Enable TF32 matmuls when requested (Ampere+ speedup for fp32 math).
    if torch.cuda.is_available() and getattr(config, "use_tf32", False):
        torch.backends.cuda.matmul.allow_tf32 = True
        torch.backends.cudnn.allow_tf32 = True
        try:
            torch.set_float32_matmul_precision("high")
        except Exception:
            pass

    # --- Startup banner (rank 0 only) ---
    if is_main:
        logger.info("=" * 60)
        logger.info("Qwen3-4B + Titans training (DDP/FSDP)")
        logger.info("=" * 60)
        logger.info(f"distributed={is_distributed}, world_size={world_size}, use_fsdp={config.use_fsdp}")
        logger.info(f"mode={'EVAL_ONLY' if args.eval_only else 'TRAIN'}")
        logger.info(f"model_path={config.model_path}")
        logger.info(f"data_path={config.data_path}")
        logger.info(f"output_dir={config.output_dir}")
        logger.info(f"max_samples={config.max_samples}")
        logger.info(f"max_length={config.max_length}")
        logger.info(f"chunk_size={config.chunk_size}")
        logger.info(f"use_memory={config.use_memory}")
        if config.use_memory:
            logger.info(f"memory_layer_stride={config.memory_layer_stride}")
        logger.info(f"chunkwise_backward={config.chunkwise_backward}")
        logger.info(f"label_prefix_tokens={config.label_prefix_tokens}")
        logger.info(f"detach_mem_state={config.detach_mem_state}")
        logger.info(f"freeze_base_model={config.freeze_base_model}")
        if config.eval_topk:
            logger.info(f"eval_topk={config.eval_topk}")

    tokenizer = AutoTokenizer.from_pretrained(config.model_path, trust_remote_code=True)
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    # Monkeypatch transformers' optional-dependency probes to force the SDPA
    # path and avoid importing flash-attn / torchao / torchvision; best-effort.
    try:
        import transformers
        from transformers.utils import import_utils as _import_utils

        def _disabled(*args, **kwargs):
            return False

        _import_utils.is_flash_attn_2_available = _disabled
        if hasattr(transformers, "utils") and hasattr(transformers.utils, "is_flash_attn_2_available"):
            transformers.utils.is_flash_attn_2_available = _disabled

        _import_utils.is_torchao_available = _disabled
        if hasattr(transformers, "utils") and hasattr(transformers.utils, "is_torchao_available"):
            transformers.utils.is_torchao_available = _disabled

        _import_utils.is_torchvision_available = _disabled
        if hasattr(transformers, "utils") and hasattr(transformers.utils, "is_torchvision_available"):
            transformers.utils.is_torchvision_available = _disabled
    except Exception as e:
        logger.warning(f"Disable checks failed (ignored): {e}")

    torch_dtype = torch.bfloat16 if config.bf16 else (torch.float16 if config.fp16 else torch.float32)

    qwen_model = AutoModelForCausalLM.from_pretrained(
        config.model_path,
        torch_dtype=torch_dtype,
        device_map=None,
        trust_remote_code=True,
        attn_implementation="sdpa",
        low_cpu_mem_usage=True,
    )
    qwen_model.to(device)
    # KV-cache is for generation; disable for training.
    qwen_model.config.use_cache = False
    if config.gradient_checkpointing and hasattr(qwen_model, "gradient_checkpointing_enable"):
        qwen_model.gradient_checkpointing_enable()

    # --- Data: 90/10 train/eval split of the BABILong QA1 file ---
    train_dataset = BABILongDataset(
        config.data_path,
        tokenizer,
        max_length=config.max_length,
        answer_reserve_tokens=config.answer_reserve_tokens,
        label_prefix_tokens=config.label_prefix_tokens,
        max_samples=config.max_samples,
    )

    train_size = int(0.9 * len(train_dataset))
    eval_size = len(train_dataset) - train_size
    # Fixed-seed split so every rank gets the same partition.
    train_dataset, eval_dataset = torch.utils.data.random_split(
        train_dataset,
        [train_size, eval_size],
        generator=torch.Generator().manual_seed(config.seed),
    )

    if is_main and config.debug_data_samples > 0:
        log_dataset_debug_stats(train_dataset, tokenizer, "train", config.debug_data_samples)
        log_dataset_debug_stats(eval_dataset, tokenizer, "eval", config.debug_data_samples)

    train_sampler = None
    eval_sampler = None
    if is_distributed:
        from torch.utils.data.distributed import DistributedSampler
        train_sampler = DistributedSampler(train_dataset, num_replicas=world_size, rank=rank, shuffle=True, seed=config.seed)
        eval_sampler = DistributedSampler(eval_dataset, num_replicas=world_size, rank=rank, shuffle=False)

    train_dataloader = DataLoader(
        train_dataset,
        batch_size=config.batch_size,
        shuffle=(train_sampler is None),
        sampler=train_sampler,
        collate_fn=collate_fn,
        num_workers=0,
    )
    eval_dataloader = DataLoader(
        eval_dataset,
        batch_size=config.batch_size,
        shuffle=False,
        sampler=eval_sampler,
        collate_fn=collate_fn,
        num_workers=0,
    )

    # Wrap Qwen with the Titans-memory model defined elsewhere in this file.
    model = QwenTitansForBABILong(qwen_model, config)
    model.to(device)

    if args.freeze_base_model:
        config.freeze_base_model = True

    if config.freeze_base_model:
        # Freezing the base only makes sense when memory modules exist to train.
        if not config.use_memory:
            if is_main:
                logger.error("--freeze_base_model requires memory module (--no_memory is incompatible)")
            raise ValueError("freeze_base_model requires use_memory=True")
        model.freeze_base_model()
        if is_main:
            logger.info("=" * 40)
            logger.info("FREEZE MODE: Training memory + independent lm_head")
            logger.info(" - Trainable: neural_memory, mem_gate, lm_head (untied)")
            logger.info(" - Frozen: embed_tokens, transformer layers, norm")
            logger.info("=" * 40)

    # --- Distributed wrapping: FSDP or DDP ---
    if is_distributed:
        if config.use_fsdp:
            from functools import partial
            from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, MixedPrecision
            from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy
            from transformers.models.qwen3.modeling_qwen3 import Qwen3DecoderLayer

            mp_policy = MixedPrecision(param_dtype=torch_dtype, reduce_dtype=torch_dtype, buffer_dtype=torch_dtype)
            # Shard at decoder-layer granularity (plain and memory-augmented).
            auto_wrap = partial(transformer_auto_wrap_policy, transformer_layer_cls={Qwen3DecoderLayer, QwenDecoderLayerWithTitansMemory})

            model = FSDP(
                model,
                auto_wrap_policy=auto_wrap,
                mixed_precision=mp_policy,
                device_id=torch.cuda.current_device(),
                use_orig_params=config.fsdp_use_orig_params,
                # Keep Titans memory modules outside FSDP sharding.
                ignored_modules=model.get_memory_modules(),
            )
        else:
            model = DDP(
                model,
                device_ids=[local_rank],
                output_device=local_rank,
                find_unused_parameters=config.ddp_find_unused_parameters,
            )
            if config.gradient_checkpointing:
                # Static graph lets DDP tolerate checkpointed re-forwards.
                try:
                    model._set_static_graph()
                    if is_main:
                        logger.warning("DDP static graph enabled for gradient checkpointing.")
                except Exception as e:
                    if is_main:
                        logger.warning(f"DDP static graph enable failed (ignored): {e}")

    trainer = Trainer(
        model=model,
        train_dataloader=train_dataloader,
        eval_dataloader=eval_dataloader,
        config=config,
        rank=rank,
        world_size=world_size,
        is_distributed=is_distributed,
        tokenizer=tokenizer,
    )

    # --- Eval-only mode: load checkpoint, evaluate, exit ---
    if args.eval_only:
        ckpt_path = args.ckpt_path or os.path.join(config.output_dir, config.final_ckpt_name)
        if is_main:
            logger.info(f"eval_only: loading checkpoint: {ckpt_path}")
        ckpt = torch.load(ckpt_path, map_location="cpu")
        has_full = isinstance(ckpt, dict) and ("model_state_dict" in ckpt)
        if has_full:
            full_sd = ckpt["model_state_dict"]
            if is_fsdp_model(model):
                from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, StateDictType, FullStateDictConfig
                full_cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
                with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, full_cfg):
                    # rank0_only: non-main ranks load an empty dict here.
                    sd_to_load = full_sd if is_main else {}
                    model.load_state_dict(sd_to_load, strict=False)
            else:
                unwrap_model(model).load_state_dict(full_sd, strict=False)

        # Also load memory-only weights (or treat a bare state dict as one).
        memory_sd = ckpt.get("memory_state_dict", ckpt if isinstance(ckpt, dict) else {})
        memory_sd = {k: v for k, v in memory_sd.items() if ("neural_memory" in k) or ("mem_gate" in k)}
        if len(memory_sd) > 0:
            if is_fsdp_model(model):
                from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, StateDictType, FullStateDictConfig
                full_cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
                with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, full_cfg):
                    sd_to_load = memory_sd if is_main else {}
                    model.load_state_dict(sd_to_load, strict=False)
            else:
                unwrap_model(model).load_state_dict(memory_sd, strict=False)

        eval_metrics = trainer.evaluate()
        if is_main:
            ppl = float(math.exp(min(20.0, eval_metrics["loss"])))
            logger.info(
                f"[EVAL] loss={eval_metrics['loss']:.4f}, ppl≈{ppl:.3f}, "
                f"em_acc={eval_metrics['em_acc'] * 100:.2f}%, "
                f"tok_acc={eval_metrics['tok_acc'] * 100:.2f}%"
            )
        cleanup_distributed()
        return

    trainer.train()
    cleanup_distributed()
| |
|
| |
|
# Script entry point.
if __name__ == "__main__":
    main()
| |
|