# This file is modified based on https://github.com/huggingface/transformers/blob/v4.52.4/src/transformers/models/qwen3/modeling_qwen3.py.
# Silence all transformers logging and warnings (must be set before any imports)
import os
import sys
import io
os.environ["TRANSFORMERS_VERBOSITY"] = "error"
# Suppress warning logs from transformers and Triton
class OutputFilter:
"""输出过滤器,抑制特定日志输出,同时保持 tqdm 等工具正常工作"""
def __init__(self, original_stream):
self.original_stream = original_stream
def write(self, text):
# Filtering rules:
# 1. Warnings starting with 🚨 (transformers docstring banner)
# 2. "Config not found" messages
# 3. Triton autotune logs (BLOCKS_ARE_CONTIGUOUS, triton_flex_attention, AUTOTUNE benchmarking)
filtered_keywords = [
"🚨",
"Config not found for sdar",
"BLOCKS_ARE_CONTIGUOUS",
"triton_flex_attention",
"AUTOTUNE benchmarking",
]
should_filter = any(keyword in text for keyword in filtered_keywords)
if not should_filter:
self.original_stream.write(text)
return len(text) if not should_filter else 0
def flush(self):
self.original_stream.flush()
# Proxy all attributes that tqdm needs
def isatty(self):
return self.original_stream.isatty()
@property
def encoding(self):
return self.original_stream.encoding
@property
def mode(self):
return self.original_stream.mode
@property
def name(self):
return self.original_stream.name
def fileno(self):
return self.original_stream.fileno()
def __getattr__(self, name):
return getattr(self.original_stream, name)
# Filter both stdout and stderr (Triton logs are printed to stdout)
sys.stdout = OutputFilter(sys.stdout)
sys.stderr = OutputFilter(sys.stderr)
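# Hedged illustration (not part of the original file): an uncalled, hypothetical helper that
# sketches how the installed OutputFilter behaves. Writes containing a filtered keyword are
# dropped and report 0 bytes written; everything else is forwarded to the wrapped stream.
def _example_output_filter():
    buf = io.StringIO()
    f = OutputFilter(buf)
    kept = f.write("loading checkpoint shards\n")       # forwarded, returns len(text)
    dropped = f.write("AUTOTUNE benchmarking runs ...")  # filtered, returns 0
    return kept, dropped, buf.getvalue()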
import logging
import warnings
warnings.filterwarnings("ignore", message=".*is part of.*")
warnings.filterwarnings("ignore", message=".*docstring.*")
# Silence torch._inductor autotune logs
logging.getLogger("torch._inductor").setLevel(logging.CRITICAL)
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/qwen3/modular_qwen3.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_qwen3.py file directly. One of our CI checks enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# coding=utf-8
# Copyright 2025 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Optional, Tuple, Union
import torch
from torch import nn
from einops import rearrange
from transformers.activations import ACT2FN
from transformers.cache_utils import Cache, DynamicCache, SlidingWindowCache, StaticCache
from transformers.generation import GenerationMixin
from transformers.integrations import use_kernel_forward_from_hub
from transformers.modeling_attn_mask_utils import AttentionMaskConverter
from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
from transformers.modeling_layers import GradientCheckpointingLayer
from transformers.modeling_outputs import (
BaseModelOutputWithPast,
CausalLMOutputWithPast,
QuestionAnsweringModelOutput,
SequenceClassifierOutputWithPast,
TokenClassifierOutput,
)
from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from transformers.processing_utils import Unpack
from transformers.utils import LossKwargs, can_return_tuple, logging
from .configuration_sdar import SDARConfig
from flash_attn.ops.triton.layer_norm import rms_norm_fn as flash_rms_norm
# ===== FusedLinearDiffusionCrossEntropyLoss =====
try:
from .fused_linear_diffusion_cross_entropy import FusedLinearDiffusionCrossEntropyLoss
fused_diffusion_loss_available = True
except ImportError:
fused_diffusion_loss_available = False
# ===== End FusedLoss =====
import torch.nn.functional as F
# Flash Attention for inference (optional - only required for rollout/eval)
flash_attn_func = None
flash_attn_varlen_func = None
index_first_axis = None
pad_input = None
unpad_input = None
try:
from flash_attn import flash_attn_func, flash_attn_varlen_func
from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input
except ImportError:
pass
try:
from liger_kernel.ops.swiglu import LigerSiLUMulFunction # noqa: F401
liger_kernel_is_available = True
except ImportError:
liger_kernel_is_available = False
# ===== Flex Attention =====
# Direct import - will error if not available
from torch.nn.attention.flex_attention import BlockMask, create_block_mask, flex_attention
# ===== End Flex Attention =====
logger = logging.get_logger(__name__)
@use_kernel_forward_from_hub("RMSNorm")
class SDARRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
SDARRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
return flash_rms_norm(
hidden_states, weight=self.weight, bias=None, eps=self.variance_epsilon)
'''
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * \
torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
'''
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
class SDARMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.gate_proj = nn.Linear(
self.hidden_size, self.intermediate_size, bias=False)
self.up_proj = nn.Linear(
self.hidden_size, self.intermediate_size, bias=False)
self.down_proj = nn.Linear(
self.intermediate_size, self.hidden_size, bias=False)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
if liger_kernel_is_available:
return self.down_proj(LigerSiLUMulFunction.apply(self.gate_proj(x), self.up_proj(x)))
else:
down_proj = self.down_proj(self.act_fn(
self.gate_proj(x)) * self.up_proj(x))
return down_proj
# ===== DiRL Block Diffusion Functions =====
def calculate_token_nums(position_ids: torch.Tensor):
"""
Calculate token counts per segment for block attention mask creation.
Args:
position_ids: (B, L) position ids
Returns:
list[list[torch.Tensor]]: nested list of segment lengths for each sample
"""
if position_ids.dim() != 2:
raise ValueError(f"Input must be 2D Tensor, got {position_ids.dim()}D")
all_lengths = []
for pids_row in position_ids:
seq_len = pids_row.shape[0]
# Find indices where value is 0 (segment boundaries)
zero_indices = torch.nonzero(pids_row == 0).flatten()
# Add total sequence length as final split point
split_points = torch.cat([
zero_indices,
torch.tensor([seq_len], device=pids_row.device, dtype=zero_indices.dtype)
])
# Calculate differences between adjacent split points
lengths = torch.diff(split_points)
all_lengths.append(lengths)
return all_lengths
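# Hedged illustration (not part of the original file): an uncalled, hypothetical helper showing
# how calculate_token_nums splits a packed sequence wherever the position ids reset to 0.
# The values below are made up purely for demonstration.
def _example_calculate_token_nums():
    pids = torch.tensor([[0, 1, 2, 0, 1, 0, 1, 2, 3]])
    # Zeros at indices 0, 3 and 5 give three segments of lengths 3, 2 and 4.
    return calculate_token_nums(pids)  # [tensor([3, 2, 4])]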
# ===== End DiRL Block Diffusion Functions =====
# ===== Flex Attention Block Mask Functions =====
def block_diff_mask(b, h, q_idx, kv_idx, block_size=None, n=None):
"""Construct block diffusion attention mask for flex_attention.
Three masks combined:
- Block Diagonal (M_BD): Self-attention within blocks
- Offset Block-Causal (M_OBC): Cross-attention for context
- Block-Causal (M_BC): Causal attention within x0
Args:
b, h: Batch and head indices (ignored for mask logic).
q_idx, kv_idx: Query and Key indices.
block_size: Defines the block structure.
n: Boundary point for xt/x0 split.
Returns:
A boolean attention mask where True = attend, False = mask.
"""
x0_flag_q = q_idx >= n
x0_flag_kv = kv_idx >= n
block_q = torch.where(
x0_flag_q == 1, (q_idx - n) // block_size, q_idx // block_size
)
block_kv = torch.where(
x0_flag_kv == 1, (kv_idx - n) // block_size, kv_idx // block_size
)
block_diagonal = (block_q == block_kv) & (x0_flag_q == x0_flag_kv)
offset_block_causal = (block_q > block_kv) & (x0_flag_kv == 1) & (x0_flag_q == 0)
block_causal = (block_q >= block_kv) & (x0_flag_kv == 1) & (x0_flag_q == 1)
return block_diagonal | offset_block_causal | block_causal
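# Hedged illustration (not part of the original file): an uncalled, hypothetical helper that
# materialises the block diffusion mask for a toy case with n=4 noisy (xt) tokens followed by
# 4 clean (x0) tokens and block_size=2, to visualise the three combined sub-masks.
def _example_block_diff_mask():
    n, block_size = 4, 2
    idx = torch.arange(2 * n)
    # (8, 8) bool mask; True = attend. Rows 0..3 are xt queries, rows 4..7 are x0 queries.
    return block_diff_mask(
        b=None, h=None,
        q_idx=idx[:, None], kv_idx=idx[None, :],
        block_size=block_size, n=n,
    )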
def block_attn_mask(num_tokens, block_size, device):
"""Create block attention mask for flex_attention.
Args:
num_tokens: list of lists, each inner list is token counts per sample
block_size: block size for block attention
device: torch device
Returns:
bool tensor (B, N, N) where True = attend
"""
masks = []
for i in range(len(num_tokens)):
cur_masks = []
for num in num_tokens[i]:
single_mask = block_diff_mask(
b=None, h=None,
q_idx=torch.arange(num * 2, device=device)[:, None],
kv_idx=torch.arange(num * 2, device=device)[None, :],
block_size=block_size, n=num,
)
cur_masks.append(single_mask)
masks.append(torch.block_diag(*cur_masks))
return torch.stack(masks, dim=0)
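# Hedged illustration (not part of the original file): an uncalled, hypothetical helper showing
# the per-sample mask built by block_attn_mask for one packed sample with two segments of
# lengths 3 and 2. Each segment contributes 2 * length tokens (the noisy xt copy followed by
# the clean x0 copy), so the result is a (1, 10, 10) boolean mask.
def _example_block_attn_mask():
    num_tokens = [torch.tensor([3, 2])]
    return block_attn_mask(num_tokens, block_size=2, device="cpu")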
@torch.compile(fullgraph=True, mode="max-autotune-no-cudagraphs")
def fused_flex_attention(query, key, value, attention_mask, **kwargs):
"""Compiled flex_attention wrapper for performance."""
return flex_attention(query, key, value, block_mask=attention_mask, **kwargs)
# ===== End Flex Attention Block Mask Functions =====
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2:]
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
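# Hedged illustration (not part of the original file): an uncalled, hypothetical shape check for
# apply_rotary_pos_emb with [batch, heads, seq, head_dim] queries/keys and [batch, seq, head_dim]
# cos/sin, matching the default unsqueeze_dim=1 broadcasting described in the docstring.
def _example_apply_rotary_pos_emb():
    q = torch.randn(2, 8, 16, 64)
    k = torch.randn(2, 4, 16, 64)
    cos = torch.randn(2, 16, 64)
    sin = torch.randn(2, 16, 64)
    q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)
    return q_rot.shape, k_rot.shape  # torch.Size([2, 8, 16, 64]), torch.Size([2, 4, 16, 64])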
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(
batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
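# Hedged illustration (not part of the original file): an uncalled, hypothetical helper showing
# how repeat_kv expands grouped KV heads so that 2 key/value heads serve 8 attention heads.
def _example_repeat_kv():
    kv = torch.randn(1, 2, 16, 64)       # (batch, num_kv_heads, seq, head_dim)
    return repeat_kv(kv, n_rep=4).shape  # torch.Size([1, 8, 16, 64])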
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs,
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(
attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(
attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
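# Hedged illustration (not part of the original file): an uncalled, hypothetical shape check for
# eager_attention_forward, using a stand-in object that only carries the two attributes the
# function reads (num_key_value_groups and training).
def _example_eager_attention_forward():
    from types import SimpleNamespace
    mod = SimpleNamespace(num_key_value_groups=4, training=False)
    q = torch.randn(1, 8, 16, 64)
    k = torch.randn(1, 2, 16, 64)
    v = torch.randn(1, 2, 16, 64)
    out, weights = eager_attention_forward(mod, q, k, v, attention_mask=None, scaling=64 ** -0.5)
    return out.shape, weights.shape  # torch.Size([1, 16, 8, 64]), torch.Size([1, 8, 16, 16])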
class SDARAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: SDARConfig, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(
config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.hidden_size = config.hidden_size
self.num_attention_heads = config.num_attention_heads
self.num_key_value_heads = config.num_key_value_heads
self.q_proj = nn.Linear(
config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
)
self.k_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.v_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.o_proj = nn.Linear(
config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
)
# unlike olmo, only on the head dim!
self.q_norm = SDARRMSNorm(self.head_dim, eps=config.rms_norm_eps)
# thus post q_norm does not need reshape
self.k_norm = SDARRMSNorm(self.head_dim, eps=config.rms_norm_eps)
self.sliding_window = config.sliding_window
if not (
self.config.use_sliding_window
and getattr(self.config, "sliding_window", None) is not None
and self.layer_idx >= self.config.max_window_layers
):
self.sliding_window = None
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: Tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_value: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
input_shape = hidden_states.shape[:-1]
bsz, q_len = input_shape
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_norm(self.q_proj(
hidden_states).view(hidden_shape)).transpose(1, 2)
key_states = self.k_norm(self.k_proj(
hidden_states).view(hidden_shape)).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(
hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(
query_states, key_states, cos, sin)
if past_key_value is not None and kwargs.get("store_kv", False):
# sin and cos are specific to RoPE models; cache_position needed for the static cache
key_states, value_states = past_key_value.update(
key_states, value_states, self.layer_idx)
elif past_key_value is not None and not kwargs.get("store_kv", False) and len(past_key_value) > self.layer_idx:
# only retrieve, do not store kv
past_key_states, past_value_states = past_key_value[self.layer_idx]
key_states = torch.cat(
[past_key_states, key_states], dim=-2)
value_states = torch.cat(
[past_value_states, value_states], dim=-2)
'''
attention_mask = attention_mask.bool() if attention_mask is not None else None
if torch.all(attention_mask): # decoding
query_states = query_states.transpose(1, 2)
key_states = key_states.transpose(1, 2)
value_states = value_states.transpose(1, 2)
attn_output = flash_attn_func(
query_states,
key_states,
value_states,
causal=False,
softmax_scale=self.scaling
)
else: # prefilling
attn_output = F.scaled_dot_product_attention(
query=query_states,
key=key_states,
value=value_states,
attn_mask=attention_mask,
is_causal=False,
scale=self.scaling,
enable_gqa=True
)
attn_output = attn_output.transpose(1, 2).contiguous()
'''
# --- After RoPE and KV-cache handling, expand KV to all heads ---
key_states = repeat_kv(key_states, self.num_key_value_groups) # [B, H, K, D]
value_states = repeat_kv(value_states, self.num_key_value_groups) # [B, H, K, D]
bsz, q_len = input_shape
# ===== Attention with Training/Inference Branch =====
attn_weights = None
if self.training:
# ===== Training Mode: Use flex_attention with BlockMask =====
if isinstance(attention_mask, BlockMask):
attn_output, attn_weights = fused_flex_attention(
query=query_states,
key=key_states,
value=value_states,
attention_mask=attention_mask,
enable_gqa=True,
scale=self.scaling,
return_lse=True
)
attn_weights = attn_weights.to(value_states.dtype) if attn_weights is not None else None
attn_output = rearrange(attn_output, 'b h l d -> b l (h d)')
else:
raise TypeError(
f"Expected BlockMask during training, got {type(attention_mask)}. "
"Ensure create_flex_block_mask() is called in training code."
)
else:
# ===== Inference Mode: Compatible with regular bool attention_mask =====
# Used for rollout/eval where tokenizer's attention_mask is passed
attention_mask_bool = attention_mask.bool() if attention_mask is not None else None
if attention_mask_bool is not None and torch.all(attention_mask_bool):
# Full attention (decoding) - use Flash Attention
if flash_attn_func is None:
raise ImportError(
"Flash Attention is required for inference. "
"Please install flash-attn: pip install flash-attn --no-build-isolation"
)
query_states = query_states.transpose(1, 2)
key_states = key_states.transpose(1, 2)
value_states = value_states.transpose(1, 2)
attn_output = flash_attn_func(
query_states,
key_states,
value_states,
causal=False,
softmax_scale=self.scaling
)
attn_output = rearrange(attn_output, 'b l h d -> b l (h d)')
else:
# Partial attention (prefilling) - use SDPA
attn_output = F.scaled_dot_product_attention(
query=query_states,
key=key_states,
value=value_states,
attn_mask=attention_mask_bool,
is_causal=False,
scale=self.scaling,
enable_gqa=True
)
attn_output = rearrange(attn_output, 'b h l d -> b l (h d)')
# ===== End Attention =====
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
class SDARDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: SDARConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = SDARAttention(config=config, layer_idx=layer_idx)
self.mlp = SDARMLP(config)
self.input_layernorm = SDARRMSNorm(
config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = SDARRMSNorm(
config.hidden_size, eps=config.rms_norm_eps)
if (
config.sliding_window and config._attn_implementation != "flash_attention_2"
): # diff with Llama is this warning
logger.warning_once(
f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; "
"unexpected results may be encountered."
)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = False,
store_kv: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
# necessary, but kept here for BC
position_embeddings: Optional[Tuple[torch.Tensor,
torch.Tensor]] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
store_kv=store_kv,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
return outputs
class SDARPreTrainedModel(PreTrainedModel):
config_class = SDARConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["SDARDecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn_2 = True
_supports_sdpa = True
_supports_flex_attn = True
_supports_cache_class = True
_supports_quantized_cache = True
_supports_static_cache = True
_supports_attention_backend = True
def _init_weights(self, module):
std = self.config.initializer_range
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, SDARRMSNorm):
module.weight.data.fill_(1.0)
class SDARRotaryEmbedding(nn.Module):
def __init__(self, config: SDARConfig, device=None):
super().__init__()
# BC: "rope_type" was originally "type"
if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
self.rope_type = config.rope_scaling.get(
"rope_type", config.rope_scaling.get("type"))
else:
self.rope_type = "default"
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = self.rope_init_fn(
self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = self.inv_freq
@torch.no_grad()
# power user: used with advanced RoPE types (e.g. dynamic rope)
@dynamic_rope_update
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(
position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(
x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @
position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
class SDARModel(SDARPreTrainedModel):
def __init__(self, config: SDARConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(
config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[SDARDecoderLayer(config, layer_idx)
for layer_idx in range(config.num_hidden_layers)]
)
self.norm = SDARRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = SDARRotaryEmbedding(config=config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, value):
self.embed_tokens = value
@can_return_tuple
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
store_kv: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**flash_attn_kwargs: Unpack[FlashAttentionKwargs],
) -> BaseModelOutputWithPast:
r"""
Args:
store_kv (`bool`, *optional*):
Whether to store key and value states in the cache. If `True`, the key and value states will be
stored in `past_key_values`. If `False` and `past_key_values` exists, only retrieves cached states
without updating the cache.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError(
"You must specify exactly one of input_ids or inputs_embeds")
if self.gradient_checkpointing and self.training and use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
)
use_cache = False
# TODO (joao): remove this exception in v4.56 -- it exists for users that try to pass a legacy cache
if not isinstance(past_key_values, (type(None), Cache)):
raise ValueError(
"The `past_key_values` should be either a `Cache` object or `None`.")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache()
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length(
) if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
# causal_mask = self._update_causal_mask(
# attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
# )
hidden_states = inputs_embeds
# create position embeddings to be shared across the decoder layers
position_embeddings = self.rotary_emb(hidden_states, position_ids)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
if output_hidden_states:
all_hidden_states += (hidden_states,)
layer_outputs = decoder_layer(
hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_value=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
store_kv=store_kv,
cache_position=cache_position,
position_embeddings=position_embeddings,
**flash_attn_kwargs,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
hidden_states = self.norm(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values if use_cache else None,
hidden_states=all_hidden_states,
attentions=all_self_attns,
)
def _update_causal_mask(
self,
attention_mask: Union[torch.Tensor, "BlockMask"],
input_tensor: torch.Tensor,
cache_position: torch.Tensor,
past_key_values: Cache,
output_attentions: bool = False,
):
if self.config._attn_implementation == "flash_attention_2":
if attention_mask is not None and past_key_values is not None:
is_padding_right = attention_mask[:, -1].sum().item() != input_tensor.size()[0]
if is_padding_right:
raise ValueError(
"You are attempting to perform batched generation with padding_side='right'"
" this may lead to unexpected behaviour for Flash Attention version of Qwen3. Make sure to "
" call `tokenizer.padding_side = 'left'` before tokenizing the input. "
)
if attention_mask is not None and 0.0 in attention_mask:
return attention_mask
return None
# For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
# order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
# to infer the attention mask.
past_seen_tokens = past_key_values.get_seq_length(
) if past_key_values is not None else 0
using_static_cache = isinstance(past_key_values, StaticCache)
using_sliding_window_cache = isinstance(
past_key_values, SlidingWindowCache)
# When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
if (
self.config._attn_implementation == "sdpa"
and not (using_static_cache or using_sliding_window_cache)
and not output_attentions
):
if AttentionMaskConverter._ignore_causal_mask_sdpa(
attention_mask,
inputs_embeds=input_tensor,
past_key_values_length=past_seen_tokens,
sliding_window=self.config.sliding_window,
is_training=self.training,
):
return None
dtype = input_tensor.dtype
min_dtype = torch.finfo(dtype).min
sequence_length = input_tensor.shape[1]
# SlidingWindowCache or StaticCache
if using_sliding_window_cache or using_static_cache:
target_length = past_key_values.get_max_cache_shape()
# DynamicCache or no cache
else:
target_length = (
attention_mask.shape[-1]
if isinstance(attention_mask, torch.Tensor)
else past_seen_tokens + sequence_length + 1
)
# In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
attention_mask,
sequence_length=sequence_length,
target_length=target_length,
dtype=dtype,
cache_position=cache_position,
batch_size=input_tensor.shape[0],
config=self.config,
past_key_values=past_key_values,
)
if (
self.config._attn_implementation == "sdpa"
and attention_mask is not None
and attention_mask.device.type in ["cuda", "xpu", "npu"]
and not output_attentions
):
# Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
# using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
# Details: https://github.com/pytorch/pytorch/issues/110213
causal_mask = AttentionMaskConverter._unmask_unattended(
causal_mask, min_dtype)
return causal_mask
@staticmethod
def _prepare_4d_causal_attention_mask_with_cache_position(
attention_mask: torch.Tensor,
sequence_length: int,
target_length: int,
dtype: torch.dtype,
cache_position: torch.Tensor,
batch_size: int,
config: SDARConfig,
past_key_values: Cache,
):
"""
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet.
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`torch.Tensor`):
Batch size.
config (`SDARConfig`):
The model's configuration class
past_key_values (`Cache`):
The cache class that is being used currently to generate
"""
if attention_mask is not None and attention_mask.dim() == 4:
# In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
causal_mask = attention_mask
else:
min_dtype = torch.finfo(dtype).min
causal_mask = torch.full(
(sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
)
diagonal_attend_mask = torch.arange(target_length, device=cache_position.device) > cache_position.reshape(
-1, 1
)
text_config = config.get_text_config()
if getattr(text_config, "use_sliding_window", True) and text_config.sliding_window is not None:
# if we have sliding window, we should not attend to tokens beyond sliding window length, so we mask them out also
# the check is needed to verify is current checkpoint was trained with sliding window or not
if not isinstance(past_key_values, SlidingWindowCache) or sequence_length > target_length:
sliding_attend_mask = torch.arange(target_length, device=cache_position.device) <= (
cache_position.reshape(-1, 1) -
text_config.sliding_window
)
diagonal_attend_mask.bitwise_or_(sliding_attend_mask)
causal_mask *= diagonal_attend_mask
causal_mask = causal_mask[None, None,
:, :].expand(batch_size, 1, -1, -1)
if attention_mask is not None:
causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
if attention_mask.shape[-1] > target_length:
attention_mask = attention_mask[:, :target_length]
mask_length = attention_mask.shape[-1]
padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
causal_mask.device
)
padding_mask = padding_mask == 0
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
padding_mask, min_dtype
)
return causal_mask
class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs):
...
class SDARForCausalLM(SDARPreTrainedModel, GenerationMixin):
_tied_weights_keys = ["lm_head.weight"]
_tp_plan = {"lm_head": "colwise_rep"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
def __init__(self, config):
super().__init__(config)
self.model = SDARModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(
config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.model.embed_tokens
def set_input_embeddings(self, value):
self.model.embed_tokens = value
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def set_decoder(self, decoder):
self.model = decoder
def get_decoder(self):
return self.model
def prepare_for_bd_training(self, inputs_ids, position_ids, prompt_mask, masked_indices=None, p_mask_input=None):
"""
Prepare block diffusion training data (aligned with DiRL).
Args:
inputs_ids: (B, L) input token ids
position_ids: (B, L) position ids
prompt_mask: (B, L) bool tensor, marks prompt positions
masked_indices: (B, L) bool tensor, marks which positions are masked
p_mask_input: (B, L) bool tensor, for RL training p_mask
Returns:
concat_inputs_ids: (B, 2*L) concatenated input ids
concat_position_ids: (B, 2*L) concatenated position ids
flex_attention_mask_3d: BlockMask for flex_attention
logits_to_keep_half: (B, L) bool tensor
logits_to_keep: (B, 2*L) bool tensor
p_mask: (M,) float tensor
p_to_keep: (M,) bool tensor or None
"""
bsz, seq_len = inputs_ids.shape
num_tokens = calculate_token_nums(position_ids)
# Get mask_token_id from config, or use default value (bos_token_id + 26, same as DiRL)
mask_token_id = getattr(self.config, 'mask_token_id', None)
if mask_token_id is None:
mask_token_id = self.config.bos_token_id + 26
# Get block_size from config, or use default value (4, same as DiRL)
block_size = getattr(self.config, 'block_size', 4)
# Process masked_indices
if masked_indices is not None:
# RL training mode: use provided masked_indices
noisy_inputs_ids = torch.where(masked_indices, mask_token_id, inputs_ids)
logits_to_keep_half = masked_indices
M = masked_indices.sum().item()
p_mask = torch.full((M,), 0.5, device=inputs_ids.device, dtype=torch.float)
else:
# No masked_indices provided: create response_mask (labels != -100 positions)
if prompt_mask is not None:
response_mask = ~prompt_mask
else:
# If no prompt_mask, assume all are response
response_mask = torch.ones(bsz, seq_len, dtype=torch.bool, device=inputs_ids.device)
masked_indices = response_mask
noisy_inputs_ids = torch.where(masked_indices, mask_token_id, inputs_ids)
logits_to_keep_half = masked_indices
M = masked_indices.sum().item()
p_mask = torch.full((M,), 0.5, device=inputs_ids.device, dtype=torch.float)
# Compute p_to_keep - maps p_mask from the original (B, L) space to the extracted space
p_to_keep = None
if p_mask_input is not None:
p_to_keep = p_mask_input[logits_to_keep_half]
# Build router_noisy_part (marks which positions hold the noisy copy)
# True = noisy (xt) token, False = original (x0) token, alternating per segment
# Following the DiRL approach: built from the number of segments in each sample
router_noisy_part_list = []
for i in range(bsz):
cur_router_noisy_part = (torch.arange(num_tokens[i].shape[0] * 2) % 2 == 0).to(inputs_ids.device)
cur_router_noisy_part = cur_router_noisy_part.repeat_interleave(num_tokens[i].repeat_interleave(2))
router_noisy_part_list.append(cur_router_noisy_part)
router_noisy_part = torch.stack(router_noisy_part_list, dim=0)
concat_inputs_ids = inputs_ids.repeat(1, 2)
logits_to_keep = torch.zeros(bsz, 2 * seq_len, dtype=torch.bool, device=inputs_ids.device)
concat_position_ids = torch.zeros(bsz, 2 * seq_len, dtype=position_ids.dtype, device=position_ids.device)
for i in range(bsz):
concat_inputs_ids[i][router_noisy_part[i]] = noisy_inputs_ids[i]
concat_inputs_ids[i][~router_noisy_part[i]] = inputs_ids[i]
logits_to_keep[i][router_noisy_part[i]] = logits_to_keep_half[i]
concat_position_ids[i][router_noisy_part[i]] = position_ids[i]
concat_position_ids[i][~router_noisy_part[i]] = position_ids[i]
# Create flex_attention mask (using existing block_attn_mask)
attention_mask = block_attn_mask(num_tokens, block_size, inputs_ids.device)
flex_attention_mask_3d = create_block_mask(
lambda b, h, q_idx, kv_idx: attention_mask[b, q_idx, kv_idx],
B=attention_mask.size(0), H=None,
Q_LEN=attention_mask.size(1), KV_LEN=attention_mask.size(2),
)
return concat_inputs_ids, concat_position_ids, flex_attention_mask_3d, logits_to_keep_half, logits_to_keep, p_mask, p_to_keep
@can_return_tuple
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
# ===== DiRL compatibility parameters =====
masked_indices: Optional[torch.Tensor] = None, # (B, L) bool tensor, marks masked positions
return_logits: bool = False, # If True, return only logits for masked positions
# ===== RL training parameters (compute_rl_loss mode) =====
compute_rl_loss: bool = False,
rl_p_mask: Optional[torch.Tensor] = None,
rl_adv: Optional[torch.Tensor] = None,
rl_adv_clip_enabled: bool = False,
rl_adv_clip_min: float = 0.0,
rl_logp_old_tok: Optional[torch.Tensor] = None,
rl_logp_ref_tok: Optional[torch.Tensor] = None,
rl_logp_ref_mask: Optional[torch.Tensor] = None, # Mask for valid logp_ref positions
rl_is_real: Optional[torch.Tensor] = None,
rl_correctness: Optional[torch.Tensor] = None, # (B,) bool tensor for NLL loss
rl_ppo_eps: float = 0.2,
rl_ppo_eps_high: float = 0.28, # clip eps for advantage actions (adv > 0)
rl_kl_beta: float = 0.0, # KL penalty coefficient
rl_nll_weight: float = 1.0, # NLL loss coefficient
rl_use_kl_estimator_k3: bool = True,
rl_return_entropy: bool = False,
rl_loss_reduction_mode: str = "token", # "token" or "sequence"
**kwargs: Unpack[KwargsForCausalLM],
) -> CausalLMOutputWithPast:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, SDARForCausalLM
>>> model = SDARForCausalLM.from_pretrained("DiffuOpen/SDAR-1.7B-Chat")
>>> tokenizer = AutoTokenizer.from_pretrained("DiffuOpen/SDAR-1.7B-Chat")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
# ===== DiRL Block Diffusion Training Mode =====
if self.training:
assert inputs_embeds is None, "Only input_ids are supported during training."
prompt_mask = (labels == -100) if labels is not None else None
(
concat_inputs_ids,
concat_position_ids,
flex_attention_mask_3d,
logits_to_keep_half,
logits_to_keep,
p_mask_out,
p_to_keep,
) = self.prepare_for_bd_training(
input_ids, position_ids, prompt_mask, masked_indices, p_mask_input=rl_p_mask
)
outputs = self.model(
input_ids=concat_inputs_ids,
attention_mask=flex_attention_mask_3d,
position_ids=concat_position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only keep positions marked by logits_to_keep (masked_indices positions)
hidden_states = hidden_states[logits_to_keep].contiguous()
# Initialize entropy (in training mode)
entropy = torch.tensor(0.0, device=input_ids.device)
# ===== Branch 1: compute_rl_loss (PPO training) =====
if compute_rl_loss:
assert p_to_keep is not None, "p_to_keep must be provided for RL loss computation."
assert rl_adv is not None, "rl_adv must be provided for RL loss computation."
assert rl_is_real is not None, "rl_is_real must be provided for RL loss computation."
assert labels is not None, "labels must be provided for RL loss computation."
assert masked_indices is not None, "masked_indices must be provided for RL loss computation."
device = input_ids.device
# logits (M, V) - M is the number of extracted positions after block diffusion
logits = self.lm_head(hidden_states)
# mask — is_real filtering
is_real_tensor = (
rl_is_real.to(device=device, dtype=torch.bool)
if torch.is_tensor(rl_is_real)
else torch.tensor(rl_is_real, dtype=torch.bool, device=device)
)
p_mask_real = rl_p_mask & is_real_tensor.unsqueeze(1) # (B, L)
# Compute sequence lengths and batch indices for sequence-level reduction
if rl_loss_reduction_mode == "sequence":
# Count non-padding tokens per sample (B,)
seq_lengths = (labels != -100).sum(dim=1)
# Create batch index mapping for extracted tokens
batch_indices = torch.arange(input_ids.size(0), device=device).unsqueeze(1).expand_as(rl_p_mask) # (B, L)
# Correct index mapping: map p_mask_real from the (B, L) space into the extracted space (M,)
# logits_to_keep_half (i.e. masked_indices) marks which positions in the original space were extracted
# In prepare_for_bd_training, the extracted positions correspond to the True entries of logits_to_keep_half
p_to_keep_real = logits_to_keep_half[p_mask_real] # (M,) bool
# Select logits using p_to_keep (in the correct extracted space)
logits_p = logits[p_to_keep_real] # (N, V)
N = p_to_keep_real.sum().item()
# log_softmax
log_probs_p = torch.nn.functional.log_softmax(logits_p, dim=-1)
# Compute entropy if requested
entropy_value = None
if rl_return_entropy:
probs_p = log_probs_p.exp() # Convert log_probs to probs
entropy_p = -(probs_p * log_probs_p).sum(dim=-1) # (N,) per-token entropy
entropy_value = entropy_p.mean().detach().clone() # Scalar mean entropy
del probs_p, entropy_p # Cleanup
# labels / logp - must be mapped into the extracted space in the same way
labels_p = labels[p_mask_real][logits_to_keep_half[p_mask_real]] # (N,)
logp_p = log_probs_p.gather(dim=-1, index=labels_p.unsqueeze(-1)).squeeze(-1)
# Extract batch indices for sequence-level grouping
if rl_loss_reduction_mode == "sequence":
batch_indices_p = batch_indices[p_mask_real][logits_to_keep_half[p_mask_real]] # (N,)
# Valid token mask (TrajRL: no specific token filtering for now)
valid_token_mask = torch.ones_like(labels_p, dtype=torch.bool)
# advantage (TrajRL: per-token advantage, shape (B, L))
adv_tensor = rl_adv.to(device) if torch.is_tensor(rl_adv) else torch.tensor(rl_adv, dtype=torch.float, device=device)
# Handle different adv shapes:
# - (B, L): per-token advantage (TrajRL GAE), use directly
# - (B,): per-sample advantage (DiRL-style), expand to (B, L)
if adv_tensor.dim() == 2:
adv_expanded = adv_tensor
elif adv_tensor.dim() == 1:
adv_expanded = adv_tensor.unsqueeze(1).expand_as(rl_p_mask)
else:
raise ValueError(f"Unexpected adv shape: {adv_tensor.shape}, expected (B, L) or (B,)")
# Map into the extracted space
adv_p = adv_expanded[p_mask_real][logits_to_keep_half[p_mask_real]]
# old logp - must be mapped into the extracted space
if rl_logp_old_tok is not None and rl_logp_old_tok.numel() > 0:
logp_old_p = rl_logp_old_tok.to(device)[p_mask_real][logits_to_keep_half[p_mask_real]]
else:
logp_old_p = logp_p.detach()
# NLL Loss for correct samples (TrajRL VAPO)
nll_loss = torch.tensor(0.0, device=device)
if rl_correctness is not None and rl_correctness.any():
correctness_tensor = rl_correctness.to(device=device, dtype=torch.bool)
# Expand (B,) to (B, L) following the same pattern as rl_adv (1D case)
correctness_expanded = correctness_tensor.unsqueeze(1).expand_as(rl_p_mask) # (B, L)
# Map into the extracted space
correct_p = correctness_expanded[p_mask_real][logits_to_keep_half[p_mask_real]] # (N,)
if correct_p.any():
nll_loss = rl_nll_weight * (-logp_p[correct_p].mean())
# KL Loss (aligned with DiRL)
# Only compute KL for tokens with valid logp_ref (masked out positions use no KL penalty)
kl_loss = torch.tensor(0.0, device=device)
if rl_kl_beta > 0 and rl_logp_ref_tok is not None:
# Map into the extracted space
logp_ref_p = rl_logp_ref_tok.to(device)[p_mask_real][logits_to_keep_half[p_mask_real]]
# Apply logp_ref_mask if provided: only compute KL where logp_ref is valid
if rl_logp_ref_mask is not None:
logp_ref_mask_p = rl_logp_ref_mask.to(device)[p_mask_real][logits_to_keep_half[p_mask_real]]
valid_token_mask = valid_token_mask & logp_ref_mask_p
# Filter to valid positions only
logp_ref_p = logp_ref_p[valid_token_mask]
logp_p_valid = logp_p[valid_token_mask]
# Compute KL only on valid positions
kl_seq_p = logp_p_valid - logp_ref_p
if rl_use_kl_estimator_k3:
kl_seq_p = (-kl_seq_p).clamp(-10.0, 10.0).exp() - 1.0 + kl_seq_p
kl_loss = rl_kl_beta * kl_seq_p.mean()
# PPO loss with asymmetric clipping (DiRL-style)
# Use different eps for advantage (adv > 0) vs disadvantage (adv < 0) actions
ratio_p = (logp_p - logp_old_p).clamp(-10.0, 10.0).exp()
# Asymmetric clipping based on advantage sign
# For adv >= 0: use eps_high (more conservative)
# For adv < 0: use eps (more aggressive)
clipped_low = ratio_p.clamp(1 - rl_ppo_eps, 1 + rl_ppo_eps)
clipped_high = ratio_p.clamp(1 - rl_ppo_eps, 1 + rl_ppo_eps_high)
clipped = torch.where(adv_p >= 0, clipped_high, clipped_low)
surrogate_p = torch.minimum(ratio_p * adv_p, clipped * adv_p)
if rl_loss_reduction_mode == "sequence":
# Sequence-level reduction: average within each sequence, then average across sequences
B = input_ids.size(0)
seq_loss_sum = torch.zeros(B, device=device, dtype=surrogate_p.dtype)
seq_loss_sum.scatter_add_(0, batch_indices_p, surrogate_p)
# Normalize by sequence length
seq_loss_mean = seq_loss_sum / seq_lengths.float()
# Average across sequences (only non-zero entries)
valid_seqs = (seq_lengths > 0)
policy_loss = -seq_loss_mean[valid_seqs].mean()
# Store sequence-level statistics for logging
seq_loss_std = seq_loss_mean[valid_seqs].std()
seq_loss_min = seq_loss_mean[valid_seqs].min()
seq_loss_max = seq_loss_mean[valid_seqs].max()
else:
# Token-level reduction (default): average across all tokens
policy_loss = -surrogate_p.mean()
# Validation: ensure loss is finite
if not torch.isfinite(policy_loss):
logger.warning(f"Non-finite policy_loss detected: {policy_loss.item()}, mode={rl_loss_reduction_mode}")
policy_loss = torch.tensor(0.0, device=device, requires_grad=True)
# Total loss
loss = policy_loss + kl_loss + nll_loss
# Save loss values for output (before cleanup)
nll_loss_value = nll_loss.detach().clone()
kl_loss_value = kl_loss.detach().clone()
# Save PPO statistics for logging (before cleanup)
# ratio_mean: mean importance sampling ratio
ratio_mean_value = ratio_p.mean().detach().clone()
# clip_frac: fraction of samples that were clipped
clip_frac_value = (ratio_p != clipped).float().mean().detach().clone()
# Store entropy value (computed earlier)
entropy_value_stored = entropy_value if entropy_value is not None else torch.tensor(0.0, device=device)
# Cleanup
del logits, logits_p, log_probs_p, labels_p
del is_real_tensor, p_mask_real, p_to_keep_real
del adv_tensor, adv_expanded, adv_p
del logp_p, logp_old_p, ratio_p, clipped, surrogate_p, policy_loss, entropy_value
logits = None
# ===== Branch 2: return_logits (inference) =====
elif return_logits:
logits = self.lm_head(hidden_states) # (M, V)
loss = None
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# ===== Branch 3: block diffusion loss (pre-training) =====
else:
assert labels is not None, "Labels must be provided for training."
# Use FusedLinearDiffusionCrossEntropyLoss if available
if fused_diffusion_loss_available and getattr(self.config, 'use_fused_diffusion_loss', False):
answer_len = (labels != -100).sum()
loss_fct = FusedLinearDiffusionCrossEntropyLoss(reduction="sum")
loss = loss_fct(
x=hidden_states,
target=labels[logits_to_keep_half].contiguous(),
weight=self.lm_head.weight,
bias=getattr(self.lm_head, 'bias', None),
p_mask=p_mask_out,
)
loss = loss / answer_len
else:
# Fallback: CrossEntropyLoss on full logits
logits_full = self.lm_head(hidden_states)
loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
loss = loss_fct(logits_full.view(-1, self.config.vocab_size), labels.view(-1))
logits = None
# ===== Block Diffusion Training Mode: Construct output directly =====
# Construct output and return directly (don't execute Standard Mode code)
output = CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# Add RL loss attributes for training (DiRL-style)
if self.training and compute_rl_loss:
output.nll_loss = nll_loss_value if "nll_loss_value" in locals() else torch.tensor(0.0, device=input_ids.device)
output.kl_loss = kl_loss_value if "kl_loss_value" in locals() else torch.tensor(0.0, device=input_ids.device)
# Add PPO statistics for logging
output.ratio_mean = ratio_mean_value if "ratio_mean_value" in locals() else torch.tensor(0.0, device=input_ids.device)
output.clip_frac = clip_frac_value if "clip_frac_value" in locals() else torch.tensor(0.0, device=input_ids.device)
output.entropy = entropy_value_stored if "entropy_value_stored" in locals() else torch.tensor(0.0, device=input_ids.device)
output.seq_loss_std = seq_loss_std if "seq_loss_std" in locals() else torch.tensor(0.0, device=input_ids.device)
output.seq_loss_min = seq_loss_min if "seq_loss_min" in locals() else torch.tensor(0.0, device=input_ids.device)
output.seq_loss_max = seq_loss_max if "seq_loss_max" in locals() else torch.tensor(0.0, device=input_ids.device)
return output
else:
# ===== Standard Mode (no block diffusion) =====
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs: BaseModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep,
None) if isinstance(logits_to_keep, int) else logits_to_keep
hidden_states = hidden_states[:, slice_indices, :].contiguous()
fuse_linear_and_cross_entropy = getattr(self.config, 'fuse_cross_entropy', False) and self.training
if fuse_linear_and_cross_entropy:
# When using fused_linear_ce_loss, we do not compute the whole logits on HBM
logits = None
else:
logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
# ===== FusedLinearDiffusionCrossEntropyLoss (if available) =====
use_fused_loss = (
fused_diffusion_loss_available and
getattr(self.config, 'use_fused_diffusion_loss', False) and
self.training
)
if use_fused_loss:
# FusedLoss directly computes from hidden_states, no need for logits
p_mask = kwargs.get('p_mask', None)
if p_mask is None:
# Default: all ones (no noise scaling)
# Match the shape of labels for correct reshaping in FusedLoss
if labels.dim() >= 2:
p_mask = torch.ones_like(labels, dtype=hidden_states.dtype, device=hidden_states.device)
else:
# labels is already 1D
p_mask = torch.ones(labels.shape, dtype=hidden_states.dtype, device=hidden_states.device)
num_chunks = getattr(self.config, 'fused_loss_num_chunks', 8)
loss_fct = FusedLinearDiffusionCrossEntropyLoss(
ignore_index=-100,
label_smoothing=0.0,
logit_scale=1.0,
num_chunks=num_chunks,
reduction="mean"
)
loss = loss_fct(
x=hidden_states,
target=labels,
weight=self.lm_head.weight,
bias=getattr(self.lm_head, 'bias', None),
p_mask=p_mask,
)
else:
# Standard CrossEntropyLoss (fallback)
loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
loss = loss_fct(
logits.view(-1, self.config.vocab_size), labels.view(-1))
# ===== End FusedLoss =====
output = CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# Add RL loss attributes for training (DiRL-style)
if self.training and compute_rl_loss:
output.nll_loss = nll_loss_value if "nll_loss_value" in locals() else torch.tensor(0.0, device=input_ids.device)
output.kl_loss = kl_loss_value if "kl_loss_value" in locals() else torch.tensor(0.0, device=input_ids.device)
return output
__all__ = [
"SDARForCausalLM",
"SDARModel",
"SDARPreTrainedModel",
]