# Copyright 2025 The LLAMA4 and HuggingFace Inc. team. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Callable, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers.activations import ACT2FN
from transformers.cache_utils import Cache, DynamicCache
from transformers.generation import GenerationMixin
from transformers.masking_utils import (create_causal_mask,
create_sliding_window_causal_mask)
from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
from transformers.modeling_layers import GradientCheckpointingLayer
from transformers.modeling_outputs import BaseModelOutputWithPast, ModelOutput
from transformers.modeling_rope_utils import (ROPE_INIT_FUNCTIONS,
dynamic_rope_update)
from transformers.modeling_utils import (ALL_ATTENTION_FUNCTIONS,
PreTrainedModel)
from transformers.processing_utils import Unpack
from transformers.utils import TransformersKwargs, can_return_tuple, logging
from .configuration_step3p5 import Step3p5Config
logger = logging.get_logger(__name__)
__all__ = ["Step3p5Model", "Step3p5ForCausalLM"]
class Step3p5RotaryEmbedding(nn.Module):
def __init__(self, config: Step3p5Config, device=None, layer_idx=None):
super().__init__()
# BC: "rope_type" was originally "type"
self.layer_idx = layer_idx
if config.rope_parameters is not None:
self.rope_type = config.rope_parameters.get(
"rope_type", config.rope_parameters.get("type"))
else:
self.rope_type = "default"
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
partial_rotary_factors = getattr(config, "partial_rotary_factors",
None)
if partial_rotary_factors is not None:
config.partial_rotary_factor = partial_rotary_factors[
self.layer_idx]
else:
config.partial_rotary_factor = 1.0
self.rope_theta = config.rope_theta
if isinstance(config.rope_theta, list):
self.rope_theta = config.rope_theta.copy()
config.rope_theta = self.rope_theta[self.layer_idx]
self.config = config
self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = self.rope_init_fn(
self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = self.inv_freq
config.rope_theta = self.rope_theta
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(
position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float().to(x.device)
device_type = x.device.type if isinstance(
x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type,
enabled=False): # Force float32
freqs = (inv_freq_expanded.float()
@ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
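# Shape sketch for the rotary forward above (illustrative values): with x of shape
# (batch=2, seq=16, hidden) and position_ids of shape (2, 16), `inv_freq` has shape
# (rotary_dim // 2,), `freqs` becomes (2, 16, rotary_dim // 2) after the transpose,
# and the returned cos/sin are each (2, 16, rotary_dim) in x's dtype:
#   cos, sin = rope(x, position_ids)
#   cos.shape  # torch.Size([2, 16, rotary_dim])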
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., :x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2:]
return torch.cat((-x2, x1), dim=-1)
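# Worked example for rotate_half (hand-checkable):
#   x = torch.arange(8.0)    # [0, 1, 2, 3, 4, 5, 6, 7]
#   rotate_half(x)           # [-4, -5, -6, -7, 0, 1, 2, 3]
# The second half is negated and moved in front, i.e. the (-x2, x1) pairing RoPE uses.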
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
    # Unsqueeze along the heads dim (see docstring) so cos/sin broadcast against q/k.
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    rotary_dim = cos.shape[-1]
    q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
    k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]
# Apply rotary embeddings on the first half or full tensor
q_embed = (q_rot * cos) + (rotate_half(q_rot) * sin)
k_embed = (k_rot * cos) + (rotate_half(k_rot) * sin)
# Concatenate back to full shape
q_embed = torch.cat([q_embed, q_pass], dim=-1)
k_embed = torch.cat([k_embed, k_pass], dim=-1)
return q_embed, k_embed
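# Identity-rotation check for apply_rotary_pos_emb (illustrative shapes; cos = 1 and
# sin = 0 must leave q untouched, and channels past rotary_dim always pass through):
#   q = torch.randn(2, 8, 16, 64)                 # (batch, heads, seq, head_dim)
#   cos, sin = torch.ones(2, 16, 32), torch.zeros(2, 16, 32)   # rotary_dim = 32
#   q2, _ = apply_rotary_pos_emb(q, q, cos, sin)  # cos/sin unsqueezed to (2, 1, 16, 32)
#   torch.allclose(q2, q)                         # True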
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :,
None, :, :].expand(batch,
num_key_value_heads,
n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen,
head_dim)
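# Quick equivalence check for repeat_kv (illustrative shapes):
#   kv = torch.randn(1, 2, 5, 4)                      # (batch, kv_heads, seq, head_dim)
#   out = repeat_kv(kv, n_rep=3)                      # -> (1, 6, 5, 4)
#   torch.equal(out, kv.repeat_interleave(3, dim=1))  # True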
# Adapted from transformers.models.llama.modeling_llama.eager_attention_forward -> llama4 doesn't cast attn weights to fp32
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs,
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, :key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights,
p=dropout,
training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
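# Contract of eager_attention_forward (illustrative shapes): query is (batch, heads,
# q_len, head_dim); key/value are (batch, kv_heads, kv_len, head_dim) and are repeated
# up to `heads`; the additive mask is sliced to kv_len. The output comes back as
# (batch, q_len, heads, head_dim) so callers can flatten the last two dims for o_proj.
# As the adaptation note above says, softmax runs in the input dtype with no fp32 upcast.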
@dataclass
class Step3p5CausalLMOutputWithPast(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
`(batch_size, num_heads, sequence_length, embed_size_per_head)`)
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
"""
loss: Optional[torch.FloatTensor] = None
last_hidden_state: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None
    past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
class Step3p5MLP(nn.Module):
def __init__(self, config, intermediate_size=None, swiglu_limit=None):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = intermediate_size if intermediate_size is not None else config.intermediate_size
self.gate_proj = nn.Linear(self.hidden_size,
self.intermediate_size,
bias=False)
self.up_proj = nn.Linear(self.hidden_size,
self.intermediate_size,
bias=False)
self.down_proj = nn.Linear(self.intermediate_size,
self.hidden_size,
bias=False)
self.act_fn = ACT2FN["silu"]
self.limit = swiglu_limit
def forward(self, x):
up = self.up_proj(x)
gate = self.act_fn(self.gate_proj(x))
if self.limit is not None:
gate = gate.clamp(min=None, max=self.limit)
up = up.clamp(min=-self.limit, max=self.limit)
return self.down_proj(gate * up)
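# Clamped-SwiGLU sketch (illustrative limit): with swiglu_limit = 7.0,
#   gate = silu(gate_proj(x)).clamp(max=7.0)   # one-sided; silu is bounded below anyway
#   up   = up_proj(x).clamp(-7.0, 7.0)         # two-sided
#   y    = down_proj(gate * up)
# which keeps pre-down-projection activations bounded, useful at low precision.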
def sigmoid_routing_function(gating_output: torch.Tensor, topk: int,
renormalize: bool):
gating_output = gating_output.float()
gate_prob = torch.sigmoid(gating_output)
gate_prob = gate_prob / gate_prob.sum(dim=-1, keepdim=True)
topk_prob, indices = torch.topk(gate_prob, k=topk, dim=1)
expert_topk_weight = topk_prob
if renormalize:
expert_topk_weight = expert_topk_weight / torch.sum(
expert_topk_weight, dim=-1, keepdim=True)
return expert_topk_weight, indices
def softmax_routing_function(gating_output: torch.Tensor, top_k: int,
renormalize: bool):
gating_output = gating_output.float()
gate_prob = torch.softmax(gating_output, dim=-1)
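    # Softmax already sums to 1 along the last dim, so the renormalization below is a
    # numerical no-op kept for symmetry with the sigmoid router above.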
gate_prob = gate_prob / gate_prob.sum(dim=-1, keepdim=True)
topk_prob, indices = torch.topk(gate_prob, k=top_k, dim=1)
expert_topk_weight = topk_prob
if renormalize:
expert_topk_weight = expert_topk_weight / torch.sum(
expert_topk_weight, dim=-1, keepdim=True)
return expert_topk_weight, indices.to(torch.int32)
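# Toy routing demo (hand-checkable numbers): with two tokens and four experts,
#   logits = torch.tensor([[2.0, 1.0, 0.0, -1.0],
#                          [0.0, 0.0, 3.0, 0.0]])
#   w, idx = softmax_routing_function(logits, top_k=2, renormalize=True)
# keeps experts (0, 1) for token 0 and expert 2 plus one of the tied experts for
# token 1, then renormalizes the two kept weights to sum to 1 per token. The sigmoid
# variant only differs in squashing each logit independently before normalizing.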
class MoELinear(nn.Module):
def __init__(self, num_experts, in_features, out_features):
super().__init__()
self.num_experts = num_experts
self.in_features = in_features
self.out_features = out_features
self.weight = nn.Parameter(
torch.empty(num_experts, out_features, in_features))
def forward(self, x, expert_id):
x = F.linear(x.float(), self.weight[expert_id].float())
return x
class Step3p5MoEMLP(nn.Module):
def __init__(self, config, swiglu_limit=None):
super().__init__()
self.num_experts = config.moe_num_experts
self.top_k = config.moe_top_k
self.hidden_size = config.hidden_size
self.moe_intermediate_size = config.moe_intermediate_size
self.use_moe_router_bias = config.use_moe_router_bias
if self.use_moe_router_bias:
self.router_bias = nn.Parameter(torch.zeros(config.moe_num_experts,
dtype=torch.float32),
requires_grad=False)
self.custom_routing_function = self.router_bias_func
elif config.moe_router_activation == "sigmoid":
self.custom_routing_function = sigmoid_routing_function
else:
self.custom_routing_function = None
self.need_fp32_gate = config.need_fp32_gate
self.routed_scaling_factor = getattr(config,
"moe_router_scaling_factor", 1.0)
# gating
self.gate = nn.Linear(self.hidden_size, self.num_experts, bias=False)
self.act_fn = ACT2FN["silu"]
self.limit = swiglu_limit
self.up_proj = MoELinear(self.num_experts, self.hidden_size,
self.moe_intermediate_size)
self.gate_proj = MoELinear(self.num_experts, self.hidden_size,
self.moe_intermediate_size)
self.down_proj = MoELinear(self.num_experts,
self.moe_intermediate_size,
self.hidden_size)
def router_bias_func(self, gating_output: torch.Tensor, topk: int,
renormalize: bool):
gate_prob = torch.sigmoid(gating_output.float())
gate_prob_with_bias = gate_prob + self.router_bias.unsqueeze(0)
_, indices = torch.topk(gate_prob_with_bias, k=topk, dim=1)
topk_prob = torch.gather(gate_prob, 1, indices)
expert_topk_weight = topk_prob
if renormalize:
expert_topk_weight = expert_topk_weight / (
torch.sum(expert_topk_weight, dim=-1, keepdim=True) + 1e-20)
return expert_topk_weight, indices
def get_expert_output(self, inputs: torch.Tensor, expert_id):
up = self.up_proj(inputs, expert_id)
gate = self.act_fn(self.gate_proj(inputs, expert_id))
if self.limit is not None:
gate = gate.clamp(min=None, max=self.limit)
up = up.clamp(min=-self.limit, max=self.limit)
return self.down_proj(gate * up, expert_id)
def forward(self, hidden_states):
""" """
batch_size, sequence_length, hidden_dim = hidden_states.shape
hidden_states = hidden_states.view(-1, hidden_dim)
if self.need_fp32_gate:
router_logits = torch.matmul(hidden_states.to(torch.float32), self.gate.weight.t().to(torch.float32))
else:
# router_logits: (batch * sequence_length, n_experts)
router_logits = self.gate(hidden_states)
if self.custom_routing_function:
routing_weights, selected_experts = self.custom_routing_function(
router_logits, self.top_k, renormalize=True)
else:
routing_weights = F.softmax(router_logits,
dim=1,
dtype=torch.float)
routing_weights, selected_experts = torch.topk(routing_weights,
self.top_k,
dim=-1)
routing_weights = routing_weights * self.routed_scaling_factor
final_hidden_states = torch.zeros(
(batch_size * sequence_length, hidden_dim),
dtype=hidden_states.dtype,
device=hidden_states.device)
# One hot encode the selected experts to create an expert mask
# this will be used to easily index which expert is going to be sollicitated
expert_mask = torch.nn.functional.one_hot(
selected_experts, num_classes=self.num_experts).permute(2, 1, 0)
# Loop over all available experts in the model and perform the computation on each expert
for expert_idx in range(self.num_experts):
idx, top_x = torch.where(expert_mask[expert_idx])
# Index the correct hidden states and compute the expert hidden state for
# the current expert. We need to make sure to multiply the output hidden
# states by `routing_weights` on the corresponding tokens (top-1 and top-2)
current_state = hidden_states[None, top_x].reshape(-1, hidden_dim)
current_hidden_states = (
self.get_expert_output(current_state, expert_idx) *
routing_weights[top_x, idx, None])
# However `index_add_` only support torch tensors for indexing so we'll use
# the `top_x` tensor here.
final_hidden_states.index_add_(
0, top_x, current_hidden_states.to(hidden_states.dtype))
final_hidden_states = final_hidden_states.reshape(
batch_size, sequence_length, hidden_dim)
return final_hidden_states
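# Dispatch sketch for the expert loop above (illustrative sizes): with 4 experts and
# top_k = 2, expert_mask is one-hot with shape (num_experts, top_k, num_tokens), so
#   idx, top_x = torch.where(expert_mask[e])
# gives, for expert e, the flat token positions `top_x` routed to it and the slot
# `idx` (0 or 1) each token used, which selects the matching routing weight. Experts
# that received no tokens contribute nothing, and index_add_ scatters each expert's
# weighted output back into the flat (num_tokens, hidden) accumulator.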
class Step3p5RMSNorm(nn.Module):
def __init__(
self,
hidden_size: int,
eps: float = 1e-5,
) -> None:
super().__init__()
        # Zero-centered gain (the effective scale below is `weight + 1`), so initialize
        # at zero for an identity gain; loaded checkpoints overwrite this anyway.
        self.weight = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x: torch.Tensor) -> torch.Tensor:
dtype = x.dtype
x = x.float()
variance = x.pow(2).mean(dim=-1, keepdim=True)
normed = x * torch.rsqrt(variance + self.variance_epsilon)
normed = normed * (self.weight.float() + 1)
return normed.to(dtype)
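# Quick check of the zero-centered gain above (illustrative):
#   norm = Step3p5RMSNorm(4)
#   x = torch.randn(1, 4)
#   expected = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + 1e-5)
#   torch.allclose(norm(x), expected)   # True: weight starts at 0, so the gain is 0 + 1 = 1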
class Step3p5Attention(nn.Module):
def __init__(self, config: Step3p5Config, layer_idx):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.num_attention_heads = config.num_attention_heads
self.num_key_value_heads = config.num_attention_groups
layer_types = getattr(config, "layer_types", [])
if layer_types:
enable_sliding_window = layer_types[
self.layer_idx] == "sliding_attention"
else:
enable_sliding_window = self.layer_idx % 2 == 0
if hasattr(config, "yarn_only_types") and layer_types[
self.layer_idx] not in config.yarn_only_types:
config.rope_parameters = None
else:
config.rope_parameters = getattr(config, "rope_scaling", None)
self.sliding_window = config.sliding_window
if enable_sliding_window:
self.num_attention_heads = config.attention_other_setting[
"num_attention_heads"]
self.num_key_value_heads = config.attention_other_setting[
"num_attention_groups"]
        # Only sliding-window layers keep a window; full-attention layers disable it.
        if not enable_sliding_window:
            self.sliding_window = None
self.head_dim = getattr(config, "head_dim",
config.hidden_size // self.num_attention_heads)
self.num_key_value_groups = self.num_attention_heads // self.num_key_value_heads
self.rotary_emb = Step3p5RotaryEmbedding(config, layer_idx=layer_idx)
self.q_size = self.num_attention_heads * self.head_dim
self.kv_size = self.num_key_value_heads * self.head_dim
        self.scaling = self.head_dim**-0.5
        # `attention_dropout` is read in forward(); default to 0.0 when the config
        # does not define it.
        self.attention_dropout = getattr(config, "attention_dropout", 0.0)
self.q_proj = nn.Linear(config.hidden_size, self.q_size, bias=False)
self.k_proj = nn.Linear(config.hidden_size, self.kv_size, bias=False)
self.v_proj = nn.Linear(config.hidden_size, self.kv_size, bias=False)
self.o_proj = nn.Linear(self.q_size, config.hidden_size, bias=False)
self.q_norm = Step3p5RMSNorm(self.head_dim,
eps=config.rms_norm_eps)
self.k_norm = Step3p5RMSNorm(self.head_dim,
eps=config.rms_norm_eps)
self.use_head_wise_attn_gate = config.use_head_wise_attn_gate
if self.use_head_wise_attn_gate:
self.g_proj = nn.Linear(config.hidden_size,
self.num_attention_heads,
bias=False)
self.use_rope = True
use_rope_layers = getattr(config, "use_rope_layers", None)
if use_rope_layers:
self.use_rope = use_rope_layers[self.layer_idx]
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor],
past_key_value: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> Tuple[torch.Tensor, Optional[torch.Tensor],
Optional[Tuple[torch.Tensor]]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_norm(
self.q_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
key_states = self.k_norm(
self.k_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(
1, 2)
if self.use_head_wise_attn_gate:
gate_states = self.g_proj(hidden_states)
        if self.use_rope:
            cos, sin = self.rotary_emb(hidden_states, position_ids)
            query_states, key_states = apply_rotary_pos_emb(
                query_states, key_states, cos, sin)
        else:
            # Layers marked position-free in `use_rope_layers` skip the rotary embedding.
            cos = sin = None
        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position is needed for the static cache
            cache_kwargs = {
                "sin": sin,
                "cos": cos,
                "cache_position": cache_position
            }
            key_states, value_states = past_key_value.update(
                key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
        # TODO: consider FP8; with an integer mask SDPA raises:
        # RuntimeError: Expected attn_mask dtype to be bool or float or to match query dtype,
        # but got attn_mask.dtype: long int and query.dtype: c10::BFloat16 instead.
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[
self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
sliding_window=self.sliding_window, # main diff with Llama
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1)
if self.use_head_wise_attn_gate:
output = attn_output.view(
*attn_output.shape[:-1], self.num_attention_heads,
self.head_dim) * gate_states.unsqueeze(-1).sigmoid()
attn_output = output.view(*attn_output.shape)
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
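# Head-wise attention gate sketch (illustrative shapes): when enabled, g_proj maps the
# layer input to one scalar per head and each head's output is scaled by its sigmoid:
#   attn : (batch, seq, heads * head_dim) -> view (batch, seq, heads, head_dim)
#   gate : (batch, seq, heads)            -> unsqueeze(-1).sigmoid(), values in (0, 1)
#   out  = attn * gate, flattened back to (batch, seq, heads * head_dim) before o_proj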
class Step3p5DecoderLayer(GradientCheckpointingLayer):
def __init__(self, config, layer_idx):
super().__init__()
self.hidden_size = config.hidden_size
self.layer_idx = layer_idx
self.self_attn = Step3p5Attention(config, layer_idx)
self.attention_type = config.layer_types[layer_idx]
moe_layers_enum = getattr(config, "moe_layers_enum", None)
if moe_layers_enum is not None:
moe_layers_idx = [
int(i) for i in moe_layers_enum.strip().split(',')
]
else:
moe_layers_idx = [i for i in range(1, config.num_hidden_layers)]
self.is_moe_layer = layer_idx in moe_layers_idx
self.use_moe = False
if config.swiglu_limits_shared and config.swiglu_limits_shared[
layer_idx] is not None and config.swiglu_limits_shared[
layer_idx] != 0:
swiglu_limit_shared = config.swiglu_limits_shared[layer_idx]
else:
swiglu_limit_shared = None
if config.swiglu_limits and config.swiglu_limits[
layer_idx] is not None and config.swiglu_limits[layer_idx] != 0:
swiglu_limit = config.swiglu_limits[layer_idx]
else:
swiglu_limit = None
if self.is_moe_layer:
            self.moe = Step3p5MoEMLP(config, swiglu_limit=swiglu_limit)
self.share_expert = Step3p5MLP(
config,
intermediate_size=config.share_expert_dim,
swiglu_limit=swiglu_limit_shared)
self.use_moe = True
else:
self.mlp = Step3p5MLP(config,
intermediate_size=config.intermediate_size,
swiglu_limit=swiglu_limit_shared)
self.input_layernorm = Step3p5RMSNorm(
config.hidden_size,
eps=config.rms_norm_eps)
self.post_attention_layernorm = Step3p5RMSNorm(
config.hidden_size,
eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[tuple[torch.Tensor]] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> torch.FloatTensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_value=past_key_value,
cache_position=cache_position,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
if self.use_moe:
share_output = self.share_expert(hidden_states)
moe_output = self.moe(hidden_states)
ffn_output = moe_output + share_output
else:
ffn_output = self.mlp(hidden_states)
if isinstance(ffn_output, tuple):
hidden_states, _ = ffn_output
else:
hidden_states = ffn_output
hidden_states = residual + hidden_states
return hidden_states
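# Layer sketch: MoE layers run a dense "shared expert" MLP alongside the routed
# experts, so the FFN output is share_expert(h) + moe(h); dense layers use a single
# MLP. Both sit inside the usual pre-norm residual pattern:
#   h = h + self_attn(input_layernorm(h))
#   h = h + ffn(post_attention_layernorm(h))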
class Step3p5PreTrainedModel(PreTrainedModel):
# Link this model family to its configuration class so PreTrainedModel.from_pretrained
# can load the config instead of failing with a NoneType error.
config_class = Step3p5Config
supports_gradient_checkpointing = True
_skip_keys_device_placement = ["past_key_values"]
    _keys_to_ignore_on_load_unexpected = [
        # Note `\..*` (dot, then anything), not `\.*` (zero or more literal dots),
        # so the full parameter suffix of these trailing layers is matched.
        r"model\.layers\.45\..*",
        r"model\.layers\.46\..*",
        r"model\.layers\.47\..*",
    ]
_supports_flash_attn = False
_supports_sdpa = True
_supports_flex_attn = True
_supports_static_cache = True
_supports_attention_backend = True
class Step3p5Model(Step3p5PreTrainedModel):
    _no_split_modules = ["Step3p5DecoderLayer"]
    base_model_prefix = "model"
    config: Step3p5Config
def __init__(self, config: Step3p5Config):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size,
self.padding_idx)
self.layers = nn.ModuleList([
Step3p5DecoderLayer(config, layer_idx)
for layer_idx in range(config.num_hidden_layers)
])
self.norm = Step3p5RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.gradient_checkpointing = False
self.has_sliding_layers = "sliding_attention" in self.config.layer_types
# Initialize weights and apply final processing
self.post_init()
    def get_input_embeddings(self):
        return self.embed_tokens
    def set_input_embeddings(self, value):
        self.embed_tokens = value
@can_return_tuple
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, BaseModelOutputWithPast]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (output_hidden_states
if output_hidden_states is not None else
self.config.output_hidden_states)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError(
"You must specify exactly one of input_ids or inputs_embeds")
if self.gradient_checkpointing and self.training and use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
)
use_cache = False
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(
input_ids.to(self.embed_tokens.weight.device))
if use_cache and past_key_values is None:
past_key_values = DynamicCache()
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length(
) if past_key_values is not None else 0
cache_position = torch.arange(past_seen_tokens,
past_seen_tokens +
inputs_embeds.shape[1],
device=inputs_embeds.device)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
hidden_states = inputs_embeds
# It may already have been prepared by e.g. `generate`
if not isinstance(causal_mask_mapping := attention_mask, dict):
# Prepare mask arguments
mask_kwargs = {
"config": self.config,
"input_embeds": inputs_embeds,
"attention_mask": attention_mask,
"cache_position": cache_position,
"past_key_values": past_key_values,
"position_ids": position_ids,
}
# Create the masks
causal_mask_mapping = {
"full_attention": create_causal_mask(**mask_kwargs),
}
# The sliding window alternating layers are not always activated depending on the config
if self.has_sliding_layers:
causal_mask_mapping[
"sliding_attention"] = create_sliding_window_causal_mask(
**mask_kwargs)
        # decoder layers (position embeddings are computed inside each attention block)
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
for decoder_layer in self.layers[:self.config.num_hidden_layers]:
if output_hidden_states:
all_hidden_states += (hidden_states, )
layer_outputs = decoder_layer(
hidden_states,
attention_mask=causal_mask_mapping[
decoder_layer.attention_type],
position_ids=position_ids,
past_key_value=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = layer_outputs
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values if use_cache else None,
hidden_states=all_hidden_states,
attentions=all_self_attns,
)
class Step3p5ForCausalLM(Step3p5PreTrainedModel, GenerationMixin):
_tied_weights_keys = ["lm_head.weight"]
config: Step3p5Config
def __init__(self, config: Step3p5Config):
super().__init__(config)
self.model = Step3p5Model(config)
self.lm_head = nn.Linear(config.hidden_size,
config.vocab_size,
bias=False)
self.post_init()
    def get_input_embeddings(self):
        return self.model.get_input_embeddings()
    def set_input_embeddings(self, value):
        self.model.set_input_embeddings(value)
    def get_output_embeddings(self):
        return self.lm_head
    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings
    def set_decoder(self, decoder):
        self.model = decoder
    def get_decoder(self):
        return self.model
def forward(
self,
input_ids: torch.LongTensor = None,
num_patches=None,
patch_pixel_values=None,
patch_newline_mask=None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, Step3p5CausalLMOutputWithPast]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        Example:
        ```python
        >>> from transformers import AutoTokenizer, AutoModelForCausalLM
        >>> # The checkpoint path below is illustrative; point it at a local or Hub copy of this model.
        >>> model = AutoModelForCausalLM.from_pretrained("path/to/Step-3.5-Flash-4bit", trust_remote_code=True)
        >>> tokenizer = AutoTokenizer.from_pretrained("path/to/Step-3.5-Flash-4bit", trust_remote_code=True)
        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")
        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        ```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (output_hidden_states
if output_hidden_states is not None else
self.config.output_hidden_states)
outputs = self.model(
input_ids=input_ids,
num_patches=num_patches,
patch_pixel_values=patch_pixel_values,
patch_newline_mask=patch_newline_mask,
position_ids=position_ids,
attention_mask=attention_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
**kwargs,
)
        hidden_states = outputs.last_hidden_state
        logits = self.lm_head(hidden_states)
        loss = None
        if labels is not None:
            # Standard shifted next-token cross-entropy via the shared HF loss helper.
            loss = self.loss_function(logits=logits,
                                      labels=labels,
                                      vocab_size=self.config.vocab_size,
                                      **kwargs)
        return Step3p5CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
inputs_embeds=None,
pixel_values=None,
attention_mask=None,
cache_position=None,
logits_to_keep=None,
**kwargs,
):
model_inputs = super().prepare_inputs_for_generation(
input_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
logits_to_keep=logits_to_keep,
**kwargs,
)
        if cache_position[0] == 0:
            # Pixel values are only needed on the first (prefill) step. In the cached
            # decoding stage the input ids no longer contain special image tokens, so
            # pixel values stay out of the model inputs.
            model_inputs["pixel_values"] = pixel_values
return model_inputs
def _fix_state_dict_key_on_load(self, key: str) -> tuple[str, bool]:
if key.startswith("language_model."):
return key[len("language_model."):], True
return key, False
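# Checkpoint-key remapping sketch: weights saved under a multimodal wrapper carry a
# "language_model." prefix (e.g. "language_model.model.embed_tokens.weight"); the hook
# above strips that prefix so keys line up with this text-only module tree:
#   model._fix_state_dict_key_on_load("language_model.model.norm.weight")
#   # -> ("model.norm.weight", True)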