# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/qwen3/modular_qwen3.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_qwen3.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# Copyright 2025 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append("/data2/timsty/code/LearnLatent/")
from collections.abc import Callable
from typing import List
import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
from transformers.activations import ACT2FN
from transformers.cache_utils import Cache, DynamicCache
from transformers.integrations import use_kernel_forward_from_hub
from transformers.masking_utils import create_causal_mask, create_sliding_window_causal_mask
from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
from transformers.modeling_layers import (
GradientCheckpointingLayer,
)
from transformers import AutoModel
from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from transformers.processing_utils import Unpack
from transformers.utils import TransformersKwargs, auto_docstring, logging
from transformers.utils.generic import check_model_inputs
from starVLA.model.modules.action_model.configuration_actionmodel import ActionModelConfig
logger = logging.get_logger(__name__)
@use_kernel_forward_from_hub("RMSNorm")
class Qwen3RMSNorm(nn.Module):
def __init__(self, hidden_size, eps: float = 1e-6) -> None:
"""
Qwen3RMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
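# Minimal usage sketch for Qwen3RMSNorm (hypothetical shapes): RMS normalization
# rescales each hidden vector by its root-mean-square without re-centering and
# learns only a per-channel scale, so the output shape matches the input.
#
#   norm = Qwen3RMSNorm(hidden_size=8)
#   x = torch.randn(2, 4, 8)
#   y = norm(x)  # shape preserved: (2, 4, 8)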
class Qwen3MLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
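# Sketch of applying RoPE with the default unsqueeze_dim=1, i.e. q and k laid
# out as [batch, heads, seq, head_dim] (names and shapes are illustrative):
#
#   rope = Qwen3RotaryEmbedding(config)
#   position_ids = torch.arange(seq_len).unsqueeze(0)      # [1, seq]
#   cos, sin = rope(hidden_states, position_ids)           # [1, seq, head_dim] each
#   q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)    # same shapes as q, k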
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
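# Sketch of the grouped-query-attention expansion done by repeat_kv: with 2 KV
# heads and n_rep=4, the KV heads are tiled to match 8 query heads.
#
#   kv = torch.randn(1, 2, 16, 64)      # [batch, kv_heads, seq, head_dim]
#   expanded = repeat_kv(kv, n_rep=4)   # [1, 8, 16, 64]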
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: torch.Tensor | None,
scaling: float,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
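# Sketch of calling eager_attention_forward standalone. It only reads
# `num_key_value_groups` and `training` from the module argument, so a plain
# namespace suffices for a quick check (all shapes are illustrative):
#
#   from types import SimpleNamespace
#   mod = SimpleNamespace(num_key_value_groups=4, training=False)
#   q = torch.randn(1, 8, 16, 64)       # [batch, q_heads, seq, head_dim]
#   k = torch.randn(1, 2, 16, 64)       # [batch, kv_heads, seq, head_dim]
#   v = torch.randn(1, 2, 16, 64)
#   out, w = eager_attention_forward(mod, q, k, v, None, scaling=64 ** -0.5)
#   # out: [1, 16, 8, 64] (head dim transposed back), w: [1, 8, 16, 16]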
class Qwen3Attention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: ActionModelConfig, layer_idx: int):
super().__init__()
self.layer_type = None
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(
config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
)
self.k_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.v_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.o_proj = nn.Linear(
config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
)
self.q_norm = Qwen3RMSNorm(self.head_dim, eps=config.rms_norm_eps) # unlike olmo, only on the head dim!
self.k_norm = Qwen3RMSNorm(self.head_dim, eps=config.rms_norm_eps) # thus post q_norm does not need reshape
self.sliding_window = None
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: torch.Tensor | None,
past_key_values: Cache | None = None,
cache_position: torch.LongTensor | None = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, torch.Tensor | None]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_norm(self.q_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
key_states = self.k_norm(self.k_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
sliding_window=self.sliding_window, # diff with Llama
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
class Qwen3Layer(GradientCheckpointingLayer):
def __init__(self, config: ActionModelConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = Qwen3Attention(config=config, layer_idx=layer_idx)
self.mlp = Qwen3MLP(config)
self.input_layernorm = Qwen3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = Qwen3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor | None = None,
position_ids: torch.LongTensor | None = None,
past_key_values: Cache | None = None,
use_cache: bool | None = False,
cache_position: torch.LongTensor | None = None,
position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
@auto_docstring
class ActionPreTrainedModel(PreTrainedModel):
config: ActionModelConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["Qwen3Layer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": Qwen3Layer,
"attentions": Qwen3Attention,
}
class Qwen3RotaryEmbedding(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: ActionModelConfig, device=None):
super().__init__()
# BC: "rope_type" was originally "type"
if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict):
self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
else:
self.rope_type = "default"
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = self.inv_freq
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
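# Shape sketch: for position_ids of shape [batch, seq], forward returns cos and
# sin of shape [batch, seq, head_dim] in the dtype of `x`; apply_rotary_pos_emb
# later unsqueezes them over the head dimension.
#
#   rope = Qwen3RotaryEmbedding(config)
#   cos, sin = rope(hidden_states, torch.arange(seq_len).unsqueeze(0))
#   # cos, sin: [1, seq_len, head_dim]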
@auto_docstring
class ActionModel(ActionPreTrainedModel):
def __init__(self, config: ActionModelConfig):
super().__init__(config)
# self.padding_idx = config.pad_token_id
self.config = config
self.cls_token = nn.Parameter(torch.randn(1, 1, config.hidden_size))
self.state_token = nn.Parameter(torch.randn(1, 1, config.hidden_size))
self.action_mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
self.dataset_embed = nn.Embedding(
config.dataset_vocab_size,
config.hidden_size * config.num_data_tokens,
)
self.state_proj_in = nn.Linear(config.state_size, config.hidden_size)
self.action_proj_in = nn.Linear(config.action_size, config.hidden_size)
self.action_encoder = nn.ModuleList(
[Qwen3Layer(config, layer_idx) for layer_idx in range(config.num_encoder_layers)]
)
if self.config.use_vae_reparameterization:
self.fc_mu = nn.Linear(config.hidden_size, config.hidden_size)
self.fc_var = nn.Linear(config.hidden_size, config.hidden_size)
else:
# self.emb_norm = nn.LayerNorm(config.hidden_size)
pass
self.placeholder_tokens = nn.Parameter(torch.randn(1, config.max_action_chunk_size, config.hidden_size))
self.action_decoder = nn.ModuleList(
[Qwen3Layer(config, layer_idx) for layer_idx in range(config.num_decoder_layers)]
)
self.norm = Qwen3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.action_proj_out = nn.Linear(config.hidden_size, config.action_size)
self.rotary_emb = Qwen3RotaryEmbedding(config=config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
self._maybe_init_from_qwen3()
def _maybe_init_from_qwen3(self) -> None:
name_or_path = getattr(self.config, "qwen3_pretrained_name_or_path", None)
if not name_or_path:
return
pretrained = AutoModel.from_pretrained(
name_or_path,
torch_dtype="auto",
low_cpu_mem_usage=True,
)
src_sd = pretrained.state_dict()
layer_prefix = None
for p in ("model.layers.", "layers."):
if any(k.startswith(p) for k in src_sd.keys()):
layer_prefix = p
break
norm_prefix = None
for p in ("model.norm.", "norm."):
if any(k.startswith(p) for k in src_sd.keys()):
norm_prefix = p
break
def _map_layer_key(target_key: str, module_prefix: str, layer_offset: int) -> str | None:
# target_key example: "action_encoder.0.self_attn.q_proj.weight"
rest = target_key[len(module_prefix) + 1 :] # "0.self_attn.q_proj.weight"
parts = rest.split(".", 1)
if len(parts) != 2:
return None
try:
tgt_idx = int(parts[0])
except ValueError:
return None
src_idx = tgt_idx + int(layer_offset)
return f"{layer_prefix}{src_idx}.{parts[1]}"
own_sd = self.state_dict()
to_load: dict[str, torch.Tensor] = {}
matched = 0
missing = 0
shape_mismatch = 0
init_enc = bool(getattr(self.config, "qwen3_init_action_encoder", True))
init_dec = bool(getattr(self.config, "qwen3_init_action_decoder", True))
init_norm = bool(getattr(self.config, "qwen3_init_norm", True))
enc_off = int(getattr(self.config, "qwen3_encoder_layer_offset", 0))
dec_off = int(getattr(self.config, "qwen3_decoder_layer_offset", 0))
for k, tgt_tensor in own_sd.items():
src_key = None
if init_enc and k.startswith("action_encoder."):
src_key = _map_layer_key(k, "action_encoder", enc_off)
elif init_dec and k.startswith("action_decoder."):
src_key = _map_layer_key(k, "action_decoder", dec_off)
elif init_norm and k == "norm.weight" and norm_prefix is not None:
src_key = f"{norm_prefix}weight"
if not src_key:
continue
src_tensor = src_sd.get(src_key, None)
if src_tensor is None:
missing += 1
continue
if src_tensor.shape != tgt_tensor.shape:
shape_mismatch += 1
continue
to_load[k] = src_tensor.to(device=tgt_tensor.device, dtype=tgt_tensor.dtype)
matched += 1
self.load_state_dict(to_load, strict=False)
        logger.info(
            f"Initialized from Qwen3 checkpoint {name_or_path}. "
            f"matched={matched} missing={missing} shape_mismatch={shape_mismatch} prefix={layer_prefix}"
        )
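    # Config fields read by the warm start above (all accessed defensively via
    # getattr, so each is an assumption about ActionModelConfig; the defaults
    # below apply when a field is absent, and the checkpoint name is hypothetical):
    #
    #   config.qwen3_pretrained_name_or_path = "Qwen/Qwen3-0.6B"
    #   config.qwen3_init_action_encoder = True  # copy layers into action_encoder
    #   config.qwen3_init_action_decoder = True  # copy layers into action_decoder
    #   config.qwen3_init_norm = True            # copy the final RMSNorm weight
    #   config.qwen3_encoder_layer_offset = 0    # source layer = target index + offset
    #   config.qwen3_decoder_layer_offset = 0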
@auto_docstring
def forward(
self,
examples: List[dict] = None,
**kwargs: Unpack[TransformersKwargs],
):
device = next(self.parameters()).device
batch_size = len(examples)
# =========================================================================
# 1. ๅ˜้•ฟ้‡‡ๆ ท (Variable-length Horizon)
# =========================================================================
        min_available_len = min(len(ex["action"]) for ex in examples)
        limit_len = min(min_available_len, self.config.max_action_chunk_size)
        current_chunk_size = np.random.randint(self.config.min_action_len, limit_len + 1)
raw_actions = torch.tensor(
np.array([ex["action"][:current_chunk_size] for ex in examples]),
device=device, dtype=torch.float32
) # Shape: [B, L, Action_Dim]
        # Force full fp32 inside this block. CUDA autocast only supports fp16/bf16
        # (passing float32 just warns and disables it), so disable it explicitly.
        with torch.autocast("cuda", enabled=False):
# =========================================================================
# 2. State Encoding & Masking
# =========================================================================
states = [example["state"] for example in examples] if "state" in examples[0] else None
if states is not None:
states_tensor = torch.tensor(
np.array(states), device=device, dtype=torch.float32
)
state_embeds = self.state_proj_in(states_tensor)
if self.config.state_drop_prob > 0:
keep_mask = torch.bernoulli(
torch.full((batch_size, 1, 1), 1 - self.config.state_drop_prob, device=device)
)
                    # Replace dropped states with the learnable state_token
state_token_expanded = self.state_token.expand(batch_size, 1, -1)
state_embeds = keep_mask * state_embeds + (1 - keep_mask) * state_token_expanded
else:
state_embeds = self.state_token.expand(batch_size, -1, -1)
# =========================================================================
# 3. Action Input Construction & Masking (DAE)
# =========================================================================
inputs_embeds = self.action_proj_in(raw_actions)
if self.config.mask_ratio > 0:
                # Build the action mask.
                # Convention: True marks a position that is masked out (and will be
                # replaced by the mask token).
random_matrix = torch.rand(batch_size, current_chunk_size, device=device)
input_mask = random_matrix < self.config.mask_ratio
                # Expand the mask to the hidden dim
input_mask_expanded = input_mask.unsqueeze(-1).float()
# ๆ›ฟๆข่ขซ Mask ็š„้ƒจๅˆ†
mask_token_expanded = self.action_mask_token.expand(batch_size, current_chunk_size, -1)
inputs_embeds = (1 - input_mask_expanded) * inputs_embeds + input_mask_expanded * mask_token_expanded
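            # The convex combination above swaps in action_mask_token wherever
            # input_mask is True and keeps the projected action embedding
            # elsewhere; in expectation a mask_ratio fraction of the action
            # tokens is corrupted, which yields the denoising (DAE) objective.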
# =========================================================================
# 4. Dataset Soft Prompt (X-VLA)
# =========================================================================
            dataset_ids = [ex.get("dataset_id", 0) for ex in examples]  # defaults to id 0
dataset_ids_tensor = torch.tensor(dataset_ids, device=device, dtype=torch.long)
ds_embeds = self.dataset_embed(dataset_ids_tensor).view(
batch_size, self.config.num_data_tokens, self.config.hidden_size
) # [B, num_data_tokens, H]
            # Concatenate the encoder inputs: [CLS, Dataset_Token, State, Action_1...Action_L]
cls_token_expanded = self.cls_token.expand(batch_size, -1, -1)
encoder_inputs = torch.cat((cls_token_expanded, ds_embeds, state_embeds, inputs_embeds), dim=1)
seq_len = encoder_inputs.shape[1]
encoder_attention_mask = torch.ones((batch_size, 1, seq_len, seq_len), device=device, dtype=torch.bool)
encoder_pos_ids = torch.arange(seq_len, device=device).unsqueeze(0)
enc_pos_emb = self.rotary_emb(encoder_inputs, encoder_pos_ids)
hidden_states = encoder_inputs
for encoder_layer in self.action_encoder:
hidden_states = encoder_layer(
hidden_states,
attention_mask=encoder_attention_mask,
position_embeddings=enc_pos_emb,
position_ids=encoder_pos_ids,
**kwargs,
)
# Get Latent (CLS token)
action_embedding = hidden_states[:, :1, :]
vae_kl_loss = None
if self.config.use_vae_reparameterization:
mu = self.fc_mu(action_embedding)
log_var = self.fc_var(action_embedding)
if self.training:
std = torch.exp(log_var * 0.5)
eps = torch.randn_like(std)
action_embedding = mu + eps * std
                    # KL divergence to the standard normal prior, in closed form:
                    # KL = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
                    kl_loss_per_sample = -0.5 * torch.sum(1 + log_var - mu.pow(2) - log_var.exp(), dim=[1, 2])
                    vae_kl_loss = torch.mean(kl_loss_per_sample) / self.config.hidden_size
else:
action_embedding = mu
# L2 normalize action embedding before decoder
action_embedding = F.normalize(action_embedding, p=2, dim=-1)
# =========================================================================
# Decoder
# =========================================================================
# Decoder Input: [Latent, Mask_1...Mask_L]
placeholder_tokens = self.placeholder_tokens[:, :current_chunk_size, :].expand(batch_size, -1, -1)
decoder_inputs = torch.cat((action_embedding, placeholder_tokens), dim=1)
dec_seq_len = decoder_inputs.shape[1]
decoder_attention_mask = torch.ones((batch_size, 1, dec_seq_len, dec_seq_len), device=device,
dtype=torch.bool)
dec_pos_ids = torch.arange(dec_seq_len, device=device).unsqueeze(0)
dec_pos_emb = self.rotary_emb(decoder_inputs, dec_pos_ids)
hidden_states = decoder_inputs
for decoder_layer in self.action_decoder:
hidden_states = decoder_layer(
hidden_states,
attention_mask=decoder_attention_mask,
position_embeddings=dec_pos_emb,
position_ids=dec_pos_ids,
)
hidden_states = self.norm(hidden_states)
reconstructed_actions = self.action_proj_out(hidden_states[:, 1:, :])
# recon_loss = F.mse_loss(reconstructed_actions, raw_actions)
recon_loss = F.l1_loss(reconstructed_actions, raw_actions)
return {
"recon_loss": recon_loss,
"vae_kl_loss": vae_kl_loss,
}
def recon_loss(self, actions, states=None, freeze_encoder=False, **kwargs):
"""
่ฎก็ฎ—้‡ๅปบๆŸๅคฑ
Args:
actions: ่พ“ๅ…ฅๅŠจไฝœๅบๅˆ—
states: ็Šถๆ€ๅ‘้‡๏ผˆๅฏ้€‰๏ผ‰
freeze_encoder: ๆ˜ฏๅฆๅ†ป็ป“ encoder๏ผˆๅฆ‚ๆžœ True๏ผŒๅˆ™ detach embeddings๏ผŒๅช่ฎญ็ปƒ decoder๏ผ‰
"""
action_embeddings = self.encode_actions(actions, states)
if freeze_encoder:
            # Detach the embeddings: train only the decoder, not the encoder
action_embeddings = action_embeddings.detach()
reconstructed_actions = self.decode_actions(action_embeddings, chunk_size=actions.shape[1])
return F.l1_loss(reconstructed_actions, actions)
def encode_actions(self, actions, states=None, **kwargs):
inputs_embeds = self.action_proj_in(actions)
batch_size = inputs_embeds.shape[0]
cls_token_expanded = self.cls_token.expand(batch_size, -1, -1)
states = self.state_proj_in(states) if states is not None else self.state_token.expand(batch_size, -1, -1)
inputs_embeds = torch.cat((cls_token_expanded, states, inputs_embeds), dim=1)
seq_len = inputs_embeds.shape[1]
encoder_attention_mask = torch.ones(
(batch_size, 1, seq_len, seq_len),
device=inputs_embeds.device,
dtype=torch.bool
)
encoder_pos_ids = torch.arange(seq_len, device=inputs_embeds.device).unsqueeze(0)
enc_pos_emb = self.rotary_emb(inputs_embeds, encoder_pos_ids)
hidden_states = inputs_embeds
for encoder_layer in self.action_encoder:
hidden_states = encoder_layer(
hidden_states,
attention_mask=encoder_attention_mask,
position_embeddings=enc_pos_emb,
position_ids=encoder_pos_ids,
**kwargs,
)
action_embedding = hidden_states[:, :1, :]
if self.config.use_vae_reparameterization:
mu = self.fc_mu(action_embedding)
return F.normalize(mu, p=2, dim=-1) # L2 normalized
else:
return F.normalize(action_embedding, p=2, dim=-1) # L2 normalized
    def decode_actions(self, action_embedding, chunk_size=None, **kwargs):
if chunk_size is None:
chunk_size = self.config.max_action_chunk_size
batch_size = action_embedding.shape[0]
        # 1. Build the decoder input: [Latent, Placeholders]
        # Note: action_embedding is expected to be (Batch, 1, Dim)
if action_embedding.dim() == 2:
action_embedding = action_embedding.unsqueeze(1)
placeholder_tokens = self.placeholder_tokens[:, :chunk_size, :].expand(batch_size, -1, -1)
hidden_states = torch.cat((action_embedding, placeholder_tokens), dim=1)
        # 2. Build the mask and positional embeddings (matching forward)
dec_seq_len = hidden_states.shape[1]
decoder_attention_mask = torch.ones(
(batch_size, 1, dec_seq_len, dec_seq_len),
device=action_embedding.device,
dtype=torch.bool
)
dec_pos_ids = torch.arange(dec_seq_len, device=action_embedding.device).unsqueeze(0)
dec_pos_emb = self.rotary_emb(hidden_states, dec_pos_ids)
# 3. Decoder Forward
for decoder_layer in self.action_decoder:
hidden_states = decoder_layer(
hidden_states,
attention_mask=decoder_attention_mask,
position_embeddings=dec_pos_emb,
position_ids=dec_pos_ids,
)
hidden_states = self.norm(hidden_states)
reconstructed_actions = self.action_proj_out(hidden_states[:, 1:, :])
return reconstructed_actions
__all__ = [
"ActionPreTrainedModel",
"ActionModel",
]
if __name__ == "__main__":
    config = ActionModelConfig()
    action_model = ActionModel(config)
    print(action_model)
    print("Total number of trainable parameters: ",
          sum(p.numel() for p in action_model.parameters() if p.requires_grad))
    # Pick the device first so the demo also runs on CPU-only machines.
    device = torch.device("cuda:7" if torch.cuda.is_available() else "cpu")
    action_model = action_model.to(device)
    fake_actions = torch.randn(10, 15, 32, device=device)
    sample = {
        "action": np.random.uniform(-1, 1, size=(16, 32)).astype(np.float16),  # action_chunk, action_dim
        "lang": "put the ball on the table",
        "state": np.random.uniform(-1, 1, size=(1, 32)).astype(np.float16),  # chunk, state_dim
    }
    batch = [sample, sample]
    outputs = action_model(batch)
    print(outputs)
    action_embedding = action_model.encode_actions(fake_actions)
    print(f"action_embedding: {action_embedding}")
    reconstructed_actions = action_model.decode_actions(action_embedding, chunk_size=15)
    print(f"reconstructed_actions: {reconstructed_actions.shape}")