sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
verl-project/verl:verl/workers/engine/torchtitan/utils.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import logging
import re
from collections import defaultdict
from collections.abc import Generator, Iterator
from dataclasses import dataclass
from typing import Any
import torch
import torch.distributed
import torch.nn as nn
from torch.distributed._composable.fsdp import FSDPModule
from torch.distributed.tensor import DTensor
from torch.nn.attention.flex_attention import _mask_mod_signature, and_masks
from torchtitan.components.dataloader import BaseDataLoader
from torchtitan.models.common.attention import (
AttentionMasksType,
VarlenMetadata,
create_attention_mask,
get_causal_mask_mod,
)
logger = logging.getLogger(__name__)
class NoOpDataLoader(BaseDataLoader):
    """Placeholder dataloader used when verl drives data loading itself.

    Implements the ``BaseDataLoader`` interface expected by torchtitan's
    Trainer, but every operation is a no-op: iteration yields nothing and
    checkpoint state is empty.
    """

    @dataclass(kw_only=True, slots=True)
    class Config(BaseDataLoader.Config):
        pass

    def __init__(self, **kwargs):
        # Deliberately ignore all construction arguments.
        pass

    def __iter__(self) -> Iterator[tuple[dict[str, torch.Tensor], torch.Tensor]]:
        # Empty generator: produces no batches at all.
        yield from ()

    def state_dict(self):
        # Nothing to checkpoint.
        return {}

    def load_state_dict(self, state_dict):
        # Nothing to restore.
        pass
# Mapping from HuggingFace model_type to torchtitan model name.
# Torchtitan models not mapped here:
#   - flux: diffusion model, not applicable to verl's RL/SFT workflows
#   - llama3_ft: fault-tolerant variant of llama3, same HF models (mapped via "llama")
# Note several HF types intentionally map to the same torchtitan package
# (e.g. all qwen2/qwen3 dense and MoE variants share "qwen3").
_HF_MODEL_TYPE_TO_TORCHTITAN_NAME = {
    "qwen2": "qwen3",
    "qwen3": "qwen3",
    "qwen2_moe": "qwen3",
    "qwen3_moe": "qwen3",
    "llama": "llama3",
    "llama4": "llama4",
    "deepseek_v3": "deepseek_v3",
    "gpt_oss": "gpt_oss",
}
def derive_torchtitan_name_and_flavor(hf_config) -> tuple[str, str]:
    """Derive torchtitan model name and flavor from a HuggingFace config.

    The name is mapped from ``hf_config.model_type``; the flavor is the
    registered torchtitan config whose (dim, n_layers, vocab_size) match the
    HF config's (hidden_size, num_hidden_layers, vocab_size).

    Args:
        hf_config: A HuggingFace AutoConfig object.

    Returns:
        A ``(name, flavor)`` tuple.

    Raises:
        ValueError: If model_type is unsupported or no matching flavor is found.
    """
    model_type = getattr(hf_config, "model_type", None)
    if model_type is None:
        raise ValueError("HuggingFace config does not have 'model_type' field")

    name = _HF_MODEL_TYPE_TO_TORCHTITAN_NAME.get(model_type)
    if name is None:
        raise ValueError(
            f"Cannot derive torchtitan model name from HF model_type '{model_type}'. "
            f"Supported types: {list(_HF_MODEL_TYPE_TO_TORCHTITAN_NAME.keys())}."
        )

    # Each torchtitan model package exposes its flavor registry as a
    # module-level dict whose name ends with "_configs".
    model_module = importlib.import_module(f"torchtitan.models.{name}")
    model_configs = None
    for attr_name in dir(model_module):
        candidate = getattr(model_module, attr_name)
        if attr_name.endswith("_configs") and isinstance(candidate, dict):
            model_configs = candidate
            break
    if model_configs is None:
        raise ValueError(
            f"Could not find model configs dict in torchtitan.models.{name}. "
            f"Expected a dict attribute ending with '_configs'."
        )

    hidden_size = hf_config.hidden_size
    num_layers = hf_config.num_hidden_layers
    vocab_size = hf_config.vocab_size
    # A flavor matches when its core architecture hyperparameters line up.
    for flavor_name, model_cfg in model_configs.items():
        is_match = (
            getattr(model_cfg, "dim", None) == hidden_size
            and getattr(model_cfg, "n_layers", None) == num_layers
            and getattr(model_cfg, "vocab_size", None) == vocab_size
        )
        if is_match:
            logger.info(
                f"Auto-derived torchtitan name='{name}', flavor='{flavor_name}' from HF model_type='{model_type}'"
            )
            return name, flavor_name

    raise ValueError(
        f"No matching torchtitan flavor found for model_type='{model_type}' "
        f"(hidden_size={hidden_size}, num_hidden_layers={num_layers}, "
        f"vocab_size={vocab_size}). "
        f"Available flavors for '{name}': {list(model_configs.keys())}."
    )
def enable_fsdp_gradient_division(model: nn.Module, dp_size: int) -> None:
"""
Re-enable FSDP's automatic gradient division.
TorchTitan calls disable_fsdp_gradient_division() which sets gradient_divide_factor=1.0.
This re-enables it by setting the factor to the specified dp_size, so gradients are
averaged across FSDP ranks. This is needed for verl's loss scaling (loss * dp_size)
to work correctly.
Args:
model: The model (or model part) to enable gradient division on.
dp_size: The data parallel size to use as the gradient divide factor.
"""
for module in model.modules():
if isinstance(module, FSDPModule):
module.set_gradient_divide_factor(float(dp_size))
def get_attention_masks(
    input_batch: torch.Tensor,
    positions: torch.Tensor,
    attn_type: str,
) -> AttentionMasksType:
    """Build attention masks/metadata for the requested attention backend.

    Args:
        input_batch: Input token IDs, shape [batch, seq].
        positions: Position IDs, shape [batch, seq]; resets mark document starts.
        attn_type: Either "flex" (flex-attention BlockMask) or "varlen".

    Raises:
        TypeError: For any other attn_type.
    """
    if attn_type == "flex":
        return _get_flex_attention_masks(input_batch, positions)
    if attn_type == "varlen":
        return _create_varlen_metadata_for_document(input_batch, positions)
    raise TypeError("Only varlen and flex attn masks are supported")
def _get_document_mask_mod(positions: torch.Tensor) -> _mask_mod_signature:
# Detect boundaries from position resets
first_dummy_value = positions[:, :1] - 1
position_diff = torch.diff(positions, prepend=first_dummy_value, dim=-1)
sequence_indices = (position_diff != 1).cumsum(-1) # [batch, seq]
def document_mask(b: torch.Tensor, h: torch.Tensor, q_idx: torch.Tensor, kv_idx: torch.Tensor) -> torch.Tensor:
return sequence_indices[b, q_idx] == sequence_indices[b, kv_idx]
return document_mask
def _get_flex_attention_masks(
    input_batch: torch.Tensor,
    positions: torch.Tensor,
) -> AttentionMasksType:
    """Create a flex-attention mask combining causality with document isolation."""
    batch_size, seq_len = input_batch.shape
    # Tokens may attend only backwards AND only within their own document.
    combined_mod = and_masks(get_causal_mask_mod(), _get_document_mask_mod(positions=positions))
    return create_attention_mask(combined_mod, batch_size, None, seq_len, seq_len)
def _create_varlen_metadata_for_document(input_batch: torch.Tensor, positions: torch.Tensor) -> VarlenMetadata:
    """
    Creates cumulative sequence length indices needed for variable length attention

    Args:
        input_batch: Input token IDs with shape [batch, seq].
        positions: Position IDs with shape [batch, seq]. Boundaries detected where
            position diff != 1 (i.e., position resets).

    Returns:
        VarlenMetadata containing cumulative sequence length indices for q, k, and max_seq_len
    """
    batch_size, seq_len = input_batch.shape
    device = input_batch.device
    # Detect boundaries from position resets (where diff != 1)
    first_dummy_value = positions[:, :1] - 1
    position_diff = torch.diff(positions, prepend=first_dummy_value, dim=-1)
    # boundary_mask[b, i] is True if position i starts a new document
    boundary_mask = position_diff != 1  # [batch, seq]
    # Position 0 always starts a document regardless of its position id.
    boundary_mask[:, 0] = True
    cu_seqlens_list, all_seq_lengths = [], []
    offset = 0
    for b in range(batch_size):
        # Find positions where new documents start
        boundary_positions = boundary_mask[b].nonzero(as_tuple=True)[0].to(torch.int32)
        # Append seq_len to close the final document of this sample.
        sample_cu_seqlens = torch.cat(
            [
                boundary_positions,
                torch.tensor([seq_len], dtype=torch.int32, device=device),
            ]
        )
        # Drop a duplicate entry if a boundary coincides with seq_len.
        sample_cu_seqlens = torch.unique_consecutive(sample_cu_seqlens)
        seq_lengths = torch.diff(sample_cu_seqlens)
        all_seq_lengths.append(seq_lengths)
        # Shift this sample's offsets so all batch rows pack into one flat
        # [batch * seq] sequence.
        cu_seqlens_adjusted = sample_cu_seqlens[:-1] + offset
        cu_seqlens_list.append(cu_seqlens_adjusted)
        offset += seq_len
    # Final entry closes the packed sequence at batch * seq_len.
    packed_cu_seqlens = torch.cat(cu_seqlens_list + [torch.tensor([offset], dtype=torch.int32, device=device)])
    max_seqlen = 0
    if len(all_seq_lengths) > 0:
        all_seq_lengths = torch.cat(all_seq_lengths)
        # device to host sync but only done once per model forward
        max_seqlen = all_seq_lengths.max().item()
    return VarlenMetadata(
        cu_seq_q=packed_cu_seqlens,
        cu_seq_k=packed_cu_seqlens,
        max_q=max_seqlen,
        max_k=max_seqlen,
    )
# Regex to parse: model.layers.{L}.mlp.experts.{E}.{weight_suffix}
_EXPERT_PATTERN = re.compile(r"\.layers\.(\d+)\..*\.experts\.(\d+)\.(.*)")
def _parse_expert_name(name: str) -> tuple[int, int, str] | None:
"""Parse layer_id, expert_id, weight_suffix from expert param name."""
match = _EXPERT_PATTERN.search(name)
if match:
return int(match.group(1)), int(match.group(2)), match.group(3)
return None
def _make_expert_name_template(name: str) -> str:
"""Convert 'model.layers.0.mlp.experts.3.w1' -> 'model.layers.0.mlp.experts.{}.w1'"""
return _EXPERT_PATTERN.sub(lambda m: f".layers.{m.group(1)}.mlp.experts.{{}}.{m.group(3)}", name)
def iter_per_tensor_params_ep(
    params: dict[str, Any],
    device: int,
    ep_group: torch.distributed.ProcessGroup,
    ep_size: int,
) -> Generator[tuple[str, torch.Tensor], None, None]:
    """Yield (name, tensor) pairs for weight sync with Expert Parallel.

    Gathers expert weights across EP ranks one (layer, weight_type) group
    at a time to avoid OOM from materializing all experts simultaneously.
    Non-expert params are yielded first (with FSDP full_tensor() if needed),
    then expert params are all-gathered per group and yielded individually.

    Args:
        params: HF-format state dict with per-expert keys. Expert keys must
            follow the pattern ``model.layers.{L}.mlp.experts.{E}.{suffix}``.
        device: device ID to place tensors on.
        ep_group: The EP process group for all-gather.
        ep_size: Number of EP ranks.
    """
    # Bucket expert weights by (layer, weight type) so each bucket can be
    # gathered and freed independently; everything else goes straight through.
    expert_params: dict[tuple[int, str], dict[int, tuple[str, Any]]] = defaultdict(dict)
    non_expert_params: list[tuple[str, Any]] = []
    for name, param in params.items():
        # Cheap substring pre-filter before running the regex parse.
        parsed = _parse_expert_name(name) if "mlp.experts." in name else None
        if parsed is None:
            non_expert_params.append((name, param))
        else:
            layer_id, expert_id, weight_suffix = parsed
            expert_params[(layer_id, weight_suffix)][expert_id] = (name, param)
    # Drop the caller's references so tensors can be freed as they are consumed.
    params.clear()
    # Yield non-expert params
    for name, param in non_expert_params:
        if isinstance(param, DTensor):
            # Materialize the sharded tensor and downcast for transfer.
            yield name, param.to(device, non_blocking=True).full_tensor().to(torch.bfloat16, non_blocking=True)
        else:
            yield name, param
    del non_expert_params
    # Yield expert params with all-gather
    for (layer_id, weight_suffix), experts_dict in sorted(expert_params.items()):
        sorted_expert_ids = sorted(experts_dict.keys())
        # Stack local expert weights
        local_weights = []
        for eid in sorted_expert_ids:
            _, param = experts_dict[eid]
            if isinstance(param, DTensor):
                param = param.to(device, non_blocking=True).full_tensor()
            else:
                param = param.to(device, non_blocking=True)
            local_weights.append(param)
        # Template is derived from any member name; expert id gets filled per yield.
        name_template = _make_expert_name_template(experts_dict[sorted_expert_ids[0]][0])
        local_stacked = torch.stack(local_weights, dim=0)
        # All-gather across EP ranks
        gathered_list = [torch.empty_like(local_stacked) for _ in range(ep_size)]
        torch.distributed.all_gather(gathered_list, local_stacked, group=ep_group)
        all_experts = torch.cat(gathered_list, dim=0)
        for expert_id in range(all_experts.shape[0]):
            # Clone so the yielded tensor does not pin the large gathered buffer.
            yield name_template.format(expert_id), all_experts[expert_id].to(torch.bfloat16).clone()
        # Free gathered buffers before the next group to cap peak memory.
        del local_weights, local_stacked, gathered_list, all_experts
        torch.cuda.empty_cache()
| {
"repo_id": "verl-project/verl",
"file_path": "verl/workers/engine/torchtitan/utils.py",
"license": "Apache License 2.0",
"lines": 281,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/utils/qat/core.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""QAT (Quantization-Aware Training) utilities for verl FSDP training."""
import json
import logging
import re
from dataclasses import dataclass, field
from typing import Any, Optional
import torch.nn as nn
from verl.base_config import BaseConfig
logger = logging.getLogger(__name__)
@dataclass
class QATConfig(BaseConfig):
    """Unified configuration for QAT (Quantization-Aware Training)."""

    # Master switch; when False, apply_qat returns the model unchanged.
    enable: bool = False
    # Quantization mode string, e.g. "w4a16" (weight-only) or "w4a4".
    mode: str = "w4a16"
    # Number of weight columns sharing one blockwise scale.
    group_size: int = 16
    # Module-name patterns excluded from quantization; "re:" prefix marks a regex.
    ignore_patterns: list[str] = field(default_factory=lambda: ["lm_head", "embed_tokens", "re:.*mlp.gate$"])
    # Observer strategy used for the activation global scale.
    activation_observer: str = "static_minmax"
    # Path to a quantization config JSON file (required when QAT is enabled).
    quantization_config_path: Optional[str] = None
def load_quantization_config(qat_config: QATConfig) -> dict[str, Any]:
    """Load quantization config JSON file from QATConfig.

    Raises:
        ValueError: if ``quantization_config_path`` is unset.
    """
    if not qat_config.quantization_config_path:
        raise ValueError("quantization_config_path is required when QAT is enabled")
    logger.info(f"Loading QAT quantization config from: {qat_config.quantization_config_path}")
    with open(qat_config.quantization_config_path) as f:
        quant_config = json.load(f)
    # The config-level ignore list always wins over the JSON's own 'ignore'.
    if qat_config.ignore_patterns:
        original_ignore = quant_config.get("ignore", [])
        quant_config["ignore"] = qat_config.ignore_patterns
        if original_ignore != qat_config.ignore_patterns:
            logger.info(f"Overriding JSON 'ignore' field: {original_ignore} -> {qat_config.ignore_patterns}")
    logger.info("Successfully loaded QAT quantization config")
    return quant_config
def _should_quantize(name: str, module: nn.Module, config: QATConfig) -> bool:
    """Decide whether the module at dotted path *name* should be fake-quantized."""
    # Only plain Linear layers are candidates.
    if not isinstance(module, nn.Linear):
        return False
    # Honor the ignore list: "re:" entries are regexes, others are substrings.
    for pattern in config.ignore_patterns:
        if pattern.startswith("re:"):
            regex = pattern[3:]
            if re.match(regex, name):
                logger.debug(f"Ignoring {name} due to regex pattern: {regex}")
                return False
        elif pattern in name:
            logger.debug(f"Ignoring {name} due to pattern: {pattern}")
            return False
    # Blockwise scales require in_features to split evenly into groups.
    if module.in_features % config.group_size != 0:
        logger.warning(
            f"Skipping {name}: in_features={module.in_features} not divisible by group_size={config.group_size}"
        )
        return False
    return True
def apply_qat(
    model: nn.Module,
    config: QATConfig | dict[str, Any],
) -> nn.Module:
    """Apply QAT to a model by replacing nn.Linear with QATLinear."""
    from verl.utils.qat.linear import QATLinear, QATMode

    if not isinstance(config, QATConfig):
        config = QATConfig(**config)
    if not config.enable:
        logger.info("QAT is disabled, returning original model")
        return model

    mode = QATMode(config.mode.lower())
    logger.info(f"Applying QAT with mode={mode.value}, group_size={config.group_size}")

    # Collect targets first, then mutate: named_modules() must not observe
    # in-flight replacements.
    modules_to_replace = [
        (name, module) for name, module in model.named_modules() if _should_quantize(name, module, config)
    ]
    logger.info(f"Found {len(modules_to_replace)} Linear layers to convert to QAT")

    converted_count = 0
    for name, module in modules_to_replace:
        if isinstance(module, QATLinear):
            # Already converted; skip.
            continue
        fake_quant_module = QATLinear.from_linear(
            module,
            mode=mode,
            group_size=config.group_size,
            activation_observer=config.activation_observer,
        )
        _set_module(model, name, fake_quant_module)
        converted_count += 1
    logger.info(f"Successfully applied QAT to {converted_count} layers")
    return model
def _set_module(model: nn.Module, name: str, new_module: nn.Module):
"""Set a module in the model by its full name."""
parts = name.split(".")
parent = model
for part in parts[:-1]:
parent = getattr(parent, part)
setattr(parent, parts[-1], new_module)
# Projection-name suffixes that deployment backends fuse into a single matmul;
# members of a group must therefore share quantization scales.
FUSION_PATTERNS = {
    "qkv": ["q_proj", "k_proj", "v_proj"],
    "gate_up": ["gate_proj", "up_proj"],
}
def setup_fusion_siblings(model: nn.Module):
    """Setup fusion siblings for QKV and GateUp layers.

    Every member of a fusion group receives weak references to the other
    members so weight scales can be shared across the fused unit. Returns a
    dict mapping group name to the number of groups wired up.
    """
    import weakref

    from verl.utils.qat.linear import QATLinear

    qat_modules = {name: m for name, m in model.named_modules() if isinstance(m, QATLinear)}
    counts = {}
    for group_name, suffixes in FUSION_PATTERNS.items():
        # parent module path -> {suffix: module}
        groups: dict[str, dict[str, nn.Module]] = {}
        for name, module in qat_modules.items():
            for suffix in suffixes:
                if name.endswith(suffix):
                    parent_path = name.rsplit(".", 1)[0]
                    groups.setdefault(parent_path, {})[suffix] = module
        wired = 0
        for members_by_suffix in groups.values():
            members = list(members_by_suffix.values())
            if len(members) < 2:
                # A lone projection has nothing to fuse with.
                continue
            for i, member in enumerate(members):
                others = members[:i] + members[i + 1 :]
                member._fusion_siblings_ref = [weakref.ref(o) for o in others]
            wired += 1
        counts[group_name] = wired
    logger.info(f"[QAT Fuse] Setup fusion siblings: {counts}")
    return counts
def enable_qat_fuse(model: nn.Module):
    """Enable QAT fuse mode: sets up fusion siblings for weight scale fusion."""
    # Wire up sibling references first, then flip the model-level flag.
    setup_fusion_siblings(model)
    model._qat_fuse_enabled = True
    logger.info("[QAT Fuse] Enabled QAT fuse mode")
def invalidate_all_scales(model: nn.Module):
    """Clear all cached weight scales after optimizer.step()."""
    from verl.utils.qat.linear import QATLinear

    count = 0
    qat_layers = (m for m in model.modules() if isinstance(m, QATLinear))
    for layer in qat_layers:
        # The optimizer just changed the weights, so any cached scale is stale.
        layer._weight_blockwise_scale = None
        layer._weight_global_scale = None
        layer._cached_weight_amax = None
        count += 1
    logger.debug(f"[QAT Fuse] Invalidated scales for {count} QATLinear layers")
| {
"repo_id": "verl-project/verl",
"file_path": "verl/utils/qat/core.py",
"license": "Apache License 2.0",
"lines": 148,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/utils/qat/linear.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""QAT FakeQuantized Linear module for NVFP4 (W4A4/W4A16) with FSDP compatibility.
Includes Triton kernels for high-performance FP4 quantization.
"""
from enum import Enum
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ["QATLinear", "QATMode"]
import triton
import triton.language as tl
# Map torch dtypes to their Triton counterparts, used to cast kernel output.
_TORCH_TO_TL_DTYPE = {
    torch.float32: tl.float32,
    torch.float16: tl.float16,
    torch.bfloat16: tl.bfloat16,
}
# Largest representable magnitudes of the FP4 E2M1 and FP8 E4M3 formats.
FP4_E2M1_MAX: float = 6.0
FP8_E4M3_MAX: float = 448.0
@triton.jit
def _fp4_fake_quant_kernel(
    x_ptr,
    y_ptr,
    M,
    N,
    global_scale_ptr,
    stride_xm,
    stride_xn,
    stride_ym,
    stride_yn,
    BLOCK_SIZE: tl.constexpr,
    TILE_M: tl.constexpr,
    TILE_N: tl.constexpr,
    NUM_FP4_BLOCKS: tl.constexpr,
    OUT_DTYPE: tl.constexpr,
    FP4_MAX: tl.constexpr,
    FP8_MAX: tl.constexpr,
):
    # Fake-quantize one (TILE_M, TILE_N) tile of the (M, N) input: round each
    # value onto the FP4 E2M1 grid using per-BLOCK_SIZE-group scales (the
    # scales themselves round-tripped through FP8 E4M3), then dequantize and
    # store the result in OUT_DTYPE.
    pid_m = tl.program_id(axis=0)
    pid_n = tl.program_id(axis=1)
    row_start = pid_m * TILE_M
    col_start = pid_n * TILE_N
    x_block_ptr = tl.make_block_ptr(
        base=x_ptr,
        shape=(M, N),
        strides=(stride_xm, stride_xn),
        offsets=(row_start, col_start),
        block_shape=(TILE_M, TILE_N),
        order=(1, 0),
    )
    y_block_ptr = tl.make_block_ptr(
        base=y_ptr,
        shape=(M, N),
        strides=(stride_ym, stride_yn),
        offsets=(row_start, col_start),
        block_shape=(TILE_M, TILE_N),
        order=(1, 0),
    )
    global_scale = tl.load(global_scale_ptr).to(tl.float32)
    # Guard against a zero global scale before dividing by it below.
    global_scale_safe = tl.where(global_scale > 0.0, global_scale, 1e-12)
    tile = tl.load(x_block_ptr, boundary_check=(0, 1), padding_option="zero").to(tl.float32)
    # View the tile as NUM_FP4_BLOCKS groups of BLOCK_SIZE columns per row.
    tile_reshaped = tl.reshape(tile, (TILE_M, NUM_FP4_BLOCKS, BLOCK_SIZE))
    x_abs = tl.abs(tile_reshaped)
    block_max = tl.max(x_abs, axis=2, keep_dims=True)
    # Per-group scale relative to the global scale, clamped to FP8 range and
    # quantized through FP8 E4M3 so it matches what deployment would store.
    block_max_scaled = block_max / (FP4_MAX * global_scale_safe)
    block_max_scaled = tl.minimum(block_max_scaled, FP8_MAX)
    block_max_quant = block_max_scaled.to(tl.float8e4nv).to(tl.float32) * global_scale
    # All-zero (or tiny) groups get a scale of 1.0 so zeros stay exactly zero.
    block_max_quant = tl.where(block_max_quant >= 1e-5, block_max_quant, 1.0)
    block_max_quant_broadcast = tl.broadcast_to(block_max_quant, (TILE_M, NUM_FP4_BLOCKS, BLOCK_SIZE))
    abs_scaled = x_abs / block_max_quant_broadcast
    # Round-to-nearest onto the non-negative FP4 E2M1 grid
    # {0, 0.5, 1, 1.5, 2, 3, 4, 6}; thresholds are the midpoints between
    # adjacent representable values (ties resolved by the <= / < choices).
    q_val = tl.where(
        abs_scaled <= 0.25,
        0.0,
        tl.where(
            abs_scaled < 0.75,
            0.5,
            tl.where(
                abs_scaled <= 1.25,
                1.0,
                tl.where(
                    abs_scaled < 1.75,
                    1.5,
                    tl.where(
                        abs_scaled <= 2.5,
                        2.0,
                        tl.where(abs_scaled < 3.5, 3.0, tl.where(abs_scaled <= 5.0, 4.0, FP4_MAX)),
                    ),
                ),
            ),
        ),
    )
    # Dequantize and restore the sign of the original values.
    x_rescaled = q_val * block_max_quant_broadcast
    x_rescaled = tl.where(tile_reshaped >= 0, x_rescaled, -x_rescaled)
    tile_quant = tl.reshape(x_rescaled, (TILE_M, TILE_N))
    tl.store(y_block_ptr, tile_quant.to(OUT_DTYPE), boundary_check=(0, 1))
def fp4_fake_quant_weight(
    weight: torch.Tensor,
    global_amax: Optional[torch.Tensor] = None,
    block_size: int = 16,
    tile_rows: int = 16,
    tile_cols: int = 64,
) -> torch.Tensor:
    """Apply FP4 fake quantization using Triton kernel.

    Args:
        weight: Tensor to fake-quantize; flattened to 2-D over its last dim.
        global_amax: Optional precomputed global abs-max (e.g. shared across a
            fused group). Computed from ``weight`` when None.
        block_size: Number of consecutive elements sharing one blockwise scale.
        tile_rows: Kernel tile height.
        tile_cols: Kernel tile width (rounded up to a multiple of block_size).

    Returns:
        A new tensor with the same shape and dtype as ``weight``.
    """
    x_shape = weight.shape
    x_dtype = weight.dtype
    x = weight.reshape(-1, x_shape[-1]).contiguous()
    M, N = x.shape
    y = torch.empty_like(x)
    stride_xm, stride_xn = x.stride()
    stride_ym, stride_yn = y.stride()
    # Align the tile width to a whole number of FP4 scale groups.
    tile_cols = max(tile_cols, block_size)
    tile_cols_aligned = ((tile_cols + block_size - 1) // block_size) * block_size
    num_fp4_blocks = tile_cols_aligned // block_size
    if global_amax is None:
        global_amax = weight.abs().max().to(torch.float32)
    # Two-level NVFP4 scaling: the global scale maps amax into the combined
    # FP4 * FP8 representable range.
    global_scale = global_amax.float() / (FP4_E2M1_MAX * FP8_E4M3_MAX)
    grid = (triton.cdiv(M, tile_rows), triton.cdiv(N, tile_cols_aligned))
    _fp4_fake_quant_kernel[grid](
        x,
        y,
        M,
        N,
        global_scale,
        stride_xm,
        stride_xn,
        stride_ym,
        stride_yn,
        BLOCK_SIZE=block_size,
        TILE_M=tile_rows,
        TILE_N=tile_cols_aligned,
        NUM_FP4_BLOCKS=num_fp4_blocks,
        OUT_DTYPE=_TORCH_TO_TL_DTYPE[x_dtype],
        FP4_MAX=FP4_E2M1_MAX,
        FP8_MAX=FP8_E4M3_MAX,
    )
    return y.view(*x_shape)
class STEFP4QuantTriton(torch.autograd.Function):
    """Straight-Through Estimator wrapper for Triton FP4 quantization kernel."""

    @staticmethod
    def forward(ctx, x: torch.Tensor, global_amax: torch.Tensor, block_size: int) -> torch.Tensor:
        # Forward: fake-quantize through the Triton kernel.
        return fp4_fake_quant_weight(x, global_amax=global_amax, block_size=block_size)

    @staticmethod
    def backward(ctx, grad_output: torch.Tensor) -> tuple:
        # STE: pass gradients straight through; no grads for amax/block_size.
        return grad_output, None, None
class QATMode(str, Enum):
    """QAT quantization mode."""

    W4A4 = "w4a4"  # Weight 4-bit, Activation 4-bit (dynamic)
    W4A16 = "w4a16"  # Weight 4-bit, Activation 16-bit (weight only)
class QATLinear(nn.Linear):
    """QAT FakeQuantized Linear layer with FSDP compatibility."""

    # Sentinel stored in the scale/amax buffers before the first observation.
    _UNINITIALIZED_SCALE = -1.0

    def __init__(
        self,
        in_features: int,
        out_features: int,
        bias: bool = True,
        mode: QATMode = QATMode.W4A4,
        group_size: int = 16,
        activation_observer: str = "static_minmax",  # Observer strategy for activation global_scale
        device: Optional[torch.device] = None,
        dtype: Optional[torch.dtype] = None,
    ):
        super().__init__(in_features, out_features, bias, device=device, dtype=dtype)
        self.mode = mode
        self.group_size = group_size
        self.activation_observer = activation_observer
        # Lazily-computed cached quantization state; cleared externally after
        # optimizer steps (see invalidate_all_scales in qat/core.py).
        self._weight_blockwise_scale: Optional[torch.Tensor] = None
        self._weight_global_scale: Optional[torch.Tensor] = None
        self._cached_weight_amax: Optional[torch.Tensor] = None
        # Weak refs to fused partners (QKV / GateUp), set by setup_fusion_siblings.
        self._fusion_siblings_ref = None
        if mode == QATMode.W4A4:
            # Persistent buffers so activation calibration state is checkpointed.
            self.register_buffer(
                "input_global_scale", torch.tensor([self._UNINITIALIZED_SCALE], dtype=torch.float32), persistent=True
            )
            self.register_buffer(
                "input_amax", torch.tensor([self._UNINITIALIZED_SCALE], dtype=torch.float32), persistent=True
            )
            # EMA coefficient used by the "minmax" observer.
            self._ema_decay: float = 0.01
        self.fake_quant_enabled = True

    @classmethod
    def from_linear(
        cls,
        linear: nn.Linear,
        mode: QATMode = QATMode.W4A4,
        group_size: int = 16,
        activation_observer: str = "static_minmax",
    ) -> "QATLinear":
        """Create QATLinear from an existing nn.Linear."""
        has_bias = linear.bias is not None
        new_linear = cls(
            in_features=linear.in_features,
            out_features=linear.out_features,
            bias=has_bias,
            mode=mode,
            group_size=group_size,
            activation_observer=activation_observer,
            device=linear.weight.device,
            dtype=linear.weight.dtype,
        )
        # Meta-device weights have no data to copy (deferred init).
        if linear.weight.device != torch.device("meta"):
            new_linear.weight = nn.Parameter(linear.weight.clone())
            if has_bias:
                new_linear.bias = nn.Parameter(linear.bias.clone())
        return new_linear

    def _is_amax_initialized(self) -> bool:
        """Check if input_amax has been initialized."""
        if not hasattr(self, "input_amax"):
            return False
        return self.input_amax.item() != self._UNINITIALIZED_SCALE

    def _update_input_global_scale(self, x: torch.Tensor):
        """Update static input_global_scale based on observer strategy."""
        assert self.mode == QATMode.W4A4, "_update_input_global_scale should only be called in W4A4 mode"
        current_amax = torch.amax(torch.abs(x)).detach().to(torch.float32)
        # Keep activation calibration consistent across data-parallel ranks.
        if torch.distributed.is_initialized() and torch.distributed.get_world_size() > 1:
            torch.distributed.all_reduce(current_amax, op=torch.distributed.ReduceOp.MAX)
        scale_factor = FP8_E4M3_MAX * FP4_E2M1_MAX
        if self.activation_observer == "memoryless_minmax":
            # Use only the current batch's amax; no history is kept.
            new_scale = (scale_factor / (current_amax + 1e-12)).view(1)
            self.input_global_scale.copy_(new_scale.to(self.input_global_scale.device))
        elif self.activation_observer == "static_minmax":
            # Track the running maximum amax seen so far.
            if not self._is_amax_initialized():
                self.input_amax.copy_(current_amax.view(1).to(self.input_amax.device))
            else:
                new_amax = torch.maximum(self.input_amax, current_amax.view(1).to(self.input_amax.device))
                self.input_amax.copy_(new_amax)
            amax_f32 = self.input_amax.to(torch.float32)
            new_scale = (scale_factor / (amax_f32 + 1e-12)).float().view(1)
            self.input_global_scale.copy_(new_scale.to(self.input_global_scale.device))
        elif self.activation_observer == "minmax":
            # Exponential moving average of the observed amax.
            if not self._is_amax_initialized():
                self.input_amax.copy_(current_amax.view(1).to(self.input_amax.device))
            else:
                new_amax = (1 - self._ema_decay) * self.input_amax + self._ema_decay * current_amax.view(1).to(
                    self.input_amax.device
                )
                self.input_amax.copy_(new_amax)
            amax_f32 = self.input_amax.to(torch.float32)
            new_scale = (scale_factor / (amax_f32 + 1e-12)).float().view(1)
            self.input_global_scale.copy_(new_scale.to(self.input_global_scale.device))
        else:
            raise ValueError(f"Unknown activation_observer: {self.activation_observer}")

    def _fake_quantize_weight(self, weight: torch.Tensor) -> torch.Tensor:
        """Apply fake quantization to weight tensor using Triton kernel."""
        with torch.no_grad():
            if self._cached_weight_amax is not None:
                global_amax = self._cached_weight_amax
            else:
                siblings_ref = getattr(self, "_fusion_siblings_ref", None)
                if siblings_ref is not None:
                    # Fused group (e.g. QKV): all members share one global amax.
                    siblings = [ref() for ref in siblings_ref if ref() is not None]
                    siblings = [s for s in siblings if s.weight.device != torch.device("meta")]
                    for sibling in siblings:
                        sibling_amax = getattr(sibling, "_cached_weight_amax", None)
                        if sibling_amax is not None:
                            # A sibling already computed the group amax; reuse it.
                            global_amax = sibling_amax
                            self._cached_weight_amax = global_amax
                            break
                    else:
                        # No sibling has it yet: compute over the whole group
                        # and propagate the result to every member.
                        all_modules = [self] + siblings
                        amaxes = [m.weight.abs().max().to(torch.float32) for m in all_modules]
                        global_amax = torch.max(torch.stack(amaxes))
                        self._cached_weight_amax = global_amax
                        for sibling in siblings:
                            sibling._cached_weight_amax = global_amax
                else:
                    global_amax = weight.abs().max().to(torch.float32)
                    self._cached_weight_amax = global_amax
            if self._weight_global_scale is None:
                self._weight_global_scale = global_amax.float() / (FP4_E2M1_MAX * FP8_E4M3_MAX)
        result = STEFP4QuantTriton.apply(weight, global_amax, self.group_size)
        return result

    def _fake_quantize_activation(self, x: torch.Tensor) -> torch.Tensor:
        """Apply fake quantization to activation tensor (W4A4 mode only)."""
        original_shape = x.shape
        # The kernel expects a 2-D view; flatten [batch, seq, dim] inputs.
        if x.dim() == 3:
            x_2d = x.view(-1, x.shape[-1])
        else:
            x_2d = x
        if self.training:
            # Observe/calibrate activations only during training.
            self._update_input_global_scale(x_2d)
        if self.input_global_scale.item() == self._UNINITIALIZED_SCALE:
            raise RuntimeError("W4A4 input_global_scale uninitialized. Load PTQ model first.")
        # Invert the stored scale back into an amax for the kernel.
        global_amax = (FP4_E2M1_MAX * FP8_E4M3_MAX) / self.input_global_scale.to(x.device)
        result = STEFP4QuantTriton.apply(x_2d, global_amax, self.group_size)
        return result.view(original_shape)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Forward pass with fake quantization."""
        if not self.fake_quant_enabled:
            return F.linear(x, self.weight, self.bias)
        weight_fq = self._fake_quantize_weight(self.weight)
        if self.mode == QATMode.W4A4:
            x_fq = self._fake_quantize_activation(x)
        else:
            # W4A16: activations stay in high precision.
            x_fq = x
        return F.linear(x_fq, weight_fq, self.bias)

    def extra_repr(self) -> str:
        return (
            f"in_features={self.in_features}, out_features={self.out_features}, "
            f"bias={self.bias is not None}, mode={self.mode.value}, "
            f"group_size={self.group_size}, fake_quant_enabled={self.fake_quant_enabled}"
        )
| {
"repo_id": "verl-project/verl",
"file_path": "verl/utils/qat/linear.py",
"license": "Apache License 2.0",
"lines": 313,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/utils/qat/quantizer.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fast NVFP4 Quantizer for verl FSDP training.
Directly computes scales and quantizes weights using compressed_tensors APIs.
Includes scale computation utilities for weight quantization.
"""
import logging
import os
import re
from typing import Generator, Iterable, Optional
import torch
from compressed_tensors.compressors.quantized_compressors.fp4_quantized import NVFP4PackedCompressor
from compressed_tensors.quantization.quant_args import (
FP4_E2M1_DATA,
FP8_E4M3_DATA,
QuantizationArgs,
QuantizationStrategy,
QuantizationType,
)
from compressed_tensors.quantization.utils.helpers import generate_gparam
from verl.utils.device import get_device_name, get_torch_device
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
# Matches the decoder layer index in parameter names like "model.layers.12.…".
_LAYER_IDX_RE = re.compile(r"layers\.(\d+)\.")
def compute_blockwise_scale(
    weight: torch.Tensor,
    global_scale: torch.Tensor,
    group_size: int = 16,
) -> torch.Tensor:
    """Compute blockwise scale using pre-computed global_scale (for fusion).

    Returns FP8 E4M3 blockwise scale tensor.
    """
    rows, cols = weight.shape
    # View each row as groups of `group_size` consecutive elements.
    grouped = weight.view(rows, cols // group_size, group_size)
    group_amax = torch.amax(torch.abs(grouped), dim=-1).to(torch.float32)
    local_scale = group_amax / FP4_E2M1_DATA.max
    # Clamp into FP8 E4M3 range before the dtype cast.
    scale_f32 = torch.clamp(
        global_scale * local_scale,
        min=-FP8_E4M3_DATA.max,
        max=FP8_E4M3_DATA.max,
    )
    scale_fp8 = scale_f32.to(torch.float8_e4m3fn)
    # Replace exact-zero scales with the smallest positive FP8 value so
    # dequantization never divides by zero.
    eps = torch.finfo(torch.float8_e4m3fn).eps
    zero_fill = torch.tensor(eps, dtype=scale_fp8.dtype, device=weight.device)
    return torch.where(scale_fp8 == 0, zero_fill, scale_fp8)
# Fusion patterns for transformer models
FUSE_PATTERNS = {
    "qkv": ["q_proj", "k_proj", "v_proj"],
    "gate_up": ["gate_proj", "up_proj"],
}


def fuse_global_scales(
    layer_global_scales: dict[str, torch.Tensor],
    strategy: str = "min",
) -> dict[str, torch.Tensor]:
    """Fuse global scales for QKV/GateUp groups (take min across group)."""
    if not layer_global_scales:
        return {}
    # Bucket scale entries by their parent module path.
    parent_to_children: dict[str, dict[str, str]] = {}
    for full_name in layer_global_scales:
        if "." in full_name:
            parent, child = full_name.rsplit(".", 1)
        else:
            parent, child = "", full_name
        parent_to_children.setdefault(parent, {})[child] = full_name
    fused_scales: dict[str, torch.Tensor] = {}
    processed = set()
    for children in parent_to_children.values():
        for patterns in FUSE_PATTERNS.values():
            matched = [children[p] for p in patterns if p in children]
            # Fuse only when every member of the group is present.
            if len(matched) != len(patterns):
                continue
            group_scales = [layer_global_scales[n] for n in matched]
            if strategy == "min":
                fused_scale = torch.min(torch.cat(group_scales)).reshape([1])
            else:
                raise ValueError(f"Unknown fuse strategy: {strategy}")
            for layer_name in matched:
                fused_scales[layer_name] = fused_scale.clone()
                processed.add(layer_name)
    # Layers outside any fusion group keep their original scales.
    for name, scale in layer_global_scales.items():
        if name not in processed:
            fused_scales[name] = scale
    return fused_scales
class QATQuantizer:
    """Quantizer for QAT-trained weights using compressed_tensors APIs.

    Streams parameters through NVFP4 compression one decoder layer at a time
    (``quantize_with_fusion``), fusing the per-tensor global scales of matched
    QKV / GateUp groups via ``fuse_global_scales`` so fused projections share
    a single global scale. Only one layer's weights live on ``self.device``
    at any time; outputs are moved to the requested target device.

    Args:
        mode: ``"w4a16"`` (weight-only) or ``"w4a4"`` (additionally emits a
            per-layer ``input_global_scale``).
        group_size: Number of input-channel elements per blockwise scale.
        ignore_patterns: Module-name substrings, or ``"re:"``-prefixed
            regexes, whose weights are passed through unquantized.
        device: Device used for the quantization math; defaults to the
            current accelerator.
        param_dtype: Dtype weights are cast to before quantization
            (``None`` keeps the incoming dtype).
    """
    def __init__(
        self,
        mode: str = "w4a16",
        group_size: int = 16,
        ignore_patterns: Optional[list] = None,
        device: Optional[torch.device] = None,
        param_dtype: Optional[torch.dtype] = None,
    ):
        self.mode = mode.lower()
        self._is_w4a4 = self.mode == "w4a4"  # W4A4 needs input_global_scale
        self.group_size = group_size
        self.ignore_patterns = ignore_patterns or ["lm_head", "embed_tokens", "re:.*mlp.gate$"]
        self.device = device or torch.device(get_device_name())
        self.param_dtype = param_dtype
        # compressed_tensors packer for FP4-E2M1 weight payloads.
        self._compressor = NVFP4PackedCompressor()
        # TENSOR_GROUP: one FP8-E4M3 scale per `group_size` elements plus a
        # per-tensor global scale.
        self._quant_args = QuantizationArgs(
            num_bits=4,
            type=QuantizationType.FLOAT,
            symmetric=True,
            strategy=QuantizationStrategy.TENSOR_GROUP,
            group_size=group_size,
            scale_dtype=FP8_E4M3_DATA.dtype,
        )
    def _should_quantize(self, name: str, tensor: torch.Tensor) -> bool:
        """Check if parameter should be quantized.

        Only 2-D ``*.weight`` tensors whose input dimension is divisible by
        ``group_size`` and whose module name matches none of the ignore
        patterns are quantized.
        """
        if not name.endswith(".weight"):
            return False
        if tensor.dim() != 2:
            return False
        if tensor.shape[1] % self.group_size != 0:
            return False
        module_name = name.rsplit(".weight", 1)[0]
        for pattern in self.ignore_patterns:
            if pattern.startswith("re:"):
                # Regex pattern - use re.match like vLLM does
                regex = pattern[3:]
                if re.match(regex, module_name):
                    return False
            else:
                # Plain patterns are substring matches on the module name.
                if pattern in module_name:
                    return False
        return True
    @staticmethod
    def _extract_layer_idx(name: str) -> Optional[int]:
        """Extract decoder layer index from parameter name.

        Uses the module-level ``_LAYER_IDX_RE`` (defined outside this
        excerpt); returns ``None`` for params outside decoder layers.
        """
        match = _LAYER_IDX_RE.search(name)
        return int(match.group(1)) if match else None
    def _process_layer_group(
        self,
        layer_idx: Optional[int],
        layer_params: dict[str, torch.Tensor],
        input_global_scales: dict[str, torch.Tensor],
        output_device: torch.device,
    ) -> list[tuple[str, torch.Tensor]]:
        """Quantize one decoder layer's buffered params. Returns list of (name, tensor).

        Args:
            layer_idx: Decoder layer index, or ``None`` for non-layer params.
            layer_params: Buffered (name -> CPU tensor) entries of this layer.
            input_global_scales: Collected W4A4 activation scales keyed by
                module name.
            output_device: Device the emitted tensors are moved to.

        Raises:
            RuntimeError: If quantizable weights appear outside decoder layers.
            ValueError: In W4A4 mode when a layer's input_global_scale is missing.
        """
        layer_weights = {}
        layer_passthrough = {}
        for name, tensor in layer_params.items():
            # QAT observer stats are never emitted: input_global_scale is
            # consumed via `input_global_scales`, input_amax is dropped.
            if "input_global_scale" in name or "input_amax" in name:
                continue
            if self._should_quantize(name, tensor):
                layer_name = name.rsplit(".weight", 1)[0]
                layer_weights[layer_name] = (name, tensor)
            else:
                layer_passthrough[name] = tensor
        if layer_idx is None and layer_weights:
            raise RuntimeError(
                f"[QAT Quantizer] Unexpected quantizable weights outside decoder layers: "
                f"{list(layer_weights.keys())}. These should be in ignore_patterns."
            )
        if not layer_weights:
            return [(name, tensor.to(output_device)) for name, tensor in layer_passthrough.items()]
        # Move weights to GPU, compute global scales
        weights_on_gpu = {}
        layer_global_scales = {}
        for layer_name, (_, tensor) in layer_weights.items():
            weight_gpu = tensor.to(device=self.device, dtype=self.param_dtype)
            weights_on_gpu[layer_name] = weight_gpu
            # Symmetric per-tensor range [-amax, amax] drives the global scale.
            amax = torch.amax(torch.abs(weight_gpu)).to(torch.float32)
            layer_global_scales[layer_name] = generate_gparam(
                -amax.unsqueeze(0),
                amax.unsqueeze(0),
                scale_data=FP8_E4M3_DATA,
                quant_data=FP4_E2M1_DATA,
                dtype=torch.float32,
            )
        # Fused (QKV / GateUp) projections must share one global scale.
        fused_global_scales = fuse_global_scales(layer_global_scales, strategy="min")
        results = []
        for layer_name, weight_gpu in weights_on_gpu.items():
            fused_global_scale = fused_global_scales[layer_name]
            weight_scale = compute_blockwise_scale(weight_gpu, fused_global_scale, self.group_size)
            weight_packed = self._compressor.compress_weight(
                weight=weight_gpu,
                scale=weight_scale.float(),
                global_scale=fused_global_scale,
                quantization_args=self._quant_args,
            )["weight_packed"]
            results.append((f"{layer_name}.weight_packed", weight_packed.to(output_device)))
            results.append((f"{layer_name}.weight_scale", weight_scale.to(output_device)))
            results.append((f"{layer_name}.weight_global_scale", fused_global_scale.to(output_device)))
            if self._is_w4a4:
                if layer_name in input_global_scales:
                    results.append(
                        (
                            f"{layer_name}.input_global_scale",
                            input_global_scales[layer_name].float().to(output_device),
                        )
                    )
                else:
                    raise ValueError(
                        f"W4A4 mode requires input_global_scale for layer '{layer_name}', "
                        f"but it's not found or uninitialized (-1.0)."
                    )
        # Drop GPU references promptly so only one layer is resident at a time.
        del weights_on_gpu, layer_global_scales, fused_global_scales
        for name, tensor in layer_passthrough.items():
            results.append((name, tensor.to(output_device)))
        return results
    def quantize_with_fusion(
        self,
        params: dict[str, torch.Tensor] | Iterable[tuple[str, torch.Tensor]],
        target_device: Optional[torch.device] = None,
    ) -> Generator[tuple[str, torch.Tensor], None, None]:
        """Streaming quantize: consume input layer by layer, yield (name, tensor) pairs.

        Args:
            params: Dict or iterable of (name, tensor). A layer is flushed
                whenever the extracted layer index changes, so the stream is
                assumed to arrive grouped by decoder layer — interleaving
                layers would flush fusion groups early.
            target_device: Device for yielded tensors (default CPU).

        Yields:
            (name, tensor) pairs: quantized artifacts for quantizable weights
            and passthrough tensors for everything else.
        """
        if isinstance(params, dict):
            params = params.items()
        output_device = target_device or torch.device("cpu")
        # Sentinel distinguishes "no layer seen yet" from layer_idx None.
        _sentinel = object()
        current_layer_idx = _sentinel
        layer_buffer: dict[str, torch.Tensor] = {}
        input_global_scales: dict[str, torch.Tensor] = {}
        for name, tensor in params:
            tensor_cpu = tensor.to("cpu") if tensor.is_cuda else tensor
            layer_idx = self._extract_layer_idx(name)
            # Collect input_global_scales for W4A4 as we go
            if self._is_w4a4 and "input_global_scale" in name:
                scale_layer_name = name.replace(".input_global_scale", "")
                # -1.0 is the uninitialized-observer marker; skip it so the
                # missing-scale error fires later in _process_layer_group.
                if tensor_cpu.numel() == 1 and tensor_cpu.item() == -1.0:
                    logger.warning(f"W4A4: {scale_layer_name} input_global_scale is uninitialized")
                else:
                    input_global_scales[scale_layer_name] = tensor_cpu
            # Layer boundary: flush previous layer
            if layer_idx != current_layer_idx and current_layer_idx is not _sentinel and layer_buffer:
                yield from self._process_layer_group(
                    current_layer_idx, layer_buffer, input_global_scales, output_device
                )
                layer_buffer = {}
            current_layer_idx = layer_idx
            layer_buffer[name] = tensor_cpu
        # Flush last buffered layer
        if layer_buffer:
            yield from self._process_layer_group(current_layer_idx, layer_buffer, input_global_scales, output_device)
        # Release accelerator cache held by the per-layer GPU round-trips.
        get_torch_device().empty_cache()
# Public API of this module.
__all__ = [
    "QATQuantizer",
]
| {
"repo_id": "verl-project/verl",
"file_path": "verl/utils/qat/quantizer.py",
"license": "Apache License 2.0",
"lines": 254,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/utils/qat/vllm_patch.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
vLLM NVFP4 Patches for Dynamic Weight Updates.
Enables dynamic weight reloading for NVFP4 quantized models in vLLM.
Supported schemes:
- Dense: W4A16-FP4, W4A4-FP4
- MoE: NVFP4-MoE
"""
import logging
import os
from typing import Optional
from unittest.mock import patch
import torch
from torch.nn import Parameter
from verl.utils.device import get_device_name
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
class ParamMetaDict(dict):
    """
    Dict-like class for parameter management with metadata-based rebuild and tensor swap.

    Maps full parameter names to Parameters, and additionally:
    - Rebuilds deleted parameters from metadata saved in ``module._hf_param_meta``
      (populated by ``save_param_meta``) on lookup.
    - Tracks layers whose weight_scale changed shape during Marlin conversion
      so ``prepare_for_reload`` can swap in HF-shape tensors while keeping the
      Marlin tensor addresses stable (needed for CUDA Graph).
    """
    def __init__(self, model: torch.nn.Module, device: Optional[torch.device] = None):
        """
        Initialize ParamMetaDict from a model.
        Args:
            model: vLLM model (may be wrapped in ModelRunner)
            device: Device for created parameters
        """
        super().__init__()
        self.device = device
        # Get the actual model (handle vLLM's wrapper structure)
        actual_model = model
        if hasattr(model, "model"):
            actual_model = model.model
        self._model = actual_model
        # Build mappings by scanning all modules
        self._layer_meta_cache: dict[str, dict] = {}  # Cache of _hf_param_meta
        self._tensor_swap_layers: dict[str, dict] = {}  # Layers needing tensor swap
        self._build_mappings()
        # Initialize with current parameters
        for name, param in actual_model.named_parameters():
            self[name] = param
    def _build_mappings(self):
        """Build layer metadata cache for rebuild and tensor swap."""
        for layer_name, module in self._model.named_modules():
            # Check for _hf_param_meta which indicates this layer has HF format params
            # (set by the patched process_weights_after_loading via save_param_meta).
            if hasattr(module, "_hf_param_meta"):
                self._layer_meta_cache[layer_name] = {
                    "module": module,
                    "meta": module._hf_param_meta,
                }
                # Check for tensor swap layers (weight_scale with shape change)
                if "weight_scale" in module._hf_param_meta:
                    marlin_refs = getattr(module, "_marlin_tensor_refs", {})
                    if "weight_scale" in marlin_refs:
                        self._tensor_swap_layers[layer_name] = {
                            "module": module,
                            "marlin_ref": marlin_refs["weight_scale"],
                            "hf_meta": module._hf_param_meta["weight_scale"],
                        }
                # MoE layers (w13_weight_scale, w2_weight_scale)
                if "w13_weight_scale" in module._hf_param_meta:
                    marlin_refs = getattr(module, "_marlin_tensor_refs", {})
                    if "w13_weight_scale" in marlin_refs:
                        # Suffix the key: both scales live on the same module.
                        self._tensor_swap_layers[f"{layer_name}.w13"] = {
                            "module": module,
                            "param_name": "w13_weight_scale",
                            "marlin_ref": marlin_refs["w13_weight_scale"],
                            "hf_meta": module._hf_param_meta["w13_weight_scale"],
                        }
                    if "w2_weight_scale" in marlin_refs:
                        self._tensor_swap_layers[f"{layer_name}.w2"] = {
                            "module": module,
                            "param_name": "w2_weight_scale",
                            "marlin_ref": marlin_refs["w2_weight_scale"],
                            "hf_meta": module._hf_param_meta["w2_weight_scale"],
                        }
    def _try_rebuild(self, key: str) -> Optional[Parameter]:
        """
        Try to rebuild a parameter from metadata if it was deleted.
        Args:
            key: Full parameter name
        Returns:
            Rebuilt parameter or None if cannot rebuild
        """
        # Extract layer name and param name
        parts = key.rsplit(".", 1)
        if len(parts) != 2:
            return None
        layer_name, param_name = parts
        # Check if we have metadata for this layer
        if layer_name not in self._layer_meta_cache:
            return None
        cache_entry = self._layer_meta_cache[layer_name]
        module = cache_entry["module"]
        meta = cache_entry["meta"]
        # Check if this param needs rebuild
        if param_name not in meta:
            return None
        # Already exists on module?
        if hasattr(module, param_name):
            param = getattr(module, param_name)
            if param is not None:
                return param
        # Rebuild from metadata
        new_param = _create_param_from_meta(module, param_name, meta[param_name], self.device)
        module.register_parameter(param_name, new_param)
        return new_param
    def prepare_for_reload(self) -> None:
        """Replace Marlin-format tensors with HF-shape tensors for reload."""
        for layer_name, swap_info in self._tensor_swap_layers.items():
            module = swap_info["module"]
            # Dense entries carry no "param_name"; default to weight_scale.
            param_name = swap_info.get("param_name", "weight_scale")
            hf_meta = swap_info["hf_meta"]
            if hasattr(module, param_name):
                new_param = _create_param_from_meta(module, param_name, hf_meta, self.device)
                setattr(module, param_name, new_param)
    def __getitem__(self, key: str) -> Parameter:
        """Get parameter with rebuild support."""
        # Try standard lookup first (dict.keys bypasses our __contains__ override).
        if key in dict.keys(self):
            return super().__getitem__(key)
        # Try rebuild from metadata
        param = self._try_rebuild(key)
        if param is not None:
            # Cache so later lookups hit the fast path.
            self[key] = param
            return param
        raise KeyError(f"Parameter not found: {key}")
    def __contains__(self, key: str) -> bool:
        """Check if parameter exists (with rebuild check)."""
        if super().__contains__(key):
            return True
        # Check if can rebuild from metadata
        parts = key.rsplit(".", 1)
        if len(parts) == 2:
            layer_name, param_name = parts
            if layer_name in self._layer_meta_cache:
                meta = self._layer_meta_cache[layer_name]["meta"]
                if param_name in meta:
                    return True
        return False
    def get(self, key: str, default=None):
        """Get parameter with default."""
        # Overridden because dict.get would not call our __getitem__.
        try:
            return self[key]
        except KeyError:
            return default
def _create_param_from_meta(
module: torch.nn.Module,
param_name: str,
meta: dict,
device: Optional[torch.device] = None,
) -> Parameter:
"""Create a Parameter from saved metadata. Used by rebuild and tensor swap."""
shape = meta["shape"]
dtype = meta["dtype"]
dev = device or meta.get("device", get_device_name())
param_class = meta.get("param_class", Parameter)
weight_loaders = getattr(module, "_weight_loaders", {})
weight_loader = weight_loaders.get(param_name)
data = torch.empty(shape, dtype=dtype, device=dev)
try:
if param_class is not Parameter and weight_loader is not None:
kwargs = {"data": data, "weight_loader": weight_loader}
if "input_dim" in meta:
kwargs["input_dim"] = meta["input_dim"]
if "output_dim" in meta:
kwargs["output_dim"] = meta["output_dim"]
new_param = param_class(**kwargs)
else:
new_param = Parameter(data, requires_grad=False)
if weight_loader is not None:
new_param.weight_loader = weight_loader
except Exception as e:
logger.warning(f"Failed to create param {param_name} with class {param_class}: {e}, using Parameter")
new_param = Parameter(data, requires_grad=False)
if weight_loader is not None:
new_param.weight_loader = weight_loader
if "quant_method" in meta:
new_param.quant_method = meta["quant_method"]
return new_param
def save_param_meta(layer: torch.nn.Module, param_name: str):
    """Record shape/dtype/class metadata for *param_name* so the parameter
    can be rebuilt by ``_create_param_from_meta`` after it is deleted or
    converted in place. No-op (beyond ensuring the meta dict exists) when the
    parameter is absent."""
    if not hasattr(layer, "_hf_param_meta"):
        layer._hf_param_meta = {}
    param = getattr(layer, param_name, None)
    if param is None:
        return
    entry = {
        "shape": tuple(param.shape),
        "dtype": param.dtype,
        "device": str(param.device),
        "param_class": type(param),  # Save the actual parameter class
    }
    # vLLM-specific attributes, copied only when present on the parameter
    # (dim hints for sharded loaders; quant_method required by MoE loaders).
    for attr_name, meta_key in (
        ("_input_dim", "input_dim"),
        ("_output_dim", "output_dim"),
        ("quant_method", "quant_method"),
    ):
        if hasattr(param, attr_name):
            entry[meta_key] = getattr(param, attr_name)
    layer._hf_param_meta[param_name] = entry
def _check_first_call(layer: torch.nn.Module) -> bool:
"""Check if this is the first process_weights call, and increment counter."""
count = getattr(layer, "_process_weights_call_count", 0)
layer._process_weights_call_count = count + 1
return count == 0
# Dense W4A16 Patches
def patched_w4a16_process_weights_after_loading(self, layer: torch.nn.Module) -> None:
    """Patched process_weights_after_loading for W4A16 Dense layer.

    Converts the layer's HF-format NVFP4 tensors (weight_packed /
    weight_scale / weight_global_scale) to Marlin format. On the first call
    it saves rebuild metadata and registers fresh Marlin parameters; on later
    calls it copies into the existing tensor storage so addresses stay stable
    (weight_scale changes shape, hence the _marlin_tensor_refs swap). `self`
    is the vLLM scheme instance this function is patched onto.
    """
    import vllm._custom_ops as ops
    from vllm.model_executor.layers.quantization.utils.marlin_utils_fp4 import (
        marlin_make_workspace_new,
        marlin_permute_scales,
        nvfp4_marlin_process_global_scale,
        nvfp4_marlin_process_scales,
    )
    is_first_call = _check_first_call(layer)
    group_size = 16
    part_size_n = layer.output_size_per_partition
    part_size_k = layer.input_size_per_partition
    device = layer.weight_packed.device
    param_dtype = getattr(layer, "params_dtype", torch.float16)
    # Save metadata (first call only) so HF params can be rebuilt for reload.
    if is_first_call:
        save_param_meta(layer, "weight_packed")
        save_param_meta(layer, "weight_global_scale")
        save_param_meta(layer, "weight_scale")
        if not hasattr(layer, "_weight_loaders"):
            layer._weight_loaders = {}
        for pname in ["weight_packed", "weight_global_scale", "weight_scale"]:
            param = getattr(layer, pname, None)
            if param is not None and hasattr(param, "weight_loader"):
                layer._weight_loaders[pname] = param.weight_loader
    # Get HF format data
    weight_packed_hf = layer.weight_packed.data
    weight_global_scale_hf = layer.weight_global_scale.data
    weight_scale_hf = layer.weight_scale.data
    # Create workspace (first call only)
    if is_first_call:
        layer.workspace = marlin_make_workspace_new(device)
    # Convert to Marlin format (empty perm = no act-order permutation).
    perm = torch.empty(0, dtype=torch.int, device=device)
    qweight = weight_packed_hf.view(torch.int32).T.contiguous()
    marlin_weight = ops.gptq_marlin_repack(
        b_q_weight=qweight,
        perm=perm,
        size_k=part_size_k,
        size_n=part_size_n,
        num_bits=4,
        is_a_8bit=False,
    )
    weight_scale = weight_scale_hf.T.contiguous().to(param_dtype)
    weight_scale_permuted = marlin_permute_scales(
        s=weight_scale,
        size_k=part_size_k,
        size_n=part_size_n,
        group_size=group_size,
        is_a_8bit=False,
    )
    marlin_weight_scale = nvfp4_marlin_process_scales(weight_scale_permuted)
    # Kernel consumes the reciprocal of the per-tensor global scale.
    weight_scale_2_raw = (1.0 / weight_global_scale_hf.max()).to(param_dtype)
    marlin_weight_scale_2 = nvfp4_marlin_process_global_scale(weight_scale_2_raw)
    # Update compute parameters
    if is_first_call:
        layer.weight = Parameter(marlin_weight, requires_grad=False)
        layer.weight_scale = Parameter(marlin_weight_scale, requires_grad=False)
        layer.weight_scale_2 = Parameter(marlin_weight_scale_2, requires_grad=False)
        if not hasattr(layer, "_marlin_tensor_refs"):
            layer._marlin_tensor_refs = {}
        # Keep a handle to the Marlin-shape scale storage for later reloads.
        layer._marlin_tensor_refs["weight_scale"] = layer.weight_scale.data
    else:
        # Reload path: copy into existing storage, don't reallocate.
        layer.weight.data.copy_(marlin_weight)
        layer.weight_scale_2.data.copy_(marlin_weight_scale_2)
        marlin_scale_ref = layer._marlin_tensor_refs.get("weight_scale")
        if marlin_scale_ref is not None:
            marlin_scale_ref.copy_(marlin_weight_scale)
            layer.weight_scale = Parameter(marlin_scale_ref, requires_grad=False)
        else:
            logger.warning("W4A16: _marlin_tensor_refs['weight_scale'] not found")
            layer.weight_scale = Parameter(marlin_weight_scale, requires_grad=False)
    # Delete HF parameters (rebuilt from metadata on the next reload).
    if hasattr(layer, "weight_packed"):
        delattr(layer, "weight_packed")
    if hasattr(layer, "weight_global_scale"):
        delattr(layer, "weight_global_scale")
def patched_w4a4_process_weights_after_loading(self, layer: torch.nn.Module) -> None:
    """Patched process_weights_after_loading for W4A4 Dense (all backends).

    Shuffles/swizzles the HF-format NVFP4 tensors into the layout of the
    active backend (`self.backend`: flashinfer-trtllm / fbgemm / cutlass).
    First call registers fresh parameters and records their storages in
    `_marlin_tensor_refs`; later calls copy into those storages so tensor
    addresses stay stable across reloads.
    """
    from vllm.model_executor.layers.quantization.utils.quant_utils import swizzle_blockscale
    is_first_call = _check_first_call(layer)
    _W4A4_HF_PARAMS = ["weight_packed", "weight_scale", "weight_global_scale", "input_global_scale"]
    if is_first_call:
        # Capture rebuild metadata and weight loaders before conversion.
        for pname in _W4A4_HF_PARAMS:
            save_param_meta(layer, pname)
        if not hasattr(layer, "_weight_loaders"):
            layer._weight_loaders = {}
        for pname in _W4A4_HF_PARAMS:
            param = getattr(layer, pname, None)
            if param is not None and hasattr(param, "weight_loader"):
                layer._weight_loaders[pname] = param.weight_loader
    weight_packed_data = layer.weight_packed.data
    weight_scale_data = layer.weight_scale.data
    input_global_scale_data = layer.input_global_scale.data
    weight_global_scale_data = layer.weight_global_scale.data
    # Collapse possibly-sharded global scales into single fp32 scalars.
    global_input_scale = input_global_scale_data.max().to(torch.float32)
    global_weight_scale = weight_global_scale_data.max().to(torch.float32)
    if self.backend == "flashinfer-trtllm":
        from flashinfer import shuffle_matrix_a, shuffle_matrix_sf_a
        epilogue_tile_m = 128
        processed_weight = shuffle_matrix_a(weight_packed_data.view(torch.uint8), epilogue_tile_m)
        processed_weight_scale = (
            shuffle_matrix_sf_a(weight_scale_data.view(torch.uint8), epilogue_tile_m)
            .reshape(weight_scale_data.shape)
            .view(torch.float8_e4m3fn)
        )
    elif self.backend == "fbgemm":
        processed_weight_scale = swizzle_blockscale(weight_scale_data).view(-1).view(torch.uint8)
        processed_weight = weight_packed_data
    else:
        # cutlass / flashinfer-cutlass
        processed_weight_scale = swizzle_blockscale(weight_scale_data)
        processed_weight = weight_packed_data
    # Dequant multiplier combining both reciprocal global scales.
    alpha = 1.0 / (global_input_scale * global_weight_scale)
    if is_first_call:
        layer.weight_packed = Parameter(processed_weight, requires_grad=False)
        layer.weight_scale = Parameter(processed_weight_scale, requires_grad=False)
        layer.input_global_scale = Parameter(global_input_scale, requires_grad=False)
        layer.weight_global_scale = Parameter(global_weight_scale, requires_grad=False)
        layer.alpha = Parameter(alpha, requires_grad=False)
        if not hasattr(layer, "_marlin_tensor_refs"):
            layer._marlin_tensor_refs = {}
        # Record every converted storage for address-stable reloads.
        layer._marlin_tensor_refs["weight_packed"] = layer.weight_packed.data
        layer._marlin_tensor_refs["weight_scale"] = layer.weight_scale.data
        layer._marlin_tensor_refs["input_global_scale"] = layer.input_global_scale.data
        layer._marlin_tensor_refs["weight_global_scale"] = layer.weight_global_scale.data
        layer._marlin_tensor_refs["alpha"] = layer.alpha.data
    else:
        # Reload path: copy into recorded storages instead of reallocating.
        refs = layer._marlin_tensor_refs
        for ref_name, new_data in [
            ("weight_packed", processed_weight),
            ("weight_scale", processed_weight_scale),
            ("input_global_scale", global_input_scale),
            ("weight_global_scale", global_weight_scale),
            ("alpha", alpha),
        ]:
            ref = refs.get(ref_name)
            if ref is not None:
                ref.copy_(new_data)
                setattr(layer, ref_name, Parameter(ref, requires_grad=False))
            else:
                logger.warning(f"W4A4: _marlin_tensor_refs['{ref_name}'] not found, creating new Parameter")
                setattr(
                    layer,
                    ref_name,
                    Parameter(
                        new_data.clone() if isinstance(new_data, torch.Tensor) else torch.tensor(new_data),
                        requires_grad=False,
                    ),
                )
def _marlin_repack_experts(packed, perm, size_k, size_n, num_experts):
    """Repack weight for each expert into Marlin format and stack on dim 0."""
    import vllm._custom_ops as ops
    repacked_per_expert = [
        ops.gptq_marlin_repack(
            # Marlin repack expects int32-viewed, transposed contiguous weights.
            b_q_weight=packed[expert].view(torch.int32).T.contiguous(),
            perm=perm,
            size_k=size_k,
            size_n=size_n,
            num_bits=4,
            is_a_8bit=False,
        )
        for expert in range(num_experts)
    ]
    return torch.stack(repacked_per_expert)
def _marlin_process_scales_experts(scale_hf, param_dtype, size_k, size_n, group_size, num_experts):
    """Process scales for each expert into Marlin format and stack on dim 0."""
    from vllm.model_executor.layers.quantization.utils.marlin_utils_fp4 import (
        marlin_permute_scales,
        nvfp4_marlin_process_scales,
    )
    # Cast once; per-expert slices are transposed before permutation.
    scales = scale_hf.to(param_dtype)
    processed_per_expert = [
        nvfp4_marlin_process_scales(
            marlin_permute_scales(
                s=scales[expert].T,
                size_k=size_k,
                size_n=size_n,
                group_size=group_size,
                is_a_8bit=False,
            )
        )
        for expert in range(num_experts)
    ]
    return torch.stack(processed_per_expert)
def _process_nvfp4_moe_marlin(self, layer: torch.nn.Module, is_first_call: bool) -> None:
    """Process MoE layer with MARLIN backend (W4A16).

    Repacks per-expert w13/w2 weights and scales into Marlin format. First
    call registers fresh parameters and records the (shape-changed) scale
    storages in `_marlin_tensor_refs`; later calls copy in place so tensor
    addresses stay stable. `self` is the vLLM MoE quant method instance.
    """
    from vllm.model_executor.layers.fused_moe.oracle.nvfp4 import make_nvfp4_moe_kernel
    from vllm.model_executor.layers.quantization.utils.marlin_utils_fp4 import (
        marlin_make_workspace_new,
        nvfp4_marlin_process_global_scale,
    )
    group_size = 16
    e = layer.num_experts
    k = layer.hidden_size
    n = layer.intermediate_size_per_partition
    device = layer.w13_weight_packed.device
    param_dtype = layer.params_dtype
    # Gated activations (e.g. SiLU-and-mul) fuse w1 and w3 into w13.
    w13_num_shards = 2 if self.moe.is_act_and_mul else 1
    if is_first_call:
        layer.workspace = marlin_make_workspace_new(device, 4)
    # Empty perm = no act-order permutation during repack.
    perm = torch.empty(0, dtype=torch.int, device=device)
    # Marlin uses a single global scale per fused w13; w1/w3 must agree.
    if self.moe.is_act_and_mul and not torch.allclose(
        layer.w13_weight_global_scale[:, 0], layer.w13_weight_global_scale[:, 1]
    ):
        logger.warning("w1_weight_global_scale must match w3_weight_global_scale. Accuracy may be affected.")
    size_n_w13, size_k_w13 = n * w13_num_shards, k
    size_n_w2, size_k_w2 = k, n
    w13_weight_marlin = _marlin_repack_experts(layer.w13_weight_packed.data, perm, size_k_w13, size_n_w13, e)
    w2_weight_marlin = _marlin_repack_experts(layer.w2_weight_packed.data, perm, size_k_w2, size_n_w2, e)
    w13_weight_scale_marlin = _marlin_process_scales_experts(
        layer.w13_weight_scale.data, param_dtype, size_k_w13, size_n_w13, group_size, e
    )
    w2_weight_scale_marlin = _marlin_process_scales_experts(
        layer.w2_weight_scale.data, param_dtype, size_k_w2, size_n_w2, group_size, e
    )
    # Process global scales (kernel consumes reciprocals of the HF scales).
    w13_scale_2 = 1.0 / layer.w13_weight_global_scale[:, 0]
    w2_scale_2 = 1.0 / layer.w2_weight_global_scale.data
    w13_scale_2_processed = nvfp4_marlin_process_global_scale(w13_scale_2.to(param_dtype))
    w2_scale_2_processed = nvfp4_marlin_process_global_scale(w2_scale_2.to(param_dtype))
    # Update parameters
    if is_first_call:
        layer.w13_weight = Parameter(w13_weight_marlin, requires_grad=False)
        layer.w2_weight = Parameter(w2_weight_marlin, requires_grad=False)
        layer.w13_weight_scale = Parameter(w13_weight_scale_marlin, requires_grad=False)
        layer.w2_weight_scale = Parameter(w2_weight_scale_marlin, requires_grad=False)
        layer.w13_weight_scale_2 = Parameter(w13_scale_2_processed, requires_grad=False)
        layer.w2_weight_scale_2 = Parameter(w2_scale_2_processed, requires_grad=False)
        if not hasattr(layer, "_marlin_tensor_refs"):
            layer._marlin_tensor_refs = {}
        layer._marlin_tensor_refs["w13_weight_scale"] = layer.w13_weight_scale.data
        layer._marlin_tensor_refs["w2_weight_scale"] = layer.w2_weight_scale.data
    else:
        # Reload path: copy into existing storages, don't reallocate.
        layer.w13_weight.data.copy_(w13_weight_marlin)
        layer.w2_weight.data.copy_(w2_weight_marlin)
        layer.w13_weight_scale_2.data.copy_(w13_scale_2_processed)
        layer.w2_weight_scale_2.data.copy_(w2_scale_2_processed)
        w13_marlin_ref = layer._marlin_tensor_refs.get("w13_weight_scale")
        w2_marlin_ref = layer._marlin_tensor_refs.get("w2_weight_scale")
        if w13_marlin_ref is not None:
            w13_marlin_ref.copy_(w13_weight_scale_marlin)
            layer.w13_weight_scale = Parameter(w13_marlin_ref, requires_grad=False)
        else:
            logger.warning("MoE: _marlin_tensor_refs['w13_weight_scale'] not found")
            layer.w13_weight_scale.data.copy_(w13_weight_scale_marlin)
        if w2_marlin_ref is not None:
            w2_marlin_ref.copy_(w2_weight_scale_marlin)
            layer.w2_weight_scale = Parameter(w2_marlin_ref, requires_grad=False)
        else:
            logger.warning("MoE: _marlin_tensor_refs['w2_weight_scale'] not found")
            layer.w2_weight_scale.data.copy_(w2_weight_scale_marlin)
    # W4A16: activations are not quantized, so no input scales.
    layer.w13_input_scale = None
    layer.w2_input_scale = None
    # Initialize kernel
    self.moe_quant_config = self.get_fused_moe_quant_config(layer)
    if self.moe_quant_config is not None and (
        (not self.moe.moe_parallel_config.use_all2all_kernels) or self.moe.moe_parallel_config.use_naive_all2all_kernels
    ):
        self.kernel = make_nvfp4_moe_kernel(
            moe_quant_config=self.moe_quant_config,
            moe_config=self.moe,
            experts_cls=self.experts_cls,
        )
def _process_nvfp4_moe_flashinfer_cutlass(self, layer: torch.nn.Module, is_first_call: bool) -> None:
    """Process MoE layer with FlashInfer/CUTLASS backend (W4A4).

    Feeds cloned HF-format packed weights through vLLM's
    `convert_to_nvfp4_moe_kernel_format` and installs the converted tensors.
    First call records the scale storages in `_marlin_tensor_refs`; later
    calls copy in place for address stability. `self` is the vLLM MoE quant
    method instance.
    """
    from vllm.model_executor.layers.fused_moe.oracle.nvfp4 import (
        convert_to_nvfp4_moe_kernel_format,
        make_nvfp4_moe_kernel,
    )
    from vllm.model_executor.utils import replace_parameter
    w13_packed = layer.w13_weight_packed.data
    w2_packed = layer.w2_weight_packed.data
    w13_scale_hf = layer.w13_weight_scale.data
    w2_scale_hf = layer.w2_weight_scale.data
    # The fused w13 uses a single global scale; w1/w3 must agree.
    if self.moe.is_act_and_mul and not torch.allclose(
        layer.w13_weight_global_scale[:, 0], layer.w13_weight_global_scale[:, 1]
    ):
        logger.warning("w1_weight_global_scale must match w3_weight_global_scale. Accuracy may be affected.")
    w13_weight_global_scale = layer.w13_weight_global_scale[:, 0].contiguous()
    # Clones decouple the converter's input from the HF params deleted later.
    w13_temp = Parameter(w13_packed.clone(), requires_grad=False)
    w2_temp = Parameter(w2_packed.clone(), requires_grad=False)
    if is_first_call:
        # NOTE(review): converter appears to read weights via `layer`, so the
        # temps are installed before conversion on first call — confirm.
        layer.w13_weight = w13_temp
        layer.w2_weight = w2_temp
    (
        w13,
        w13_scale,
        w13_scale_2,
        a13_scale,
        w2,
        w2_scale,
        w2_scale_2,
        a2_scale,
    ) = convert_to_nvfp4_moe_kernel_format(
        nvfp4_backend=self.nvfp4_backend,
        layer=layer,
        w13=w13_temp,
        w13_scale=w13_scale_hf,
        # Kernel format consumes reciprocals of the HF global scales.
        w13_scale_2=(1.0 / w13_weight_global_scale),
        a13_scale=(1.0 / layer.w13_input_global_scale),
        w2=w2_temp,
        w2_scale=w2_scale_hf,
        w2_scale_2=(1.0 / layer.w2_weight_global_scale),
        a2_scale=(1.0 / layer.w2_input_global_scale),
        is_act_and_mul=self.moe.is_act_and_mul,
    )
    # Update parameters
    if is_first_call:
        replace_parameter(layer, "w13_weight", w13)
        replace_parameter(layer, "w2_weight", w2)
        layer.w13_weight_scale = Parameter(w13_scale, requires_grad=False)
        layer.w2_weight_scale = Parameter(w2_scale, requires_grad=False)
        if not hasattr(layer, "_marlin_tensor_refs"):
            layer._marlin_tensor_refs = {}
        # Keep handles to the converted scale storages for reloads.
        layer._marlin_tensor_refs["w13_weight_scale"] = layer.w13_weight_scale.data
        layer._marlin_tensor_refs["w2_weight_scale"] = layer.w2_weight_scale.data
    else:
        # Reload path: copy into existing storages, don't reallocate.
        layer.w13_weight.data.copy_(w13.data)
        layer.w2_weight.data.copy_(w2.data)
        w13_scale_ref = layer._marlin_tensor_refs.get("w13_weight_scale")
        w2_scale_ref = layer._marlin_tensor_refs.get("w2_weight_scale")
        if w13_scale_ref is not None:
            w13_scale_ref.copy_(w13_scale)
            layer.w13_weight_scale = Parameter(w13_scale_ref, requires_grad=False)
        else:
            logger.warning("MoE W4A4: _marlin_tensor_refs['w13_weight_scale'] not found")
            layer.w13_weight_scale.data.copy_(w13_scale)
        if w2_scale_ref is not None:
            w2_scale_ref.copy_(w2_scale)
            layer.w2_weight_scale = Parameter(w2_scale_ref, requires_grad=False)
        else:
            logger.warning("MoE W4A4: _marlin_tensor_refs['w2_weight_scale'] not found")
            layer.w2_weight_scale.data.copy_(w2_scale)
    layer.w13_weight_scale_2 = w13_scale_2
    layer.w2_weight_scale_2 = w2_scale_2
    layer.w13_input_scale = a13_scale
    layer.w2_input_scale = a2_scale
    # Initialize kernel
    self.moe_quant_config = self.get_fused_moe_quant_config(layer)
    if self.moe_quant_config is not None and (
        (not self.moe.moe_parallel_config.use_all2all_kernels) or self.moe.moe_parallel_config.use_naive_all2all_kernels
    ):
        self.kernel = make_nvfp4_moe_kernel(
            moe_quant_config=self.moe_quant_config,
            moe_config=self.moe,
            experts_cls=self.experts_cls,
        )
# MoE NVFP4 Patches (entry points)
def patched_nvfp4_moe_process_weights_after_loading(self, layer: torch.nn.Module) -> None:
    """Patched process_weights_after_loading for NVFP4 MoE layer.

    Captures HF metadata/loaders on the first call, dispatches to the
    backend-specific conversion, then drops the HF-format packed weights
    (they are rebuilt from metadata on reload).
    """
    from vllm.model_executor.layers.fused_moe.oracle.nvfp4 import NvFp4MoeBackend
    hf_param_names = ("w13_weight_packed", "w2_weight_packed", "w13_weight_scale", "w2_weight_scale")
    first_call = _check_first_call(layer)
    if first_call:
        # Record rebuild metadata and weight loaders before conversion
        # replaces/deletes the HF-format parameters.
        for pname in hf_param_names:
            save_param_meta(layer, pname)
        if not hasattr(layer, "_weight_loaders"):
            layer._weight_loaders = {}
        for pname in hf_param_names:
            param = getattr(layer, pname, None)
            if param is not None and hasattr(param, "weight_loader"):
                layer._weight_loaders[pname] = param.weight_loader
    # Backend dispatch: Marlin (W4A16) vs FlashInfer/CUTLASS (W4A4).
    if self.nvfp4_backend == NvFp4MoeBackend.MARLIN:
        _process_nvfp4_moe_marlin(self, layer, first_call)
    else:
        _process_nvfp4_moe_flashinfer_cutlass(self, layer, first_call)
    # Drop HF-format packed weights; metadata allows rebuilding them later.
    for stale_name in ("w13_weight_packed", "w2_weight_packed"):
        if hasattr(layer, stale_name):
            delattr(layer, stale_name)
# Patch targets: (dotted path of the vLLM method to replace, replacement fn).
# The paths are resolved lazily by unittest.mock.patch in apply_qat_patches.
_PATCH_TARGETS = [
    # Dense W4A16
    (
        "vllm.model_executor.layers.quantization.compressed_tensors.schemes."
        "compressed_tensors_w4a16_nvfp4.CompressedTensorsW4A16Fp4.process_weights_after_loading",
        patched_w4a16_process_weights_after_loading,
    ),
    # Dense W4A4
    (
        "vllm.model_executor.layers.quantization.compressed_tensors.schemes."
        "compressed_tensors_w4a4_nvfp4.CompressedTensorsW4A4Fp4.process_weights_after_loading",
        patched_w4a4_process_weights_after_loading,
    ),
    # MoE NVFP4
    (
        "vllm.model_executor.layers.quantization.compressed_tensors."
        "compressed_tensors_moe.CompressedTensorsW4A4Nvfp4MoEMethod.process_weights_after_loading",
        patched_nvfp4_moe_process_weights_after_loading,
    ),
]
# Live mock patcher objects, kept referenced so the patches stay active for
# the process lifetime (and apply_qat_patches stays idempotent).
_applied_patches = []
def apply_qat_patches():
    """Apply NVFP4 patches to support dynamic weight updates. Call before model loading.

    Idempotent: a repeat call is a no-op that returns the live patcher list.

    Returns:
        The list of started ``unittest.mock`` patcher objects.
    """
    global _applied_patches
    if _applied_patches:
        logger.warning("QAT patches already applied, skipping")
        return _applied_patches
    logger.info("Applying NVFP4 patches for dynamic weight loading...")
    for dotted_target, replacement_fn in _PATCH_TARGETS:
        patcher = patch(dotted_target, replacement_fn)
        # Append before starting so a failed start still leaves a record.
        _applied_patches.append(patcher)
        patcher.start()
    logger.info(f"Applied {len(_applied_patches)} NVFP4 patches for dynamic weight loading")
    return _applied_patches
def prepare_qat_for_load_weights(model, device=None):
    """
    Prepare QAT model for weight loading. Call ONCE before multi-bucket weight loading.

    Swaps shape-changed scale tensors back to HF shape and rebuilds any HF
    parameter that was deleted (W4A16) or overwritten (W4A4) during the
    previous process_weights pass, so vLLM's weight loaders can run again.

    Args:
        model: vLLM model
        device: Device for created parameters

    Returns:
        The ParamMetaDict built for the model (also stashed on the model as
        ``_param_meta_for_restore``).
    """
    inner_model = model.model if hasattr(model, "model") else model
    param_meta = ParamMetaDict(inner_model, device=device)
    param_meta.prepare_for_reload()
    logger.info(f"[prepare_qat] Tensor swap prepared for {len(param_meta._tensor_swap_layers)} layers")
    # Rebuild deleted (W4A16) or overwritten (W4A4) params back to HF format
    rebuilt_count = 0
    for layer_name, cache_entry in param_meta._layer_meta_cache.items():
        module = cache_entry["module"]
        for param_name, pm in cache_entry["meta"].items():
            current = getattr(module, param_name, None)
            if current is not None:
                # Already HF-format (shape+dtype match, loader attached): keep it.
                still_hf = (
                    tuple(current.shape) == tuple(pm["shape"])
                    and current.dtype == pm["dtype"]
                    and hasattr(current, "weight_loader")
                )
                if still_hf:
                    continue
            module.register_parameter(param_name, _create_param_from_meta(module, param_name, pm, device))
            rebuilt_count += 1
    logger.info(f"[prepare_qat] Rebuilt {rebuilt_count} parameters")
    inner_model._param_meta_for_restore = param_meta
    return param_meta
def manual_process_weights_after_loading(model):
    """Run every quantized layer's post-load weight processing.

    Dense layers expose a ``scheme``; MoE layers expose a ``quant_method``
    (KV-cache quantization methods are skipped). Call after ``load_weights``.

    Returns:
        int: total number of layers processed (dense + MoE).
    """
    target = model.model if hasattr(model, "model") else model
    dense_done = 0
    moe_done = 0
    for submodule in target.modules():
        if hasattr(submodule, "scheme"):
            submodule.scheme.process_weights_after_loading(submodule)
            dense_done += 1
            continue  # scheme-bearing modules never take the quant_method path
        qm = getattr(submodule, "quant_method", None)
        if qm is None or not hasattr(qm, "process_weights_after_loading"):
            continue
        # Skip KV cache quantization methods
        if "KVCache" in qm.__class__.__name__:
            continue
        qm.process_weights_after_loading(submodule)
        moe_done += 1
    logger.debug(f"Processed {dense_done} dense layers, {moe_done} MoE layers")
    return dense_done + moe_done
# Public API of this module.
__all__ = [
    "apply_qat_patches",
    "prepare_qat_for_load_weights",
    "manual_process_weights_after_loading",
]
| {
"repo_id": "verl-project/verl",
"file_path": "verl/utils/qat/vllm_patch.py",
"license": "Apache License 2.0",
"lines": 694,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:tests/workers/config/test_model_config_on_cpu.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright Amazon.com and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from omegaconf import OmegaConf
from verl.workers.config.model import HFModelConfig
class TestHFModelConfigCPU:
    """CPU-only regression tests for HFModelConfig's `target_modules` handling."""

    model_path = os.path.expanduser("~/models/Qwen/Qwen2.5-0.5B")  # Just a path string, not loaded

    def test_target_modules_accepts_list_via_omegaconf(self):
        """A list-valued `target_modules` must survive an OmegaConf merge.

        This mirrors the CLI override
        actor_rollout_ref.model.target_modules='["k_proj","o_proj","down_proj","q_proj"]'
        which reaches the config as a ListConfig.
        """
        # omega_conf_to_dataclass builds this same structured config internally.
        schema_cfg = OmegaConf.structured(HFModelConfig)
        override_cfg = OmegaConf.create(
            {
                "path": self.model_path,
                "target_modules": ["k_proj", "o_proj", "q_proj", "v_proj"],
            }
        )
        # With the old `target_modules: str` declaration this merge raised
        # ValidationError: "Cannot convert 'ListConfig' to string".
        merged_cfg = OmegaConf.merge(schema_cfg, override_cfg)
        assert list(merged_cfg.target_modules) == ["k_proj", "o_proj", "q_proj", "v_proj"]

    def test_target_modules_accepts_none_via_omegaconf(self):
        """None must remain a valid value for `target_modules`."""
        schema_cfg = OmegaConf.structured(HFModelConfig)
        override_cfg = OmegaConf.create(
            {
                "path": self.model_path,
                "target_modules": None,
            }
        )
        merged_cfg = OmegaConf.merge(schema_cfg, override_cfg)
        assert merged_cfg.target_modules is None

    def test_target_modules_accepts_string_via_omegaconf(self):
        """Plain strings such as "all-linear" must remain valid for `target_modules`."""
        schema_cfg = OmegaConf.structured(HFModelConfig)
        override_cfg = OmegaConf.create(
            {
                "path": self.model_path,
                "target_modules": "all-linear",
            }
        )
        merged_cfg = OmegaConf.merge(schema_cfg, override_cfg)
        assert merged_cfg.target_modules == "all-linear"

    def test_target_modules_raises_on_invalid_type(self):
        """__post_init__ must reject `target_modules` entries that are not strings."""
        schema_cfg = OmegaConf.structured(HFModelConfig)
        bad_override = OmegaConf.create(
            {
                "path": self.model_path,
                "target_modules": [1, 2, 3],  # list of ints instead of strings
            }
        )
        merged_cfg = OmegaConf.merge(schema_cfg, bad_override)
        with pytest.raises(TypeError):
            OmegaConf.to_object(merged_cfg)
| {
"repo_id": "verl-project/verl",
"file_path": "tests/workers/config/test_model_config_on_cpu.py",
"license": "Apache License 2.0",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:tests/experimental/agent_loop/test_agent_loop_extra_fields_schema_on_cpu.py | # Copyright 2026 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Optional
import numpy as np
import pytest
import torch
from omegaconf import OmegaConf
from verl.experimental.agent_loop.agent_loop import (
AgentLoopMetrics,
AgentLoopWorker,
DictConfigWrap,
_InternalAgentLoopOutput,
)
from verl.experimental.agent_loop.single_turn_agent_loop import SingleTurnAgentLoop
from verl.experimental.fully_async_policy.agent_loop.partial_single_turn_agent_loop import PartialSingleTurnAgentLoop
from verl.protocol import DataProto
from verl.utils.dataset.rl_dataset import RLHFDataset
@dataclass
class _FakeTokenOutput:
    """Minimal stand-in for the token output returned by the rollout server;
    only the fields read by the agent loops under test are modeled."""
    # Generated token ids for the response.
    token_ids: list[int]
    # Per-token log-probabilities; None when not reported.
    log_probs: Optional[list[float]] = None
    # MoE expert-routing payload; unused by these tests.
    routed_experts: Any = None
    # Presumably a preemption count from the server — unused here.
    num_preempted: Optional[int] = None
class _FakeServerManager:
    """Server-manager double that emits small deterministic generations."""

    async def generate(
        self,
        request_id: str,
        *,
        prompt_ids: list[int],
        sampling_params: dict[str, Any],
        image_data: Optional[list[Any]] = None,
        video_data: Optional[list[Any]] = None,
    ) -> _FakeTokenOutput:
        """Echo the prompt's last token followed by a fixed three-token tail."""
        del request_id, sampling_params, image_data, video_data
        fake_tokens = prompt_ids[-1:] + [11, 12, 13]
        return _FakeTokenOutput(token_ids=fake_tokens, log_probs=[0.0, 0.0, 0.0, 0.0])

    async def generate_for_partial(
        self,
        request_id: str,
        *,
        prompt_ids: list[int],
        sampling_params: dict[str, Any],
        image_data: Optional[list[Any]] = None,
        video_data: Optional[list[Any]] = None,
    ) -> tuple[list[int], list[float], bool]:
        """Return a short partial generation, matching logprobs, and cancelled=False."""
        del request_id, sampling_params, image_data, video_data
        partial_tokens = prompt_ids[-1:] + [21, 22]
        zero_logprobs = [0.0] * len(partial_tokens)
        return partial_tokens, zero_logprobs, False
class _FakeTokenizer:
def apply_chat_template(
self,
messages: list[dict[str, Any]],
*,
tools: Optional[list[dict]] = None,
add_generation_prompt: bool = True,
tokenize: bool = True,
**kwargs,
) -> list[int]:
del messages, tools, add_generation_prompt, tokenize, kwargs
# Minimal tokenization: return a small prompt.
return [101, 102]
def decode(self, ids: list[int] | torch.Tensor, skip_special_tokens: bool = True) -> str:
del ids, skip_special_tokens
return "<decoded>"
def _pad_1d(ids: list[int], *, length: int, pad_id: int = 0) -> list[int]:
if len(ids) > length:
return ids[:length]
return ids + [pad_id] * (length - len(ids))
def _to_internal(
    *,
    output_prompt_ids: list[int],
    output_response_ids: list[int],
    output_response_mask: list[int],
    metrics: AgentLoopMetrics,
    extra_fields: dict[str, Any],
    num_turns: int,
    prompt_len: int,
    response_len: int,
) -> _InternalAgentLoopOutput:
    """Pad an agent-loop output to fixed lengths and wrap it as _InternalAgentLoopOutput."""

    def _as_batch(values: list[int]) -> torch.Tensor:
        # Single-sample batch of long ids, shape (1, len(values)).
        return torch.tensor([values], dtype=torch.long)

    padded_prompt = _pad_1d(output_prompt_ids, length=prompt_len, pad_id=0)
    padded_response = _pad_1d(output_response_ids, length=response_len, pad_id=0)
    padded_mask = _pad_1d(output_response_mask, length=response_len, pad_id=0)
    # Ones over the real tokens of each segment, zeros over the padding.
    prompt_attn = _pad_1d([1] * len(output_prompt_ids), length=prompt_len, pad_id=0)
    response_attn = _pad_1d([1] * len(output_response_ids), length=response_len, pad_id=0)
    total_len = prompt_len + response_len
    return _InternalAgentLoopOutput(
        prompt_ids=_as_batch(padded_prompt),
        response_ids=_as_batch(padded_response),
        response_mask=_as_batch(padded_mask),
        attention_mask=_as_batch(prompt_attn + response_attn),
        input_ids=_as_batch(padded_prompt + padded_response),
        position_ids=_as_batch(list(range(total_len))),
        response_logprobs=None,
        routed_experts=None,
        multi_modal_inputs=None,
        multi_modal_data=None,
        reward_score=None,
        num_turns=num_turns,
        metrics=metrics,
        extra_fields=extra_fields,
    )
@pytest.mark.asyncio
async def test_agent_loop_extra_fields_schema_stable_for_training_concat_on_cpu():
    """Outputs from different agent loops must share a stable extra_fields schema.

    Runs a single-turn loop and a partial single-turn loop against fake
    server/tokenizer doubles, post-processes each into a worker chunk, then
    checks that DataProto.concat of the two chunks exposes the same
    non-tensor keys with shape (2,) — i.e. training-time concatenation never
    sees a ragged schema.
    """
    # Minimal config surface used by the agent loops.
    config = OmegaConf.create(
        {
            "actor_rollout_ref": {
                "rollout": {"prompt_length": 16, "response_length": 16, "multi_turn": {"tool_config_path": None}},
                "model": {},
            },
            "data": {
                "tool_config_path": None,
                "apply_chat_template_kwargs": {},
            },
        }
    )
    server_manager = _FakeServerManager()
    tokenizer = _FakeTokenizer()
    processor = None
    trainer_config = DictConfigWrap(config)
    data_config = DictConfigWrap(config.data)
    # Two loop flavors built on identical dependencies.
    single_turn = SingleTurnAgentLoop(
        trainer_config=trainer_config,
        server_manager=server_manager,
        tokenizer=tokenizer,
        processor=processor,
        dataset_cls=RLHFDataset,
        data_config=data_config,
    )
    partial_single_turn = PartialSingleTurnAgentLoop(
        trainer_config=trainer_config,
        server_manager=server_manager,
        tokenizer=tokenizer,
        processor=processor,
        dataset_cls=RLHFDataset,
        data_config=data_config,
    )
    raw_prompt = [{"role": "user", "content": "hi"}]
    sampling_params: dict[str, Any] = {}
    out_a = await single_turn.run(sampling_params=sampling_params, raw_prompt=raw_prompt)
    out_b = await partial_single_turn.run(sampling_params=sampling_params, raw_prompt=raw_prompt, param_version=0)
    # Agent loop outputs should always contain these fields with consistent types.
    assert out_a.extra_fields["turn_scores"] == []
    assert out_a.extra_fields["tool_rewards"] == []
    assert out_b.extra_fields["turn_scores"] == []
    assert out_b.extra_fields["tool_rewards"] == []
    # Pad both samples to the same lengths so they can share one batch.
    prompt_len = max(len(out_a.prompt_ids), len(out_b.prompt_ids))
    response_len = max(len(out_a.response_ids), len(out_b.response_ids))
    internal_a = _to_internal(
        output_prompt_ids=out_a.prompt_ids,
        output_response_ids=out_a.response_ids,
        output_response_mask=out_a.response_mask,
        metrics=out_a.metrics,
        extra_fields=out_a.extra_fields,
        num_turns=out_a.num_turns,
        prompt_len=prompt_len,
        response_len=response_len,
    )
    internal_b = _to_internal(
        output_prompt_ids=out_b.prompt_ids,
        output_response_ids=out_b.response_ids,
        output_response_mask=out_b.response_mask,
        metrics=out_b.metrics,
        extra_fields=out_b.extra_fields,
        num_turns=out_b.num_turns,
        prompt_len=prompt_len,
        response_len=response_len,
    )
    # Mimic two "worker chunks" and concatenate as in training.
    # _postprocess only reads reward_loop_worker_handles from the worker, so a
    # one-attribute dummy stands in for a real AgentLoopWorker.
    dummy_worker = type("_DummyWorker", (), {"reward_loop_worker_handles": None})()
    chunk_a = AgentLoopWorker._postprocess(
        dummy_worker,
        inputs=[internal_a],
        input_non_tensor_batch={
            "index": np.array([0], dtype=object),
            "agent_name": np.array(["single_turn_agent"], dtype=object),
        },
    )
    chunk_b = AgentLoopWorker._postprocess(
        dummy_worker,
        inputs=[internal_b],
        input_non_tensor_batch={
            "index": np.array([1], dtype=object),
            "agent_name": np.array(["partial_single_turn_agent"], dtype=object),
        },
    )
    merged: DataProto = DataProto.concat([chunk_a, chunk_b])
    # Stable schema: present regardless of which loop produced a sample.
    stable_keys = (
        "turn_scores",
        "tool_rewards",
        "is_cancel",
        "param_version_start",
        "param_version_end",
        "extras",
    )
    for key in stable_keys:
        assert key in merged.non_tensor_batch, f"missing key in merged batch: {key}"
        assert merged.non_tensor_batch[key].shape == (2,), (
            f"invalid shape for {key}: {merged.non_tensor_batch[key].shape}"
        )
    # And the list-typed fields are actually lists (not missing / scalar).
    assert merged.non_tensor_batch["turn_scores"][0] == []
    assert merged.non_tensor_batch["tool_rewards"][0] == []
    assert merged.non_tensor_batch["turn_scores"][1] == []
    assert merged.non_tensor_batch["tool_rewards"][1] == []
| {
"repo_id": "verl-project/verl",
"file_path": "tests/experimental/agent_loop/test_agent_loop_extra_fields_schema_on_cpu.py",
"license": "Apache License 2.0",
"lines": 228,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:verl/experimental/separation/engine_workers.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright 2025 Meituan Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from omegaconf import DictConfig
from verl.single_controller.base.decorator import Dispatch, register
from verl.utils.device import (
get_device_name,
)
from verl.workers.engine_workers import ActorRolloutRefWorker
# Module logger. NOTE(review): keyed on __file__ (full path) rather than
# __name__ — appears consistent with other verl workers, but confirm intent.
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
# Accelerator name for this process (resolved once at import time).
device_name = get_device_name()
# Public API of this module.
__all__ = ["DetachActorWorker"]
class DetachActorWorker(ActorRolloutRefWorker):
    """
    A worker class that extends ActorRolloutRefWorker to support detaching and restoring the actor model.

    This worker facilitates saving the model state to CPU and restoring it, enabling efficient
    resource management and checkpointing in distributed training. It currently supports
    FSDP, FSDP2, and Megatron strategies.
    """

    def __init__(self, config: DictConfig, role: str):
        """
        Initialize the DetachActorWorker.

        Args:
            config: Configuration dictionary.
            role: The role of the worker (e.g., 'actor', 'rollout', 'ref').
        """
        ActorRolloutRefWorker.__init__(self, config, role)
        self._strategy_handlers = None
        # Key -> CPU-resident model state. Initialized eagerly so that
        # restore_model_from_cpu / clear_cpu_model are safe to call even
        # before the first save (previously they raised AttributeError).
        self.cpu_saved_models = {}
        self.copy_handler, self.restore_handler = self._get_strategy_handlers()

    def _get_strategy_handlers(self):
        """
        Get the strategy-specific handlers for saving and restoring the model.

        The handler pair is resolved lazily from the configured strategy and
        cached in ``self._strategy_handlers``.

        Returns:
            tuple: A tuple containing (save_handler, restore_handler).

        Raises:
            NotImplementedError: If the strategy is not supported.
        """
        if self._strategy_handlers is not None:
            return self._strategy_handlers
        strategy = self.config.actor.strategy
        if strategy in ["fsdp", "fsdp2"]:
            # Imported lazily so megatron-only deployments don't pay for FSDP deps.
            from verl.experimental.fully_async_policy.fsdp2_utils import (
                fsdp2_sharded_load_from_cpu,
                fsdp2_sharded_save_to_cpu,
            )

            self._strategy_handlers = (fsdp2_sharded_save_to_cpu, fsdp2_sharded_load_from_cpu)
        elif strategy == "megatron":
            from verl.experimental.fully_async_policy.megatron_utils import (
                copy_megatron_model_to_cpu,
                restore_megatron_model_from_cpu,
            )

            self._strategy_handlers = (copy_megatron_model_to_cpu, restore_megatron_model_from_cpu)
        else:
            raise NotImplementedError(f"Unsupported strategy: {strategy}")
        return self._strategy_handlers

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def save_model_to_cpu(self, n):
        """
        Save the current model state to CPU memory.

        Args:
            n: Identifier/Key for the saved model state.
        """
        if not hasattr(self, "cpu_saved_models"):
            # Defensive: subclasses that bypass __init__ still get a registry.
            self.cpu_saved_models = {}
        self.cpu_saved_models[n] = self.copy_handler(self.actor.engine.module)

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def restore_model_from_cpu(self, n):
        """
        Restore the model state from CPU memory.

        Restoring an unknown key is a no-op (as before), but now logged.

        Args:
            n: Identifier/Key for the saved model state to restore.
        """
        saved = getattr(self, "cpu_saved_models", {})
        if n not in saved:
            logger.warning("No CPU-saved model state found for key %s; skipping restore", n)
            return
        strategy = self.config.actor.strategy
        if strategy in ["fsdp", "fsdp2"]:
            # FSDP saves carry (sharded_state, global_spec); megatron a single blob.
            cpu_sharded_state, global_spec = saved[n]
            self.restore_handler(self.actor.engine.module, cpu_sharded_state, global_spec)
        else:
            self.restore_handler(self.actor.engine.module, saved[n])

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def clear_cpu_model(self, n):
        """
        Clear the saved model state from CPU memory.

        Args:
            n: Identifier/Key for the saved model state to remove.
        """
        # pop with default: no error if the key was never saved or already cleared.
        getattr(self, "cpu_saved_models", {}).pop(n, None)
| {
"repo_id": "verl-project/verl",
"file_path": "verl/experimental/separation/engine_workers.py",
"license": "Apache License 2.0",
"lines": 102,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/experimental/separation/ray_trainer.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
# Copyright 2025 Meituan Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
PPO Trainer with Ray-based single controller.
This trainer supports model-agonistic model initialization with huggingface
"""
import uuid
from copy import deepcopy
from pprint import pprint
from typing import Any, Optional
import numpy as np
import torch
from omegaconf import OmegaConf
from torch.utils.data import Dataset, Sampler
from tqdm import tqdm
from verl import DataProto
from verl.checkpoint_engine import CheckpointEngineManager
from verl.experimental.dataset.sampler import AbstractCurriculumSampler
from verl.single_controller.ray import RayClassWithInitArgs, RayWorkerGroup, ResourcePoolManager
from verl.single_controller.ray.base import create_colocated_worker_cls
from verl.trainer.ppo.core_algos import AdvantageEstimator, agg_loss
from verl.trainer.ppo.metric_utils import (
compute_data_metrics,
compute_throughout_metrics,
compute_timing_metrics,
compute_variance_proxy_metrics,
)
from verl.trainer.ppo.ray_trainer import RayPPOTrainer, apply_kl_penalty, compute_advantage, compute_response_mask
from verl.trainer.ppo.reward import extract_reward
from verl.trainer.ppo.utils import Role, WorkerType
from verl.utils.checkpoint.checkpoint_manager import should_save_ckpt_esi
from verl.utils.config import omega_conf_to_dataclass
from verl.utils.debug import marked_timer
from verl.utils.metric import reduce_metrics
from verl.utils.rollout_skip import RolloutSkip
class SeparateRayPPOTrainer(RayPPOTrainer):
"""
Support for the initialization and fit process of Ray Trainer in the resource-separated scenario:
- Fully async policy
- One-step off-policy
"""
    def __init__(
        self,
        config,
        tokenizer,
        role_worker_mapping: dict[Role, WorkerType],
        resource_pool_manager: ResourcePoolManager,
        ray_worker_group_cls: type[RayWorkerGroup] = RayWorkerGroup,
        processor=None,
        reward_fn=None,
        val_reward_fn=None,
        train_dataset: Optional[Dataset] = None,
        val_dataset: Optional[Dataset] = None,
        collate_fn=None,
        train_sampler: Optional[Sampler] = None,
        device_name=None,
    ):
        """Initialize the separated-resource PPO trainer.

        Arguments mirror ``RayPPOTrainer.__init__``; see that class for details.

        NOTE(review): ``reward_fn`` and ``val_reward_fn`` are accepted but NOT
        forwarded to ``super().__init__()`` — rewards appear to flow through the
        reward loop instead (see ``_init_reward_loop``); confirm this is intended.
        """
        super().__init__(
            config,
            tokenizer,
            role_worker_mapping,
            resource_pool_manager,
            ray_worker_group_cls,
            processor,
            train_dataset,
            val_dataset,
            collate_fn,
            train_sampler,
            device_name,
        )
        # Training-progress state, reset again at the start of fit().
        self.global_steps = 0
        self.epoch = 0
        self.max_steps_duration = 0
        self.progress_bar = None
        self.logger = None
        self.is_last_step = False
        # Per-step profiling flags for the previous / current / next step.
        self.prev_step_profile = False
        self.curr_step_profile = False
        self.next_step_profile = False
        self.last_val_metrics = {}
        # Per-step metric and timing accumulators (rebuilt each fit_step).
        self.metrics = {}
        self.timing_raw = {}
        # reward message
        self.reward_tensor = None
        self.reward_extra_infos_dict = {}
        self.checkpoint_manager = None
    def init_workers(self):
        """Initialize distributed training workers using Ray backend.

        Creates:
        1. Ray resource pools from configuration
        2. Worker groups for each role (actor, critic, etc.)
        3. The reward loop, the async rollout manager, and the checkpoint-engine
           manager that ships trainer weights to the rollout replicas.
        """
        self._init_resource_pools()
        self._create_worker_classes()
        self._init_worker_groups()
        self._init_models()
        self._init_reward_loop()
        self._init_async_rollout_manager()
        # Connects the trainer worker group to the rollout replicas so updated
        # weights can be pushed after each optimizer step (see fit()).
        self.checkpoint_manager = CheckpointEngineManager(
            config=omega_conf_to_dataclass(self.config.actor_rollout_ref.rollout.checkpoint_engine),
            trainer=self.actor_rollout_wg,
            replicas=self.async_rollout_manager.rollout_replicas,
        )
def _init_resource_pools(self):
self.resource_pool_manager.create_resource_pool()
self.resource_pool_to_cls = {pool: {} for pool in self.resource_pool_manager.resource_pool_dict.values()}
    def _create_worker_classes(self):
        """Register the Ray worker class for every enabled role into the per-pool registry."""
        self._create_actor_rollout_classes()
        self._create_critic_class()
        self._create_reference_policy_class()
        self._create_reward_model_class()
    def _create_actor_rollout_classes(self):
        """Abstract hook: subclasses register their actor/rollout worker classes here
        (separated-resource placement differs per subclass)."""
        raise NotImplementedError
    def _create_critic_class(self):
        """Register the critic worker class when a critic is enabled.

        With the legacy worker implementation disabled, the critic config is
        converted into a TrainingWorkerConfig for the engine-based worker; the
        original dataclass is kept on ``self.orig_critic_cfg`` (used later by
        ``_init_models`` to build the value loss).
        """
        # create critic
        if self.use_critic:
            resource_pool = self.resource_pool_manager.get_resource_pool(Role.Critic)
            critic_cfg = omega_conf_to_dataclass(self.config.critic)
            if self.use_legacy_worker_impl == "disable":
                # convert critic_cfg into TrainingWorkerConfig
                from verl.workers.config import FSDPEngineConfig
                from verl.workers.engine_workers import TrainingWorkerConfig

                self.orig_critic_cfg = critic_cfg
                if self.orig_critic_cfg.strategy == "fsdp":
                    # Copy the token budgets onto the engine config the worker reads.
                    engine_config: FSDPEngineConfig = self.orig_critic_cfg.model.fsdp_config
                    engine_config.infer_max_token_len_per_gpu = critic_cfg.ppo_infer_max_token_len_per_gpu
                    engine_config.max_token_len_per_gpu = critic_cfg.ppo_max_token_len_per_gpu
                else:
                    raise NotImplementedError(f"Unknown strategy {self.orig_critic_cfg.strategy=}")
                critic_cfg = TrainingWorkerConfig(
                    model_type="value_model",
                    model_config=self.orig_critic_cfg.model_config,
                    engine_config=engine_config,
                    optimizer_config=self.orig_critic_cfg.optim,
                    checkpoint_config=self.orig_critic_cfg.checkpoint,
                )
            critic_cls = RayClassWithInitArgs(cls=self.role_worker_mapping[Role.Critic], config=critic_cfg)
            self.resource_pool_to_cls[resource_pool][str(Role.Critic)] = critic_cls
    def _create_reference_policy_class(self):
        """Register the reference-policy worker class when one is configured."""
        # create reference policy if needed
        if self.use_reference_policy:
            resource_pool = self.resource_pool_manager.get_resource_pool(Role.RefPolicy)
            ref_policy_cls = RayClassWithInitArgs(
                self.role_worker_mapping[Role.RefPolicy],
                config=self.config.actor_rollout_ref,
                role=str(Role.RefPolicy),
                # profile_option=self.config.trainer.npu_profile.options,
            )
            self.resource_pool_to_cls[resource_pool][str(Role.RefPolicy)] = ref_policy_cls
    def _create_reward_model_class(self):
        """Register the reward-model worker class when a learned RM is configured."""
        # create a reward model if reward_fn is None
        if self.use_rm:
            # we create a RM here
            resource_pool = self.resource_pool_manager.get_resource_pool(Role.RewardModel)
            rm_cls = RayClassWithInitArgs(
                self.role_worker_mapping[Role.RewardModel], config=self.config.reward.reward_model
            )
            self.resource_pool_to_cls[resource_pool][str(Role.RewardModel)] = rm_cls
    def _init_worker_groups(self):
        """Spawn one colocated Ray worker group per resource pool.

        Assembles RayWorkerGroup kwargs (register-center timeout, profiling
        steps, nsight options, device name) from config, then spawns the
        colocated worker classes registered in ``resource_pool_to_cls`` and
        collects them into ``self.all_wg`` keyed by role name.
        """
        # initialize WorkerGroup
        # NOTE: if you want to use a different resource pool for each role, which can support different parallel size,
        # you should not use `create_colocated_worker_cls`.
        # Instead, directly pass different resource pool to different worker groups.
        # See https://github.com/volcengine/verl/blob/master/examples/ray/tutorial.ipynb for more information.
        all_wg = {}
        wg_kwargs = {}  # Setting up kwargs for RayWorkerGroup
        if OmegaConf.select(self.config.trainer, "ray_wait_register_center_timeout") is not None:
            wg_kwargs["ray_wait_register_center_timeout"] = self.config.trainer.ray_wait_register_center_timeout
        if OmegaConf.select(self.config.global_profiler, "steps") is not None:
            wg_kwargs["profile_steps"] = OmegaConf.select(self.config.global_profiler, "steps")
            # Only require nsight worker options when tool is nsys
            if OmegaConf.select(self.config.global_profiler, "tool") == "nsys":
                assert (
                    OmegaConf.select(self.config.global_profiler.global_tool_config.nsys, "worker_nsight_options")
                    is not None
                ), "worker_nsight_options must be set when using nsys with profile_steps"
                wg_kwargs["worker_nsight_options"] = OmegaConf.to_container(
                    OmegaConf.select(self.config.global_profiler.global_tool_config.nsys, "worker_nsight_options")
                )
        wg_kwargs["device_name"] = self.device_name
        for resource_pool, class_dict in self.resource_pool_to_cls.items():
            worker_dict_cls = create_colocated_worker_cls(class_dict=class_dict)
            wg_dict = self.ray_worker_group_cls(
                resource_pool=resource_pool,
                ray_cls_with_init=worker_dict_cls,
                **wg_kwargs,
            )
            # spawn() splits the colocated group back into one handle per role.
            spawn_wg = wg_dict.spawn(prefix_set=class_dict.keys())
            all_wg.update(spawn_wg)
        self.all_wg = all_wg
    def _init_models(self):
        """Initialize the models inside each spawned worker group.

        Critic is set up first (with the value-loss closure when the legacy
        worker impl is disabled), then reference policy and reward model; the
        actor/rollout group is deliberately initialized last.
        """
        if self.use_critic:
            self.critic_wg = self.all_wg[str(Role.Critic)]
            if self.use_legacy_worker_impl == "disable":
                self.critic_wg.reset()
                # assign critic loss
                from functools import partial

                from verl.workers.utils.losses import value_loss

                # Bind the original critic config (saved in _create_critic_class).
                value_loss_ = partial(value_loss, config=self.orig_critic_cfg)
                self.critic_wg.set_loss_fn(value_loss_)
            else:
                self.critic_wg.init_model()
        if self.use_reference_policy and not self.ref_in_actor:
            self.ref_policy_wg = self.all_wg[str(Role.RefPolicy)]
            self.ref_policy_wg.init_model()
        if self.use_rm:
            self.rm_wg = self.all_wg[str(Role.RewardModel)]
            self.rm_wg.init_model()
        # we should create rollout at the end so that vllm can have a better estimation of kv cache memory
        self.actor_rollout_wg = self.all_wg[str(Role.ActorRollout)]
        self.actor_rollout_wg.init_model()
    def _init_reward_loop(self):
        """Create the reward-loop manager (handles reward computation outside the trainer)."""
        from verl.experimental.reward_loop import RewardLoopManager

        # initialize reward loop manager
        # reward model (colocate or standalone): get resource_pool
        # no reward model: resource_pool = None
        resource_pool = self.resource_pool_manager.get_resource_pool(Role.RewardModel) if self.use_rm else None
        self.reward_loop_manager = RewardLoopManager(
            config=self.config,
            rm_resource_pool=resource_pool,
        )
    def _init_async_rollout_manager(self):
        """Hook for creating the async rollout manager; no-op in this base class.

        Subclasses must set ``self.async_rollout_manager`` here —
        ``init_workers`` reads ``async_rollout_manager.rollout_replicas`` right after.
        """
        pass
    def fit(self):
        """
        The training loop of PPO.

        The driver process only needs to call the compute functions of the worker
        group through RPC to construct the PPO dataflow. The light-weight
        advantage computation is done on the driver process.

        !!!
        The logic of fit is consistent with that of fit_refactor;
        if any modifications are made, apply them to both methods simultaneously.
        """
        from omegaconf import OmegaConf

        from verl.utils.tracking import Tracking

        self.logger = Tracking(
            project_name=self.config.trainer.project_name,
            experiment_name=self.config.trainer.experiment_name,
            default_backend=self.config.trainer.logger,
            config=OmegaConf.to_container(self.config, resolve=True),
        )
        self.global_steps = 0
        # load checkpoint and update weights before doing anything
        self._load_checkpoint()
        self.checkpoint_manager.update_weights(self.global_steps)
        # Resume mid-epoch: derive the epoch from the restored step counter.
        current_epoch = self.global_steps // len(self.train_dataloader)
        # perform validation before training
        # currently, we only support validation using the reward_function.
        if self.config.trainer.get("val_before_train", True):
            val_metrics = self._validate()
            assert val_metrics, f"{val_metrics=}"
            pprint(f"Initial validation metrics: {val_metrics}")
            self.logger.log(data=val_metrics, step=self.global_steps)
            if self.config.trainer.get("val_only", False):
                return
        if self.config.actor_rollout_ref.rollout.get("skip_rollout", False):
            rollout_skip = RolloutSkip(self.config, self.actor_rollout_wg)
            rollout_skip.wrap_generate_sequences()
        # add tqdm
        self.progress_bar = tqdm(total=self.total_training_steps, initial=self.global_steps, desc="Training Progress")
        # we start from step 1
        self.global_steps += 1
        self.last_val_metrics = None
        self.max_steps_duration = 0
        # Seed the per-step profiling flags before the first fit_step.
        self.prev_step_profile = False
        self.curr_step_profile = (
            self.global_steps in self.config.global_profiler.steps
            if self.config.global_profiler.steps is not None
            else False
        )
        self.next_step_profile = False
        for epoch in range(current_epoch, self.config.trainer.total_epochs):
            for batch_dict in self.train_dataloader:
                self.epoch = epoch
                self.fit_step(batch_dict)
                if self.is_last_step:
                    return
    def fit_step(self, batch_dict: Any = None):
        """
        Single-step training template method. Handles all logic for one training step.

        Flow:
            1. Pre-step processing -> 2. Get batch -> 3. Generate sequences ->
            4. Compute reward -> 5. Compute log_prob / ref log_prob ->
            6. Compute critic values -> 7. Compute advantage ->
            8. Update critic -> 9. Update actor -> 10. Post-step processing
               (weight sync, dump, validate, checkpoint, metrics)

        Args:
            batch_dict: Raw data dictionary from the train dataloader.
        """
        self.metrics = {"training/global_step": self.global_steps, "training/epoch": self.epoch}
        self.timing_raw = {}
        # reward message
        self.reward_tensor = None
        self.reward_extra_infos_dict = {}
        self._fit_prepare_step()
        self._fit_start_profile()
        with marked_timer("step", self.timing_raw):
            batch = self._fit_get_batch(batch_dict)
            batch = self._fit_generate(batch)
            batch = self._fit_compute_reward(batch)
            batch = self._fit_compute_log_prob(batch)
            batch = self._fit_compute_ref_log_prob(batch)
            batch = self._fit_compute_critic(batch)
            batch = self._fit_compute_advantage(batch)
            batch = self._fit_update_critic(batch)
            batch = self._fit_update_actor(batch)
            self._fit_update_weights()
            self._fit_dump_data(batch)
            self._fit_validate()
            self._fit_save_checkpoint()
        self._fit_stop_profile()
        self._fit_collect_metrics(batch)
        self._fit_torch_memory()
        self._fit_experimental(batch)
        self._fit_postprocess_step()
def _fit_prepare_step(self):
if hasattr(self.actor_rollout_wg, "async_calls_finalize_fn_exec"):
self.actor_rollout_wg.async_calls_finalize_fn_exec(blocking=False)
self.is_last_step = self.global_steps >= self.total_training_steps
def _fit_start_profile(self):
timing_raw = self.timing_raw
with marked_timer("start_profile", timing_raw):
self._start_profiling(
not self.prev_step_profile and self.curr_step_profile
if self.config.global_profiler.profile_continuous_steps
else self.curr_step_profile
)
def _fit_get_batch(self, batch_dict: dict) -> DataProto:
batch = DataProto.from_single_dict(batch_dict)
batch.meta_info["temperature"] = self.config.actor_rollout_ref.rollout.temperature
# add uid
batch.non_tensor_batch["uid"] = np.array([str(uuid.uuid4()) for _ in range(len(batch.batch))], dtype=object)
return batch
    def _fit_generate(self, batch: DataProto = None) -> DataProto:
        """Generate rollout sequences for the batch (plus REMAX baseline if configured).

        Side effects: updates ``self.timing_raw`` with generation timings, may
        balance the batch across DP ranks, and records global token counts and
        image sequence lengths in ``batch.meta_info``.

        Returns:
            DataProto: the input batch repeated ``rollout.n`` times and unioned
            with the generated outputs.
        """
        metrics = self.metrics
        timing_raw = self.timing_raw
        gen_batch = self._get_gen_batch(batch)
        # pass global_steps to trace
        gen_batch.meta_info["global_steps"] = self.global_steps
        # One generation request per sample per rollout.n replica.
        gen_batch_output = gen_batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True)
        with marked_timer("gen", timing_raw, color="red"):
            if self.curr_step_profile:
                self.async_rollout_manager.start_profile(global_step=self.global_steps)
            gen_batch_output = self.async_rollout_manager.generate_sequences(gen_batch_output)
            # Free rollout GPU memory while the trainer works.
            self.checkpoint_manager.sleep_replicas()
            if self.curr_step_profile:
                self.async_rollout_manager.stop_profile()
            timing_raw.update(gen_batch_output.meta_info["timing"])
            gen_batch_output.meta_info.pop("timing", None)
        if self.config.algorithm.adv_estimator == AdvantageEstimator.REMAX:
            # REMAX needs a greedy (do_sample=False) baseline generation.
            with marked_timer("gen_max", timing_raw, color="purple"):
                gen_baseline_batch = deepcopy(gen_batch)
                gen_baseline_batch.meta_info["do_sample"] = False
                if self.curr_step_profile:
                    self.async_rollout_manager.start_profile()
                gen_baseline_output = self.async_rollout_manager.generate_sequences(gen_baseline_batch)
                self.checkpoint_manager.sleep_replicas()
                if self.curr_step_profile:
                    self.async_rollout_manager.stop_profile()
                batch = batch.union(gen_baseline_output)
                # compute reward model score on batch
                # NOTE(review): rm_scores is never reassigned below, so the
                # keys_to_pop update branch is currently dead — confirm.
                rm_scores = None
                if self.use_rm and "rm_scores" not in batch.batch.keys():
                    batch_reward = self._compute_reward_colocate(batch)
                    batch = batch.union(batch_reward)
                # Compute or extract reward for REMAX baseline
                reward_baseline_tensor = batch.batch["rm_scores"].sum(dim=-1)
                # Drop the baseline generation tensors; only the scalar baseline stays.
                keys_to_pop = set(gen_baseline_output.batch.keys())
                if rm_scores is not None:
                    keys_to_pop.update(rm_scores.batch.keys())
                batch.pop(batch_keys=list(keys_to_pop))
                batch.batch["reward_baselines"] = reward_baseline_tensor
                del rm_scores, gen_baseline_batch, gen_baseline_output
        # repeat to align with repeated responses in rollout
        batch = batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True)
        batch = batch.union(gen_batch_output)
        if "response_mask" not in batch.batch.keys():
            batch.batch["response_mask"] = compute_response_mask(batch)
        # Balance the number of valid tokens across DP ranks.
        # NOTE: This usually changes the order of data in the `batch`,
        # which won't affect the advantage calculation (since it's based on uid),
        # but might affect the loss calculation (due to the change of mini-batching).
        if self.config.trainer.balance_batch:
            self._balance_batch(batch, metrics=metrics)
        # compute global_valid tokens
        batch.meta_info["global_token_num"] = torch.sum(batch.batch["attention_mask"], dim=-1).tolist()
        # get images_seqlens
        images_seqlens_all = []
        for multi_modal_input in batch.non_tensor_batch["multi_modal_inputs"]:
            if "image_grid_thw" not in multi_modal_input.keys():
                continue
            images_seqlens_all.extend(multi_modal_input["images_seqlens"].tolist())
        batch.meta_info["images_seqlens"] = images_seqlens_all
        return batch
def _fit_compute_reward(self, batch: DataProto) -> DataProto:
timing_raw = self.timing_raw
with marked_timer("reward", timing_raw, color="yellow"):
# compute reward model score
if self.use_rm and "rm_scores" not in batch.batch.keys():
batch_reward = self._compute_reward_colocate(batch)
batch = batch.union(batch_reward)
# Compute or extract reward_tensor and reward_extra_infos_dict for training
reward_tensor, reward_extra_infos_dict = extract_reward(batch)
self.reward_tensor = reward_tensor
self.reward_extra_infos_dict = reward_extra_infos_dict
return batch
def _fit_compute_log_prob(self, batch: DataProto) -> DataProto:
metrics = self.metrics
timing_raw = self.timing_raw
# Operating Mode Selection:
# - Bypass mode: Sets old_log_probs = rollout_log_probs (2 policies: π_rollout, π_θ)
# - Decoupled mode: Recomputes old_log_probs as proximal anchor (3 policies: π_rollout, π_old, π_θ)
# Note: π_old computed once per data batch, serves as stable reference during mini-batch updates
rollout_corr_config = self.config.algorithm.get("rollout_correction", None)
bypass_recomputing_logprobs = rollout_corr_config and rollout_corr_config.get("bypass_mode", False)
if bypass_recomputing_logprobs: # Use `rollout_log_probs`
from verl.trainer.ppo.rollout_corr_helper import apply_bypass_mode
apply_bypass_mode(
batch=batch,
rollout_corr_config=rollout_corr_config,
policy_loss_config=self.config.actor_rollout_ref.actor.policy_loss,
)
else: # Recompute old_log_probs
with marked_timer("old_log_prob", timing_raw, color="blue"):
old_log_prob, old_log_prob_mfu = self._compute_old_log_prob(batch)
entropys = old_log_prob.batch["entropys"]
response_masks = batch.batch["response_mask"]
actor_config = self.config.actor_rollout_ref.actor
entropy_agg = agg_loss(
loss_mat=entropys,
loss_mask=response_masks,
loss_agg_mode=actor_config.loss_agg_mode,
loss_scale_factor=actor_config.loss_scale_factor,
)
old_log_prob_metrics = {
"actor/entropy": entropy_agg.detach().item(),
"perf/mfu/actor_infer": old_log_prob_mfu,
}
metrics.update(old_log_prob_metrics)
old_log_prob.batch.pop("entropys")
if "routed_experts" in batch.batch and "routed_experts" in old_log_prob.batch:
router_mode = getattr(self.config.actor_rollout_ref.actor.router_replay, "mode", "disabled")
if router_mode == "R2":
batch.batch.pop("routed_experts")
else:
old_log_prob.batch.pop("routed_experts")
batch = batch.union(old_log_prob)
if "rollout_log_probs" in batch.batch.keys():
# TODO: we may want to add diff of probs too.
from verl.utils.debug.metrics import calculate_debug_metrics
metrics.update(calculate_debug_metrics(batch))
assert "old_log_probs" in batch.batch, f'"old_log_prob" not in {batch.batch.keys()=}'
return batch
def _fit_compute_ref_log_prob(self, batch: DataProto) -> DataProto:
timing_raw = self.timing_raw
if self.use_reference_policy:
with marked_timer(str(Role.RefPolicy), timing_raw, color="olive"):
ref_log_prob = self._compute_ref_log_prob(batch)
batch = batch.union(ref_log_prob)
return batch
def _fit_compute_critic(self, batch: DataProto) -> DataProto:
timing_raw = self.timing_raw
if self.use_critic:
with marked_timer("values", timing_raw, color="cyan"):
values = self._compute_values(batch)
batch = batch.union(values)
return batch
def _fit_compute_advantage(self, batch) -> DataProto:
metrics = self.metrics
timing_raw = self.timing_raw
reward_tensor = self.reward_tensor
reward_extra_infos_dict = self.reward_extra_infos_dict
with marked_timer("adv", timing_raw, color="brown"):
# we combine with rule-based rm
reward_extra_infos_dict: dict[str, list]
batch.batch["token_level_scores"] = reward_tensor
if reward_extra_infos_dict:
batch.non_tensor_batch.update({k: np.array(v) for k, v in reward_extra_infos_dict.items()})
# compute rewards. apply_kl_penalty if available
if self.config.algorithm.use_kl_in_reward:
batch, kl_metrics = apply_kl_penalty(
batch, kl_ctrl=self.kl_ctrl_in_reward, kl_penalty=self.config.algorithm.kl_penalty
)
metrics.update(kl_metrics)
else:
batch.batch["token_level_rewards"] = batch.batch["token_level_scores"]
# Compute rollout correction: IS weights, rejection sampling, and metrics
# Only runs in decoupled mode (computes once per batch using stable π_old)
# In bypass mode, this is skipped - actor computes metrics from evolving π_θ vs π_rollout
rollout_corr_config = self.config.algorithm.get("rollout_correction", None)
bypass_recomputing_logprobs = rollout_corr_config and rollout_corr_config.get("bypass_mode", False)
if (
rollout_corr_config is not None
and "rollout_log_probs" in batch.batch
and not bypass_recomputing_logprobs # Only in decoupled mode
):
from verl.trainer.ppo.rollout_corr_helper import compute_rollout_correction_and_add_to_batch
# Compute IS weights, apply rejection sampling, compute metrics
batch, is_metrics = compute_rollout_correction_and_add_to_batch(batch, rollout_corr_config)
# IS and off-policy metrics already have rollout_corr/ prefix
metrics.update(is_metrics)
# compute advantages, executed on the driver process
norm_adv_by_std_in_grpo = self.config.algorithm.get(
"norm_adv_by_std_in_grpo", True
) # GRPO adv normalization factor
batch = compute_advantage(
batch,
adv_estimator=self.config.algorithm.adv_estimator,
gamma=self.config.algorithm.gamma,
lam=self.config.algorithm.lam,
num_repeat=self.config.actor_rollout_ref.rollout.n,
norm_adv_by_std_in_grpo=norm_adv_by_std_in_grpo,
config=self.config.algorithm,
)
return batch
def _fit_update_critic(self, batch: DataProto) -> DataProto:
metrics = self.metrics
timing_raw = self.timing_raw
if self.use_critic:
with marked_timer("update_critic", timing_raw, color="pink"):
critic_output = self._update_critic(batch)
critic_output_metrics = reduce_metrics(critic_output.meta_info["metrics"])
metrics.update(critic_output_metrics)
return batch
def _fit_update_actor(self, batch: DataProto) -> DataProto:
metrics = self.metrics
timing_raw = self.timing_raw
# implement critic warmup
if self.config.trainer.critic_warmup <= self.global_steps:
# update actor
with marked_timer("update_actor", timing_raw, color="red"):
actor_output = self._update_actor(batch)
actor_output_metrics = reduce_metrics(actor_output.meta_info["metrics"])
metrics.update(actor_output_metrics)
return batch
def _fit_update_weights(self):
timing_raw = self.timing_raw
if self.config.trainer.critic_warmup <= self.global_steps:
# update weights from trainer to rollout
with marked_timer("update_weights", timing_raw, color="red"):
self.checkpoint_manager.update_weights(self.global_steps)
def _fit_dump_data(self, batch: DataProto):
timing_raw = self.timing_raw
reward_extra_infos_dict = self.reward_extra_infos_dict
# Log rollout generations if enabled
rollout_data_dir = self.config.trainer.get("rollout_data_dir", None)
if rollout_data_dir:
self._log_rollout_data(batch, reward_extra_infos_dict, timing_raw, rollout_data_dir)
def _fit_validate(self):
metrics = self.metrics
timing_raw = self.timing_raw
if self.config.trainer.test_freq > 0 and (
self.is_last_step or self.global_steps % self.config.trainer.test_freq == 0
):
with marked_timer("testing", timing_raw, color="green"):
val_metrics: dict = self._validate()
if self.is_last_step:
self.last_val_metrics = val_metrics
metrics.update(val_metrics)
def _fit_save_checkpoint(self):
timing_raw = self.timing_raw
# Check if the ESI (Elastic Server Instance)/training plan is close to expiration.
esi_close_to_expiration = should_save_ckpt_esi(
max_steps_duration=self.max_steps_duration,
redundant_time=self.config.trainer.esi_redundant_time,
)
# Check if the conditions for saving a checkpoint are met.
# The conditions include a mandatory condition (1) and
# one of the following optional conditions (2/3/4):
# 1. The save frequency is set to a positive value.
# 2. It's the last training step.
# 3. The current step number is a multiple of the save frequency.
# 4. The ESI(Elastic Server Instance)/training plan is close to expiration.
if self.config.trainer.save_freq > 0 and (
self.is_last_step or self.global_steps % self.config.trainer.save_freq == 0 or esi_close_to_expiration
):
if esi_close_to_expiration:
print("Force saving checkpoint: ESI instance expiration approaching.")
with marked_timer("save_checkpoint", timing_raw, color="green"):
# sleep replicas to avoid OOM during checkpoint saving
# self.checkpoint_manager.sleep_replicas()
self._save_checkpoint()
# wake replicas to avoid OOM during checkpoint saving
# TODO: Check separation is needed.
# self.checkpoint_manager.update_weights()
def _fit_stop_profile(self):
timing_raw = self.timing_raw
with marked_timer("stop_profile", timing_raw):
self.next_step_profile = (
self.global_steps + 1 in self.config.global_profiler.steps
if self.config.global_profiler.steps is not None
else False
)
self._stop_profiling(
self.curr_step_profile and not self.next_step_profile
if self.config.global_profiler.profile_continuous_steps
else self.curr_step_profile
)
self.prev_step_profile = self.curr_step_profile
self.curr_step_profile = self.next_step_profile
def _fit_collect_metrics(self, batch):
metrics = self.metrics
timing_raw = self.timing_raw
# collect metrics
metrics.update(compute_data_metrics(batch=batch, use_critic=self.use_critic))
metrics.update(compute_timing_metrics(batch=batch, timing_raw=timing_raw))
# TODO: implement actual tflpo and theoretical tflpo
n_gpus = self.resource_pool_manager.get_n_gpus()
metrics.update(compute_throughout_metrics(batch=batch, timing_raw=timing_raw, n_gpus=n_gpus))
# compute variance proxy metrics
gradient_norm = metrics.get("actor/grad_norm", None)
metrics.update(compute_variance_proxy_metrics(batch=batch, gradient_norm=gradient_norm))
def _fit_torch_memory(self):
if (
hasattr(self.config.actor_rollout_ref.actor, "profiler")
and self.config.actor_rollout_ref.actor.profiler.tool == "torch_memory"
):
self.actor_rollout_wg.dump_memory_snapshot(
tag=f"post_update_step{self.global_steps}", sub_dir=f"step{self.global_steps}"
)
def _fit_experimental(self, batch):
# this is experimental and may be changed/removed in the future in favor of a general-purpose one
if isinstance(self.train_dataloader.sampler, AbstractCurriculumSampler):
self.train_dataloader.sampler.update(batch=batch)
# this is experimental and may be changed/removed in the future
# in favor of a general-purpose data buffer pool
if hasattr(self.train_dataset, "on_batch_end"):
# The dataset may be changed after each training batch
self.train_dataset.on_batch_end(batch=batch)
def _fit_postprocess_step(self):
metrics = self.metrics
timing_raw = self.timing_raw
steps_duration = timing_raw["step"]
self.max_steps_duration = max(self.max_steps_duration, steps_duration)
# TODO: make a canonical logger that supports various backend
self.logger.log(data=metrics, step=self.global_steps)
self.progress_bar.update(1)
self.global_steps += 1
if self.is_last_step:
if hasattr(self.actor_rollout_wg, "async_calls_finalize_fn_exec"):
self.actor_rollout_wg.async_calls_finalize_fn_exec(blocking=True)
pprint(f"Final validation metrics: {self.last_val_metrics}")
self.progress_bar.close()
| {
"repo_id": "verl-project/verl",
"file_path": "verl/experimental/separation/ray_trainer.py",
"license": "Apache License 2.0",
"lines": 655,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:tests/utils/test_server_profiler.py | # Copyright 2026 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import unittest
from unittest.mock import AsyncMock, MagicMock, patch
from verl.utils.profiler.config import (
ProfilerConfig,
TorchProfilerToolConfig,
build_sglang_profiler_args,
build_vllm_profiler_args,
)
class TestServerProfilerArgs(unittest.TestCase):
def test_build_vllm_profiler_args(self):
# Case 1: All features enabled
tool_config = TorchProfilerToolConfig(contents=["stack", "shapes", "memory"])
config = ProfilerConfig(save_path="/tmp/test", tool_config=tool_config)
# Patch environ to avoid side effects and verify calls
with patch.dict(os.environ, {}, clear=True):
args = build_vllm_profiler_args(config, tool_config, rank=0)
# Check Env vars (backward compatibility)
self.assertEqual(os.environ.get("VLLM_TORCH_PROFILER_DIR"), "/tmp/test/agent_loop_rollout_replica_0")
self.assertEqual(os.environ.get("VLLM_TORCH_PROFILER_WITH_STACK"), "1")
self.assertEqual(os.environ.get("VLLM_TORCH_PROFILER_RECORD_SHAPES"), "1")
self.assertEqual(os.environ.get("VLLM_TORCH_PROFILER_WITH_PROFILE_MEMORY"), "1")
# Check Args (new API)
self.assertIn("profiler_config", args)
profiler_config_dict = json.loads(args["profiler_config"])
self.assertEqual(profiler_config_dict["torch_profiler_dir"], "/tmp/test/agent_loop_rollout_replica_0")
self.assertTrue(profiler_config_dict["torch_profiler_with_stack"])
self.assertTrue(profiler_config_dict["torch_profiler_record_shapes"])
self.assertTrue(profiler_config_dict["torch_profiler_with_memory"])
def test_build_sglang_profiler_args(self):
# Case 1: Basic features
tool_config = TorchProfilerToolConfig(contents=["stack", "shapes", "memory"])
config = ProfilerConfig(save_path="/tmp/test", tool_config=tool_config)
with self.assertWarns(UserWarning):
args = build_sglang_profiler_args(config, tool_config, rank=0)
self.assertEqual(args["output_dir"], "/tmp/test/agent_loop_rollout_replica_0")
self.assertTrue(args["with_stack"])
self.assertTrue(args["record_shapes"])
class TestServerProfilerFunctionality(unittest.IsolatedAsyncioTestCase):
async def test_vllm_start_stop_profile(self):
try:
# Import strictly inside test to avoid import errors if dependencies missing
from verl.workers.rollout.vllm_rollout.vllm_async_server import vLLMHttpServer
except ImportError:
self.skipTest("vllm or dependencies not installed")
return
# Mock dependencies
mock_profiler = MagicMock()
mock_profiler.check_enable.return_value = True
mock_profiler.check_this_rank.return_value = True
mock_profiler.is_discrete_mode.return_value = True
mock_engine = AsyncMock()
# Mock self object
mock_self = MagicMock()
mock_self.profiler_controller = mock_profiler
mock_self.engine = mock_engine
# Test start_profile using the unbound method
await vLLMHttpServer.start_profile(mock_self)
mock_engine.start_profile.assert_called_once()
# Test stop_profile
await vLLMHttpServer.stop_profile(mock_self)
mock_engine.stop_profile.assert_called_once()
async def test_sglang_start_stop_profile(self):
try:
# Import strictly inside test to avoid import errors if dependencies missing
from verl.workers.rollout.sglang_rollout.async_sglang_server import SGLangHttpServer
except ImportError:
self.skipTest("sglang or dependencies not installed")
return
# Mock dependencies
mock_profiler = MagicMock()
mock_profiler.check_enable.return_value = True
mock_profiler.check_this_rank.return_value = True
mock_profiler.is_discrete_mode.return_value = True
mock_profiler.config = MagicMock()
mock_profiler.tool_config = MagicMock()
mock_tokenizer_manager = AsyncMock()
mock_self = MagicMock()
mock_self.profiler_controller = mock_profiler
mock_self.tokenizer_manager = mock_tokenizer_manager
mock_self.replica_rank = 0
# Mock build_sglang_profiler_args to return known dict
with patch("verl.workers.rollout.sglang_rollout.async_sglang_server.build_sglang_profiler_args") as mock_build:
mock_args = {"arg1": "val1"}
mock_build.return_value = mock_args
# Test start_profile
await SGLangHttpServer.start_profile(mock_self)
mock_build.assert_called_once_with(mock_profiler.config, mock_profiler.tool_config, mock_self.replica_rank)
mock_tokenizer_manager.start_profile.assert_called_once_with(**mock_args)
# Test stop_profile
await SGLangHttpServer.stop_profile(mock_self)
mock_tokenizer_manager.stop_profile.assert_called_once()
if __name__ == "__main__":
unittest.main()
| {
"repo_id": "verl-project/verl",
"file_path": "tests/utils/test_server_profiler.py",
"license": "Apache License 2.0",
"lines": 108,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:tests/utils/test_torch_profile.py | # Copyright 2026 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest.mock import MagicMock, patch
import torch
from verl.utils.profiler.config import ProfilerConfig, TorchProfilerToolConfig
from verl.utils.profiler.torch_profile import Profiler, get_torch_profiler
class TestTorchProfile(unittest.TestCase):
def setUp(self):
# Reset Profiler class state
Profiler._define_count = 0
@patch("torch.profiler.profile")
def test_get_torch_profiler(self, mock_profile):
# Test wrapper function
get_torch_profiler(contents=["cpu", "cuda", "stack"], save_path="/tmp/test", rank=0)
mock_profile.assert_called_once()
_, kwargs = mock_profile.call_args
# Verify activities
activities = kwargs["activities"]
self.assertIn(torch.profiler.ProfilerActivity.CPU, activities)
self.assertIn(torch.profiler.ProfilerActivity.CUDA, activities)
# Verify options
self.assertTrue(kwargs["with_stack"])
self.assertFalse(kwargs["record_shapes"])
self.assertFalse(kwargs["profile_memory"])
@patch("verl.utils.profiler.torch_profile.get_torch_profiler")
def test_profiler_lifecycle(self, mock_get_profiler):
# Mock the underlying torch profiler object
mock_prof_instance = MagicMock()
mock_get_profiler.return_value = mock_prof_instance
# Initialize
tool_config = TorchProfilerToolConfig(contents=["cpu"], discrete=False)
config = ProfilerConfig(save_path="/tmp/test", enable=True, tool_config=tool_config)
profiler = Profiler(rank=0, config=config, tool_config=tool_config)
# Test Start
profiler.start()
mock_get_profiler.assert_called_once()
mock_prof_instance.start.assert_called_once()
# Test Step
profiler.step()
mock_prof_instance.step.assert_called_once()
# Test Stop
profiler.stop()
mock_prof_instance.stop.assert_called_once()
@patch("verl.utils.profiler.torch_profile.get_torch_profiler")
def test_discrete_mode(self, mock_get_profiler):
# Mock for discrete mode
mock_prof_instance = MagicMock()
mock_get_profiler.return_value = mock_prof_instance
tool_config = TorchProfilerToolConfig(contents=["cpu"], discrete=True)
config = ProfilerConfig(save_path="/tmp/test", enable=True, tool_config=tool_config)
profiler = Profiler(rank=0, config=config, tool_config=tool_config)
# In discrete mode, start/stop shouldn't trigger global profiler immediately
profiler.start()
mock_get_profiler.assert_not_called()
profiler.stop()
mock_prof_instance.stop.assert_not_called()
if __name__ == "__main__":
unittest.main()
| {
"repo_id": "verl-project/verl",
"file_path": "tests/utils/test_torch_profile.py",
"license": "Apache License 2.0",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:tests/utils/test_fsdp_lora_merge.py | # Copyright 2026 Amazon.com Inc and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import torch
import torch.distributed
import torch.multiprocessing as mp
from peft import LoraConfig, get_peft_model
from torch.distributed import init_device_mesh
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp import MixedPrecision, ShardingStrategy
from transformers import AutoModelForCausalLM, GptOssConfig, Qwen2Config
from verl.utils.device import get_device_name, get_nccl_backend, get_torch_device
from verl.utils.fsdp_utils import (
MixedPrecisionPolicy,
apply_fsdp2,
get_fsdp_wrap_policy,
merged_lora_context,
)
def _test_merged_lora_context_worker(
rank, world_size, rendezvous_file, strategy, model_config, lora_config_dict, backup_adapters
):
"""Worker function for testing merged_lora_context with FSDP.
Args:
rank: Process rank
world_size: Total number of processes
rendezvous_file: Path to rendezvous file for distributed init
strategy: FSDP strategy ("fsdp" or "fsdp2")
model_config: Model configuration object (Qwen2Config, GptOssConfig, etc.)
lora_config_dict: Dictionary of LoRA configuration parameters
backup_adapters: Whether to backup adapter weights before merging
"""
get_torch_device().set_device(rank)
torch.distributed.init_process_group(
backend=get_nccl_backend(),
init_method=f"file://{rendezvous_file}",
rank=rank,
world_size=world_size,
)
device_mesh = init_device_mesh(get_device_name(), mesh_shape=(world_size,), mesh_dim_names=("dp",))
# Create model from provided config
with torch.device(get_device_name()):
model = AutoModelForCausalLM.from_config(
config=model_config, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2"
)
model = model.to(device=get_device_name())
# Add LoRA with provided config
lora_config = LoraConfig(**lora_config_dict)
model = get_peft_model(model, lora_config)
# Initialize LoRA adapter weights to non-zero values for testing
from peft.tuners.lora import LoraLayer
with torch.no_grad():
for name, module in model.named_modules():
if isinstance(module, LoraLayer):
for adapter_name in module.lora_A.keys():
if adapter_name in module.lora_A:
# Initialize lora_A with values around 1.0
module.lora_A[adapter_name].weight.data.uniform_(0.5, 1.5)
if adapter_name in module.lora_B:
# Initialize lora_B with values around 2.0
module.lora_B[adapter_name].weight.data.uniform_(1.5, 2.5)
# Wrap model with FSDP
if strategy == "fsdp":
mixed_precision = MixedPrecision(
param_dtype=torch.bfloat16, reduce_dtype=torch.float32, buffer_dtype=torch.float32
)
model = FSDP(
model,
use_orig_params=True,
device_id=get_torch_device().current_device(),
sharding_strategy=ShardingStrategy.FULL_SHARD,
mixed_precision=mixed_precision,
device_mesh=device_mesh,
auto_wrap_policy=get_fsdp_wrap_policy(module=model, is_lora=True),
)
else:
mp_policy = MixedPrecisionPolicy(
param_dtype=torch.bfloat16, reduce_dtype=torch.float32, cast_forward_inputs=True
)
fsdp_kwargs = {
"mesh": device_mesh,
"mp_policy": mp_policy,
}
apply_fsdp2(model, fsdp_kwargs, {})
# Test: backup adapter weights, merge, restore
from peft.tuners.lora import LoraLayer
lora_layers = [m for m in model.modules() if isinstance(m, LoraLayer)]
# Verify LoRA layers exist
assert len(lora_layers) > 0, "Model should have LoRA layers"
# Initially not merged
for layer in lora_layers:
assert not getattr(layer, "merged", False), "LoRA should not be merged initially"
# Backup adapter weights before merge
from peft.utils.save_and_load import get_peft_model_state_dict
original_adapter_weights = get_peft_model_state_dict(model)
# Use merged_lora_context with the specified backup_adapters flag
for _ in range(3):
with merged_lora_context(model, backup_adapters=backup_adapters):
# Inside context, LoRA should be merged
for layer in lora_layers:
assert getattr(layer, "merged", False), "LoRA should be merged inside context"
# After context, check the state based on backup_adapters flag
for layer in lora_layers:
assert not getattr(layer, "merged", False), "LoRA should be unmerged after context"
restored_adapter_weights = get_peft_model_state_dict(model)
# Verify adapter weights are restored exactly
for key in original_adapter_weights.keys():
assert key in restored_adapter_weights, f"Key {key} should be in restored weights"
torch.testing.assert_close(
original_adapter_weights[key].cpu(),
restored_adapter_weights[key].cpu(),
rtol=1e-5,
atol=1e-6,
msg=f"Adapter weight {key} should be restored to original value",
)
if rank == 0:
model_name = model_config.__class__.__name__
backup_mode = "with backup" if backup_adapters else "without backup"
print(f"merged_lora_context test with {model_name} {strategy} {backup_mode} passed on {world_size} GPUs!")
torch.distributed.barrier()
torch.distributed.destroy_process_group()
@pytest.mark.parametrize("world_size", (2,))
@pytest.mark.parametrize("strategy", ("fsdp", "fsdp2"))
@pytest.mark.parametrize("backup_adapters", (True, False))
def test_merged_lora_context_qwen2(world_size, strategy, backup_adapters, tmp_path):
"""Test merged_lora_context with FSDP on Qwen2 model."""
rendezvous_file = str(tmp_path / f"rdzv_file_qwen2_{backup_adapters}")
os.makedirs(os.path.dirname(rendezvous_file), exist_ok=True)
# Create Qwen2 model config
model_config = Qwen2Config(num_hidden_layers=2, num_attention_heads=2, hidden_size=128)
# Create LoRA config for Qwen2
lora_config_dict = {
"r": 8,
"lora_alpha": 16,
"target_modules": ["q_proj", "v_proj"],
"lora_dropout": 0.0,
"bias": "none",
"task_type": "CAUSAL_LM",
}
mp.spawn(
fn=_test_merged_lora_context_worker,
args=(world_size, rendezvous_file, strategy, model_config, lora_config_dict, backup_adapters),
nprocs=world_size,
join=True,
)
@pytest.mark.parametrize("world_size", (2,))
@pytest.mark.parametrize("strategy", ("fsdp", "fsdp2"))
@pytest.mark.parametrize("backup_adapters", (True, False))
def test_merged_lora_context_gptoss(world_size, strategy, backup_adapters, tmp_path):
"""Test merged_lora_context with FSDP on GPT-OSS model."""
rendezvous_file = str(tmp_path / f"rdzv_file_gptoss_{backup_adapters}")
os.makedirs(os.path.dirname(rendezvous_file), exist_ok=True)
# Create GPT-OSS model config
model_config = GptOssConfig(
num_hidden_layers=2,
num_attention_heads=2,
num_key_value_heads=2,
hidden_size=128,
intermediate_size=256,
)
# Create LoRA config for GPT-OSS
lora_config_dict = {
"r": 8,
"lora_alpha": 16,
"target_modules": "all-linear",
"target_parameters": ["mlp.experts.gate_up_proj", "mlp.experts.down_proj"],
"exclude_modules": ["mlp.router"],
"lora_dropout": 0.0,
"bias": "none",
"task_type": "CAUSAL_LM",
}
mp.spawn(
fn=_test_merged_lora_context_worker,
args=(world_size, rendezvous_file, strategy, model_config, lora_config_dict, backup_adapters),
nprocs=world_size,
join=True,
)
| {
"repo_id": "verl-project/verl",
"file_path": "tests/utils/test_fsdp_lora_merge.py",
"license": "Apache License 2.0",
"lines": 188,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:tests/utils/test_normalize_peft_param_name.py | # Copyright 2026 Amazon.com Inc and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import torch
import torch.distributed
import torch.multiprocessing as mp
from peft import LoraConfig, get_peft_model
from torch.distributed import init_device_mesh
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp import MixedPrecision, ShardingStrategy, StateDictType
from transformers import AutoModelForCausalLM, Qwen3Config
from verl.utils.device import get_device_name, get_nccl_backend, get_torch_device
from verl.utils.fsdp_utils import (
MixedPrecisionPolicy,
apply_fsdp2,
get_fsdp_wrap_policy,
normalize_peft_param_name,
)
from verl.utils.model import convert_weight_keys
def _test_normalize_peft_with_fsdp_worker(rank, world_size, rendezvous_file, strategy):
"""Worker function for testing normalize_peft_param_name with FSDP-wrapped models.
Args:
rank: Process rank
world_size: Total number of processes
rendezvous_file: Path to rendezvous file for distributed init
strategy: FSDP strategy ("fsdp" or "fsdp2")
"""
get_torch_device().set_device(rank)
torch.distributed.init_process_group(
backend=get_nccl_backend(),
init_method=f"file://{rendezvous_file}",
rank=rank,
world_size=world_size,
)
device_mesh = init_device_mesh(get_device_name(), mesh_shape=(world_size,), mesh_dim_names=("dp",))
# Create model config
config = Qwen3Config(
num_hidden_layers=2,
num_attention_heads=2,
num_key_value_heads=2,
hidden_size=128,
intermediate_size=256,
)
# Create base model
with torch.device(get_device_name()):
base_model = AutoModelForCausalLM.from_config(
config=config, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2"
)
base_model = base_model.to(device=get_device_name())
# Create PEFT model with LoRA
lora_config = LoraConfig(
r=8, lora_alpha=16, target_modules="all-linear", lora_dropout=0.0, bias="none", task_type="CAUSAL_LM"
)
peft_model = get_peft_model(base_model, lora_config)
# Wrap base model with FSDP (create a fresh copy for base model)
with torch.device(get_device_name()):
base_model_for_fsdp = AutoModelForCausalLM.from_config(
config=config, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2"
)
base_model_for_fsdp = base_model_for_fsdp.to(device=get_device_name())
if strategy == "fsdp":
mixed_precision = MixedPrecision(
param_dtype=torch.bfloat16, reduce_dtype=torch.float32, buffer_dtype=torch.float32
)
# Wrap base model with FSDP
fsdp_base_model = FSDP(
base_model_for_fsdp,
use_orig_params=True,
device_id=get_torch_device().current_device(),
sharding_strategy=ShardingStrategy.FULL_SHARD,
mixed_precision=mixed_precision,
device_mesh=device_mesh,
auto_wrap_policy=get_fsdp_wrap_policy(module=base_model_for_fsdp, is_lora=False),
)
# Wrap PEFT model with FSDP
fsdp_peft_model = FSDP(
peft_model,
use_orig_params=True,
device_id=get_torch_device().current_device(),
sharding_strategy=ShardingStrategy.FULL_SHARD,
mixed_precision=mixed_precision,
device_mesh=device_mesh,
auto_wrap_policy=get_fsdp_wrap_policy(module=peft_model, is_lora=True),
)
else:
# FSDP2
mp_policy = MixedPrecisionPolicy(
param_dtype=torch.bfloat16, reduce_dtype=torch.float32, cast_forward_inputs=True
)
fsdp_kwargs = {
"mesh": device_mesh,
"mp_policy": mp_policy,
}
# Wrap base model with FSDP2
apply_fsdp2(base_model_for_fsdp, fsdp_kwargs, {})
fsdp_base_model = base_model_for_fsdp
# Wrap PEFT model with FSDP2
apply_fsdp2(peft_model, fsdp_kwargs, {})
fsdp_peft_model = peft_model
# Get state dicts from FSDP models
if strategy == "fsdp":
# FSDP v1: Use full_state_dict context
with FSDP.state_dict_type(fsdp_base_model, StateDictType.FULL_STATE_DICT):
base_state_dict = fsdp_base_model.state_dict()
with FSDP.state_dict_type(fsdp_peft_model, StateDictType.FULL_STATE_DICT):
peft_state_dict = fsdp_peft_model.state_dict()
else:
# FSDP2: Direct state_dict call
base_state_dict = fsdp_base_model.state_dict()
peft_state_dict = fsdp_peft_model.state_dict()
# Normalize PEFT model state dict
normalized_peft_state_dict = normalize_peft_param_name(peft_state_dict)
base_state_dict = convert_weight_keys(
base_state_dict, getattr(fsdp_base_model, "_fsdp_wrapped_module", fsdp_base_model)
)
normalized_peft_state_dict = convert_weight_keys(
normalized_peft_state_dict, getattr(fsdp_peft_model, "_fsdp_wrapped_module", fsdp_peft_model)
)
# Get key sets
base_keys = set(base_state_dict.keys())
normalized_peft_keys = set(normalized_peft_state_dict.keys())
# if rank == 0:
print(f"\n=== FSDP {strategy} Test Results ===")
print(f"Base model keys: {base_keys=}")
print(f"Normalized PEFT keys: {normalized_peft_keys=}")
# Check for missing keys
missing_keys = base_keys - normalized_peft_keys
if missing_keys:
print(f"Missing keys from base model: {missing_keys}")
# Check for extra keys
extra_keys = normalized_peft_keys - base_keys
if extra_keys:
print(f"Extra keys not in base model: {extra_keys}")
# Verify that all base model keys are in the normalized PEFT keys
missing_keys = base_keys - normalized_peft_keys
assert len(missing_keys) == 0, f"Missing keys from base model: {missing_keys}"
# Verify that all normalized PEFT keys are in the base model
extra_keys = normalized_peft_keys - base_keys
assert len(extra_keys) == 0, f"Extra keys not in base model: {extra_keys}"
# Verify exact match
assert base_keys == normalized_peft_keys, "Normalized PEFT keys should exactly match FSDP base model keys"
# Verify tensor shapes match
for key in base_keys:
base_shape = base_state_dict[key].shape
peft_shape = normalized_peft_state_dict[key].shape
assert base_shape == peft_shape, f"Shape mismatch for {key}: base={base_shape}, peft={peft_shape}"
# Verify no LoRA keys remain in normalized state dict
lora_keys = [k for k in normalized_peft_keys if "lora_" in k or "adapter_" in k]
assert len(lora_keys) == 0, f"Normalized state dict should not contain LoRA keys, but found: {lora_keys}"
if rank == 0:
print(f"✓ All tests passed for FSDP {strategy}")
torch.distributed.barrier()
torch.distributed.destroy_process_group()
@pytest.mark.parametrize("world_size", (2,))
@pytest.mark.parametrize("strategy", ("fsdp", "fsdp2"))
def test_normalize_peft_param_name_with_fsdp(world_size, strategy, tmp_path):
    """Test normalize_peft_param_name with FSDP-wrapped models.

    Spawns ``world_size`` worker processes that wrap both a base and a PEFT
    model with the given FSDP strategy and verify that the normalized PEFT
    keys match the FSDP base-model keys.
    """
    rdzv_path = str(tmp_path / f"rdzv_file_normalize_{strategy}")
    # Ensure the rendezvous file's directory exists before workers touch it.
    os.makedirs(os.path.dirname(rdzv_path), exist_ok=True)
    mp.spawn(
        fn=_test_normalize_peft_with_fsdp_worker,
        args=(world_size, rdzv_path, strategy),
        nprocs=world_size,
        join=True,
    )
if __name__ == "__main__":
    # Allow running this test module directly (outside a pytest invocation).
    pytest.main([__file__, "-v"])
| {
"repo_id": "verl-project/verl",
"file_path": "tests/utils/test_normalize_peft_param_name.py",
"license": "Apache License 2.0",
"lines": 182,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:tests/utils/test_normalize_peft_param_name_on_cpu.py | # Copyright 2026 Amazon.com Inc and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM, Qwen3Config
from verl.utils.fsdp_utils import normalize_peft_param_name
def create_base_model():
    """Build a tiny two-layer Qwen3 causal LM used as the test base model."""
    tiny_cfg = Qwen3Config(
        num_hidden_layers=2,
        num_attention_heads=2,
        num_key_value_heads=2,
        hidden_size=128,
        intermediate_size=256,
    )
    return AutoModelForCausalLM.from_config(tiny_cfg)
def create_peft_model():
    """Wrap the tiny base model with LoRA adapters on every linear layer."""
    adapter_cfg = LoraConfig(
        r=8, lora_alpha=16, target_modules="all-linear", lora_dropout=0.0, bias="none", task_type="CAUSAL_LM"
    )
    return get_peft_model(create_base_model(), adapter_cfg)
@pytest.fixture
def base_model():
    """Fixture yielding a fresh tiny base model."""
    return create_base_model()
@pytest.fixture
def peft_model():
    """Fixture yielding a fresh LoRA-wrapped PEFT model."""
    return create_peft_model()
def test_normalize_peft_param_name_keys_match_base_model():
    """Normalized PEFT state-dict keys should exactly equal the base-model keys."""
    base_state = create_base_model().state_dict()
    peft_state = create_peft_model().state_dict()
    normalized_state = normalize_peft_param_name(peft_state)

    base_keys = set(base_state.keys())
    normalized_peft_keys = set(normalized_state.keys())
    print(f"{base_keys=}")
    print(f"{normalized_peft_keys=}")

    # Nothing from the base model may be lost by normalization.
    missing_keys = base_keys - normalized_peft_keys
    assert not missing_keys, f"Missing keys from base model: {missing_keys}"
    # Nothing extra (e.g. leftover adapter keys) may be introduced either.
    extra_keys = normalized_peft_keys - base_keys
    assert not extra_keys, f"Extra keys not in base model: {extra_keys}"
    # The two checks above imply equality; assert it explicitly as well.
    assert base_keys == normalized_peft_keys, "Normalized PEFT keys should exactly match base model keys"
def test_normalize_peft_param_name_removes_lora_keys(peft_model):
    """LoRA-specific parameters must be dropped by normalization."""
    raw_state = peft_model.state_dict()

    # Sanity check: the un-normalized PEFT state dict carries lora_* entries.
    assert [k for k in raw_state if "lora_" in k], "PEFT model should have LoRA parameters"

    normalized = normalize_peft_param_name(raw_state)
    lora_keys_after = [k for k in normalized if "lora_" in k]
    assert not lora_keys_after, (
        f"Normalized state dict should not contain LoRA keys, but found: {lora_keys_after}"
    )
def test_normalize_peft_param_name_removes_base_model_prefix(peft_model):
    """The PEFT ``base_model`` prefix must be stripped from parameter names."""
    raw_state = peft_model.state_dict()

    # Sanity check: raw PEFT keys are prefixed with base_model.
    assert [k for k in raw_state if "base_model" in k], "PEFT model should have base_model prefix"

    normalized = normalize_peft_param_name(raw_state)
    base_model_keys_after = [k for k in normalized if "base_model" in k]
    assert not base_model_keys_after, (
        f"Normalized keys should not contain base_model prefix, but found: {base_model_keys_after}"
    )
def test_normalize_peft_param_name_removes_base_layer_suffix(peft_model):
    """The PEFT ``.base_layer`` suffix must be stripped from parameter names."""
    raw_state = peft_model.state_dict()

    # Sanity check: raw PEFT keys carry the .base_layer wrapper suffix.
    assert [k for k in raw_state if ".base_layer" in k], "PEFT model should have .base_layer suffix"

    normalized = normalize_peft_param_name(raw_state)
    base_layer_keys_after = [k for k in normalized if ".base_layer" in k]
    assert not base_layer_keys_after, (
        f"Normalized keys should not contain .base_layer suffix, but found: {base_layer_keys_after}"
    )
def test_normalize_peft_param_name_tensor_shapes_match(base_model, peft_model):
    """Every base-model tensor keeps its shape in the normalized PEFT dict."""
    base_state = base_model.state_dict()
    normalized = normalize_peft_param_name(peft_model.state_dict())

    for key, base_tensor in base_state.items():
        assert key in normalized, f"Key {key} not found in normalized PEFT state dict"
        base_shape = base_tensor.shape
        peft_shape = normalized[key].shape
        assert base_shape == peft_shape, f"Shape mismatch for {key}: base={base_shape}, peft={peft_shape}"
def test_normalize_peft_param_name_empty_dict():
    """An empty state dict should normalize to an empty dict."""
    assert normalize_peft_param_name({}) == {}, "Empty dict should return empty dict"
@pytest.mark.parametrize(
    "lora_key_pattern",
    [
        "model.layers.0.self_attn.q_proj.lora_A.default.weight",
        "model.layers.0.self_attn.q_proj.lora_B.default.weight",
        "model.layers.0.adapter_layer.weight",
        "base_model.model.layers.0.lora_embedding_A",
    ],
)
def test_normalize_peft_param_name_filters_lora_patterns(lora_key_pattern):
    """Each LoRA-style key pattern is filtered while plain keys survive."""
    candidate = {
        lora_key_pattern: torch.randn(10, 10),
        "model.layers.0.weight": torch.randn(10, 10),
    }
    result = normalize_peft_param_name(candidate)

    assert lora_key_pattern not in result, f"LoRA key {lora_key_pattern} should be filtered out"
    assert len(result) == 1, "Should have exactly one key remaining"
    assert "model.layers.0.weight" in result, "Regular weight should remain"
if __name__ == "__main__":
    # Allow running this test module directly (outside a pytest invocation).
    pytest.main([__file__, "-v"])
| {
"repo_id": "verl-project/verl",
"file_path": "tests/utils/test_normalize_peft_param_name_on_cpu.py",
"license": "Apache License 2.0",
"lines": 141,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:tests/utils/test_padding_on_cpu.py | # Copyright 2026 Amazon.com Inc and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from tensordict import TensorDict
from verl.workers.utils.padding import left_right_2_no_padding, no_padding_2_padding
def test_padding_conversion_with_log_probs():
    """Log-probability tensors must stay padded after the no-padding conversion.

    Regression test for a bug where ratio values came out ~451,728 instead of
    ~1.0: old_log_probs must STAY in padded format and be sliced inside the
    loss computation to line up with the model's log_prob output, rather than
    being converted to nested format.
    """
    batch_size, max_seq_len, max_response_len = 4, 128, 64

    input_ids = torch.randint(0, 1000, (batch_size, max_seq_len))

    # Attention masks with a different valid length per sample.
    valid_lens = [100, 120, 90, 128]
    attention_mask = torch.zeros(batch_size, max_seq_len)
    for row, vlen in enumerate(valid_lens):
        attention_mask[row, :vlen] = 1

    # Response masks with a different response length per sample.
    response_lens = [50, 60, 45, 64]
    response_mask = torch.zeros(batch_size, max_response_len)
    for row, rlen in enumerate(response_lens):
        response_mask[row, :rlen] = 1

    position_ids = torch.arange(max_seq_len).unsqueeze(0).expand(batch_size, -1)

    data = TensorDict(
        {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "response_mask": response_mask,
            "position_ids": position_ids,
            # Log-prob tensors start out in padded format.
            "old_log_probs": torch.randn(batch_size, max_seq_len),
            "ref_log_prob": torch.randn(batch_size, max_seq_len),
            "advantages": torch.randn(batch_size, max_response_len),
            "rollout_log_probs": torch.randn(batch_size, max_seq_len),
        }
    )

    data_converted = left_right_2_no_padding(data)

    # Token tensors become nested (jagged) tensors.
    assert isinstance(data_converted["input_ids"], torch.Tensor)
    assert data_converted["input_ids"].is_nested
    assert data_converted["position_ids"].is_nested

    # Log-prob tensors remain padded; the loss computation slices them later.
    assert isinstance(data_converted["old_log_probs"], torch.Tensor)
    assert not data_converted["old_log_probs"].is_nested, "old_log_probs should remain in padded format"
    assert not data_converted["ref_log_prob"].is_nested, "ref_log_prob should remain in padded format"
    assert not data_converted["advantages"].is_nested, "advantages should remain in padded format"
    assert not data_converted["rollout_log_probs"].is_nested, "rollout_log_probs should remain in padded format"

    # Padded tensors keep their original shapes.
    assert data_converted["old_log_probs"].shape == (batch_size, max_seq_len)
    assert data_converted["ref_log_prob"].shape == (batch_size, max_seq_len)
    assert data_converted["advantages"].shape == (batch_size, max_response_len)
    assert data_converted["rollout_log_probs"].shape == (batch_size, max_seq_len)

    # Each nested input_ids row holds exactly that sample's valid tokens.
    for i, vlen in enumerate(valid_lens):
        assert data_converted["input_ids"][i].numel() == vlen, (
            f"Batch {i}: input_ids should have {vlen} elements, got {data_converted['input_ids'][i].numel()}"
        )
def test_padding_conversion_without_log_probs():
    """Conversion should still work when no log-prob tensors are present."""
    batch_size, max_seq_len, max_response_len = 4, 128, 64

    data = TensorDict(
        {
            "input_ids": torch.randint(0, 1000, (batch_size, max_seq_len)),
            "attention_mask": torch.ones(batch_size, max_seq_len),
            "response_mask": torch.ones(batch_size, max_response_len),
            "position_ids": torch.arange(max_seq_len).unsqueeze(0).expand(batch_size, -1),
        }
    )

    data_converted = left_right_2_no_padding(data)

    # Token tensors become nested; absent log-prob keys must stay absent.
    assert data_converted["input_ids"].is_nested
    assert data_converted["position_ids"].is_nested
    assert "old_log_probs" not in data_converted
    assert "ref_log_prob" not in data_converted
def test_padding_roundtrip():
    """Padded -> nested -> padded must preserve values in the response region."""
    batch_size, max_seq_len, max_response_len = 2, 64, 32
    prompt_len = max_seq_len - max_response_len  # 32

    # Known token ids 1..max_seq_len, identical across the batch.
    input_ids = torch.arange(1, max_seq_len + 1).unsqueeze(0).expand(batch_size, -1).clone()
    attention_mask = torch.ones(batch_size, max_seq_len)
    response_mask = torch.ones(batch_size, max_response_len)
    position_ids = torch.arange(max_seq_len).unsqueeze(0).expand(batch_size, -1)

    # no_padding_2_padding requires nested prompts/responses in the TensorDict.
    prompts_nested = torch.nested.as_nested_tensor(
        [input_ids[i, :prompt_len] for i in range(batch_size)], layout=torch.jagged
    )
    responses_nested = torch.nested.as_nested_tensor(
        [input_ids[i, prompt_len:] for i in range(batch_size)], layout=torch.jagged
    )

    data = TensorDict(
        {
            "input_ids": input_ids,
            "prompts": prompts_nested,
            "responses": responses_nested,
            "attention_mask": attention_mask,
            "response_mask": response_mask,
            "position_ids": position_ids,
        }
    )

    data_nested = left_right_2_no_padding(data)
    assert data_nested["input_ids"].is_nested

    recovered = no_padding_2_padding(data_nested["input_ids"], data_nested)

    # Only the response region is recovered.
    assert recovered.shape == (batch_size, max_response_len)
    # Values are left-shifted by 1 for log-prob alignment:
    # response tokens 33..64 come back as 32..63.
    expected = torch.arange(prompt_len, max_seq_len, dtype=torch.long).unsqueeze(0).expand(batch_size, -1)
    torch.testing.assert_close(recovered, expected)
def test_no_padding_2_padding_varying_lengths():
    """no_padding_2_padding must handle heterogeneous prompt/response lengths."""
    batch_size, max_seq_len, max_response_len = 4, 100, 50
    prompt_lens = [10, 30, 5, 40]
    response_lens = [40, 20, 45, 10]

    input_ids = torch.zeros(batch_size, max_seq_len, dtype=torch.long)
    attention_mask = torch.zeros(batch_size, max_seq_len)
    response_mask = torch.zeros(batch_size, max_response_len)
    for i, (plen, rlen) in enumerate(zip(prompt_lens, response_lens)):
        total = plen + rlen
        # Token ids 1..total so every position is recognizable afterwards.
        input_ids[i, :total] = torch.arange(1, total + 1)
        attention_mask[i, :total] = 1
        response_mask[i, :rlen] = 1

    position_ids = torch.arange(max_seq_len).unsqueeze(0).expand(batch_size, -1).clone()

    prompts_nested = torch.nested.as_nested_tensor(
        [input_ids[i, : prompt_lens[i]] for i in range(batch_size)], layout=torch.jagged
    )
    responses_nested = torch.nested.as_nested_tensor(
        [input_ids[i, prompt_lens[i] : prompt_lens[i] + response_lens[i]] for i in range(batch_size)],
        layout=torch.jagged,
    )

    data = TensorDict(
        {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "response_mask": response_mask,
            "position_ids": position_ids,
            "prompts": prompts_nested,
            "responses": responses_nested,
        }
    )

    data_nested = left_right_2_no_padding(data)

    # Fabricate "log probs" reusing the jagged layout of the nested input_ids.
    input_ids_nested = data_nested["input_ids"]
    log_probs_nested = torch.nested.nested_tensor_from_jagged(
        input_ids_nested.values().float(), offsets=input_ids_nested.offsets()
    )
    result_slice_response = no_padding_2_padding(log_probs_nested, data_nested)

    # Recovered values are the response tokens left-shifted by one position.
    for i, (plen, resp_len) in enumerate(zip(prompt_lens, response_lens)):
        expected_values = torch.arange(plen, plen + resp_len, dtype=torch.float)
        torch.testing.assert_close(
            result_slice_response[i, :resp_len],
            expected_values,
            rtol=1e-5,
            atol=1e-6,
            msg=f"Batch {i} (prompt_len={prompt_lens[i]}, resp_len={resp_len}): values incorrect",
        )
    print("All varied length tests passed")
if __name__ == "__main__":
    # Run all padding-conversion tests sequentially when executed as a script.
    test_padding_conversion_with_log_probs()
    test_padding_conversion_without_log_probs()
    test_padding_roundtrip()
    test_no_padding_2_padding_varying_lengths()
    print("All padding conversion tests passed!")
| {
"repo_id": "verl-project/verl",
"file_path": "tests/utils/test_padding_on_cpu.py",
"license": "Apache License 2.0",
"lines": 199,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:tests/workers/rollout/rollout_trtllm/test_adapter.py | # Copyright 2026 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import os
import subprocess
from unittest.mock import AsyncMock, Mock, patch
import aiohttp
import pytest
import ray
from verl.workers.rollout.trtllm_rollout.trtllm_async_server import TRTLLMReplica
from verl.workers.rollout.trtllm_rollout.trtllm_rollout import AsyncTRTLLMHttpAdapter
class TestAsyncTRTLLMHttpAdapter:
    """Unit tests for AsyncTRTLLMHttpAdapter's async HTTP request handling."""

    def _build_async_session(
        self,
        *,
        method: str,
        response: AsyncMock | None = None,
    ) -> tuple[AsyncMock, AsyncMock]:
        """Build a mocked aiohttp client session wrapped in an async CM.

        Args:
            method: HTTP verb attribute on the session ("get" or "post")
                whose call should yield *response* via an async context
                manager, mirroring ``async with session.post(...) as resp``.
            response: The mocked response object, or None to leave the verb
                unwired (caller installs a side_effect instead).

        Returns:
            ``(session_cm, session)``: the outer async context manager to
            patch into the adapter's ``_get_session``, and the inner session
            mock for call assertions.
        """
        # Note: the helper used to take an unused `adapter` keyword; it was
        # dead and has been removed along with all call-site arguments.
        mock_session = AsyncMock(spec=aiohttp.ClientSession)
        mock_session.closed = False
        if response is not None:
            # async with session.<method>(...) as resp -> response
            mock_context_manager = AsyncMock()
            mock_context_manager.__aenter__.return_value = response
            getattr(mock_session, method).return_value = mock_context_manager
        # async with session_cm as session -> mock_session
        mock_session_cm = AsyncMock()
        mock_session_cm.__aenter__.return_value = mock_session
        return mock_session_cm, mock_session

    @pytest.mark.asyncio
    async def test_make_async_request_get_method(self):
        """Test HTTP GET request."""
        adapter = AsyncTRTLLMHttpAdapter(host="localhost", port=8000)
        get_response = AsyncMock()
        get_response.status = 200
        get_response.headers = {"Content-Type": "application/json"}
        get_response.raise_for_status = Mock()
        get_response.json = AsyncMock(return_value={"data": "test"})
        get_session_cm, get_session = self._build_async_session(
            method="get",
            response=get_response,
        )
        with patch.object(adapter, "_get_session", return_value=get_session_cm):
            get_result = await adapter._make_async_request("test_endpoint", method="GET")
            assert get_result == {"data": "test"}
            get_session.get.assert_called_once_with("http://localhost:8000/test_endpoint", timeout=adapter.timeout)

    @pytest.mark.asyncio
    async def test_make_async_request_post_method(self):
        """Test HTTP POST request."""
        adapter = AsyncTRTLLMHttpAdapter(host="localhost", port=8000)
        post_response = AsyncMock()
        post_response.status = 200
        post_response.headers = {"Content-Type": "application/json"}
        post_response.raise_for_status = Mock()
        post_response.json = AsyncMock(return_value={"status": "ok"})
        post_session_cm, post_session = self._build_async_session(
            method="post",
            response=post_response,
        )
        with patch.object(adapter, "_get_session", return_value=post_session_cm):
            post_result = await adapter._make_async_request("test_endpoint", {"param": "value"})
            assert post_result == {"status": "ok"}
            post_session.post.assert_called_once_with(
                "http://localhost:8000/test_endpoint", json={"param": "value"}, timeout=adapter.timeout
            )

    @pytest.mark.asyncio
    async def test_make_async_request_http_error(self):
        """Test HTTP error handling."""
        adapter = AsyncTRTLLMHttpAdapter(host="localhost", port=8000)
        mock_response = AsyncMock()
        mock_response.status = 500
        mock_response.headers = {"Content-Type": "application/json"}
        # raise_for_status raising ClientResponseError must propagate.
        mock_response.raise_for_status = Mock(
            side_effect=aiohttp.ClientResponseError(
                request_info=Mock(real_url="http://localhost:8000/test_endpoint"),
                history=(),
                status=500,
                message="server error",
            )
        )
        mock_session_cm, _mock_session = self._build_async_session(
            method="post",
            response=mock_response,
        )
        with patch.object(adapter, "_get_session", return_value=mock_session_cm):
            with pytest.raises(aiohttp.ClientResponseError):
                await adapter._make_async_request("test_endpoint", {"param": "value"})

    @pytest.mark.asyncio
    async def test_make_async_request_max_attempts_exceeded(self):
        """Test max retries exceeded."""
        adapter = AsyncTRTLLMHttpAdapter(host="localhost", port=8000, max_attempts=1)
        mock_session_cm, mock_session = self._build_async_session(
            method="post",
            response=None,
        )
        # Every attempt times out, exhausting max_attempts=1.
        mock_session.post.side_effect = asyncio.TimeoutError()
        with patch.object(adapter, "_get_session", return_value=mock_session_cm):
            with pytest.raises(RuntimeError, match="Failed to complete async request"):
                await adapter._make_async_request("test_endpoint", {"param": "value"})
class TestTRTLLMServerAdapter:
    def test_init_without_device_mesh(self):
        """Test ServerAdapter init path without device mesh."""
        from hydra import compose, initialize_config_dir

        # Save RANK so it can be restored exactly (including "unset") after the test.
        prev_rank = os.environ.get("RANK")
        os.environ["RANK"] = "0"
        try:
            # Keep Ray local so the test never tries to join an external cluster.
            os.environ.setdefault("TLLM_RAY_FORCE_LOCAL_CLUSTER", "1")
            ray.init(address="local", ignore_reinit_error=True, include_dashboard=False)
            config_dir = os.path.abspath("verl/verl/trainer/config")
            # Fallback for when the test is launched from the repository root.
            if not os.path.exists(config_dir):
                config_dir = os.path.abspath("verl/trainer/config")
            with initialize_config_dir(config_dir=config_dir, version_base=None):
                config = compose(config_name="ppo_trainer")
            # One node with 2 GPUs, TP=2 async TRT-LLM rollout.
            config.trainer.n_gpus_per_node = 2
            config.trainer.nnodes = 1
            # Model path is configurable via TRTLLM_TEST_MODEL_PATH_ROOT (default ~/models).
            model_root = os.path.expanduser(os.getenv("TRTLLM_TEST_MODEL_PATH_ROOT", "~/models"))
            config.actor_rollout_ref.model.path = os.path.join(model_root, "Qwen/Qwen2.5-1.5B-Instruct")
            config.actor_rollout_ref.rollout.name = "trtllm"
            config.actor_rollout_ref.rollout.mode = "async"
            config.actor_rollout_ref.rollout.tensor_model_parallel_size = 2
            rollout_config = config.actor_rollout_ref.rollout
            model_config = config.actor_rollout_ref.model
            replica = TRTLLMReplica(
                replica_rank=0,
                config=rollout_config,
                model_config=model_config,
                gpus_per_node=2,
            )
            asyncio.run(replica.init_standalone())
            # TP=2 -> two workers; only worker 0 is the leader rank.
            assert len(replica.workers) == 2
            worker0 = replica.workers[0]
            worker1 = replica.workers[1]
            replica_rank = ray.get(worker0.get_replica_rank.remote())
            is_leader_rank_0 = ray.get(worker0.is_leader_rank.remote())
            is_leader_rank_1 = ray.get(worker1.is_leader_rank.remote())
            assert replica_rank == 0
            assert is_leader_rank_0 is True
            assert is_leader_rank_1 is False
        finally:
            # Restore RANK and shut Ray down even when an assertion fails.
            if prev_rank is None:
                os.environ.pop("RANK", None)
            else:
                os.environ["RANK"] = prev_rank
            ray.shutdown()
            subprocess.run(["ray", "stop"], capture_output=True)
| {
"repo_id": "verl-project/verl",
"file_path": "tests/workers/rollout/rollout_trtllm/test_adapter.py",
"license": "Apache License 2.0",
"lines": 159,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:tests/workers/rollout/rollout_trtllm/test_async_server.py | # Copyright 2026 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import time
from unittest.mock import MagicMock, patch
import ray
import torch
from ray.util import placement_group_table
from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy
from verl.single_controller.ray import RayResourcePool, SubRayResourcePool
from verl.workers.rollout.replica import RolloutMode
from verl.workers.rollout.trtllm_rollout.trtllm_async_server import TRTLLMHttpServer, TRTLLMReplica
class TestTRTLLMReplica:
    """Placement-group bundle-assignment tests for TRTLLMReplica."""

    def test_placement_group_with_sub_ray_resource_pool(self):
        """
        Scenario: SubRayResourcePool, 1 node, 8 GPUs, TP=4, replica_rank=1
        SubRayResourcePool pre-assigns start_bundle_index=4 for replica 1.
        Expected: Replica 1 gets bundles [4, 5, 6, 7]
        """
        with patch("verl.workers.rollout.trtllm_rollout.trtllm_async_server.ray"):
            cfg = MagicMock()
            cfg.tensor_model_parallel_size = 4
            cfg.data_parallel_size = 1
            cfg.pipeline_model_parallel_size = 1

            replica = TRTLLMReplica(
                replica_rank=1,
                config=cfg,
                model_config=MagicMock(),
                gpus_per_node=8,
            )

            pg = MagicMock()
            pg.bundle_count = 8
            # The sub-pool already carries the starting bundle for replica 1.
            replica.resource_pool = SubRayResourcePool(
                placement_groups=[pg],
                start_bundle_index=4,
                subgroup_world_size=4,
            )
            replica.world_size = 4  # TP=4

            pgs, bundle_indices = replica.get_pgs_and_bundle_indices()

            assert len(pgs) == 1 and pgs[0] == pg
            assert len(bundle_indices) == 1
            assert bundle_indices[0] == [4, 5, 6, 7]

    def test_placement_group_with_ray_resource_pool(self):
        """
        Scenario: RayResourcePool, 1 node, 8 GPUs, TP=2, replica_rank=1
        RayResourcePool calculates: local_bundle_index = world_size * replica_rank = 2 * 1 = 2
        Expected: Replica 1 gets bundles [2, 3]
        """
        with patch("verl.workers.rollout.trtllm_rollout.trtllm_async_server.ray"):
            cfg = MagicMock()
            cfg.tensor_model_parallel_size = 2
            cfg.data_parallel_size = 1
            cfg.pipeline_model_parallel_size = 1

            replica = TRTLLMReplica(
                replica_rank=1,
                config=cfg,
                model_config=MagicMock(),
                gpus_per_node=8,
            )

            pg = MagicMock()
            pg.bundle_count = 8
            pool = RayResourcePool(
                process_on_nodes=[8],
                use_gpu=True,
                max_colocate_count=1,
                name_prefix="test_rollout",
            )
            pool.pgs = [pg]
            replica.resource_pool = pool
            replica.world_size = 2  # TP=2

            pgs, bundle_indices = replica.get_pgs_and_bundle_indices()

            assert len(pgs) == 1 and pgs[0] == pg
            assert len(bundle_indices) == 1
            assert bundle_indices[0] == [2, 3]
class TestTRTLLMHttpServer:
    @staticmethod
    def _build_rollout_config(*, response_length: int | None = None, free_cache_engine: bool = False):
        # Compose the stock ppo_trainer hydra config and override the fields
        # needed for a single-GPU async TRT-LLM rollout.
        from hydra import compose, initialize_config_dir

        config_dir = os.path.abspath("verl/verl/trainer/config")
        # Fallback for when the test is launched from the repository root.
        if not os.path.exists(config_dir):
            config_dir = os.path.abspath("verl/trainer/config")
        with initialize_config_dir(config_dir=config_dir, version_base=None):
            config = compose(config_name="ppo_trainer")
        config.trainer.n_gpus_per_node = 1
        config.trainer.nnodes = 1
        # Model path root configurable via TRTLLM_TEST_MODEL_PATH_ROOT (default ~/models).
        model_root = os.path.expanduser(os.getenv("TRTLLM_TEST_MODEL_PATH_ROOT", "~/models"))
        config.actor_rollout_ref.model.path = os.path.join(model_root, "Qwen/Qwen2.5-0.5B-Instruct")
        config.actor_rollout_ref.rollout.name = "trtllm"
        config.actor_rollout_ref.rollout.mode = "async"
        config.actor_rollout_ref.rollout.tensor_model_parallel_size = 1
        if response_length is not None:
            config.actor_rollout_ref.rollout.response_length = response_length
        if free_cache_engine:
            config.actor_rollout_ref.rollout.free_cache_engine = True
        return config.actor_rollout_ref.rollout, config.actor_rollout_ref.model

    @staticmethod
    def _create_server(rollout_config, model_config, *, name: str):
        # Build a 1-GPU resource pool and schedule the HTTP server actor on
        # the node that owns bundle 0 of its placement group.
        resource_pool = RayResourcePool(
            process_on_nodes=[1],
            use_gpu=True,
            max_colocate_count=1,
            name_prefix="test_rollout",
        )
        pgs = resource_pool.get_placement_groups()
        bundle_indices = [[0]]
        pg_data = placement_group_table(pgs[0])
        node_id = pg_data["bundles_to_node_id"][bundle_indices[0][0]]
        return TRTLLMHttpServer.options(
            scheduling_strategy=NodeAffinitySchedulingStrategy(
                node_id=node_id,
                soft=False,
            ),
            runtime_env={"env_vars": {"RAY_EXPERIMENTAL_NOSET_CUDA_VISIBLE_DEVICES": "1"}},
            name=name,
        ).remote(
            config=rollout_config,
            model_config=model_config,
            is_reward_model=False,
            rollout_mode=RolloutMode.COLOCATED,
            workers=[],
            replica_rank=0,
            max_colocate_count=1,
            pgs=pgs,
            bundle_indices=bundle_indices,
        )

    def test_async_generate(self):
        """Test TRT-LLM generate method with real model."""
        try:
            # Keep Ray local so the test never tries to join an external cluster.
            os.environ.setdefault("TLLM_RAY_FORCE_LOCAL_CLUSTER", "1")
            ray.init(address="local", ignore_reinit_error=True, include_dashboard=False)
            rollout_config, model_config = self._build_rollout_config(response_length=50)
            server = self._create_server(
                rollout_config,
                model_config,
                name="trtllm_server_test_generate",
            )
            ray.get(server.launch_server.remote())
            # Test generate with a simple prompt
            prompt_ids = [1, 2, 3, 4, 5]  # Simple test prompt
            sampling_params = {
                "temperature": 1.0,
                "top_k": 0,
                "logprobs": 1,
            }
            request_id = "test_request_1"
            result = ray.get(server.generate.remote(prompt_ids, sampling_params, request_id))
            print(f"Result: {result}")
            # Verify the result structure
            assert hasattr(result, "token_ids"), "Result should have token_ids attribute"
            assert hasattr(result, "log_probs"), "Result should have log_probs attribute"
            assert isinstance(result.token_ids, list), "token_ids should be a list"
            assert len(result.token_ids) > 0, "Generated tokens should not be empty"
            # Verify logprobs are returned when requested
            assert result.log_probs is not None, "log_probs should not be None when requested"
            assert len(result.log_probs) == len(result.token_ids), "log_probs length should match token_ids"
            print(f"Generated {len(result.token_ids)} tokens")
            print(f"Token IDs: {result.token_ids[:10]}...")  # Print first 10 tokens
            print(f"Log probs: {result.log_probs[:10]}...")  # Print first 10 log probs
        finally:
            ray.shutdown()
            subprocess.run(["ray", "stop"], capture_output=True)

    def test_async_memory_management(self):
        """Test TRT-LLM async memory management (sleep) reduces memory usage."""
        try:
            os.environ.setdefault("TLLM_RAY_FORCE_LOCAL_CLUSTER", "1")
            ray.init(address="local", ignore_reinit_error=True, include_dashboard=False)
            rollout_config, model_config = self._build_rollout_config(free_cache_engine=True)
            server = self._create_server(
                rollout_config,
                model_config,
                name="trtllm_server_test_0",
            )
            ray.get(server.launch_server.remote())
            device_ids = ray.get(server.report_device_ids.remote())
            print(f"TRTLLM device UUIDs: {device_ids}")

            def _uuid_to_device_index(device_uuid: str) -> int | None:
                # Map a reported GPU UUID to a local torch device index.
                # NOTE(review): if the first device lacks a uuid attribute this
                # returns 0 without checking the remaining devices — the
                # comment below says that fallback is intentional.
                for idx in range(torch.cuda.device_count()):
                    props = torch.cuda.get_device_properties(idx)
                    uuid = getattr(props, "uuid", None)
                    if uuid is None:
                        # fall back to rank 0
                        return 0
                    if isinstance(uuid, bytes):
                        uuid_str = uuid.decode("utf-8", errors="ignore")
                    else:
                        uuid_str = str(uuid)
                    if uuid_str == device_uuid or uuid_str in device_uuid:
                        print(f"Mapped device UUID {device_uuid} to torch device index {idx}")
                        return idx
                return 0

            def get_gpu_memory_mb_for_device(device_uuid: str) -> float:
                # Used (total - free) memory in MB on the device with this UUID.
                device_index = _uuid_to_device_index(device_uuid)
                prev_device = torch.cuda.current_device()
                torch.cuda.set_device(device_index)
                mem_free, mem_total = torch.cuda.mem_get_info()
                torch.cuda.set_device(prev_device)
                return (mem_total - mem_free) / (1024**2)

            baseline_memory_mb = get_gpu_memory_mb_for_device(device_ids[0])
            print(f" Baseline memory: {baseline_memory_mb:.2f} MB")
            ray.get(server.sleep.remote())
            # Give the server a moment to actually release its allocations.
            time.sleep(2)
            sleep_memory_mb = get_gpu_memory_mb_for_device(device_ids[0])
            memory_freed_mb = baseline_memory_mb - sleep_memory_mb
            print(f" Memory after sleep: {sleep_memory_mb:.2f} MB")
            print(f" Memory freed: {memory_freed_mb:.2f} MB")
            # sleep() must release at least 60% of the memory it was using.
            assert memory_freed_mb >= baseline_memory_mb * 0.6, (
                f"Expected sleep() to free >=60% of baseline memory. "
                f"Baseline: {baseline_memory_mb:.2f} MB, freed: {memory_freed_mb:.2f} MB."
            )
        finally:
            ray.shutdown()
            subprocess.run(["ray", "stop"], capture_output=True)
| {
"repo_id": "verl-project/verl",
"file_path": "tests/workers/rollout/rollout_trtllm/test_async_server.py",
"license": "Apache License 2.0",
"lines": 227,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:verl/experimental/vla/main_sac.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from pprint import pprint
import datasets
import hydra
import ray
import torch
from omegaconf import OmegaConf
from verl import DataProto
from verl.experimental.vla.sac.sac_ray_trainer import RobRaySACTrainer
from verl.trainer.constants_ppo import get_ppo_ray_runtime_env
from verl.trainer.ppo.ray_trainer import ResourcePoolManager
from verl.trainer.ppo.utils import Role
from verl.utils import hf_tokenizer
from verl.utils.fs import copy_local_path_from_hdfs
logger = logging.getLogger(__name__)
def calculate_reward(data: "DataProto", return_dict: bool = False) -> "torch.Tensor | dict[str, torch.Tensor]":
    """Compute per-step rewards from the environment's completion signal.

    Args:
        data: Batch whose ``batch["complete"]`` tensor flags completed steps;
            the flag is cast to float, so truthy entries yield reward 1.0.
        return_dict: If True, wrap the tensor in a dict under the key
            ``"reward_tensor"``.

    Returns:
        The float reward tensor, or ``{"reward_tensor": tensor}`` when
        ``return_dict`` is True.
    """
    # Reward is simply the completion flag cast to float (1.0 when complete).
    reward_per_step = data.batch["complete"].float()
    if return_dict:
        return {"reward_tensor": reward_per_step}
    return reward_per_step
@hydra.main(config_path="config", config_name="rob_sac_trainer", version_base=None)
def main(config):
    """Hydra entry point: bring up Ray if needed, then run the SAC driver task."""
    if not ray.is_initialized():
        # Merge the default PPO runtime env with any user-supplied overrides.
        base_runtime_env = get_ppo_ray_runtime_env()
        init_kwargs = config.ray_kwargs.get("ray_init", {})
        merged_runtime_env = OmegaConf.merge(base_runtime_env, init_kwargs.get("runtime_env", {}))
        init_kwargs = OmegaConf.create({**init_kwargs, "runtime_env": merged_runtime_env})
        logger.info(f"ray init kwargs: {init_kwargs}")
        ray.init(**OmegaConf.to_container(init_kwargs))
    # Block until the remote driver task completes.
    ray.get(main_task.remote(config))
@ray.remote
def main_task(config):
    """Driver task: build tokenizer, workers, resource pools, datasets, then train."""
    # Show the fully-resolved configuration before anything heavy starts.
    pprint(OmegaConf.to_container(config, resolve=True))  # resolve=True will eval symbol values
    OmegaConf.resolve(config)

    # Fetch the model checkpoint (possibly from HDFS) to local disk and
    # build the tokenizer from it.
    local_path = copy_local_path_from_hdfs(config.actor_rollout_ref.model.path)
    tokenizer = hf_tokenizer(local_path)

    # Only FSDP-family strategies are supported; actor and critic must agree.
    if config.actor_rollout_ref.actor.strategy in ["fsdp", "fsdp2"]:
        assert config.actor_rollout_ref.actor.strategy == config.critic.strategy
        from verl.experimental.vla.workers.env.env_worker import EnvWorker
        from verl.single_controller.ray import RayWorkerGroup

        from .fsdp_workers import RobActorRolloutRefWorker

        ray_worker_group_cls = RayWorkerGroup
    else:
        raise NotImplementedError

    role_worker_mapping = {
        Role.ActorRollout: ray.remote(RobActorRolloutRefWorker),
        Role.Env: ray.remote(EnvWorker),
    }

    # Two resource pools: one for training/rollout, one for the (possibly
    # disaggregated) simulation environment.
    rollout_gpus = config.trainer.n_rollout_gpus_per_node
    rollout_nodes = config.trainer.nnodes
    env_gpus = config.trainer.n_env_gpus_per_node
    env_nodes = config.env.disagg_sim.nnodes if config.env.disagg_sim.enable else config.trainer.nnodes
    resource_pool_manager = ResourcePoolManager(
        resource_pool_spec={
            "train_rollout_pool": [rollout_gpus] * rollout_nodes,
            "env_gpu_pool": [env_gpus] * env_nodes,
        },
        mapping={
            Role.ActorRollout: "train_rollout_pool",
            Role.Env: "env_gpu_pool",
        },
    )

    # Parquet datasets; load_dataset exposes them under the "train" split.
    train_dataset = datasets.load_dataset("parquet", data_files=config.data.train_files)["train"]
    val_dataset = datasets.load_dataset("parquet", data_files=config.data.val_files)["train"]

    trainer = RobRaySACTrainer(
        config=config,
        tokenizer=tokenizer,
        role_worker_mapping=role_worker_mapping,
        resource_pool_manager=resource_pool_manager,
        ray_worker_group_cls=ray_worker_group_cls,
        reward_fn=calculate_reward,
        val_reward_fn=calculate_reward,
        train_dataset=train_dataset,
        val_dataset=val_dataset,
    )
    trainer.init_workers()
    trainer.fit()
# Script entry point; `main` is decorated with @hydra.main above.
if __name__ == "__main__":
    main()
| {
"repo_id": "verl-project/verl",
"file_path": "verl/experimental/vla/main_sac.py",
"license": "Apache License 2.0",
"lines": 101,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/experimental/vla/models/modules/mlp.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.nn as nn
import torch.nn.init as init
class MLP(nn.Module):
    """Configurable multi-layer perceptron.

    Builds ``Linear -> activation`` pairs for every hidden width, then a final
    ``Linear`` projection, and re-initializes all linear layers with the chosen
    strategy.

    Attributes:
        input_dim: Number of input features.
        hidden_dims: Width of each hidden layer, in order.
        output_dim: Number of output units.
        activation_name: Lower-cased activation key
            ('relu', 'tanh', 'sigmoid', 'leaky_relu', 'elu', 'selu', 'none').
        init_method: Lower-cased initialization key
            ('kaiming', 'xavier', 'normal', 'orthogonal').
    """

    def __init__(
        self,
        input_dim: int,
        hidden_dims: list[int],
        output_dim: int,
        activation: str = "relu",
        init_method: str = "kaiming",
    ):
        super().__init__()
        self.input_dim = input_dim
        self.hidden_dims = hidden_dims
        self.output_dim = output_dim
        self.activation_name = activation.lower()
        self.init_method = init_method.lower()

        modules: list[nn.Module] = []
        in_features = input_dim
        for width in hidden_dims:
            modules.append(nn.Linear(in_features, width))
            act = self._get_activation(self.activation_name)
            if act is not None:
                modules.append(act)
            in_features = width
        modules.append(nn.Linear(in_features, output_dim))
        self.network = nn.Sequential(*modules)

        # Re-initialize every Linear layer with the requested strategy.
        self.apply(self.init_weights)

    def _get_activation(self, name):
        """Return a fresh activation module for *name*, or None for 'none'.

        Known keys: 'relu', 'tanh', 'sigmoid', 'leaky_relu', 'elu', 'selu'.
        Unknown names silently fall back to ReLU.
        """
        if name == "none":
            return None
        factories = {
            "relu": nn.ReLU,
            "tanh": nn.Tanh,
            "sigmoid": nn.Sigmoid,
            "leaky_relu": lambda: nn.LeakyReLU(0.2),
            "elu": nn.ELU,
            "selu": nn.SELU,
        }
        return factories.get(name, nn.ReLU)()

    def init_weights(self, m):
        """Initialize one Linear layer in place; pass to ``self.apply``.

        Strategies:
        - 'kaiming': kaiming_normal_, suited to the ReLU family.
        - 'xavier': xavier_normal_, suited to tanh/sigmoid.
        - 'normal': normal distribution with std=0.02.
        - 'orthogonal': orthogonal init for deep networks.
        Biases are always zeroed.
        """
        if not isinstance(m, nn.Linear):
            return
        if self.init_method == "kaiming":
            # Kaiming gains are defined for the ReLU family; otherwise use 'relu'.
            nonlinearity = self.activation_name if self.activation_name in ("relu", "leaky_relu") else "relu"
            init.kaiming_normal_(m.weight, nonlinearity=nonlinearity)
        elif self.init_method == "xavier":
            init.xavier_normal_(m.weight)
        elif self.init_method == "normal":
            init.normal_(m.weight, mean=0.0, std=0.02)
        elif self.init_method == "orthogonal":
            init.orthogonal_(m.weight)
        # Zero the bias regardless of the weight strategy.
        if m.bias is not None:
            init.constant_(m.bias, 0)

    def forward(self, x):
        """Run the MLP on input ``x``."""
        return self.network(x)
| {
"repo_id": "verl-project/verl",
"file_path": "verl/experimental/vla/models/modules/mlp.py",
"license": "Apache License 2.0",
"lines": 96,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/experimental/vla/models/pi0_torch/configuration_pi0_torch.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers import PretrainedConfig
class PI0TorchConfig(PretrainedConfig):
    """HuggingFace config for the PI0 torch policy model."""

    model_type = "pi0_torch"

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Pull the PI0-specific fields out of kwargs with their defaults:
        # normalization stats for state/action tensors, and the pi0.5 switch.
        for attr, default in (
            ("state_norm_stats", {}),
            ("action_norm_stats", {}),
            ("pi05_enabled", False),
        ):
            setattr(self, attr, kwargs.get(attr, default))
| {
"repo_id": "verl-project/verl",
"file_path": "verl/experimental/vla/models/pi0_torch/configuration_pi0_torch.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/experimental/vla/models/pi0_torch/model/modeling_pi0.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright 2025 Giga Team. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# from https://github.com/open-gigaai/giga-models
import math
import torch
import torch.nn.functional as F # noqa: N812
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.models.modeling_utils import ModelMixin
from torch import Tensor, nn
from .paligemma_with_expert import PaliGemmaWithExpertModel
def get_safe_dtype(dtype: torch.dtype, device: str | torch.device) -> torch.dtype:
"""Mps is currently not compatible with float64."""
if isinstance(device, torch.device):
device = device.type
if device == "mps" and dtype == torch.float64:
return torch.float32
else:
return dtype
def create_sinusoidal_pos_embedding(
time: torch.Tensor, dimension: int, min_period: float, max_period: float, device: str | torch.device = "cpu"
) -> Tensor:
"""Computes sine-cosine positional embedding vectors for scalar
positions."""
if dimension % 2 != 0:
raise ValueError(f"dimension ({dimension}) must be divisible by 2")
if time.ndim != 1:
raise ValueError("The time tensor is expected to be of shape `(batch_size, )`.")
dtype = get_safe_dtype(torch.float64, device)
fraction = torch.linspace(0.0, 1.0, dimension // 2, dtype=dtype, device=device)
period = min_period * (max_period / min_period) ** fraction
# Compute the outer product
scaling_factor = 1.0 / period * 2 * math.pi
sin_input = scaling_factor[None, :] * time[:, None]
pos_emb = torch.cat([torch.sin(sin_input), torch.cos(sin_input)], dim=1)
return pos_emb
def make_att_2d_masks(pad_masks: torch.Tensor, att_masks: torch.Tensor) -> torch.Tensor:
    """Copied from big_vision.

    Tokens can attend to valid inputs tokens which have a cumulative mask_ar
    smaller or equal to theirs. This way `mask_ar` int[B, N] can be used to
    setup several types of attention, for example:

      [[1 1 1 1 1 1]]: pure causal attention.

      [[0 0 0 1 1 1]]: prefix-lm attention. The first 3 tokens can attend between
          themselves and the last 3 tokens have a causal attention. The first
          entry could also be a 1 without changing behaviour.

      [[1 0 1 0 1 0 0 1 0 0]]: causal attention between 4 blocks. Tokens of a
          block can attend all previous blocks and all tokens on the same block.

    Args:
        pad_masks: bool[B, N] indicating valid (true) vs. padding (false) tokens.
        att_masks: int[B, N] defining attention type. A `1` at a position
            indicates the start of a new causal block.

    Returns:
        A 2D boolean attention mask of shape (B, N, N).

    Raises:
        ValueError: If either mask is not 2-dimensional.
    """
    # Fix: the original raised ValueError(ndim) with a bare integer message.
    if att_masks.ndim != 2:
        raise ValueError(f"att_masks must be 2D (batch, seq), got ndim={att_masks.ndim}")
    if pad_masks.ndim != 2:
        raise ValueError(f"pad_masks must be 2D (batch, seq), got ndim={pad_masks.ndim}")
    # Token i may attend to token j iff j's cumulative block id <= i's.
    cumsum = torch.cumsum(att_masks, dim=1)
    att_2d_masks = cumsum[:, None, :] <= cumsum[:, :, None]
    # Padding positions can neither attend nor be attended to.
    pad_2d_masks = pad_masks[:, None, :] * pad_masks[:, :, None]
    att_2d_masks = att_2d_masks & pad_2d_masks
    return att_2d_masks
class PI0Model(ModelMixin, ConfigMixin):
    """pi0: A Vision-Language-Action Flow Model for General Robot Control.

    [Paper](https://www.physicalintelligence.company/download/pi0.pdf)
    [Jax code](https://github.com/Physical-Intelligence/openpi)

    ┌──────────────────────────────┐
    │               actions        │
    │               ▲              │
    │              ┌┴─────┐        │
    │  kv cache    │Gemma │        │
    │  ┌──────────►│Expert│        │
    │  │           │      │        │
    │ ┌┴────────┐  │x 10  │        │
    │ │         │  └▲──▲──┘        │
    │ │PaliGemma│   │  │           │
    │ │         │   │  robot state │
    │ │         │   noise          │
    │ └▲──▲─────┘                  │
    │  │  │                        │
    │  │  image(s)                 │
    │  language tokens             │
    └──────────────────────────────┘
    """

    @register_to_config
    def __init__(
        self,
        max_state_dim: int = 32,
        max_action_dim: int = 32,
        proj_width: int = 1024,
        n_action_steps: int = 50,
        num_steps: int = 10,
        use_cache: bool = True,
        pi05_enabled: bool = False,
    ):
        super().__init__()
        # Store the parameters
        self.max_state_dim = max_state_dim
        self.max_action_dim = max_action_dim
        self.proj_width = proj_width
        self.n_action_steps = n_action_steps
        self.num_steps = num_steps
        self.use_cache = use_cache
        self.pi05_enabled = pi05_enabled

        # Shared PaliGemma backbone plus the action-expert branch (see diagram).
        self.paligemma_with_expert = PaliGemmaWithExpertModel(
            pi05_enabled=pi05_enabled,
        )

        # Projections are float32
        # pi0.5 conditions the expert on time via adaRMS (time MLP only);
        # the base pi0 variant instead embeds state and fuses time+action with an MLP.
        if self.pi05_enabled:
            self.time_mlp_in = nn.Linear(self.proj_width, self.proj_width, dtype=torch.float32)
            self.time_mlp_out = nn.Linear(self.proj_width, self.proj_width, dtype=torch.float32)
        else:
            self.state_proj = nn.Linear(self.max_state_dim, self.proj_width, dtype=torch.float32)
            self.action_time_mlp_in = nn.Linear(self.proj_width * 2, self.proj_width, dtype=torch.float32)
            self.action_time_mlp_out = nn.Linear(self.proj_width, self.proj_width, dtype=torch.float32)
        self.action_in_proj = nn.Linear(self.max_action_dim, self.proj_width, dtype=torch.float32)
        self.action_out_proj = nn.Linear(self.proj_width, self.max_action_dim, dtype=torch.float32)

    def forward(
        self,
        images: list[torch.Tensor],
        img_masks: list[torch.Tensor],
        lang_tokens: torch.Tensor,
        lang_masks: torch.Tensor,
        state: torch.Tensor,
        x_t: torch.Tensor,
        timestep: torch.Tensor,
    ) -> Tensor:
        """Full forward pass for one diffusion denoising step.

        Args:
            images: List of image tensors, each shaped (B, C, H, W) after batching.
            img_masks: List of boolean masks corresponding to images, each (B,).
            lang_tokens: Language token ids (B, L).
            lang_masks: Language attention mask (B, L) with True for valid tokens.
            state: State tensor (B, state_dim) if pi05 is disabled else ignored.
            x_t: Noisy action tokens (B, n_action_steps, action_dim).
            timestep: Diffusion timestep as float tensor (B,).

        Returns:
            Predicted v_t with shape (B, n_action_steps, action_dim).
        """
        prefix_embs, prefix_pad_masks, prefix_att_masks = self.embed_prefix(images, img_masks, lang_tokens, lang_masks)
        suffix_embs, suffix_pad_masks, suffix_att_masks, adarms_cond = self.embed_suffix(state, x_t, timestep)

        pad_masks = torch.cat([prefix_pad_masks, suffix_pad_masks], dim=1)
        att_masks = torch.cat([prefix_att_masks, suffix_att_masks], dim=1)

        att_2d_masks = make_att_2d_masks(pad_masks, att_masks)
        position_ids = torch.cumsum(pad_masks, dim=1) - 1

        # Joint prefix+suffix pass without KV caching; only the expert branch
        # output (second element) is used for the velocity prediction.
        (_, suffix_out), _ = self.paligemma_with_expert.forward(
            attention_mask=att_2d_masks,
            position_ids=position_ids,
            past_key_values=None,
            inputs_embeds=[prefix_embs, suffix_embs],
            use_cache=False,
            fill_kv_cache=False,
            adarms_cond=[None, adarms_cond],
        )
        suffix_out = suffix_out[:, -self.n_action_steps :]
        # Original openpi code, upcast attention output
        suffix_out = suffix_out.to(dtype=self.action_out_proj.weight.dtype)
        v_t = self.action_out_proj(suffix_out)
        return v_t

    def sample_noise(self, shape: tuple[int, ...], device: torch.device | str) -> torch.Tensor:
        """Generate Gaussian noise for the action trajectory.

        Args:
            shape: Desired output shape, typically (B, n_action_steps, action_dim).
            device: Target device string or torch.device.

        Returns:
            A float32 tensor of standard normal samples with the given shape.
        """
        noise = torch.normal(
            mean=0.0,
            std=1.0,
            size=shape,
            dtype=torch.float32,
            device=device,
        )
        return noise

    def embed_prefix(
        self,
        images: list[torch.Tensor],
        img_masks: list[torch.Tensor],
        lang_tokens: torch.Tensor,
        lang_masks: torch.Tensor,
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Embed visual and language inputs as the transformer prefix.

        Args:
            images: List of (B, C, H, W) tensors.
            img_masks: List of (B,) boolean masks for image presence.
            lang_tokens: (B, L) token ids.
            lang_masks: (B, L) boolean mask; True indicates valid tokens.

        Returns:
            A tuple of (embs, pad_masks, att_masks):
              - embs: (B, Np, D) concatenated image and language embeddings
              - pad_masks: (B, Np) valid token mask
              - att_masks: (B, Np) attention mask scheme selector
        """
        # Optimize: batch process images and pre-allocate tensors
        num_images = len(images)

        # Stack images and masks for batch processing
        images_stacked = torch.stack(images, dim=0)  # (num_images, bsize, ...)
        img_masks_stacked = torch.stack(img_masks, dim=0)  # (num_images, bsize)

        # Batch embed all images at once
        # Reshape to (num_images * bsize, ...)
        orig_shape = images_stacked.shape
        images_flat = images_stacked.reshape(-1, *orig_shape[2:])
        img_embs_flat = self.paligemma_with_expert.embed_image(images_flat)

        # Reshape back to (num_images, bsize, num_img_embs, emb_dim)
        bsize = orig_shape[1]
        img_embs = img_embs_flat.reshape(num_images, bsize, *img_embs_flat.shape[1:])

        # Normalize image embeddings
        img_emb_dim = img_embs.shape[-1]

        num_img_embs = img_embs.shape[2]

        # Expand masks: (num_images, bsize) -> (num_images, bsize, num_img_embs)
        img_masks_expanded = img_masks_stacked[:, :, None].expand(num_images, bsize, num_img_embs)

        # Reshape to (bsize, num_images * num_img_embs, emb_dim)
        img_embs_concat = img_embs.transpose(0, 1).reshape(bsize, num_images * num_img_embs, img_emb_dim)
        img_masks_concat = img_masks_expanded.transpose(0, 1).reshape(bsize, num_images * num_img_embs)

        # Process language embeddings
        lang_emb = self.paligemma_with_expert.embed_language_tokens(lang_tokens)
        lang_emb_dim = lang_emb.shape[-1]
        # Scale language embeddings by sqrt(dim), as in the original model.
        lang_emb = lang_emb * math.sqrt(lang_emb_dim)
        lang_emb = lang_emb.to(dtype=img_embs_concat.dtype)

        num_lang_embs = lang_emb.shape[1]
        total_seq_len = num_images * num_img_embs + num_lang_embs

        # Pre-allocate final tensors
        embs = torch.empty(
            bsize, total_seq_len, img_emb_dim, dtype=img_embs_concat.dtype, device=img_embs_concat.device
        )
        pad_masks = torch.empty(bsize, total_seq_len, dtype=torch.bool, device=img_embs_concat.device)

        # Fill pre-allocated tensors
        embs[:, : num_images * num_img_embs] = img_embs_concat
        embs[:, num_images * num_img_embs :] = lang_emb
        pad_masks[:, : num_images * num_img_embs] = img_masks_concat
        pad_masks[:, num_images * num_img_embs :] = lang_masks

        # Create attention masks (all zeros for full attention between image and language)
        att_masks = torch.zeros(total_seq_len, dtype=torch.bool, device=pad_masks.device)
        att_masks = att_masks[None, :].expand(bsize, total_seq_len)

        return embs, pad_masks, att_masks

    def embed_suffix(
        self, state: torch.Tensor, noisy_actions: torch.Tensor, timestep: torch.Tensor
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor | None]:
        """Embed state, action and time tokens as the transformer suffix.

        Args:
            state: (B, state_dim) robot state; ignored when pi05 is enabled.
            noisy_actions: (B, n_action_steps, action_dim) current x_t.
            timestep: (B,) diffusion time in [0, 1].

        Returns:
            (embs, pad_masks, att_masks, adarms_cond) where:
              - embs: (B, Ns, D) suffix embeddings
              - pad_masks: (B, Ns) valid mask
              - att_masks: (B, Ns) causal scheme for suffix
              - adarms_cond: (B, D) AdaRMS conditioning or None
        """
        embs = []
        pad_masks = []
        att_masks = []

        action_emb = self.action_in_proj(noisy_actions)
        bsize = action_emb.shape[0]
        dtype = action_emb.dtype
        device = action_emb.device

        # Embed state
        if not self.pi05_enabled:
            state_emb = self.state_proj(state)
            embs.append(state_emb[:, None, :])
            state_mask = torch.ones(bsize, 1, dtype=torch.bool, device=device)
            pad_masks.append(state_mask)

            # Set attention masks so that image and language inputs do not attend to state or actions
            att_masks += [1]

        # Embed timestep using sine-cosine positional encoding with sensitivity in the range [0, 1]
        time_emb = create_sinusoidal_pos_embedding(
            timestep, self.proj_width, min_period=4e-3, max_period=4.0, device=device
        )
        time_emb = time_emb.type(dtype=dtype)

        if self.pi05_enabled:
            # time MLP (for adaRMS)
            time_emb = self.time_mlp_in(time_emb)
            time_emb = F.silu(time_emb)
            time_emb = self.time_mlp_out(time_emb)
            time_emb = F.silu(time_emb)
            action_expert_emb = action_emb
            adarms_cond = time_emb
        else:
            # Fuse timestep + action information using an MLP
            time_emb = time_emb[:, None, :].expand_as(action_emb)
            action_time_emb = torch.cat([action_emb, time_emb], dim=2)
            action_time_emb = self.action_time_mlp_in(action_time_emb)
            action_time_emb = F.silu(action_time_emb)  # swish == silu
            action_time_emb = self.action_time_mlp_out(action_time_emb)
            action_expert_emb = action_time_emb
            adarms_cond = None

        # Add to input tokens
        embs.append(action_expert_emb)
        bsize, action_time_dim = action_expert_emb.shape[:2]
        action_time_mask = torch.ones(bsize, action_time_dim, dtype=torch.bool, device=device)
        pad_masks.append(action_time_mask)

        # Set attention masks so that image, language and state inputs do not attend to action tokens
        att_masks += [1] + ([0] * (self.n_action_steps - 1))

        embs = torch.cat(embs, dim=1)
        pad_masks = torch.cat(pad_masks, dim=1)
        # NOTE(review): att_masks is created with the float embedding dtype, not
        # int/bool; cumsum in make_att_2d_masks works either way — confirm intended.
        att_masks = torch.tensor(att_masks, dtype=embs.dtype, device=embs.device)
        att_masks = att_masks[None, :].expand(bsize, len(att_masks))

        return embs, pad_masks, att_masks, adarms_cond

    @torch.no_grad()
    def sample_actions(
        self,
        images: list[torch.Tensor],
        img_masks: list[torch.Tensor],
        lang_tokens: torch.Tensor,
        lang_masks: torch.Tensor,
        state: torch.Tensor,
        noise: Tensor | None = None,
    ) -> Tensor:
        """Run the full inference loop to predict an action trajectory.

        Args:
            images: List of (B, C, H, W) image tensors.
            img_masks: List of (B,) boolean masks.
            lang_tokens: (B, L) token ids.
            lang_masks: (B, L) boolean mask for tokens.
            state: (B, state_dim) robot state.
            noise: Optional initial noise; if None, generated internally.
                NOTE(review): when provided, this tensor is updated in place by
                the integration loop below — pass a copy if you need to keep it.

        Returns:
            Predicted actions with shape (B, n_action_steps, action_dim).
        """
        bsize = lang_tokens.shape[0]
        device = lang_tokens.device

        if noise is None:
            actions_shape = (bsize, self.n_action_steps, self.max_action_dim)
            noise = self.sample_noise(actions_shape, device)

        prefix_embs, prefix_pad_masks, prefix_att_masks = self.embed_prefix(images, img_masks, lang_tokens, lang_masks)
        prefix_att_2d_masks = make_att_2d_masks(prefix_pad_masks, prefix_att_masks)
        prefix_position_ids = torch.cumsum(prefix_pad_masks, dim=1) - 1

        # Compute image and language key value cache
        _, past_key_values = self.paligemma_with_expert.forward(
            attention_mask=prefix_att_2d_masks,
            position_ids=prefix_position_ids,
            past_key_values=None,
            inputs_embeds=[prefix_embs, None],
            use_cache=self.use_cache,
            fill_kv_cache=True,
            adarms_cond=[None, None],
        )

        # Euler integration from t=1 (noise) down to t=0 in num_steps steps.
        x_t = noise
        dt = -1.0 / self.num_steps
        timesteps = torch.arange(1.0, -dt / 2, dt, dtype=torch.float32, device=device)
        for timestep in timesteps:
            v_t = self.denoise_step(
                state,
                prefix_pad_masks,
                past_key_values,
                x_t,
                timestep.expand(bsize),
            )
            # In-place update (also mutates the caller's `noise` tensor, see above).
            x_t += dt * v_t
        return x_t

    def denoise_step(
        self,
        state: torch.Tensor,
        prefix_pad_masks: torch.Tensor,
        past_key_values: dict,
        x_t: torch.Tensor,
        timestep: torch.Tensor,
    ) -> torch.Tensor:
        """Apply one denoising step of the noise x_t at a given timestep.

        Args:
            state: (B, state_dim) robot state.
            prefix_pad_masks: (B, Np) prefix pad masks computed from embed_prefix.
            past_key_values: KV cache dict for the prefix (images+language).
            x_t: (B, n_action_steps, action_dim) current noisy actions.
            timestep: (B,) current time in [0, 1].

        Returns:
            v_t prediction with shape (B, n_action_steps, action_dim).
        """
        suffix_embs, suffix_pad_masks, suffix_att_masks, adarms_cond = self.embed_suffix(state, x_t, timestep)

        suffix_len = suffix_pad_masks.shape[1]
        batch_size = prefix_pad_masks.shape[0]
        prefix_len = prefix_pad_masks.shape[1]
        # Suffix tokens may attend to every valid prefix token (cached in KV).
        prefix_pad_2d_masks = prefix_pad_masks[:, None, :].expand(batch_size, suffix_len, prefix_len)

        suffix_att_2d_masks = make_att_2d_masks(suffix_pad_masks, suffix_att_masks)

        full_att_2d_masks = torch.cat([prefix_pad_2d_masks, suffix_att_2d_masks], dim=2)

        # Suffix positions continue after the (unpadded) prefix length.
        prefix_offsets = torch.sum(prefix_pad_masks, dim=-1)[:, None]
        position_ids = prefix_offsets + torch.cumsum(suffix_pad_masks, dim=1) - 1

        outputs_embeds, _ = self.paligemma_with_expert.forward(
            attention_mask=full_att_2d_masks,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=[None, suffix_embs],
            use_cache=self.use_cache,
            fill_kv_cache=False,
            adarms_cond=[None, adarms_cond],
        )
        suffix_out = outputs_embeds[1]
        suffix_out = suffix_out[:, -self.n_action_steps :]
        suffix_out = suffix_out.to(dtype=self.action_out_proj.weight.dtype)
        v_t = self.action_out_proj(suffix_out)
        return v_t
| {
"repo_id": "verl-project/verl",
"file_path": "verl/experimental/vla/models/pi0_torch/model/modeling_pi0.py",
"license": "Apache License 2.0",
"lines": 406,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/experimental/vla/models/pi0_torch/model/paligemma_with_expert.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright 2025 Giga Team. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# from https://github.com/open-gigaai/giga-models
from typing import Optional
import torch
import torch.nn.functional as F
from torch import nn
from transformers.activations import ACT2FN
from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from transformers.models.auto import CONFIG_MAPPING
from transformers.models.siglip.configuration_siglip import SiglipVisionConfig
from transformers.models.siglip.modeling_siglip import (
SiglipEncoder,
SiglipMultiheadAttentionPoolingHead,
SiglipVisionEmbeddings,
)
from transformers.utils import can_return_tuple
from verl.utils.device import get_device_name
def get_transformers_siglip_vision_config() -> SiglipVisionConfig:
    """Build the SiglipVisionConfig used for the PaliGemma vision tower.

    NOTE(review): hyperparameters are hard-coded to match the pretrained
    checkpoint — verify against upstream before changing any of them.
    """
    vision_kwargs = dict(
        hidden_size=1152,
        intermediate_size=4304,
        num_channels=3,
        num_attention_heads=16,
        num_hidden_layers=27,
        num_image_tokens=256,
        patch_size=14,
        projection_dim=2048,
        projector_hidden_act="gelu_fast",
        torch_dtype="float32",
        vision_use_head=False,
    )
    return CONFIG_MAPPING["siglip_vision_model"](**vision_kwargs)
class GemmaRMSNorm(nn.Module):
def __init__(self, dim: int, eps: float = 1e-6, use_ada_rms_norm: bool = False):
super().__init__()
self.eps = eps
self.use_ada_rms_norm = use_ada_rms_norm
if use_ada_rms_norm:
self.dense = nn.Linear(dim, dim * 3, bias=True)
nn.init.zeros_(self.dense.weight)
else:
self.weight = nn.Parameter(torch.zeros(dim))
def _norm(self, x):
return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
def forward(self, x, cond: torch.Tensor | None = None):
normed_inputs = self._norm(x.float())
if self.use_ada_rms_norm:
modulation = self.dense(cond)
scale, shift, gate = torch.chunk(modulation.unsqueeze(1), 3, dim=-1)
normed_inputs = normed_inputs.float() * (1.0 + scale.float()) + shift.float()
return normed_inputs.type_as(x), gate.type_as(x)
# Llama does x.to(float16) * w whilst Gemma is (x * w).to(float16)
# See https://github.com/huggingface/transformers/pull/29402
output = normed_inputs * (1.0 + self.weight.float())
return output.type_as(x)
def extra_repr(self):
if self.use_ada_rms_norm:
return f"{tuple(self.dense.weight.shape)}, eps={self.eps}, use_ada_rms_norm=True"
else:
return f"{tuple(self.weight.shape)}, eps={self.eps}"
class SiglipVisionTransformer(nn.Module):
    """SigLIP vision backbone: patch embeddings -> encoder -> final LayerNorm.

    The encoder is run under bfloat16 autocast; the optional attention-pooling
    head is only built when the config enables it.
    """

    def __init__(self, config: SiglipVisionConfig):
        super().__init__()
        self.config = config
        # Force SDPA attention for the encoder blocks.
        self.config._attn_implementation = "sdpa"
        self.embeddings = SiglipVisionEmbeddings(config)
        self.encoder = SiglipEncoder(config)
        self.post_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.use_head = getattr(config, "vision_use_head", True)
        if self.use_head:
            self.head = SiglipMultiheadAttentionPoolingHead(config)

    @can_return_tuple
    # @auto_docstring
    def forward(
        self,
        pixel_values,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: Optional[bool] = False,
    ) -> BaseModelOutputWithPooling:
        """Forward pass of the SigLIP vision encoder.

        Args:
            pixel_values: Image tensor expected by SigLIP (B, C, H, W).
            output_attentions: Whether to return attention maps (config default if None).
            output_hidden_states: Whether to return hidden states (config default if None).
            interpolate_pos_encoding: Enable pos-encoding interpolation for different sizes.

        Returns:
            BaseModelOutputWithPooling with last_hidden_state and optionally pooled output.
        """
        if output_attentions is None:
            output_attentions = self.config.output_attentions
        if output_hidden_states is None:
            output_hidden_states = self.config.output_hidden_states

        patch_embeds = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
        # Encoder runs in bfloat16 under autocast.
        patch_embeds = patch_embeds.to(dtype=torch.bfloat16)
        with torch.autocast(device_type=get_device_name(), dtype=torch.bfloat16):
            encoder_outputs: BaseModelOutput = self.encoder(
                inputs_embeds=patch_embeds,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
            )

        last_hidden_state = self.post_layernorm(encoder_outputs.last_hidden_state)
        pooler_output = self.head(last_hidden_state) if self.use_head else None

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooler_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
# Copied from transformers.models.paligemma.modeling_paligemma.PaliGemmaMultiModalProjector
class PaliGemmaMultiModalProjector(nn.Module):
    """Linear projection from the vision hidden size to the LM hidden size."""

    def __init__(self, vision_hidden_size: int = 1152, projection_dim: int = 2048):
        super().__init__()
        self.linear = nn.Linear(vision_hidden_size, projection_dim, bias=True)

    def forward(self, image_features: torch.Tensor) -> torch.Tensor:
        """Project vision features to the transformer hidden size."""
        return self.linear(image_features)
class RoPEEmbedding(nn.Module):
    """Rotary position embedding with precomputed sin/cos tables.

    Angles are computed once for every position up to ``max_seq_len`` and
    cached as non-persistent buffers, so each forward pass only indexes the
    tables and multiplies — no trigonometric work per call.
    """

    def __init__(self, dim: int, max_wavelength: int = 10_000, max_seq_len: int = 8192):
        super().__init__()
        self.dim = dim
        self.max_wavelength = max_wavelength
        self.max_seq_len = max_seq_len
        half = dim // 2
        # Inverse frequencies: 1 / wavelength^(2i/dim), i in [0, half).
        exponents = torch.arange(half, dtype=torch.float32) * (2.0 / dim)
        inv_freq = 1.0 / (max_wavelength**exponents)
        # Angle table for every position: [max_seq_len, half], then expand to
        # [max_seq_len, 1, half] so it broadcasts over the head axis in forward.
        angles = torch.outer(torch.arange(max_seq_len, dtype=torch.float32), inv_freq)
        # Buffers follow the module across devices but stay out of state_dict.
        self.register_buffer("cos_cached", torch.cos(angles).unsqueeze(1), persistent=False)
        self.register_buffer("sin_cached", torch.sin(angles).unsqueeze(1), persistent=False)

    def forward(self, x: torch.Tensor, positions: torch.LongTensor) -> torch.Tensor:
        """Rotate ``x`` ([B, L, H, D]) by the cached angles at ``positions`` ([B, L]).

        Args:
            x: Input tensor of shape [B, L, H, D].
            positions: Position indices of shape [B, L]; must be < max_seq_len.

        Returns:
            Rotated tensor of shape [B, L, H, D], same dtype as the input.
        """
        orig_dtype = x.dtype
        x = x.to(torch.float32)
        # Gather per-position tables: [B, L, 1, D/2], broadcasting over heads.
        cos = self.cos_cached[positions]
        sin = self.sin_cached[positions]
        half = self.dim // 2
        first, second = x.split(half, dim=-1)  # each [B, L, H, D/2]
        # Standard rotation: (a, b) -> (a*cos - b*sin, b*cos + a*sin).
        rotated = torch.empty_like(x)
        rotated[..., :half] = first * cos - second * sin
        rotated[..., half:] = second * cos + first * sin
        return rotated.to(orig_dtype)
class GemmaAttentionWithExpert(nn.Module):
    """Joint self-attention over the PaliGemma and action-expert streams.

    Index 0 of each projection ModuleList serves the PaliGemma stream and
    index 1 the expert stream. Streams that are present are concatenated
    along the sequence dimension and attend jointly with a shared RoPE.
    Head geometry must match across the two streams (asserted in __init__).
    """

    def __init__(
        self,
        layer_idx: int,
        # PaliGemma params
        paligemma_hidden_size: int = 2048,
        paligemma_num_attention_heads: int = 8,
        paligemma_num_key_value_heads: int = 1,
        paligemma_head_dim: int = 256,
        paligemma_attention_bias: bool = False,
        # Expert params
        expert_hidden_size: int = 1024,
        expert_num_attention_heads: int = 8,
        expert_num_key_value_heads: int = 1,
        expert_head_dim: int = 256,
        expert_attention_bias: bool = False,
        # RoPE params
        rope_max_wavelength: int = 10_000,
        rope_max_seq_len: int = 8192,
    ):
        super().__init__()
        # layer_idx is used as the key into the shared KV-cache dict.
        self.layer_idx = layer_idx
        # Per-stream projections: [0] = PaliGemma, [1] = expert.
        self.q_proj = nn.ModuleList(
            [
                nn.Linear(
                    paligemma_hidden_size,
                    paligemma_num_attention_heads * paligemma_head_dim,
                    bias=paligemma_attention_bias,
                ),
                nn.Linear(expert_hidden_size, expert_num_attention_heads * expert_head_dim, bias=expert_attention_bias),
            ]
        )
        self.k_proj = nn.ModuleList(
            [
                nn.Linear(
                    paligemma_hidden_size,
                    paligemma_num_key_value_heads * paligemma_head_dim,
                    bias=paligemma_attention_bias,
                ),
                nn.Linear(expert_hidden_size, expert_num_key_value_heads * expert_head_dim, bias=expert_attention_bias),
            ]
        )
        self.v_proj = nn.ModuleList(
            [
                nn.Linear(
                    paligemma_hidden_size,
                    paligemma_num_key_value_heads * paligemma_head_dim,
                    bias=paligemma_attention_bias,
                ),
                nn.Linear(expert_hidden_size, expert_num_key_value_heads * expert_head_dim, bias=expert_attention_bias),
            ]
        )
        self.o_proj = nn.ModuleList(
            [
                nn.Linear(
                    paligemma_num_attention_heads * paligemma_head_dim,
                    paligemma_hidden_size,
                    bias=paligemma_attention_bias,
                ),
                nn.Linear(expert_num_attention_heads * expert_head_dim, expert_hidden_size, bias=expert_attention_bias),
            ]
        )
        self.paligemma_num_attention_heads = paligemma_num_attention_heads
        self.paligemma_num_key_value_heads = paligemma_num_key_value_heads
        self.paligemma_head_dim = paligemma_head_dim
        self.expert_num_attention_heads = expert_num_attention_heads
        self.expert_num_key_value_heads = expert_num_key_value_heads
        self.expert_head_dim = expert_head_dim
        # Joint attention requires identical head geometry across streams.
        assert paligemma_head_dim == expert_head_dim
        assert paligemma_num_attention_heads == expert_num_attention_heads
        assert paligemma_num_key_value_heads == expert_num_key_value_heads
        # One rotary embedding shared by both streams.
        self.rope_embedding = RoPEEmbedding(
            dim=paligemma_head_dim, max_wavelength=rope_max_wavelength, max_seq_len=rope_max_seq_len
        )

    def forward(
        self,
        inputs_embeds: list[Optional[torch.Tensor]],
        position_ids: torch.LongTensor,
        attention_mask: torch.Tensor,
        use_cache: bool,
        past_key_values: Optional[dict] = None,
        fill_kv_cache: bool = False,
    ) -> list[Optional[torch.Tensor]]:
        """Multi-source attention over PaliGemma and Expert streams.

        Args:
            inputs_embeds: [paligemma_embeds, expert_embeds]. Each is (B, L, D) or None.
            position_ids: (B, L) rotary positions.
            attention_mask: (B, L, L) attention mask.
            use_cache: Whether to use KV cache.
            past_key_values: Optional cache dict per layer.
            fill_kv_cache: If True, fill cache; otherwise, append to it.

        Returns:
            List[Optional[Tensor]]: outputs per stream aligned to inputs order.
        """
        query_states = []
        key_states = []
        value_states = []
        # Project each present stream with its own q/k/v weights; heads are
        # recovered by reshaping the last dim into (-1, head_dim).
        if inputs_embeds[0] is not None:
            # PaliGemma
            hidden_states = inputs_embeds[0]
            input_shape = hidden_states.shape[:-1]
            hidden_shape = (*input_shape, -1, self.paligemma_head_dim)
            query_states.append(self.q_proj[0](hidden_states).view(hidden_shape))
            key_states.append(self.k_proj[0](hidden_states).view(hidden_shape))
            value_states.append(self.v_proj[0](hidden_states).view(hidden_shape))
        if inputs_embeds[1] is not None:
            # Expert
            hidden_states = inputs_embeds[1]
            input_shape = hidden_states.shape[:-1]
            hidden_shape = (*input_shape, -1, self.expert_head_dim)
            query_states.append(self.q_proj[1](hidden_states).view(hidden_shape))
            key_states.append(self.k_proj[1](hidden_states).view(hidden_shape))
            value_states.append(self.v_proj[1](hidden_states).view(hidden_shape))
        # Concatenate the present streams along the sequence dimension so both
        # attend over one joint sequence.
        query_states = torch.cat(query_states, dim=1)
        key_states = torch.cat(key_states, dim=1)
        value_states = torch.cat(value_states, dim=1)
        # RoPE is applied before caching, so cached keys are already rotated.
        query_states = self.rope_embedding(query_states, position_ids)
        key_states = self.rope_embedding(key_states, position_ids)
        if use_cache:
            if fill_kv_cache:
                # Prefill: store this layer's K/V for subsequent decode calls.
                past_key_values[self.layer_idx] = {
                    "key_states": key_states,
                    "value_states": value_states,
                }
            else:
                # Decode: prepend cached prefix K/V to the freshly computed ones.
                key_states = torch.cat([past_key_values[self.layer_idx]["key_states"], key_states], dim=1)
                value_states = torch.cat([past_key_values[self.layer_idx]["value_states"], value_states], dim=1)
        num_att_heads = self.paligemma_num_attention_heads  # Assume same for both
        num_key_value_heads = self.paligemma_num_key_value_heads
        head_dim = self.paligemma_head_dim
        batch_size = query_states.shape[0]
        # (B, L, H, D) -> (B, H, L, D), the layout SDPA expects.
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)
        if num_key_value_heads != num_att_heads:
            # Grouped-query attention: replicate K/V heads to match query heads.
            # key_states: (B, num_kv_heads, L, D) -> (B, num_att_heads, L, D)
            key_states = torch.repeat_interleave(key_states, num_att_heads // num_key_value_heads, dim=1)
            value_states = torch.repeat_interleave(value_states, num_att_heads // num_key_value_heads, dim=1)
        att_output = F.scaled_dot_product_attention(
            query_states,
            key_states,
            value_states,
            attn_mask=attention_mask[:, None, :, :],  # broadcast mask over heads
            is_causal=False,
        )
        # Back to (B, L, H*D) for the per-stream output projections.
        att_output = att_output.permute(0, 2, 1, 3)
        att_output = att_output.reshape(batch_size, -1, num_att_heads * head_dim)
        # Split the joint sequence back into per-stream segments and project
        # each with its own o_proj, casting to the weight dtype when needed.
        outputs_embeds = []
        start = 0
        if inputs_embeds[0] is not None:
            hidden_states = inputs_embeds[0]
            end = start + hidden_states.shape[1]
            if att_output.dtype != self.o_proj[0].weight.dtype:
                att_output_i = att_output[:, start:end].to(self.o_proj[0].weight.dtype)
            else:
                att_output_i = att_output[:, start:end]
            out_emb = self.o_proj[0](att_output_i)
            outputs_embeds.append(out_emb)
            start = end
        else:
            outputs_embeds.append(None)
        if inputs_embeds[1] is not None:
            hidden_states = inputs_embeds[1]
            end = start + hidden_states.shape[1]
            if att_output.dtype != self.o_proj[1].weight.dtype:
                att_output_i = att_output[:, start:end].to(self.o_proj[1].weight.dtype)
            else:
                att_output_i = att_output[:, start:end]
            out_emb = self.o_proj[1](att_output_i)
            outputs_embeds.append(out_emb)
        else:
            outputs_embeds.append(None)
        return outputs_embeds
class GemmaMLP(nn.Module):
    """Gated feed-forward block: down(act(gate(x)) * up(x))."""

    def __init__(self, hidden_size: int = 1024, intermediate_size: int = 4096, hidden_act: str = "gelu_pytorch_tanh"):
        super().__init__()
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        # Gate/up expand to the intermediate size; down projects back.
        self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
        self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
        self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
        self.act_fn = ACT2FN[hidden_act]

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Gated MLP block used in both streams."""
        gated = self.act_fn(self.gate_proj(x))
        return self.down_proj(gated * self.up_proj(x))
class GemmaDecoderLayerWithExpert(nn.Module):
    """One decoder layer shared by the PaliGemma and action-expert streams.

    Every per-stream ModuleList indexes stream 0 (PaliGemma) and stream 1
    (expert). When ``pi05_enabled``, the expert-side norms are AdaRMS: given a
    conditioning vector they return both the normed activations and a gate
    that scales the residual branch.
    """

    def __init__(
        self,
        layer_idx: int,
        pi05_enabled: bool,
        # PaliGemma params
        paligemma_hidden_size: int = 2048,
        paligemma_num_attention_heads: int = 8,
        paligemma_num_key_value_heads: int = 1,
        paligemma_head_dim: int = 256,
        paligemma_attention_bias: bool = False,
        paligemma_intermediate_size: int = 16384,
        paligemma_hidden_act: str = "gelu_pytorch_tanh",
        paligemma_rms_norm_eps: float = 1e-6,
        # Expert params
        expert_hidden_size: int = 1024,
        expert_num_attention_heads: int = 8,
        expert_num_key_value_heads: int = 1,
        expert_head_dim: int = 256,
        expert_attention_bias: bool = False,
        expert_intermediate_size: int = 4096,
        expert_hidden_act: str = "gelu_pytorch_tanh",
        expert_rms_norm_eps: float = 1e-6,
        # RoPE params
        rope_max_wavelength: int = 10_000,
        rope_max_seq_len: int = 8192,
    ):
        super().__init__()
        self.self_attn = GemmaAttentionWithExpert(
            layer_idx,
            paligemma_hidden_size=paligemma_hidden_size,
            paligemma_num_attention_heads=paligemma_num_attention_heads,
            paligemma_num_key_value_heads=paligemma_num_key_value_heads,
            paligemma_head_dim=paligemma_head_dim,
            paligemma_attention_bias=paligemma_attention_bias,
            expert_hidden_size=expert_hidden_size,
            expert_num_attention_heads=expert_num_attention_heads,
            expert_num_key_value_heads=expert_num_key_value_heads,
            expert_head_dim=expert_head_dim,
            expert_attention_bias=expert_attention_bias,
            rope_max_wavelength=rope_max_wavelength,
            rope_max_seq_len=rope_max_seq_len,
        )
        self.mlps = nn.ModuleList(
            [
                GemmaMLP(paligemma_hidden_size, paligemma_intermediate_size, paligemma_hidden_act),
                GemmaMLP(expert_hidden_size, expert_intermediate_size, expert_hidden_act),
            ]
        )
        self.input_layernorms = nn.ModuleList(
            [
                GemmaRMSNorm(paligemma_hidden_size, eps=paligemma_rms_norm_eps),
                GemmaRMSNorm(expert_hidden_size, eps=expert_rms_norm_eps, use_ada_rms_norm=pi05_enabled),
            ]
        )
        self.post_attention_layernorms = nn.ModuleList(
            [
                GemmaRMSNorm(paligemma_hidden_size, eps=paligemma_rms_norm_eps),
                GemmaRMSNorm(expert_hidden_size, eps=expert_rms_norm_eps, use_ada_rms_norm=pi05_enabled),
            ]
        )
        self.pi05_enabled = pi05_enabled

    def gated_residual(self, x, y, gate):
        """Return ``x + y`` (scaled by ``gate`` if given); None if either is None."""
        if x is None or y is None:
            return None
        return x + y if gate is None else x + y * gate

    def forward(
        self,
        inputs_embeds: list[Optional[torch.Tensor]],
        adarms_cond: list[Optional[torch.Tensor]],
        position_ids: torch.LongTensor,
        attention_mask: torch.Tensor,
        use_cache: bool,
        past_key_values: Optional[dict] = None,
        fill_kv_cache: bool = False,
    ) -> list[Optional[torch.Tensor]]:
        """Decoder layer with dual-stream attention and optional AdaRMS modulation.

        Args:
            inputs_embeds: [paligemma, expert] embeds; entries may be None.
            adarms_cond: Optional conditioning vectors for AdaRMS.
            position_ids: (B, L) positions for RoPE.
            attention_mask: (B, L, L) attention mask.
            use_cache: Whether to use KV cache.
            past_key_values: Optional cache dict.
            fill_kv_cache: Whether to fill or reuse KV cache.

        Returns:
            (outputs, past_key_values): updated hidden states per stream and
            the (possibly replaced) cache dict.
        """

        def _apply_norm(norm, hidden, cond):
            # Returns (normed_hidden, gate); gate only exists under AdaRMS.
            if hidden is None:
                return None, None
            if self.pi05_enabled and cond is not None:
                return norm(hidden, cond)
            return norm(hidden), None

        residuals = list(inputs_embeds)

        # Pre-attention norm per stream (AdaRMS yields an extra gate).
        normed_embeds, attn_gates = [], []
        for i, hidden in enumerate(inputs_embeds):
            normed, gate = _apply_norm(self.input_layernorms[i], hidden, adarms_cond[i])
            normed_embeds.append(normed)
            attn_gates.append(gate)

        attn_outputs = self.self_attn(
            normed_embeds, position_ids, attention_mask, use_cache, past_key_values, fill_kv_cache
        )

        # Attention residual, then post-norm + gated MLP residual per stream.
        outputs = []
        for i, residual in enumerate(residuals):
            if residual is None:
                outputs.append(None)
                continue
            after_attn = self.gated_residual(residual, attn_outputs[i], attn_gates[i])
            normed, mlp_gate = _apply_norm(self.post_attention_layernorms[i], after_attn, adarms_cond[i])
            outputs.append(self.gated_residual(after_attn, self.mlps[i](normed), mlp_gate))
        return outputs, past_key_values
class PaliGemmaWithExpertModel(nn.Module):
    """PaliGemma VLM backbone paired with a smaller action-expert decoder.

    Both token streams share every decoder layer's joint attention while
    keeping separate projections, MLPs and norms per stream (index 0 =
    PaliGemma, index 1 = expert).
    """

    def __init__(
        self,
        pi05_enabled: bool = False,
        # Paligemma params
        paligemma_vocab_size: int = 257152,
        paligemma_pad_token_id: int = 0,
        paligemma_num_hidden_layers: int = 18,
        paligemma_hidden_size: int = 2048,
        paligemma_num_attention_heads: int = 8,
        paligemma_num_key_value_heads: int = 1,
        paligemma_attention_bias: bool = False,
        paligemma_intermediate_size: int = 16384,
        paligemma_hidden_act: str = "gelu_pytorch_tanh",
        paligemma_rms_norm_eps: float = 1e-6,
        # Expert params
        expert_hidden_size: int = 1024,
        expert_num_attention_heads: int = 8,
        expert_num_key_value_heads: int = 1,
        expert_head_dim: int = 256,
        expert_attention_bias: bool = False,
        expert_intermediate_size: int = 4096,
        expert_hidden_act: str = "gelu_pytorch_tanh",
        expert_rms_norm_eps: float = 1e-6,
        # RoPE params
        rope_max_wavelength: int = 10_000,
        rope_max_seq_len: int = 8192,
    ):
        super().__init__()
        self.pi05_enabled = pi05_enabled
        vision_cfg = get_transformers_siglip_vision_config()
        # Vision encoder plus projection into the LM hidden size.
        self.vision_tower = SiglipVisionTransformer(vision_cfg)
        self.multi_modal_projector = PaliGemmaMultiModalProjector(
            vision_hidden_size=vision_cfg.hidden_size,
            projection_dim=vision_cfg.projection_dim,
        )
        self.paligemma_hidden_size = paligemma_hidden_size
        # Language token embedding for the PaliGemma stream.
        self.embed_tokens = nn.Embedding(paligemma_vocab_size, paligemma_hidden_size, paligemma_pad_token_id)
        # All decoder layers share the same hyper-parameters except layer_idx.
        layer_kwargs = dict(
            pi05_enabled=pi05_enabled,
            paligemma_hidden_size=paligemma_hidden_size,
            paligemma_num_attention_heads=paligemma_num_attention_heads,
            paligemma_num_key_value_heads=paligemma_num_key_value_heads,
            # gemma convention: head_dim = hidden_size / num_heads
            paligemma_head_dim=paligemma_hidden_size // paligemma_num_attention_heads,
            paligemma_attention_bias=paligemma_attention_bias,
            paligemma_intermediate_size=paligemma_intermediate_size,
            paligemma_hidden_act=paligemma_hidden_act,
            paligemma_rms_norm_eps=paligemma_rms_norm_eps,  # gemma default
            expert_hidden_size=expert_hidden_size,
            expert_num_attention_heads=expert_num_attention_heads,
            expert_num_key_value_heads=expert_num_key_value_heads,
            expert_head_dim=expert_head_dim,
            expert_attention_bias=expert_attention_bias,
            expert_intermediate_size=expert_intermediate_size,
            expert_hidden_act=expert_hidden_act,
            expert_rms_norm_eps=expert_rms_norm_eps,
            rope_max_wavelength=rope_max_wavelength,
            rope_max_seq_len=rope_max_seq_len,
        )
        self.layers = nn.ModuleList(
            [GemmaDecoderLayerWithExpert(layer_idx=i, **layer_kwargs) for i in range(paligemma_num_hidden_layers)]
        )
        # Final per-stream norms (expert side is AdaRMS under pi0.5).
        self.norms = nn.ModuleList(
            [
                GemmaRMSNorm(paligemma_hidden_size, eps=1e-6),
                GemmaRMSNorm(expert_hidden_size, eps=expert_rms_norm_eps, use_ada_rms_norm=pi05_enabled),
            ]
        )

    def embed_image(self, image: torch.Tensor) -> torch.Tensor:
        """Encode images with SigLIP and project them to the LM hidden size."""
        vision_out = self.vision_tower(image)
        return self.multi_modal_projector(vision_out.last_hidden_state)

    def embed_language_tokens(self, tokens: torch.Tensor) -> torch.Tensor:
        """Look up embeddings for language token ids."""
        return self.embed_tokens(tokens)

    def forward(
        self,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[dict] = None,
        inputs_embeds: list[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        fill_kv_cache: Optional[bool] = None,
        adarms_cond: list[torch.FloatTensor] = None,
    ) -> tuple[list[Optional[torch.Tensor]], dict]:
        """Run the stacked dual-stream decoder with optional caching and AdaRMS.

        Args:
            attention_mask: (B, L, L) attention mask for both streams.
            position_ids: (B, L) RoPE positions.
            past_key_values: Optional KV cache dict to reuse.
            inputs_embeds: [paligemma_embeds, expert_embeds]; entries may be None.
            use_cache: Whether to use KV cache.
            fill_kv_cache: If True, populate cache from inputs.
            adarms_cond: Optional per-stream modulation vectors for AdaRMS.

        Returns:
            (outputs_embeds, past_key_values): outputs per stream and the KV cache.
        """
        hidden = [emb.to(dtype=torch.bfloat16) if emb is not None else None for emb in inputs_embeds]
        with torch.autocast(device_type=get_device_name(), dtype=torch.bfloat16):
            if use_cache and past_key_values is None:
                past_key_values = {}
            for layer in self.layers:
                # FSDP will make a copy of the "past_key_values" dictionary,
                # which needs to be reassigned each iteration.
                hidden, past_key_values = layer(
                    hidden,
                    adarms_cond=adarms_cond,
                    position_ids=position_ids,
                    attention_mask=attention_mask,
                    use_cache=use_cache,
                    past_key_values=past_key_values,
                    fill_kv_cache=fill_kv_cache,
                )
            outputs_embeds = []
            for i, h in enumerate(hidden):
                if h is None:
                    outputs_embeds.append(None)
                elif self.pi05_enabled and adarms_cond[i] is not None:
                    normed, _ = self.norms[i](h, adarms_cond[i])
                    outputs_embeds.append(normed)
                else:
                    outputs_embeds.append(self.norms[i](h))
        return outputs_embeds, past_key_values
| {
"repo_id": "verl-project/verl",
"file_path": "verl/experimental/vla/models/pi0_torch/model/paligemma_with_expert.py",
"license": "Apache License 2.0",
"lines": 630,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/experimental/vla/models/pi0_torch/modeling_pi0_torch.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import Literal
import torch
from onnx_ir import Tensor
from torch import nn
from torch.distributed.fsdp import register_fsdp_forward_method
from torch.distributions import Normal
from transformers import PreTrainedModel
from typing_extensions import override
from verl.protocol import DataProto
from verl.utils.device import get_device_name
from ...sac.base import SupportSACTraining
from ..modules.mlp import MLP
from .configuration_pi0_torch import PI0TorchConfig
from .model.modeling_pi0 import PI0Model, make_att_2d_masks
from .pi0_utils import (
ImageTransform,
Normalize,
PromptTokenizerTransform,
Unnormalize,
)
from .policy.base import Pi0Output
class PI0ForActionPrediction(PreTrainedModel, SupportSACTraining):
config_class = PI0TorchConfig
base_model_prefix = "pi0_torch"
def __init__(self, config: PI0TorchConfig):
super().__init__(config)
self.model: PI0Model = None
self.state_norm_stats = config.state_norm_stats
self.action_norm_stats = config.action_norm_stats
self.pi05_enabled = config.pi05_enabled
assert self.state_norm_stats, "state_norm_stats must be provided in PI0TorchConfig"
assert self.action_norm_stats, "action_norm_stats must be provided in PI0TorchConfig"
assert isinstance(self.pi05_enabled, bool), "pi05_enabled must be provided in PI0TorchConfig"
# Input transforms
self.state_normalize_transform = Normalize(self.state_norm_stats, use_quantiles=self.pi05_enabled)
self.action_normalize_transform = Normalize(self.action_norm_stats, use_quantiles=self.pi05_enabled)
self.image_transform = ImageTransform(resize_imgs_with_padding=(224, 224), enable_image_aug=False)
max_length = 200 if self.pi05_enabled else 48
self.prompt_tokenizer_transform = PromptTokenizerTransform(max_length=max_length, discrete_state_input=False)
# Output transforms
self.state_unnormalize_transform = Unnormalize(self.state_norm_stats, use_quantiles=self.pi05_enabled)
self.action_unnormalize_transform = Unnormalize(self.action_norm_stats, use_quantiles=self.pi05_enabled)
self._to(get_device_name())
##### SAC Algorithm Support #####
if getattr(self.config, "sac_enable", False):
head_num = 2 if getattr(self.config, "double_q", True) else 1
self.critic_heads = nn.ModuleList(
[
MLP(
input_dim=2150, # 2048(prefix mean) + 32(state) + 10*7(action flat)
hidden_dims=[1024, 512, 256],
output_dim=1,
activation="relu",
init_method="normal",
)
for _ in range(head_num)
]
)
self.target_network_heads = nn.ModuleList(
[
MLP(
input_dim=2150,
hidden_dims=[1024, 512, 256],
output_dim=1,
activation="relu",
init_method="normal",
)
for _ in range(head_num)
]
)
def _to(self, device: torch.device | str):
self.state_normalize_transform.to(device)
self.state_unnormalize_transform.to(device)
self.action_normalize_transform.to(device)
self.action_unnormalize_transform.to(device)
return self
def forward(
self,
images: list[torch.Tensor],
img_masks: list[torch.Tensor],
lang_tokens: torch.Tensor,
lang_masks: torch.Tensor,
state: torch.Tensor,
x_t: torch.Tensor,
timestep: torch.Tensor,
) -> Tensor:
"""Full forward pass for one diffusion denoising step.
Args:
images: List of image tensors, each shaped (B, C, H, W) after batching.
img_masks: List of boolean masks corresponding to images, each (B,).
lang_tokens: Language token ids (B, L).
lang_masks: Language attention mask (B, L) with True for valid tokens.
state: State tensor (B, state_dim) if pi05 is disabled else ignored.
x_t: Noisy action tokens (B, n_action_steps, action_dim).
timestep: Diffusion timestep as float tensor (B,).
Returns:
Predicted v_t with shape (B, n_action_steps, action_dim).
"""
if self.model is None:
raise RuntimeError("PI0ForActionPrediction.model is not initialized. Did from_pretrained() run?")
return self.model(
images,
img_masks,
lang_tokens,
lang_masks,
state,
x_t,
timestep,
)
@torch.no_grad()
def sample_actions(
self,
env_obs: DataProto,
tokenizer,
) -> tuple[Pi0Output, dict, dict]:
"""Run one forward pass from raw inputs to final action sequence.
Args:
env_obs: The environment observations as DataProto.
tokenizer: The tokenizer used for prompt tokenization.
Returns:
A tuple of (pi0_output, s, a):
- pi0_output: The Pi0Output containing the predicted actions.
- s: Dictionary of tensors representing the states, with keys
- "images": torch.Tensor of shape (B, n_images, C, H, W)
- "image_masks": torch.Tensor of shape (B, n_images)
- "lang_tokens": torch.Tensor of shape (B, L)
- "lang_masks": torch.Tensor of shape (B, L)
- "states": torch.Tensor of shape (B, state_dim)
- a: Dictionary of tensors representing actions, with key:
- "full_action": torch.Tensor of shape (B, action_steps, action_dim)
"""
from .policy.libero_policy import LiberoPi0Input
pi0_input = LiberoPi0Input.from_env_obs(env_obs)
# Input transforms
state = self.state_normalize_transform(pi0_input.state)
images, _ = self.image_transform.call_batch(pi0_input.images)
lang_tokens, lang_masks = self.prompt_tokenizer_transform.call_batch(
{"task": pi0_input.task, "observation.state": state}, tokenizer
)
# Inference
pred_action = self.model.sample_actions(images, pi0_input.img_masks, lang_tokens, lang_masks, state=state)
# Output transforms
# state = self.state_unnormalize_transform(state)
pred_action = self.action_unnormalize_transform(pred_action)
from .policy.libero_policy import LiberoPi0Output
pi0_output = LiberoPi0Output.from_model_output({"full_action": pred_action})
s = {
"states": state,
"images": torch.stack(images, dim=1),
"image_masks": torch.stack(pi0_input.img_masks, dim=1),
"lang_tokens": lang_tokens,
"lang_masks": lang_masks,
}
a = {
"full_action": pred_action,
}
return pi0_output, s, a
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
config = kwargs.pop("config", None)
if config is None:
config = PI0TorchConfig.from_pretrained(pretrained_model_name_or_path)
policy = cls(config)
policy.model = PI0Model.from_pretrained(pretrained_model_name_or_path)
return policy
def freeze_vision_tower(self) -> None:
"""Freeze the vision tower parameters."""
if self.model is None:
raise RuntimeError("PI0ForActionPrediction.model is not initialized. Did from_pretrained() run?")
vision_tower = self.model.paligemma_with_expert.vision_tower
vision_tower.requires_grad_(False)
vision_tower.eval()
# --- SAC Algorithm Support ---
def _multi_heads_value(
self, value_heads: nn.ModuleList, input_tensor: torch.Tensor, method: Literal["cat", "min"] = "cat"
) -> torch.Tensor:
q_values = [head(input_tensor) for head in value_heads]
if method == "cat":
q_values = torch.cat(q_values, dim=-1)
elif method == "min":
q_values = torch.min(torch.cat(q_values, dim=-1), dim=-1).values
else:
raise ValueError(f"Unknown method: {method}")
return q_values
def _build_kv_cache_from_prefix(
self,
prefix_features: tuple[torch.Tensor, torch.Tensor, torch.Tensor],
):
"""Build KV cache for prefix. No grad needed."""
prefix_embs, prefix_pad_masks, prefix_att_masks = prefix_features
prefix_att_2d_masks = make_att_2d_masks(prefix_pad_masks, prefix_att_masks)
prefix_position_ids = torch.cumsum(prefix_pad_masks, dim=1) - 1
with torch.no_grad():
_, past_key_values = self.model.paligemma_with_expert.forward(
attention_mask=prefix_att_2d_masks,
position_ids=prefix_position_ids,
past_key_values=None,
inputs_embeds=[prefix_embs, None],
use_cache=self.model.use_cache,
fill_kv_cache=True,
adarms_cond=[None, None],
)
return past_key_values
def _get_logprobs(
self,
s: dict[str, torch.Tensor],
prefix_features: tuple[torch.Tensor, torch.Tensor, torch.Tensor],
*,
x_t: torch.Tensor | None = None, # (B, T, A)
x_next: torch.Tensor | None = None, # (B, T, A)
v_t: torch.Tensor | None = None, # (B, T, A)
t: torch.Tensor | None = None, # (B,)
step_idx: torch.Tensor | None = None, # (B,)
) -> torch.Tensor:
"""
Compute log-probability of x_{t+1} given (x_t, v_t) under the Flow-SDE formulation.
See https://arxiv.org/abs/2510.25889
"""
prefix_embs, prefix_pad_masks, _ = prefix_features
states = s["states"]
B = prefix_embs.shape[0]
device = prefix_embs.device
past_key_values = self._build_kv_cache_from_prefix(prefix_features)
if x_t is None or x_next is None or v_t is None or t is None:
actions_shape = (B, self.model.n_action_steps, self.model.max_action_dim)
x = self.model.sample_noise(actions_shape, device=device)
dt = -1.0 / float(self.model.num_steps)
t_grid = torch.arange(1.0, -dt / 2, dt, dtype=torch.float32, device=device)
x_prev, v_prev, t_prev = None, None, None
for tt in t_grid:
x_prev = x
t_prev = tt
v_prev = self.model.denoise_step(
states,
prefix_pad_masks,
past_key_values,
x,
tt.expand(B),
)
x = x + dt * v_prev
x_t = x_prev
x_next = x
v_t = v_prev
t = t_prev.expand(B)
# sigma schedule step index
K = int(self.model.num_steps)
if step_idx is None:
step_idx = torch.full((B,), K - 1, device=device, dtype=torch.long)
# one-step mean/std
dt_pos = 1.0 / float(K)
t_b = t[:, None, None] # (B,1,1)
dt_b = torch.full_like(t_b, dt_pos)
x0_pred = x_t - v_t * t_b
x1_pred = x_t + v_t * (1.0 - t_b)
# heuristic sigma schedule (ported family)
noise_level = 0.5
t_grid_full = torch.arange(1.0, -dt_pos / 2, -dt_pos, dtype=torch.float32, device=device) # len=K+1
t_for_sigma = torch.where(t_grid_full == 1.0, t_grid_full[1], t_grid_full)
sigmas = noise_level * torch.sqrt(t_grid_full / (1.0 - t_for_sigma).clamp_min(1e-6))
sigmas = sigmas[:-1] # len=K
sigma_i = sigmas[step_idx][:, None, None].clamp_min(1e-6) # (B,1,1)
x0_weight = torch.ones_like(t_b) - (t_b - dt_b)
x1_weight = t_b - dt_b - (sigma_i**2) * dt_b / (2.0 * t_b.clamp_min(1e-6))
x_next_mean = x0_pred * x0_weight + x1_pred * x1_weight
x_next_std = (dt_b.sqrt() * sigma_i).clamp_min(1e-6)
dist = Normal(x_next_mean.float(), x_next_std.float())
log_probs = dist.log_prob(x_next.float()).sum(dim=2).mean(dim=1) # (B,)
return log_probs
def _sample_actions_and_logprobs_from_prefix(
self,
states: torch.Tensor,
prefix_features: tuple[torch.Tensor, torch.Tensor, torch.Tensor],
) -> tuple[torch.Tensor, torch.Tensor]:
"""
Sample actions amd compute logprob aligned with those sampled actions.
Args:
states: (B, state_dim)
prefix_features: tuple of (prefix_embs, prefix_pad_masks, prefix_att_masks)
Returns:
actions: (B, n_action_steps, action_dim)
log_probs: (B,)
"""
prefix_embs, prefix_pad_masks, _ = prefix_features
B = prefix_embs.shape[0]
device = prefix_embs.device
past_key_values = self._build_kv_cache_from_prefix(prefix_features)
actions_shape = (B, self.model.n_action_steps, self.model.max_action_dim)
x = self.model.sample_noise(actions_shape, device=device)
dt = -1.0 / float(self.model.num_steps)
t_grid = torch.arange(1.0, -dt / 2, dt, dtype=torch.float32, device=device) # len=K
x_prev, v_prev, t_prev = None, None, None
for tt in t_grid:
x_prev = x
t_prev = tt
v_prev = self.model.denoise_step(
states,
prefix_pad_masks,
past_key_values,
x,
tt.expand(B),
)
x = x + dt * v_prev
actions = x # x_K
# aligned logprob: use last transition (K-1)
step_idx = torch.full((B,), int(self.model.num_steps) - 1, device=device, dtype=torch.long)
log_probs = self._get_logprobs(
{"states": states},
prefix_features,
x_t=x_prev,
x_next=actions,
v_t=v_prev,
t=t_prev.expand(B),
step_idx=step_idx,
)
return actions, log_probs
@override
def sac_init(self):
"""Initialize SAC-related components."""
self.freeze_vision_tower()
register_fsdp_forward_method(self, "sac_forward_critic")
register_fsdp_forward_method(self, "sac_forward_actor")
register_fsdp_forward_method(self, "sac_update_target_network")
register_fsdp_forward_method(self, "sac_forward_state_features")
@override
def sac_forward_actor(
self,
state_features: tuple[tuple[torch.Tensor, torch.Tensor, torch.Tensor], torch.Tensor],
) -> tuple[torch.Tensor, torch.Tensor]:
prefix_features, states = state_features
actions, log_probs = self._sample_actions_and_logprobs_from_prefix(states, prefix_features)
return actions, log_probs
@override
def sac_forward_critic(
self,
a: dict[str, torch.Tensor],
state_features: tuple[tuple[torch.Tensor, torch.Tensor, torch.Tensor], torch.Tensor],
*,
use_target_network: bool = False,
method: Literal["cat", "min"] = "cat",
requires_grad: bool = False,
):
critic_head = self.target_network_heads if use_target_network else self.critic_heads
for p in critic_head.parameters():
p.requires_grad_(requires_grad)
prefix_features, states = state_features
prefix_embs, _, _ = prefix_features
mean_prefix_embs = prefix_embs.mean(dim=1, keepdim=False) # (B, 2048)
actions = self.action_normalize_transform(a["full_action"]) # (B, 50, 32)
actions = actions[:, :10, :7] # (B, 10, 7)
flattened_actions = actions.reshape(actions.shape[0], -1) # (B, 70)
critic_input = torch.cat([mean_prefix_embs, states, flattened_actions], dim=-1) # (B, 2150)
q_values = self._multi_heads_value(critic_head, critic_input, method=method)
return q_values
@override
def sac_forward_state_features(
    self, s: dict[str, torch.Tensor]
) -> tuple[tuple[torch.Tensor, torch.Tensor, torch.Tensor], torch.Tensor]:
    """Embed the observation (images + language) into prefix features.

    Runs under no_grad: the prefix encoder is treated as a frozen feature
    extractor for SAC. Returns (prefix_features, states).
    """
    with torch.no_grad():
        prefix_features = self.model.embed_prefix(
            images=s["images"].unbind(dim=1),
            img_masks=s["image_masks"].unbind(dim=1),
            lang_tokens=s["lang_tokens"],
            lang_masks=s["lang_masks"],
        )
    return prefix_features, s["states"]
@override
@torch.no_grad()
def sac_update_target_network(self, tau: float):
    """Polyak-average the online critic heads into the target heads.

    target <- (1 - tau) * target + tau * online
    """
    head_pairs = zip(self.target_network_heads, self.critic_heads, strict=False)
    for target_head, online_head in head_pairs:
        param_pairs = zip(target_head.parameters(), online_head.parameters(), strict=False)
        for target_param, online_param in param_pairs:
            target_param.data.mul_(1.0 - tau).add_(online_param.data, alpha=tau)
| {
"repo_id": "verl-project/verl",
"file_path": "verl/experimental/vla/models/pi0_torch/modeling_pi0_torch.py",
"license": "Apache License 2.0",
"lines": 385,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/experimental/vla/models/pi0_torch/pi0_utils.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright 2025 Giga Team. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# from https://github.com/open-gigaai/giga-models
from typing import Any
import torch
import torch.nn.functional as F
from torchvision import transforms
class Normalize:
"""Normalize robot state vectors using mean/std or quantiles.
Args:
stats: A dict containing either {'mean', 'std'} or {'q01', 'q99'}.
use_quantiles: If True, use quantile based normalization.
"""
def __init__(self, stats: dict[str, Any], *, use_quantiles: bool = False) -> None:
self.EPSILON = 1e-6
self.stats = stats
self.use_quantiles = use_quantiles
required_attrs = ["mean", "std"]
if self.use_quantiles:
required_attrs = ["q01", "q99"]
for attr in required_attrs:
if attr not in stats:
raise AttributeError(f"stats object is missing the following attribute: {attr}")
if self.use_quantiles:
self.q01 = torch.tensor(stats["q01"], dtype=torch.float32)
self.q99 = torch.tensor(stats["q99"], dtype=torch.float32)
else:
self.mean = torch.tensor(stats["mean"], dtype=torch.float32)
self.std = torch.tensor(stats["std"], dtype=torch.float32)
def to(self, device: torch.device | str) -> None:
if self.use_quantiles:
self.q01 = self.q01.to(device)
self.q99 = self.q99.to(device)
else:
self.mean = self.mean.to(device)
self.std = self.std.to(device)
def __call__(self, x: torch.Tensor) -> torch.Tensor:
x_dim = x.shape[-1]
if self.use_quantiles:
return (x - self.q01[..., :x_dim]) / (
self.q99[..., :x_dim] - self.q01[..., :x_dim] + self.EPSILON
) * 2.0 - 1.0
else:
return (x - self.mean[..., :x_dim]) / (self.std[..., :x_dim] + self.EPSILON)
class Unnormalize:
def __init__(self, stats, *, use_quantiles: bool = False):
self.EPSILON = 1e-6
self.stats = stats
self.use_quantiles = use_quantiles
if self.use_quantiles:
self.q01 = torch.tensor(stats["q01"], dtype=torch.float32)
self.q99 = torch.tensor(stats["q99"], dtype=torch.float32)
else:
self.mean = torch.tensor(stats["mean"], dtype=torch.float32)
self.std = torch.tensor(stats["std"], dtype=torch.float32)
def to(self, device: torch.device | str) -> None:
if self.use_quantiles:
self.q01 = self.q01.to(device)
self.q99 = self.q99.to(device)
else:
self.mean = self.mean.to(device)
self.std = self.std.to(device)
def __call__(self, x: torch.Tensor) -> torch.Tensor:
x_dim = x.shape[-1]
if self.use_quantiles:
return (x + 1.0) / 2.0 * (self.q99[..., :x_dim] - self.q01[..., :x_dim] + self.EPSILON) + self.q01[
..., :x_dim
]
else:
return x * (self.std[..., :x_dim] + self.EPSILON) + self.mean[..., :x_dim]
class DeltaActions:
"""Repacks absolute actions into delta action space."""
def __init__(self):
# If the robot has mobile base, masks of base action are False and it doesn't need to be specified explicitly.
self.mask = torch.tensor([True, True, True, True, True, True, False, True, True, True, True, True, True, False])
def to(self, device: torch.device | str) -> None:
self.mask = self.mask.to(device)
def __call__(self, data: dict[str, Any]) -> dict[str, Any]:
if "action" not in data or "observation.state" not in data:
return data
state, action = data["observation.state"], data["action"]
dims = self.mask.shape[-1]
action[..., :dims] -= torch.where(self.mask, state[..., :dims], torch.zeros_like(state[..., :dims])).unsqueeze(
-2
)
data["action"] = action
return data
class AbsoluteActions:
"""Repacks delta actions into absolute action space."""
def __init__(self):
# If the robot has mobile base, masks of base action are False and it doesn't need to be specified explicitly.
self.mask = torch.tensor([True, True, True, True, True, True, False, True, True, True, True, True, True, False])
def to(self, device: torch.device | str) -> None:
self.mask = self.mask.to(device)
def __call__(self, data: dict[str, Any]) -> dict[str, Any]:
if "action" not in data or "observation.state" not in data:
return data
state, action = data["observation.state"], data["action"]
dims = self.mask.shape[-1]
action[..., :dims] += torch.where(self.mask, state[..., :dims], torch.zeros_like(state[..., :dims])).unsqueeze(
-2
)
data["action"] = action
return data
class AlohaInputs:
    """Inputs for the Aloha policy.

    Adapts Aloha-convention observations/actions to the pi0 convention:
    flips the sign of selected joints and maps gripper values between the
    Aloha linear space and pi0's angular space.
    """

    def __init__(self, adapt_to_pi: bool = True) -> None:
        # Per-joint sign flips over the 14 joint dims (two 7-dof arms). The
        # state layout is [left joints(6), left gripper, right joints(6),
        # right gripper] (see _decode_aloha); grippers (indices 6, 13) are
        # handled separately by the gripper transforms below.
        self.joint_flip_mask = torch.tensor([1, -1, -1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1])
        # When False, every transform below is a pass-through.
        self.adapt_to_pi = adapt_to_pi

    def to(self, device: torch.device | str) -> None:
        """Move the flip mask to *device* in place."""
        self.joint_flip_mask = self.joint_flip_mask.to(device)

    def _gripper_from_angular_inv(self, value: torch.Tensor) -> torch.Tensor:
        # Directly inverts the gripper_from_angular function.
        value = _unnormalize(value, min_val=-0.6213, max_val=1.4910)
        return value - 0.5476

    def _gripper_to_angular(self, value: torch.Tensor) -> torch.Tensor:
        # Aloha transforms the gripper positions into a linear space. The following code
        # reverses this transformation to be consistent with pi0 which is pretrained in
        # angular space.
        #
        # These values are coming from the Aloha code:
        # PUPPET_GRIPPER_POSITION_OPEN, PUPPET_GRIPPER_POSITION_CLOSED
        value = _unnormalize(value, min_val=0.01844, max_val=0.05800)

        # This is the inverse of the angular to linear transformation inside the Interbotix code.
        def linear_to_radian(linear_position, arm_length, horn_radius):
            value = (horn_radius**2 + linear_position**2 - arm_length**2) / (2 * horn_radius * linear_position)
            return torch.arcsin(torch.clip(value, -1.0, 1.0))

        # The constants are taken from the Interbotix code.
        value = linear_to_radian(value, arm_length=0.036, horn_radius=0.022)
        # pi0 gripper data is normalized (0, 1) between encoder counts (2405, 3110).
        # There are 4096 total encoder counts and aloha uses a zero of 2048.
        # Converting this to radians means that the normalized inputs are between (0.5476, 1.6296)
        return _normalize(value, min_val=0.5476, max_val=1.6296)

    def _encode_actions_inv(self, actions: torch.Tensor) -> torch.Tensor:
        # Mutates in place: flip joint signs, then undo the angular gripper map.
        # assumes actions is (T, action_dim >= 14) -- TODO confirm against caller
        if self.adapt_to_pi:
            actions[:, :14] = self.joint_flip_mask * actions[:, :14]
            actions[:, [6, 13]] = self._gripper_from_angular_inv(actions[:, [6, 13]])
        return actions

    def _decode_state(self, state: torch.Tensor) -> torch.Tensor:
        # Mutates in place; operates on a single unbatched state vector.
        if self.adapt_to_pi:
            # Flip the joints.
            state[:14] = self.joint_flip_mask * state[:14]
            # Reverse the gripper transformation that is being applied by the Aloha runtime.
            state[[6, 13]] = self._gripper_to_angular(state[[6, 13]])
        return state

    def _decode_aloha(self, state: torch.Tensor) -> torch.Tensor:
        # state is [left_arm_joint_angles, left_arm_gripper, right_arm_joint_angles, right_arm_gripper]
        # dim sizes: [6, 1, 6, 1]
        state = self._decode_state(state)
        return state

    def __call__(self, data: dict[str, Any]) -> dict[str, Any]:
        """Decode Aloha-specific input formats into the pi0 training/runtime
        format. Mutates and returns *data*."""
        state = self._decode_aloha(data["observation.state"])
        data["observation.state"] = state

        # Actions are only available during training.
        if "action" in data:
            actions = data["action"]
            actions = self._encode_actions_inv(actions)
            data["action"] = actions
        return data

    # VeRL: Batch Inference
    def _encode_actions_inv_batch(self, actions: torch.Tensor) -> torch.Tensor:
        # Batched variant of _encode_actions_inv (arbitrary leading dims).
        if self.adapt_to_pi:
            actions[..., :14] = self.joint_flip_mask * actions[..., :14]
            actions[..., [6, 13]] = self._gripper_from_angular_inv(actions[..., [6, 13]])
        return actions

    def _decode_state_batch(self, state: torch.Tensor) -> torch.Tensor:
        # Batched variant of _decode_state (arbitrary leading dims).
        if self.adapt_to_pi:
            state[..., :14] = self.joint_flip_mask * state[..., :14]
            state[..., [6, 13]] = self._gripper_to_angular(state[..., [6, 13]])
        return state

    def call_batch(self, data: dict[str, Any]) -> dict[str, Any]:
        """Batched version of __call__ for VeRL batch inference."""
        state = self._decode_state_batch(data["observation.state"])
        data["observation.state"] = state
        if "action" in data:
            actions = data["action"]
            actions = self._encode_actions_inv_batch(actions)
            data["action"] = actions
        return data
class AlohaOutputs:
"""Outputs for the Aloha policy."""
def __init__(self, original_action_dim: int, adapt_to_pi: bool = True):
"""
Args:
original_action_dim: int. The original action dimension of the policy. dual-arm robot has 14 dims and mobile
dual-arm robot has 16 dims.
adapt_to_pi: bool. If true, this will convert the joint and gripper values from the standard Aloha space to
the space used by the pi internal runtime which was used to train the base model.
"""
self.joint_flip_mask = torch.tensor([1, -1, -1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1])
self.original_action_dim = original_action_dim
self.adapt_to_pi = adapt_to_pi
def to(self, device: torch.device | str) -> None:
self.joint_flip_mask = self.joint_flip_mask.to(device)
def _gripper_from_angular(self, value: torch.Tensor) -> torch.Tensor:
# Convert from the gripper position used by pi0 to the gripper position that is used by Aloha.
# Note that the units are still angular but the range is different.
# We do not scale the output since the trossen model predictions are already in radians.
# See the comment in _gripper_to_angular for a derivation of the constant
value = value + 0.5476
# These values are coming from the Aloha code:
# PUPPET_GRIPPER_JOINT_OPEN, PUPPET_GRIPPER_JOINT_CLOSE
return _normalize(value, min_val=-0.6213, max_val=1.4910)
def _encode_actions(self, actions: torch.Tensor) -> torch.Tensor:
if self.adapt_to_pi:
# Flip the joints.
actions[:, :14] = self.joint_flip_mask * actions[:, :14]
actions[:, [6, 13]] = self._gripper_from_angular(actions[:, [6, 13]])
return actions
def __call__(self, data: dict[str, Any]) -> dict[str, Any]:
actions = data["action"][:, : self.original_action_dim]
return {"action": self._encode_actions(actions)}
# VeRL: Batch Inference
def _encode_actions_batch(self, actions: torch.Tensor) -> torch.Tensor:
if self.adapt_to_pi:
actions[..., :14] = self.joint_flip_mask * actions[..., :14]
actions[..., [6, 13]] = self._gripper_from_angular(actions[..., [6, 13]])
return actions
def call_batch(self, data: dict[str, Any]) -> dict[str, Any]:
actions = data["action"][..., : self.original_action_dim]
return {"action": self._encode_actions_batch(actions)}
class PadStatesAndActions:
    """Zero-pads states and actions to the model action dimension."""

    def __init__(self, action_dim: int) -> None:
        self.action_dim = action_dim

    def _pad_to_dim(self, x: torch.Tensor, target_dim: int, axis: int = -1) -> torch.Tensor:
        """Right-pad the last dimension of *x* with zeros up to *target_dim*.

        Returns *x* unchanged when it is already wide enough.
        """
        current_dim = x.shape[axis]
        if current_dim >= target_dim:
            return x
        padded_shape = list(x.shape)
        padded_shape[-1] = target_dim
        padded = torch.zeros(*padded_shape, dtype=x.dtype, device=x.device)
        padded[..., :current_dim] = x
        return padded

    def __call__(self, data: dict[str, Any]) -> dict[str, Any]:
        """Pad 'observation.state' (and 'action' when present) to action_dim."""
        data["observation.state"] = self._pad_to_dim(data["observation.state"], self.action_dim, axis=-1)
        if "action" in data:
            data["action"] = self._pad_to_dim(data["action"], self.action_dim, axis=-1)
        return data
def _normalize(x: torch.Tensor, min_val: float, max_val: float) -> torch.Tensor:
return (x - min_val) / (max_val - min_val)
def _unnormalize(x: torch.Tensor, min_val: float, max_val: float) -> torch.Tensor:
return x * (max_val - min_val) + min_val
def resize_with_pad(img: torch.Tensor, width: int, height: int, pad_value: float = -1.0) -> torch.Tensor:
    """Resize an image to fit inside the given (width, height) while preserving
    aspect ratio, then pad with the specified value so that the final image
    exactly matches the target size.

    Args:
        img: Input image, shape (C, H, W), with values typically in [0, 1].
        width: Target width (W).
        height: Target height (H).
        pad_value: Value to use for padding, defaults to -1.

    Returns:
        A torch.Tensor of shape (C, height, width).

    Raises:
        ValueError: If *img* is not 3-dimensional.
    """
    # Validate input dimensions
    if img.ndim != 3:
        raise ValueError(f"(C,H,W) expected, but got {img.shape}")
    cur_height, cur_width = img.shape[1:]
    # Shrink by the dominant ratio so the resized image fits inside the target.
    ratio = max(cur_width / width, cur_height / height)
    resized_height = int(cur_height / ratio)
    resized_width = int(cur_width / ratio)
    resized_img = F.interpolate(
        img.unsqueeze(0), size=(resized_height, resized_width), mode="bilinear", align_corners=False
    ).squeeze(0)
    # Center the resized image inside the target canvas.
    pad_height = max(0, int(height - resized_height))
    pad_width = max(0, int(width - resized_width))
    pad_top = pad_height // 2
    pad_bottom = pad_height - pad_top
    pad_left = pad_width // 2
    pad_right = pad_width - pad_left
    # BUG FIX: the padded result is already (C, H, W); the previous trailing
    # `.squeeze(0)` dropped the channel axis for single-channel (C == 1) images.
    return F.pad(resized_img, (pad_left, pad_right, pad_top, pad_bottom), value=pad_value)
class ImageTransform:
    """Resize/pad camera images to a fixed size, optionally augment them, and
    rescale pixel values for the pi0 model."""

    def __init__(
        self,
        resize_imgs_with_padding: tuple[int, int],
        present_img_keys: list[str] | None = None,
        enable_image_aug: bool = False,
    ) -> None:
        # Args:
        #   resize_imgs_with_padding: target size for all images.
        #   present_img_keys: observation keys to read images from; defaults
        #       to the three standard camera keys below.
        #   enable_image_aug: apply color jitter (all cameras) plus
        #       crop/resize/rotate (non-wrist cameras only).
        self.resize_imgs_with_padding = resize_imgs_with_padding
        self.present_img_keys = present_img_keys
        if self.present_img_keys is None:
            self.present_img_keys = [
                "observation.images.cam_high",
                "observation.images.cam_left_wrist",
                "observation.images.cam_right_wrist",
            ]
        self.enable_image_aug = enable_image_aug
        self.width, self.height = resize_imgs_with_padding
        if self.enable_image_aug:
            self.color_jitter_transform = transforms.ColorJitter(
                brightness=0.3,
                contrast=0.4,
                saturation=0.5,
            )
            # NOTE(review): torchvision RandomCrop's second positional arg is
            # `padding`, not a second size component, so height*0.95 is passed
            # as padding here -- likely RandomCrop((h*0.95, w*0.95)) was
            # intended; confirm. Also torchvision size tuples are (h, w) while
            # (self.width, self.height) is passed to Resize -- harmless only
            # when width == height.
            self.pose_transform = transforms.Compose(
                [
                    transforms.RandomCrop(int(self.width * 0.95), int(self.height * 0.95)),
                    transforms.Resize((self.width, self.height)),
                    transforms.RandomRotation((-5, 5)),
                ]
            )

    def __call__(self, data: dict[str, torch.Tensor]) -> tuple[list[torch.Tensor], list[torch.Tensor]]:
        """Preprocesses input images: optionally scales and pads to a fixed size,
        then maps the pixel range from [0,1] to [-1,1].

        Returns two lists:
            images: The processed image arrays (C, H, W).
            img_masks: A list of boolean masks of the same length as images, currently fixed to True.
        """
        images = []
        img_masks = []
        for key in self.present_img_keys:
            if key not in data:
                raise ValueError(
                    f"{key} not found in data. Please check the present_img_keys in the config or the dataset."
                )
            img = data[key]
            # [C, H, W] -> preprocess
            if self.resize_imgs_with_padding is not None:
                # NOTE(review): this unpacks the tuple as (h, w) while
                # __init__ unpacked it as (w, h); consistent only for square
                # targets -- confirm.
                original_height, original_width = img.shape[1:]
                target_height, target_width = self.resize_imgs_with_padding
                # Skip the resize entirely when already at the target size.
                if original_height != target_height or original_width != target_width:
                    img = resize_with_pad(img, *self.resize_imgs_with_padding, pad_value=0)
            if self.enable_image_aug:
                # Geometric augmentation only for non-wrist cameras.
                if "wrist" not in key:
                    img = self.pose_transform(img)
                img = self.color_jitter_transform(img)
            # Normalize pixel values to [-1, 1]
            # NOTE(review): assumes inputs are already in [0, 1] here, unlike
            # call_batch which divides by 255 first -- confirm callers.
            img = img * 2.0 - 1.0
            images.append(img)
            img_masks.append(torch.tensor(True, dtype=torch.bool, device=img.device))
        return images, img_masks

    # VeRL: Batch Inference
    def call_batch(self, data: dict[str, torch.Tensor]) -> tuple[list[torch.Tensor], list[torch.Tensor]]:
        """Batched version of __call__ operating on (B, C, H, W) images."""
        images = []
        img_masks = []
        for key in self.present_img_keys:
            if key not in data:
                raise ValueError(
                    f"{key} not found in data. Please check the present_img_keys in the config or the dataset."
                )
            img = data[key]
            if img.ndim != 4:
                raise ValueError(f"(B,C,H,W) expected, but got {img.shape}")
            if self.resize_imgs_with_padding is not None:
                original_height, original_width = img.shape[2:]
                target_height, target_width = self.resize_imgs_with_padding
                if original_height != target_height or original_width != target_width:
                    # Inline batched equivalent of resize_with_pad: shrink by
                    # the dominant ratio, then center-pad to the target size.
                    ratio = max(original_width / target_width, original_height / target_height)
                    resized_height = int(original_height / ratio)
                    resized_width = int(original_width / ratio)
                    img = F.interpolate(img, size=(resized_height, resized_width), mode="bilinear", align_corners=False)
                    pad_height = max(0, int(target_height - resized_height))
                    pad_width = max(0, int(target_width - resized_width))
                    pad_top = pad_height // 2
                    pad_bottom = pad_height - pad_top
                    pad_left = pad_width // 2
                    pad_right = pad_width - pad_left
                    img = F.pad(img, (pad_left, pad_right, pad_top, pad_bottom), value=0)
            if self.enable_image_aug:
                # torchvision transforms take one image at a time, so augment
                # per-sample and re-stack.
                imgs = []
                for sample in img:
                    if "wrist" not in key:
                        sample = self.pose_transform(sample)
                    sample = self.color_jitter_transform(sample)
                    imgs.append(sample)
                img = torch.stack(imgs, dim=0)
            # NOTE(review): divides by 255 (expects [0, 255] inputs) before
            # mapping to [-1, 1]; __call__ does not -- confirm intended.
            img = img / 255.0 * 2.0 - 1.0  # pi05 libero
            images.append(img)
            img_masks.append(torch.ones((img.shape[0],), dtype=torch.bool, device=img.device))
        return images, img_masks
class PromptTokenizerTransform:
    """Tokenizes task prompts (optionally with a discretized state) for pi0."""

    def __init__(self, max_length: int, discrete_state_input: bool = False) -> None:
        # Tokenized prompts are padded/truncated to this fixed length.
        self.tokenizer_max_length = max_length
        # When True, a 256-bin discretization of the state is embedded in the prompt.
        self.discrete_state_input = discrete_state_input

    def __call__(self, data: dict[str, Any], tokenizer) -> tuple[torch.Tensor, torch.Tensor]:
        """Tokenize the text input.

        Args:
            data: Dict containing 'task' string and optionally 'observation.state' tensor to infer device.
            tokenizer: Callable HF-style tokenizer.

        Returns:
            A tuple of (lang_tokens, lang_masks), both as torch tensors on the inferred device.
        """
        task = data["task"].strip().replace("_", " ").replace("\n", " ")
        # Infer device from observation.state if available
        device = data["observation.state"].device if "observation.state" in data else torch.device("cpu")
        if self.discrete_state_input:
            assert "observation.state" in data, "discrete_state_input is True, but observation.state is not found."
            # Bucketize the (assumed [-1, 1]) state into 256 integer bins.
            bins = torch.linspace(-1, 1, 256 + 1, device=device)[:-1]
            discretized_state = torch.bucketize(data["observation.state"], bins) - 1
            state_values = " ".join(str(int(v)) for v in discretized_state.tolist())
            task = f"Task: {task}, State: {state_values};\nAction: "
        else:
            # PaliGemma prompt has to end with a new line in Pi0
            task = f"{task}\n"
        encoded = tokenizer(
            task,
            padding="max_length",
            padding_side="right",
            max_length=self.tokenizer_max_length,
            return_tensors="pt",
        )
        lang_tokens = encoded["input_ids"][0].to(dtype=torch.int32, device=device)
        lang_masks = encoded["attention_mask"][0].to(dtype=torch.bool, device=device)
        return lang_tokens, lang_masks

    # VeRL: Batch Inference
    def call_batch(self, data: dict[str, Any], tokenizer) -> tuple[torch.Tensor, torch.Tensor]:
        """Batched version of __call__; 'task' may be a sequence or array of strings."""
        raw_task = data["task"]
        if hasattr(raw_task, "tolist") and not isinstance(raw_task, str):
            task_list = raw_task.tolist()
        else:
            task_list = list(raw_task)
        cleaned = [str(t).strip().replace("_", " ").replace("\n", " ") for t in task_list]
        device = data["observation.state"].device if "observation.state" in data else torch.device("cpu")
        if self.discrete_state_input:
            assert "observation.state" in data, "discrete_state_input is True, but observation.state is not found."
            bins = torch.linspace(-1, 1, 256 + 1, device=device)[:-1]
            discretized = torch.bucketize(data["observation.state"], bins) - 1
            per_sample = [" ".join(str(int(v)) for v in row.tolist()) for row in discretized]
            prompts = [
                f"Task: {task_item}, State: {state_value};\nAction: "
                for task_item, state_value in zip(cleaned, per_sample, strict=False)
            ]
        else:
            prompts = [f"{task_item}\n" for task_item in cleaned]
        encoded = tokenizer(
            prompts,
            padding="max_length",
            padding_side="right",
            max_length=self.tokenizer_max_length,
            return_tensors="pt",
        )
        lang_tokens = encoded["input_ids"].to(dtype=torch.int32, device=device)
        lang_masks = encoded["attention_mask"].to(dtype=torch.bool, device=device)
        return lang_tokens, lang_masks
| {
"repo_id": "verl-project/verl",
"file_path": "verl/experimental/vla/models/pi0_torch/pi0_utils.py",
"license": "Apache License 2.0",
"lines": 453,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/experimental/vla/models/pi0_torch/policy/base.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
import torch
class Pi0Input(ABC):
    """Abstract container for pi0 model inputs.

    Subclasses populate the fields below from an environment observation via
    :meth:`from_env_obs`.
    """

    def __init__(self):
        # three images for pi0 input with keys:
        # [
        #     'observation.images.cam_high',
        #     'observation.images.cam_left_wrist',
        #     'observation.images.cam_right_wrist',
        # ],
        # each with shape (B, C, H, W)
        self.images: dict[str, torch.Tensor] = {}
        # image masks corresponding to the images, each with shape (B,)
        self.img_masks: list[torch.Tensor] = []
        # task description as a list of strings
        self.task: list[str] = []
        # robot state with shape (B, state_dim); None until populated
        self.state: torch.Tensor | None = None

    @classmethod
    @abstractmethod
    def from_env_obs(cls, env_obs) -> "Pi0Input":
        """Build an instance from a raw environment observation."""
        ...
class Pi0Output(ABC):
    """Abstract container for pi0 model outputs.

    Inherits ABC (matching the sibling Pi0Input) so the `@abstractmethod`
    below is actually enforced; previously the decorator had no effect and
    the class could be instantiated without an implementation.
    """

    def __init__(self):
        # predicted action tensor; None until populated by from_model_output
        self.action: torch.Tensor | None = None

    @classmethod
    @abstractmethod
    def from_model_output(cls, model_output) -> "Pi0Output":
        """Build an instance from a raw model output."""
        ...
| {
"repo_id": "verl-project/verl",
"file_path": "verl/experimental/vla/models/pi0_torch/policy/base.py",
"license": "Apache License 2.0",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/experimental/vla/models/pi0_torch/policy/libero_policy.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from typing_extensions import override
from verl.protocol import DataProto
from .base import Pi0Input, Pi0Output
# Maximum state dimensionality the pi0 model accepts; shorter LIBERO states
# are zero-padded up to this size.
PI0_MAX_STATE_DIM = 32
# Number of action steps kept from each predicted chunk.
PI0_ACTION_CHUNK_SIZE = 10
# LIBERO uses a 7-dim action space.
LIBERO_ACTION_DIM = 7
class LiberoPi0Input(Pi0Input):
    """Builds pi0 model inputs from LIBERO environment observations."""

    @override
    @classmethod
    def from_env_obs(cls, env_obs: DataProto) -> "LiberoPi0Input":
        input = cls()
        # Process images: LIBERO supplies channels-last tensors; pi0 expects
        # channels-first.
        images = env_obs.batch["full_image"]
        wrist_images = env_obs.batch["wrist_image"]
        batch_size = images.shape[0]
        cam_high = images.permute(0, 3, 1, 2)  # (B, H, W, C) -> (B, C, H, W)
        left_wrist = wrist_images.permute(0, 3, 1, 2)  # (B, H, W, C) -> (B, C, H, W)
        # LIBERO has no right-wrist camera: feed zeros and mask the slot out below.
        empty_images = torch.zeros(
            (batch_size, 3, cam_high.shape[2], cam_high.shape[3]),
            device=env_obs.batch.device,
            dtype=torch.bfloat16,
        )
        input.images = {
            "observation.images.cam_high": cam_high.to(torch.bfloat16),
            "observation.images.cam_left_wrist": left_wrist.to(torch.bfloat16),
            "observation.images.cam_right_wrist": empty_images,
        }
        # Masks mark which camera streams are real (True) vs. the zero filler.
        input.img_masks = [
            torch.ones((batch_size,), device=env_obs.batch.device, dtype=torch.bool),
            torch.ones((batch_size,), device=env_obs.batch.device, dtype=torch.bool),
            torch.zeros((batch_size,), device=env_obs.batch.device, dtype=torch.bool),
        ]
        # Process other data
        input.task = list(env_obs.non_tensor_batch["task_descriptions"])
        state = env_obs.batch["state"]
        # Zero-pad the LIBERO state up to pi0's fixed state dim.
        input.state = torch.nn.functional.pad(
            state, (0, max(0, PI0_MAX_STATE_DIM - state.shape[-1])), "constant", 0
        ).to(env_obs.batch.device, dtype=torch.float32)
        return input
class LiberoPi0Output(Pi0Output):
    """Adapts raw pi0 model output to the LIBERO action space."""

    @override
    @classmethod
    def from_model_output(cls, model_output: dict) -> "LiberoPi0Output":
        """Slice the predicted chunk down to LIBERO's horizon and action dim."""
        output = cls()
        full_action = model_output["full_action"]
        output.action = full_action[:, :PI0_ACTION_CHUNK_SIZE, :LIBERO_ACTION_DIM]
        return output
| {
"repo_id": "verl-project/verl",
"file_path": "verl/experimental/vla/models/pi0_torch/policy/libero_policy.py",
"license": "Apache License 2.0",
"lines": 60,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/experimental/vla/models/register_vla_models.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility helpers to register custom VLA models with Hugging Face Auto classes."""
from transformers import AutoConfig, AutoImageProcessor, AutoProcessor
from verl.utils.transformers_compat import get_auto_model_for_vision2seq
from .openvla_oft.configuration_prismatic import OpenVLAConfig
from .openvla_oft.modeling_prismatic import OpenVLAForActionPrediction
from .openvla_oft.processing_prismatic import PrismaticImageProcessor, PrismaticProcessor
from .pi0_torch import PI0ForActionPrediction, PI0TorchConfig
# Tracks which custom model families have already been registered so the
# register_* helpers below stay idempotent within a process.
_REGISTERED_MODELS = {
    "openvla_oft": False,
    "pi0_torch": False,
}
# NOTE(review): resolved through verl's transformers compat helper --
# presumably because the Vision2Seq auto class location varies across
# transformers versions; confirm in verl.utils.transformers_compat.
AutoModelForVision2Seq = get_auto_model_for_vision2seq()
def register_openvla_oft() -> None:
    """Register the OpenVLA OFT config, processors, and model with the HF
    Auto classes. Safe to call repeatedly; registration happens once."""
    if not _REGISTERED_MODELS["openvla_oft"]:
        AutoConfig.register("openvla", OpenVLAConfig)
        AutoImageProcessor.register(OpenVLAConfig, PrismaticImageProcessor)
        AutoProcessor.register(OpenVLAConfig, PrismaticProcessor)
        AutoModelForVision2Seq.register(OpenVLAConfig, OpenVLAForActionPrediction)
        _REGISTERED_MODELS["openvla_oft"] = True
def register_pi0_torch_model() -> None:
    """Register the PI0 config and model wrapper with the HF Auto classes.
    Safe to call repeatedly; registration happens once."""
    if not _REGISTERED_MODELS["pi0_torch"]:
        AutoConfig.register("pi0_torch", PI0TorchConfig)
        AutoModelForVision2Seq.register(PI0TorchConfig, PI0ForActionPrediction)
        _REGISTERED_MODELS["pi0_torch"] = True
def register_vla_models() -> None:
    """Register every custom VLA model family with Hugging Face."""
    for register in (register_openvla_oft, register_pi0_torch_model):
        register()
| {
"repo_id": "verl-project/verl",
"file_path": "verl/experimental/vla/models/register_vla_models.py",
"license": "Apache License 2.0",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/experimental/vla/sac/base.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import Any, Literal
import torch
from verl import DataProto
class SupportSACTraining:
    """
    Interface for models trainable with Soft Actor-Critic (SAC).

    Implementations bundle the actor and the critic in one model instance --
    for example a shared backbone with extra MLP heads producing Q/V values
    alongside the actor's action distribution -- so the model can be plugged
    directly into SAC training.

    Note:
        This class intentionally does NOT inherit from `abc.ABC`.
        The root model may be wrapped or transformed by FSDP (Fully Sharded
        Data Parallel), which performs runtime class substitution; using
        `ABCMeta` can break FSDP's class rewriting mechanism.
    """

    def sac_init(self):
        """Initialize SAC-specific components on the model."""
        raise NotImplementedError("Subclasses must implement sac_init method.")

    def sac_forward_critic(
        self,
        a: dict[str, torch.Tensor],
        state_features: Any,
        *,
        use_target_network: bool = False,
        method: Literal["cat", "min"] = "cat",
        requires_grad: bool = False,
    ) -> torch.Tensor:
        """Score state-action pairs with the critic heads.

        Args:
            a: Action dict; key "full_action" holds a tensor of shape
                (B, action_steps, action_dim).
            state_features: Processed state features (implementation-defined).
            use_target_network: If True, evaluate the target critic heads.
            method: How multiple heads' outputs are combined ("cat" or "min").
            requires_grad: Whether the critic head parameters keep gradients.

        Returns:
            Q-values: (B, num_heads) when method is "cat", (B, 1) when "min".
        """
        raise NotImplementedError("Subclasses must implement sac_forward_critic method.")

    def sac_forward_actor(
        self,
        state_features: Any,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Sample actions and their log probabilities from state features.

        Args:
            state_features: Processed state features (implementation-defined).

        Returns:
            actions: (B, n_action_steps, action_dim) sampled actions.
            log_probs: (B,) log probabilities of the sampled actions.
        """
        raise NotImplementedError("Subclasses must implement sac_forward_actor method.")

    def sac_forward_state_features(self, s: dict[str, torch.Tensor]) -> Any:
        """Encode raw observations into the state features used above.

        Args:
            s: Observation dict with keys
                "images" (B, n_images, C, H, W), "image_masks" (B, n_images),
                "lang_tokens" (B, L), "lang_masks" (B, L), and
                "states" (B, state_dim).

        Returns:
            Processed state features (implementation-defined structure).
        """
        raise NotImplementedError("Subclasses must implement sac_forward_state_features method.")

    def sac_update_target_network(self, tau: float):
        """Polyak-average the online heads into the target heads.

        Args:
            tau: Interpolation coefficient for the averaging.
        """
        raise NotImplementedError("Subclasses must implement sac_update_target_network method.")
class BaseSACActor(ABC):
    """Abstract SAC actor: owns the policy-update step over replayed batches."""

    @abstractmethod
    def update_policy(self, data: DataProto) -> dict:
        """
        Update the policy using the provided data batch.

        Args:
            data: DataProto containing the following entries in `data.batch`:
                - "a0.full_action": Tensor of shape (B, action_steps, action_dim),
                    representing the current action chunk for each sample.
                - "a1.full_action": Tensor of shape (B, action_steps, action_dim),
                    representing the next action chunk for each sample.
                - "s0.states": Tensor of shape (B, state_dim),
                    representing the current environment or agent state.
                - "s1.states": Tensor of shape (B, state_dim),
                    representing the next environment or agent state.
                - "s0.images": Tensor of shape (B, n_images, C, H, W),
                    containing current visual observations.
                - "s1.images": Tensor of shape (B, n_images, C, H, W),
                    containing next-step visual observations.
                - "s0.image_masks": Tensor of shape (B, n_images),
                    indicating valid images per sample.
                - "s1.image_masks": Tensor of shape (B, n_images),
                    indicating valid images per sample.
                - "s0.lang_tokens": Tensor of shape (B, max_seq_len),
                    tokenized language instructions.
                - "s1.lang_tokens": Tensor of shape (B, max_seq_len),
                    tokenized language instructions for the next step.
                - "s0.lang_masks": Tensor of shape (B, max_seq_len),
                    attention masks for language tokens.
                - "s1.lang_masks": Tensor of shape (B, max_seq_len),
                    attention masks for language tokens for the next step.
                - "rewards": Tensor of shape (B,),
                    chunk-level scalar rewards aligned to the next step.
                - "response_mask": Tensor of shape (B, action_steps),
                    mask indicating whether each sample has a valid response.

        Returns:
            dict: training metrics -- presumably for logging; confirm against
            concrete implementations.
        """
        raise NotImplementedError("Subclasses must implement update_policy method.")
| {
"repo_id": "verl-project/verl",
"file_path": "verl/experimental/vla/sac/base.py",
"license": "Apache License 2.0",
"lines": 122,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/experimental/vla/sac/naive_rollout_pi05.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
In single GPU rollout, the sequences are generated directly by sampling from the model.
The output will contain
1. output_ids
2. attention_masks (left padding)
3. eos_masks
4. log_probs
"""
import logging
from typing import Any
import torch
from verl import DataProto
from verl.experimental.vla.naive_rollout_rob import NaiveRolloutRob
from verl.utils.device import get_device_id, get_device_name
logger = logging.getLogger(__name__)
__all__ = ["PI0RolloutRob"]


class PI0RolloutRob(NaiveRolloutRob):
    """Single-GPU naive rollout for PI0-style VLA policies.

    Wraps a policy module exposing ``sample_actions`` and repackages its
    outputs — the sampled actions plus the state tensors that produced them —
    into a DataProto for downstream training.
    """

    def __init__(
        self,
        model_config: dict,
        module: torch.nn.Module,
        tokenizer: Any,
    ):
        self.model_config = model_config
        self.module = module
        self.tokenizer = tokenizer
        # ``sample_actions`` is not ``forward``, so FSDP must be told about it
        # to gather/reshard parameters correctly around the call.
        from torch.distributed.fsdp import register_fsdp_forward_method

        register_fsdp_forward_method(self.module, "sample_actions")

    @torch.no_grad()
    def generate_sequences(self, prompts: DataProto) -> DataProto:
        """Sample an action chunk per prompt and return it together with the
        state tensors the policy consumed."""
        with torch.autocast(device_type=get_device_name(), dtype=torch.bfloat16):
            prompts.to(get_device_id())
            sampled, state, action = self.module.sample_actions(prompts, tokenizer=self.tokenizer)

        tensors = {
            "action": sampled.action,
            "full_action": action["full_action"],
        }
        for state_key in ("images", "image_masks", "lang_tokens", "lang_masks", "states"):
            tensors[state_key] = state[state_key]
        return DataProto.from_dict(tensors)
| {
"repo_id": "verl-project/verl",
"file_path": "verl/experimental/vla/sac/naive_rollout_pi05.py",
"license": "Apache License 2.0",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/experimental/vla/sac/replay_pool.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import torch
from tensordict import TensorDict
# Use the module name (not __file__) as the logger name, matching the
# convention used by the sibling SAC modules; __file__ would create a logger
# keyed by an absolute path, breaking hierarchical logger configuration.
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
class SACReplayPool:
    """SAC Replay Pool for storing samples.

    A fixed-capacity ring buffer of transitions backed by a single TensorDict.
    Storage is allocated lazily from the first inserted batch, so the pool
    adapts to whatever keys/shapes/dtypes the caller provides. Each
    distributed rank owns an independent pool and checkpoint file.
    """

    def __init__(
        self,
        capacity: int,
        pool_device: str = "cpu",
        sample_device: str = "cpu",
    ):
        # Backing TensorDict; created on first add_batch (see _lazy_init_pool).
        self.pool = None
        self.capacity = capacity
        # Number of valid samples currently stored (grows up to capacity).
        self.size = 0
        # Ring-buffer write cursor: index where the next insertion starts.
        self.position = 0
        # Rank is baked into the checkpoint filename so ranks never collide.
        self.rank = torch.distributed.get_rank() if torch.distributed.is_initialized() else 0
        self.pool_device = pool_device
        self.sample_device = sample_device

    def add_batch(self, batch: TensorDict):
        """Add a batch of samples to the replay pool.

        Args:
            batch (TensorDict): A batch of samples to add. The batch should be a TensorDict
                containing the necessary keys for SAC training, each with shape [batch_size, ...].
        """
        if self.pool is None:
            # First insertion defines the pool's key set, shapes and dtypes.
            self._lazy_init_pool(batch)
        self._insert_block_to_pool(batch)

    def sample_batch(self, batch_size: int) -> TensorDict:
        """Sample a batch of experiences from the replay pool.

        Sampling is uniform without replacement over the valid prefix [0, size).

        Args:
            batch_size (int): The number of samples to draw.

        Returns:
            TensorDict: A batch of sampled experiences on ``sample_device``.
        """
        assert self.size >= batch_size, "Not enough samples in the replay pool to sample the requested batch size."
        # NOTE(review): idx lives on the default device; index_select requires
        # idx and pool on compatible devices — confirm if pool_device is ever
        # not "cpu".
        idx = torch.randperm(self.size)[:batch_size]
        sampled_batch = TensorDict(
            {key: value.index_select(0, idx).to(self.sample_device) for key, value in self.pool.items()},
            batch_size=[batch_size],
            device=self.sample_device,
        )
        return sampled_batch

    def insert_and_resample(
        self,
        source: TensorDict,
    ) -> TensorDict:
        """Insert a block of data from source to the replay pool and sample a batch with the same size."""
        self.add_batch(source)
        return self.sample_batch(source.size(0))

    def save(self, directory: str):
        """Save the replay pool to a directory.

        Writes ``sac_replay_pool_rank_<rank>.pt`` containing the CPU-moved
        pool plus bookkeeping metadata. No-op while the pool is still empty.
        """
        os.makedirs(directory, exist_ok=True)
        filepath = f"{directory}/sac_replay_pool_rank_{self.rank}.pt"
        if self.pool is not None:
            meta_info = {
                "size": self.size,
                "capacity": self.capacity,
                "position": self.position,
                "pool_device": self.pool_device,
                "sample_device": self.sample_device,
            }
            # Move to CPU so the checkpoint can be reloaded on any device.
            torch.save((self.pool.cpu(), meta_info), filepath)
            logger.info(f"[Rank {self.rank}] Replay pool saved to {filepath} with size: {self.size}")
        else:
            logger.info("Replay pool is empty. Nothing to save.")

    def load(self, directory: str):
        """Load the replay pool from a directory.

        Returns:
            bool: True if a checkpoint for this rank was found and loaded,
            False otherwise (file missing or unreadable).
        """
        filepath = f"{directory}/sac_replay_pool_rank_{self.rank}.pt"
        if not os.path.exists(filepath):
            return False
        try:
            pool, meta_info = torch.load(filepath, weights_only=False)
        except (RuntimeError, EOFError, ValueError) as exc:
            # Corrupt/partial checkpoints are tolerated: start fresh instead of crashing.
            logger.warning(
                f"[Rank {self.rank}] Failed to load replay pool from {filepath}: {exc}. "
                "Starting with an empty replay pool."
            )
            return False
        self.pool = pool.to(self.pool_device)
        # Reconcile a capacity change between the saved pool and this instance.
        if meta_info["capacity"] != self.capacity:
            if meta_info["capacity"] > self.capacity:
                logger.warning(
                    f"Loaded replay pool capacity {meta_info['capacity']} is greater than "
                    f"the current capacity {self.capacity}. Truncating the loaded pool."
                )
                # NOTE(review): truncation keeps the first `capacity` rows,
                # which are not necessarily the most recent ring-buffer entries.
                self.pool = TensorDict(
                    {key: value[: self.capacity] for key, value in pool.items()},
                    batch_size=[self.capacity],
                    device=self.pool_device,
                )
                # These two assignments are overwritten by the meta_info-based
                # assignments below; they appear to be redundant.
                self.size = min(self.size, self.capacity)
                self.position = self.position % self.capacity
            else:
                logger.warning(
                    f"Loaded replay pool capacity {meta_info['capacity']} is less than "
                    f"the current capacity {self.capacity}. Keeping the current capacity."
                )
                # Grow by zero-padding at the end; existing rows keep their indices.
                # NOTE(review): `value` comes from the pre-move `pool` while the
                # zero padding is allocated on pool_device; torch.cat requires a
                # single device — confirm pool_device is "cpu" on this path.
                self.pool = TensorDict(
                    {
                        key: torch.cat(
                            [
                                value,
                                torch.zeros(
                                    (self.capacity - meta_info["capacity"], *value.shape[1:]),
                                    dtype=value.dtype,
                                    device=self.pool_device,
                                ),
                            ],
                            dim=0,
                        )
                        for key, value in pool.items()
                    },
                    batch_size=[self.capacity],
                    device=self.pool_device,
                )
        # Restore bookkeeping from the checkpoint, clamped to the (possibly
        # different) current capacity. Runs on every successful load path,
        # including the equal-capacity case that skips the branch above.
        self.size = min(meta_info["size"], self.capacity)
        self.position = meta_info["position"] % self.capacity
        logger.info(f"[Rank {self.rank}] Replay pool loaded from {filepath} with size: {self.size}")
        return True

    @classmethod
    def from_path(
        cls,
        directory: str,
    ) -> "SACReplayPool":
        """Load a replay pool from a file.

        Unlike :meth:`load`, this builds a pool whose capacity matches the
        checkpoint exactly, so no capacity reconciliation is performed.

        Args:
            directory (str): The directory containing the saved replay pool.

        Returns:
            SACReplayPool: An instance of SACReplayPool with the loaded data.
        """
        rank = torch.distributed.get_rank() if torch.distributed.is_initialized() else 0
        filepath = f"{directory}/sac_replay_pool_rank_{rank}.pt"
        pool, meta_info = torch.load(filepath, weights_only=False)
        replay_pool = cls(
            capacity=meta_info["capacity"],
            pool_device=meta_info["pool_device"],
            sample_device=meta_info["sample_device"],
        )
        replay_pool.pool = pool.to(replay_pool.pool_device)
        replay_pool.rank = rank
        replay_pool.size = meta_info["size"]
        replay_pool.position = meta_info["position"]
        logger.info(f"[Rank {rank}] Replay pool loaded from {filepath} with size: {replay_pool.size}")
        return replay_pool

    def _insert_block_to_pool(
        self,
        source: TensorDict,
    ):
        """insert a block of data from source to the replay pool."""
        # Only the first `capacity` rows of an oversized source are kept.
        length = min(source.size(0), self.capacity)
        # Destination indices wrap around the ring buffer.
        idx = (self.position + torch.arange(length)) % self.capacity
        for key in source.keys():
            self.pool[key].index_copy_(0, idx, source[key][:length].to(self.pool_device))
        self.position = (self.position + length) % self.capacity
        self.size = min(self.size + length, self.capacity)

    def _lazy_init_pool(self, sample: TensorDict):
        """Lazily initialize the replay pool based on the sample structure."""
        logger.info(f"Initializing replay pool with capacity: {self.capacity}")
        # Zero-filled storage mirroring the sample's keys, trailing shapes and dtypes.
        self.pool = TensorDict(
            {
                key: torch.zeros((self.capacity, *value.shape[1:]), dtype=value.dtype, device=self.pool_device)
                for key, value in sample.items()
            },
            batch_size=[self.capacity],
            device=self.pool_device,
        )

    def __repr__(self):
        # Human-readable summary for logs/debugging.
        return (
            f"SACReplayPool(capacity={self.capacity}, "
            f"size={self.size}, pool_device={self.pool_device}, sample_device={self.sample_device})"
        )

    def __len__(self):
        # Number of valid samples currently stored.
        return self.size
| {
"repo_id": "verl-project/verl",
"file_path": "verl/experimental/vla/sac/replay_pool.py",
"license": "Apache License 2.0",
"lines": 189,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/experimental/vla/sac/sac_actor.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Single Process Actor
"""
import logging
import os
import numpy as np
import torch
import torch.nn.functional as F
from tensordict import TensorDict
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from typing_extensions import override
from verl.experimental.vla.sac.replay_pool import SACReplayPool
from verl.protocol import DataProto
from verl.utils.device import get_device_id, get_device_name
from .base import BaseSACActor, SupportSACTraining
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
def get_dict_from_prefix(tensordict: TensorDict, prefix: str) -> dict:
    """Collect the entries of ``tensordict`` whose keys start with ``prefix``.

    Args:
        tensordict: The input TensorDict containing various keys.
        prefix: Key prefix to filter on; it is stripped from the result keys.

    Returns:
        A dict mapping each matching key, minus the prefix, to its value.
    """
    cut = len(prefix)
    return {key[cut:]: tensordict[key] for key in tensordict.keys() if key.startswith(prefix)}
def merge_nested_dicts_or_tuples(a: dict | tuple, b: dict | tuple) -> dict | tuple:
"""Merge two nested structures (dictionaries or tuples) by concatenating tensors
along the first dimension.
"""
if isinstance(a, dict) and isinstance(b, dict):
merged = {}
for key in a.keys():
merged[key] = merge_nested_dicts_or_tuples(a[key], b[key])
return merged
elif isinstance(a, tuple) and isinstance(b, tuple):
merged = []
for item_a, item_b in zip(a, b, strict=False):
merged.append(merge_nested_dicts_or_tuples(item_a, item_b))
return tuple(merged)
else:
return torch.cat([a, b], dim=0)
def split_nested_dicts_or_tuples(data: dict | tuple, split_num: int) -> list[dict | tuple]:
"""Split a nested structure (dictionary or tuple) into smaller chunks along the first dimension."""
if isinstance(data, torch.Tensor):
split_tensors = torch.chunk(data, split_num, dim=0)
return list(split_tensors)
elif isinstance(data, dict):
split_dicts = [dict() for _ in range(split_num)]
for key, value in data.items():
split_values = split_nested_dicts_or_tuples(value, split_num)
for i in range(split_num):
split_dicts[i][key] = split_values[i]
return split_dicts
elif isinstance(data, tuple):
split_tuples = [list() for _ in range(split_num)]
for item in data:
split_items = split_nested_dicts_or_tuples(item, split_num)
for i in range(split_num):
split_tuples[i].append(split_items[i])
return [tuple(split_tuple) for split_tuple in split_tuples]
else:
raise TypeError("Input data must be a torch.Tensor, dict, or tuple.")
class RobDataParallelSACActor(BaseSACActor):
    """Data-parallel SAC actor for robot (VLA) policies.

    Owns the combined actor/critic module, its optimizer, a per-rank replay
    pool, and (optionally) an auto-tuned entropy temperature ``alpha``.
    """

    def __init__(
        self,
        config,
        actor_module: SupportSACTraining,
        actor_optimizer: torch.optim.Optimizer,
        tokenizer=None,
    ):
        """
        Args:
            config: Actor config; must provide ``sac`` (SAC hyperparameters),
                ``replay_pool_capacity``, ``replay_pool_save_dir``,
                ``replay_pool_save_interval``, ``ppo_micro_batch_size_per_gpu``,
                ``critic_warmup_steps`` and ``grad_clip``.
            actor_module: Model implementing the SupportSACTraining interface.
            actor_optimizer: Optimizer over the actor/critic parameters.
            tokenizer: Optional tokenizer, kept for interface parity.
        """
        super().__init__()
        self.config = config
        self.sac_config = config.sac
        self.device = get_device_name()
        self.actor_optimizer = actor_optimizer
        self.actor_module = actor_module
        self.actor_module.sac_init()
        self.tokenizer = tokenizer
        self.replay_pool = SACReplayPool(capacity=self.config.replay_pool_capacity, sample_device=self.device)
        # Best-effort restore of a previously saved pool (no-op when absent).
        self.replay_pool.load(self.config.replay_pool_save_dir)
        self._init_alpha()

    def _init_alpha(self):
        """Initialize the alpha optimizer for automatic entropy tuning."""
        self.auto_entropy = self.sac_config.get("auto_entropy", False)
        if self.auto_entropy:
            self.target_entropy = torch.tensor(float(self.sac_config.get("target_entropy", -32.0)), device=self.device)
            # alpha is recovered from raw_alpha via exp() or softplus() so it
            # stays positive throughout optimization.
            self.alpha_type = self.sac_config.get("alpha_type", "softplus")
            if self.alpha_type == "exp":
                # raw_alpha = log(initial_alpha) so that exp(raw_alpha) == initial_alpha.
                self.raw_alpha = torch.nn.Parameter(
                    np.log(np.exp(self.sac_config.get("initial_alpha", 1))) * torch.ones(1, device=self.device),
                    requires_grad=True,
                )
            elif self.alpha_type == "softplus":
                # raw_alpha = softplus^-1(initial_alpha) = log(exp(initial_alpha) - 1).
                self.raw_alpha = torch.nn.Parameter(
                    np.log(np.exp(self.sac_config.get("initial_alpha", 0.01)) - 1) * torch.ones(1, device=self.device),
                    requires_grad=True,
                )
            else:
                # BUGFIX: was `return NotImplementedError(...)`, which silently
                # returned from __init__'s call path without creating the alpha
                # optimizer; raise so a misconfiguration fails loudly.
                raise NotImplementedError(f"Unsupported alpha_type: {self.alpha_type}")
            # build alpha optimizer and (constant) scheduler
            self.alpha_optimizer = torch.optim.Adam([self.raw_alpha], lr=self.sac_config.get("alpha_lr", 3e-4))
            self.alpha_scheduler = torch.optim.lr_scheduler.ConstantLR(self.alpha_optimizer, factor=1.0)

    def _get_alpha(self) -> torch.Tensor:
        """Return the current entropy temperature alpha (always positive)."""
        if self.auto_entropy:
            if self.alpha_type == "exp":
                return self.raw_alpha.exp()
            elif self.alpha_type == "softplus":
                return torch.nn.functional.softplus(self.raw_alpha)
            else:
                # BUGFIX: was `return NotImplementedError(...)`, which would
                # hand an exception *instance* to callers as alpha.
                raise NotImplementedError(f"Unsupported alpha_type: {self.alpha_type}")
        else:
            # Fixed-temperature mode.
            return torch.tensor(float(self.sac_config.get("initial_alpha", 0.2)), device=self.device)

    def _calculate_actor_loss(
        self,
        log_probs: torch.Tensor,
        q_values: torch.Tensor,
        valid: torch.Tensor,
    ) -> torch.Tensor:
        """Calculate actor loss using the SAC loss function.

        Args:
            log_probs: Tensor of shape (B,) representing the log probabilities of actions.
            q_values: Tensor of shape (B,) representing the Q-values for the actions.
            valid: Tensor of shape (B,) indicating valid samples (1 for valid, 0 for invalid).

        Returns:
            Tensor of shape (1,) representing the actor loss.
        """
        alpha = self._get_alpha()
        # SAC policy objective: maximize Q - alpha * log_pi, i.e. minimize its negation.
        loss = alpha * log_probs - q_values
        # Mean over valid samples only; clamp_min avoids 0/0 on an all-invalid batch.
        actor_loss = (loss * valid).sum() / (valid.sum().clamp_min(1.0))
        return actor_loss

    def _calculate_alpha_loss(self, log_probs: torch.Tensor, valid: torch.Tensor) -> torch.Tensor:
        """Calculate alpha loss for automatic entropy tuning.

        Args:
            log_probs: Tensor of shape (B,) representing the log probabilities of actions.
            valid: Tensor of shape (B,) indicating valid samples (1 for valid, 0 for invalid).

        Returns:
            Tensor of shape (1,) representing the alpha loss.
        """
        # Drives alpha up when policy entropy is below target_entropy and down otherwise.
        alpha_loss = -self._get_alpha() * (log_probs.detach() + self.target_entropy)
        alpha_loss = (alpha_loss * valid).sum() / (valid.sum().clamp_min(1.0))
        return alpha_loss

    def _calculate_critic_loss(
        self,
        q_predict: torch.Tensor,
        q_target: torch.Tensor,
        rewards: torch.Tensor,
        valid: torch.Tensor,
        next_log_prob: torch.Tensor,
    ) -> torch.Tensor:
        """Calculate critic loss using the SAC loss function.

        Args:
            q_predict: Tensor of shape (B, critic_num) representing predicted Q-values.
            q_target: Tensor of shape (B,) representing target Q-values.
            rewards: Tensor of shape (B,) representing rewards.
            valid: Tensor of shape (B,) indicating valid samples (1 for valid, 0 for invalid).
            next_log_prob: Tensor of shape (B,) representing log probabilities of next actions.

        Returns:
            Tensor of shape (1,) representing the critic loss.
        """
        gamma = self.sac_config.gamma
        alpha = self._get_alpha()
        with torch.no_grad():
            # Soft Bellman target; `valid` also gates the bootstrap term.
            y = rewards + valid * gamma * (q_target - alpha * next_log_prob)
            y = y.unsqueeze(1).expand_as(q_predict)  # (B, critic_num)
        valid_mask = valid.unsqueeze(1)
        mse = F.mse_loss(q_predict, y, reduction="none")
        # Masked mean per critic head, then summed over heads.
        per_critic = (mse * valid_mask).sum(dim=0) / valid_mask.sum().clamp_min(1.0)
        critic_loss = per_critic.sum()
        return critic_loss

    def _forward_critic(self, micro_batch: TensorDict) -> torch.Tensor:
        """One critic forward pass on a micro-batch; returns the critic loss."""
        s0 = get_dict_from_prefix(micro_batch, "s0.")
        s1 = get_dict_from_prefix(micro_batch, "s1.")
        a0 = get_dict_from_prefix(micro_batch, "a0.")
        with torch.autocast(device_type=get_device_name(), dtype=torch.bfloat16):
            with torch.no_grad():
                # Encode s0 and s1 in a single forward by stacking them, then
                # split the features back apart. Features carry no gradient:
                # only the critic heads are trained here.
                s = merge_nested_dicts_or_tuples(s0, s1)
                state_features = self.actor_module.sac_forward_state_features(s)
                s0_state_features, s1_state_features = split_nested_dicts_or_tuples(state_features, 2)
                # Next action sampled from the current policy (for the target).
                a1_actions, log_probs_1 = self.actor_module.sac_forward_actor(s1_state_features)
            # Online critics on the replayed action: gradients flow here.
            q_values_0 = self.actor_module.sac_forward_critic(
                a0,
                s0_state_features,
                use_target_network=False,
                method="cat",
                requires_grad=True,
            )
            # Target critics (min over heads) on the sampled next action.
            q_values_1 = self.actor_module.sac_forward_critic(
                {"full_action": a1_actions},
                s1_state_features,
                use_target_network=True,
                method="min",
                requires_grad=False,
            )
            critic_loss = self._calculate_critic_loss(
                q_predict=q_values_0,
                q_target=q_values_1,
                rewards=micro_batch["rewards"].max(dim=-1).values,
                valid=micro_batch["valid"],
                next_log_prob=log_probs_1,
            )
        return critic_loss

    def _forward_actor(self, micro_batch: TensorDict) -> tuple[torch.Tensor, torch.Tensor]:
        """One actor forward pass on a micro-batch.

        Returns:
            (actor_loss, log_probs): the policy loss and the (B,) log-probs of
            the sampled actions, reused later for the alpha update.
            (BUGFIX: annotation previously promised three tensors.)
        """
        micro_batch = micro_batch.to(get_device_id())
        s0 = get_dict_from_prefix(micro_batch, "s0.")
        with torch.autocast(device_type=get_device_name(), dtype=torch.bfloat16):
            s0_state_features = self.actor_module.sac_forward_state_features(s0)
            a0_actions, log_probs_0 = self.actor_module.sac_forward_actor(s0_state_features)
            # Q of the freshly sampled action; critic heads stay frozen here.
            q_values_0 = self.actor_module.sac_forward_critic(
                {"full_action": a0_actions},
                s0_state_features,
                use_target_network=False,
                method="min",
                requires_grad=False,
            )
            actor_loss = self._calculate_actor_loss(
                log_probs=log_probs_0,
                q_values=q_values_0,
                valid=micro_batch["valid"],
            )
        return actor_loss, log_probs_0

    @override
    def update_policy(self, data: DataProto):
        """Run one SAC update: critic, then (after warmup) actor and alpha.

        Returns:
            dict: Scalar metrics for logging.
        """
        batch: TensorDict = data.select(
            [
                "a0.full_action",
                "a1.full_action",
                "s0.states",
                "s1.states",
                "s0.images",
                "s1.images",
                "s0.image_masks",
                "s1.image_masks",
                "s0.lang_tokens",
                "s1.lang_tokens",
                "s0.lang_masks",
                "s1.lang_masks",
                "rewards",
                "response_mask",
            ]
        ).batch
        # Mix fresh transitions into the replay pool and train on a resample.
        batch = self.replay_pool.insert_and_resample(batch)
        batch["valid"] = batch["response_mask"].any(dim=-1).float()  # (B,)
        micro_batches = batch.split(self.config.ppo_micro_batch_size_per_gpu)
        global_steps = data.meta_info["global_steps"]
        # NOTE(review): dividing each micro-batch loss by world_size assumes
        # gradients are SUM-reduced across ranks (as done explicitly for
        # raw_alpha below); an averaging reduction would make this an extra
        # division — confirm against the FSDP configuration.
        grad_accum_steps = len(micro_batches) * torch.distributed.get_world_size()
        actor_logprobs_list = []
        actor_loss_list, critic_loss_list, alpha_loss_list = [], [], []

        # Training critic
        self.actor_optimizer.zero_grad()
        for batch_idx, micro_batch in enumerate(micro_batches):
            logger.info(f"[{batch_idx + 1}/{len(micro_batches)}] critic micro batch ")
            micro_batch = micro_batch.to(get_device_id())
            raw_critic_loss = self._forward_critic(micro_batch)
            (raw_critic_loss / grad_accum_steps).backward()
            critic_loss_list.append(raw_critic_loss.detach().item())
        critic_grad_norm = self._optimizer_step()

        if global_steps >= self.config.critic_warmup_steps:
            # Training actor
            self.actor_optimizer.zero_grad()
            for batch_idx, micro_batch in enumerate(micro_batches):
                logger.info(f"[{batch_idx + 1}/{len(micro_batches)}] actor micro batch ")
                micro_batch = micro_batch.to(get_device_id())
                raw_actor_loss, log_probs = self._forward_actor(micro_batch)
                (raw_actor_loss / grad_accum_steps).backward()
                actor_loss_list.append(raw_actor_loss.detach().item())
                actor_logprobs_list.append(log_probs.detach())
            actor_grad_norm = self._optimizer_step()

            # Training alpha
            # NOTE: We reuse the log-probabilities computed during the actor forward pass
            # to update the entropy temperature (alpha), instead of re-forwarding
            # the actor after the policy update (saving compute).
            if self.auto_entropy:
                self.alpha_optimizer.zero_grad()
                for micro_batch, log_probs in zip(micro_batches, actor_logprobs_list, strict=False):
                    micro_batch = micro_batch.to(get_device_id())
                    raw_alpha_loss = self._calculate_alpha_loss(log_probs, micro_batch["valid"])
                    (raw_alpha_loss / grad_accum_steps).backward()
                    alpha_loss_list.append(raw_alpha_loss.detach().item())
                # raw_alpha is replicated per rank: sum its grads manually.
                torch.distributed.all_reduce(self.raw_alpha.grad, op=torch.distributed.ReduceOp.SUM)
                alpha_grad_norm = torch.nn.utils.clip_grad_norm_(self.raw_alpha, max_norm=self.config.grad_clip)
                self.alpha_optimizer.step()
                self.alpha_scheduler.step()

        # Update target networks
        self.actor_module.sac_update_target_network(self.sac_config.tau)

        # Save replay pool
        if global_steps % self.config.replay_pool_save_interval == 0:
            self.replay_pool.save(self.config.replay_pool_save_dir)

        # Log metrics
        metrics = {
            "data/reward_mean": (batch["rewards"].max(dim=-1).values * batch["valid"]).sum().item()
            / batch["valid"].sum().clamp_min(1.0).item(),
            "data/valid_ratio": batch["valid"].float().mean().item(),
            "sac/alpha": self._get_alpha().detach().item(),
            "sac/alpha_lr": self.alpha_optimizer.param_groups[0]["lr"] if self.auto_entropy else 0.0,
            "sac/alpha_loss": sum(alpha_loss_list) / len(alpha_loss_list) if alpha_loss_list else 0.0,
            "sac/alpha_grad_norm": alpha_grad_norm.detach().item()
            if self.auto_entropy and global_steps >= self.config.critic_warmup_steps
            else 0.0,
            "sac/replay_pool_size": len(self.replay_pool),
            "actor/loss": sum(actor_loss_list) / len(actor_loss_list) if actor_loss_list else 0.0,
            "actor/lr": self.actor_optimizer.param_groups[0]["lr"],
            "actor/grad_norm": actor_grad_norm.detach().item()
            if global_steps >= self.config.critic_warmup_steps
            else 0.0,
            "actor/logprob_mean": torch.cat(actor_logprobs_list).mean().detach().item() if actor_logprobs_list else 0.0,
            "critic/loss": sum(critic_loss_list) / len(critic_loss_list) if critic_loss_list else 0.0,
            "critic/grad_norm": critic_grad_norm.detach().item(),
        }
        return metrics

    def _optimizer_step(self) -> torch.Tensor:
        """Clip gradients, step the shared optimizer, and return the grad norm."""
        assert self.config.grad_clip is not None
        if isinstance(self.actor_module, FSDP):
            # FSDP shards parameters; its own clip handles the cross-rank norm.
            grad_norm = self.actor_module.clip_grad_norm_(max_norm=self.config.grad_clip)
        else:
            grad_norm = torch.nn.utils.clip_grad_norm_(self.actor_module.parameters(), max_norm=self.config.grad_clip)
        self.actor_optimizer.step()
        return grad_norm
| {
"repo_id": "verl-project/verl",
"file_path": "verl/experimental/vla/sac/sac_actor.py",
"license": "Apache License 2.0",
"lines": 340,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/experimental/vla/sac/sac_ray_trainer.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import uuid
from collections import defaultdict
from pprint import pprint
import numpy as np
import torch
from omegaconf import OmegaConf
from tqdm import tqdm
from verl import DataProto
from verl.experimental.dataset.sampler import AbstractCurriculumSampler
from verl.protocol import pad_dataproto_to_divisor, unpad_dataproto
from verl.single_controller.ray import RayClassWithInitArgs
from verl.single_controller.ray.base import create_colocated_worker_cls
from verl.trainer.ppo.metric_utils import (
compute_throughout_metrics,
process_validation_metrics,
)
from verl.trainer.ppo.ray_trainer import RayPPOTrainer
from verl.trainer.ppo.reward import compute_reward
from verl.trainer.ppo.utils import Role
from verl.utils.checkpoint.checkpoint_manager import should_save_ckpt_esi
from verl.utils.debug import marked_timer
from verl.utils.metric import reduce_metrics
def compute_response_mask(config, data: DataProto) -> torch.Tensor:
    """Build a per-action-dimension validity mask from per-step completion flags.

    For each trajectory, every position up to and including the first step
    whose ``complete`` flag is True is marked valid; everything after is
    masked out. Trajectories that never complete are kept in full.
    (The previous docstring described attention-mask extraction and did not
    match this function.)

    Args:
        config: Trainer config; ``config.env.actor.model.action_dim`` gives
            the number of action dimensions per chunk element.
        data (DataProto): Batch whose ``data.batch["complete"]`` has shape
            [batch_size, num_steps, chunk_size].

    Returns:
        torch.Tensor: Boolean mask of shape
        [batch_size, num_steps, chunk_size * action_dim].
    """
    complete = data.batch["complete"]  # shape: [batch_size, num_steps, chunk_size]
    complete_traj = complete.view(complete.shape[0], -1)  # shape: [batch_size, num_steps * chunk_size]
    batch_size, action_steps = complete_traj.shape
    step_indices = torch.arange(action_steps, device=complete.device).unsqueeze(0).expand(batch_size, -1)
    # argmax over the 0/1 flags returns the index of the first True, but it
    # also returns 0 when there is no True at all — hence the fixup below.
    first_true_idx_approx = torch.argmax(complete_traj.long(), dim=1)
    has_any_true = complete_traj.any(dim=1)
    # Never-completed trajectories fall back to the last index (keep everything).
    final_first_true_idx = torch.where(
        has_any_true, first_true_idx_approx, torch.tensor(action_steps - 1, device=complete.device)
    )
    mask_traj = step_indices <= final_first_true_idx.unsqueeze(1)
    mask = mask_traj.view(complete.shape)  # shape: [batch_size, num_steps, chunk_size]
    mask = mask.repeat_interleave(config.env.actor.model.action_dim, dim=-1)  # expand to action dim
    return mask
def flatten_trajectories(data: DataProto) -> DataProto:
    """Merge the batch and step dimensions of every per-step tensor.

    Tensors shaped (B, S, ...) become (B * S, ...); per-trajectory tensors
    shaped (B,) are repeated S times so each flattened step keeps its
    trajectory-level value; anything else passes through untouched.
    """
    batch_size, num_steps = data.batch["action"].shape[:2]

    def _flatten(tensor: torch.Tensor) -> torch.Tensor:
        # Per-step tensor: fold steps into the batch dimension.
        if len(tensor.shape) >= 2 and tensor.shape[0] == batch_size and tensor.shape[1] == num_steps:
            return tensor.reshape(batch_size * num_steps, *tensor.shape[2:])
        # Trajectory-level scalar per sample: replicate once per step.
        if len(tensor.shape) == 1 and tensor.shape[0] == batch_size:
            return tensor.repeat_interleave(num_steps)
        return tensor

    new_batch_fields = {key: _flatten(tensor) for key, tensor in data.batch.items()}
    return DataProto.from_dict(tensors=new_batch_fields, meta_info=data.meta_info)
def add_transition_prefixes(data: DataProto) -> DataProto:
    """Derive (s0, a0) -> (s1, a1) transition views from step-indexed tensors.

    For every known state/action key, a "s0."/"a0." entry holds steps
    [0, T-2] and a "s1."/"a1." entry holds steps [1, T-1]; afterwards every
    remaining tensor still carrying T steps is truncated to T-1 so all
    fields cover the same transitions. Returns ``data`` unchanged when no
    step key exists or there is at most one step.
    """
    batch = data.batch
    step_key = "action" if "action" in batch else "full_action"
    if step_key not in batch:
        return data
    num_steps = batch[step_key].shape[1]
    if num_steps <= 1:
        return data

    def _current(tensor: torch.Tensor) -> torch.Tensor:
        # Steps 0 .. T-2.
        return tensor[:, :-1, ...]

    def _next(tensor: torch.Tensor) -> torch.Tensor:
        # Steps 1 .. T-1.
        return tensor[:, 1:, ...]

    for key in ["states", "images", "image_masks", "lang_tokens", "lang_masks"]:
        if key in batch:
            batch[f"s0.{key}"] = _current(batch[key])
            batch[f"s1.{key}"] = _next(batch[key])
    for key in ["full_action", "action"]:
        if key in batch:
            batch[f"a0.{key}"] = _current(batch[key])
            batch[f"a1.{key}"] = _next(batch[key])

    # Truncate the remaining step-indexed tensors; the freshly added s*/a*
    # entries already have T-1 steps, so the shape test skips them.
    batch_size = batch[step_key].shape[0]
    for key, tensor in list(batch.items()):
        if tensor.ndim >= 2 and tensor.shape[0] == batch_size and tensor.shape[1] == num_steps:
            batch[key] = _current(tensor)
    return data
class RobRaySACTrainer(RayPPOTrainer):
def _start_profiling(self, do_profile: bool) -> None:
"""Start profiling for all worker groups including env workers."""
super()._start_profiling(do_profile)
if do_profile and hasattr(self, "env_wg"):
self.env_wg.start_profile(role="env", profile_step=self.global_steps)
def _stop_profiling(self, do_profile: bool) -> None:
"""Stop profiling for all worker groups including env workers."""
super()._stop_profiling(do_profile)
if do_profile and hasattr(self, "env_wg"):
self.env_wg.stop_profile()
    def init_workers(self):
        """Create resource pools and worker groups (actor_rollout + env), then
        wire up the optional async env-loop rollout manager."""
        self.resource_pool_manager.create_resource_pool()
        if self.config.env.disagg_sim.enable:
            # pin EnvWorker to Simulator GPU nodes
            self.resource_pool_manager.get_resource_pool(Role.Env).accelerator_type = "sim"
            self.resource_pool_manager.get_resource_pool(Role.ActorRollout).accelerator_type = "train_rollout"
        # Map: resource pool -> {role prefix -> Ray class-with-init-args}.
        self.resource_pool_to_cls = {pool: {} for pool in self.resource_pool_manager.resource_pool_dict.values()}
        resource_pool = self.resource_pool_manager.get_resource_pool(Role.ActorRollout)
        actor_rollout_cls = RayClassWithInitArgs(
            cls=self.role_worker_mapping[Role.ActorRollout],
            config=self.config.actor_rollout_ref,
            role="actor_rollout",
        )
        self.resource_pool_to_cls[resource_pool]["actor_rollout"] = actor_rollout_cls
        # Env workers are mandatory for the SAC trainer.
        assert Role.Env in self.role_worker_mapping
        if Role.Env in self.role_worker_mapping:
            resource_pool = self.resource_pool_manager.get_resource_pool(Role.Env)
            env_cls = RayClassWithInitArgs(self.role_worker_mapping[Role.Env], config=self.config.env)
            self.resource_pool_to_cls[resource_pool]["env"] = env_cls
        # initialize WorkerGroup
        # NOTE: if you want to use a different resource pool for each role, which can support different parallel size,
        # you should not use `create_colocated_worker_cls`.
        # Instead, directly pass different resource pool to different worker groups.
        # See https://github.com/volcengine/verl/blob/master/examples/ray/tutorial.ipynb for more information.
        all_wg = {}
        wg_kwargs = {}  # Setting up kwargs for RayWorkerGroup
        if OmegaConf.select(self.config.trainer, "ray_wait_register_center_timeout") is not None:
            wg_kwargs["ray_wait_register_center_timeout"] = self.config.trainer.ray_wait_register_center_timeout
        if OmegaConf.select(self.config.global_profiler, "steps") is not None:
            wg_kwargs["profile_steps"] = OmegaConf.select(self.config.global_profiler, "steps")
            # Only require nsight worker options when tool is nsys
            if OmegaConf.select(self.config.global_profiler, "tool") == "nsys":
                assert (
                    OmegaConf.select(self.config.global_profiler.global_tool_config.nsys, "worker_nsight_options")
                    is not None
                ), "worker_nsight_options must be set when using nsys with profile_steps"
                wg_kwargs["worker_nsight_options"] = OmegaConf.to_container(
                    OmegaConf.select(self.config.global_profiler.global_tool_config.nsys, "worker_nsight_options")
                )
        wg_kwargs["device_name"] = self.device_name
        # Spawn one colocated worker group per resource pool; spawn() splits it
        # back into per-role worker-group handles keyed by the class_dict prefixes.
        for resource_pool, class_dict in self.resource_pool_to_cls.items():
            worker_dict_cls = create_colocated_worker_cls(class_dict=class_dict)
            wg_dict = self.ray_worker_group_cls(
                resource_pool=resource_pool,
                ray_cls_with_init=worker_dict_cls,
                **wg_kwargs,
            )
            spawn_wg = wg_dict.spawn(prefix_set=class_dict.keys())
            all_wg.update(spawn_wg)
        # we should create rollout at the end so that vllm can have a better estimation of kv cache memory
        self.actor_rollout_wg = all_wg["actor_rollout"]
        self.actor_rollout_wg.init_model()
        self.env_wg = all_wg["env"]
        # create async rollout manager and request scheduler
        self.async_rollout_mode = False
        if self.config.actor_rollout_ref.rollout.mode == "async_envloop":
            from verl.experimental.vla.env_loop import EnvLoop

            self.async_rollout_mode = True
            self.async_rollout_manager = EnvLoop(
                config=self.config, rollout_wg=self.actor_rollout_wg, env_wg=self.env_wg
            )
def _get_gen_batch(self, batch: DataProto) -> DataProto:
    """Split the generation inputs out of ``batch``.

    Pops every non-tensor field (and no tensor fields) into a new DataProto
    used as the rollout/generation input; ``batch`` keeps its tensor data.
    """
    # No tensor keys are moved; all non-tensor keys go to the gen batch.
    popped_non_tensor_keys = list(set(batch.non_tensor_batch.keys()))
    return batch.pop(
        batch_keys=[],
        non_tensor_batch_keys=popped_non_tensor_keys,
    )
def _reset_envs(self, gen_batch: DataProto) -> asyncio.Future:
    """Kick off an asynchronous reset of the env workers.

    Builds a reset request from the batch's ``state_ids`` / ``task_ids``
    non-tensor fields and returns the env worker group's future, so the
    caller can overlap the reset with other work.
    """
    reset_request = DataProto.from_dict(
        non_tensors={
            "state_ids": gen_batch.non_tensor_batch["state_ids"],
            "task_ids": gen_batch.non_tensor_batch["task_ids"],
        }
    )
    return self.env_wg.reset_envs_to_state_ids(reset_request)
def fit(self):
    """
    The training loop of PPO.
    The driver process only need to call the compute functions of the worker group through RPC
    to construct the PPO dataflow.
    The light-weight advantage computation is done on the driver process.

    This loop prefetches the next dataloader batch each step so that the env
    reset for step N+1 can be issued while step N is still being processed.
    """
    from omegaconf import OmegaConf

    from verl.utils.tracking import Tracking

    # Experiment tracker (wandb/tensorboard/... depending on config).
    logger = Tracking(
        project_name=self.config.trainer.project_name,
        experiment_name=self.config.trainer.experiment_name,
        default_backend=self.config.trainer.logger,
        config=OmegaConf.to_container(self.config, resolve=True),
    )

    self.global_steps = 0

    # load checkpoint before doing anything
    self._load_checkpoint()

    # perform validation before training
    # currently, we only support validation using the reward_function.
    if self.val_reward_fn is not None and self.config.trainer.get("val_before_train", True):
        val_metrics = self._validate()
        assert val_metrics, f"{val_metrics=}"
        pprint(f"Initial validation metrics: {val_metrics}")
        logger.log(data=val_metrics, step=self.global_steps)
        if self.config.trainer.get("val_only", False):
            return

    # add tqdm
    progress_bar = tqdm(total=self.total_training_steps, initial=self.global_steps, desc="Training Progress")

    # we start from step 1
    self.global_steps += 1
    last_val_metrics = None
    self.max_steps_duration = 0

    # Profiling window bookkeeping: when profile_continuous_steps is set,
    # start/stop only at the edges of a contiguous run of profiled steps.
    prev_step_profile = False
    curr_step_profile = (
        self.global_steps in self.config.global_profiler.steps
        if self.config.global_profiler.steps is not None
        else False
    )
    next_step_profile = False

    for epoch in range(self.config.trainer.total_epochs):
        # Manual iteration (instead of `for batch in loader`) enables the
        # one-step lookahead used to pre-reset envs for the next batch.
        train_iter = iter(self.train_dataloader)
        next_batch_dict = next(train_iter)
        need_validate = False
        dataloader_len = len(self.train_dataloader)
        print(f"Starting epoch {epoch}, dataloader length: {dataloader_len}")
        for step_idx in range(dataloader_len):
            batch_dict = next_batch_dict
            try:
                next_batch_dict = next(train_iter)
            except StopIteration:
                # Last step of the epoch: no batch to prefetch.
                next_batch_dict = None
            metrics = {}
            timing_raw = {}

            with marked_timer("start_profile", timing_raw):
                self._start_profiling(
                    not prev_step_profile and curr_step_profile
                    if self.config.global_profiler.profile_continuous_steps
                    else curr_step_profile
                )

            batch: DataProto = DataProto.from_single_dict(batch_dict)
            # add uid to batch
            batch.non_tensor_batch["uid"] = np.array([str(uuid.uuid4()) for _ in range(len(batch))], dtype=object)

            gen_batch = self._get_gen_batch(batch)
            gen_batch.meta_info["global_steps"] = self.global_steps
            gen_batch.meta_info["do_sample"] = True
            gen_batch.meta_info["temperature"] = self.config.actor_rollout_ref.rollout.temperature
            gen_batch.meta_info["prompt_length"] = self.config.actor_rollout_ref.rollout.prompt_length
            gen_batch.meta_info["eos_token_id"] = self.tokenizer.eos_token_id
            gen_batch.meta_info["n_samples"] = self.config.actor_rollout_ref.rollout.n
            gen_batch.meta_info["pad_token_id"] = self.tokenizer.pad_token_id
            # Duplicate each prompt n times for multi-sample rollout.
            gen_batch = gen_batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True)

            is_last_step = self.global_steps >= self.total_training_steps
            if step_idx == 0 or need_validate:
                # reset env workers in first step
                # if validation on last step, the reset was not executed and need to be done here
                reset_future = self._reset_envs(gen_batch)
            need_validate = (
                self.val_reward_fn is not None
                and self.config.trainer.test_freq > 0
                and (is_last_step or self.global_steps % self.config.trainer.test_freq == 0)
            )

            with marked_timer("step", timing_raw):
                # generate a batch
                with marked_timer("gen", timing_raw, color="red"):
                    gen_batch_output = self.async_rollout_manager.generate_sequences(gen_batch, reset_future)

                # prepare for next batch's env reset
                # (skipped when validation will reuse the envs, or at epoch end)
                if step_idx != dataloader_len - 1 and not need_validate:
                    next_batch: DataProto = DataProto.from_single_dict(next_batch_dict)
                    next_gen_batch = self._get_gen_batch(next_batch)
                    next_gen_batch = next_gen_batch.repeat(
                        repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True
                    )
                    reset_future = self._reset_envs(next_gen_batch)

                # repeat to align with repeated responses in rollout
                batch = batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True)
                # NOTE(review): this rebinding discards the repeated `batch`
                # above and keeps only the rollout output — confirm intended.
                batch = gen_batch_output

                if "response_mask" not in batch.batch.keys():
                    batch.batch["response_mask"] = compute_response_mask(self.config, batch)

                with marked_timer("reward", timing_raw, color="yellow"):
                    # compute reward model score
                    reward_tensor, reward_extra_infos_dict = compute_reward(batch, self.reward_fn)
                    batch.batch["rewards"] = reward_tensor
                    # Fraction of trajectories with at least one nonzero reward.
                    average_reward = reward_tensor.any(-1).mean(dtype=torch.float32).item()
                    metrics["data/trajectory_avg_reward"] = average_reward

                batch = add_transition_prefixes(batch)
                batch = flatten_trajectories(batch)
                # batch.meta_info["global_token_num"] = torch.sum(batch.batch["attention_mask"], dim=-1).tolist()
                batch.meta_info["global_token_num"] = [0]

                # update actor
                if self.config.trainer.critic_warmup <= self.global_steps:
                    with marked_timer("update_actor", timing_raw, color="red"):
                        batch.meta_info["multi_turn"] = self.config.actor_rollout_ref.rollout.multi_turn.enable
                        actor_output = self.actor_rollout_wg.update_actor(batch)
                    actor_output_metrics = reduce_metrics(actor_output.meta_info["metrics"])
                    metrics.update(actor_output_metrics)

                # Log rollout generations if enabled
                rollout_data_dir = self.config.trainer.get("rollout_data_dir", None)
                if rollout_data_dir:
                    with marked_timer("dump_rollout_generations", timing_raw, color="green"):
                        inputs = self.tokenizer.batch_decode(batch.batch["prompts"], skip_special_tokens=True)
                        outputs = self.tokenizer.batch_decode(batch.batch["responses"], skip_special_tokens=True)
                        scores = batch.batch["token_level_scores"].sum(-1).cpu().tolist()
                        sample_gts = [
                            item.non_tensor_batch.get("reward_model", {}).get("ground_truth", None)
                            for item in batch
                        ]

                        if "request_id" in batch.non_tensor_batch:
                            reward_extra_infos_dict.setdefault(
                                "request_id",
                                batch.non_tensor_batch["request_id"].tolist(),
                            )

                        self._dump_generations(
                            inputs=inputs,
                            outputs=outputs,
                            gts=sample_gts,
                            scores=scores,
                            reward_extra_infos_dict=reward_extra_infos_dict,
                            dump_path=rollout_data_dir,
                        )

                # validate
                if need_validate:
                    with marked_timer("testing", timing_raw, color="green"):
                        val_metrics: dict = self._validate()
                        if is_last_step:
                            last_val_metrics = val_metrics
                    metrics.update(val_metrics)

                # Check if the ESI (Elastic Server Instance)/training plan is close to expiration.
                esi_close_to_expiration = should_save_ckpt_esi(
                    max_steps_duration=self.max_steps_duration,
                    redundant_time=self.config.trainer.esi_redundant_time,
                )
                # Check if the conditions for saving a checkpoint are met.
                # The conditions include a mandatory condition (1) and
                # one of the following optional conditions (2/3/4):
                # 1. The save frequency is set to a positive value.
                # 2. It's the last training step.
                # 3. The current step number is a multiple of the save frequency.
                # 4. The ESI(Elastic Server Instance)/training plan is close to expiration.
                if self.config.trainer.save_freq > 0 and (
                    is_last_step or self.global_steps % self.config.trainer.save_freq == 0 or esi_close_to_expiration
                ):
                    if esi_close_to_expiration:
                        print("Force saving checkpoint: ESI instance expiration approaching.")
                    with marked_timer("save_checkpoint", timing_raw, color="green"):
                        self._save_checkpoint()

            with marked_timer("stop_profile", timing_raw):
                next_step_profile = (
                    self.global_steps + 1 in self.config.global_profiler.steps
                    if self.config.global_profiler.steps is not None
                    else False
                )
                self._stop_profiling(
                    curr_step_profile and not next_step_profile
                    if self.config.global_profiler.profile_continuous_steps
                    else curr_step_profile
                )
                prev_step_profile = curr_step_profile
                curr_step_profile = next_step_profile

            # Track the slowest step seen so far (used by the ESI check above).
            steps_duration = timing_raw["step"]
            self.max_steps_duration = max(self.max_steps_duration, steps_duration)

            # training metrics
            metrics.update(
                {
                    "training/global_step": self.global_steps,
                    "training/epoch": epoch,
                }
            )
            # collect metrics
            # metrics.update(compute_data_metrics(batch=batch, use_critic=self.use_critic))
            # metrics.update(compute_timing_metrics(batch=batch, timing_raw=timing_raw))
            # TODO: implement actual tflpo and theoretical tflpo
            n_gpus = self.resource_pool_manager.get_n_gpus()
            metrics.update(compute_throughout_metrics(batch=batch, timing_raw=timing_raw, n_gpus=n_gpus))

            # this is experimental and may be changed/removed in the future in favor of a general-purpose one
            if isinstance(self.train_dataloader.sampler, AbstractCurriculumSampler):
                self.train_dataloader.sampler.update(batch=batch)

            # TODO: make a canonical logger that supports various backend
            logger.log(data=metrics, step=self.global_steps)

            progress_bar.update(1)
            self.global_steps += 1

            if (
                hasattr(self.config.actor_rollout_ref.actor, "profiler")
                and self.config.actor_rollout_ref.actor.profiler.tool == "torch_memory"
            ):
                self.actor_rollout_wg.dump_memory_snapshot(
                    tag=f"post_update_step{self.global_steps}", sub_dir=f"step{self.global_steps}"
                )

            if is_last_step:
                pprint(f"Final validation metrics: {last_val_metrics}")
                progress_bar.close()
                return

            # this is experimental and may be changed/removed in the future
            # in favor of a general-purpose data buffer pool
            if hasattr(self.train_dataset, "on_batch_end"):
                # The dataset may be changed after each training batch
                self.train_dataset.on_batch_end(batch=batch)
def _validate(self):
    """Run one validation pass over ``val_dataloader`` and return a metric dict.

    For each full validation batch: roll out with the async env loop, score
    with ``val_reward_fn``, and aggregate per-data-source metrics via
    ``process_validation_metrics``. Undersized trailing batches are dropped.
    """
    data_source_lst = []
    reward_extra_infos_dict: dict[str, list] = defaultdict(list)

    # Lists to collect samples for the table
    sample_scores = []
    sample_turns = []
    sample_uids = []

    for test_data in self.val_dataloader:
        test_batch = DataProto.from_single_dict(test_data)

        # Drop incomplete final batches so env sizing assumptions hold.
        if len(test_batch) < self.config.data.val_batch_size:
            print(f"drop last batch in val_dataloader, len {len(test_batch)}")
            break

        if "uid" not in test_batch.non_tensor_batch:
            test_batch.non_tensor_batch["uid"] = np.array(
                [str(uuid.uuid4()) for _ in range(len(test_batch))], dtype=object
            )

        test_gen_batch = self._get_gen_batch(test_batch)
        test_gen_batch.meta_info = {
            "eos_token_id": self.tokenizer.eos_token_id,
            "pad_token_id": self.tokenizer.pad_token_id,
            "prompt_length": self.config.actor_rollout_ref.rollout.prompt_length,
            "recompute_log_prob": False,
            "do_sample": self.config.actor_rollout_ref.rollout.val_kwargs.do_sample,
            "temperature": self.config.actor_rollout_ref.rollout.temperature,
            "n_samples": self.config.actor_rollout_ref.rollout.n,
            "validate": True,
            "global_steps": self.global_steps,
        }
        test_gen_batch = test_gen_batch.repeat(
            repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True
        )
        sample_uids.extend(test_gen_batch.non_tensor_batch["uid"])

        # pad to be divisible by dp_size
        size_divisor = self.config.env.train.num_envs * self.config.env.rollout.pipeline_stage_num
        test_gen_batch_padded, pad_size = pad_dataproto_to_divisor(test_gen_batch, size_divisor)
        # Envs must be reset before generation; the future is consumed inside
        # generate_sequences.
        reset_future = self._reset_envs(test_gen_batch_padded)
        test_output_gen_batch_padded = self.async_rollout_manager.generate_sequences(
            test_gen_batch_padded, reset_future
        )

        # unpad
        test_output_gen_batch = unpad_dataproto(test_output_gen_batch_padded, pad_size=pad_size)
        print("validation generation end")

        test_batch = test_output_gen_batch
        test_batch.meta_info["validate"] = True

        # evaluate using reward_function
        if self.val_reward_fn is None:
            raise ValueError("val_reward_fn must be provided for validation.")
        result = self.val_reward_fn(test_batch, return_dict=True)
        reward_tensor = result["reward_tensor"]
        # Per-sample scalar score = sum of token-level rewards.
        scores = reward_tensor.sum(-1).cpu().tolist()
        sample_scores.extend(scores)

        reward_extra_infos_dict["reward"].extend(scores)
        print(f"len reward_extra_infos_dict['reward']: {len(reward_extra_infos_dict['reward'])}")
        if "reward_extra_info" in result:
            for key, lst in result["reward_extra_info"].items():
                reward_extra_infos_dict[key].extend(lst)
                print(f"len reward_extra_infos_dict['{key}']: {len(reward_extra_infos_dict[key])}")

        # collect num_turns of each prompt
        if "__num_turns__" in test_batch.non_tensor_batch:
            sample_turns.append(test_batch.non_tensor_batch["__num_turns__"])

        data_source_lst.append(test_batch.non_tensor_batch.get("data_source", ["unknown"] * reward_tensor.shape[0]))

    # Every extra-info list must align 1:1 with the collected scores (or be empty).
    for key_info, lst in reward_extra_infos_dict.items():
        assert len(lst) == 0 or len(lst) == len(sample_scores), f"{key_info}: {len(lst)=}, {len(sample_scores)=}"

    data_sources = np.concatenate(data_source_lst, axis=0)
    data_src2var2metric2val = process_validation_metrics(data_sources, sample_uids, reward_extra_infos_dict)
    metric_dict = {}
    for data_source, var2metric2val in data_src2var2metric2val.items():
        # "acc" is the headline variable when present; otherwise raw reward.
        core_var = "acc" if "acc" in var2metric2val else "reward"
        for var_name, metric2val in var2metric2val.items():
            # Largest sample count encountered in names like "mean@4/...".
            n_max = max([int(name.split("@")[-1].split("/")[0]) for name in metric2val.keys()])
            for metric_name, metric_val in metric2val.items():
                if (
                    (var_name == core_var)
                    and any(metric_name.startswith(pfx) for pfx in ["mean", "maj", "best"])
                    and (f"@{n_max}" in metric_name)
                ):
                    metric_sec = "val-core"
                else:
                    metric_sec = "val-aux"
                pfx = f"{metric_sec}/{data_source}/{var_name}/{metric_name}"
                metric_dict[pfx] = metric_val

    if len(sample_turns) > 0:
        sample_turns = np.concatenate(sample_turns)
        metric_dict["val-aux/num_turns/min"] = sample_turns.min()
        metric_dict["val-aux/num_turns/max"] = sample_turns.max()
        metric_dict["val-aux/num_turns/mean"] = sample_turns.mean()

    return metric_dict
| {
"repo_id": "verl-project/verl",
"file_path": "verl/experimental/vla/sac/sac_ray_trainer.py",
"license": "Apache License 2.0",
"lines": 491,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:tests/utils/test_check_ipc_version_support_on_npu.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import unittest
from unittest.mock import Mock, mock_open, patch
from verl.utils.device import check_ipc_version_support, get_npu_versions
class TestCheckIPCVersionSupport(unittest.TestCase):
    """Unit tests for ``check_ipc_version_support``.

    The function compares an NPU driver software version and a CANN toolkit
    version against inclusive minimums of 25.3.rc1 and 8.3.rc1 respectively.
    """

    def setUp(self):
        """Silence INFO-level log output while the tests run."""
        logging.disable(logging.INFO)

    def tearDown(self):
        """Re-enable all logging levels."""
        logging.disable(logging.NOTSET)

    def test_standard_version_with_support(self):
        """Plain dotted versions at or above both minimums are accepted."""
        self.assertTrue(check_ipc_version_support("25.5.0", "8.3.0"))

    def test_standard_version_newer(self):
        """Versions well beyond both minimums are accepted."""
        self.assertTrue(check_ipc_version_support("26.0.0", "9.0.0"))

    def test_rc_version_format(self):
        """RC versions with extra trailing parts normalize to x.y.rcN and pass."""
        self.assertTrue(check_ipc_version_support("25.3.rc1.2", "8.3.rc1.2"))

    def test_exact_rc_version(self):
        """Versions exactly equal to the minimums are accepted (inclusive bound)."""
        self.assertTrue(check_ipc_version_support("25.3.rc1", "8.3.rc1"))

    def test_t_suffix_version(self):
        """A ``t``-suffixed software build (25.5.t3.b001 -> 25.5) passes."""
        self.assertTrue(check_ipc_version_support("25.5.t3.b001", "8.3.rc1"))

    def test_t_suffix_version_older(self):
        """A new-enough software build is still rejected when CANN is too old."""
        self.assertFalse(check_ipc_version_support("25.5.t3.b001", "8.2.rc1"))

    def test_software_version_below_minimum(self):
        """Software below 25.3.rc1 is rejected even with a good CANN version."""
        self.assertFalse(check_ipc_version_support("25.2.0", "8.3.0"))

    def test_cann_version_below_minimum(self):
        """CANN below 8.3.rc1 is rejected even with a good software version."""
        self.assertFalse(check_ipc_version_support("25.5.0", "8.2.0"))

    def test_both_versions_below_minimum(self):
        """Rejected when both components are too old."""
        self.assertFalse(check_ipc_version_support("25.2.0", "8.2.0"))

    def test_invalid_software_version(self):
        """An unparseable software version raises RuntimeError with a clear message."""
        with self.assertRaises(RuntimeError) as ctx:
            check_ipc_version_support("invalid.version", "8.3.0")
        self.assertIn("Invalid software version format", str(ctx.exception))

    def test_invalid_cann_version(self):
        """An unparseable CANN version raises RuntimeError with a clear message."""
        with self.assertRaises(RuntimeError) as ctx:
            check_ipc_version_support("25.5.0", "invalid.version")
        self.assertIn("Invalid CANN version format", str(ctx.exception))

    def test_rc_with_more_parts(self):
        """Only the first three components of an RC version are considered."""
        self.assertTrue(check_ipc_version_support("25.3.rc1.2.3.4", "8.3.rc1.2.3.4"))

    def test_standard_with_more_parts(self):
        """Only the first three components of a standard version are considered."""
        self.assertTrue(check_ipc_version_support("25.5.0.1.2.3", "8.3.0.1.2.3"))

    def test_rc_edge_case_versions(self):
        """rc1 is the inclusive lower bound; rc0 falls just below it."""
        self.assertTrue(check_ipc_version_support("25.3.rc1", "8.3.rc1"))
        self.assertFalse(check_ipc_version_support("25.3.rc0", "8.3.rc1"))

    def test_major_version_differences(self):
        """Major-version jumps in either direction decide the verdict."""
        self.assertTrue(check_ipc_version_support("30.0.0", "10.0.0"))
        self.assertFalse(check_ipc_version_support("24.0.0", "7.0.0"))
class TestGetNPUVersions(unittest.TestCase):
    """Test cases for the get_npu_versions function.

    NOTE: ``@patch`` decorators apply bottom-up, so the mock arguments arrive
    in reverse decorator order (innermost/bottom decorator first).
    """

    @patch("subprocess.run")
    @patch("platform.machine")
    @patch("os.path.exists")
    @patch("builtins.open", new_callable=mock_open, read_data="version=8.3.rc1\n")
    def test_get_npu_versions_success(self, mock_file, mock_exists, mock_machine, mock_run):
        """Test successful retrieval of versions."""
        # Mock npu-smi output
        mock_run.return_value = Mock(stdout="Software Version : 25.5.0\nOther Info\n", check=True)
        # Mock architecture
        mock_machine.return_value = "x86_64"
        # Mock path exists
        mock_exists.return_value = True

        software_version, cann_version = get_npu_versions()

        # Software version parsed from npu-smi stdout, CANN from the info file.
        self.assertEqual(software_version, "25.5.0")
        self.assertEqual(cann_version, "8.3.rc1")

    @patch("subprocess.run")
    def test_get_npu_versions_missing_software_version(self, mock_run):
        """Test error when Software Version is missing."""
        mock_run.return_value = Mock(stdout="Other Info Without Software Version\n", check=True)

        with self.assertRaises(RuntimeError) as context:
            get_npu_versions()
        self.assertIn("Could not find Software Version", str(context.exception))

    @patch("subprocess.run")
    @patch("platform.machine")
    @patch("os.path.exists")
    @patch("builtins.open", new_callable=mock_open, read_data="version=8.3.rc1\n")
    def test_get_npu_versions_unsupported_architecture(self, mock_file, mock_exists, mock_machine, mock_run):
        """Test error with unsupported architecture."""
        mock_run.return_value = Mock(stdout="Software Version : 25.5.0\n", check=True)
        mock_machine.return_value = "armv7l"  # Unsupported architecture
        mock_exists.return_value = True

        with self.assertRaises(RuntimeError) as context:
            get_npu_versions()
        self.assertIn("Unsupported architecture", str(context.exception))

    @patch("subprocess.run")
    @patch("platform.machine")
    @patch("os.path.exists")
    @patch("builtins.open", new_callable=mock_open, read_data="version=8.3.rc1\n")
    def test_get_npu_versions_cann_path_not_exists(self, mock_file, mock_exists, mock_machine, mock_run):
        """Test error when CANN path doesn't exist."""
        mock_run.return_value = Mock(stdout="Software Version : 25.5.0\n", check=True)
        mock_machine.return_value = "x86_64"
        mock_exists.return_value = False  # Path doesn't exist

        with self.assertRaises(RuntimeError) as context:
            get_npu_versions()
        self.assertIn("CANN toolkit path does not exist", str(context.exception))

    @patch("subprocess.run")
    @patch("platform.machine")
    @patch("os.path.exists")
    @patch("builtins.open")
    def test_get_npu_versions_info_file_not_exists(self, mock_file, mock_exists, mock_machine, mock_run):
        """Test error when CANN info file doesn't exist."""
        mock_run.return_value = Mock(stdout="Software Version : 25.5.0\n", check=True)
        mock_machine.return_value = "x86_64"
        # First call is for CANN path exists, second call is for info file exists
        mock_exists.side_effect = [True, False]

        with self.assertRaises(RuntimeError) as context:
            get_npu_versions()
        self.assertIn("CANN toolkit info file does not exist", str(context.exception))

    @patch("subprocess.run")
    @patch("platform.machine")
    @patch("os.path.exists")
    @patch("builtins.open", new_callable=mock_open, read_data="other_info=no_version\n")
    def test_get_npu_versions_missing_cann_version(self, mock_file, mock_exists, mock_machine, mock_run):
        """Test error when CANN version is missing from info file."""
        mock_run.return_value = Mock(stdout="Software Version : 25.5.0\n", check=True)
        mock_machine.return_value = "x86_64"
        mock_exists.return_value = True

        with self.assertRaises(RuntimeError) as context:
            get_npu_versions()
        self.assertIn("Could not find version in CANN toolkit info file", str(context.exception))
# Allow running this test module directly with the unittest runner.
if __name__ == "__main__":
    unittest.main()
| {
"repo_id": "verl-project/verl",
"file_path": "tests/utils/test_check_ipc_version_support_on_npu.py",
"license": "Apache License 2.0",
"lines": 188,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:tests/utils/test_shared_memory.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import unittest
from multiprocessing import shared_memory
import torch
from verl.workers.rollout.vllm_rollout.bucketed_weight_transfer import create_shared_memory, rebuild_shared_memory
class TestSharedMemory(unittest.TestCase):
    """Test cases for shared memory utility functions.

    Exercises ``create_shared_memory`` / ``rebuild_shared_memory`` round trips
    for several dtypes. Cleanup is deliberately loose: segments are not
    explicitly unlinked; the tests delete Python references and rely on the
    resource tracker / OS to reclaim the segments.
    """

    def setUp(self):
        """Set up test fixtures before each test method."""
        # Use short unique names to avoid POSIX shared memory name length limits
        import uuid

        short_id = uuid.uuid4().hex[:8]
        self.test_name = f"shm_{short_id}"

    def tearDown(self):
        """Clean up shared memory after each test method."""
        # Note: We're relying on the OS to clean up shared memory
        # as we properly delete all references in the tests
        pass

    def test_create_shared_memory_new(self):
        """Test creating new shared memory with unique name."""
        size = 1024
        shm = create_shared_memory(size, self.test_name)

        # Verify shared memory object is created correctly
        self.assertIsNotNone(shm)
        # Note: shared memory may have system-dependent size rounding
        # (e.g. rounding up to a page size), so only a lower bound is checked.
        self.assertGreaterEqual(shm.size, size)
        self.assertEqual(shm.name, self.test_name)

        # Clean up - drop our handle; OS-level cleanup is deferred (see tearDown)
        del shm

    def test_create_shared_memory_attach_existing(self):
        """Test that create_shared_memory attaches to existing shared memory when FileExistsError occurs."""
        size = 2048
        # First, create shared memory
        shm1 = create_shared_memory(size, self.test_name)
        self.assertGreaterEqual(shm1.size, size)

        # Second call should attach to existing memory
        shm2 = create_shared_memory(size, self.test_name)

        # Verify we attached to the same shared memory
        self.assertIsNotNone(shm2)
        self.assertGreaterEqual(shm2.size, size)
        self.assertEqual(shm2.name, self.test_name)

        # Both should reference the same shared memory
        self.assertEqual(shm1.name, shm2.name)

        # Clean up
        del shm1, shm2

    def test_rebuild_shared_memory_default_dtype(self):
        """Test rebuilding tensor from shared memory with default dtype (uint8)."""
        size = 1024
        # Create and write to shared memory
        shm = create_shared_memory(size, self.test_name)
        test_data = torch.arange(size, dtype=torch.uint8)
        shm.buf[:size] = test_data.numpy().tobytes()

        # Rebuild tensor from shared memory
        tensor, _ = rebuild_shared_memory(self.test_name, size)

        # Verify tensor properties
        self.assertEqual(tensor.dtype, torch.uint8)
        self.assertEqual(len(tensor), size)

        # Verify data integrity against a fresh view of the same buffer
        reconstructed = torch.frombuffer(shm.buf[:size], dtype=torch.uint8)
        self.assertTrue(torch.equal(tensor, reconstructed))

        # Clean up - delete references before closing
        # (tensors are zero-copy views into shm.buf and must go first)
        del tensor, reconstructed

    def test_rebuild_shared_memory_custom_dtype(self):
        """Test rebuilding tensor from shared memory with custom dtype."""
        size = 256  # 256 bytes = 64 float32 values
        # Create and write to shared memory
        shm = create_shared_memory(size, self.test_name)
        test_data = torch.arange(64, dtype=torch.float32)
        shm.buf[:size] = test_data.numpy().tobytes()

        # Rebuild tensor with custom dtype
        tensor, _ = rebuild_shared_memory(self.test_name, size, dtype=torch.float32)

        # Verify tensor properties
        self.assertEqual(tensor.dtype, torch.float32)
        self.assertEqual(len(tensor), 64)

        # Verify data integrity
        reconstructed = torch.frombuffer(shm.buf[:size], dtype=torch.float32)
        self.assertTrue(torch.equal(tensor, reconstructed))

        # Clean up - delete references before closing
        del tensor, reconstructed

    def test_shared_memory_data_integrity(self):
        """Test that data remains intact between create and rebuild operations."""
        size = 512
        # Create test data with various patterns
        test_data = torch.randint(0, 256, (size,), dtype=torch.uint8)

        # Create shared memory and write data
        shm = create_shared_memory(size, self.test_name)
        shm.buf[:size] = test_data.numpy().tobytes()

        # Rebuild tensor
        tensor, _ = rebuild_shared_memory(self.test_name, size)

        # Verify data integrity against the original random payload
        reconstructed = torch.frombuffer(shm.buf[:size], dtype=torch.uint8)
        self.assertTrue(torch.equal(test_data, reconstructed))

        # Clean up - delete references before closing
        del tensor, reconstructed

    def test_shared_memory_different_dtypes(self):
        """Test shared memory operations with different tensor dtypes."""
        # (dtype, byte size, expected element count = size // itemsize)
        test_cases = [
            (torch.float32, 256, 64),  # 256 bytes / 4 bytes = 64 values
            (torch.float64, 256, 32),  # 256 bytes / 8 bytes = 32 values
            (torch.int32, 256, 64),  # 256 bytes / 4 bytes = 64 values
            (torch.int64, 256, 32),  # 256 bytes / 8 bytes = 32 values
            (torch.uint8, 256, 256),  # 256 bytes / 1 byte = 256 values
        ]

        for dtype, size, expected_len in test_cases:
            # Create test data
            test_data = torch.arange(expected_len, dtype=dtype)

            # Create shared memory and write data
            # (same segment name is reused across iterations)
            shm = create_shared_memory(size, self.test_name)
            shm.buf[:size] = test_data.numpy().tobytes()

            # Rebuild tensor
            tensor, _ = rebuild_shared_memory(self.test_name, size, dtype=dtype)

            # Verify properties and data
            self.assertEqual(tensor.dtype, dtype)
            self.assertEqual(len(tensor), expected_len)
            reconstructed = torch.frombuffer(shm.buf[:size], dtype=dtype)
            self.assertTrue(torch.equal(test_data, reconstructed))

            # Clean up - delete references before closing
            del tensor, reconstructed

    def test_shared_memory_multiple_operations(self):
        """Test multiple create/rebuild operations with the same name."""
        size = 512

        # First iteration
        test_data1 = torch.arange(size, dtype=torch.uint8)
        shm1 = create_shared_memory(size, self.test_name)
        shm1.buf[:size] = test_data1.numpy().tobytes()
        tensor1, _ = rebuild_shared_memory(self.test_name, size)
        reconstructed1 = torch.frombuffer(shm1.buf[:size], dtype=torch.uint8)
        self.assertTrue(torch.equal(test_data1, reconstructed1))
        del tensor1, reconstructed1, shm1

        # Second iteration with different data
        test_data2 = torch.arange(size, dtype=torch.uint8) * 2
        shm2 = create_shared_memory(size, self.test_name)
        shm2.buf[:size] = test_data2.numpy().tobytes()
        tensor2, _ = rebuild_shared_memory(self.test_name, size)
        reconstructed2 = torch.frombuffer(shm2.buf[:size], dtype=torch.uint8)
        self.assertTrue(torch.equal(test_data2, reconstructed2))
        del tensor2, reconstructed2, shm2
# Module-level function for cross-process testing
# Module-level function for cross-process testing (must be module-level so it
# is picklable for multiprocessing start methods such as "spawn").
def child_process_function(name, size, test_data_bytes):
    """Child process entry point: attach to shared memory and verify contents.

    Args:
        name: Name of an existing shared memory segment created by the parent.
        size: Number of bytes to read from the segment.
        test_data_bytes: Raw bytes holding the expected uint8 contents.

    Returns:
        True on success. On any failure the process exits with status 1 so the
        parent can detect it via ``Process.exitcode`` — merely returning False
        would still produce exit code 0 and silently pass the parent's
        exitcode assertion.
    """
    import sys  # local import keeps the fix self-contained

    shm = None
    tensor = None
    test_data = None
    try:
        # Convert bytes back to the expected reference tensor.
        test_data = torch.frombuffer(test_data_bytes, dtype=torch.uint8)
        # Attach to shared memory created by the parent process.
        shm = shared_memory.SharedMemory(name=name)
        # Rebuild tensor from shared memory (zero-copy view of shm.buf).
        tensor = torch.frombuffer(shm.buf[:size], dtype=torch.uint8)
        # Verify data integrity
        assert torch.equal(test_data, tensor), "Data mismatch in child process"
        return True
    except Exception as e:
        print(f"Error in child process: {e}")
        # Surface the failure to the parent as a nonzero exit code.
        sys.exit(1)
    finally:
        # Clean up shared memory in child process.
        # Drop the buffer views first so the mapping can be closed.
        del tensor, test_data
        if shm is not None:
            shm.close()
        # Note: Don't unlink in child process, parent will clean up
class TestSharedMemoryIntegration(unittest.TestCase):
    """Integration tests for shared memory operations across process boundaries."""

    def test_cross_process_shared_memory(self):
        """Test shared memory can be created in one process and accessed in another."""
        size = 1024
        test_data = torch.arange(size, dtype=torch.uint8)

        # Create shared memory in parent process
        shm = create_shared_memory(size, "test_cross_proc")
        shm.buf[:size] = test_data.numpy().tobytes()

        # Convert tensor to bytes for passing to child process
        # (raw bytes pickle cleanly across the process boundary)
        test_data_bytes = test_data.numpy().tobytes()

        # Start child process
        process = multiprocessing.Process(
            target=child_process_function, args=("test_cross_proc", size, test_data_bytes)
        )
        process.start()
        process.join(timeout=5)

        # Verify child process completed successfully.
        # NOTE(review): this only catches failures that make the child exit
        # nonzero (or hang, leaving exitcode None after the join timeout) —
        # verify child_process_function does not swallow a data mismatch and
        # exit 0 anyway.
        self.assertEqual(process.exitcode, 0, "Child process failed")

        # Clean up
        del shm
# Allow running this test module directly with the unittest runner.
if __name__ == "__main__":
    unittest.main()
| {
"repo_id": "verl-project/verl",
"file_path": "tests/utils/test_shared_memory.py",
"license": "Apache License 2.0",
"lines": 199,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:verl/utils/kernel/fp8_kernel.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import torch
logger = logging.getLogger(__name__)
# Check if Triton is available. The import is attempted eagerly at module load
# so the `if _TRITON_AVAILABLE:` guard below can define the JIT kernels.
_TRITON_AVAILABLE = False
try:
    import triton
    import triton.language as tl
    _TRITON_AVAILABLE = True
except ImportError:
    logger.debug("Triton not available, FP8 Triton kernels will not be used")
# Environment variable to control Triton FP8 usage (set to "1" to disable)
_DISABLE_TRITON_FP8 = os.environ.get("VERL_DISABLE_TRITON_FP8", "0").lower() in ("1", "true", "yes")
# FP8 constants for the e4m3 format used throughout this module
FP8_DTYPE = torch.float8_e4m3fn
FP8_MAX = torch.finfo(FP8_DTYPE).max  # largest finite e4m3 value
FP8_MIN = -FP8_MAX  # symmetric quantization range
def ceil_div(x: int, y: int) -> int:
    """Perform ceiling division of two integers.

    Algebraically identical to ``(x + y - 1) // y`` (same numerator after
    factoring), written here as ``(x - 1) // y + 1``.
    """
    return (x - 1) // y + 1
def is_triton_available() -> bool:
    """Report whether the Triton-backed FP8 kernels in this module can be used."""
    return _TRITON_AVAILABLE
if _TRITON_AVAILABLE:
    @triton.jit
    def _blockwise_cast_to_fp8_kernel(
        X,
        Y,
        S,
        stride_xm,
        stride_xn,
        stride_ym,
        stride_yn,
        stride_sm,
        stride_sn,
        M,
        N,
        eps,
        fp8_min,
        fp8_max,
        BLOCK_M: tl.constexpr = 128,
        BLOCK_N: tl.constexpr = 128,
    ):
        """Triton kernel for blockwise FP8 quantization.
        Each program instance handles one block of size (BLOCK_M, BLOCK_N).
        Computes per-block scale and quantizes to FP8 in a single pass.
        Refer to https://github.com/THUDM/slime/blob/main/slime/backends/megatron_utils/kernels/fp8_kernel.py
        """
        # 64-bit program ids so the offset arithmetic below cannot overflow
        # for very large matrices.
        pid_m = tl.cast(tl.program_id(axis=0), tl.int64)
        pid_n = tl.cast(tl.program_id(axis=1), tl.int64)
        # Compute block offsets
        off_m = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
        off_n = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
        # Create masks for boundary handling
        mask_m = off_m < M
        mask_n = off_n < N
        mask = mask_m[:, None] & mask_n[None, :]
        # Load input block and convert to float32 for precision
        x = tl.load(X + off_m[:, None] * stride_xm + off_n[None, :] * stride_xn, mask=mask, other=0.0).to(tl.float32)
        # Compute block-wise absolute maximum with epsilon for numerical stability
        _absmax = tl.maximum(tl.max(tl.abs(x)), eps)
        # Compute scale: scale = absmax / fp8_max
        x_s = _absmax / fp8_max
        # Compute inverse scale for quantization
        s_inv = 1.0 / x_s
        # Quantize: clamp(x * s_inv, fp8_min, fp8_max)
        y_q = tl.clamp(x * s_inv, fp8_min, fp8_max).to(Y.dtype.element_ty)
        # Store quantized values and scale. The stored scale is the
        # multiply-to-dequantize factor: x ~= y_q * x_s.
        tl.store(Y + off_m[:, None] * stride_ym + off_n[None, :] * stride_yn, y_q, mask=mask)
        tl.store(S + pid_m * stride_sm + pid_n * stride_sn, x_s)
def blockwise_cast_to_fp8_triton(
x: torch.Tensor,
weight_block_size: list[int] | tuple[int, int] | None = None,
) -> tuple[torch.Tensor, torch.Tensor]:
"""Quantize a 2D tensor to FP8 using blockwise quantization with Triton.
This function provides high-performance FP8 quantization with minimal memory overhead.
All computations (abs, max, scale, clamp) are performed in a single Triton kernel,
eliminating intermediate tensor allocations.
Args:
x: Input tensor of shape (M, N), must be 2D.
weight_block_size: Block size for quantization as [BLOCK_M, BLOCK_N].
Defaults to [128, 128] if None.
Returns:
Tuple of (quantized_tensor, scale_tensor):
- quantized_tensor: FP8 quantized tensor of shape (M, N)
- scale_tensor: Per-block scale factors of shape (ceil(M/BLOCK_M), ceil(N/BLOCK_N))
This is the inverse scale (multiply to dequantize).
"""
assert x.dim() == 2, f"Expected 2D tensor, got {x.dim()}D"
# Default block size
BLOCK_M, BLOCK_N = 128, 128
if weight_block_size is not None:
BLOCK_M, BLOCK_N = weight_block_size[0], weight_block_size[1]
M, N = x.shape
# Pre-allocate output tensors (only memory allocation in this function)
y = torch.empty(M, N, device=x.device, dtype=FP8_DTYPE)
s = torch.empty(ceil_div(M, BLOCK_M), ceil_div(N, BLOCK_N), dtype=torch.float32, device=x.device)
# Grid: one program per block
def grid(meta):
return (triton.cdiv(M, meta["BLOCK_M"]), triton.cdiv(N, meta["BLOCK_N"]))
# Tune kernel parameters based on memory layout
if x.is_contiguous():
kwargs = {"BLOCK_M": BLOCK_M, "BLOCK_N": BLOCK_N, "num_warps": 8, "num_stages": 2}
else:
kwargs = {"BLOCK_M": BLOCK_M, "BLOCK_N": BLOCK_N, "num_warps": 1, "num_stages": 4}
# Launch kernel
_blockwise_cast_to_fp8_kernel[grid](
x,
y,
s,
*x.stride(),
*y.stride(),
*s.stride(),
M,
N,
1e-10, # eps for numerical stability
FP8_MIN,
FP8_MAX,
**kwargs,
)
return y, s
def scaled_fp8_blockwise_triton(
    data_hp: torch.Tensor,
    weight_block_size: list[int] | tuple[int, int],
) -> tuple[torch.Tensor, torch.Tensor]:
    """High-performance FP8 blockwise quantization using Triton kernel.

    Pads the input up to block-size multiples when necessary, runs the fused
    Triton kernel, then crops the quantized output back to the original shape.

    Args:
        data_hp: Input high-precision tensor of shape (M, N).
        weight_block_size: Block size for quantization as [BLOCK_M, BLOCK_N].

    Returns:
        Tuple of (fp8_data, descale):
            - fp8_data: FP8 quantized tensor of the original shape.
            - descale: Per-block descale factors (inverse of scale, for dequantization).

    Raises:
        RuntimeError: If Triton is not available.
    """
    if not _TRITON_AVAILABLE:
        raise RuntimeError("Triton is required for scaled_fp8_blockwise_triton but is not available")

    blk_rows, blk_cols = weight_block_size[0], weight_block_size[1]
    orig_rows, orig_cols = data_hp.shape
    # Bottom/right padding needed to reach a block-size multiple.
    pad_rows = (blk_rows - orig_rows % blk_rows) % blk_rows
    pad_cols = (blk_cols - orig_cols % blk_cols) % blk_cols
    needs_padding = pad_rows > 0 or pad_cols > 0
    if needs_padding:
        logger.debug(
            f"Padding weight from {data_hp.shape} to "
            f"({data_hp.shape[0] + pad_rows}, {data_hp.shape[1] + pad_cols}) "
            f"for blockwise FP8 quantization"
        )
        data_hp = torch.nn.functional.pad(data_hp, (0, pad_cols, 0, pad_rows), mode="constant", value=0)

    fp_data, scale = blockwise_cast_to_fp8_triton(data_hp, weight_block_size)

    if needs_padding:
        # Crop back to the caller's original extents.
        fp_data = fp_data[:orig_rows, :orig_cols].contiguous()
    # The kernel already emits scales in the inverse-scale format expected by
    # vLLM/SGLang, so they are returned unchanged.
    return fp_data, scale
def _scaled_fp8_blockwise_pytorch(
    data_hp: torch.Tensor,
    weight_block_size: list[int] | tuple[int, int],
) -> tuple[torch.Tensor, torch.Tensor]:
    """PyTorch implementation of blockwise FP8 quantization.

    Memory-optimized implementation that:
    - Uses in-place operations where possible
    - Explicitly deletes intermediate tensors
    - Minimizes peak memory usage during quantization

    Args:
        data_hp: Input high-precision tensor of shape (M, N).
        weight_block_size: Block size for quantization as [BLOCK_M, BLOCK_N];
            this implementation requires both entries to be equal.

    Returns:
        Tuple of (fp8_data, descale):
            - fp8_data: FP8 quantized tensor
            - descale: Per-block descale factors for dequantization.
              NOTE(review): shaped (BLK_M, BLK_N, 1) because of keepdim=True
              below, while the Triton path returns (BLK_M, BLK_N) -- confirm
              downstream consumers accept both layouts.
    """
    block_size0 = weight_block_size[0]
    block_size1 = weight_block_size[1]
    assert block_size0 == block_size1, "Block sizes must be equal"
    # Save unpadded shape for later cropping
    original_shape = data_hp.shape
    # Pad dimensions to be multiples of block size if needed
    pad_dim0 = (block_size0 - data_hp.shape[0] % block_size0) % block_size0
    pad_dim1 = (block_size1 - data_hp.shape[1] % block_size1) % block_size1
    if pad_dim0 > 0 or pad_dim1 > 0:
        logger.debug(
            f"Padding weight from {data_hp.shape} to "
            f"({data_hp.shape[0] + pad_dim0}, {data_hp.shape[1] + pad_dim1}) "
            f"for blockwise FP8 quantization"
        )
        data_hp = torch.nn.functional.pad(data_hp, (0, pad_dim1, 0, pad_dim0), mode="constant", value=0)
    # FP8 dynamic-range limit used both for scaling and clamping
    max_dtype = FP8_MAX
    padded_shape = data_hp.shape
    blk_m, blk_n = data_hp.shape[0] // block_size0, data_hp.shape[1] // block_size1
    # Group elements so each (block_size0, block_size1) tile is one unit.
    # (reshape/permute are views, but .contiguous() materializes a copy.)
    data_hp = data_hp.reshape(blk_m, block_size0, blk_n, block_size1)
    data_hp = data_hp.permute(0, 2, 1, 3).contiguous()
    # Flatten to (BLK_M, BLK_N, BLOCK_SIZE_M * BLOCK_SIZE_N) in float32 for precision
    data_hp = data_hp.to(torch.float32).flatten(start_dim=2)
    # Calculate max absolute value per block - use fused abs+amax
    max_abs = data_hp.abs().amax(dim=-1, keepdim=True)
    # Compute scale = FP8_MAX / absmax so scaled values fill the FP8 range
    scale_fp = torch.empty_like(max_abs)
    torch.div(max_dtype, max_abs, out=scale_fp)
    # Handle edge cases: all-zero blocks (absmax == 0) and absmax == inf fall
    # back to a scale of 1.0 instead of inf / 0.
    scale_fp = torch.where(max_abs == 0, torch.ones_like(scale_fp), scale_fp)
    scale_fp = torch.where(max_abs == torch.inf, torch.ones_like(scale_fp), scale_fp)
    del max_abs  # Free max_abs memory
    # Compute descale before modifying data
    descale_fp = torch.reciprocal(scale_fp)
    # Scale and clamp in a memory-efficient way
    data_hp.mul_(scale_fp)
    del scale_fp  # Free scale memory
    data_hp.clamp_(min=-max_dtype, max=max_dtype)
    # Convert to FP8
    fp_data = data_hp.to(FP8_DTYPE)
    del data_hp  # Free float32 data
    # Reshape back to original layout
    fp_data = fp_data.reshape(blk_m, blk_n, block_size0, block_size1).permute(0, 2, 1, 3).reshape(padded_shape)
    # Remove padding to restore original shape
    if original_shape[0] != padded_shape[0] or original_shape[1] != padded_shape[1]:
        fp_data = fp_data[: original_shape[0], : original_shape[1]].contiguous()
    return fp_data, descale_fp
def scaled_fp8_blockwise(
    data_hp: torch.Tensor,
    weight_block_size: list[int] | tuple[int, int],
) -> tuple[torch.Tensor, torch.Tensor]:
    """Cast tensor from high precision to FP8 with blockwise quantization.

    Automatically picks the best available backend:
    1. Triton kernel (if available): highest performance, minimal memory overhead.
    2. PyTorch fallback: memory-optimized implementation with in-place ops.

    Set the environment variable ``VERL_DISABLE_TRITON_FP8=1`` to force the
    PyTorch fallback even when Triton is installed.

    Args:
        data_hp: Input tensor of shape (M, N) in high precision (bf16/fp16/fp32).
        weight_block_size: Block size for quantization as [BLOCK_M, BLOCK_N].

    Returns:
        Tuple of (fp8_data, descale):
            - fp8_data: FP8 quantized tensor
            - descale: Per-block descale factors for dequantization
    """
    assert len(data_hp.shape) == 2, "Only 2d input tensor is supported"
    use_triton = _TRITON_AVAILABLE and not _DISABLE_TRITON_FP8
    implementation = scaled_fp8_blockwise_triton if use_triton else _scaled_fp8_blockwise_pytorch
    return implementation(data_hp, weight_block_size)
| {
"repo_id": "verl-project/verl",
"file_path": "verl/utils/kernel/fp8_kernel.py",
"license": "Apache License 2.0",
"lines": 266,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:tests/checkpoint_engine/test_correctness_on_gpu.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import ray
from tests.checkpoint_engine.test_utils import create_rollout_worker_group, create_trainer_worker_group
from verl.checkpoint_engine import CheckpointEngineManager
from verl.single_controller.ray.base import (
RayResourcePool,
split_resource_pool,
)
from verl.utils.device import get_device_name
from verl.utils.ray_utils import auto_await
from verl.workers.config import CheckpointEngineConfig, HFModelConfig, RolloutConfig
@pytest.mark.asyncio
@pytest.mark.parametrize("rebuild_group", [False, True])
@pytest.mark.parametrize("num_trainer, num_rollout", [(2, 6)])
@auto_await
async def test_nccl_checkpoint_engine(
    rebuild_group,
    num_trainer,
    num_rollout,
    num_nodes=1,
    num_gpus_per_node=8,
    check_allclose=True,
    model_path="~/models/Qwen/Qwen3-8B-Base",
):
    """End-to-end weight sync through the NCCL checkpoint engine.

    Spins up colocated trainer and rollout worker groups, then pushes trainer
    weights to the rollout replicas three times, verifying after each push.
    """
    model_path = os.path.expanduser(model_path)
    env_vars = {
        "UCX_TLS": "rc,tcp,cuda",
        "UCX_MAX_RNDV_RAILS": "4",
        "UCX_LOG_LEVEL": "INFO",
        "VERL_LOGGING_LEVEL": "DEBUG",
    }
    ray.init(runtime_env={"env_vars": env_vars})

    # Engine / model / rollout configuration.
    ckpt_config = CheckpointEngineConfig(backend="nccl", engine_kwargs={"nccl": {"rebuild_group": rebuild_group}})
    model_config = HFModelConfig(path=model_path, use_remove_padding=True)
    rollout_config = RolloutConfig(name="vllm", checkpoint_engine=ckpt_config)

    # Split a single resource pool between trainer and rollout workers.
    full_pool = RayResourcePool(process_on_nodes=[num_gpus_per_node] * num_nodes, max_colocate_count=3)
    trainer_pool, rollout_pool = split_resource_pool(full_pool, [num_trainer, num_rollout])
    trainer = create_trainer_worker_group(trainer_pool, model_config, ckpt_config)
    trainer.reset()
    rollout, replicas = await create_rollout_worker_group(rollout_pool, model_config, rollout_config, check_allclose)

    # Drive three update cycles through the checkpoint engine manager.
    checkpoint_manager = CheckpointEngineManager(config=ckpt_config, trainer=trainer, replicas=replicas)
    for _ in range(3):
        await checkpoint_manager.update_weights()
        rollout.check_weights()
    ray.shutdown()
@pytest.mark.skip(reason="temporary skip since our ci environment is not ready")
@pytest.mark.asyncio
@pytest.mark.parametrize("device", ["cuda", "cpu"])
@pytest.mark.parametrize("num_trainer, num_rollout", [(2, 6)])
@auto_await
async def test_nixl_checkpoint_engine(
    num_trainer,
    num_rollout,
    device,
    num_nodes=1,
    num_gpus_per_node=8,
    check_allclose=True,
    model_path="~/models/Qwen/Qwen3-8B-Base",
):
    """End-to-end weight sync through the NIXL checkpoint engine.

    Same flow as the NCCL test, but weight transfer goes through NIXL/UCX with
    the staging buffer on either "cuda" or "cpu".
    """
    model_path = os.path.expanduser(model_path)
    # TODO: it's pretty hard to set these environment variables right, please consult
    # with your network admin. Maybe auto adjust UCX_* according to NCCL_IB_*?
    env_vars = {
        "UCX_TLS": "rc,ud,cuda",
        # "UCX_IB_GID_INDEX": "3", # NCCL_IB_GID_INDEX
        # "UCX_IB_DEVICES": "mlx5_1:1,mlx5_2:1,mlx5_3:1", # NCCL_IB_HCA
        "UCX_RC_TIMEOUT": "30s",  # NCCL_IB_TIMEOUT
        "UCX_RC_RETRY_COUNT": "7",  # NCCL_IB_RETRY_COUNT
        "UCX_KEEPALIVE_INTERVAL": "1s",
        "UCX_KEEPALIVE_NUM_EPS": "10",
        "UCX_MAX_RNDV_RAILS": "4",
        "UCX_IB_ROCE_REACHABILITY_MODE": "all",
        "UCX_LOG_LEVEL": "INFO",
        "VERL_LOGGING_LEVEL": "DEBUG",
    }
    ray.init(runtime_env={"env_vars": env_vars})

    # Engine / model / rollout configuration.
    ckpt_config = CheckpointEngineConfig(backend="nixl", engine_kwargs={"nixl": {"device": device}})
    model_config = HFModelConfig(path=model_path, use_remove_padding=True)
    rollout_config = RolloutConfig(name="vllm", checkpoint_engine=ckpt_config)

    # Split a single resource pool between trainer and rollout workers.
    full_pool = RayResourcePool(process_on_nodes=[num_gpus_per_node] * num_nodes, max_colocate_count=3)
    trainer_pool, rollout_pool = split_resource_pool(full_pool, [num_trainer, num_rollout])
    trainer = create_trainer_worker_group(trainer_pool, model_config, ckpt_config)
    trainer.reset()
    rollout, replicas = await create_rollout_worker_group(rollout_pool, model_config, rollout_config, check_allclose)

    # Drive three update cycles through the checkpoint engine manager.
    checkpoint_manager = CheckpointEngineManager(config=ckpt_config, trainer=trainer, replicas=replicas)
    for _ in range(3):
        await checkpoint_manager.update_weights()
        rollout.check_weights()
    ray.shutdown()
@pytest.mark.skip(reason="temporary skip since our ci environment is not ready")
@pytest.mark.asyncio
@pytest.mark.parametrize("rebuild_group", [False])
@pytest.mark.parametrize("num_trainer, num_rollout", [(2, 6)])
@auto_await
async def test_kimi_checkpoint_engine(
    rebuild_group,
    num_trainer,
    num_rollout,
    num_nodes=1,
    num_gpus_per_node=8,
    check_allclose=True,
    model_path="~/models/Qwen/Qwen3-8B-Base",
):
    """End-to-end weight sync through the kimi_ckpt_engine backend."""
    model_path = os.path.expanduser(model_path)
    env_vars = {
        "NCCL_IB_HCA": "mlx5",
        "VERL_LOGGING_LEVEL": "DEBUG",
    }
    ray.init(runtime_env={"env_vars": env_vars})

    # Engine / model / rollout configuration.
    ckpt_config = CheckpointEngineConfig(
        backend="kimi_ckpt_engine", engine_kwargs={"kimi_ckpt_engine": {"rebuild_group": rebuild_group}}
    )
    model_config = HFModelConfig(path=model_path, use_remove_padding=True)
    rollout_config = RolloutConfig(name="vllm", checkpoint_engine=ckpt_config)

    # Split a single resource pool between trainer and rollout workers.
    full_pool = RayResourcePool(process_on_nodes=[num_gpus_per_node] * num_nodes, max_colocate_count=3)
    full_pool.get_placement_groups(device_name=get_device_name())
    trainer_pool, rollout_pool = split_resource_pool(full_pool, [num_trainer, num_rollout])
    trainer = create_trainer_worker_group(trainer_pool, model_config, ckpt_config)
    trainer.reset()
    rollout, replicas = await create_rollout_worker_group(rollout_pool, model_config, rollout_config, check_allclose)

    # Drive three update cycles through the checkpoint engine manager.
    checkpoint_manager = CheckpointEngineManager(config=ckpt_config, trainer=trainer, replicas=replicas)
    for _ in range(3):
        await checkpoint_manager.update_weights()
        rollout.check_weights()
    ray.shutdown()
# Manual large-scale entry point: runs the NCCL test directly on 4 nodes x 8 GPUs
# against an HDFS-hosted checkpoint. Requires HDFS_ROOT; not executed by pytest.
if __name__ == "__main__":
    test_nccl_checkpoint_engine(
        rebuild_group=False,
        num_trainer=2,
        num_rollout=30,
        num_nodes=4,
        num_gpus_per_node=8,
        check_allclose=False,
        model_path=os.environ["HDFS_ROOT"] + "/model/Qwen3-30B-A3B-Base",
    )
| {
"repo_id": "verl-project/verl",
"file_path": "tests/checkpoint_engine/test_correctness_on_gpu.py",
"license": "Apache License 2.0",
"lines": 169,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:tests/checkpoint_engine/test_special_server_adapter.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import os
import pytest
import ray
from omegaconf import DictConfig
from transformers import PreTrainedTokenizer
from tests.checkpoint_engine.test_utils import create_trainer_worker_group
from verl.checkpoint_engine import CheckpointEngineManager
from verl.experimental.agent_loop.agent_loop import AgentLoopManager, AsyncLLMServerManager
from verl.experimental.fully_async_policy.agent_loop.agent_loop import FullyAsyncLLMServerManager
from verl.single_controller.ray import (
RayResourcePool,
)
from verl.utils.config import omega_conf_to_dataclass
from verl.utils.device import get_device_name
from verl.workers.config import CheckpointEngineConfig, HFModelConfig
@pytest.fixture
def init_config() -> DictConfig:
    """Compose a ppo_trainer Hydra config tuned for a 1-node / 4-GPU run."""
    from hydra import compose, initialize_config_dir

    with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config")):
        config = compose(
            config_name="ppo_trainer",
            overrides=[
                "+async_training.partial_rollout_resume=True",
            ],
        )
        config.actor_rollout_ref.model.path = os.path.expanduser("~/models/Qwen/Qwen3-VL-2B-Instruct")
        rollout_cfg = config.actor_rollout_ref.rollout
        rollout_cfg.name = os.environ["ROLLOUT_NAME"]
        rollout_cfg.max_num_seqs = 256
        rollout_cfg.response_length = 4096
        # CUDA clusters sync weights over NCCL; anything else uses HCCL (NPU).
        rollout_cfg.checkpoint_engine.backend = "nccl" if get_device_name() == "cuda" else "hccl"
        rollout_cfg.nnodes = 1
        config.trainer.n_gpus_per_node = 4
        config.trainer.nnodes = 1
        return config
async def _run_update_weights_with_global_steps_none(
    server_manager: AsyncLLMServerManager,
    checkpoint_manager: CheckpointEngineManager,
    tokenizer: PreTrainedTokenizer,
):
    """Update weights with ``global_steps=None`` and check generation still works.

    The subsequent request must finish normally (not aborted) and its output
    must carry no step annotation.
    """
    await checkpoint_manager.update_weights(global_steps=None)

    chat = [{"role": "user", "content": "How to make a sandwich?"}]
    token_ids = tokenizer.apply_chat_template(chat, add_generation_prompt=True, tokenize=True)
    output = await server_manager.generate(
        request_id="test_0",
        prompt_ids=token_ids,
        sampling_params={
            "temperature": 1.0,
            "logprobs": True,
        },
    )
    assert output.stop_reason not in ("aborted", "abort"), (
        f"output.stop_reason is {output.stop_reason}, expected not abort"
    )
    assert output.extra_info["global_steps"] is None, (
        f"output.extra_info['global_steps'] is {output.extra_info['global_steps']}, expected None"
    )
    print("========== [update_weights with global_steps=None] ==========")
    print("[RESPONSE]", tokenizer.decode(output.token_ids, skip_special_tokens=True))
async def _run_server_manager_without_resume(
    initial_steps: int,
    train_steps: int,
    server_manager: AsyncLLMServerManager,
    checkpoint_manager: CheckpointEngineManager,
    prompts: list[list[dict]],
    tokenizer: PreTrainedTokenizer,
):
    """Verify that a weight update aborts in-flight requests (no rollout resume).

    For each training step, a batch of generations is launched and a weight
    update is pushed mid-flight. Without partial-rollout resume, every request
    must come back aborted and be tagged with the previous global step.

    Args:
        initial_steps: First global step to push.
        train_steps: Number of update cycles to run.
        server_manager: Server manager without partial-rollout resume.
        checkpoint_manager: Manager used to push weight updates.
        prompts: Chat-format prompts to generate from.
        tokenizer: Tokenizer for templating and decoding.
    """
    for global_steps in range(initial_steps, initial_steps + train_steps):
        tasks = []
        for i, prompt in enumerate(prompts):
            prompt_ids = tokenizer.apply_chat_template(prompt, add_generation_prompt=True, tokenize=True)
            tasks.append(
                asyncio.create_task(
                    server_manager.generate(
                        request_id=f"test_{global_steps}_{i}",
                        prompt_ids=prompt_ids,
                        sampling_params={
                            "temperature": 1.0,
                            "logprobs": True,
                        },
                    )
                )
            )
        # wait a while and update weights to interrupt the generation
        await asyncio.sleep(3)
        await checkpoint_manager.update_weights(global_steps=global_steps)
        outputs = await asyncio.gather(*tasks)
        expected_steps = global_steps - 1
        for output in outputs:
            # Fix: use a distinct name instead of rebinding (and shadowing) the
            # outer loop variable `global_steps`, which only worked by accident
            # of `for` reassigning it on the next iteration.
            output_steps = output.extra_info["global_steps"]
            assert output.stop_reason in ("aborted", "abort"), (
                f"output.stop_reason is {output.stop_reason}, expected in abort"
            )
            assert output_steps == expected_steps, f"output.global_steps is {output_steps}, expected {expected_steps}"
    print(f"========== [{initial_steps=}, {train_steps=}] ==========")
    print("[RESPONSE]", tokenizer.decode(outputs[0].token_ids, skip_special_tokens=True))
async def _run_server_manager_with_resume(
    initial_steps: int,
    train_steps: int,
    server_manager: FullyAsyncLLMServerManager,
    checkpoint_manager: CheckpointEngineManager,
    prompts: list[list[dict]],
    tokenizer: PreTrainedTokenizer,
):
    """Verify partial-rollout resume: requests survive multiple weight updates.

    A single batch of generations is launched, then several weight updates are
    pushed while it is in flight. With resume enabled no request may abort, and
    every output must span steps from before the first update (min) to after
    at least one update (max).
    """
    # 1. rollout generate responses
    generation_tasks = [
        asyncio.create_task(
            server_manager.generate(
                request_id=f"test_{initial_steps}_{idx}",
                prompt_ids=tokenizer.apply_chat_template(chat, add_generation_prompt=True, tokenize=True),
                sampling_params={
                    "temperature": 1.0,
                    "logprobs": True,
                },
            )
        )
        for idx, chat in enumerate(prompts)
    ]
    # 2. trainer update weights to rollout multiple times
    for step in range(initial_steps, initial_steps + train_steps):
        # wait a while and update weights to interrupt the generation
        await asyncio.sleep(3)
        await checkpoint_manager.update_weights(global_steps=step)
    # 3. wait for rollout generate responses finished
    outputs = await asyncio.gather(*generation_tasks)
    expected_min_steps = initial_steps - 1
    for output in outputs:
        min_global_steps = output.extra_info["min_global_steps"]
        max_global_steps = output.extra_info["max_global_steps"]
        assert min_global_steps == expected_min_steps, (
            f"output.min_global_steps is {min_global_steps}, expected {expected_min_steps}"
        )
        assert max_global_steps > expected_min_steps, (
            f"output.max_global_steps is {max_global_steps}, expected > {expected_min_steps}"
        )
        assert output.stop_reason not in ("aborted", "abort"), (
            f"output.stop_reason is {output.stop_reason}, expected not abort"
        )
    print(f"========== [{initial_steps=}, {train_steps=}] ==========")
    print("[RESPONSE]", tokenizer.decode(outputs[0].token_ids, skip_special_tokens=True))
@pytest.mark.asyncio
async def test_server_adapter(init_config):
    """End-to-end checkpoint-engine test against standalone rollout servers.

    Builds a trainer worker group plus an AgentLoopManager-owned rollout
    deployment, then exercises three scenarios: a weight update with
    global_steps=None, updates that abort in-flight requests (no resume),
    and updates with partial-rollout resume enabled.
    """
    ray.init(
        runtime_env={
            "env_vars": {
                "TOKENIZERS_PARALLELISM": "true",
                "NCCL_DEBUG": "WARN",
                "VLLM_LOGGING_LEVEL": "INFO",
                "VLLM_USE_V1": "1",
                "VLLM_DISABLE_COMPILE_CACHE": "1",
            }
        }
    )
    # 1. create trainer worker group
    model_config: HFModelConfig = omega_conf_to_dataclass(init_config.actor_rollout_ref.model)
    checkpoint_engine_config: CheckpointEngineConfig = omega_conf_to_dataclass(
        init_config.actor_rollout_ref.rollout.checkpoint_engine
    )
    trainer_pool = RayResourcePool(process_on_nodes=[init_config.trainer.n_gpus_per_node], max_colocate_count=3)
    trainer = create_trainer_worker_group(trainer_pool, model_config, checkpoint_engine_config)
    trainer.reset()
    # 2. create standalone rollout with AgentLoopManager
    agent_loop_manager = await AgentLoopManager.create(config=init_config)
    server_handles = [server._server_handle for server in agent_loop_manager.rollout_replicas]
    # 3. create checkpoint engine manager
    checkpoint_manager = CheckpointEngineManager(
        config=checkpoint_engine_config, trainer=trainer, replicas=agent_loop_manager.rollout_replicas
    )
    # Long-form prompts so generations are still in flight when updates arrive.
    n = 4
    prompts = [
        [{"role": "user", "content": "Please write an article about the history of China, at least 1000 words."}],
        [{"role": "user", "content": "Please write an article about the history of America, at least 1000 words."}],
        [{"role": "user", "content": "Please write an article about the geography of China, at least 1000 words."}],
        [{"role": "user", "content": "Please write an article about the geography of America, at least 1000 words."}],
    ] * n
    server_manager = AsyncLLMServerManager(config=init_config, server_handles=server_handles)
    # 4. test update_weights with global_steps=None
    await _run_update_weights_with_global_steps_none(
        server_manager=server_manager,
        checkpoint_manager=checkpoint_manager,
        tokenizer=model_config.tokenizer,
    )
    # 5. test AsyncLLMServerManager without partial rollout resume
    await checkpoint_manager.update_weights(global_steps=0)
    await _run_server_manager_without_resume(
        initial_steps=1,
        train_steps=3,
        server_manager=server_manager,
        checkpoint_manager=checkpoint_manager,
        prompts=prompts,
        tokenizer=model_config.tokenizer,
    )
    # 6. test FullyAsyncLLMServerManager with partial rollout resume
    server_manager = FullyAsyncLLMServerManager(config=init_config, server_handles=server_handles)
    await _run_server_manager_with_resume(
        initial_steps=4,
        train_steps=3,
        server_manager=server_manager,
        checkpoint_manager=checkpoint_manager,
        prompts=prompts,
        tokenizer=model_config.tokenizer,
    )
    ray.shutdown()
| {
"repo_id": "verl-project/verl",
"file_path": "tests/checkpoint_engine/test_special_server_adapter.py",
"license": "Apache License 2.0",
"lines": 218,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:verl/checkpoint_engine/hccl_checkpoint_engine.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import time
from dataclasses import dataclass
from typing import AsyncGenerator, Generator
import ray
import torch
import zmq
from vllm.distributed.utils import StatelessProcessGroup
from verl.checkpoint_engine.base import CheckpointEngine, CheckpointEngineRegistry, TensorMeta
from verl.utils.distributed import stateless_init_process_group
from verl.utils.net_utils import get_free_port, is_valid_ipv6_address
# Module logger; verbosity is controlled by VERL_LOGGING_LEVEL (default WARN).
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
@dataclass
class MasterMetadata:
    """Endpoints published by the master (rank 0) for other workers to connect to."""

    zmq_ip: str  # IP of the master's zeromq PUB socket (metadata channel)
    zmq_port: int  # port of the zeromq PUB socket
    dist_ip: str  # IP for the stateless HCCL process-group rendezvous
    dist_port: int  # port for the stateless HCCL process-group rendezvous
class BroadcastOperation:
    """Broadcast one tensor bucket with HCCL, fanning metadata out over zeromq.

    Unlike what the NCCL counterpart's docstring suggests, this implementation
    is synchronous: ``_run`` executes in the calling thread during construction,
    and ``wait_for_complete`` simply returns the (possibly received) metadata.

    Args:
        rank (int): The rank of the current process.
        process_group (StatelessProcessGroup | str): Wrapper exposing the HCCL
            ``broadcast`` collective.
        bucket (torch.Tensor): The tensor to broadcast.
        metadata (dict[str, TensorMeta]): The metadata of the tensor.
        socket (zmq.Socket): The zeromq socket to communicate with master.
        topic (str): The topic to subscribe.
    """
    def __init__(
        self,
        rank: int,
        process_group: StatelessProcessGroup | str,
        bucket: torch.Tensor,
        metadata: dict[str, TensorMeta],
        socket: zmq.Socket,
        topic: str,
    ) -> None:
        self.rank = rank
        self.pyhccl = process_group
        self.bucket = bucket
        self.metadata = metadata
        self.socket = socket
        self.topic = topic
        # Kick off immediately; construction blocks until the broadcast is issued.
        self._run()
    def _run(self):
        # broadcast tensor meta via zeromq PUB/SUB: rank 0 publishes under
        # `topic`, every other rank blocks until the metadata arrives.
        if self.rank == 0:
            self.socket.send_string(self.topic, flags=zmq.SNDMORE)
            self.socket.send_pyobj(self.metadata)
        else:
            self.socket.recv_string()
            self.metadata = self.socket.recv_pyobj()
        # broadcast tensor via HCCL from rank 0 to all other ranks
        self.pyhccl.broadcast(self.bucket, src=0)
    async def wait_for_complete(self) -> dict[str, TensorMeta]:
        """Wait for the broadcast operation to complete.

        Returns:
            dict[str, TensorMeta]: The bucket meta after broadcast.
        """
        return self.metadata
@CheckpointEngineRegistry.register("hccl")
class HCCLCheckpointEngine(CheckpointEngine):
"""HCCL checkpoint engine with collective communication.
Args:
bucket_size (int): Bucket size in bytes to transfer multiple weights at one time. Note that we use
two buffer to send and recv weights at same time, so the device memory overhead is 2 * bucket_size.
group_name (str): The name of the HCCL process group. Defaults to "default".
rebuild_group (bool): Whether to rebuild the HCCL process group in each update. Defaults to False.
is_master (bool): Whether the current process is the master process. Defaults to False.
rollout_dtype (torch.dtype): The dtype of the weights received from rollout workers. Defaults to torch.bfloat16.
"""
    def __init__(
        self,
        bucket_size: int,
        group_name: str = "default",
        rebuild_group: bool = False,
        is_master: bool = False,
        rollout_dtype: torch.dtype = torch.bfloat16,
    ) -> None:
        self.bucket_size = bucket_size
        self.group_name = group_name
        self.rebuild_group = rebuild_group
        self.rollout_dtype = rollout_dtype
        # HCCL group is built lazily in init_process_group (rebuilt every
        # update cycle when rebuild_group is True).
        self.pyhccl = None
        self.device = torch.npu.current_device()
        # start zeromq server for broadcasting bucket tensor metadata
        self.is_master = is_master
        self.topic = "bucket_metadata"
        if self.is_master:
            self._start_zmq_server()
            # Reserve a rendezvous port for the stateless process group
            # (self.ip is assigned inside _start_zmq_server).
            self.dist_port, _ = get_free_port(self.ip)
def prepare(self) -> MasterMetadata:
self.send_buf = torch.zeros(self.bucket_size, dtype=torch.uint8, device="npu")
self.recv_buf = torch.zeros(self.bucket_size, dtype=torch.uint8, device="npu")
return (
MasterMetadata(zmq_ip=self.ip, zmq_port=self.zmq_port, dist_ip=self.ip, dist_port=self.dist_port)
if self.is_master
else None
)
    def finalize(self):
        """Destroy the HCCL process group if rebuild_group is True."""
        if self.rebuild_group:
            # Ranks < 0 (trainer workers other than rank 0) never created a
            # communicator, so there is nothing to destroy for them.
            if self.rank >= 0:
                self.pyhccl.destroyComm(self.pyhccl.comm)
            self.pyhccl = None
            self.rank = None
            self.world_size = None
            # Drop the bucket buffers and return their memory to the allocator.
            self.send_buf = None
            self.recv_buf = None
            torch.npu.empty_cache()
@classmethod
def build_topology(cls, trainer_world_size: int, rollout_world_size: int, metadata: list[dict]):
trainer_kwargs = {
"rank": [0] + [-1] * (trainer_world_size - 1),
"world_size": [rollout_world_size + 1] * trainer_world_size,
"master_metadata": [metadata[0]] * trainer_world_size,
}
rollout_kwargs = {
"rank": list(range(1, rollout_world_size + 1)),
"world_size": [rollout_world_size + 1] * rollout_world_size,
"master_metadata": [metadata[0]] * rollout_world_size,
}
return trainer_kwargs, rollout_kwargs
def _start_zmq_server(self):
self.ip = ray.util.get_node_ip_address().strip("[]")
self.zmq_port, _ = get_free_port(self.ip)
context = zmq.Context()
self.socket = context.socket(zmq.PUB)
if is_valid_ipv6_address(self.ip):
address = f"tcp://[{self.ip}]:{self.zmq_port}"
self.socket.setsockopt(zmq.IPV6, 1)
else:
address = f"tcp://{self.ip}:{self.zmq_port}"
self.socket.bind(address)
def _connect_zmq_client(self, metadata: MasterMetadata):
assert not self.is_master, "Master process should not connect to other processes."
context = zmq.Context()
self.socket = context.socket(zmq.SUB)
if is_valid_ipv6_address(metadata.zmq_ip):
address = f"tcp://[{metadata.zmq_ip}]:{metadata.zmq_port}"
self.socket.setsockopt(zmq.IPV6, 1)
else:
address = f"tcp://{metadata.zmq_ip}:{metadata.zmq_port}"
self.socket.connect(address)
self.socket.setsockopt_string(zmq.SUBSCRIBE, self.topic)
    def init_process_group(self, rank: int, world_size: int, master_metadata: MasterMetadata):
        """Initialize the HCCL process group.

        Args:
            rank (int): The rank of the current process. Trainer workers other
                than rank 0 pass -1 and never join the group.
            world_size (int): The total number of processes.
            master_metadata (MasterMetadata): Master's ZMQ and dist endpoints.
        """
        # For trainer workers other than rank 0, their rank should be -1.
        if rank < 0:
            self.rank = rank
            self.world_size = world_size
            return
        if self.rebuild_group or self.pyhccl is None:
            # (Re)build the communicator via a rendezvous on the master's
            # dist endpoint.
            self.pyhccl = stateless_init_process_group(
                master_metadata.dist_ip, master_metadata.dist_port, rank, world_size, self.device
            )
            self.rank = rank
            self.world_size = world_size
        else:
            # Reusing an existing group: topology must not have changed.
            assert self.rank == rank, f"rank {rank} is not equal to self.rank {self.rank}"
            assert self.world_size == world_size, (
                f"world_size {world_size} is not equal to self.world_size {self.world_size}"
            )
        if self.rank > 0:
            # Non-master members subscribe to the master's metadata stream.
            self._connect_zmq_client(master_metadata)
        # barrier
        signal = torch.tensor([1], dtype=torch.int8, device=torch.npu.current_device())
        self.pyhccl.all_reduce(signal)
        logger.info(f"init_process_group rank: {self.rank}, world_size: {self.world_size}")
    @torch.no_grad()
    async def send_weights(self, weights: Generator[tuple[str, torch.Tensor], None, None]):
        """Send the weights of the model.

        Packs tensors into a fixed-size uint8 bucket and broadcasts buckets
        over HCCL with double buffering: the next bucket is filled in
        ``send_buf`` while the previous broadcast (from the swapped buffer) is
        still in flight. Bucket layout metadata travels over ZMQ with each
        broadcast.

        Args:
            weights: A generator that yields the name of the weight tensor and the tensor itself.
        """
        assert self.rank <= 0, "Trainer workers other than rank 0 should not send weights."
        # For trainer rank other than 0, consume weights without sending.
        if self.rank < 0:
            for name, weight in weights:
                pass
            return
        send_buf, recv_buf = self.send_buf, self.recv_buf
        broadcast_op = None
        start_time = time.time()
        bucket_meta: dict[str, TensorMeta] = {}
        offset = 0
        for name, weight in weights:
            # fill the tensor bucket
            if offset + weight.nbytes > self.bucket_size:
                # Bucket full: ensure pending copies into send_buf have landed
                # before the broadcast reads it.
                torch.npu.synchronize()
                # wait previous broadcast op finish
                if broadcast_op is not None:
                    await broadcast_op.wait_for_complete()
                broadcast_op = BroadcastOperation(
                    rank=self.rank,
                    process_group=self.pyhccl,
                    bucket=send_buf,
                    metadata={"bucket_meta": bucket_meta, "is_last": False},
                    socket=self.socket,
                    topic=self.topic,
                )
                # swap send_buf and recv_buf
                send_buf, recv_buf = recv_buf, send_buf
                bucket_meta = {}
                offset = 0
            assert offset + weight.nbytes <= self.bucket_size, (
                f"Weight {name}({weight.shape}, {weight.dtype}) is too large to fit in the bucket."
            )
            # Record where this tensor lives inside the bucket so receivers
            # can slice it back out.
            bucket_meta[name] = {
                "name": name,
                "shape": weight.shape,
                "dtype": weight.dtype,
                "offset": offset,
            }
            send_buf[offset : offset + weight.nbytes] = weight.view(-1).view(torch.uint8)
            offset += weight.nbytes
        # broadcast last bucket
        torch.npu.synchronize()
        if broadcast_op is not None:
            await broadcast_op.wait_for_complete()
        broadcast_op = BroadcastOperation(
            rank=self.rank,
            process_group=self.pyhccl,
            bucket=send_buf,
            metadata={"bucket_meta": bucket_meta, "is_last": True},
            socket=self.socket,
            topic=self.topic,
        )
        await broadcast_op.wait_for_complete()
        logger.info(f"Rank {self.rank} send weights done, time cost: {time.time() - start_time:.2f}s")
    @torch.no_grad()
    async def receive_weights(self) -> AsyncGenerator[tuple[str, torch.Tensor], None]:
        """Receive the weights of the model.

        Mirror of ``send_weights``: buckets arrive via HCCL broadcast with
        double buffering — tensors are sliced out of the previously received
        bucket (``send_buf`` after the swap) while the next bucket is received
        into ``recv_buf``.

        Yields:
            A tuple of the name of the weight tensor and the tensor itself.
        """
        assert self.rank > 0, "Rank 0 should not receive weights."
        send_buf, recv_buf = self.send_buf, self.recv_buf
        total_bytes, total_params = 0, 0
        # receive first bucket
        start_time = time.time()
        broadcast_op = BroadcastOperation(
            rank=self.rank,
            process_group=self.pyhccl,
            bucket=recv_buf,
            metadata=None,
            socket=self.socket,
            topic=self.topic,
        )
        metadata = await broadcast_op.wait_for_complete()
        total_bytes += self.bucket_size
        total_params += len(metadata["bucket_meta"])
        # swap send_buf and recv_buf
        send_buf, recv_buf = recv_buf, send_buf
        while not metadata["is_last"]:
            # 1. receive next bucket
            broadcast_op = BroadcastOperation(
                rank=self.rank,
                process_group=self.pyhccl,
                bucket=recv_buf,
                metadata=None,
                socket=self.socket,
                topic=self.topic,
            )
            # 2. yield tensor from send_buf
            for name, meta in metadata["bucket_meta"].items():
                dtype, shape = meta["dtype"], meta["shape"]
                size = dtype.itemsize * shape.numel()
                tensor = send_buf[meta["offset"] : meta["offset"] + size].view(dtype=dtype).view(shape)
                yield name, tensor
            # 3. wait for next bucket broadcast finish
            metadata = await broadcast_op.wait_for_complete()
            total_bytes += self.bucket_size
            total_params += len(metadata["bucket_meta"])
            # 4. swap send_buf and recv_buf
            torch.npu.synchronize()  # sync non-blocking copy
            send_buf, recv_buf = recv_buf, send_buf
        # yield tensor from send_buf
        for name, meta in metadata["bucket_meta"].items():
            dtype, shape = meta["dtype"], meta["shape"]
            size = dtype.itemsize * shape.numel()
            tensor = send_buf[meta["offset"] : meta["offset"] + size].view(dtype=dtype).view(shape)
            yield name, tensor
        time_cost = time.time() - start_time
        bandwidth = total_bytes / time_cost / (1024 * 1024 * 1024)
        logger.info(
            f"Rank {self.rank} receive weights done, total_params: {total_params}, "
            f"time cost: {time_cost:.2f}s, bandwidth: {bandwidth:.2f} GB/s"
        )
| {
"repo_id": "verl-project/verl",
"file_path": "verl/checkpoint_engine/hccl_checkpoint_engine.py",
"license": "Apache License 2.0",
"lines": 308,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/utils/profiler/torch_profile.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import os
from datetime import datetime, timezone
from typing import Callable, Optional
import torch
from .config import ProfilerConfig, TorchProfilerToolConfig
from .profile import DistProfiler
def get_torch_profiler(
    contents: list[str],
    save_path: str,
    role: Optional[str] = None,
    save_file_prefix: Optional[str] = None,
    rank: int = 0,
):
    """Construct a torch profiler that exports a chrome trace when done.

    Args:
        contents: Signals to record; any of "cpu", "cuda", "stack", "shapes",
            "memory". Empty/None enables both CPU and CUDA activities.
        save_path: Directory for the trace file (a per-role subdirectory is
            appended when ``role`` is given; created if missing).
        role: Optional role name used as a subdirectory.
        save_file_prefix: Optional prefix for the trace file name.
        rank: Rank embedded in the trace file name.

    Returns:
        A torch.profiler.profile instance (not yet started).
    """
    target_dir = os.path.join(save_path, role) if role else save_path
    os.makedirs(target_dir, exist_ok=True)

    # File name: [<prefix>_]prof_rank-<rank>_<pid>_<local timestamp, ms>.json.gz
    now = datetime.now(tz=timezone.utc).astimezone()
    stamp = now.strftime("%Y%m%d%H%M%S%f")[:-3]
    file_name = f"prof_rank-{rank}_{os.getpid()}_{stamp}.json.gz"
    if save_file_prefix:
        file_name = f"{save_file_prefix}_{file_name}"
    trace_path = os.path.join(target_dir, file_name)

    def _trace_handler(prof):
        print(f"[Profiler] Saving trace to {trace_path}")
        prof.export_chrome_trace(trace_path)

    selected = set(contents) if contents else set()
    activities = [
        activity
        for flag, activity in (
            ("cpu", torch.profiler.ProfilerActivity.CPU),
            ("cuda", torch.profiler.ProfilerActivity.CUDA),
        )
        if not selected or flag in selected
    ]
    return torch.profiler.profile(
        activities=activities,
        with_stack="stack" in selected,
        record_shapes="shapes" in selected,
        profile_memory="memory" in selected,
        on_trace_ready=_trace_handler,
    )
class Profiler(DistProfiler):
    """A PyTorch profiler wrapper class for collecting performance metrics.

    This profiler provides a convenient interface for profiling PyTorch operations,
    with support for:
    - CPU and CUDA activity profiling
    - Configurable profiling schedule (wait/warmup/active steps)
    - Multi-rank profiling support
    - Chrome trace export

    Args:
        config: Configuration object containing profiling parameters
        tool_config: Torch-profiler-specific options (contents, discrete mode).
            May be None only when profiling is disabled.
        save_file_prefix: Optional prefix for the exported trace file name.
    """

    # Class-level counter: in continuous (non-discrete) mode only the first
    # start() creates/starts the underlying torch profiler and only the
    # matching stop() stops it, so repeated start/stop pairs are safe.
    _define_count = 0

    def __init__(
        self,
        rank,
        config: ProfilerConfig,
        tool_config: Optional[TorchProfilerToolConfig] = None,
        save_file_prefix=None,
    ):
        # note : if we do not set use_profile, it will be set as None, so that all function will be skip
        config = config or ProfilerConfig(ranks=[], enable=False)
        self.save_file_prefix = save_file_prefix
        if not tool_config:
            assert not config.enable, "tool_config must be provided when profiler is enabled"
        self.prof = None
        self.rank = rank
        self.config = config
        self.tool_config = tool_config
        # Fix: tool_config is legitimately None when profiling is disabled (see
        # the assert above); the previous unconditional attribute access raised
        # AttributeError in that case.
        self.contents = self.tool_config.contents if self.tool_config else []
        self.save_path = self.config.save_path
        # Align with other profilers: read discrete mode, default to False for torch profiler
        self.discrete = getattr(self.tool_config, "discrete", False)

    def check(self):
        """Return True when an underlying torch profiler instance exists."""
        return self.prof is not None

    def start(self, **kwargs):
        """Start continuous profiling (no-op in discrete mode or if already started)."""
        role = kwargs.get("role", None)
        if not self.discrete and Profiler._define_count == 0:
            self.prof = get_torch_profiler(
                contents=self.contents,
                save_path=self.save_path,
                role=role,
                save_file_prefix=self.save_file_prefix,
                rank=self.rank,
            )
            print(f"[Profiler] started for rank {self.rank}")
            self.prof.start()
            Profiler._define_count += 1

    def step(self):
        """Advance the profiler schedule if profiling is active."""
        if self.check():
            self.prof.step()

    def stop(self):
        """Stop continuous profiling and export the trace (no-op in discrete mode)."""
        if not self.discrete and Profiler._define_count == 1:
            self.step()
            print(f"[Profiler] stopped for rank {self.rank}")
            self.prof.stop()
            Profiler._define_count -= 1

    def annotate(self, message: Optional[str] = None, role: Optional[str] = None, **kwargs_outer) -> Callable:
        """Decorate a Worker member function to profile the current rank in the current training step.

        Requires the target function to be a member function of a Worker,
        which has a member field `profiler` with Profiler type.

        Args:
            message (str, optional):
                The message to be displayed in the profiler. Defaults to None.
            role (str, optional):
                The role of the current data collection. Defaults to None.
        """

        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs_inner):
                profile_name = message or func.__name__
                if not self.discrete:
                    # In continuous mode, we just record function, profiler started globally
                    with torch.profiler.record_function(profile_name):
                        return func(*args, **kwargs_inner)
                # In discrete mode, we start/stop profiler around the function
                prof = get_torch_profiler(
                    contents=self.contents,
                    save_path=self.save_path,
                    role=role,
                    save_file_prefix=self.save_file_prefix,
                    rank=self.rank,
                )
                prof.start()
                with torch.profiler.record_function(profile_name):
                    result = func(*args, **kwargs_inner)
                prof.stop()
                return result

            return wrapper

        return decorator
| {
"repo_id": "verl-project/verl",
"file_path": "verl/utils/profiler/torch_profile.py",
"license": "Apache License 2.0",
"lines": 142,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:examples/sglang_multiturn/gsm8k_toolcall_shaping/gsm8k_toolcall_shaping.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import Any, Optional
from verl.utils.reward_score.gsm8k import compute_score as gsm8k_compute_score
def toolcall_shaping_reward(
    data_source: Optional[str],
    solution_str: str,
    ground_truth: str,
    extra_info: Optional[dict[str, Any]] = None,
    *,
    method: str = "strict",
    format_score: float = 0.1,
    score: float = 1.0,
    shaping_reward: float = 0.1,
    trigger_substring: str = "<tool_call>",
    **kwargs,
) -> float:
    """
    GSM8K reward + tool-call shaping reward (trajectory-level).

    The base GSM8K score is augmented with ``shaping_reward`` whenever the
    trajectory contains ``trigger_substring`` (i.e. the model emitted a tool
    call at least once).
    """
    total = gsm8k_compute_score(solution_str, ground_truth, method, format_score, score)
    if trigger_substring and trigger_substring in solution_str:
        total += shaping_reward
    return float(total)
# Optional: keep the default entry-point name expected by verl reward configs (compute_score).
def compute_score(
    data_source: Optional[str],
    solution_str: str,
    ground_truth: str,
    extra_info: Optional[dict[str, Any]] = None,
    **kwargs,
) -> float:
    """Default-named entry point; delegates to :func:`toolcall_shaping_reward`."""
    return toolcall_shaping_reward(
        data_source,
        solution_str,
        ground_truth,
        extra_info,
        **kwargs,
    )
| {
"repo_id": "verl-project/verl",
"file_path": "examples/sglang_multiturn/gsm8k_toolcall_shaping/gsm8k_toolcall_shaping.py",
"license": "Apache License 2.0",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/workers/rollout/trtllm_rollout/trtllm_async_server.py | # Copyright 2026 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
import os
from typing import Any, Optional
import ray
import torch
from omegaconf import DictConfig
from ray.actor import ActorHandle
from ray.util import placement_group_table
from ray.util.placement_group import PlacementGroup
from verl.single_controller.ray import SubRayResourcePool
from verl.utils.config import omega_conf_to_dataclass
from verl.utils.net_utils import is_valid_ipv6_address
from verl.workers.config import HFModelConfig, RolloutConfig
from verl.workers.rollout.replica import RolloutMode, RolloutReplica, TokenOutput
from verl.workers.rollout.trtllm_rollout.trtllm_rollout import ServerAdapter
from verl.workers.rollout.utils import get_max_position_embeddings, run_uvicorn
# Module logger; INFO level so server lifecycle messages are visible by default.
logger = logging.getLogger(__file__)
logger.setLevel(logging.INFO)
@ray.remote
class TRTLLMHttpServer:
    """TensorRT LLM HTTP server in single node.

    Wraps a TRT-LLM AsyncLLM engine behind an OpenAI-compatible HTTP server
    and exposes token-in/token-out generation plus sleep/wake-up memory
    management for colocated training.

    Args:
        config (DictConfig): full config.
        model_config (HFModelConfig): model config.
        is_reward_model (bool): whether this is a reward model.
        rollout_mode (RolloutMode): rollout mode.
        workers (list[ActorHandle]): list of rollout workers.
        replica_rank (int): replica rank, a replica may contain multiple nodes.
        max_colocate_count (int): max colocate count.
        pgs (list[PlacementGroup]): placement groups.
        bundle_indices (list[list[int]]): bundle indices.
    """

    def __init__(
        self,
        config: RolloutConfig,
        model_config: HFModelConfig,
        is_reward_model: bool,
        rollout_mode: RolloutMode,
        workers: list[ActorHandle],
        replica_rank: int,
        max_colocate_count: int,
        pgs: list[PlacementGroup] = None,
        bundle_indices: list[list[int]] = None,
    ):
        # NOTE(review): presumably forces TRT-LLM to load weights sequentially
        # across colocated engines — confirm against TRT-LLM docs.
        os.environ["TRT_LLM_DISABLE_LOAD_WEIGHTS_IN_PARALLEL"] = "1"
        assert torch.cuda.is_available(), "TRTLLM http server should run on GPU node"
        self.config: RolloutConfig = omega_conf_to_dataclass(config)
        self.model_config: HFModelConfig = omega_conf_to_dataclass(model_config, dataclass_type=HFModelConfig)
        self.is_reward_model = is_reward_model
        # Derive/validate max_model_len against the model's position-embedding limit.
        max_position_embeddings = get_max_position_embeddings(self.model_config.hf_config)
        if self.config.max_model_len is None:
            self.config.max_model_len = max_position_embeddings
        else:
            if self.config.max_model_len > max_position_embeddings:
                raise ValueError(
                    f"max_model_len ({self.config.max_model_len}) should be less than or equal to "
                    f"max_position_embeddings ({max_position_embeddings})"
                )
        self.rollout_mode = rollout_mode
        self.workers = workers
        self.replica_rank = replica_rank
        self.max_colocate_count = max_colocate_count
        self.pgs = pgs
        self.bundle_indices = bundle_indices
        # model weights version, set by ServerAdapter when update weights.
        self.global_steps = None
        # Dummy (random) weights only make sense in hybrid mode where the
        # trainer pushes real weights; otherwise fall back to loading from disk.
        if self.rollout_mode != RolloutMode.HYBRID and self.config.load_format == "dummy":
            logger.warning(f"rollout mode is {self.rollout_mode}, load_format is dummy, set to auto")
            self.config.load_format = "auto"
        # used for http server
        self._server_address = ray.util.get_node_ip_address().strip("[]")
        self._server_port = None
        logger.info(f"TRTLLMHttpServer, replica_rank: {self.replica_rank}")
        # Fixed sampling arguments merged into every generate() call
        # (token-in/token-out: no detokenization, explicit pad/eos ids).
        self.sampling_args = {
            "detokenize": False,
            "end_id": -1,
            "pad_id": self.model_config.hf_config.pad_token_id,
            "stop_token_ids": [self.model_config.hf_config.eos_token_id],
            "include_stop_str_in_output": True,
        }

    def get_server_address(self):
        """Get http server address and port."""
        assert self._server_port is not None, "http server is not launched, port is None"
        return self._server_address, self._server_port

    async def launch_server(self):
        """Build the AsyncLLM engine and start the OpenAI-compatible HTTP server."""
        from tensorrt_llm import AsyncLLM
        from tensorrt_llm.llmapi import CapacitySchedulerPolicy, CudaGraphConfig, KvCacheConfig, SchedulerConfig
        from tensorrt_llm.serve import OpenAIServer

        assert self.config.pipeline_model_parallel_size == 1, "pipeline_model_parallel_size > 1 is not supported yet"
        engine_kwargs = self.config.get("engine_kwargs", {}).get("trtllm", {}) or {}
        kv_cache_config = KvCacheConfig(
            enable_block_reuse=self.config.enable_prefix_caching,
            free_gpu_memory_fraction=self.config.gpu_memory_utilization,
        )
        # Each colocated engine gets an equal share of the GPU.
        per_worker_gpu_share = 1.0 / self.max_colocate_count
        quantization = self.config.quantization
        if quantization is not None:
            if quantization == "fp8":
                # Block-wise FP8 quantization config injected via model_kwargs.
                FP8_BLOCK_QUANT_KWARGS = {
                    "activation_scheme": "dynamic",
                    "fmt": "e4m3",
                    "quant_method": "fp8",
                    "weight_block_size": [128, 128],
                }
                engine_kwargs["model_kwargs"] = {"quantization_config": FP8_BLOCK_QUANT_KWARGS}
                if self.config.load_format != "dummy":
                    raise ValueError("FP8 quantization is only supported for dummy load format")
            else:
                raise ValueError(f"Currently only support fp8 quantization, got: {quantization}")
        llm_kwargs = {
            "model": self.model_config.local_path,
            "backend": "pytorch",
            "dtype": self.config.dtype,
            "enable_chunked_prefill": self.config.enable_chunked_prefill,
            "skip_tokenizer_init": self.config.skip_tokenizer_init,
            "orchestrator_type": "ray",
            "ray_worker_extension_cls": "tensorrt_llm.llmapi.rlhf_utils.WorkerExtension",
            "kv_cache_config": kv_cache_config,
            "max_seq_len": self.config.max_model_len,
            "max_batch_size": self.config.max_num_seqs,
            "max_num_tokens": self.config.max_num_batched_tokens,
            "tensor_parallel_size": self.config.tensor_model_parallel_size,
            "pipeline_parallel_size": self.config.pipeline_model_parallel_size,
            "moe_expert_parallel_size": self.config.expert_parallel_size,
            "moe_tensor_parallel_size": self.config.moe_tensor_parallel_size,
            "load_format": self.config.load_format,
            "trust_remote_code": self.model_config.trust_remote_code,
            "placement_groups": self.pgs,
            "placement_bundle_indices": self.bundle_indices,
            "per_worker_gpu_share": per_worker_gpu_share,
            "enable_sleep": self.config.enable_sleep_mode,
            "allreduce_strategy": "NCCL",
            "sampler_type": "TRTLLMSampler",
            **engine_kwargs,
        }
        if self.is_reward_model:
            llm_kwargs.update(
                {
                    "cuda_graph_config": None,
                    "disable_overlap_scheduler": True,
                }
            )
        else:
            llm_kwargs.update(
                {
                    "cuda_graph_config": CudaGraphConfig(
                        enable_padding=True,
                        batch_sizes=self.config.cudagraph_capture_sizes,
                        max_batch_size=0 if self.config.cudagraph_capture_sizes else self.config.max_num_seqs,
                    ),
                    "scheduler_config": SchedulerConfig(
                        capacity_scheduler_policy=CapacitySchedulerPolicy.MAX_UTILIZATION,
                    ),
                }
            )
        self.llm = await AsyncLLM(**llm_kwargs)
        trtllm_server = OpenAIServer(
            generator=self.llm,
            model=self.model_config.local_path,
            tool_parser=None,
            server_role=None,
            metadata_server_cfg=None,
        )
        app = trtllm_server.app
        self._server_port, self._server_task = await run_uvicorn(app, None, self._server_address)

    async def generate(
        self,
        prompt_ids: list[int],
        sampling_params: dict[str, Any],
        request_id: str,
        image_data: Optional[list[Any]] = None,
        video_data: Optional[list[Any]] = None,
    ) -> TokenOutput:
        """Generate sequence with token-in-token-out."""
        assert image_data is None and video_data is None, "Multimodality is not yet supported in TRTLLMHttpServer."
        from tensorrt_llm.llmapi import SamplingParams

        # Cap the response so prompt + response fits inside max_model_len.
        max_tokens = min(self.config.response_length, self.config.max_model_len - len(prompt_ids))
        sampling_params["max_tokens"] = max_tokens
        sampling_params["logprobs"] = 1 if sampling_params.pop("logprobs", False) else None
        # Map caller's top_k=-1 to 0 for TRT-LLM (presumably "top-k disabled"
        # in TRT-LLM's convention — confirm against SamplingParams docs).
        if sampling_params["top_k"] == -1:
            sampling_params["top_k"] = 0
        sampling_params.update(self.sampling_args)
        trt_llm_sampling_params = SamplingParams(**sampling_params)
        outputs = await self.llm.generate_async(
            inputs=prompt_ids,
            sampling_params=trt_llm_sampling_params,
        )
        token_ids = outputs.outputs[0].token_ids
        log_probs = None
        if trt_llm_sampling_params.logprobs is not None:
            # Each logprobs entry maps token id -> logprob object; take the
            # single requested (top-1) entry per position.
            log_probs = [list(d.values())[0].logprob for d in outputs.outputs[0].logprobs]
        return TokenOutput(token_ids=token_ids, log_probs=log_probs, extra_info={"global_steps": self.global_steps})

    async def set_global_steps(self, global_steps: int):
        """Set the global steps of the model weights."""
        self.global_steps = global_steps

    async def abort_all_requests(self):
        # Not supported by this backend yet.
        raise NotImplementedError

    async def resume_generation(self):
        # Not supported by this backend yet.
        raise NotImplementedError

    async def wake_up(self):
        """Re-occupy engine GPU memory (colocated mode only)."""
        if self.rollout_mode == RolloutMode.HYBRID:
            # In hybrid mode, rollout is wake up in `update_weights`
            raise ValueError(f"wake_up not support rollout_mode {self.rollout_mode}")
        if self.rollout_mode == RolloutMode.COLOCATED:
            await self.llm.resume(tags=ServerAdapter.get_full_tags())
        elif self.rollout_mode == RolloutMode.STANDALONE:
            logger.info("skip wake_up in standalone mode")

    async def sleep(self):
        """Release engine GPU memory so colocated training can use it."""
        if not self.config.free_cache_engine:
            return
        if self.rollout_mode == RolloutMode.HYBRID:
            await self.llm.release(tags=ServerAdapter.get_full_tags())
        elif self.rollout_mode == RolloutMode.COLOCATED:
            await self.llm.release(tags=ServerAdapter.get_full_tags())
        elif self.rollout_mode == RolloutMode.STANDALONE:
            logger.info("skip sleep in standalone mode")

    async def report_device_ids(self) -> list[str]:
        """Report GPU device UUIDs from TRT-LLM workers."""
        return await self.llm.collective_rpc(
            "report_device_id",
            unique_reply_rank=0,
        )
class TRTLLMReplica(RolloutReplica):
    """Rollout replica backed by a single-node TRT-LLM HTTP server actor."""

    def __init__(
        self,
        replica_rank: int,
        config: RolloutConfig,
        model_config: DictConfig,
        gpus_per_node: int = 8,
        is_reward_model: bool = False,
    ) -> None:
        super().__init__(replica_rank, config, model_config, gpus_per_node, is_reward_model)
        self.node_ip = ray.util.get_node_ip_address().strip("[]")

    def rollout_worker_use_gpu(self) -> bool:
        # GPUs are claimed by the TRT-LLM engine's own Ray workers, not by
        # the rollout worker actors.
        return False

    def get_pgs_and_bundle_indices(self) -> tuple[list[PlacementGroup], list[list[int]]]:
        """Get placement groups and bundle indices for the replica.

        Converts this replica's flat bundle range into (placement group,
        local bundle indices) pairs, possibly spanning several groups.
        """
        start_pg_index = 0
        local_bundle_index = 0
        # For SubRayResourcePool, the replica is assigned sub pool specific for this replica.
        if isinstance(self.resource_pool, SubRayResourcePool):
            assert self.resource_pool.subgroup_world_size == self.world_size, (
                "Subgroup world size must be equal to world size"
            )
            local_bundle_index = self.resource_pool.start_bundle_index
        # For RayResourcePool, the replica is assigned to entire resource pool.
        # We need to find start pg index and local bundle index based on replica rank.
        else:
            local_bundle_index = self.world_size * self.replica_rank
            # Walk past whole placement groups. Bug fix: subtract the bundle
            # count of the pg being *skipped* before advancing the index; the
            # previous order subtracted the next pg's count, yielding a wrong
            # (possibly negative) offset whenever pgs have unequal bundle
            # counts — and a negative offset passed the assert below.
            while local_bundle_index >= self.resource_pool.pgs[start_pg_index].bundle_count:
                local_bundle_index -= self.resource_pool.pgs[start_pg_index].bundle_count
                start_pg_index += 1
            assert (
                start_pg_index < len(self.resource_pool.pgs)
                and local_bundle_index < self.resource_pool.pgs[start_pg_index].bundle_count
            ), "Start pg index or local bundle index out of range"
        # Global Bundle View for Replica x 2 & TP=4:
        # ┌───────────────────┬───────────────────┐
        # │ Placement Group 0 │ Placement Group 1 │
        # ├────┬────┬────┬────┼────┬────┬────┬────┤
        # │ 0  │ 1  │ 2  │ 3  │ 0  │ 1  │ 2  │ 3  │
        # └────┴────┴────┴────┴────┴────┴────┴────┘
        #   └───────────────┘   └───────────────┘
        #       Replica 0           Replica 1
        #       (4 GPUs)            (4 GPUs)
        left_bundle_count = self.world_size
        pgs = []
        bundle_indices = []
        for pg in self.resource_pool.pgs[start_pg_index:]:
            if left_bundle_count == 0:
                break
            # Take as many bundles from this pg as remain available/needed.
            left_bundle_count_in_pg = min(left_bundle_count, pg.bundle_count - local_bundle_index)
            pg_bundle_indices = [local_bundle_index + idx for idx in range(left_bundle_count_in_pg)]
            pgs.append(pg)
            bundle_indices.append(pg_bundle_indices)
            left_bundle_count -= left_bundle_count_in_pg
            # Subsequent pgs are consumed from their first bundle.
            local_bundle_index = 0
        assert left_bundle_count == 0, "all bundle indices should be assigned"
        return pgs, bundle_indices

    async def launch_servers(self):
        """Launch the TRT-LLM HTTP server actor on this replica's first node."""
        assert self.nnodes == 1, "TRTLLMReplica doesn't support multiple nodes for single replica yet."
        assert self.resource_pool.pgs is not None, "placement groups are not initialized"
        pgs, bundle_indices = self.get_pgs_and_bundle_indices()
        # Check server process should be launched on the same node as first bundle of first pg.
        first_pg_data = placement_group_table(pgs[0])
        node_id = first_pg_data["bundles_to_node_id"][bundle_indices[0][0]]
        print(f"TRTLLMReplica: {self.replica_rank}")
        print(f"pg node_id: {node_id}")
        print(f"pgs: {pgs}")
        print(f"bundle_indices: {bundle_indices}")
        # TRTLLMReplica is a 1:1 map from replica to TRTLLMHttpServer.
        name = (
            f"trtllm_server_{self.replica_rank}"
            if not self.is_reward_model
            else f"trtllm_server_reward_{self.replica_rank}"
        )
        server = TRTLLMHttpServer.options(
            scheduling_strategy=ray.util.scheduling_strategies.NodeAffinitySchedulingStrategy(
                node_id=node_id,
                soft=False,
            ),
            runtime_env={"env_vars": {"RAY_EXPERIMENTAL_NOSET_CUDA_VISIBLE_DEVICES": "1"}},
            name=name,
            max_concurrency=self.max_concurrency,
        ).remote(
            config=self.config,
            model_config=self.model_config,
            is_reward_model=self.is_reward_model,
            rollout_mode=self.rollout_mode,
            workers=self.workers,
            replica_rank=self.replica_rank,
            max_colocate_count=self.resource_pool.max_colocate_count,
            pgs=pgs,
            bundle_indices=bundle_indices,
        )
        self.servers.append(server)
        # launch http server in each node
        await asyncio.gather(*[server.launch_server.remote() for server in self.servers])
        # get http server address from first server
        server_address, server_port = await self.servers[0].get_server_address.remote()
        self._server_handle = self.servers[0]
        self._server_address = (
            f"[{server_address}]:{server_port}"
            if is_valid_ipv6_address(server_address)
            else f"{server_address}:{server_port}"
        )
| {
"repo_id": "verl-project/verl",
"file_path": "verl/workers/rollout/trtllm_rollout/trtllm_async_server.py",
"license": "Apache License 2.0",
"lines": 340,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/workers/rollout/trtllm_rollout/trtllm_rollout.py | # Copyright 2026 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import asyncio
import base64
import contextlib
import gc
import logging
import os
import pickle
import threading
from contextlib import asynccontextmanager
from typing import Any, Generator, Optional
import aiohttp
import pynvml
import ray
import torch
import torch.distributed as dist
from torch.distributed.device_mesh import DeviceMesh, init_device_mesh
from torch.multiprocessing.reductions import reduce_tensor
from verl.utils.device import get_torch_device
from verl.utils.net_utils import is_valid_ipv6_address
from verl.workers.config import HFModelConfig, RolloutConfig
from verl.workers.rollout.base import BaseRollout
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
# Default configuration constants
DEFAULT_TIMEOUT = 60.0  # per-request HTTP timeout in seconds
DEFAULT_MAX_ATTEMPTS = 3  # retry attempts per HTTP request
DEFAULT_RETRY_DELAY = 2.0  # base delay (seconds) for exponential backoff
DEFAULT_MAX_CONNECTIONS = 2000  # aiohttp connector pool size
DEFAULT_MAX_WAIT_TIME = 300.0  # not used in this chunk — presumably a server-ready wait cap; TODO confirm
@contextlib.contextmanager
def nvml_context():
    """Context manager for NVML initialization and shutdown.

    Raises:
        RuntimeError: If NVML initialization fails
    """
    try:
        pynvml.nvmlInit()
        yield
    except pynvml.NVMLError as e:
        raise RuntimeError(f"Failed to initialize NVML: {e}") from e
    finally:
        # Best-effort shutdown; NVML errors here are deliberately ignored.
        with contextlib.suppress(pynvml.NVMLError):
            pynvml.nvmlShutdown()
# Process-wide NVML init state, guarded by a lock so concurrent
# get_device_uuid() calls initialize NVML exactly once.
_NVML_INITIALIZED = False
_NVML_LOCK = threading.Lock()
def get_device_uuid(id: str | int) -> str:
    """Get the UUID of a CUDA device using NVML.

    Args:
        id: CUDA device index; str is accepted because ray.get_gpu_ids()
            may return strings.

    Returns:
        The NVML device UUID as a str.

    Raises:
        RuntimeError: If NVML cannot be initialized or the UUID lookup fails.
    """
    id = int(id)  # pynvml expects int; ray.get_gpu_ids() may return str
    global _NVML_INITIALIZED
    with _NVML_LOCK:
        # Lazily initialize NVML once per process (guarded by the module lock).
        if not _NVML_INITIALIZED:
            try:
                pynvml.nvmlInit()
                _NVML_INITIALIZED = True
            except pynvml.NVMLError as e:
                raise RuntimeError(f"Failed to initialize NVML: {e}") from e
        # Get the device handle and UUID
        try:
            handle = pynvml.nvmlDeviceGetHandleByIndex(id)
            uuid = pynvml.nvmlDeviceGetUUID(handle)
            # Ensure the UUID is returned as a string, not bytes
            if isinstance(uuid, bytes):
                return uuid.decode("utf-8")
            elif isinstance(uuid, str):
                return uuid
            else:
                raise RuntimeError(f"Unexpected UUID type: {type(uuid)} for device {id} (global index: {id})")
        except pynvml.NVMLError as e:
            raise RuntimeError(f"Failed to get device UUID for device {id} (global index: {id}): {e}") from e
async def _read_async_response(resp: aiohttp.ClientResponse) -> dict[str, Any]:
if resp.status == 204 or (resp.content_length == 0):
return {}
try:
return await resp.json(content_type=None)
except Exception:
try:
text = await resp.text()
except Exception:
return {}
return {
"content_type": (resp.headers.get("Content-Type") or ""),
"text": text,
}
class AsyncTRTLLMHttpAdapter:
    def __init__(
        self,
        host: str,
        port: int,
        timeout: float = DEFAULT_TIMEOUT,
        max_attempts: int = DEFAULT_MAX_ATTEMPTS,
        retry_delay: float = DEFAULT_RETRY_DELAY,
        max_connections: int = DEFAULT_MAX_CONNECTIONS,
    ):
        """Async HTTP client for a TRT-LLM server's control endpoints.

        Args:
            host: Server hostname or IP.
            port: Server port.
            timeout: Per-request total timeout in seconds.
            max_attempts: Attempts per request before giving up.
            retry_delay: Base delay (seconds) for exponential backoff between retries.
            max_connections: Size of the aiohttp connection pool.
        """
        self.host = host
        self.port = port
        self.timeout = timeout
        self.max_attempts = max_attempts
        self.retry_delay = retry_delay
        self.max_connections = max_connections
@asynccontextmanager
async def _get_session(self) -> aiohttp.ClientSession:
"""Context manager for safe session access with proper connection pooling.
Yields:
aiohttp.ClientSession: Session instance for making HTTP requests
Note:
This method creates a new session for each request to avoid resource competition
while still maintaining proper connection pooling through the shared connector.
"""
# Create a new session for each request to avoid resource competition
connector = aiohttp.TCPConnector(
limit=self.max_connections,
limit_per_host=self.max_connections // 4,
ttl_dns_cache=300,
use_dns_cache=True,
)
timeout = aiohttp.ClientTimeout(total=self.timeout)
session = aiohttp.ClientSession(connector=connector, timeout=timeout)
try:
yield session
finally:
# Always close the session to free up resources
if not session.closed:
await session.close()
    async def _make_async_request(
        self,
        endpoint: str,
        payload: Optional[dict[str, Any]] = None,
        timeout: float = DEFAULT_TIMEOUT,
        method: str = "POST",
        return_status: bool = False,
    ) -> dict[str, Any] | int:
        """Make an async HTTP request with retry logic and consistent error handling.

        Args:
            endpoint (str): The API endpoint to call (without leading slash)
            payload (Optional[Dict[str, Any]], optional): The JSON payload to send.
                Defaults to empty dict if None. Ignored for GET requests.
            timeout (float, optional): Per-request timeout in seconds.
            method (str, optional): HTTP method to use. Defaults to "POST".
            return_status (bool, optional): Return the HTTP status code instead
                of the decoded response body.

        Returns:
            Dict[str, Any]: The JSON response from the server (or the int
            status code when ``return_status`` is True)

        Raises:
            aiohttp.ClientResponseError: If the HTTP request fails with a client/server error
            RuntimeError: If all retry attempts are exhausted

        Note:
            - Uses exponential backoff for retries
            - Logs warnings for timeout and connection errors, errors for HTTP errors
        """
        url = f"http://{self.host}:{self.port}/{endpoint}"
        for attempt in range(self.max_attempts):
            try:
                async with self._get_session() as session:
                    if method.upper() == "GET":
                        async with session.get(url, timeout=timeout) as response:
                            response.raise_for_status()
                            return response.status if return_status else await _read_async_response(response)
                    else:
                        async with session.post(url, json=payload or {}, timeout=timeout) as response:
                            response.raise_for_status()
                            return response.status if return_status else await _read_async_response(response)
            except asyncio.TimeoutError:
                # Transient: retry after backoff.
                logger.warning(f"Async request to {endpoint} timed out (attempt {attempt + 1})")
            except aiohttp.ClientConnectorError:
                # Transient: server may not be up yet; retry after backoff.
                logger.warning(f"Connection error for {endpoint} (attempt {attempt + 1})")
            except aiohttp.ClientResponseError as e:
                # HTTP error statuses are not retried.
                logger.error(f"HTTP error for {endpoint}: {e}")
                raise
            except Exception as e:
                # Unknown errors are retried until the last attempt, then re-raised.
                logger.error(f"Unexpected error for {endpoint}: {e}")
                if attempt == self.max_attempts - 1:
                    raise
            if attempt < self.max_attempts - 1:
                # Exponential backoff: retry_delay * 2^attempt.
                await asyncio.sleep(self.retry_delay * (2**attempt))
        raise RuntimeError(f"Failed to complete async request to {endpoint} after {self.max_attempts} attempts")
async def resume_memory_occupation(self, tags: list[str]):
"""Resume GPU memory occupation (async version).
Similar to AsyncEngine, this method handles first-time weight reloading
by calling release_memory_occupation if needed.
Args:
tags (Optional[List[str]], optional): List of tags to specify which memory to resume.
If None, resumes all memory. Defaults to None. ["weights", "kv_cache"]
Returns:
Dict[str, Any]: Server response indicating memory resume status
"""
return await self._make_async_request("resume_memory", {"tags": tags})
async def release_memory_occupation(self, tags: list[str]):
"""Release GPU memory occupation temporarily (async version).
Args:
tags (Optional[List[str]], optional): List of tags to specify which memory to release.
If None, releases all memory. Defaults to None. ["weights", "kv_cache"]
Returns:
Dict[str, Any]: Server response indicating memory release status
"""
return await self._make_async_request("release_memory", {"tags": tags})
async def update_weights(self, weights: dict[str, str]):
"""Update model weights from tensor data asynchronously.
Args:
weights: A dictionary that maps the device uuid of the weight handles.
Returns:
Dict[str, Any]: Server response containing update status
"""
return await self._make_async_request("update_weights", {"weights": weights})
class ServerAdapter(BaseRollout):
    """Rollout adapter that drives an external TRT-LLM HTTP server.

    Supports two deployment modes:
      - hybrid mode (``device_mesh`` is not None): each actor rank owns one GPU;
        a CPU-backed clone of the device mesh is used for rank coordination and
        the rank-0 member of the flattened non-DP dims acts as the per-replica
        leader that talks to the HTTP server.
      - standalone mode (``device_mesh`` is None): leadership is derived from
        the RANK environment variable and ``replica_rank`` is passed in.
    """

    # Memory-occupation tags for every weight-holding server component.
    # "kv_cache" is intentionally excluded; see get_full_tags().
    _WEIGHTS_TAGS = [
        "sampler",
        "drafter",
        "guided_decoder",
        "spec_resource_manager",
        "model_extra",
        "executor_extra",
        "model",
        "draft_model",
    ]

    @staticmethod
    def get_full_tags() -> list[str]:
        """Return all releasable/resumable memory tags (weights plus kv_cache)."""
        return ServerAdapter._WEIGHTS_TAGS + ["kv_cache"]

    def __init__(
        self, config: RolloutConfig, model_config: HFModelConfig, device_mesh: DeviceMesh, replica_rank: int = -1
    ):
        """Initialize the adapter and derive per-rank roles.

        Args:
            config: Rollout configuration (server endpoints, quantization, ...).
            model_config: HF model configuration; mutated in place when FP8
                quantization is requested.
            device_mesh: Device mesh with "dp" as the first dim (hybrid mode),
                or None for standalone mode.
            replica_rank: Replica index used in standalone mode only.
        """
        if config.get("quantization", None) == "fp8":
            # Inject a block-wise FP8 quantization config into the HF config so
            # downstream weight handling uses FP8 block quantization.
            FP8_BLOCK_QUANT_KWARGS = {
                "activation_scheme": "dynamic",
                "fmt": "e4m3",
                "quant_method": "fp8",
                "weight_block_size": [128, 128],
            }
            fp8_block_quant_kwargs = dict(FP8_BLOCK_QUANT_KWARGS)
            model_config.hf_config.quantization_config = fp8_block_quant_kwargs
        super().__init__(config, model_config, device_mesh)
        # Lazily-created AsyncTRTLLMHttpAdapter; see _init_server_adapter().
        self._adapter = None
        self.hybrid_device_mesh = None
        self.gpu_id = None
        self.is_leader_rank = None
        self.replica_rank = None
        self.is_dp_rank = None
        # hybrid mode
        if self.device_mesh is not None:
            assert device_mesh.mesh_dim_names.index("dp") == 0, "DP dim should always be the first dimension"
            # Clone a new device mesh for CPU backend only (used for internal ranks communication)
            device_mesh_kwargs = dict(
                mesh_shape=device_mesh.mesh.shape,
                mesh_dim_names=device_mesh.mesh_dim_names,
            )
            self.hybrid_device_mesh = init_device_mesh("cpu", **device_mesh_kwargs)
            # Flatten every non-DP dim into a single "exclude_dp" dim; its local
            # rank 0 becomes the per-replica leader.
            self.hybrid_device_mesh[self.hybrid_device_mesh.mesh_dim_names[1:]]._flatten(mesh_dim_name="exclude_dp")
            self.is_leader_rank = self.hybrid_device_mesh["exclude_dp"].get_local_rank() == 0
            logger.info(f"is_dp_leader: {self.is_leader_rank}")
            logger.info(f"exclude_dp_rank = {self.hybrid_device_mesh['exclude_dp'].get_local_rank()}")
            logger.info(f"exclude_dp_size = {self.hybrid_device_mesh['exclude_dp'].size()}")
            self.gpu_id = ray.get_gpu_ids()[0]
            self.replica_rank = self.hybrid_device_mesh["dp"].get_local_rank()
            assert len(ray.get_gpu_ids()) == 1, "ServerAdapter should run on a single GPU node"
        else:
            # Standalone mode: global rank 0 is the leader.
            rank = int(os.environ["RANK"])
            self.replica_rank = replica_rank
            self.is_leader_rank = rank == 0
        # Below is required for all modes.
        assert self.replica_rank >= 0, "replica_rank is not set"
        assert self.is_leader_rank is not None, "is_leader_rank is not set"
        # strip("[]") normalizes bracketed IPv6 addresses for comparison.
        self.node_ip = ray.util.get_node_ip_address().strip("[]")

    async def _init_server_adapter(self):
        """Lazily create the HTTP adapter for this replica's TRT-LLM server.

        Idempotent: returns immediately once the adapter exists. Also records
        the server's ray actor handle in ``self.server_actor``.
        """
        if self._adapter is not None:
            return
        # Lazy init http server adapter because http server is launched after hybrid engine.
        self.server_actor = ray.get_actor(f"trtllm_server_{self.replica_rank}")
        server_address, server_port = await self.server_actor.get_server_address.remote()
        # The server is expected to be co-located on this node.
        assert server_address == self.node_ip, f"server address: {server_address} != node_ip: {self.node_ip}"
        logger.debug(f"replica_rank={self.replica_rank}, server address: {server_address}, port: {server_port}")
        # Re-bracket IPv6 addresses for use in URLs.
        host = f"[{server_address}]" if is_valid_ipv6_address(server_address) else server_address
        self._adapter = AsyncTRTLLMHttpAdapter(
            host=host,
            port=server_port,
            timeout=self.config.server.timeout,
            max_attempts=self.config.server.max_attempts,
            retry_delay=self.config.server.retry_delay,
            max_connections=self.config.server.max_connections,
        )

    async def resume(self, tags: list[str]):
        """Resume rollout weights or kv cache in GPU memory.

        Args:
            tags (list[str]): Contains "weights" and/or "kv_cache"; "weights"
                expands to the full _WEIGHTS_TAGS list server-side.

        Raises:
            ValueError: If tags contain neither "weights" nor "kv_cache".
        """
        # Synchronize all ranks before resuming KV cache to ensure non-leader ranks
        # have completed actor offloading to CPU, preventing OOM issue.
        if "kv_cache" in tags and self.config.free_cache_engine:
            await asyncio.to_thread(dist.barrier, group=self.hybrid_device_mesh["exclude_dp"].get_group())
        # Only the leader talks to the HTTP server.
        if self.is_leader_rank and self.config.free_cache_engine:
            if "weights" in tags:
                tags = self._WEIGHTS_TAGS
            elif "kv_cache" in tags:
                tags = ["kv_cache"]
            else:
                raise ValueError(f"Invalid tag: {tags}")
            await self._init_server_adapter()
            await self._adapter.resume_memory_occupation(tags=tags)

    async def release(self):
        """Release weights and kv cache in GPU memory."""
        if self.is_leader_rank and self.config.free_cache_engine:
            await self._init_server_adapter()
            tags = self._WEIGHTS_TAGS + ["kv_cache"]
            await self._adapter.release_memory_occupation(tags=tags)

    async def update_weights_from_ipc_handles(self, device_handles):
        """Gather per-rank IPC weight handles onto the leader and push to server.

        All ranks in the "exclude_dp" group participate in the gather; only the
        leader merges the handle dicts and sends them over HTTP. Ends with a
        barrier so ranks stay in lockstep. Hybrid mode only.
        """
        assert self.hybrid_device_mesh is not None, "hybrid_device_mesh is not set"
        # NOTE(review): stray string literal below is a no-op (misplaced docstring);
        # kept to avoid any code change in a documentation-only pass.
        """Update weights from IPC handles."""
        if self.is_leader_rank:
            gathered_handles = [None for _ in range(self.hybrid_device_mesh["exclude_dp"].size())]
        else:
            gathered_handles = None
        # dist.gather_object blocks, so run it off the event loop.
        await asyncio.to_thread(
            dist.gather_object,
            obj=device_handles,
            object_gather_list=gathered_handles,
            group_dst=0,
            group=self.hybrid_device_mesh["exclude_dp"].get_group(),
        )
        if self.is_leader_rank:
            # Merge the per-rank {device_uuid: handles} dicts into one payload.
            all_handles = {k: v for d in gathered_handles for k, v in d.items()}
            await self._adapter.update_weights(all_handles)
        await asyncio.to_thread(dist.barrier, group=self.hybrid_device_mesh["exclude_dp"].get_group())

    async def update_weights(
        self, weights: Generator[tuple[str, torch.Tensor], None, None], global_steps: int = None, **kwargs
    ):
        """Update the weights of the rollout model.

        Streams (name, tensor) pairs into size-bounded buckets of IPC handles
        and flushes each bucket to the server; finishes with a sentinel
        ``update_weights(None)`` call from the leader. Hybrid mode only.

        Args:
            weights: A generator that yields the name of the weight tensor and the tensor itself.
            global_steps: If given, forwarded to the server actor after the update.
        """
        assert self.hybrid_device_mesh is not None, "hybrid_device_mesh is not set"
        # NOTE(review): stray string literal below is a no-op (misplaced docstring);
        # kept to avoid any code change in a documentation-only pass.
        """Update the weights of the rollout model.
        Args:
            weights: A generator that yields the name of the weight tensor and the tensor itself.
        """
        if self.is_leader_rank:
            await self._init_server_adapter()
        # Bucket budget in bytes for each flush to the server.
        total_available_bytes = int(self.config.checkpoint_engine.update_weights_bucket_megabytes) * 1024 * 1024
        if self.config.get("quantization", None) == "fp8":
            from verl.utils.trtllm.trtllm_fp8_utils import TRTLLMFP8QuantizerHelper

            # Wrap the weight stream so tensors are FP8-quantized on the fly.
            fp8_quantizer_helper = TRTLLMFP8QuantizerHelper(self.model_config.hf_config.quantization_config)
            weights = fp8_quantizer_helper.quant_weights_by_name(
                weights,
                dtype=self.model_config.hf_config.dtype,
            )
        try:
            device_uuid = get_device_uuid(self.gpu_id)
        except Exception as e:
            logger.error(f"Failed to get device UUID in update_weights(): {e}")
            device_uuid = None
            raise e
        cur_available_bytes = total_available_bytes
        cur_handles = []

        async def flush():
            # Ship the accumulated handles (pickled + base64 for HTTP transport)
            # and reset the bucket.
            nonlocal cur_available_bytes, cur_handles
            if not cur_handles:
                return
            serialized_device_handles = {device_uuid: base64.b64encode(pickle.dumps(cur_handles)).decode("utf-8")}
            await self.update_weights_from_ipc_handles(serialized_device_handles)
            cur_available_bytes = total_available_bytes
            cur_handles = []

        for name, param in weights:
            size_in_bytes = param.element_size() * param.numel()
            if size_in_bytes > cur_available_bytes:
                await flush()
                # A single tensor larger than the whole bucket cannot be shipped.
                assert cur_available_bytes >= size_in_bytes, (
                    f"cur_available_bytes: {cur_available_bytes:,} size_in_bytes: {size_in_bytes:,} name: {name}"
                )
            cur_available_bytes -= size_in_bytes
            # reduce_tensor produces a CUDA IPC handle for zero-copy sharing.
            handle = reduce_tensor(param.detach())
            cur_handles.append((name, handle))
        await flush()
        if self.is_leader_rank:
            # Finalize update weights
            await self._adapter.update_weights(None)
            if global_steps is not None:
                await self.server_actor.set_global_steps.remote(global_steps)
        await asyncio.to_thread(dist.barrier, group=self.hybrid_device_mesh["exclude_dp"].get_group())
        del weights
        gc.collect()
        get_torch_device().empty_cache()

    def _get_attribute(self, name: str):
        # Generic attribute accessor — presumably invoked remotely via ray to
        # introspect this actor's state; verify against callers.
        return getattr(self, name)
| {
"repo_id": "verl-project/verl",
"file_path": "verl/workers/rollout/trtllm_rollout/trtllm_rollout.py",
"license": "Apache License 2.0",
"lines": 389,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/models/mcore/mtp_patch.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Copyright 2025 Meituan Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable
import torch
from megatron.core import parallel_state
from megatron.core.models.gpt.gpt_model import GPTModel
from megatron.core.transformer.multi_token_prediction import MTPLossAutoScaler, MTPLossLoggingHelper, roll_tensor
try:
from megatron.core.utils import unwrap_model
except ImportError:
from verl.utils.megatron_utils import unwrap_model
def _get_patching_model(model: torch.nn.Module):
    """Locate the GPTModel instance to patch inside a (possibly wrapped) module.

    Returns the unwrapped model when it is a GPTModel, its nested
    ``language_model`` when that is a GPTModel, or ``None`` (with a message)
    when nothing patchable is found.
    """
    unwrapped = unwrap_model(model)
    if isinstance(unwrapped, GPTModel):
        return unwrapped
    inner = getattr(unwrapped, "language_model", None)
    if isinstance(inner, GPTModel):
        return inner
    print(f"Model {unwrapped.__class__.__name__} is not a supported for fused forward")
    return None
def patch_postprocess(model: torch.nn.Module):
    """Swap GPTModel._postprocess for the MTP-aware variant, keeping a backup."""
    target = _get_patching_model(model)
    if target is None:
        return
    # Save the original so unpatch_postprocess() can restore it later.
    target._postprocess_backup = target._postprocess
    target._postprocess = _megatron_gptmodel_postprocess.__get__(target, target.__class__)
def unpatch_postprocess(model: torch.nn.Module):
    """Restore the original GPTModel._postprocess saved by patch_postprocess().

    Safe to call even when the model was never patched (previously this raised
    AttributeError because ``_postprocess_backup`` did not exist); in that case
    it is a no-op.
    """
    model = _get_patching_model(model)
    if model is None:
        return
    backup = getattr(model, "_postprocess_backup", None)
    if backup is None:
        # Never patched (or already unpatched) — nothing to restore.
        return
    model._postprocess = backup
    # Drop the backup so repeated patch/unpatch cycles stay consistent.
    del model._postprocess_backup
# copy from https://github.com/NVIDIA/Megatron-LM/blob/23e092f41ec8bc659020e401ddac9576c1cfed7e/megatron/core/models/gpt/gpt_model.py
# patch the postprocess method of GPTModel to support advanced features like MTP, 1f1b overlap, etc.
def _megatron_gptmodel_postprocess(
    self,
    hidden_states,
    input_ids,
    position_ids,
    labels,
    rotary_pos_emb,
    rotary_pos_cos,
    rotary_pos_sin,
    mtp_in_postprocess=None,
    loss_mask=None,
    decoder_input=None,
    attention_mask=None,
    inference_params=None,
    packed_seq_params=None,
    sequence_len_offset=None,
    runtime_gather_output=None,
    extra_block_kwargs=None,
    inference_context=None,
    **kwargs,
):
    """Compatibility patch for GPTModel._postprocess.

    For inference (`labels is None`), delegate to the upstream implementation to stay
    aligned with Megatron-Core updates.

    For training (`labels is not None`), keep VERL's MTP behavior and always return
    logits (instead of CE loss) so PPO paths can compute custom losses from logits.

    Returns:
        Hidden states on non-last pipeline stages; otherwise logits of shape
        [b, s, vocab] (transposed from Megatron's [s, b, ...] layout).
    """
    # Keep inference path aligned with whatever upstream Megatron currently expects.
    if labels is None:
        return self._postprocess_backup(
            hidden_states=hidden_states,
            input_ids=input_ids,
            position_ids=position_ids,
            labels=labels,
            rotary_pos_emb=rotary_pos_emb,
            rotary_pos_cos=rotary_pos_cos,
            rotary_pos_sin=rotary_pos_sin,
            mtp_in_postprocess=mtp_in_postprocess,
            loss_mask=loss_mask,
            decoder_input=decoder_input,
            attention_mask=attention_mask,
            inference_params=inference_params,
            packed_seq_params=packed_seq_params,
            sequence_len_offset=sequence_len_offset,
            runtime_gather_output=runtime_gather_output,
            extra_block_kwargs=extra_block_kwargs,
            inference_context=inference_context,
            **kwargs,
        )
    # Training path: keep logits for external loss computation.
    output_weight = None
    if self.share_embeddings_and_output_weights:
        output_weight = self.shared_embedding_or_output_weight()
    if mtp_in_postprocess:
        # Run the MTP stack here; it appends the per-MTP-layer hidden states
        # along the sequence dim (chunked apart again below).
        hidden_states = self.mtp(
            input_ids=input_ids,
            position_ids=position_ids,
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            inference_params=inference_params,
            rotary_pos_emb=rotary_pos_emb,
            rotary_pos_cos=rotary_pos_cos,
            rotary_pos_sin=rotary_pos_sin,
            packed_seq_params=packed_seq_params,
            sequence_len_offset=sequence_len_offset,
            embedding=self.embedding,
            **(extra_block_kwargs or {}),
        )
    if not self.post_process:
        # Not the last pipeline stage: pass hidden states downstream unchanged.
        return hidden_states
    # Skip when mtp_num_layers is None or 0.
    if self.config.mtp_num_layers:
        # Resolve the context-parallel group from whichever attribute this
        # Megatron version exposes.
        cp_group = None
        if getattr(self, "pg_collection", None) is not None:
            cp_group = self.pg_collection.cp
        elif hasattr(self, "cp_group"):
            cp_group = self.cp_group
        # Prefer upstream helper when available (newer Megatron-LM).
        try:
            from megatron.core.transformer.multi_token_prediction import process_mtp_loss

            hidden_states = process_mtp_loss(
                hidden_states=hidden_states,
                labels=labels,
                loss_mask=loss_mask,
                output_layer=self.output_layer,
                output_weight=output_weight,
                runtime_gather_output=runtime_gather_output,
                is_training=self.training,
                compute_language_model_loss=self.compute_language_model_loss,
                config=self.config,
                cp_group=cp_group,
                packed_seq_params=packed_seq_params,
            )
        except (ImportError, AttributeError, TypeError):
            # Fallback for older Megatron-LM versions without process_mtp_loss API.
            # NOTE(review): this branch uses self.cp_group directly instead of the
            # cp_group resolved above (which also handles pg_collection) — confirm
            # older Megatron versions always expose self.cp_group here.
            mtp_labels = labels.clone()
            # Split main-model hidden states from the per-MTP-layer ones.
            hidden_states_list = torch.chunk(hidden_states, 1 + self.config.mtp_num_layers, dim=0)
            hidden_states = hidden_states_list[0]
            if loss_mask is None:
                # if loss_mask is not provided, use all ones as loss_mask
                loss_mask = torch.ones_like(mtp_labels)
            for mtp_layer_number in range(self.config.mtp_num_layers):
                # Calc loss for the current Multi-Token Prediction (MTP) layers.
                # Each MTP depth predicts one more token ahead, so labels and
                # mask shift left by one per layer (cumulative across iterations).
                mtp_labels, _ = roll_tensor(
                    mtp_labels,
                    shifts=-1,
                    dims=-1,
                    cp_group=self.cp_group,
                    packed_seq_params=packed_seq_params,
                )
                loss_mask, num_tokens = roll_tensor(
                    loss_mask,
                    shifts=-1,
                    dims=-1,
                    cp_group=self.cp_group,
                    packed_seq_params=packed_seq_params,
                )
                # Compute mtp loss without storing logits to save memory.
                mtp_loss = self.compute_output_layer_and_language_model_loss(
                    hidden_states_list[mtp_layer_number + 1],
                    labels=mtp_labels,
                    weight=self.shared_embedding_or_output_weight(),
                    sequence_parallel_enabled=self.output_layer.sequence_parallel,
                    column_parallel_linear=self.output_layer,
                    col_linear_kwargs={
                        "weight": output_weight,
                        "runtime_gather_output": runtime_gather_output,
                    },
                )
                mtp_loss = loss_mask * mtp_loss
                if self.training:
                    # TODO(shifangx): remove the use of parallel_state here
                    # after moving loss logging to loss_func in pretrain_gpt.py
                    MTPLossLoggingHelper.save_loss_to_tracker(
                        torch.sum(mtp_loss) / num_tokens,
                        mtp_layer_number,
                        self.config.mtp_num_layers,
                        avg_group=parallel_state.get_data_parallel_group(with_context_parallel=True),
                    )
                mtp_loss_scale = self.config.mtp_loss_scaling_factor / self.config.mtp_num_layers
                # MTPLossAutoScaler injects the scaled MTP loss gradient into the
                # main hidden states during backward.
                if self.config.calculate_per_token_loss:
                    hidden_states = MTPLossAutoScaler.apply(hidden_states, mtp_loss_scale * mtp_loss)
                else:
                    hidden_states = MTPLossAutoScaler.apply(hidden_states, mtp_loss_scale * mtp_loss / num_tokens)
    logits, _ = self.output_layer(hidden_states, weight=output_weight, runtime_gather_output=runtime_gather_output)
    # [s b h] => [b s h]
    return logits.transpose(0, 1).contiguous()
def patch_mtp_layer_get_embeddings(model: torch.nn.Module):
    """Patch the _get_embeddings method of MultiTokenPredictionLayer"""
    from megatron.core.models.gpt.gpt_model import GPTModel
    from megatron.core.transformer.multi_token_prediction import MultiTokenPredictionLayer

    # Unwrap wrappers to reach the actual GPTModel before searching for layers.
    gpt_model = _get_patching_model(model)

    # Collect every MultiTokenPredictionLayer reachable from the model.
    mtp_layers = []
    if isinstance(gpt_model, GPTModel):
        if hasattr(gpt_model, "mtp") and hasattr(gpt_model.mtp, "layers"):
            mtp_layers = [layer for layer in gpt_model.mtp.layers if isinstance(layer, MultiTokenPredictionLayer)]
        elif hasattr(gpt_model, "layers"):
            mtp_layers = [layer for layer in gpt_model.layers if isinstance(layer, MultiTokenPredictionLayer)]

    if not mtp_layers:
        print("No MTP layers found to patch in any of the actor modules")
        return False

    for layer in mtp_layers:
        # Keep the original bound method so it can be restored later.
        layer._get_embeddings_backup = layer._get_embeddings
        layer._get_embeddings = _patched_get_embeddings_for_detach.__get__(layer, layer.__class__)
    print(f"Found and patched {len(mtp_layers)} MTP layer(s) in any of the actor modules")
    return True
def unpatch_mtp_layer_get_embeddings(model: torch.nn.Module):
    """Unpatch the _get_embeddings method of MultiTokenPredictionLayer"""
    from megatron.core.models.gpt.gpt_model import GPTModel
    from megatron.core.transformer.multi_token_prediction import MultiTokenPredictionLayer

    # Unwrap wrappers to reach the actual GPTModel before searching for layers.
    gpt_model = _get_patching_model(model)

    # Collect every MultiTokenPredictionLayer reachable from the model.
    candidates = []
    if isinstance(gpt_model, GPTModel):
        if hasattr(gpt_model, "mtp") and hasattr(gpt_model.mtp, "layers"):
            candidates = [layer for layer in gpt_model.mtp.layers if isinstance(layer, MultiTokenPredictionLayer)]
        elif hasattr(gpt_model, "layers"):
            candidates = [layer for layer in gpt_model.layers if isinstance(layer, MultiTokenPredictionLayer)]

    restored = 0
    for layer in candidates:
        # Only layers that were actually patched carry a backup.
        if hasattr(layer, "_get_embeddings_backup"):
            layer._get_embeddings = layer._get_embeddings_backup
            delattr(layer, "_get_embeddings_backup")
            restored += 1
    if restored > 0:
        print(f"Unpatched {restored} MTP layer(s)")
        return True
    return False
def _patched_get_embeddings_for_detach(
    self,
    input_ids: torch.Tensor,
    position_ids: torch.Tensor,
    embedding: Callable,
    hidden_states: torch.Tensor,
    packed_seq_params=None,
):
    """Replacement for MultiTokenPredictionLayer._get_embeddings that detaches.

    Rolls ``input_ids`` / ``position_ids`` one position to the left (the MTP
    target shift), embeds the shifted tokens, and returns both the embedding
    and the incoming hidden states detached from the autograd graph.
    """
    from megatron.core.transformer.multi_token_prediction import roll_tensor
    from megatron.core.utils import make_viewless_tensor

    # Shift both index tensors left by one token, CP- and packing-aware.
    roll_kwargs = dict(shifts=-1, dims=-1, cp_group=self.cp_group, packed_seq_params=packed_seq_params)
    input_ids, _ = roll_tensor(input_ids, **roll_kwargs)
    position_ids, _ = roll_tensor(position_ids, **roll_kwargs)

    # Embed the shifted tokens, then cut both tensors out of the autograd graph
    # so no gradient flows back through this path.
    decoder_input = embedding(input_ids=input_ids, position_ids=position_ids).detach()
    hidden_states = make_viewless_tensor(inp=hidden_states, requires_grad=True, keep_graph=True).detach()
    return input_ids, position_ids, decoder_input, hidden_states
| {
"repo_id": "verl-project/verl",
"file_path": "verl/models/mcore/mtp_patch.py",
"license": "Apache License 2.0",
"lines": 294,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/experimental/fully_async_policy/sglang_rollout/sglang_async_server.py | # Copyright 2025 Meituan Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
from typing import Any, Optional
import ray
import torch
from ray.actor import ActorHandle
from verl.workers.config import HFModelConfig, RolloutConfig
from verl.workers.rollout.replica import RolloutMode
from verl.workers.rollout.sglang_rollout.async_sglang_server import (
SGLangHttpServer,
SGLangReplica,
)
logger = logging.getLogger(__file__)
logger.setLevel(logging.INFO)
class SGLangHttpServerForPartial(SGLangHttpServer):
    """SGLang HTTP server with cooperative cancellation for partial rollouts.

    Extends SGLangHttpServer with a pause flag and per-request cancel events so
    in-flight generations can be interrupted (e.g. for a weight sync) and their
    partial output returned to the caller.
    """

    def __init__(
        self,
        config: RolloutConfig,
        model_config: HFModelConfig,
        rollout_mode: RolloutMode,
        workers: list[ActorHandle],
        replica_rank: int,
        node_rank: int,
        nnodes: int,
        cuda_visible_devices: str,
        base_gpu_id: int,
    ):
        super().__init__(
            config=config,
            model_config=model_config,
            rollout_mode=rollout_mode,
            workers=workers,
            replica_rank=replica_rank,
            node_rank=node_rank,
            nnodes=nnodes,
            cuda_visible_devices=cuda_visible_devices,
            base_gpu_id=base_gpu_id,
        )
        # for cancel LLMServer
        # paused: while True, new generate_for_partial calls return immediately
        # as cancelled. lock guards paused / cancel_event / req_output.
        self.paused = False
        self.lock = asyncio.Lock()
        # request_id -> event set when that request should be cancelled.
        self.cancel_event: dict[str, asyncio.Event] = {}
        # request_id -> latest streamed output dict (None until first chunk).
        self.req_output: dict[str, Optional[dict[str, Any]]] = {}

    async def _generate_step(
        self,
        prompt_ids: torch.Tensor,
        sampling_params: dict[str, Any],
        request_id: str,
        image_data: Optional[list[Any]] = None,
        video_data: Optional[list[Any]] = None,
    ) -> None:
        """Run one generation, streaming outputs into self.req_output[request_id].

        Each streamed chunk overwrites the previous one, so req_output always
        holds the most recent (possibly partial) result for the request.
        """
        # Copy so the caller's dict is not mutated.
        sampling_params = dict(sampling_params)
        # Cap generation so prompt + response fits within the model context.
        max_new_tokens = min(
            self.config.response_length,
            self.config.max_model_len - len(prompt_ids) - 1,
        )
        sampling_params["max_new_tokens"] = max_new_tokens
        sampling_params.setdefault(
            "repetition_penalty",
            self.config.get("repetition_penalty", 1.0),
        )
        # logprobs is requested via return_logprob below, not sampling_params.
        sampling_params.pop("logprobs", None)
        return_logprob = True

        from sglang.srt.managers.io_struct import GenerateReqInput

        if video_data is not None and len(video_data) > 0:
            logger.warning(
                f"Request {request_id} received video_data but it is not used. "
                "This is to keep consistency with the implementation in "
                "verl/workers/rollout/sglang_rollout/async_sglang_server.py. "
                "Video data will be ignored."
            )
        request = GenerateReqInput(
            rid=request_id,
            input_ids=prompt_ids,
            sampling_params=sampling_params,
            return_logprob=return_logprob,
            image_data=image_data,
            # TODO: support video input for sglang
            # video_data=video_data,
        )
        generator = self.tokenizer_manager.generate_request(request, None)
        async for output in generator:
            self.req_output[request_id] = output
        assert self.req_output[request_id] is not None

    async def generate_for_partial(
        self,
        prompt_ids: torch.Tensor,
        sampling_params: dict[str, Any],
        request_id: str,
        image_data: Optional[list[Any]] = None,
        video_data: Optional[list[Any]] = None,
    ) -> tuple[list[int], list[float], bool]:
        """Generate tokens, returning early with partial output when cancelled.

        Races the generation task against the request's cancel event.

        Returns:
            (token_ids, log_probs, is_cancel): generated tokens so far, their
            log-probs (empty when the server returned no logprobs), and whether
            the request ended by cancellation rather than completion.
        """
        async with self.lock:
            # Reject immediately while the server is paused.
            if self.paused:
                return [], [], True
            self.req_output[request_id] = None
            self.cancel_event[request_id] = asyncio.Event()
            cancel_handle = asyncio.create_task(self.cancel_event[request_id].wait())
            generation_handle = asyncio.create_task(
                self._generate_step(prompt_ids, sampling_params, request_id, image_data, video_data)
            )
        # Wait outside the lock so cancel() can acquire it and set the event.
        done, pending = await asyncio.wait(
            [generation_handle, cancel_handle],
            return_when=asyncio.FIRST_COMPLETED,
        )
        for task in done:
            # Surface any exception raised by the finished task.
            await task
        for task in pending:
            task.cancel()
        async with self.lock:
            output = self.req_output.get(request_id)
            if output is None:
                # Cancelled before the first streamed chunk arrived.
                self.cancel_event.pop(request_id, None)
                self.req_output.pop(request_id, None)
                return [], [], True
            meta_info = output.get("meta_info", {})
            output_token_logprobs = meta_info.get("output_token_logprobs")
            token_ids: list[int] = []
            log_probs: list[float] = []
            if output_token_logprobs is not None:
                # Entries are (log_prob, token_id, ...) triples.
                for log_prob, token_id, _ in output_token_logprobs:
                    token_ids.append(int(token_id))
                    log_probs.append(float(log_prob))
            else:
                token_ids = list(output["output_ids"])
                log_probs = []
            # Cancelled iff the generation task itself did not finish.
            is_cancel = generation_handle not in done
            self.cancel_event.pop(request_id, None)
            self.req_output.pop(request_id, None)
            return token_ids, log_probs, is_cancel

    async def cancel(self):
        """Pause the server and signal every in-flight request to stop."""
        async with self.lock:
            self.paused = True
            for request_id in self.cancel_event:
                self.cancel_event[request_id].set()

    async def resume(self):
        """Allow new generation requests again after a cancel()."""
        async with self.lock:
            self.paused = False
class FullyAsyncSGLangReplica(SGLangReplica):
    """SGLang replica whose HTTP servers support cooperative cancel/resume."""

    def __init__(
        self,
        replica_rank: int,
        config: RolloutConfig,
        model_config: HFModelConfig,
        gpus_per_node: int = 8,
        is_reward_model: bool = False,
    ):
        super().__init__(replica_rank, config, model_config, gpus_per_node, is_reward_model)
        # Launch the partial-rollout server variant instead of the stock one.
        self.server_class = ray.remote(SGLangHttpServerForPartial)

    async def cancel(self):
        """Cancel each rollout server."""
        calls = [srv.cancel.remote() for srv in self.servers]
        await asyncio.gather(*calls)

    async def resume(self):
        """Resume each rollout server."""
        calls = [srv.resume.remote() for srv in self.servers]
        await asyncio.gather(*calls)
| {
"repo_id": "verl-project/verl",
"file_path": "verl/experimental/fully_async_policy/sglang_rollout/sglang_async_server.py",
"license": "Apache License 2.0",
"lines": 168,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:scripts/veomni/moe_merge.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Merge individual MoE expert weights into stacked tensors for efficient loading.
This script takes a HuggingFace checkpoint with individual expert weights
(e.g., model.layers.{i}.mlp.experts.{j}.gate_proj.weight) and merges them
into stacked tensors (e.g., model.layers.{i}.mlp.experts.gate_proj) for
faster loading and better memory efficiency in VeOmni.
The merging process:
1. Loads individual expert weights from the HF checkpoint
2. Stacks them into single tensors for each projection type
3. Handles all three projection types: gate_proj, up_proj, down_proj
4. Supports both Qwen3-MoE (num_experts) and DeepSeek (n_routed_experts) formats
5. Handles models with initial dense layers (first_k_dense_replace)
Usage: python moe_merge.py --raw_hf_path <input_checkpoint> --merge_hf_path <output_dir>
"""
import os
from argparse import ArgumentParser
from dataclasses import dataclass
from glob import glob
from typing import Generator
import torch
from safetensors.torch import safe_open
from tqdm import tqdm
from transformers import AutoConfig
from veomni.models import build_tokenizer, save_model_weights
@dataclass
class StateDictIterator:
filepath: str
def __iter__(self) -> Generator[tuple[str, "torch.Tensor"], None, None]:
if self.filepath.endswith(".safetensors"):
with safe_open(self.filepath, framework="pt", device="cpu") as f:
for key in f.keys():
yield key, f.get_tensor(key)
else:
state_dict = torch.load(self.filepath, map_location="cpu", weights_only=True, mmap=True)
for key in state_dict.keys():
yield key, state_dict[key]
def main(raw_hf_path, merge_hf_path):
    """Merge per-expert MoE weights into stacked tensors and save a checkpoint.

    Args:
        raw_hf_path: Directory of the original HF checkpoint with individual
            expert weights (``model.layers.{i}.mlp.experts.{j}.{proj}.weight``).
        merge_hf_path: Output directory for the merged checkpoint.

    Raises:
        RuntimeError: If the config exposes neither ``num_experts`` (Qwen3-MoE)
            nor ``n_routed_experts`` (DeepSeek).
    """
    torch.set_default_dtype(torch.bfloat16)
    os.makedirs(merge_hf_path, exist_ok=True)

    config = AutoConfig.from_pretrained(raw_hf_path)
    tokenizer = build_tokenizer(raw_hf_path)

    # Deterministic shard order for reproducible loading.
    safetensor_files = sorted(glob(os.path.join(raw_hf_path, "*.safetensors")))
    state_dict_iterators = [StateDictIterator(shard_file) for shard_file in safetensor_files]
    new_state_dict = {}
    for state_dict_iterator in tqdm(state_dict_iterators, desc="Loading checkpoint shards"):
        for name, tensor in state_dict_iterator:
            new_state_dict[name] = tensor.cpu()
    print(new_state_dict.keys())

    if hasattr(config, "num_experts"):
        # qwen3moe
        num_experts = config.num_experts
    elif hasattr(config, "n_routed_experts"):
        # deepseek
        num_experts = config.n_routed_experts
    else:
        raise RuntimeError("could not find how many experts to assign")

    num_hidden_layers = config.num_hidden_layers
    # DeepSeek keeps the first k layers dense; experts start afterwards.
    moe_layer_start_idx = getattr(config, "first_k_dense_replace", 0)

    for i in range(moe_layer_start_idx, num_hidden_layers):
        # Stack each projection's per-expert weights into one tensor:
        # experts.{j}.{proj}.weight (num_experts entries) -> experts.{proj}.
        for proj_name in ("gate_proj", "up_proj", "down_proj"):
            expert_weights = [
                new_state_dict.pop(f"model.layers.{i}.mlp.experts.{j}.{proj_name}.weight")
                for j in range(num_experts)
            ]
            new_state_dict[f"model.layers.{i}.mlp.experts.{proj_name}"] = torch.stack(expert_weights)

    model_assets = [config, tokenizer]
    save_model_weights(merge_hf_path, new_state_dict, model_assets=model_assets)
if __name__ == "__main__":
    # CLI entry point: merge individual expert weights into stacked tensors.
    cli = ArgumentParser()
    cli.add_argument("--raw_hf_path", type=str, required=True)
    cli.add_argument("--merge_hf_path", type=str, required=True)
    cli_args = cli.parse_args()
    main(cli_args.raw_hf_path, cli_args.merge_hf_path)
| {
"repo_id": "verl-project/verl",
"file_path": "scripts/veomni/moe_merge.py",
"license": "Apache License 2.0",
"lines": 97,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:scripts/veomni/moe_split.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Reverse process of moe_merge.py - splits merged MoE expert weights back to individual experts.
This script takes a HF checkpoint that has been processed by moe_merge.py (where expert weights
are stacked into single tensors) and splits them back to the original format with individual
expert weights.
The process reverses the merging by:
1. Loading stacked tensors like model.layers.{i}.mlp.experts.gate_proj
2. Unstacking them back to individual experts model.layers.{i}.mlp.experts.{j}.gate_proj.weight
3. Handling all three projection types: gate_proj, up_proj, down_proj
Usage: python moe_split.py --merge_hf_path <merged_checkpoint> --split_hf_path <output_dir>
"""
import os
from argparse import ArgumentParser
from dataclasses import dataclass
from glob import glob
from typing import Generator
import torch
from safetensors.torch import safe_open
from tqdm import tqdm
from transformers import AutoConfig
from veomni.models import build_tokenizer, save_model_weights
@dataclass
class StateDictIterator:
filepath: str
def __iter__(self) -> Generator[tuple[str, "torch.Tensor"], None, None]:
if self.filepath.endswith(".safetensors"):
with safe_open(self.filepath, framework="pt", device="cpu") as f:
for key in f.keys():
yield key, f.get_tensor(key)
else:
state_dict = torch.load(self.filepath, map_location="cpu", weights_only=True, mmap=True)
for key in state_dict.keys():
yield key, state_dict[key]
def main(merge_hf_path, split_hf_path):
    """Split stacked MoE expert tensors back into per-expert weights.

    Loads every safetensors shard under ``merge_hf_path``, replaces each
    stacked ``experts.<proj>`` tensor with ``num_experts`` individual
    ``experts.<j>.<proj>.weight`` entries, and saves the result (plus config
    and tokenizer assets) to ``split_hf_path``.
    """
    torch.set_default_dtype(torch.bfloat16)
    os.makedirs(split_hf_path, exist_ok=True)
    config = AutoConfig.from_pretrained(merge_hf_path)
    tokenizer = build_tokenizer(merge_hf_path)
    shard_files = sorted(glob(os.path.join(merge_hf_path, "*.safetensors")))
    new_state_dict = {}
    # Stream every shard into a single CPU state dict.
    for shard in tqdm([StateDictIterator(f) for f in shard_files], desc="Loading checkpoint shards"):
        for name, tensor in shard:
            new_state_dict[name] = tensor.cpu()
    num_experts = config.num_experts
    for i in range(config.num_hidden_layers):
        print(f"Converting layer {i}")
        for proj_name in ["gate_proj", "up_proj", "down_proj"]:
            stacked_key = f"model.layers.{i}.mlp.experts.{proj_name}"
            if stacked_key not in new_state_dict:
                continue
            # Replace the stacked tensor with one entry per expert.
            stacked_tensor = new_state_dict.pop(stacked_key)
            for j in range(num_experts):
                new_state_dict[f"model.layers.{i}.mlp.experts.{j}.{proj_name}.weight"] = stacked_tensor[j]
    print("Saving to safetensors")
    save_model_weights(split_hf_path, new_state_dict, model_assets=[config, tokenizer])
# CLI entry point: reverse moe_merge.py, restoring per-expert weight layout.
if __name__ == "__main__":
    parser = ArgumentParser()
    # Input: checkpoint produced by moe_merge.py (stacked expert tensors).
    parser.add_argument("--merge_hf_path", type=str, required=True)
    # Output directory for the split (per-expert) checkpoint.
    parser.add_argument("--split_hf_path", type=str, required=True)
    args = parser.parse_args()
    main(args.merge_hf_path, args.split_hf_path)
| {
"repo_id": "verl-project/verl",
"file_path": "scripts/veomni/moe_split.py",
"license": "Apache License 2.0",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:tests/workers/rollout/test_vllm_cli_args_on_cpu.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pytest
from verl.workers.rollout.vllm_rollout.utils import build_cli_args_from_config
class TestBuildCliArgsFromConfig:
    """Tests for CLI argument serialization from config dictionaries.

    Covers the mapping rules of ``build_cli_args_from_config``:
    scalars -> ``--key value``; True -> bare flag; False/None -> skipped;
    lists -> flag followed by one token per element; dicts -> JSON string.
    """

    def test_string_value(self):
        """String values become '--key value'."""
        config = {"model": "gpt2"}
        result = build_cli_args_from_config(config)
        assert result == ["--model", "gpt2"]

    def test_integer_value(self):
        """Integer values are converted to strings."""
        config = {"tensor-parallel-size": 4}
        result = build_cli_args_from_config(config)
        assert result == ["--tensor-parallel-size", "4"]

    def test_float_value(self):
        """Float values are converted to strings."""
        config = {"temperature": 0.7}
        result = build_cli_args_from_config(config)
        assert result == ["--temperature", "0.7"]

    def test_bool_true(self):
        """Bool True adds flag without value."""
        config = {"enable-prefix-caching": True}
        result = build_cli_args_from_config(config)
        assert result == ["--enable-prefix-caching"]

    def test_bool_false(self):
        """Bool False is skipped entirely."""
        # A store_true flag must be omitted, not passed as '--flag false'.
        config = {"enable-prefix-caching": False}
        result = build_cli_args_from_config(config)
        assert result == []

    def test_none_value(self):
        """None values are skipped."""
        config = {"lora-path": None}
        result = build_cli_args_from_config(config)
        assert result == []

    def test_list_values(self):
        """List values are expanded into multiple arguments."""
        config = {"cudagraph-capture-sizes": [1, 2, 4, 8]}
        result = build_cli_args_from_config(config)
        assert result == ["--cudagraph-capture-sizes", "1", "2", "4", "8"]

    def test_empty_list(self):
        """Empty lists are skipped (vLLM nargs='+' requires at least one value)."""
        config = {"cudagraph-capture-sizes": []}
        result = build_cli_args_from_config(config)
        assert result == []

    def test_list_with_strings(self):
        """List of strings is properly expanded."""
        config = {"allowed-origins": ["http://localhost", "http://example.com"]}
        result = build_cli_args_from_config(config)
        assert result == ["--allowed-origins", "http://localhost", "http://example.com"]

    def test_dict_value(self):
        """Dict values are JSON serialized."""
        config = {"extra-config": {"key": "value", "nested": True}}
        result = build_cli_args_from_config(config)
        assert result[0] == "--extra-config"
        # JSON output may have different key ordering, so parse and compare
        assert json.loads(result[1]) == {"key": "value", "nested": True}

    def test_mixed_config(self):
        """Test a realistic mixed configuration."""
        config = {
            "tensor-parallel-size": 4,
            "enable-prefix-caching": True,
            "disable-log-requests": False,
            "lora-path": None,
            "cudagraph-capture-sizes": [1, 2, 4, 8],
            "max-model-len": 2048,
        }
        result = build_cli_args_from_config(config)
        # Check expected args are present
        assert "--tensor-parallel-size" in result
        assert "4" in result
        assert "--enable-prefix-caching" in result
        assert "--cudagraph-capture-sizes" in result
        assert "1" in result
        assert "8" in result
        assert "--max-model-len" in result
        assert "2048" in result
        # Check skipped values are not present
        assert "--disable-log-requests" not in result
        assert "--lora-path" not in result

    def test_preserves_order(self):
        """Arguments should preserve dictionary order (Python 3.7+)."""
        config = {"first": "a", "second": "b", "third": "c"}
        result = build_cli_args_from_config(config)
        assert result == ["--first", "a", "--second", "b", "--third", "c"]

    def test_empty_config(self):
        """Empty config returns empty list."""
        config = {}
        result = build_cli_args_from_config(config)
        assert result == []

    def test_single_element_list(self):
        """Single element list works correctly."""
        config = {"sizes": [42]}
        result = build_cli_args_from_config(config)
        assert result == ["--sizes", "42"]
# Allow running this test module directly without invoking pytest externally.
if __name__ == "__main__":
    pytest.main([__file__, "-v"])
| {
"repo_id": "verl-project/verl",
"file_path": "tests/workers/rollout/test_vllm_cli_args_on_cpu.py",
"license": "Apache License 2.0",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:tests/utils/ckpt/test_checkpoint_cleanup_on_cpu.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
import pytest
class TestCheckpointCleanupLogic:
    """Tests for checkpoint cleanup methods in BaseCheckpointManager.

    Exercises the two-phase cleanup contract: ``ensure_checkpoint_capacity``
    runs BEFORE a save and must never delete the newest surviving checkpoint,
    while ``register_checkpoint`` runs AFTER a successful save and evicts
    checkpoints beyond ``max_ckpt_to_keep``.
    """

    @pytest.fixture(autouse=True)
    def setup(self):
        """Set up test fixtures."""
        self.test_dir = tempfile.mkdtemp()
        yield
        # ignore_errors: tests themselves may have deleted directories already.
        shutil.rmtree(self.test_dir, ignore_errors=True)

    @pytest.fixture
    def manager(self, monkeypatch):
        """Create a minimal BaseCheckpointManager for testing."""
        import torch.distributed

        # Pretend to be a single-process distributed run so the manager
        # constructor does not require an initialized process group.
        monkeypatch.setattr(torch.distributed, "get_rank", lambda: 0)
        monkeypatch.setattr(torch.distributed, "get_world_size", lambda: 1)
        from verl.utils.checkpoint.checkpoint_manager import BaseCheckpointManager

        class MockModel:
            pass

        class MockOptimizer:
            pass

        return BaseCheckpointManager(
            model=MockModel(),
            optimizer=MockOptimizer(),
            lr_scheduler=None,
            processing_class=None,
            checkpoint_config=None,
        )

    def _create_checkpoint_dir(self, step: int) -> str:
        """Create a mock checkpoint directory."""
        path = os.path.join(self.test_dir, f"global_step_{step}")
        os.makedirs(path, exist_ok=True)
        with open(os.path.join(path, "checkpoint.txt"), "w") as f:
            f.write(f"step={step}")
        return path

    def test_max_ckpt_1_preserves_existing_before_save(self, manager):
        """
        Regression test: max_ckpt_to_keep=1 must NOT delete existing checkpoint before save.
        """
        ckpt_100 = self._create_checkpoint_dir(100)
        manager.previous_saved_paths = [ckpt_100]
        manager.ensure_checkpoint_capacity(max_ckpt_to_keep=1)
        assert os.path.exists(ckpt_100), "Bug: checkpoint deleted before save!"
        assert manager.previous_saved_paths == [ckpt_100]

    def test_max_ckpt_1_deletes_old_after_save(self, manager):
        """After save succeeds, old checkpoint should be deleted."""
        ckpt_100 = self._create_checkpoint_dir(100)
        manager.previous_saved_paths = [ckpt_100]
        ckpt_200 = self._create_checkpoint_dir(200)
        manager.register_checkpoint(ckpt_200, max_ckpt_to_keep=1)
        assert not os.path.exists(ckpt_100)
        assert os.path.exists(ckpt_200)
        assert manager.previous_saved_paths == [ckpt_200]

    def test_max_ckpt_2_keeps_one_before_save(self, manager):
        """With max_ckpt_to_keep=2, pre-save cleanup keeps 1 checkpoint."""
        # Pre-save must leave room for the incoming checkpoint: keep - 1 survive.
        ckpt_100 = self._create_checkpoint_dir(100)
        ckpt_200 = self._create_checkpoint_dir(200)
        manager.previous_saved_paths = [ckpt_100, ckpt_200]
        manager.ensure_checkpoint_capacity(max_ckpt_to_keep=2)
        assert not os.path.exists(ckpt_100)
        assert os.path.exists(ckpt_200)
        assert len(manager.previous_saved_paths) == 1

    def test_max_ckpt_0_keeps_all(self, manager):
        """max_ckpt_to_keep=0 means unlimited - no deletions."""
        ckpt_100 = self._create_checkpoint_dir(100)
        ckpt_200 = self._create_checkpoint_dir(200)
        manager.previous_saved_paths = [ckpt_100, ckpt_200]
        manager.ensure_checkpoint_capacity(max_ckpt_to_keep=0)
        ckpt_300 = self._create_checkpoint_dir(300)
        manager.register_checkpoint(ckpt_300, max_ckpt_to_keep=0)
        assert os.path.exists(ckpt_100)
        assert os.path.exists(ckpt_200)
        assert os.path.exists(ckpt_300)
        assert len(manager.previous_saved_paths) == 3

    def test_full_save_cycle_max_ckpt_1(self, manager):
        """Simulate multiple save cycles with max_ckpt_to_keep=1."""
        # First save
        manager.ensure_checkpoint_capacity(1)
        ckpt_100 = self._create_checkpoint_dir(100)
        manager.register_checkpoint(ckpt_100, 1)
        assert manager.previous_saved_paths == [ckpt_100]
        # Second save - existing checkpoint must survive pre-save
        manager.ensure_checkpoint_capacity(1)
        assert os.path.exists(ckpt_100), "Bug: checkpoint deleted before save!"
        ckpt_200 = self._create_checkpoint_dir(200)
        manager.register_checkpoint(ckpt_200, 1)
        assert not os.path.exists(ckpt_100)
        assert manager.previous_saved_paths == [ckpt_200]
        # Third save
        manager.ensure_checkpoint_capacity(1)
        assert os.path.exists(ckpt_200), "Bug: checkpoint deleted before save!"
        ckpt_300 = self._create_checkpoint_dir(300)
        manager.register_checkpoint(ckpt_300, 1)
        assert not os.path.exists(ckpt_200)
        assert manager.previous_saved_paths == [ckpt_300]
| {
"repo_id": "verl-project/verl",
"file_path": "tests/utils/ckpt/test_checkpoint_cleanup_on_cpu.py",
"license": "Apache License 2.0",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:scripts/megatron_merge_lora.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pprint import pprint
import hydra
import ray
import torch
from omegaconf import OmegaConf
from verl.single_controller.base.decorator import Dispatch, register
from verl.single_controller.ray import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup
from verl.utils.megatron_utils import get_hf_model_checkpoint_path, load_megatron_model_to_gpu
from verl.workers.megatron_workers import ActorRolloutRefWorker
# Quiet NCCL logging and allow tokenizer parallelism in spawned Ray workers.
os.environ["NCCL_DEBUG"] = "WARN"
os.environ["TOKENIZERS_PARALLELISM"] = "true"
class CustomSaveWorker(ActorRolloutRefWorker):
    """Megatron actor worker extended with an export of LoRA-merged HF weights."""

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def save_merged_weights(self, hf_ckpt_path):
        """Save the actor module's weights to ``hf_ckpt_path`` in HF format.

        Runs on every rank (ONE_TO_ALL dispatch); the bridge implementation
        handles how ranks cooperate on the write.
        """
        import os

        if self._is_offload_param:
            # Parameters may be offloaded to CPU between steps; restore them
            # to GPU before exporting.
            load_megatron_model_to_gpu(self.actor_module)
        # Make sure all ranks are materialized before the export starts.
        torch.distributed.barrier()
        print(f"[Rank {os.environ.get('RANK', '?')}] Saving weights to {hf_ckpt_path}...")
        if self.vanilla_bridge:
            self.bridge.save_weights(
                self.actor_module, hf_ckpt_path, distributed_filesystem=True, memory_efficient=True
            )
        else:
            self.bridge.save_hf_weights(self.actor_module, hf_ckpt_path)
        return True
@hydra.main(config_path="../verl/trainer/config", config_name="ppo_megatron_trainer", version_base=None)
def main(config):
    """Hydra entry point: validate the config, then launch the merge job.

    Requires ``actor_rollout_ref.model.lora.adapter_path`` to point at a
    saved LoRA adapter checkpoint; all other settings should match the
    original training run.
    """
    assert config.actor_rollout_ref.model.lora.adapter_path is not None, "adapter_path must be specified"
    if (
        config.actor_rollout_ref.actor.optim.lr_decay_steps is None
        or config.actor_rollout_ref.actor.optim.lr_decay_steps < 1
    ):
        # set to bypass OptimizerParamScheduler checks
        # (the merge job never steps the optimizer, so the value is arbitrary)
        config.actor_rollout_ref.actor.optim.lr_decay_steps = 100000
    run_merge(config)
def run_merge(config) -> None:
    """Ensure a Ray cluster is available, then run the merge task remotely."""
    if not ray.is_initialized():
        # Local-cluster bootstrap: overlay any user-supplied ray_init kwargs
        # on top of the default runtime environment.
        defaults = {"env_vars": {"TOKENIZERS_PARALLELISM": "true", "NCCL_DEBUG": "WARN"}}
        init_kwargs = config.ray_kwargs.get("ray_init", {})
        merged_env = OmegaConf.merge(defaults, init_kwargs.get("runtime_env", {}))
        init_kwargs = OmegaConf.create({**init_kwargs, "runtime_env": merged_env})
        print(f"ray init kwargs: {init_kwargs}")
        ray.init(**OmegaConf.to_container(init_kwargs))
    ray.get(main_task.remote(config))
@ray.remote(num_cpus=1)
def main_task(config):
    """Driver task: spin up the Megatron actor group and export merged weights."""
    pprint(OmegaConf.to_container(config, resolve=True))  # resolve=True will eval symbol values
    OmegaConf.resolve(config)
    ray_cls_with_init = RayClassWithInitArgs(
        cls=ray.remote(CustomSaveWorker), config=config.actor_rollout_ref, role="actor"
    )
    # One process per GPU across all nodes.
    resource_pool = RayResourcePool(process_on_nodes=[config.trainer.n_gpus_per_node] * config.trainer.nnodes)
    worker = RayWorkerGroup(
        resource_pool=resource_pool,
        ray_cls_with_init=ray_cls_with_init,
        device_name=config.trainer.device,
    )
    # init_model loads the base model and applies the LoRA adapter per config.
    worker.init_model()
    adapter_path = config.actor_rollout_ref.model.lora.adapter_path
    # The HF export directory is derived from the adapter's parent directory.
    hf_ckpt_path = get_hf_model_checkpoint_path(os.path.dirname(adapter_path))
    worker.save_merged_weights(hf_ckpt_path)
if __name__ == "__main__":
    """
    Use the same config as your training script, besides **specifying the adapter_path**.
    For example, your training script starts with:
        `python3 -m verl.trainer.main_ppo --config-name=ppo_megatron_trainer ...`
    Now replace it with
        `python3 ./scripts/megatron_merge_lora.py --config-name=ppo_megatron_trainer ...`
    """
    main()
| {
"repo_id": "verl-project/verl",
"file_path": "scripts/megatron_merge_lora.py",
"license": "Apache License 2.0",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:tests/checkpoint_engine/test_utils.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
from typing import Generator
import ray
import torch
from transformers import AutoModelForCausalLM
from verl.checkpoint_engine import CheckpointEngineRegistry, CheckpointEngineWorker
from verl.single_controller.base.decorator import Dispatch, register
from verl.single_controller.ray import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup
from verl.utils.device import get_device_name
from verl.utils.fs import copy_to_local
from verl.workers.config import CheckpointEngineConfig, FSDPEngineConfig, HFModelConfig, RolloutConfig
from verl.workers.engine_workers import TrainingWorker, TrainingWorkerConfig
from verl.workers.rollout import BaseRollout, RolloutReplica
class TrainingWorkerTest(TrainingWorker):
    """TrainingWorker variant that owns a checkpoint engine for weight-sync tests."""

    def __init__(self, config: TrainingWorkerConfig, checkpoint_engine_config: CheckpointEngineConfig) -> None:
        super().__init__(config)
        backend = checkpoint_engine_config.backend
        # Convert MiB to bytes for the engine's transfer bucket.
        bucket_size = checkpoint_engine_config.update_weights_bucket_megabytes << 20
        engine_kwargs = checkpoint_engine_config.engine_kwargs.get(backend, {})
        if torch.distributed.get_rank() == 0:
            # Global rank 0 acts as the master endpoint of the checkpoint engine.
            engine_kwargs["is_master"] = True
        self.checkpoint_engine = CheckpointEngineRegistry.new(backend, bucket_size=bucket_size, **engine_kwargs)

    @register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=False)
    async def update_weights(self, global_steps: int = None):
        """Stream full per-tensor params from the training engine to rollout side."""
        per_tensor_param, _ = self.engine.get_per_tensor_param()
        await self.checkpoint_engine.send_weights(per_tensor_param)

    @register(dispatch_mode=Dispatch.DP_COMPUTE, blocking=False)
    def execute_checkpoint_engine(self, method: str, *args, **kwargs):
        # Generic RPC passthrough into the local checkpoint engine instance.
        return getattr(self.checkpoint_engine, method)(*args, **kwargs)
class MockServerAdapter(BaseRollout):
    """Rollout stand-in that records received weights instead of serving a model."""

    def __init__(self, config: RolloutConfig, model_config: HFModelConfig, check_allclose: bool = True):
        super().__init__(config, model_config, device_mesh=None)
        # When True, received tensors are retained so check_weights can
        # compare them against the reference HF checkpoint.
        self.check_allclose = check_allclose
        # Reference model, loaded lazily on first check_weights call.
        self.model = None
        self.received_weights: dict[str, torch.Tensor] = {}

    async def resume(self, tags: list[str]):
        raise NotImplementedError()

    async def release(self):
        raise NotImplementedError()

    async def update_weights(
        self,
        weights: Generator[tuple[str, torch.Tensor], None, None],
        **kwargs,
    ):
        # NOTE(review): `async for` implies `weights` is actually an *async*
        # generator at runtime despite the sync Generator annotation — confirm
        # against the checkpoint engine's receive_weights implementation.
        async for name, weight in weights:
            weight = weight.clone()
            if self.check_allclose:
                self.received_weights[name] = weight.clone()

    def check_weights(self):
        """Assert every reference weight was received and matches numerically."""
        if not self.check_allclose:
            return
        if self.model is None:
            # Lazily load the golden model on CPU the first time we verify.
            local_path = copy_to_local(self.model_config.path)
            self.model = AutoModelForCausalLM.from_pretrained(local_path, torch_dtype=torch.bfloat16, device_map="cpu")
        for name, weight in self.model.state_dict().items():
            assert name in self.received_weights, f"weight {name} not received"
            received = self.received_weights[name]
            assert torch.allclose(weight.to(received.device), received), f"weight {name} not equal"
        # Reset so the adapter can be reused for the next sync round.
        self.received_weights.clear()
class MockReplica(RolloutReplica):
    """RolloutReplica stub: binds workers from an existing group, launches no servers."""

    async def init_hybrid(self, worker_group: RayWorkerGroup):
        """Init hybrid rollout server, rollout engine and training engine(fsdp/megatron) fused in same process.

        Args:
            worker_group: RayWorkerGroup, fused workers where training engine(fsdp/megatron) have been initialized.
        """
        # Slice this replica's contiguous worker range out of the fused group.
        self.workers = worker_group.workers[
            self.world_size * self.replica_rank : self.world_size * (self.replica_rank + 1)
        ]

    def get_ray_class_with_init_args(self) -> RayClassWithInitArgs:
        """Get rollout worker actor class for colocated and standalone mode."""
        raise NotImplementedError

    async def launch_servers(self):
        """Launch http server in each node."""
        raise NotImplementedError
class CheckpointEngineWorkerTest(CheckpointEngineWorker):
    """CheckpointEngineWorker wired to a MockServerAdapter for weight verification."""

    def __init__(
        self, rollout_config: RolloutConfig, model_config: HFModelConfig, check_allclose: bool = True, *args, **kwargs
    ) -> None:
        # Inject the mock adapter so no real inference server is started.
        server_adapter = MockServerAdapter(rollout_config, model_config, check_allclose)
        super().__init__(rollout_config, model_config, server_adapter, *args, **kwargs)

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def check_weights(self):
        # Delegate the numeric comparison to the mock adapter on every rank.
        self.server_adapter.check_weights()
def create_trainer_worker_group(
    resource_pool: RayResourcePool, model_config: HFModelConfig, checkpoint_engine_config: CheckpointEngineConfig
) -> RayWorkerGroup:
    """Spawn a forward-only FSDP TrainingWorkerTest group on the given resource pool."""
    # forward_only: the trainer is only used as a weight source in these tests.
    engine_config = FSDPEngineConfig(forward_only=True, fsdp_size=resource_pool.world_size, strategy="fsdp")
    trainer_config = TrainingWorkerConfig(
        model_type="language_model",
        model_config=model_config,
        engine_config=engine_config,
    )
    ray_cls_with_init = RayClassWithInitArgs(
        cls=ray.remote(TrainingWorkerTest),
        config=trainer_config,
        checkpoint_engine_config=checkpoint_engine_config,
    )
    # Reduce CUDA allocator fragmentation during large weight gathers.
    ray_cls_with_init.update_options(
        {
            "runtime_env": {
                "env_vars": {
                    "PYTORCH_CUDA_ALLOC_CONF": "expandable_segments:True",
                }
            }
        }
    )
    wg = RayWorkerGroup(resource_pool=resource_pool, ray_cls_with_init=ray_cls_with_init, device_name=get_device_name())
    return wg
async def create_rollout_worker_group(
    resource_pool: RayResourcePool,
    model_config: HFModelConfig,
    rollout_config: RolloutConfig,
    check_allclose: bool = True,
) -> tuple[RayWorkerGroup, list[MockReplica]]:
    """Spawn CheckpointEngineWorkerTest actors and group them into mock replicas."""
    # create rollout worker group
    ray_cls_with_init = RayClassWithInitArgs(
        cls=ray.remote(CheckpointEngineWorkerTest),
        model_config=model_config,
        rollout_config=rollout_config,
        check_allclose=check_allclose,
    )
    wg = RayWorkerGroup(resource_pool=resource_pool, ray_cls_with_init=ray_cls_with_init, device_name=get_device_name())
    # create rollout replicas
    # One replica spans tp * dp * pp workers.
    rollout_world_size = (
        rollout_config.tensor_model_parallel_size
        * rollout_config.data_parallel_size
        * rollout_config.pipeline_model_parallel_size
    )
    num_replicas = wg.world_size // rollout_world_size
    replicas = []
    for replica_rank in range(num_replicas):
        replica = MockReplica(
            replica_rank=replica_rank,
            config=rollout_config,
            model_config=model_config,
        )
        replicas.append(replica)
    # Bind each replica to its contiguous slice of the shared worker group.
    await asyncio.gather(*[replica.init_hybrid(wg) for replica in replicas])
    return wg, replicas
| {
"repo_id": "verl-project/verl",
"file_path": "tests/checkpoint_engine/test_utils.py",
"license": "Apache License 2.0",
"lines": 152,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:verl/checkpoint_engine/base.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
from abc import ABC, abstractmethod
from typing import Any, Generator, TypedDict
import ray
import torch
from verl.single_controller.base import Worker
from verl.single_controller.base.decorator import Dispatch, register
from verl.single_controller.ray import RayClassWithInitArgs, RayWorkerGroup
from verl.utils.distributed import initialize_global_process_group_ray
from verl.utils.ray_utils import auto_await
from verl.workers.config import CheckpointEngineConfig, HFModelConfig, RolloutConfig
from verl.workers.rollout import BaseRollout, RolloutReplica, get_rollout_class
class TensorMeta(TypedDict):
    """Metadata describing one tensor packed into a transfer bucket."""

    # Fully-qualified parameter name.
    name: str
    # Tensor shape.
    shape: torch.Size
    # Tensor dtype.
    dtype: torch.dtype
    # Byte offset of the tensor within the bucket buffer.
    offset: int
class CheckpointEngineRegistry:
    """Checkpoint engine registry mapping backend names to engine classes."""

    _registry: dict[str, type["CheckpointEngine"]] = {}

    @staticmethod
    def register(backend: str):
        """Class-decorator factory that registers a checkpoint engine.

        Fix: this was an implicit instance method (no ``self``), which only
        worked because it was always invoked via the class object. Declared
        ``@staticmethod`` so the decorator behaves identically whether
        accessed via the class or an instance.

        Args:
            backend: The backend of the checkpoint engine.
        """

        def wrapper(cls: type["CheckpointEngine"]):
            CheckpointEngineRegistry._registry[backend] = cls
            return cls

        return wrapper

    @classmethod
    def get(cls, backend: str) -> type["CheckpointEngine"]:
        """Get the checkpoint engine class.

        Args:
            backend: The backend of the checkpoint engine.

        Returns:
            The checkpoint engine class.

        Raises:
            KeyError: If the backend was never registered.
        """
        return cls._registry[backend]

    @classmethod
    def new(cls, backend: str, *args, **kwargs) -> "CheckpointEngine":
        """Create a new checkpoint engine instance.

        Args:
            backend: The backend of the checkpoint engine.
            *args: Variable length argument pass to the checkpoint engine constructor.
            **kwargs: Arbitrary keyword arguments pass to the checkpoint engine constructor.

        Returns:
            A new checkpoint engine instance.

        Raises:
            ValueError: If the backend was never registered.
        """
        if backend not in cls._registry:
            raise ValueError(f"Checkpoint engine {backend} not registered")
        return cls._registry[backend](*args, **kwargs)
class CheckpointEngine(ABC):
    """CheckpointEngine is an abstraction to transfer weights from trainer to rollout.

    In trainer process:
    >>> trainer = EngineRegistry.new(...)  # FSDP, Megatron, VeOmni, TorchTitan, ...
    >>> engine = CheckpointEngine.new(...)  # NCCLCheckpointEngine, NIXLCheckpointEngine, ...
    >>> await engine.send_weights(trainer.get_per_tensor_param())

    In rollout process:
    >>> engine = CheckpointEngine.new(...)
    >>> server_adapter = ServerAdapter()
    >>> await server_adapter.update_weights(engine.get_weights())  # update weights via cuda ipc
    """

    @abstractmethod
    def prepare(self) -> dict[str, Any]:
        """Prepare checkpoint engine before each step send_weights/receive_weights.

        1. Allocate weight bucket.
        2. [Optional] Register weight bucket for RDMA.
        3. Return metadata to build communication topology: master ip:port, register RDMA description, etc.

        Returns:
            A dictionary that contains this worker's metadata, later collected
            across all workers and fed to ``build_topology``.
        """
        raise NotImplementedError

    @classmethod
    @abstractmethod
    def build_topology(
        cls, trainer_world_size: int, rollout_world_size: int, metadata: list[dict]
    ) -> tuple[dict[str, list[Any]], dict[str, list[Any]]]:
        """Build communication topology between all workers.

        Args:
            trainer_world_size: The world size of the trainer worker group.
            rollout_world_size: The world size of the rollout replica.
            metadata: A list of metadata `prepare` from all workers.

        Returns:
            A tuple of two dictionaries that contains the communication topology for trainer and rollout worker group.
            Each dict value should be a list argument equal to the world size of the worker group to dispatch to
            `init_process_group`.
            ```
            world_size = rollout.world_size + trainer.world_size
            kwargs = {
                "rank": list(range(world_size)),
                "world_size": [world_size] * world_size,
                "master_metadata": [metadata[0]] * world_size,
            }
            ```
        """
        raise NotImplementedError

    @abstractmethod
    def init_process_group(self, **kwargs):
        """Init process group for checkpoint engine.

        Args:
            **kwargs: Keyword arguments from `build_topology`.
        """
        raise NotImplementedError

    @abstractmethod
    def finalize(self):
        """Finalize checkpoint engine after each step send_weights/receive_weights.

        1. Free weight bucket.
        2. [Optional] Deregister weight bucket for RDMA.
        3. [Optional] Destroy process group.
        """
        raise NotImplementedError

    @abstractmethod
    async def send_weights(self, weights: Generator[tuple[str, torch.Tensor], None, None]):
        """Send the weights of the model.

        Args:
            weights: A generator that yields the name of the weight tensor and the tensor itself.
        """
        raise NotImplementedError

    @abstractmethod
    async def receive_weights(self) -> Generator[tuple[str, torch.Tensor], None, None]:
        """Receive the weights of the model.

        Yields:
            A tuple of the name of the weight tensor and the tensor itself.
        """
        raise NotImplementedError
class CheckpointEngineWithCache(CheckpointEngine):
    """Checkpoint engine with local cache: shm, disk, etc. This allow to synchronize weights without interrupting
    rollout ongoing requests (partial rollout). After requests exhausted, rollout can get weights from local cache.

    Laminar: https://arxiv.org/abs/2510.12633
    """

    @abstractmethod
    async def get_weights(self) -> Generator[tuple[str, torch.Tensor], None, None]:
        """Get the weights of the model from local cache.

        Yields:
            A tuple of the name of the weight tensor and the tensor itself.
        """
        raise NotImplementedError
@CheckpointEngineRegistry.register("naive")
class ColocatedCheckpointEngine(CheckpointEngine):
    """Pass-through checkpoint engine for trainer and rollout colocated on one GPU.

    No cross-process transfer happens: the trainer hands over its weight
    generator and the rollout side consumes it directly in the same process.

    In trainer process:
    >>> engine = ColocatedCheckpointEngine()
    >>> trainer = Trainer()
    >>> server_adapter = ServerAdapter()
    >>> engine.send_weights(trainer.get_per_tensor_param())
    >>> server_adapter.update_weights(engine.receive_weights())
    """

    def __init__(self, bucket_size: int, is_master: bool = False) -> None:
        # Accepted for interface parity with the distributed engines; neither
        # value is consulted on the colocated fast path.
        self.bucket_size = bucket_size
        self.is_master = is_master

    # Topology/lifecycle hooks are meaningless when no transfer occurs.
    def prepare(self):
        raise NotImplementedError

    def init_process_group(self, **kwargs):
        raise NotImplementedError

    def finalize(self):
        raise NotImplementedError

    @classmethod
    def build_topology(cls, *args, **kwargs):
        raise NotImplementedError

    def send_weights(self, weights: Generator[tuple[str, torch.Tensor], None, None]):
        """Stash the trainer's weight generator for the colocated consumer."""
        self.weights = weights

    def receive_weights(self) -> Generator[tuple[str, torch.Tensor], None, None]:
        """Yield the (name, tensor) pairs handed over by ``send_weights``."""
        yield from self.weights
        # Drop the reference only after full consumption so tensors can be freed.
        self.weights = None
class CheckpointEngineWorker(Worker):
    """CheckpointEngineWorker colocated with inference engine's WorkerProc on same GPU.

    Args:
        rollout_config: The rollout configuration.
        model_config: The model configuration.
        server_adapter: The server adapter to update weights.
    """

    def __init__(
        self,
        rollout_config: RolloutConfig,
        model_config: HFModelConfig,
        server_adapter: BaseRollout = None,
        *args,
        **kwargs,
    ) -> None:
        super().__init__()
        self.rollout_config = rollout_config
        self.model_config = model_config
        self.server_adapter: BaseRollout = server_adapter
        backend = self.rollout_config.checkpoint_engine.backend
        # Megabytes -> bytes for the engine's transfer bucket.
        bucket_size = self.rollout_config.checkpoint_engine.update_weights_bucket_megabytes << 20
        engine_kwargs = self.rollout_config.checkpoint_engine.engine_kwargs.get(backend, {})
        self.checkpoint_engine: CheckpointEngine = CheckpointEngineRegistry.new(
            backend, bucket_size=bucket_size, **engine_kwargs
        )
        self.extra_rollout_args = args
        self.extra_rollout_kwargs = kwargs
        if self.server_adapter is None:
            # No adapter injected (tests inject one): build the real rollout
            # server adapter for the configured backend.
            self.server_adapter = get_rollout_class(self.rollout_config.name, self.rollout_config.mode)(
                *self.extra_rollout_args,
                config=self.rollout_config,
                model_config=self.model_config,
                device_mesh=None,
                **self.extra_rollout_kwargs,
            )
        # sglang and trt-llm need device_mesh for internal communication
        # NOTE(review): placed at constructor level (runs for injected adapters
        # too) — confirm against upstream whether this belongs inside the
        # `server_adapter is None` branch only.
        initialize_global_process_group_ray(timeout_second=None, backend="cpu:gloo")

    @register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=False)
    async def update_weights(self, global_steps: int = None):
        """Pull weights from the checkpoint engine and push them into the server."""
        weights = self.checkpoint_engine.receive_weights()
        await self.server_adapter.update_weights(weights, global_steps=global_steps)

    @register(dispatch_mode=Dispatch.DP_COMPUTE, blocking=False)
    def execute_checkpoint_engine(self, method: str, *args, **kwargs):
        # Generic RPC passthrough into the local checkpoint engine instance.
        return getattr(self.checkpoint_engine, method)(*args, **kwargs)

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def get_replica_rank(self) -> int:
        """Get replica rank from the underlying rollout server adapter."""
        return self.server_adapter.replica_rank

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def is_leader_rank(self) -> bool:
        """Get leader rank flag from the underlying rollout server adapter."""
        return self.server_adapter.is_leader_rank
# Ray remote actor class for CheckpointEngineWorker; used below when wrapping the
# replicas' worker handles into a temporary RayWorkerGroup.
_worker_cls = ray.remote(CheckpointEngineWorker)
class CheckpointEngineManager:
    """Checkpoint engine manager to coordinate weight synchronization between trainer and rollout replicas.

    - ME: model engine, FSDP, MCore, VeOmni, export full tensor generator `get_per_tensor_param`
    - CE: checkpoint engine, NCCL, NIXL, etc

    In trainer, model engine and checkpoint engine are in same process.
    In rollout, checkpoint engine and rollout worker are in separate process, update weights via cuda ipc.
    ```
    ┌────────┬────────┬─────┬────────┐    ┌───────────────────┬───────────────────┐
    │ ┌────┐ │ ┌────┐ │     │ ┌────┐ │    │     Replica 0     │     Replica 1     │
    │ │ ME0│ │ │ ME1│ │     │ │ MEn│ │    ├────┬────┬────┬────┼────┬────┬────┬────┤
    │ └──┬─┘ │ └────┘ │ ... │ └────┘ │    │ 0  │ 1  │ 2  │ 3  │ 0  │ 1  │ 2  │ 3  │
    │    v   |        |     |        |    └──┬─┴──┬─┴──┬─┴──┬─┴──┬─┴──┬─┴──┬─┴──┬─┘
    |  ┌──┴─┐ │ ┌────┐ │     │ ┌────┐ │       ^    ^    ^ cuda ipc ^    ^    ^
    │  │ CE │ │ │ CE │ │     │ │ CE │ │    ┌──┴─┬──┴─┬──┴─┬──┴─┬──┴─┬──┴─┬──┴─┬──┴─┐
    │  └──┬─┘ │ └────┘ │     │ └────┘ │    │ CE │ CE │ CE │ CE │ CE │ CE │ CE │ CE |
    └────┼───┴────────┴─────┴────────┘    └──┬─┴──┬─┴──┬─┴──┬─┴──┬─┴──┬─┴──┬─┴──┬─┘
         v                                   |    |    |    |    |    |    |    |
         └─────────────(nccl/nixl/..)─────────────┴────┴────┴────┴────┴────┴────┴────┘
    ```

    Args:
        config: The checkpoint engine config.
        trainer: The trainer worker group.
        replicas: The list of rollout replicas.
    """

    def __init__(
        self,
        config: CheckpointEngineConfig,
        trainer: RayWorkerGroup,
        replicas: list[RolloutReplica],
    ) -> None:
        self.config = config
        self.backend = config.backend
        # resolve the engine class once; used for topology construction
        self.backend_cls = CheckpointEngineRegistry.get(config.backend)
        self.trainer = trainer
        self.replicas = replicas

    def build_process_group(self, rollout: RayWorkerGroup):
        """Build process group for trainer and rollout replicas."""
        trainer = self.trainer
        # 1. prepare all workers (allocates buffers, returns per-worker metadata)
        prepare_refs = trainer.execute_checkpoint_engine(["prepare"] * trainer.world_size)
        prepare_refs = prepare_refs + rollout.execute_checkpoint_engine(["prepare"] * rollout.world_size)
        metadata = ray.get(prepare_refs)

        # 2. build communication topology between all workers
        trainer_kwargs, rollout_kwargs = self.backend_cls.build_topology(
            trainer.world_size, rollout.world_size, metadata
        )
        checks = (
            ("trainer", trainer_kwargs, trainer.world_size),
            ("rollout", rollout_kwargs, rollout.world_size),
        )
        for side, kwargs, size in checks:
            for k, v in kwargs.items():
                assert len(v) == size, f"{side}_kwargs[{k}] must have length of {size}"
        trainer_kwargs["method"] = ["init_process_group"] * trainer.world_size
        rollout_kwargs["method"] = ["init_process_group"] * rollout.world_size

        # 3. init process group between all workers
        init_refs = trainer.execute_checkpoint_engine(**trainer_kwargs)
        init_refs = init_refs + rollout.execute_checkpoint_engine(**rollout_kwargs)
        ray.get(init_refs)

    def add_replicas(self, replicas: list[RolloutReplica]):
        """Add rollout replicas to the manager for elastic scale up, will rebuild process group.

        Args:
            replicas: The list of rollout replicas to add.
        """
        self.replicas.extend(replicas)

    def remove_replicas(self, replicas: list[RolloutReplica]):
        """Remove rollout replicas from the manager for elastic scale down, will rebuild process group.

        Args:
            replicas: The list of rollout replicas to remove.
        """
        to_remove = set(replicas)
        self.replicas = [replica for replica in self.replicas if replica not in to_remove]

    @auto_await
    async def sleep_replicas(self):
        """Sleep all rollout replicas: free weight and kv_cache device memory."""
        # skip sleep replicas for disaggregated rollout
        if self.backend != "naive":
            return
        await asyncio.gather(*(replica.sleep() for replica in self.replicas))

    @auto_await
    async def update_weights(self, global_steps: int = None):
        """Update weights from trainer to rollout replicas.

        Args:
            global_steps: The global steps of the trainer.
        """
        # 0. update weights for sync training with colocated trainer and rollout
        if self.backend == "naive":
            ray.get(self.trainer.update_weights(global_steps=global_steps))
            return

        # 1. abort and save all unfinished requests for partial rollout
        await asyncio.gather(*(replica.abort_all_requests() for replica in self.replicas))

        # 2. create a temporary worker group spanning all replicas' workers
        all_workers = [worker for replica in self.replicas for worker in replica.workers]
        rollout = RayWorkerGroup(worker_handles=all_workers, ray_cls_with_init=RayClassWithInitArgs(cls=_worker_cls))
        trainer = self.trainer

        # 3. build process group
        self.build_process_group(rollout)

        # 4. update weights of all workers
        ray.get(trainer.update_weights(global_steps=global_steps) + rollout.update_weights(global_steps=global_steps))

        # 5. finalize all workers
        finalize_refs = trainer.execute_checkpoint_engine(["finalize"] * trainer.world_size)
        finalize_refs = finalize_refs + rollout.execute_checkpoint_engine(["finalize"] * rollout.world_size)
        ray.get(finalize_refs)

        # 6. resume all unfinished requests for partial rollout
        await asyncio.gather(*(replica.resume_generation() for replica in self.replicas))
| {
"repo_id": "verl-project/verl",
"file_path": "verl/checkpoint_engine/base.py",
"license": "Apache License 2.0",
"lines": 346,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/checkpoint_engine/nccl_checkpoint_engine.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
import os
import time
from dataclasses import dataclass
from typing import AsyncGenerator, Generator
from unittest.mock import patch
with patch("importlib.metadata.distributions", return_value=[]):
import cupy as cp
import ray
import ray.util.collective as collective
import torch
import zmq
from verl.checkpoint_engine.base import CheckpointEngine, CheckpointEngineRegistry, TensorMeta
from verl.utils.net_utils import get_free_port, is_valid_ipv6_address
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
@dataclass
class MasterMetadata:
    """Endpoint of the master's ZeroMQ PUB server, handed to subscriber ranks."""

    # IP address the master's zmq PUB socket is bound to
    zmq_ip: str
    # TCP port of the master's zmq PUB socket
    zmq_port: int
class BroadcastOperation:
    """Async broadcast operation with NCCL in separate thread.

    Constructing the object immediately schedules the blocking work (`_run`) on the
    running event loop's default thread-pool executor, so the zmq metadata exchange
    and the NCCL broadcast overlap with the caller's event loop. Must therefore be
    constructed from within a running asyncio event loop.

    Args:
        rank (int): The rank of the current process.
        group_name (str): The name of the NCCL process group.
        bucket (cp.ndarray | torch.Tensor): The tensor to broadcast.
        metadata (dict[str, TensorMeta]): The metadata of the tensor. On non-zero
            ranks this is replaced by the metadata received from rank 0.
        socket (zmq.Socket): The zeromq socket to communicate with master.
        topic (str): The topic to subscribe.
    """

    def __init__(
        self,
        rank: int,
        group_name: str,
        bucket: cp.ndarray | torch.Tensor,
        metadata: dict[str, TensorMeta],
        socket: zmq.Socket,
        topic: str,
    ) -> None:
        self.rank = rank
        self.group_name = group_name
        self.bucket = bucket
        self.metadata = metadata
        self.socket = socket
        self.topic = topic
        # kick off the blocking work right away; wait_for_complete() awaits it
        loop = asyncio.get_running_loop()
        self._task = loop.run_in_executor(None, self._run)

    def _run(self):
        # broadcast tensor meta via zeromq PUB/SUB
        if self.rank == 0:
            # multipart message: topic frame first (SNDMORE), then the pickled metadata
            self.socket.send_string(self.topic, flags=zmq.SNDMORE)
            self.socket.send_pyobj(self.metadata)
        else:
            # consume the topic frame, then the metadata payload
            self.socket.recv_string()
            self.metadata = self.socket.recv_pyobj()
        # broadcast tensor via NCCL
        collective.broadcast(self.bucket, src_rank=0, group_name=self.group_name)

    async def wait_for_complete(self) -> dict[str, TensorMeta]:
        """Wait for the broadcast operation to complete.

        Returns:
            dict[str, TensorMeta]: The bucket meta after broadcast.
        """
        await self._task
        return self.metadata
@CheckpointEngineRegistry.register("nccl")
class NCCLCheckpointEngine(CheckpointEngine):
    """NCCL checkpoint engine with collective communication.

    Rank 0 (trainer master) packs weights into fixed-size buckets and broadcasts each
    bucket over a single NCCL group to all rollout ranks; per-bucket tensor metadata
    travels over a ZeroMQ PUB/SUB channel. Send and receive sides both double-buffer
    so the next broadcast overlaps with packing/unpacking of the current bucket.

    Args:
        bucket_size (int): Bucket size in bytes to transfer multiple weights at one time. Note that we use
            two buffer to send and recv weights at same time, so the device memory overhead is 2 * bucket_size.
        group_name (str): The name of the NCCL process group. Defaults to "default".
        rebuild_group (bool): Whether to rebuild the NCCL process group in each update. Defaults to False.
        is_master (bool): Whether the current process is the master process. Defaults to False.
        rollout_dtype (torch.dtype): The dtype of the weights received from rollout workers. Defaults to torch.bfloat16.
    """

    def __init__(
        self,
        bucket_size: int,
        group_name: str = "default",
        rebuild_group: bool = False,
        is_master: bool = False,
        rollout_dtype: torch.dtype = torch.bfloat16,
    ) -> None:
        self.bucket_size = bucket_size
        self.group_name = group_name
        self.rebuild_group = rebuild_group
        self.rollout_dtype = rollout_dtype
        # start zeromq server for broadcasting bucket tensor metadata
        self.is_master = is_master
        self.topic = "bucket_metadata"
        if self.is_master:
            self._start_zmq_server()

    def prepare(self) -> MasterMetadata:
        """Allocate the double-buffered send/recv buckets.

        Returns:
            MasterMetadata: the master's zmq endpoint, or None on non-master ranks.
        """
        # For master process, use cupy instead of torch to avoid memory register error
        # when `PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True`.
        if self.is_master:
            self.send_buf = cp.zeros(self.bucket_size, dtype=cp.uint8)
            self.recv_buf = cp.zeros(self.bucket_size, dtype=cp.uint8)
        else:
            self.send_buf = torch.zeros(self.bucket_size, dtype=torch.uint8, device="cuda")
            self.recv_buf = torch.zeros(self.bucket_size, dtype=torch.uint8, device="cuda")
        return MasterMetadata(zmq_ip=self.ip, zmq_port=self.listen_port) if self.is_master else None

    def finalize(self):
        """Destroy the NCCL process group if rebuild_group is True."""
        if self.rebuild_group:
            # NOTE(review): self.rank is reset to None below, so calling finalize twice
            # in a row would raise on the `>= 0` comparison — confirm finalize runs at
            # most once per update cycle.
            if self.rank >= 0:
                collective.destroy_collective_group(self.group_name)
            self.rank = None
            self.world_size = None
        # drop buffers so their device memory can be reclaimed
        self.send_buf = None
        self.recv_buf = None
        torch.cuda.empty_cache()

    @classmethod
    def build_topology(cls, trainer_world_size: int, rollout_world_size: int, metadata: list[dict]):
        """Build per-worker kwargs for init_process_group.

        Only trainer rank 0 joins the NCCL group (as rank 0); other trainer workers get
        rank -1 and skip communication. Rollout workers get ranks 1..rollout_world_size.

        Args:
            trainer_world_size: number of trainer workers.
            rollout_world_size: number of rollout workers.
            metadata: `prepare` results, trainer workers first; metadata[0] is the
                master's zmq endpoint.
        """
        trainer_kwargs = {
            "rank": [0] + [-1] * (trainer_world_size - 1),
            "world_size": [rollout_world_size + 1] * trainer_world_size,
            "master_metadata": [metadata[0]] * trainer_world_size,
        }
        rollout_kwargs = {
            "rank": list(range(1, rollout_world_size + 1)),
            "world_size": [rollout_world_size + 1] * rollout_world_size,
            "master_metadata": [metadata[0]] * rollout_world_size,
        }
        return trainer_kwargs, rollout_kwargs

    def _start_zmq_server(self):
        # Bind a PUB socket on this node; subscribers connect via MasterMetadata.
        self.ip = ray.util.get_node_ip_address().strip("[]")
        self.listen_port, _ = get_free_port(self.ip)
        context = zmq.Context()
        self.socket = context.socket(zmq.PUB)
        if is_valid_ipv6_address(self.ip):
            address = f"tcp://[{self.ip}]:{self.listen_port}"
            self.socket.setsockopt(zmq.IPV6, 1)
        else:
            address = f"tcp://{self.ip}:{self.listen_port}"
        self.socket.bind(address)

    def _connect_zmq_client(self, metadata: MasterMetadata):
        # Connect a SUB socket to the master's PUB endpoint and subscribe to the
        # bucket-metadata topic.
        assert not self.is_master, "Master process should not connect to other processes."
        context = zmq.Context()
        self.socket = context.socket(zmq.SUB)
        if is_valid_ipv6_address(metadata.zmq_ip):
            address = f"tcp://[{metadata.zmq_ip}]:{metadata.zmq_port}"
            self.socket.setsockopt(zmq.IPV6, 1)
        else:
            address = f"tcp://{metadata.zmq_ip}:{metadata.zmq_port}"
        self.socket.connect(address)
        self.socket.setsockopt_string(zmq.SUBSCRIBE, self.topic)

    def init_process_group(self, rank: int, world_size: int, master_metadata: MasterMetadata):
        """Initialize the NCCL process group.

        Args:
            rank (int): The rank of the current process (-1 for non-participating
                trainer workers).
            world_size (int): The total number of processes.
            master_metadata (MasterMetadata): zmq endpoint of the master's PUB socket.
        """
        # For trainer workers other than rank 0, their rank should be -1.
        if rank < 0:
            self.rank = rank
            self.world_size = world_size
            return
        if self.rebuild_group or not collective.is_group_initialized(self.group_name):
            collective.init_collective_group(world_size, rank, "nccl", self.group_name)
            self.rank = rank
            self.world_size = world_size
        else:
            # reusing a persistent group: the caller must hand back the same topology
            assert self.rank == rank, f"rank {rank} is not equal to self.rank {self.rank}"
            assert self.world_size == world_size, (
                f"world_size {world_size} is not equal to self.world_size {self.world_size}"
            )
        if self.rank > 0:
            self._connect_zmq_client(master_metadata)
        collective.barrier(self.group_name)
        logger.info(f"init_process_group rank: {self.rank}, world_size: {self.world_size}")

    @torch.no_grad()
    async def send_weights(self, weights: Generator[tuple[str, torch.Tensor], None, None]):
        """Send the weights of the model.

        Packs weights into the current send bucket; when a weight would overflow the
        bucket, broadcasts the bucket (overlapped via BroadcastOperation) and swaps
        buffers so packing continues while the previous bucket is in flight.

        Args:
            weights: A generator that yields the name of the weight tensor and the tensor itself.
        """
        assert self.rank <= 0, "Trainer workers other than rank 0 should not send weights."
        # For trainer rank other than 0, consume weights without sending.
        if self.rank < 0:
            for name, weight in weights:
                pass
            return
        send_buf, recv_buf = self.send_buf, self.recv_buf
        broadcast_op = None
        start_time = time.time()
        bucket_meta: dict[str, TensorMeta] = {}
        offset = 0
        for name, weight in weights:
            # fill the tensor bucket
            if offset + weight.nbytes > self.bucket_size:
                # make sure the bucket's contents are materialized before broadcast
                torch.cuda.synchronize()
                # wait previous broadcast op finish
                if broadcast_op is not None:
                    await broadcast_op.wait_for_complete()
                broadcast_op = BroadcastOperation(
                    rank=self.rank,
                    group_name=self.group_name,
                    bucket=send_buf,
                    metadata={"bucket_meta": bucket_meta, "is_last": False},
                    socket=self.socket,
                    topic=self.topic,
                )
                # swap send_buf and recv_buf
                send_buf, recv_buf = recv_buf, send_buf
                bucket_meta = {}
                offset = 0
            assert offset + weight.nbytes <= self.bucket_size, (
                f"Weight {name}({weight.shape}, {weight.dtype}) is too large to fit in the bucket."
            )
            # record where this tensor lives inside the bucket
            bucket_meta[name] = {
                "name": name,
                "shape": weight.shape,
                "dtype": weight.dtype,
                "offset": offset,
            }
            # copy the raw bytes of the weight into the (cupy) bucket
            send_buf[offset : offset + weight.nbytes] = cp.asarray(weight.view(-1).view(torch.uint8))
            offset += weight.nbytes
        # broadcast last bucket
        torch.cuda.synchronize()
        if broadcast_op is not None:
            await broadcast_op.wait_for_complete()
        broadcast_op = BroadcastOperation(
            rank=self.rank,
            group_name=self.group_name,
            bucket=send_buf,
            metadata={"bucket_meta": bucket_meta, "is_last": True},
            socket=self.socket,
            topic=self.topic,
        )
        await broadcast_op.wait_for_complete()
        logger.info(f"Rank {self.rank} send weights done, time cost: {time.time() - start_time:.2f}s")

    @torch.no_grad()
    async def receive_weights(self) -> AsyncGenerator[tuple[str, torch.Tensor], None]:
        """Receive the weights of the model.

        Mirrors send_weights: while the next bucket is being broadcast into recv_buf,
        tensors are yielded as views over the previously received buffer (send_buf
        after the swap). Consumers must not hold on to yielded tensors across
        iterations — the underlying buffer is reused.

        Yields:
            A tuple of the name of the weight tensor and the tensor itself.
        """
        assert self.rank > 0, "Rank 0 should not receive weights."
        send_buf, recv_buf = self.send_buf, self.recv_buf
        total_bytes, total_params = 0, 0
        # receive first bucket
        start_time = time.time()
        broadcast_op = BroadcastOperation(
            rank=self.rank,
            group_name=self.group_name,
            bucket=recv_buf,
            metadata=None,
            socket=self.socket,
            topic=self.topic,
        )
        metadata = await broadcast_op.wait_for_complete()
        total_bytes += self.bucket_size
        total_params += len(metadata["bucket_meta"])
        # swap send_buf and recv_buf
        send_buf, recv_buf = recv_buf, send_buf
        while not metadata["is_last"]:
            # 1. receive next bucket
            broadcast_op = BroadcastOperation(
                rank=self.rank,
                group_name=self.group_name,
                bucket=recv_buf,
                metadata=None,
                socket=self.socket,
                topic=self.topic,
            )
            # 2. yield tensor from send_buf
            for name, meta in metadata["bucket_meta"].items():
                dtype, shape = meta["dtype"], meta["shape"]
                size = dtype.itemsize * shape.numel()
                tensor = send_buf[meta["offset"] : meta["offset"] + size].view(dtype=dtype).view(shape)
                yield name, tensor
            # 3. wait for next bucket broadcast finish
            metadata = await broadcast_op.wait_for_complete()
            total_bytes += self.bucket_size
            total_params += len(metadata["bucket_meta"])
            # 4. swap send_buf and recv_buf
            # ensure any in-flight device work on the yielded views is finished
            # before the buffer they alias is reused for the next bucket
            torch.cuda.synchronize()
            send_buf, recv_buf = recv_buf, send_buf
        # yield tensor from send_buf
        for name, meta in metadata["bucket_meta"].items():
            dtype, shape = meta["dtype"], meta["shape"]
            size = dtype.itemsize * shape.numel()
            tensor = send_buf[meta["offset"] : meta["offset"] + size].view(dtype=dtype).view(shape)
            yield name, tensor
        time_cost = time.time() - start_time
        bandwidth = total_bytes / time_cost / (1024 * 1024 * 1024)
        logger.info(
            f"Rank {self.rank} receive weights done, total_params: {total_params}, "
            f"time cost: {time_cost:.2f}s, bandwidth: {bandwidth:.2f} GB/s"
        )
| {
"repo_id": "verl-project/verl",
"file_path": "verl/checkpoint_engine/nccl_checkpoint_engine.py",
"license": "Apache License 2.0",
"lines": 305,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/checkpoint_engine/nixl_checkpoint_engine.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
import os
import time
import uuid
from collections import defaultdict, deque
from dataclasses import dataclass
from typing import AsyncGenerator, Generator
from unittest.mock import patch
with patch("importlib.metadata.distributions", return_value=[]):
import cupy as cp
import nixl._api as nixl_api
import nixl._bindings as nixl_bindings
import ray
import torch
import zmq
import zmq.asyncio
from verl.checkpoint_engine.base import CheckpointEngine, CheckpointEngineRegistry, TensorMeta
from verl.utils.net_utils import get_free_port, is_valid_ipv6_address
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
@dataclass
class NixlAgentMetadata:
    """Identity and endpoints of one NixlAgent, exchanged so peers can connect."""

    # unique agent name (uuid4 string)
    agent_name: str
    # serialized NIXL agent metadata, consumed by nixl's add_remote_agent
    agent_metadata: bytes
    # IP of this agent's zmq PULL socket
    zmq_ip: str
    # TCP port of this agent's zmq PULL socket
    zmq_port: int
class NixlAgent:
    """This is a wrapper class for nixl_agent, the main purpose is to use ZeroMQ instead of
    `nixl_agent.send_notif` to send bucket tensor metadata.

    Each agent binds one zmq PULL socket (server) and keeps one PUSH socket per remote
    agent. Incoming zmq messages and NIXL notifications are demultiplexed into
    per-remote FIFO queues so they can be awaited per peer.
    """

    def __init__(self):
        self.agent_name = str(uuid.uuid4())
        self.agent = nixl_api.nixl_agent(self.agent_name)
        # per-remote-agent FIFO of NIXL transfer-complete notifications
        self.notifications: dict[str, deque[bytes]] = defaultdict(deque)
        self.start_zmq_server()
        # PUSH sockets to remote agents, keyed by remote agent name
        self.zmq_clients: dict[str, zmq.Socket] = {}
        # per-remote-agent FIFO of received zmq messages
        self.messages: dict[str, deque[bytes]] = defaultdict(deque)

    def __getattr__(self, name):
        # Delegate any attribute not defined on the wrapper to the underlying nixl
        # agent (bound methods are returned directly; no wrapper needed).
        return getattr(self.agent, name)

    def get_agent_metadata(self) -> "NixlAgentMetadata":
        """Return this agent's identity/endpoints for exchange with peers."""
        return NixlAgentMetadata(
            agent_name=self.agent_name,
            agent_metadata=self.agent.get_agent_metadata(),
            zmq_ip=self.ip,
            zmq_port=self.listen_port,
        )

    def start_zmq_server(self):
        """Bind the PULL socket remote agents push messages to."""
        self.ip = ray.util.get_node_ip_address().strip("[]")
        self.listen_port, _ = get_free_port(self.ip)
        context = zmq.asyncio.Context()
        self.socket = context.socket(zmq.PULL)
        if is_valid_ipv6_address(self.ip):
            address = f"tcp://[{self.ip}]:{self.listen_port}"
            self.socket.setsockopt(zmq.IPV6, 1)
        else:
            address = f"tcp://{self.ip}:{self.listen_port}"
        self.socket.bind(address)

    def add_remote_agent(self, metadata: "NixlAgentMetadata") -> str:
        """Register a remote agent with NIXL and open a PUSH socket to it.

        Returns:
            str: the remote agent's name.
        """
        agent_name = self.agent.add_remote_agent(metadata.agent_metadata).decode("utf-8")
        assert agent_name == metadata.agent_name, f"Agent name {agent_name} not equal to {metadata.agent_name}"
        context = zmq.Context()
        socket = context.socket(zmq.PUSH)
        if is_valid_ipv6_address(metadata.zmq_ip):
            address = f"tcp://[{metadata.zmq_ip}]:{metadata.zmq_port}"
            socket.setsockopt(zmq.IPV6, 1)
        else:
            address = f"tcp://{metadata.zmq_ip}:{metadata.zmq_port}"
        socket.connect(address)
        self.zmq_clients[agent_name] = socket
        return agent_name

    def remove_remote_agent(self, agent_name: str):
        """Deregister a remote agent and close its PUSH socket."""
        self.agent.remove_remote_agent(agent_name)
        socket = self.zmq_clients.pop(agent_name)
        socket.close()

    def send_message(self, agent_name, message: dict):
        """Push a message (tagged with our own name) to a remote agent."""
        socket = self.zmq_clients[agent_name]
        socket.send_pyobj((self.agent_name, message), zmq.DONTWAIT)

    async def read_message(self, agent_name: str) -> dict:
        """Pop the next message from `agent_name`, draining the socket as needed."""
        while len(self.messages[agent_name]) == 0:
            # messages from other senders are queued for their own readers
            recv_agent_name, message = await self.socket.recv_pyobj()
            self.messages[recv_agent_name].append(message)
        return self.messages[agent_name].popleft()

    async def get_notification(self, remote_name: str) -> bytes:
        """Pop the next NIXL notification from `remote_name`, polling until one arrives."""
        while len(self.notifications[remote_name]) == 0:
            notifs = self.agent.get_new_notifs()
            # BUGFIX: the loop variable previously shadowed the `remote_name`
            # parameter, so the while-condition and the final popleft() could act
            # on whichever sender happened to be iterated last, returning another
            # agent's notification.
            for sender_name, notif in notifs.items():
                self.notifications[sender_name].extend(notif)
            await asyncio.sleep(0)
        return self.notifications[remote_name].popleft()
class ReadableOperation:
    """Encapsulates a readable operation to remote agent.

    1. send metadata to remote agent
    2. wait until remote agent read complete.

    Args:
        agent (NixlAgent): The Nixl agent.
        remote_agent (str): The name of the remote agent.
        local_descs (nixl_bindings.nixlXferDList): The local transfer descriptors.
        metadata (dict): Metadata for the read operation.
    """

    def __init__(
        self,
        agent: NixlAgent,
        remote_agent: str,
        local_descs: nixl_bindings.nixlXferDList,
        metadata: dict,
    ):
        self.agent = agent
        self.remote_agent = remote_agent
        self.local_descs = local_descs
        # one-shot key the remote echoes back as a NIXL notification when its read is done
        self.notify_key = uuid.uuid4().bytes
        # hand the remote everything it needs to issue a READ against our buffer;
        # sending happens immediately on construction
        message = {"notify_key": self.notify_key, "remote_descs": self.local_descs, **metadata}
        self.agent.send_message(self.remote_agent, message)

    async def wait_for_complete(self):
        """Block until remote agent read complete."""
        notification = await self.agent.get_notification(self.remote_agent)
        # the notification payload must be our own notify_key
        assert self.notify_key == notification, f"Notify key {self.notify_key} not equal to {notification}"
        logger.debug(f"ReadableOperation to {self.remote_agent} complete")
class ReadOperation:
    """Encapsulates a read operation from remote agent.

    1. read metadata from remote agent
    2. start read transfer operation
    3. wait until read complete

    Args:
        agent (NixlAgent): The Nixl agent.
        remote_agent (str): The name of the remote agent.
        local_descs (nixl_bindings.nixlXferDList): The local transfer descriptors.
        bucket_size (int): The size of the bucket in bytes.
    """

    def __init__(self, agent: NixlAgent, remote_agent: str, local_descs: nixl_bindings.nixlXferDList, bucket_size: int):
        self.agent = agent
        self.remote_agent = remote_agent
        self.local_descs = local_descs
        # filled in by read_metadata / begin_read
        self.remote_descs = None
        self.xfer_handle = None
        self.notify_key = None
        self.bucket_size = bucket_size
        self.start_time = None

    async def read_metadata(self) -> dict:
        """Block until the remote agent sends the metadata.

        Returns:
            dict: Metadata from the remote agent (transfer descriptors and notify
            key are popped out and kept on this object).
        """
        metadata = await self.agent.read_message(self.remote_agent)
        self.remote_descs = metadata.pop("remote_descs")
        self.notify_key = metadata.pop("notify_key")
        return metadata

    def begin_read(self):
        """Start the read operation."""
        assert self.remote_descs is not None and self.notify_key is not None
        # the notify_key doubles as the completion notification delivered to the remote
        self.xfer_handle = self.agent.initialize_xfer(
            "READ", self.local_descs, self.remote_descs, self.remote_agent, self.notify_key
        )
        state = self.agent.transfer(self.xfer_handle)
        assert state != "ERR", f"Read from {self.remote_agent} got to {state} state."
        self.start_time = time.time()

    async def wait_for_complete(self):
        """Block until the read operation complete."""
        while True:
            state = self.agent.check_xfer_state(self.xfer_handle)
            if state == "ERR":
                logger.error(f"Read from {self.remote_agent} got to {state} state.")
                # NOTE(review): exits the whole process on transfer error; consider
                # raising instead so callers can clean up — confirm this is intended.
                exit(-1)
            elif state == "DONE":
                break
            else:
                # yield to the event loop while the transfer is in flight
                await asyncio.sleep(0)
        self.agent.release_xfer_handle(self.xfer_handle)
        end_time = time.time()
        bandwidth = self.bucket_size / (end_time - self.start_time) / (1024 * 1024 * 1024)
        logger.debug(f"ReadOperation read data from {self.remote_agent} complete, bandwidth: {bandwidth:.2f} GB/s")
@CheckpointEngineRegistry.register("nixl")
class NIXLCheckpointEngine(CheckpointEngine):
"""NIXL checkpoint engine with p2p communication, support various backends: ucx, uccl, mooncacke, etc.
For UCX backend, some environment variables need to be set: UCX_TLS, UCX_IB_GID_INDEX, UCX_IB_DEVICES, etc.
Please refer to: https://openucx.readthedocs.io/en/master/faq.html
Args:
bucket_size (int): Bucket size in bytes to transfer multiple weights at one time. Note that we use
two buffer to send and recv weights at same time, so the device memory overhead is 2 * bucket_size.
device (str): The device to use for the checkpoint engine, "cpu" or "cuda".
rollout_dtype (torch.dtype): The dtype of the weights received from rollout workers. Defaults to torch.bfloat16.
"""
    def __init__(
        self,
        bucket_size: int,
        device: str = "cuda",
        rollout_dtype: torch.dtype = torch.bfloat16,
        is_master: bool = False,
    ):
        """See class docstring for parameter semantics."""
        # bucket size in bytes; two buckets (send + recv) are allocated in prepare()
        self.bucket_size = bucket_size
        # "cuda" or "cpu"; selects cupy-backed vs pinned-host buffers in prepare()
        self.device = device
        self.rollout_dtype = rollout_dtype
        # every worker owns one NIXL agent for memory registration and p2p transfers
        self.agent = NixlAgent()
        self.is_master = is_master
    def prepare(self) -> NixlAgentMetadata:
        """Prepare send and recv bucket.

        Allocates the double buffers, registers them with NIXL, and precomputes the
        transfer descriptor lists used by the read/readable operations.

        Returns:
            NixlAgentMetadata: The metadata of the current nixl agent.
        """
        # For CUDA buffers, allocate via cupy (wrapped as torch tensors) to avoid a
        # memory register error when `PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True`.
        if self.device == "cuda":
            send_buf = cp.zeros(self.bucket_size, dtype=cp.uint8)
            recv_buf = cp.zeros(self.bucket_size, dtype=cp.uint8)
            self.send_buf = torch.as_tensor(send_buf, dtype=torch.uint8)
            self.recv_buf = torch.as_tensor(recv_buf, dtype=torch.uint8)
        else:
            # pinned host memory so device<->host copies can be async
            self.send_buf = torch.zeros(self.bucket_size, dtype=torch.uint8, device=self.device, pin_memory=True)
            self.recv_buf = torch.zeros(self.bucket_size, dtype=torch.uint8, device=self.device, pin_memory=True)
        self.send_reg_descs = self.agent.register_memory(self.send_buf)
        self.recv_reg_descs = self.agent.register_memory(self.recv_buf)
        self.send_descs = self.agent.get_xfer_descs(self.send_buf)
        self.recv_descs = self.agent.get_xfer_descs(self.recv_buf)
        return self.agent.get_agent_metadata()
@classmethod
def build_topology(cls, trainer_world_size: int, rollout_world_size: int, metadata: list[dict]):
trainer_kwargs = {
"method": ["init_process_group"] * trainer_world_size,
"rank": [0] + [-1] * (trainer_world_size - 1),
"world_size": [rollout_world_size + 1] * trainer_world_size,
"prev_agent_metadata": [None] * trainer_world_size,
"next_agent_metadata": [metadata[-rollout_world_size]] + [None] * (trainer_world_size - 1),
}
rollout_kwargs = {
"method": ["init_process_group"] * rollout_world_size,
"rank": list(range(1, rollout_world_size + 1)),
"world_size": [rollout_world_size + 1] * rollout_world_size,
"prev_agent_metadata": [metadata[0]] + metadata[-rollout_world_size:-1],
"next_agent_metadata": metadata[-rollout_world_size + 1 :] + [None],
}
return trainer_kwargs, rollout_kwargs
    def init_process_group(
        self, rank: int, world_size: int, prev_agent_metadata: NixlAgentMetadata, next_agent_metadata: NixlAgentMetadata
    ):
        """Setup the communication with the previous and next agent.

        Args:
            rank (int): The rank of the current process (-1 for non-participating
                trainer workers).
            world_size (int): The total number of processes.
            prev_agent_metadata (NixlAgentMetadata): The metadata of the previous nixl agent.
            next_agent_metadata (NixlAgentMetadata): The metadata of the next nixl agent.
        """
        # sanity-check the chain position implied by rank:
        # head (rank 0) has only a next neighbor, tail only a prev, middle has both,
        # and non-participating workers (rank < 0) have neither
        if rank < 0:
            assert not prev_agent_metadata and not next_agent_metadata, (
                f"rank {rank} should not have prev_agent_metadata or next_agent_metadata"
            )
        elif rank == 0:
            assert not prev_agent_metadata and next_agent_metadata, f"rank {rank} should have next_agent_metadata"
        elif 0 < rank < world_size - 1:
            assert prev_agent_metadata and next_agent_metadata, (
                f"rank {rank} should have prev_agent_metadata and next_agent_metadata"
            )
        elif rank == world_size - 1:
            assert prev_agent_metadata and not next_agent_metadata, (
                f"rank {rank} should have prev_agent_metadata and not next_agent_metadata"
            )
        self.rank = rank
        self.world_size = world_size
        self.prev_agent = None
        self.next_agent = None
        if prev_agent_metadata is not None:
            self.prev_agent = self.agent.add_remote_agent(prev_agent_metadata)
        if next_agent_metadata is not None:
            self.next_agent = self.agent.add_remote_agent(next_agent_metadata)
        logger.info(
            f"init_process_group rank: {self.rank}, world_size: {self.world_size}, "
            f"prev_agent: {self.prev_agent}, next_agent: {self.next_agent}"
        )
def finalize(self):
"""Cleanup communication with the previous and next agent, and deregister the memory."""
if self.prev_agent:
self.agent.remove_remote_agent(self.prev_agent)
if self.next_agent:
self.agent.remove_remote_agent(self.next_agent)
self.agent.deregister_memory(self.send_reg_descs)
self.agent.deregister_memory(self.recv_reg_descs)
self.send_buf = None
self.recv_buf = None
self.send_reg_descs = None
self.recv_reg_descs = None
self.send_descs = None
self.recv_descs = None
self.rank = None
self.world_size = None
self.prev_agent = None
self.next_agent = None
    @torch.no_grad()
    async def send_weights(self, weights: Generator[tuple[str, torch.Tensor], None, None]):
        """Send the weights of the model.

        Packs weights into the current send bucket; when a weight would overflow the
        bucket, exposes the bucket to the next agent for reading (ReadableOperation)
        and swaps buffers so packing continues while the remote read is in flight.

        Args:
            weights: A generator that yields the name of the weight tensor and the tensor itself.
        """
        assert self.rank <= 0, "Trainer workers other than rank 0 should not send weights."
        # For trainer workers other than rank 0, just consume weights and do nothing.
        if self.rank < 0:
            for name, weight in weights:
                pass
            return
        assert self.next_agent is not None, "Next agent is not set."
        send_buf, recv_buf = self.send_buf, self.recv_buf
        send_descs, recv_descs = self.send_descs, self.recv_descs
        readable_op = None
        start_time = time.time()
        bucket_meta: dict[str, TensorMeta] = {}
        offset = 0
        for name, weight in weights:
            # fill the tensor bucket
            if offset + weight.nbytes > self.bucket_size:
                # flush pending non-blocking copies before exposing the bucket
                torch.cuda.synchronize()
                # wait previous bucket to be received
                if readable_op is not None:
                    await readable_op.wait_for_complete()
                # send bucket meta to next agent
                readable_op = ReadableOperation(
                    self.agent,
                    self.next_agent,
                    send_descs,
                    {"bucket_meta": bucket_meta, "is_last": False},
                )
                # swap send and recv buf
                send_buf, recv_buf = recv_buf, send_buf
                send_descs, recv_descs = recv_descs, send_descs
                bucket_meta = {}
                offset = 0
            assert offset + weight.nbytes <= self.bucket_size, (
                f"Weight {name}({weight.shape}, {weight.dtype}) is too large to fit in the bucket."
            )
            # record where this tensor lives inside the bucket
            bucket_meta[name] = {
                "name": name,
                "shape": weight.shape,
                "dtype": weight.dtype,
                "offset": offset,
            }
            send_buf[offset : offset + weight.nbytes].copy_(weight.view(-1).view(torch.uint8), non_blocking=True)
            offset += weight.nbytes
        # send last bucket meta to next agent
        torch.cuda.synchronize()
        if readable_op is not None:
            await readable_op.wait_for_complete()
        readable_op = ReadableOperation(
            self.agent, self.next_agent, send_descs, {"bucket_meta": bucket_meta, "is_last": True}
        )
        await readable_op.wait_for_complete()
        logger.info(f"Rank {self.rank} send weights done, time cost: {time.time() - start_time:.2f}s")
    @torch.no_grad()
    async def receive_weights(self) -> AsyncGenerator[tuple[str, torch.Tensor], None]:
        """Receive the weights of the model.

        Pipelined relay: while tensors of the current bucket are yielded from
        one buffer, the next bucket is read from the previous agent into the
        other buffer, and (if this rank has a successor) the current bucket is
        simultaneously exposed for the next agent to read.

        Yields:
            A tuple of the name of the weight tensor and the tensor itself.
            Yielded tensors are views into the staging buffer and are only
            valid until the buffers are swapped on the next loop iteration.
        """
        assert self.prev_agent is not None, "Previous agent is not set."
        # Local aliases so buffer/descriptor pairs can be swapped cheaply.
        send_buf, recv_buf = self.send_buf, self.recv_buf
        send_descs, recv_descs = self.send_descs, self.recv_descs
        total_bytes, total_params = 0, 0
        # receive first bucket from previous agent
        start_time = time.time()
        read_op = ReadOperation(self.agent, self.prev_agent, recv_descs, self.bucket_size)
        metadata = await read_op.read_metadata()
        read_op.begin_read()
        await read_op.wait_for_complete()
        total_bytes += self.bucket_size
        total_params += len(metadata["bucket_meta"])
        # swap send and recv buf
        send_buf, recv_buf = recv_buf, send_buf
        send_descs, recv_descs = recv_descs, send_descs
        while not metadata["is_last"]:
            # 1. send bucket to next agent
            readable_op = None
            if self.next_agent is not None:
                readable_op = ReadableOperation(
                    self.agent,
                    self.next_agent,
                    send_descs,
                    metadata,
                )
            # 2. receive bucket from previous agent
            read_op = ReadOperation(self.agent, self.prev_agent, recv_descs, self.bucket_size)
            next_metadata = await read_op.read_metadata()
            read_op.begin_read()
            # 3. yield tensor from send_buf
            for name, meta in metadata["bucket_meta"].items():
                dtype, shape = meta["dtype"], meta["shape"]
                size = dtype.itemsize * shape.numel()
                # Reinterpret the raw bytes back into the original dtype/shape.
                tensor = send_buf[meta["offset"] : meta["offset"] + size].view(dtype=dtype).view(shape)
                yield name, tensor
            # 4. wait for next agent read complete and read from previous agent complete
            if readable_op is not None:
                await readable_op.wait_for_complete()
            await read_op.wait_for_complete()
            # total_bytes counts whole buckets, so the bandwidth figure below
            # is based on transferred buffer size, not packed payload size.
            total_bytes += self.bucket_size
            total_params += len(next_metadata["bucket_meta"])
            # 5. swap send and recv buf
            torch.cuda.synchronize()  # sync non-blocking copy
            metadata = next_metadata
            send_buf, recv_buf = recv_buf, send_buf
            send_descs, recv_descs = recv_descs, send_descs
        # send last bucket to next agent
        readable_op = None
        if self.next_agent is not None:
            readable_op = ReadableOperation(
                self.agent,
                self.next_agent,
                send_descs,
                metadata,
            )
        # yield tensor from send_buf
        for name, meta in metadata["bucket_meta"].items():
            dtype, shape = meta["dtype"], meta["shape"]
            size = dtype.itemsize * shape.numel()
            tensor = send_buf[meta["offset"] : meta["offset"] + size].view(dtype=dtype).view(shape)
            yield name, tensor
        # wait for next agent read complete
        if readable_op is not None:
            await readable_op.wait_for_complete()
        time_cost = time.time() - start_time
        bandwidth = total_bytes / time_cost / (1024 * 1024 * 1024)
        logger.info(
            f"Rank {self.rank} receive weights done, total_params: {total_params}, "
            f"time cost: {time_cost:.2f}s, bandwidth: {bandwidth:.2f} GB/s"
        )
| {
"repo_id": "verl-project/verl",
"file_path": "verl/checkpoint_engine/nixl_checkpoint_engine.py",
"license": "Apache License 2.0",
"lines": 439,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/trainer/ppo/prefix_grouper_utils.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import torch
from prefix_grouper import PrefixGrouper
from verl.utils.torch_functional import logprobs_from_logits
def build_position_ids_for_prefix_grouper(prefix_grouper: PrefixGrouper) -> torch.Tensor:
    """Build position_ids for PrefixGrouper where each response restarts from prefix_len."""
    mask = prefix_grouper.padding_mask
    dev = mask.device
    groups = prefix_grouper.group_info
    position_ids = torch.zeros(len(groups), mask.size(1), dtype=torch.long, device=dev)
    for row, group in enumerate(groups):
        p_len = group.prefix_len
        # Shared prefix occupies positions 0 .. p_len-1.
        position_ids[row, :p_len] = torch.arange(p_len, device=dev)
        write_at = p_len
        for s_len in group.suffix_lens:
            if s_len <= 0:
                continue
            # Every response restarts its position counter at p_len.
            position_ids[row, write_at : write_at + s_len] = torch.arange(p_len, p_len + s_len, device=dev)
            write_at += s_len
    return position_ids
def build_pg_from_micro_batch(
    micro_batch: dict,
    pad_token_id: int,
    padding_mode: str = "right",
):
    """Build PrefixGrouper from micro_batch dict containing prompts, responses, response_mask, uid.

    Rows sharing a uid are assumed consecutive; each run of equal uids forms
    one group whose first row supplies the shared prefix prompt. Returns
    (prefix_grouper, concat_input_ids, attention_mask, position_ids,
    responses, response_mask).
    """
    prompts = micro_batch["prompts"]
    responses = micro_batch["responses"]
    response_mask = micro_batch["response_mask"]
    uids = micro_batch["uid"]

    # Run-length encode consecutive identical uids into group sizes.
    total_rows = responses.size(0)
    group_sizes = []
    run_length = 1
    for row in range(1, total_rows):
        if uids[row] == uids[row - 1]:
            run_length += 1
        else:
            group_sizes.append(run_length)
            run_length = 1
    group_sizes.append(run_length)

    # The first row of each group holds that group's shared prefix.
    first_rows = []
    cursor = 0
    for size in group_sizes:
        first_rows.append(cursor)
        cursor += size
    prefix_indices = torch.tensor(first_rows, device=prompts.device)

    prefix_ids = prompts.index_select(0, prefix_indices)
    prefix_mask = prefix_ids.ne(pad_token_id)
    prefix_grouper = PrefixGrouper.from_ungrouped_masks(
        prefix_mask=prefix_mask,
        suffix_mask=response_mask,
        group_sizes=group_sizes,
        padding_mode=padding_mode,
        device=prompts.device,
    )
    concat_input_ids = prefix_grouper.concat_input(prefix_ids, prefix_mask, responses, response_mask)
    position_ids = build_position_ids_for_prefix_grouper(prefix_grouper)
    return (
        prefix_grouper,
        concat_input_ids,
        prefix_grouper.padding_mask,
        position_ids,
        responses,
        response_mask,
    )
def pg_forward(
    model,
    prefix_grouper,
    concat_input_ids,
    attention_mask,
    position_ids,
    completion_ids,
    completion_mask,
    *,
    temperature=1.0,
    padding_mode="right",
    include_prefix_last=1,
    calculate_entropy=False,
    entropy_fn=None,
):
    """Run the grouped forward pass and compute per-token log-probs.

    Returns (log_probs, entropy-or-None, shifted suffix mask).
    """
    outputs = model(
        input_ids=concat_input_ids,
        attention_mask=attention_mask,
        position_ids=position_ids,
        use_cache=False,
        prefix_grouper=prefix_grouper,
    )
    # Prefix outputs are not needed for the loss; only the suffix part is.
    _, _, suffix_logits, suffix_mask_full = prefix_grouper.split_output(
        outputs.logits, include_prefix_last=include_prefix_last
    )
    labels_right = prefix_grouper.convert_padding(
        completion_ids,
        completion_mask,
        padding_mode=padding_mode,
    )
    # Drop the last step (it has no next-token target) and apply temperature.
    scaled = suffix_logits[:, :-1].float() / temperature
    shifted_mask = suffix_mask_full[:, 1:]
    log_probs = logprobs_from_logits(scaled, labels_right)
    entropy = entropy_fn(scaled) if (calculate_entropy and entropy_fn is not None) else None
    return log_probs, entropy, shifted_mask
def forward_micro_batch_with_prefix_grouper(
    micro_batch: dict,
    model,
    temperature: float,
    calculate_entropy: bool,
    device_name: str,
    param_dtype,
    use_chunking_entropy: bool = False,
):
    """
    Forward pass using PrefixGrouper for shared-prefix optimization.

    Args:
        micro_batch: Dict containing prompts, responses, response_mask, uid, etc.
        model: The actor module.
        temperature: Temperature for logits scaling.
        calculate_entropy: Whether to compute entropy.
        device_name: Device name for autocast.
        param_dtype: Parameter dtype for autocast.
        use_chunking_entropy: Whether to use chunking entropy function.

    Returns:
        tuple: (entropy, log_probs) where entropy may be None if not calculated.
    """
    import verl.utils.torch_functional as verl_F

    # Select the entropy implementation up front; None disables it.
    entropy_fn = None
    if calculate_entropy:
        entropy_fn = verl_F.entropy_from_logits_with_chunking if use_chunking_entropy else verl_F.entropy_from_logits

    (
        prefix_grouper,
        concat_input_ids,
        attention_mask,
        position_ids,
        responses,
        response_mask,
    ) = build_pg_from_micro_batch(
        micro_batch,
        pad_token_id=micro_batch.get("pad_token_id", 0),
        padding_mode="right",
    )

    with torch.autocast(device_type=device_name, dtype=param_dtype):
        log_probs, entropy, pg_suffix_mask = pg_forward(
            model=model,
            prefix_grouper=prefix_grouper,
            concat_input_ids=concat_input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            completion_ids=responses,
            completion_mask=response_mask,
            temperature=temperature,
            padding_mode="right",
            include_prefix_last=1,
            calculate_entropy=calculate_entropy,
            entropy_fn=entropy_fn,
        )

    # Zero out positions that fall outside the valid suffix.
    invalid = pg_suffix_mask == 0
    log_probs = log_probs.masked_fill(invalid, 0.0)
    if entropy is not None:
        entropy = entropy.masked_fill(invalid, 0.0)

    # Right-pad with zeros up to the rollout response length if the grouped
    # layout produced a shorter sequence.
    target_len = responses.size(1)
    if log_probs.size(1) != target_len:
        n_rows, cur_len = log_probs.size(0), log_probs.size(1)
        padded = log_probs.new_zeros(n_rows, target_len)
        padded[:, :cur_len] = log_probs
        log_probs = padded
        if entropy is not None:
            padded_ent = entropy.new_zeros(n_rows, target_len)
            padded_ent[:, :cur_len] = entropy
            entropy = padded_ent
    return entropy, log_probs
| {
"repo_id": "verl-project/verl",
"file_path": "verl/trainer/ppo/prefix_grouper_utils.py",
"license": "Apache License 2.0",
"lines": 193,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:tests/models/test_tiled_mlp_accuracy.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test script to verify TiledMLP accuracy by comparing logits and gradients
between regular MLP and TiledMLP under FSDP2.
Run with: torchrun --nproc_per_node=2 tests/test_tiled_mlp_accuracy.py
"""
import torch
import torch.distributed as dist
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.fsdp import fully_shard
def setup_distributed():
    """Initialize the NCCL process group and bind this process to its GPU.

    NOTE: assumes one process per GPU on a single node, so the global rank
    doubles as the local CUDA device index.
    """
    dist.init_process_group(backend="nccl")
    my_rank = dist.get_rank()
    n_procs = dist.get_world_size()
    torch.cuda.set_device(my_rank)
    return my_rank, n_procs
def create_model(model_name="Qwen/Qwen3-1.7B", num_layers=2):
    """Load a Qwen3-1.7B model with only 2 layers from pretrained weights."""
    from transformers import AutoConfig, AutoModelForCausalLM

    # Truncate the layer stack via config so only the first num_layers
    # pretrained layers are materialized.
    cfg = AutoConfig.from_pretrained(model_name, trust_remote_code=True)
    cfg.num_hidden_layers = num_layers
    return AutoModelForCausalLM.from_pretrained(
        model_name,
        config=cfg,
        torch_dtype=torch.bfloat16,
        trust_remote_code=True,
        attn_implementation="flash_attention_2",
    )
def apply_fsdp2(model, device_mesh):
    """Apply FSDP2 sharding: each decoder layer first, then the root module."""
    for decoder_layer in model.model.layers:
        fully_shard(decoder_layer, mesh=device_mesh)
    fully_shard(model, mesh=device_mesh)
    return model
def run_forward_backward(model, input_ids, labels):
    """Run one forward/backward pass.

    Returns (detached logits, dict of detached MLP parameter grads, loss value).
    """
    model.zero_grad()
    outputs = model(input_ids=input_ids, labels=labels)
    detached_logits = outputs.logits.clone().detach()
    outputs.loss.backward()
    # Snapshot only MLP gradients that materialized on this rank.
    mlp_grads = {
        name: param.grad.clone().detach()
        for name, param in model.named_parameters()
        if "mlp" in name and param.grad is not None
    }
    return detached_logits, mlp_grads, outputs.loss.item()
def compare_results(logits1, grads1, logits2, grads2, rank):
    """Compare logits and gradients between two runs; True iff all grads match."""
    # Logit deltas are computed on every rank, reported only on rank 0.
    delta = (logits1 - logits2).abs()
    logits_max_diff = delta.max().item()
    logits_mean_diff = delta.mean().item()

    all_pass = True
    grad_results = []
    # Only params present in both runs are comparable (FSDP shards the rest).
    for name in sorted(grads1.keys()):
        if name not in grads2:
            continue
        d = (grads1[name] - grads2[name]).abs()
        max_d = d.max().item()
        mean_d = d.mean().item()
        ok = max_d < 1e-2  # tolerance chosen for bf16
        all_pass = all_pass and ok
        grad_results.append((name, max_d, mean_d, ok))

    # Only print on rank 0 to avoid duplicate output
    if rank == 0:
        print("\n=== Comparison Results ===")
        print("\nLogits:")
        print(f" Max diff: {logits_max_diff:.2e}")
        print(f" Mean diff: {logits_mean_diff:.2e}")
        print("\nMLP Parameter Gradients:")
        if grad_results:
            for name, max_diff, mean_diff, passed in grad_results:
                status = "✓" if passed else "✗"
                print(f" {name}: max={max_diff:.2e}, mean={mean_diff:.2e} {status}")
        else:
            print(" (Gradients sharded to other ranks under FSDP2)")
    return all_pass
def main():
    """End-to-end accuracy check for TiledMLP under FSDP2.

    Runs the identical forward/backward twice — once on a vanilla model and
    once with the TiledMLP monkey patch applied between model creation and
    FSDP2 wrapping — then compares logits and MLP gradients. Launch with
    torchrun (one process per GPU); only rank 0 prints.
    """
    rank, world_size = setup_distributed()
    device_mesh = init_device_mesh("cuda", (world_size,))
    model_name = "Qwen/Qwen3-1.7B"
    num_layers = 2
    if rank == 0:
        print(f"Running TiledMLP accuracy test with {world_size} GPUs")
        print(f"Model: {model_name} ({num_layers} layers, from pretrained)")
    dist.barrier()
    # ========== Create Model 1: WITHOUT TiledMLP ==========
    if rank == 0:
        print("\n" + "=" * 60)
        print("Creating Model 1 (without TiledMLP)")
        print("=" * 60)
    model1 = create_model(model_name, num_layers)
    model1 = apply_fsdp2(model1, device_mesh)
    model1 = model1.cuda()
    # Create deterministic input (fixed seed, so both models see identical data)
    torch.manual_seed(42)
    batch_size, seq_len = 2, 256
    vocab_size = model1.config.vocab_size
    input_ids = torch.randint(0, vocab_size, (batch_size, seq_len), device="cuda")
    labels = input_ids.clone()
    # ========== Run Model 1: WITHOUT TiledMLP ==========
    if rank == 0:
        print("\n" + "=" * 60)
        print("Running forward/backward on Model 1 (without TiledMLP)")
        print("=" * 60)
    logits1, grads1, loss1 = run_forward_backward(model1, input_ids, labels)
    if rank == 0:
        print(f"Loss: {loss1:.4f}")
    # Free model1 memory before creating model2
    del model1
    torch.cuda.empty_cache()
    dist.barrier()
    # ========== Create Model 2, apply TiledMLP patch, then FSDP2 ==========
    if rank == 0:
        print("\n" + "=" * 60)
        print("Creating Model 2 (with TiledMLP, patch before FSDP2)")
        print("=" * 60)
    model2 = create_model(model_name, num_layers)
    # Apply TiledMLP patch AFTER model instantiation but BEFORE FSDP2 wrap.
    # Patching the class forward still affects existing instances because
    # forward is looked up on the class at call time.
    if rank == 0:
        print("Applying TiledMLP monkey patch before FSDP2...")
    from verl.models.transformers.tiled_mlp import apply_tiled_mlp_monkey_patch

    apply_tiled_mlp_monkey_patch(num_shards=4, model_type="qwen3")
    model2 = apply_fsdp2(model2, device_mesh)
    model2 = model2.cuda()
    dist.barrier()
    # ========== Run Model 2: WITH TiledMLP ==========
    if rank == 0:
        print("\n" + "=" * 60)
        print("Running forward/backward on Model 2 (with TiledMLP)")
        print("=" * 60)
    logits2, grads2, loss2 = run_forward_backward(model2, input_ids, labels)
    if rank == 0:
        print(f"Loss: {loss2:.4f}")
    dist.barrier()
    # ========== Compare Results ==========
    all_pass = compare_results(logits1, grads1, logits2, grads2, rank)
    dist.barrier()
    if rank == 0:
        print("\n" + "=" * 60)
        print("SUMMARY")
        print("=" * 60)
        print(f"Loss diff: {abs(loss1 - loss2):.2e}")
        print(f"All gradient checks: {'PASS' if all_pass else 'FAIL'}")
    # Cleanup
    del model2
    torch.cuda.empty_cache()
    dist.destroy_process_group()


if __name__ == "__main__":
    main()
| {
"repo_id": "verl-project/verl",
"file_path": "tests/models/test_tiled_mlp_accuracy.py",
"license": "Apache License 2.0",
"lines": 167,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:verl/models/transformers/tiled_mlp.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
FSDP2-compatible TiledMLP implementation for memory-efficient MLP computation.
This module provides a tiled MLP implementation that reduces peak memory usage
by processing the MLP forward/backward pass in chunks (tiles). This is particularly
useful for large models with FSDP2 training.
"""
import threading
from typing import Optional
import torch
import torch.nn as nn
class GradientAccumulator:
    """Gradient accumulator for TiledMLP (FSDP compatible).

    Accumulates parameter gradients across the per-shard backward passes of
    TiledMLP in ``grad_accumulation_dtype`` (float32 by default), and only
    installs the final summed gradient on ``param.grad`` after the last
    shard. This keeps per-shard contributions from being double-counted by
    autograd's own accumulation.
    """

    def __init__(self, params: list[torch.nn.Parameter], total_shards: int, dtype: torch.dtype = None):
        self.params = params
        self.total_shards = total_shards
        self.grad_accumulation_dtype = dtype or torch.float32
        self.accumulated_grads = {}  # param -> running gradient sum in accumulation dtype
        self.hooks = []  # live hook handles; replaced on each install_hooks call
        self.lock = threading.Lock()
        for param in self.params:
            if param.grad is not None:
                # Fold any pre-existing gradient (e.g. from micro-batch
                # gradient accumulation) into the running sum, then clear it
                # so autograd does not add shard grads on top of it.
                self.accumulated_grads[param] = param.grad.to(self.grad_accumulation_dtype)
                param.grad = None
            else:
                self.accumulated_grads[param] = torch.zeros_like(param, dtype=self.grad_accumulation_dtype)

    def install_hooks(self, is_last_shard: bool):
        """Install gradient hooks for the current shard.

        Each hook adds the incoming gradient to the running sum. On the last
        shard it returns the full sum cast back to the parameter dtype (so
        that becomes the gradient autograd stores); on earlier shards it
        returns None so no per-shard gradient is kept on the parameter.
        """
        self._remove_hooks()

        def create_hook(param):
            def hook(grad):
                with self.lock:
                    grad_to_accum_dtype = grad.to(self.grad_accumulation_dtype)
                    self.accumulated_grads[param] += grad_to_accum_dtype
                    if is_last_shard:
                        param.grad = None  # Critical: prevent double accumulation
                        final_grad = self.accumulated_grads[param].to(param.dtype)
                        return final_grad
                    return None

            return hook

        for param in self.params:
            if param.requires_grad:
                hook = param.register_hook(create_hook(param))
                self.hooks.append(hook)

    def _remove_hooks(self):
        """Remove all registered hooks."""
        for hook in self.hooks:
            hook.remove()
        self.hooks.clear()

    def cleanup(self):
        """Cleanup hooks and resources."""
        self._remove_hooks()
class TiledMLP(torch.autograd.Function):
    """TiledMLP implementation for memory-efficient MLP computation.

    This autograd function processes MLP forward/backward in tiles (chunks)
    to reduce peak memory usage: only one tile's intermediate activations
    are alive at any time, and the forward is recomputed per tile during
    backward. Compatible with FSDP2.
    """

    @staticmethod
    def forward(ctx, fn, module, x, shards, compute_params):
        # fn(module, tile) must be a pure function of the module parameters
        # and the tile; nothing but the raw input is saved, since activations
        # are recomputed tile-by-tile in backward.
        ctx.fn = fn
        ctx.module = module
        ctx.shards = shards
        ctx.compute_params = [p for p in compute_params if p.requires_grad]
        ctx.save_for_backward(x)
        # Split on dim=-2 (seqlen dimension) following Liger Kernel style
        x_shards = list(torch.chunk(x, chunks=shards, dim=-2))
        with torch.no_grad():
            output_shards = [fn(module, x_shard) for x_shard in x_shards]
        output_unsharded = torch.cat(output_shards, dim=-2)
        return output_unsharded

    @staticmethod
    def backward(ctx, *grads):
        # Recompute each tile's forward under enable_grad and backprop the
        # matching slice of the incoming gradient through it; parameter
        # gradients are summed across tiles by GradientAccumulator.
        fn = ctx.fn
        (x,) = ctx.saved_tensors
        module = ctx.module
        shards = ctx.shards
        compute_params = ctx.compute_params
        x_requires_grad = x.requires_grad
        x = x.detach()
        x.requires_grad_(x_requires_grad)
        # Flatten to [bs*seqlen, hidden_size]
        hidden_size = x.shape[-1]
        x_shape_orig = x.shape
        x = x.view(-1, hidden_size)
        incoming_grad = grads[0].view(-1, hidden_size)
        # Pre-allocate input gradient
        x_grad = torch.zeros_like(x)
        # Split on dim=0
        x_shards = list(torch.chunk(x, chunks=shards, dim=0))
        grad_accumulator = GradientAccumulator(compute_params, shards, dtype=x.dtype)
        for i, x_shard in enumerate(x_shards):
            # NOTE(review): x_shard is a chunk view of the reshaped input;
            # setting requires_grad_/.grad on such views is relied upon here —
            # confirm against the PyTorch version in use.
            x_shard.requires_grad_(x_requires_grad)
            shard_step = x_shards[i].shape[0]
            # torch.chunk makes every chunk except possibly the last the same
            # size, so offsetting by the first chunk's length is valid.
            shard_offset = i * x_shards[0].shape[0]
            # narrow(0, ...) creates a contiguous view that can receive gradients
            x_shard.grad = x_grad.narrow(0, shard_offset, shard_step)
            incoming_grad_shard = incoming_grad.narrow(0, shard_offset, shard_step)
            is_last_shard = i + 1 == shards
            grad_accumulator.install_hooks(is_last_shard)
            with torch.enable_grad():
                output = fn(module, x_shard)
                torch.autograd.backward(output, incoming_grad_shard)
        grad_accumulator.cleanup()
        del grad_accumulator
        # Restore original shape
        x_grad = x_grad.view(x_shape_orig) if x_requires_grad else None
        # One slot per forward input: (fn, module, x, shards, compute_params).
        return (None, None, x_grad, None, None)
def _mlp_forward_fn(module, x):
    """Forward function for LlamaMLP / Qwen2MLP / Qwen3MLP style (SwiGLU)."""
    gated = module.act_fn(module.gate_proj(x))
    return module.down_proj(gated * module.up_proj(x))
# ============================================================================
# Monkey Patch Functions
# ============================================================================

# Maps a HuggingFace model_type to (module path, MLP class name) to patch.
_MODEL_TYPE_TO_MLP_CLASS = {
    "llama": ("transformers.models.llama.modeling_llama", "LlamaMLP"),
    "qwen2": ("transformers.models.qwen2.modeling_qwen2", "Qwen2MLP"),
    "qwen2_5": ("transformers.models.qwen2.modeling_qwen2", "Qwen2MLP"),  # Qwen2.5 uses Qwen2 MLP
    "qwen3": ("transformers.models.qwen3.modeling_qwen3", "Qwen3MLP"),
}


def apply_tiled_mlp_monkey_patch(
    num_shards: int = 4,
    model_type: Optional[str] = None,
):
    """Apply TiledMLP monkey patch based on model_type.

    Must be called BEFORE model instantiation to take effect: it rewrites the
    ``forward`` of the matching transformers MLP classes to the tiled,
    memory-efficient implementation.

    Args:
        num_shards: Number of shards to split the input into. Higher values
            reduce peak memory but may slightly impact performance.
        model_type: The model type string (e.g., "llama", "qwen2", "qwen3").
            If None, patches all supported model types.

    Returns:
        List of patched class names.

    Raises:
        ValueError: If ``model_type`` is given but unsupported.
    """
    import importlib

    if model_type is None:
        types_to_patch = list(_MODEL_TYPE_TO_MLP_CLASS)
    elif model_type in _MODEL_TYPE_TO_MLP_CLASS:
        types_to_patch = [model_type]
    else:
        raise ValueError(
            f"TiledMLP does not support model_type='{model_type}'. "
            f"Supported types: {list(_MODEL_TYPE_TO_MLP_CLASS.keys())}. "
            f"For SwiGLU-style MLPs, you can add support by extending _MODEL_TYPE_TO_MLP_CLASS "
            f"in verl/models/transformers/tiled_mlp.py"
        )

    patched = []
    for mtype in types_to_patch:
        module_path, class_name = _MODEL_TYPE_TO_MLP_CLASS[mtype]
        try:
            # Best-effort: a missing transformers module for one family must
            # not prevent patching the others.
            target_module = importlib.import_module(module_path)
            mlp_class = getattr(target_module, class_name)
            _patch_mlp_class(mlp_class, _mlp_forward_fn, num_shards)
        except (ImportError, AttributeError) as e:
            print(f"Warning: Could not patch {mtype} MLP: {e}")
        else:
            # Several model types may share one class; record it only once.
            if class_name not in patched:
                patched.append(class_name)
    if patched:
        print(f"TiledMLP monkey patch applied to: {', '.join(patched)} (shards={num_shards})")
    return patched
def _patch_mlp_class(mlp_class: type[nn.Module], forward_fn, num_shards: int):
    """Replace ``mlp_class.forward`` with a TiledMLP-backed implementation."""

    def tiled_forward(self, x):
        # Only trainable parameters participate in backward accumulation.
        trainable = [p for p in self.parameters() if p.requires_grad]
        return TiledMLP.apply(forward_fn, self, x, num_shards, trainable)

    mlp_class.forward = tiled_forward
| {
"repo_id": "verl-project/verl",
"file_path": "verl/models/transformers/tiled_mlp.py",
"license": "Apache License 2.0",
"lines": 184,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:tests/experimental/reward_loop/test_math_verify.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import ray
from hydra import compose, initialize_config_dir
from torchdata.stateful_dataloader import StatefulDataLoader
from transformers import AutoTokenizer
from tests.experimental.agent_loop.agent_utils import init_agent_loop_manager
from verl.protocol import DataProto
from verl.trainer.main_ppo import create_rl_sampler
from verl.utils.dataset.rl_dataset import RLHFDataset, collate_fn
def test_agent_reward_loop_standalone():
    """Smoke test for the remote reward loop.

    Rolls out one batch of math prompts through the async agent loop and
    scores them with the custom math-verify reward function, checking only
    that scores come back attached to the generated batch.

    Requires 8 GPUs, Ray, a local Qwen2.5-1.5B-Instruct checkpoint under
    ~/models/ and the math train parquet under ~/data/math/.
    """
    ray.init(
        runtime_env={
            "env_vars": {
                "TOKENIZERS_PARALLELISM": "true",
                "NCCL_DEBUG": "WARN",
                "VLLM_LOGGING_LEVEL": "INFO",
                "VLLM_USE_V1": "1",
            }
        }
    )

    with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config")):
        config = compose(config_name="ppo_trainer")

    rollout_model_path = os.path.expanduser("~/models/Qwen/Qwen2.5-1.5B-Instruct")

    # actor_rollout_ref config
    config.data.return_raw_chat = True
    config.data.max_prompt_length = 1024
    config.data.max_response_length = 4096
    config.actor_rollout_ref.model.path = rollout_model_path
    config.actor_rollout_ref.actor.use_dynamic_bsz = True
    config.actor_rollout_ref.rollout.name = os.getenv("ROLLOUT_NAME", "vllm")
    config.actor_rollout_ref.rollout.mode = "async"
    config.actor_rollout_ref.rollout.tensor_model_parallel_size = 2
    config.actor_rollout_ref.rollout.gpu_memory_utilization = 0.9
    config.actor_rollout_ref.rollout.enforce_eager = True
    config.actor_rollout_ref.rollout.prompt_length = 2048
    config.actor_rollout_ref.rollout.response_length = 4096
    config.actor_rollout_ref.rollout.skip_tokenizer_init = True
    config.trainer.n_gpus_per_node = 8
    config.trainer.nnodes = 1
    # Route rewards through the remote (process-isolated) reward manager with
    # the math-verify scoring function from the test fixtures.
    config.reward.reward_manager.name = "remote"
    config.reward.num_workers = 2
    config.reward.custom_reward_function.path = "tests/experimental/reward_loop/reward_fn.py"
    config.reward.custom_reward_function.name = "compute_score_math_verify"

    # 1. init reward model manager
    agent_loop_manager = init_agent_loop_manager(config)

    # 2. init test data
    local_folder = os.path.expanduser("~/data/math/")
    data_files = [os.path.join(local_folder, "train.parquet")]
    tokenizer = AutoTokenizer.from_pretrained(rollout_model_path)
    dataset = RLHFDataset(
        data_files=data_files,
        tokenizer=tokenizer,
        config=config.data,
        processor=None,
    )
    batch_size = 64
    sampler = create_rl_sampler(config.data, dataset)
    dataloader = StatefulDataLoader(
        dataset=dataset,
        batch_size=batch_size,
        num_workers=config.data.dataloader_num_workers,
        drop_last=True,
        collate_fn=collate_fn,
        sampler=sampler,
    )

    # 3. generate responses
    batch_dict = next(iter(dataloader))
    batch = DataProto.from_single_dict(batch_dict)
    gen_batch = agent_loop_manager.generate_sequences(prompts=batch)
    rm_scores = gen_batch.batch["rm_scores"]
    # NOTE(review): accuracy is only printed, not asserted — this test checks
    # that the pipeline runs end to end, not a score threshold.
    accuracy = rm_scores.sum(dim=-1).mean()
    print(accuracy)

    ray.shutdown()
| {
"repo_id": "verl-project/verl",
"file_path": "tests/experimental/reward_loop/test_math_verify.py",
"license": "Apache License 2.0",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:verl/experimental/reward_loop/reward_manager/remote.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import itertools
import ray
from verl import DataProto
from verl.experimental.reward_loop.reward_manager import register
from verl.experimental.reward_loop.reward_manager.base import RewardManagerBase
from verl.utils.reward_score import default_compute_score
@ray.remote(num_cpus=1)
class RewardComputeWorker:
    """Ray actor that evaluates a reward function in its own process.

    WARNING: This class cannot have async methods.
    """

    def __init__(self, compute_score_fn):
        # since the reward function may not be pickleable, we need to init it in the worker
        self.compute_score_fn = compute_score_fn

    def compute_score(self, **kwargs) -> dict:
        """Invoke the wrapped reward function with the given keyword args."""
        return self.compute_score_fn(**kwargs)
@register("remote")
class RemoteRewardManager(RewardManagerBase):
"""
The reward manager.
Some errors exist when using default thread pool to compute reward score, e.g., math-verify.
https://github.com/volcengine/verl/issues/3407
To avoid the above issues, we use a separate process to compute reward score.
Moreover, process may be more suitable for cpu-intensive requests.
"""
def __init__(self, config, tokenizer, compute_score, reward_router_address=None, reward_model_tokenizer=None):
super().__init__(config, tokenizer, compute_score)
self.compute_score = compute_score or default_compute_score
self.is_async_reward_score = inspect.iscoroutinefunction(self.compute_score)
assert not self.is_async_reward_score, "Async reward score is not supported in remote reward manager. "
self.reward_router_address = reward_router_address
self.reward_model_tokenizer = reward_model_tokenizer
num_reward_workers = config.reward.num_workers
# in the rollout & reward parallel mode
# the sum of final reward workers will be agent_loop_workers * num_reward_workers
self.reward_worker = [
# register the reward worker in the same node
RewardComputeWorker.options(
scheduling_strategy=ray.util.scheduling_strategies.NodeAffinitySchedulingStrategy(
node_id=ray.get_runtime_context().get_node_id(),
soft=True,
),
).remote(self.compute_score)
for _ in range(num_reward_workers)
]
self.reward_worker_pool = itertools.cycle(self.reward_worker)
def choose_reward_worker(self):
return next(self.reward_worker_pool)
async def run_single(self, data: DataProto) -> dict:
assert len(data) == 1, "Only support single data item"
data_item = data[0]
response_ids = data_item.batch["responses"]
response_length = response_ids.shape[-1]
valid_response_length = data_item.batch["attention_mask"][-response_length:].sum()
valid_response_ids = response_ids[:valid_response_length]
data_source = data_item.non_tensor_batch["data_source"]
ground_truth = data_item.non_tensor_batch["reward_model"]["ground_truth"]
extra_info = data_item.non_tensor_batch.get("extra_info", {})
tool_extra_fields = data_item.non_tensor_batch.get("tool_extra_fields", None)
if tool_extra_fields is not None:
extra_info.update(tool_extra_fields.items())
num_turns = data_item.non_tensor_batch.get("__num_turns__", None)
rollout_reward_scores = data_item.non_tensor_batch.get("reward_scores", {})
extra_info["num_turns"] = num_turns
extra_info["rollout_reward_scores"] = rollout_reward_scores
response_str = await self.loop.run_in_executor(
None, lambda: self.tokenizer.decode(valid_response_ids, skip_special_tokens=True)
)
extra_reward_kwargs = (
{
"reward_router_address": self.reward_router_address,
"reward_model_tokenizer": self.reward_model_tokenizer,
}
if self.reward_router_address is not None
else {}
)
reward_worker = self.choose_reward_worker()
result = await reward_worker.compute_score.remote(
data_source=data_source,
solution_str=response_str,
ground_truth=ground_truth,
extra_info=extra_info,
**extra_reward_kwargs,
)
reward_extra_info = {}
score: float
if isinstance(result, dict):
score = result["score"]
for key, value in result.items():
reward_extra_info[key] = value
else:
score = result
reward_extra_info["acc"] = score
reward = score
return {"reward_score": reward, "reward_extra_info": reward_extra_info}
| {
"repo_id": "verl-project/verl",
"file_path": "verl/experimental/reward_loop/reward_manager/remote.py",
"license": "Apache License 2.0",
"lines": 109,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/workers/engine/veomni/transformer_impl.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from dataclasses import dataclass, field
from typing import Any, Callable, Optional, Sequence
import torch
import torch.distributed as dist
from tensordict import TensorDict
from torch.distributed.tensor import DTensor
from veomni.distributed import parallel_state
from veomni.distributed.offloading import build_activation_offloading_context
from veomni.distributed.torch_parallelize import build_parallelize_model
from veomni.models.auto import build_foundation_model
from veomni.optim import build_lr_scheduler, build_optimizer
import verl.utils.torch_functional as verl_F
from verl.trainer.config import CheckpointConfig
from verl.utils import tensordict_utils as tu
from verl.utils.checkpoint.fsdp_checkpoint_manager import FSDPCheckpointManager
from verl.utils.device import get_device_id, get_device_name
from verl.utils.fsdp_utils import fsdp_version
from verl.utils.model import convert_weight_keys
from verl.utils.profiler import log_gpu_memory_usage
from verl.utils.ulysses import (
get_ulysses_sequence_parallel_group,
set_ulysses_sequence_parallel_group,
)
from verl.workers.config import HFModelConfig, VeOmniEngineConfig, VeOmniOptimizerConfig
from ..base import BaseEngineCtx, EngineRegistry
from ..fsdp.transformer_impl import FSDPEngine, FSDPEngineWithLMHead
from ..utils import enable_full_determinism, postprocess_batch_func, prepare_micro_batches
from .utils import (
MOE_PARAM_HANDERS,
VL_TYPE2INDEX,
load_veomni_model_to_gpu,
load_veomni_optimizer,
offload_veomni_model_to_cpu,
offload_veomni_optimizer,
)
# Module-level logger.
# NOTE(review): keyed by file *path* via ``__file__``; ``__name__`` is the usual
# convention — confirm before changing (other modules may look this logger up).
logger = logging.getLogger(__file__)
class VeOmniEngine(FSDPEngine):
    """Training engine backed by the VeOmni stack (FSDP2, optional EP / ulysses SP).

    Reuses FSDP engine plumbing from ``FSDPEngine`` but builds the model,
    optimizer and LR scheduler through VeOmni's builders, and drives VeOmni's
    parallel-state and CPU-offload helpers.
    """

    def __init__(
        self,
        model_config: HFModelConfig,
        engine_config: VeOmniEngineConfig,
        optimizer_config: VeOmniOptimizerConfig,
        checkpoint_config: CheckpointConfig,
        **kwargs,
    ):
        """
        Initialize the VeOmniEngine.

        Sets up VeOmni parallel state (dp/fsdp/ep/ulysses), determinism, and
        offload policies based on config.

        Args:
            model_config: HF model settings (path, LoRA rank, remove-padding, ...).
            engine_config: VeOmni engine settings (parallel sizes, offload flags, ...).
            optimizer_config: Optimizer and LR-scheduler settings.
            checkpoint_config: Checkpoint save/load settings.
        """
        self.model_config = model_config
        self.engine_config = engine_config
        self.optimizer_config = optimizer_config
        self.checkpoint_config = checkpoint_config
        # VeOmniEngine only supports fsdp2.
        self.data_parallel_mode = "fsdp2"
        self.rank = dist.get_rank()
        fsdp_size = self.engine_config.fsdp_size
        world_size = dist.get_world_size()
        # The data-parallel world excludes the ulysses (sequence-parallel) dimension.
        dp_size = world_size // self.engine_config.ulysses_parallel_size
        if fsdp_size < 0 or fsdp_size >= dp_size:
            # Pure sharding: no replication across the dp dimension.
            data_parallel_replicate_size = 1
            data_parallel_shard_size = dp_size
        else:
            if dp_size % fsdp_size != 0:
                raise ValueError(
                    f"Data parallel size ({dp_size}) must be divisible by fsdp_size ({fsdp_size}). "
                    "Please adjust your parallel configuration."
                )
            # HSDP-style layout: replicate groups of fsdp_size shards.
            data_parallel_replicate_size = dp_size // fsdp_size
            data_parallel_shard_size = fsdp_size
        parallel_state.init_parallel_state(
            dp_size=dp_size,
            dp_replicate_size=data_parallel_replicate_size,
            dp_shard_size=data_parallel_shard_size,
            ep_size=self.engine_config.expert_parallel_size,
            ulysses_size=self.engine_config.ulysses_parallel_size,
            dp_mode=self.data_parallel_mode,
        )
        if self.engine_config.full_determinism:
            enable_full_determinism(seed=self.engine_config.seed)
        self.use_remove_padding = self.model_config.use_remove_padding
        self._is_offload_param = self.engine_config.param_offload
        self._is_offload_optimizer = self.engine_config.optimizer_offload
        self._is_lora = self.model_config.lora_rank > 0
        self.use_ulysses_sp = parallel_state.get_parallel_state().sp_enabled
        self.ulysses_sequence_parallel_size = self.engine_config.ulysses_parallel_size
        if self.use_ulysses_sp:
            self.ulysses_parallel_group = parallel_state.get_parallel_state().device_mesh["sp"].get_group()
        else:
            self.ulysses_parallel_group = None
        # Entropy can be computed chunked (lower peak memory) or in one shot.
        if self.engine_config.entropy_from_logits_with_chunking:
            entropy_from_logits = verl_F.entropy_from_logits_with_chunking
        else:
            entropy_from_logits = verl_F.entropy_from_logits
        self.compute_entropy_from_logits = (
            torch.compile(entropy_from_logits, dynamic=True)
            if self.engine_config.use_torch_compile  # use torch compile by default
            else entropy_from_logits
        )

    def initialize(self):
        """
        Build the model, optimizer, and learning rate scheduler under VeOmni.
        Applies device, dtype, and precision configurations, including mixed precision.
        Sets up checkpoint manager and FLOPs counter.
        """
        self._build_model_optimizer()
        self.checkpoint_manager = FSDPCheckpointManager(
            model=self.module,
            optimizer=self.optimizer,
            lr_scheduler=self.lr_scheduler,
            processing_class=self.model_config.get_processor(),
            checkpoint_config=self.checkpoint_config,
            trust_remote_code=self.model_config.trust_remote_code,
        )
        # Apply the configured offload policy right after build to free device memory.
        self.to(
            device="cpu",
            model=self._is_offload_param,
            optimizer=self._is_offload_optimizer,
            grad=self._is_offload_optimizer,
        )
        log_gpu_memory_usage("After offload model/optimizer/grad during init", logger=logger)

    def _build_optimizer(self, module):
        """Build the VeOmni optimizer; attach a model-provided step pre-hook if any."""
        optimizer = build_optimizer(
            module,
            lr=self.optimizer_config.lr,
            betas=self.optimizer_config.betas,
            weight_decay=self.optimizer_config.weight_decay,
            optimizer_type=self.optimizer_config.optimizer,
        )
        # Some models expose a hook to run before each optimizer step (e.g. MoE bookkeeping).
        get_optimizer_pre_hook = getattr(module, "get_optimizer_pre_hook", None)
        if get_optimizer_pre_hook is not None:
            optimizer_pre_hook = get_optimizer_pre_hook(module, module.config, self.data_parallel_mode)
            optimizer.register_step_pre_hook(optimizer_pre_hook)
        return optimizer

    def _build_lr_scheduler(self, optimizer):
        """Build the VeOmni LR scheduler (warmup + decay) from the optimizer config."""
        optim_config = self.optimizer_config
        lr_scheduler = build_lr_scheduler(
            optimizer,
            train_steps=optim_config.total_training_steps,
            lr=optim_config.lr,
            lr_min=optim_config.lr_min,
            lr_decay_style=optim_config.lr_scheduler_type,
            lr_decay_ratio=optim_config.lr_decay_ratio,
            lr_warmup_ratio=optim_config.lr_warmup_steps_ratio,
            lr_start=optim_config.lr_start,
        )
        return lr_scheduler

    def _build_model_optimizer(self):
        """Build and parallelize the model; create optimizer/scheduler unless forward-only."""
        # Load base model with specified configuration and dtype
        module = build_foundation_model(
            config_path=self.model_config.hf_config_path,
            weights_path=self.model_config.path,
            torch_dtype="float32" if self.engine_config.mixed_precision else "bfloat16",
            attn_implementation=self.engine_config.attn_implementation,
            moe_implementation=self.engine_config.moe_implementation,
            init_device=self.engine_config.init_device,
        )
        log_gpu_memory_usage("After load base model", logger=logger)
        # Applies parallel strategies to the model.
        log_gpu_memory_usage("Before parallelize model", logger=logger)
        module = build_parallelize_model(
            module,
            init_device=self.engine_config.init_device,
            weights_path=self.model_config.path,
            enable_full_shard=self.engine_config.enable_full_shard,
            enable_mixed_precision=self.engine_config.mixed_precision,
            enable_gradient_checkpointing=self.model_config.enable_gradient_checkpointing,
            enable_fsdp_offload=self.engine_config.enable_fsdp_offload,
            basic_modules=module._no_split_modules + self.engine_config.basic_modules,
            enable_reentrant=self.engine_config.enable_reentrant,
            enable_forward_prefetch=self.engine_config.forward_prefetch,
        )
        log_gpu_memory_usage("After parallelize model", logger=logger)
        if not self.engine_config.forward_only:
            # Initialize optimizer with model parameters and config settings
            optimizer = self._build_optimizer(module)
            # Create learning rate scheduler with warmup and decay settings
            lr_scheduler = self._build_lr_scheduler(optimizer)
        else:
            optimizer = None
            lr_scheduler = None
        self.module = module
        self.optimizer = optimizer
        self.lr_scheduler = lr_scheduler
        # Contexts wrapping forward/backward to optionally offload activations to CPU.
        self.model_fwd_context, self.model_bwd_context = build_activation_offloading_context(
            self.model_config.enable_activation_offload,
            self.model_config.enable_gradient_checkpointing,
            self.engine_config.activation_gpu_limit,
        )

    def optimizer_step(self):
        """
        Perform an optimization step using the optimizer.

        Clips gradients first; if the resulting grad norm is non-finite the
        update is skipped and gradients are zeroed instead.

        Returns:
            float: the global gradient norm reported by gradient clipping.
        """
        if hasattr(self.module, "clip_grad_norm_"):
            grad_norm = self.module.clip_grad_norm_(self.optimizer_config.clip_grad)
        else:
            grad_norm = torch.nn.utils.clip_grad_norm_(self.module.parameters(), self.optimizer_config.clip_grad)
        # Materialize the full (replicated) norm value when it comes back as a DTensor.
        if isinstance(grad_norm, DTensor):
            grad_norm = grad_norm.full_tensor()
        # if grad_norm is not finite, skip the update
        if not torch.isfinite(grad_norm):
            print(f"WARN: grad_norm is not finite: {grad_norm}")
            self.optimizer.zero_grad()
        else:
            self.optimizer.step()
        return grad_norm.item()

    def forward_backward_batch(self, data: TensorDict, loss_function: Callable, forward_only=False) -> Any:
        """
        Perform a forward pass and optionally a backward pass on a batch of data.

        Args:
            data: The input data for the forward pass, typically containing tensors and metadata.
            loss_function: The loss function to optimize. See `verl.workers.roles.utils.losses` for examples.
            forward_only: If True, perform only the forward pass. If False, perform forward and backward pass.

        Returns:
            Any: The output of the forward pass, which can be used for loss computation or other purposes.
        """
        tu.assign_non_tensor(data, sp_size=parallel_state.get_parallel_state().ulysses_size)
        # compute num_tokens in global batch for loss normalization
        batch_num_tokens = data["loss_mask"].sum().to(get_device_id())
        torch.distributed.all_reduce(
            batch_num_tokens, op=torch.distributed.ReduceOp.SUM, group=self.get_data_parallel_group()
        )
        tu.assign_non_tensor(data, batch_num_tokens=batch_num_tokens.item())
        tu.assign_non_tensor(data, dp_size=self.get_data_parallel_size())
        # Split into micro-batches (same count on every dp rank to keep collectives aligned).
        micro_batches, indices = prepare_micro_batches(
            data=data, dp_group=self.get_data_parallel_group(), same_micro_num_in_dp=True
        )
        output_lst = []
        for micro_batch in micro_batches:
            with self.model_fwd_context:
                loss, meta_info = self.forward_step(micro_batch, loss_function=loss_function, forward_only=forward_only)
            if not forward_only:
                with self.model_bwd_context:
                    loss.backward()
            output_lst.append(meta_info)
        return postprocess_batch_func(output_lst=output_lst, indices=indices, data=data)

    def get_data_parallel_rank(self):
        """Return this rank's index along the dp mesh dimension."""
        return parallel_state.get_parallel_state().device_mesh.get_local_rank("dp")

    def get_data_parallel_size(self) -> int:
        """Return the number of data-parallel ranks (world size / ulysses size)."""
        return torch.distributed.get_world_size() // parallel_state.get_parallel_state().ulysses_size

    def get_data_parallel_group(self):
        """Return the dp process group (WORLD when ulysses SP is disabled)."""
        if parallel_state.get_parallel_state().ulysses_size > 1:
            return parallel_state.get_parallel_state().device_mesh.get_group(mesh_dim="dp")
        else:
            return torch.distributed.group.WORLD

    def is_mp_src_rank_with_outputs(self):
        """
        Whether the current rank is the first rank in model parallel group that contains model outputs
        """
        if parallel_state.get_parallel_state().ulysses_size > 1:
            is_collect = parallel_state.get_parallel_state().device_mesh["ulysses"].get_local_rank() == 0
        else:
            is_collect = True
        return is_collect

    def train_mode(self, **kwargs):
        """
        Return a context manager that switches to training mode with VeOmni-specific handling.
        Includes parameter and optimizer offload entry/exit.
        """
        return EngineTrainModeCtx(self, **kwargs)

    def eval_mode(self, **kwargs):
        """
        Return a context manager that switches to evaluation mode with VeOmni-specific handling.
        Includes activation offload entry/exit.
        """
        return EngineEvalModeCtx(self, **kwargs)

    def to(self, device: str, model: bool = True, optimizer: bool = True, grad: bool = True):
        """
        Move model parameters, optimizer states, or both to the specified device.
        Note that this function executes irrespective of offload config. It serves as manual control.

        Args:
            device: Target device identifier.
            model: If True, move the model.
            optimizer: If True, move the optimizer states.
            grad: If True, move gradients (handled by the base implementation).
        """
        # Skip FSDPEngine's own .to() in the MRO; the VeOmni helpers below do the actual moves.
        super(FSDPEngine, self).to(device=device, model=model, optimizer=optimizer, grad=grad)
        device_name = get_device_name()
        assert device in (device_name, "cpu")
        if device == device_name:
            if model:
                load_veomni_model_to_gpu(self.module)
            if optimizer and self.optimizer is not None:
                load_veomni_optimizer(self.optimizer, device)
        elif device == "cpu":
            if model:
                offload_veomni_model_to_cpu(self.module)
            if optimizer and self.optimizer is not None:
                offload_veomni_optimizer(self.optimizer)
        else:
            raise ValueError(f"Invalid device type: {device}")

    def save_checkpoint(
        self,
        local_path: str,
        hdfs_path: Optional[str] = None,
        global_step: int = 0,
        max_ckpt_to_keep: Optional[int] = None,
        **kwargs,
    ) -> None:
        """
        Save VeOmni checkpoint, handling parameter offload as needed.
        """
        # Parameters must be on the accelerator for the checkpoint manager to gather them.
        origin_module_device = next(self.module.parameters()).device.type
        if self._is_offload_param or origin_module_device == "cpu":
            load_veomni_model_to_gpu(self.module)
        self.checkpoint_manager.save_checkpoint(
            local_path=local_path, hdfs_path=hdfs_path, global_step=global_step, max_ckpt_to_keep=max_ckpt_to_keep
        )
        torch.distributed.barrier()
        if self._is_offload_param:
            offload_veomni_model_to_cpu(self.module)

    def load_checkpoint(
        self, local_path: str, hdfs_path: Optional[str] = None, del_local_after_load: int = True, **kwargs
    ) -> None:
        """
        Load VeOmni checkpoint, restoring parameters and optimizer state.
        """
        if self._is_offload_param:
            load_veomni_model_to_gpu(self.module)
        self.checkpoint_manager.load_checkpoint(
            local_path=local_path, hdfs_path=hdfs_path, del_local_after_load=del_local_after_load
        )
        torch.distributed.barrier()
        if self._is_offload_param:
            offload_veomni_model_to_cpu(self.module)
        if self._is_offload_optimizer:
            offload_veomni_optimizer(self.optimizer)

    def get_per_tensor_param(self, **kwargs):
        """Yield full (unsharded) parameters one tensor at a time.

        Gathers DTensor shards on the fly and, when expert parallelism is
        enabled, all-gathers stacked expert weights before handing them to the
        per-model-type handler from ``MOE_PARAM_HANDERS``.

        Returns:
            tuple: (generator over ``(name, tensor)`` pairs, ``None`` — VeOmni
            LoRA is not supported yet, see TODO below).
        """
        load_veomni_model_to_gpu(self.module)
        params = self.module.state_dict()
        params = convert_weight_keys(params, getattr(self.module, "_fsdp_wrapped_module", self.module))
        if self._is_offload_param:
            offload_veomni_model_to_cpu(self.module)
        device = get_device_id()
        ps = parallel_state.get_parallel_state()
        model_type = getattr(self.module.config, "model_type", "default")
        # Fallback handler passes the tensor through unchanged.
        process_func = MOE_PARAM_HANDERS.get(model_type, lambda n, t: iter([(n, t)]))

        def param_generator():
            for name, param in params.items():
                unsharded_tensor = param.full_tensor() if isinstance(param, DTensor) else param
                is_expert_layer = "mlp.experts." in name
                is_proj = any(p in name for p in ["down_proj", "gate_proj", "up_proj", "gate_up_proj"])
                if is_expert_layer and is_proj and ps.ep_enabled:
                    output_shape = list(unsharded_tensor.shape)
                    output_shape[0] *= ps.ep_size
                    stacked_tensor = torch.empty(output_shape, dtype=unsharded_tensor.dtype, device=device)
                    # all gather expert tensors [32, H, I] -> [128, H, I]
                    torch.distributed.all_gather_into_tensor(stacked_tensor, unsharded_tensor, group=ps.ep_group)
                    yield from process_func(name, stacked_tensor)
                    del stacked_tensor
                else:
                    if is_expert_layer:
                        yield from process_func(name, unsharded_tensor)
                    else:
                        yield name, unsharded_tensor

        # TODO: support VeOmni LoRA
        return param_generator(), None
class EngineEvalModeCtx(BaseEngineCtx):
    """Context manager switching a ``VeOmniEngine`` into evaluation mode.

    Installs the engine's ulysses SP group on entry (restoring the previous
    group on exit) and reshards the root FSDP module on exit so sharded
    parameters are freed after forward-only use.
    """

    def __init__(self, engine: VeOmniEngine, **kwargs):
        super().__init__(engine=engine, mode="eval", **kwargs)

    def __enter__(self):
        assert isinstance(self.engine, VeOmniEngine)
        super().__enter__()
        self.prev_sp_group = get_ulysses_sequence_parallel_group()
        set_ulysses_sequence_parallel_group(self.engine.ulysses_parallel_group)
        # NOTE(review): the module is put in train() mode even in this *eval*
        # context — presumably deliberate (cf. the VeOmni eval-mode TODO in
        # EngineTrainModeCtx); confirm before changing to eval().
        self.engine.module.train()

    def __exit__(self, exc_type, exc_value, traceback):
        assert isinstance(self.engine, VeOmniEngine)
        set_ulysses_sequence_parallel_group(self.prev_sp_group)
        # https://pytorch.org/docs/stable/notes/fsdp.html#fsdp-notes
        # unshard the root FSDP module
        if parallel_state.get_parallel_state().dp_shard_size > 1:
            if fsdp_version(self.engine.module) == 1:
                self.engine.module._handle.reshard(True)
            elif fsdp_version(self.engine.module) == 2:
                self.engine.module.reshard()
        super().__exit__(exc_type, exc_value, traceback)
class EngineTrainModeCtx(BaseEngineCtx):
    """Context manager switching a ``VeOmniEngine`` into training mode.

    Installs the engine's ulysses SP group on entry; restores the previous
    group and zeroes gradients on exit.
    """

    def __init__(self, engine: VeOmniEngine, **kwargs):
        super().__init__(engine=engine, mode="train", **kwargs)

    def __enter__(self):
        assert isinstance(self.engine, VeOmniEngine)
        super().__enter__()
        self.prev_sp_group = get_ulysses_sequence_parallel_group()
        set_ulysses_sequence_parallel_group(self.engine.ulysses_parallel_group)
        # TODO: Switch to eval mode after Integrating the CI environment
        # VeOmni (ref: https://github.com/ByteDance-Seed/VeOmni/pull/421)
        self.engine.module.train()

    def __exit__(self, exc_type, exc_value, traceback):
        assert isinstance(self.engine, VeOmniEngine)
        set_ulysses_sequence_parallel_group(self.prev_sp_group)
        # Drop leftover gradients so the next training step starts clean.
        self.engine.optimizer_zero_grad()
        super().__exit__(exc_type, exc_value, traceback)
@dataclass
class OmniSequenceShardCollator:
    """
    Data collator to chunk inputs along the sequence length.

    Pads selected features so their sequence dimension is divisible by the
    sequence-parallel (SP) size, then slices out this rank's contiguous chunk.
    """

    # features to slice sequence dimension (value = which dim is the sequence dim)
    sp_slice_features: dict[str, int] = field(
        default_factory=lambda: {
            "input_ids": -1,
            "labels": -1,
            "pixel_values": 0,
            "pixel_values_videos": 0,
        },
        metadata={"help": "features to slice sequence dimension."},
    )
    # features to padding sequence dimension (value = pad fill value)
    padding_features: dict[str, int] = field(
        default_factory=lambda: {
            "pixel_values": 0,
        },
        metadata={"help": "features to padding sequence dimension."},
    )
    # padding scale for padding features (pad to a multiple of sp_size * scale)
    padding_scale: dict[str, int] = field(
        default_factory=lambda: {"pixel_values": 4}, metadata={"help": "padding scale for padding features."}
    )

    def __post_init__(self):
        # Snapshot the SP topology once; requires parallel state to be initialized.
        self.sp_size = parallel_state.get_parallel_state().sp_size
        self.sp_rank = parallel_state.get_parallel_state().sp_rank

    def sp_slice(self, feature: torch.Tensor, dim: int = -1) -> "torch.Tensor":
        """Return this SP rank's chunk of ``feature`` along ``dim``.

        NOTE(review): assumes ``feature.size(dim)`` is already a multiple of
        sp_size (see ``sp_padding``); otherwise ``narrow`` would run past the
        end for the last rank(s) — confirm callers always pad first.
        """
        seq_length = feature.size(dim)
        sp_chunk_size = (seq_length + self.sp_size - 1) // self.sp_size
        return feature.narrow(dim, self.sp_rank * sp_chunk_size, sp_chunk_size)

    def sp_padding(
        self, tensor: "torch.Tensor", dim: int = -1, pad_value: int = 0, pad_scale: int = 1
    ) -> "torch.Tensor":
        """
        Pads a tensor with pad_length to aligns tensor with sp size.

        The result's size along ``dim`` is the smallest multiple of
        ``sp_size * pad_scale`` that is >= the original size.
        """
        seq_length = tensor.size(dim)
        scale_sp_size = self.sp_size * pad_scale
        sp_chunk_size = (seq_length + scale_sp_size - 1) // scale_sp_size
        pad_size = sp_chunk_size * scale_sp_size - seq_length
        if pad_size == 0:
            return tensor
        pad_shape = list(tensor.shape)
        pad_shape[dim] = pad_size
        pad = torch.full(pad_shape, fill_value=pad_value, dtype=tensor.dtype, device=tensor.device)
        return torch.cat((tensor, pad), dim=dim)

    def __call__(self, batch: dict[str, "torch.Tensor"]) -> dict[str, "torch.Tensor"]:
        """Pad then SP-slice ``batch`` in place; also returns it for convenience."""
        # pad first so every rank's slice has equal length
        for key in batch.keys():
            if key in self.padding_features.keys():
                batch[key] = self.sp_padding(
                    batch[key],
                    dim=self.sp_slice_features.get(key, -1),
                    pad_value=self.padding_features[key],
                    pad_scale=self.padding_scale.get(key, 1),
                )
        # sp slice
        for key in batch.keys():
            if key in self.sp_slice_features.keys():
                batch[key] = self.sp_slice(batch[key], dim=self.sp_slice_features[key])
        return batch
@EngineRegistry.register(model_type="language_model", backend=["veomni"], device=["cuda", "npu"])
class VeOmniEngineWithLMHead(VeOmniEngine, FSDPEngineWithLMHead):
    """VeOmni engine for causal-LM models; extends input preparation with
    VL placeholder masks and ulysses sequence sharding."""

    def prepare_model_inputs(self, micro_batch: TensorDict):
        """Build model inputs, adding image/video masks for supported VL model
        types and (when ulysses SP is enabled) slicing sequence-dim features
        across SP ranks."""
        # TODO: Cannot work properly for qwen_vl ulysses
        model_inputs, output_args = super().prepare_model_inputs(micro_batch)
        input_ids_rmpad = model_inputs["input_ids"]
        # Mark image/video placeholder-token positions for supported VL models.
        if self.module.config.model_type in VL_TYPE2INDEX.keys():
            image_mask = input_ids_rmpad == VL_TYPE2INDEX[self.module.config.model_type]["IMAGE_INPUT_INDEX"]
            video_mask = input_ids_rmpad == VL_TYPE2INDEX[self.module.config.model_type]["VIDEO_INPUT_INDEX"]
            model_inputs.update({"image_mask": image_mask, "video_mask": video_mask})
        if parallel_state.get_parallel_state().sp_enabled:
            # Collator mutates model_inputs in place (pad + per-rank slice).
            omni_sequence_shard_collator = OmniSequenceShardCollator()
            omni_sequence_shard_collator(model_inputs)
        return model_inputs, output_args
| {
"repo_id": "verl-project/verl",
"file_path": "verl/workers/engine/veomni/transformer_impl.py",
"license": "Apache License 2.0",
"lines": 485,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/workers/engine/veomni/utils.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from verl.utils.device import get_device_id, get_torch_device
# Maps HF ``model_type`` -> placeholder token ids that mark image/video
# positions in ``input_ids``. The listed Qwen-VL variants share the same ids.
VL_TYPE2INDEX = {
    "qwen2_5_vl": {
        "IMAGE_INPUT_INDEX": 151655,
        "VIDEO_INPUT_INDEX": 151656,
    },
    "qwen3_vl": {
        "IMAGE_INPUT_INDEX": 151655,
        "VIDEO_INPUT_INDEX": 151656,
    },
    "qwen3_vl_moe": {
        "IMAGE_INPUT_INDEX": 151655,
        "VIDEO_INPUT_INDEX": 151656,
    },
}
@torch.no_grad()
def offload_veomni_model_to_cpu(model, empty_cache: bool = True):
    """Reshard a VeOmni FSDP2 model and move its parameters to CPU.

    Args:
        model: Root FSDP2 module (must expose ``reshard()``).
        empty_cache: If True, release cached accelerator memory afterwards.
    """
    from torch.distributed.fsdp._fully_shard._fsdp_common import TrainingState
    from torch.distributed.fsdp._fully_shard._fsdp_state import _get_module_fsdp_state

    # Reset every FSDP param-group's training state to IDLE before resharding.
    # NOTE(review): presumably needed so reshard() is accepted outside a clean
    # forward/backward; relies on private FSDP2 internals (_fsdp_param_group,
    # _training_state) — revisit on torch upgrades.
    for module in model.modules():
        state = _get_module_fsdp_state(module)
        if state is None:
            continue
        fsdp_param_group = state._fsdp_param_group
        if fsdp_param_group is None:
            continue
        fsdp_param_group._training_state = TrainingState.IDLE
    model.reshard()
    model.cpu()
    if empty_cache:
        get_torch_device().empty_cache()
@torch.no_grad()
def load_veomni_model_to_gpu(model):
    """Move a (possibly CPU-offloaded) VeOmni model onto the current accelerator."""
    model.to(get_device_id())
@torch.no_grad()
def offload_veomni_optimizer(optimizer):
    """Move every tensor in the optimizer's state to CPU.

    Unwraps VeOmni's MultiOptimizer (used for ep/non-ep parameter groups when
    ep+fsdp2 is enabled) so the inner optimizers are offloaded individually.
    """
    if getattr(optimizer, "_is_multi_optimizer", False):
        inner_optimizers = list(optimizer.optimizers_dict.values())
    else:
        inner_optimizers = [optimizer]
    for opt in inner_optimizers:
        # Nothing to move before the first step populates the state.
        if not opt.state:
            continue
        for group in opt.param_groups:
            for param in group["params"]:
                param_state = opt.state[param]
                for key, value in param_state.items():
                    if isinstance(value, torch.Tensor):
                        param_state[key] = value.to("cpu", non_blocking=True)
@torch.no_grad()
def load_veomni_optimizer(optimizer, device_id):
    """Move every tensor in the optimizer's state onto ``device_id``.

    Unwraps VeOmni's MultiOptimizer (used for ep/non-ep parameter groups when
    ep+fsdp2 is enabled) so the inner optimizers are loaded individually.
    """
    if getattr(optimizer, "_is_multi_optimizer", False):
        inner_optimizers = list(optimizer.optimizers_dict.values())
    else:
        inner_optimizers = [optimizer]
    for opt in inner_optimizers:
        # Nothing to move before the first step populates the state.
        if not opt.state:
            continue
        for group in opt.param_groups:
            for param in group["params"]:
                param_state = opt.state[param]
                for key, value in param_state.items():
                    if isinstance(value, torch.Tensor):
                        param_state[key] = value.to(device_id, non_blocking=True)
def _map_moe_params_qwen3_moe(name, tensor):
    """Expand a stacked qwen3_moe expert weight into per-expert entries.

    ``tensor`` is stacked along dim 0 (one slice per expert); each slice is
    yielded under an HF-style per-expert key and moved to the current device.
    """
    prefix = "mlp.experts."
    for idx, expert_weight in enumerate(tensor.unbind(0)):
        per_expert_key = name.replace(prefix, prefix + str(idx) + ".") + ".weight"
        yield per_expert_key, expert_weight.to(get_device_id(), non_blocking=True)


# Per-model handlers that expand stacked MoE expert params into HF-style keys.
MOE_PARAM_HANDERS = {
    "qwen3_moe": _map_moe_params_qwen3_moe,
}
| {
"repo_id": "verl-project/verl",
"file_path": "verl/workers/engine/veomni/utils.py",
"license": "Apache License 2.0",
"lines": 90,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:tests/workers/rollout/rollout_vllm/test_vllm_abort.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test vLLM abort functionality.
Usage:
pytest tests/workers/rollout/rollout_vllm/test_vllm_abort.py -v -s
or
python tests/workers/rollout/rollout_vllm/test_vllm_abort.py
"""
import asyncio
import os
import time
from uuid import uuid4
def test_vllm_abort():
    """End-to-end abort test for the async vLLM rollout server.

    Starts several concurrent generation requests against a standalone rollout
    replica, aborts them mid-flight via ``abort_all_requests``, then asserts
    that every request finishes (aborted or completed), nothing times out, and
    the abort call itself completes in under a second.

    NOTE(review): requires local GPUs (GPUS_PER_NODE below) and a local copy of
    the model at MODEL_PATH — not runnable on a plain CI box.
    """
    # ==================== Configuration ====================
    MODEL_PATH = os.path.expanduser("~/models/Qwen/Qwen2.5-1.5B-Instruct")  # /root/models/Qwen/Qwen2.5-1.5B-Instruct
    GPUS_PER_NODE = 2
    TP_SIZE = 1
    ROLLOUT_NAME = "vllm"
    ABORT_DELAY = 0.5  # seconds to wait before aborting
    print("=" * 60)
    print("vLLM Abort Test")
    print("=" * 60)
    print(f"Model: {MODEL_PATH}")
    print(f"GPUs: {GPUS_PER_NODE}, TP Size: {TP_SIZE}")
    print(f"Abort Delay: {ABORT_DELAY}s")
    print("=" * 60)
    # ==================== Initialize Ray ====================
    print("\n[1] Initializing Ray...")
    import ray

    ray.init(
        runtime_env={
            "env_vars": {
                "TOKENIZERS_PARALLELISM": "true",
                "NCCL_DEBUG": "WARN",
                "VLLM_LOGGING_LEVEL": "INFO",
                "VLLM_USE_V1": "1",
            }
        },
        ignore_reinit_error=True,
    )
    try:
        # ==================== Create Config ====================
        print("\n[2] Creating config...")
        from hydra import compose, initialize_config_dir

        from verl.utils.tokenizer import normalize_token_ids

        # Resolve the config dir whether we run from the repo root or its parent.
        config_dir = os.path.abspath("verl/verl/trainer/config")
        if not os.path.exists(config_dir):
            config_dir = os.path.abspath("verl/trainer/config")
        with initialize_config_dir(config_dir=config_dir, version_base=None):
            config = compose(config_name="ppo_trainer")
        config.trainer.n_gpus_per_node = GPUS_PER_NODE
        config.trainer.nnodes = 1
        config.actor_rollout_ref.model.path = MODEL_PATH
        config.actor_rollout_ref.rollout.name = ROLLOUT_NAME
        config.actor_rollout_ref.rollout.mode = "async"
        config.actor_rollout_ref.rollout.tensor_model_parallel_size = TP_SIZE
        config.actor_rollout_ref.rollout.prompt_length = 512
        config.actor_rollout_ref.rollout.response_length = 512  # Longer for abort test
        # ==================== Create Rollout Server ====================
        print("\n[3] Creating rollout server (this may take a while)...")
        from verl.workers.rollout.replica import get_rollout_replica_class

        rollout_config = config.actor_rollout_ref.rollout
        model_config = config.actor_rollout_ref.model
        rollout_server_class = get_rollout_replica_class(ROLLOUT_NAME)
        server = rollout_server_class(
            replica_rank=0,
            config=rollout_config,
            model_config=model_config,
            gpus_per_node=GPUS_PER_NODE,
        )
        asyncio.run(server.init_standalone())
        server_handle = server._server_handle
        print(f"Server address: {server._server_address}")
        # ==================== Load Tokenizer ====================
        print("\n[4] Loading tokenizer...")
        from transformers import AutoTokenizer

        tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, trust_remote_code=True)
        # ==================== Prepare Prompts ====================
        print("\n[5] Preparing prompts (to ensure generation takes time)...")
        NUM_PROMPTS = 8
        prompts = [
            "Write a very long story about a brave knight and dragon.",
            "Explain the history of the Roman Empire in great detail.",
            "Describe quantum computing and its applications thoroughly.",
            "Write an essay about climate change and its global effects.",
            "Who won the Champions League in 2019?",
            "Write a detailed analysis of Shakespeare's Hamlet.",
            "Describe the process of photosynthesis in plants.",
            "Write about the French Revolution and its consequences.",
        ]
        all_prompt_ids = []
        for prompt in prompts[:NUM_PROMPTS]:
            messages = [{"role": "user", "content": prompt}]
            prompt_ids = normalize_token_ids(
                tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=True)
            )
            all_prompt_ids.append(prompt_ids)
        print(f"Prepared {NUM_PROMPTS} prompts")
        # ==================== Start Generations and Abort ====================
        print("\n[6] Starting generations and then aborting...")
        sampling_params = {
            "temperature": 1.0,
            "top_p": 1.0,
            "logprobs": False,
        }
        # Start all generations concurrently
        print(f"\n Starting {NUM_PROMPTS} generations...")
        generate_refs = []
        for i, prompt_ids in enumerate(all_prompt_ids):
            request_id = f"abort_test_{i}_{uuid4().hex[:8]}"
            ref = server_handle.generate.remote(
                request_id=request_id,
                prompt_ids=prompt_ids,
                sampling_params=sampling_params,
                image_data=None,
            )
            generate_refs.append((i, request_id, ref))
            print(f" Started request {i}: {request_id}")
        # Wait before aborting
        print(f"\n Waiting {ABORT_DELAY}s before abort...")
        time.sleep(ABORT_DELAY)
        # Call abort
        print(" Calling abort_all_requests...")
        abort_start = time.perf_counter()
        abort_result = ray.get(server_handle.abort_all_requests.remote())
        abort_time = time.perf_counter() - abort_start
        print(f" Abort took: {abort_time * 1000:.2f}ms")
        print(f" Abort result: {abort_result}")
        # Wait for all generations to finish
        print("\n Waiting for all generations to complete...")
        outputs = []
        for i, request_id, ref in generate_refs:
            try:
                output = ray.get(ref, timeout=10.0)
                outputs.append((i, request_id, output))
            except ray.exceptions.GetTimeoutError:
                print(f" Request {i} timed out!")
                outputs.append((i, request_id, None))
        # ==================== Print Results ====================
        print("\n" + "=" * 60)
        print("RESULTS")
        print("=" * 60)
        aborted_count = 0
        completed_count = 0
        timeout_count = 0
        for i, request_id, output in outputs:
            if output is None:
                timeout_count += 1
                print(f"[{i}] {request_id}: TIMEOUT")
            elif output.stop_reason == "aborted":
                aborted_count += 1
                print(f"[{i}] {request_id}: ABORTED ({len(output.token_ids)} tokens)")
                print(f"Partial Output: {tokenizer.decode(output.token_ids)}")
            else:
                completed_count += 1
                print(f"[{i}] {request_id}: COMPLETED ({output.stop_reason}, {len(output.token_ids)} tokens)")
                print(f"Full Output: {tokenizer.decode(output.token_ids)}")
        print(f"\nSummary: {aborted_count} aborted, {completed_count} completed, {timeout_count} timeout")
        print("\n" + "=" * 60)
        print(f"Abort result: {abort_result}")
        print("=" * 60)
        print("Abort test completed!")
        # Assertions for pytest
        assert timeout_count == 0, "No requests should timeout"
        assert aborted_count + completed_count == NUM_PROMPTS, "All requests should finish"
        assert "aborted_count" in abort_result, "Abort result should contain aborted_count"
        assert abort_time < 1.0, "Abort should be fast (< 1 second)"
    finally:
        print("\nShutting down Ray...")
        ray.shutdown()
# Allow direct execution outside pytest: `python tests/.../test_vllm_abort.py`.
if __name__ == "__main__":
    # Can still run as standalone script
    test_vllm_abort()
| {
"repo_id": "verl-project/verl",
"file_path": "tests/workers/rollout/rollout_vllm/test_vllm_abort.py",
"license": "Apache License 2.0",
"lines": 185,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:examples/data_preprocess/pokemon.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
"""
Preprocess the llamafactory/pokemon-gpt4o-captions dataset to parquet format
"""
import argparse
import os
import datasets
from verl.utils.hdfs_io import copy, makedirs
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--local_dir", default=None)
    parser.add_argument("--hdfs_dir", default=None)
    parser.add_argument("--local_dataset_path", default=None, help="The local path to the raw dataset, if it exists.")
    parser.add_argument(
        "--local_save_dir",
        default="~/data/pokemon-gpt4o-captions",
        help="The save directory for the preprocessed dataset.",
    )
    args = parser.parse_args()

    data_source = "llamafactory/pokemon-gpt4o-captions"
    # Prefer a local copy of the raw dataset when one was supplied; otherwise
    # pull it from the Hub.
    source = args.local_dataset_path if args.local_dataset_path is not None else data_source
    dataset = datasets.load_dataset(source)

    def map_fn(row: dict):
        """Convert a ShareGPT-style ``conversations`` column into chat ``messages``."""
        conversation = row.pop("conversations")
        messages = []
        for conv in conversation:
            if conv["from"] == "gpt":
                role = "assistant"
            elif conv["from"] == "human":
                role = "user"
            else:
                raise ValueError(f"Unknown role: {conv['from']}")
            messages.append({"role": role, "content": conv["value"]})
        row["messages"] = messages
        return row

    # Convert raw rows, then carve out a 10% test split.
    dataset = dataset["train"].map(map_fn, num_proc=16)
    dataset = dataset.train_test_split(test_size=0.1)
    train_dataset = dataset["train"]
    test_dataset = dataset["test"]

    hdfs_dir = args.hdfs_dir
    if args.local_dir is not None:
        print("Warning: Argument 'local_dir' is deprecated. Please use 'local_save_dir' instead.")
        local_save_dir = args.local_dir
    else:
        local_save_dir = args.local_save_dir

    train_dataset.to_parquet(os.path.join(local_save_dir, "train.parquet"))
    test_dataset.to_parquet(os.path.join(local_save_dir, "test.parquet"))

    if hdfs_dir is not None:
        makedirs(hdfs_dir)
        copy(src=local_save_dir, dst=hdfs_dir)
| {
"repo_id": "verl-project/verl",
"file_path": "examples/data_preprocess/pokemon.py",
"license": "Apache License 2.0",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
verl-project/verl:verl/utils/sglang/sglang_fp8_utils.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from verl.utils.fp8_utils import FP8QuantizerHelper
class SGLangFP8QuantizerHelper(FP8QuantizerHelper):
    """FP8 quantizer helper specialized for the SGLang rollout backend.

    Currently a thin subclass that reuses FP8QuantizerHelper behavior
    unchanged; it exists as an extension point for SGLang-specific
    quantization logic.
    """
    def __init__(self, quant_config):
        # quant_config: quantization configuration, forwarded as-is to the base helper.
        super().__init__(quant_config)
| {
"repo_id": "verl-project/verl",
"file_path": "verl/utils/sglang/sglang_fp8_utils.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/utils/megatron/router_replay_patch.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from enum import Enum
import torch
try:
from megatron.core.transformer.moe.moe_utils import (
apply_router_token_dropping,
compute_routing_scores_for_aux_loss,
group_limited_topk,
)
from megatron.core.transformer.moe.token_dispatcher import MoEAlltoAllTokenDispatcher
except ImportError:
warnings.warn("NPU not support router replay for now.", stacklevel=2)
MoEAlltoAllTokenDispatcher = None
from megatron.core.transformer.moe.router import TopKRouter
from megatron.core.transformer.transformer_config import TransformerConfig
# https://github.com/THUDM/slime/blob/main/slime/utils/routing_replay.py
class RouterReplayAction(Enum):
    """Operating mode for MoE router replay."""
    RECORD = "record"  # capture the router's top-k indices during forward
    REPLAY_FORWARD = "replay_forward"  # force previously recorded indices in forward
    REPLAY_BACKWARD = "replay_backward"  # force queued indices during recompute/backward (consumed FIFO)
class RouterReplay:
    """
    Records and replays MoE routing (top-k) decisions.

    Each instance corresponds to one MoE layer's router. All instances are
    tracked in the class-level ``router_instances`` registry so the static
    methods can drive recording/replay globally across layers.
    """

    # Registry of all per-layer instances, in router instantiation order.
    router_instances = []

    @staticmethod
    def set_replay_data(all_layers_topk_indices: list):
        """
        Hand each layer's replay tensor to its RouterReplay instance.

        :param all_layers_topk_indices: per-layer top-k index tensors, ordered
                                        the same way the routers were instantiated.
        """
        provided = len(all_layers_topk_indices)
        expected = len(RouterReplay.router_instances)
        if provided != expected:
            raise ValueError(
                f"The number of replay tensors ({provided}) "
                f"does not match the number of router instances ({expected})."
            )
        for indices, instance in zip(all_layers_topk_indices, RouterReplay.router_instances):
            instance.set_target_indices(indices)

    @staticmethod
    def get_recorded_data() -> list:
        """Collect each layer's recorded top-k indices, in layer order."""
        return [instance.get_recorded_indices() for instance in RouterReplay.router_instances]

    @staticmethod
    def clear_global_indices():
        """Drop recorded and target indices on every registered instance."""
        for instance in RouterReplay.router_instances:
            instance.clear_indices()

    def __init__(self):
        """Create the replay state for one MoE layer and register it globally."""
        self.target_topk_idx = None  # indices to force during forward replay
        self.recorded_topk_idx = None  # indices captured while recording
        self.router_replay_action = None  # current RouterReplayAction for this layer
        self.replay_backward_list = []  # FIFO of tensors consumed by backward replay
        RouterReplay.router_instances.append(self)

    def set_target_indices(self, topk_indices: torch.Tensor):
        """Store indices for forward replay and queue them for backward replay."""
        self.target_topk_idx = topk_indices
        self.replay_backward_list.append(topk_indices)

    def get_recorded_indices(self):
        """Return the indices captured during the last recorded forward."""
        return self.recorded_topk_idx

    def record_indices(self, topk_indices: torch.Tensor):
        """Capture the router's top-k indices for later replay."""
        self.recorded_topk_idx = topk_indices

    def clear_indices(self):
        """Reset recorded/target indices and the backward-replay queue."""
        self.recorded_topk_idx = None
        self.target_topk_idx = None
        self.replay_backward_list = []

    def set_router_replay_action(self, router_replay_action: RouterReplayAction):
        """Select the replay mode for this layer."""
        self.router_replay_action = router_replay_action

    def clear_router_replay_action(self):
        """Unset the replay mode for this layer."""
        self.router_replay_action = None

    @staticmethod
    def set_global_router_replay_action(router_replay_action: RouterReplayAction):
        """Select the replay mode on every registered instance."""
        for instance in RouterReplay.router_instances:
            instance.set_router_replay_action(router_replay_action)

    @staticmethod
    def clear_global_router_replay_action():
        """Unset the replay mode on every registered instance."""
        for instance in RouterReplay.router_instances:
            instance.clear_router_replay_action()
def _patched_topk_routing_with_score_function(
    logits: torch.Tensor,
    topk: int,
    use_pre_softmax: bool,
    num_groups: int,
    group_topk: int,
    score_function: str,
    expert_bias: torch.Tensor,
    fused: bool,
    router_replay: RouterReplay,
    scaling_factor: float,
):
    """
    Patched version of topk_routing_with_score_function that supports router replay.

    Depending on ``router_replay.router_replay_action`` the top-k expert
    selection is either computed normally (and optionally recorded), or
    overridden with previously recorded indices (forward/backward replay).

    Args:
        logits: Router logits of shape [num_tokens, num_experts].
        topk: Number of experts selected per token.
        use_pre_softmax: For ``score_function == "softmax"``, apply softmax
            before (True) or after (False) the top-k selection.
        num_groups: Number of expert groups for group-limited routing.
        group_topk: When truthy, use ``group_limited_topk`` instead of a plain
            ``torch.topk``.
        score_function: "softmax" or "sigmoid".
        expert_bias: Optional bias added to sigmoid scores for selection only.
        fused: Accepted for signature compatibility; this patched path does not
            use the fused kernel.
        router_replay: Per-layer RouterReplay instance, or None to disable.
        scaling_factor: Optional multiplier applied to the routed probabilities.

    Returns:
        Tuple (routing_probs, routing_map), both [num_tokens, num_experts];
        routing_map is a boolean token-to-expert assignment mask.
    """
    num_tokens, num_experts = logits.shape
    def _compute_topk(scores, topk, num_groups=None, group_topk=None):
        # Plain (non-replay) top-k selection, optionally group-limited.
        if group_topk:
            return group_limited_topk(
                scores=scores,
                topk=topk,
                num_tokens=num_tokens,
                num_experts=num_experts,
                num_groups=num_groups,
                group_topk=group_topk,
            )
        else:
            return torch.topk(scores, k=topk, dim=1)
    def compute_topk(scores, topk, num_groups=None, group_topk=None):
        # Default behavior if no replay is active
        routing_action = router_replay.router_replay_action if router_replay is not None else None
        if routing_action is None:
            return _compute_topk(scores, topk, num_groups=num_groups, group_topk=group_topk)
        if routing_action == RouterReplayAction.RECORD:
            # Compute normally, then stash the chosen indices for later replay.
            probs, top_indices = _compute_topk(scores, topk, num_groups=num_groups, group_topk=group_topk)
            if router_replay is not None:
                router_replay.record_indices(top_indices)
            return probs, top_indices
        elif routing_action == RouterReplayAction.REPLAY_FORWARD:
            if router_replay is None or router_replay.target_topk_idx is None:
                # Fallback if replay data is not available
                return _compute_topk(scores, topk, num_groups=num_groups, group_topk=group_topk)
            # Use the provided indices for replay
            top_indices = router_replay.target_topk_idx
            # Ensure indices are on the correct device
            top_indices = top_indices.to(scores.device)
            # Gather the scores for the replayed indices to get the probabilities
            probs = scores.gather(1, top_indices)
            return probs, top_indices
        elif routing_action == RouterReplayAction.REPLAY_BACKWARD:
            if router_replay is None or not router_replay.replay_backward_list:
                # Fallback if replay data is not available
                return _compute_topk(scores, topk, num_groups=num_groups, group_topk=group_topk)
            # Use the oldest queued indices (FIFO) for backward/recompute replay.
            top_indices = router_replay.replay_backward_list.pop(0)
            # Ensure indices are on the correct device
            top_indices = top_indices.to(scores.device)
            # Gather the scores for the replayed indices to get the probabilities
            probs = scores.gather(1, top_indices)
            return probs, top_indices
        else:  # Unknown action, fallback
            return _compute_topk(scores, topk, num_groups=num_groups, group_topk=group_topk)
    if score_function == "softmax":
        if use_pre_softmax:
            scores = torch.softmax(logits, dim=-1, dtype=torch.float32).type_as(logits)
            probs, top_indices = compute_topk(scores, topk, num_groups, group_topk)
        else:
            scores, top_indices = compute_topk(logits, topk, num_groups, group_topk)
            probs = torch.softmax(scores, dim=-1, dtype=torch.float32).type_as(logits)
    elif score_function == "sigmoid":
        scores = torch.sigmoid(logits.float()).type_as(logits)
        if expert_bias is not None:
            # Bias influences expert *selection* only; the returned
            # probabilities are gathered from the unbiased scores.
            scores_for_routing = scores + expert_bias
            _, top_indices = compute_topk(scores_for_routing, topk, num_groups, group_topk)
            scores = torch.gather(scores, dim=1, index=top_indices).type_as(logits)
        else:
            scores, top_indices = compute_topk(scores, topk, num_groups, group_topk)
        # Normalize over the selected experts; epsilon guards a zero sum.
        probs = scores / (scores.sum(dim=-1, keepdim=True) + 1e-20) if topk > 1 else scores
    else:
        raise ValueError(f"Invalid score_function: {score_function}")
    if scaling_factor:
        probs = probs * scaling_factor
    if torch.are_deterministic_algorithms_enabled():
        # index_put_ is used instead of scatter for deterministic execution.
        # build [num_tokens, num_experts] from [num_tokens, topk]
        routing_probs = torch.zeros_like(logits)
        rows = torch.arange(num_tokens, device=logits.device).unsqueeze(1)
        routing_probs.index_put_((rows, top_indices), probs, accumulate=False)
        routing_map = torch.zeros_like(logits, dtype=logits.dtype)
        routing_map.index_put_((rows, top_indices), torch.ones_like(probs, dtype=routing_map.dtype), accumulate=False)
        routing_map = routing_map.bool()
    else:
        # TODO Try using element-wise operations instead of scatter?
        routing_probs = torch.zeros_like(logits).scatter(1, top_indices, probs)
        routing_map = torch.zeros_like(logits).int().scatter(1, top_indices, 1).bool()
    return routing_probs, routing_map
def patched_routing(self, logits: torch.Tensor, *args, **kwargs):
    """Top-k routing function (patched onto TopKRouter for router-replay support).
    Args:
        logits (torch.Tensor): Logits tensor after gating; the first two dims
            are interpreted as (seq_len, batch) and flattened for routing.
    Returns:
        probs (torch.Tensor): The probabilities of token to experts assignment.
        routing_map (torch.Tensor): The mapping of token to experts assignment,
            with shape [num_tokens, num_experts].
    """
    seq_length, bsz = logits.shape[:2]
    # Flatten (seq, batch) into a single token dimension for routing.
    logits = logits.view(-1, self.config.num_moe_experts)
    # Apply Z-Loss
    logits = self.apply_z_loss(logits)
    # Calculate probs and routing_map for token dispatching
    if self.routing_type == "sinkhorn":
        probs, routing_map = self.sinkhorn_load_balancing(logits)
    else:
        # Replay-aware top-k routing; router_replay is None when replay is disabled.
        probs, routing_map = _patched_topk_routing_with_score_function(
            logits=logits,
            topk=self.topk,
            use_pre_softmax=self.config.moe_router_pre_softmax,
            num_groups=self.config.moe_router_num_groups,
            group_topk=self.config.moe_router_group_topk,
            scaling_factor=self.config.moe_router_topk_scaling_factor,
            score_function=self.score_function,
            expert_bias=self.expert_bias,
            fused=self.config.moe_router_fusion,
            router_replay=getattr(self, "router_replay", None),
        )
    # Apply token dropping to probs and routing_map.
    if self.config.moe_expert_capacity_factor is not None:
        probs, routing_map = apply_router_token_dropping(
            probs,
            routing_map,
            router_topk=self.topk,
            capacity_factor=self.config.moe_expert_capacity_factor,
            drop_policy=self.config.moe_token_drop_policy,
            pad_to_capacity=self.config.moe_pad_expert_input_to_capacity,
        )
    # Apply each aux loss type and attach aux loss autograd function to probs
    if self.training and torch.is_grad_enabled() and self.is_aux_loss_enabled():
        # Calculate scores and routing_map for aux loss
        routing_map_for_aux_loss, scores_for_aux_loss = compute_routing_scores_for_aux_loss(
            logits, self.topk, self.score_function, fused=self.config.moe_router_fusion
        )
        probs = self._apply_aux_loss(probs, scores_for_aux_loss, routing_map_for_aux_loss)
        probs = self._apply_seq_aux_loss(probs, scores_for_aux_loss, routing_map_for_aux_loss, seq_length, bsz)
        probs = self._apply_global_aux_loss(probs, scores_for_aux_loss, routing_map_for_aux_loss)
    # Update expert bias and tokens_per_expert
    # Prevent extra local tokens accumulation on evaluation or activation recomputation
    if self.enable_expert_bias and torch.is_grad_enabled():
        with torch.no_grad():
            self.local_tokens_per_expert += routing_map.sum(dim=0)
    return probs, routing_map
def apply_router_replay_patch():
    """
    Applies the monkey patch for MoE Router Replay functionality.
    This patch dynamically adds the 'enable_routing_replay' attribute to TransformerConfig
    and modifies the TopKRouter to support recording and replaying of routing decisions.
    Idempotent: the TopKRouter and dispatcher patches are applied at most once,
    guarded by the `_router_replay_patched` / `_preprocess_patched` markers.
    """
    print("Applying Router Replay Patch...")
    # Clear router instances to avoid state leakage between model initializations.
    RouterReplay.router_instances.clear()
    # Step 1: Patch TransformerConfig to include the feature flag
    if not hasattr(TransformerConfig, "enable_routing_replay"):
        # Add class attribute with default value
        TransformerConfig.enable_routing_replay = False
        # Store original __init__ method
        original_tf_config_init = TransformerConfig.__init__
        # Define new __init__ method that safely handles enable_routing_replay parameter
        def patched_tf_config_init(self, *args, **kwargs):
            # Simple solution: remove the unknown parameter before calling original constructor
            enable_routing_replay = kwargs.pop("enable_routing_replay", TransformerConfig.enable_routing_replay)
            # Call original constructor with remaining kwargs
            original_tf_config_init(self, *args, **kwargs)
            # Set the instance attribute
            self.enable_routing_replay = enable_routing_replay
        # Apply the patch
        TransformerConfig.__init__ = patched_tf_config_init
    # Step 2: Patch TopKRouter only once to ensure idempotency.
    if hasattr(TopKRouter, "_router_replay_patched"):
        return
    original_init = TopKRouter.__init__
    # Step 3: Define the new __init__ method
    def patched_init(self, *args, **kwargs):
        original_init(self, *args, **kwargs)
        # Attach a per-layer RouterReplay only when the feature flag is on.
        self.router_replay = None
        if self.config.enable_routing_replay:
            self.router_replay = RouterReplay()
    # Step 4: Patch MoEAlltoAllTokenDispatcher.preprocess to handle router replay
    # When router replay is enabled, duplicate indices in top_indices can cause
    # routing_map.sum() < num_tokens * topk, leading to split size mismatch in alltoall.
    if MoEAlltoAllTokenDispatcher is not None and not hasattr(MoEAlltoAllTokenDispatcher, "_preprocess_patched"):
        original_preprocess = MoEAlltoAllTokenDispatcher.preprocess
        def patched_preprocess(self, routing_map):
            """Patched preprocess that handles router replay correctly for alltoall dispatcher."""
            # Call original preprocess
            result = original_preprocess(self, routing_map)
            # Fix num_out_tokens when router replay is enabled
            if (
                getattr(self.config, "enable_routing_replay", False)
                and not self.drop_and_pad
                and self.config.moe_expert_capacity_factor is None
                and not (
                    getattr(self.config, "moe_router_padding_for_quantization", None)
                    or getattr(self.config, "moe_router_padding_for_fp8", None)
                )
            ):
                # With router replay, duplicate indices can reduce the actual routed
                # token count, so derive it from the routing map instead.
                self.num_out_tokens = int(routing_map.sum().item())
            return result
        MoEAlltoAllTokenDispatcher.preprocess = patched_preprocess
        MoEAlltoAllTokenDispatcher._preprocess_patched = True
    # Step 5: Apply the patches
    TopKRouter.__init__ = patched_init
    TopKRouter.routing = patched_routing
    TopKRouter._router_replay_patched = True
| {
"repo_id": "verl-project/verl",
"file_path": "verl/utils/megatron/router_replay_patch.py",
"license": "Apache License 2.0",
"lines": 319,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/utils/megatron/router_replay_utils.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Router Replay Utilities
Utilities for handling router replay functionality in Megatron models.
"""
import warnings
from typing import Optional
import torch
try:
from megatron.core.pipeline_parallel.utils import is_vp_first_stage, is_vp_last_stage
except ImportError:
warnings.warn("NPU not support router replay for now.", stacklevel=2)
pass
from megatron.core import parallel_state as mpu
from megatron.core.pipeline_parallel.schedules import get_schedule_table
from megatron.core.tensor_parallel import gather_from_sequence_parallel_region, scatter_to_sequence_parallel_region
from megatron.core.transformer.transformer_config import TransformerConfig
from megatron.core.transformer.transformer_layer import get_transformer_layer_offset
from verl.models.mcore.util import (
postprocess_packed_seqs,
preprocess_packed_seqs,
preprocess_thd_no_padding,
)
from verl.utils.device import get_device_name
from verl.utils.megatron.router_replay_patch import RouterReplay, RouterReplayAction
device_name = get_device_name()
# from megatron.core.transformer.transformer_block import get_num_layers_to_build
def get_num_layers_to_build(
    config: TransformerConfig, vp_stage: Optional[int] = None, pp_rank: Optional[int] = None
) -> int:
    """
    Determine the number of transformer layers to build for the current pipeline stage.
    (Appears to be adapted from Megatron's transformer_block.get_num_layers_to_build so
    that an explicit pp_rank can be supplied -- see the commented import above.)
    Args:
        config (TransformerConfig): Configuration object containing transformer model parameters.
        vp_stage (Optional[int]): Virtual pipeline stage number.
        pp_rank (Optional[int]): Pipeline parallel rank; defaults to the current PP rank.
    Returns:
        int: The number of layers to be built for the current pipeline stage.
    """
    # If we have a custom PP layout, straightforwardly
    # return the number of decoders in the layout array.
    if hasattr(config, "pipeline_model_parallel_layout") and config.pipeline_model_parallel_layout is not None:
        from megatron.core.transformer.enums import LayerType
        return config.pipeline_model_parallel_layout.get_num_layers_to_build(
            layer_type=LayerType.decoder, vp_stage=vp_stage
        )
    # Fallback for legacy tests.
    if pp_rank is None:
        pp_rank = mpu.get_pipeline_model_parallel_rank()
    is_first_pp_stage = pp_rank == 0
    is_last_pp_stage = pp_rank == config.pipeline_model_parallel_size - 1
    if config.num_layers_in_first_pipeline_stage is not None or config.num_layers_in_last_pipeline_stage is not None:
        assert not (config.account_for_embedding_in_pipeline_split or config.account_for_loss_in_pipeline_split), (
            " \
Does not support standalone embedding stage and standalone loss stage with uneven pp"
        )
        # Number of layers to distribute over rest of pipeline stages
        layers_to_distribute = config.num_layers
        # Number of pipeline stages left for distributing transformer layers
        pipeline_stages_left = config.pipeline_model_parallel_size
        # If the uneven first (last) pipeline stage is enabled, remove the specified number
        # of layers to calculate the number of layers on each middle pipeline stage.
        if config.num_layers_in_first_pipeline_stage is not None:
            layers_to_distribute -= config.num_layers_in_first_pipeline_stage
            pipeline_stages_left -= 1
        if config.num_layers_in_last_pipeline_stage is not None:
            layers_to_distribute -= config.num_layers_in_last_pipeline_stage
            pipeline_stages_left -= 1
        # If pp_size <= 2, we do not have any intermediate pipeline stages, and we do not
        # need to check if the left over layers are divisible by the left over stages.
        if pipeline_stages_left > 0:
            assert layers_to_distribute % pipeline_stages_left == 0, (
                "With uneven pipelineing the left over layers must be divisible by left over stages"
            )
            num_layers_per_pipeline_rank = layers_to_distribute // pipeline_stages_left
        else:
            num_layers_per_pipeline_rank = 0
        # If the uneven first (last) pipeline stage is enabled, return the specified number
        # of layers for all virtual pipeline parallel stages within the first (last) pipeline
        # parallel stage.
        if is_first_pp_stage and config.num_layers_in_first_pipeline_stage is not None:
            num_layers_per_pipeline_rank = config.num_layers_in_first_pipeline_stage
        if is_last_pp_stage and config.num_layers_in_last_pipeline_stage is not None:
            num_layers_per_pipeline_rank = config.num_layers_in_last_pipeline_stage
    else:
        # Include the embedding layer and loss layer into pipeline parallelism partition
        num_layers = config.num_layers
        if config.account_for_embedding_in_pipeline_split:
            num_layers += 1
        if config.account_for_loss_in_pipeline_split:
            num_layers += 1
        assert num_layers % config.pipeline_model_parallel_size == 0, (
            "num_layers should be divisible by pipeline_model_parallel_size"
        )
        num_layers_per_pipeline_rank = num_layers // config.pipeline_model_parallel_size
    vp_size = config.virtual_pipeline_model_parallel_size
    if vp_size is not None and config.pipeline_model_parallel_size > 1:
        # Interleaved pipeline parallelism:
        # Number of layers in each model chunk is the number of layers in the stage,
        # divided by the number of model chunks in a stage.
        # With 8 layers, 2 stages, and 4 model chunks, we want an assignment of
        # layers to stages like (each list is a model chunk):
        # Stage 0: [0] [2] [4] [6]
        # Stage 1: [1] [3] [5] [7]
        # With 8 layers, 2 stages, and 2 virtual stages, we want an assignment of
        # layers to stages like (each list is a model chunk):
        # Stage 0: [0, 1] [4, 5]
        # Stage 1: [2, 3] [6, 7]
        assert num_layers_per_pipeline_rank % vp_size == 0, (
            f"num_layers_per_pipeline_rank {num_layers_per_pipeline_rank} \
should be divisible by vp_size {vp_size}"
        )
        num_layers_per_virtual_stage = num_layers_per_pipeline_rank // vp_size
        num_layers_to_build = num_layers_per_virtual_stage
    else:
        # Non-interleaved pipeline parallelism:
        # Each stage gets a contiguous set of layers.
        num_layers_to_build = num_layers_per_pipeline_rank
    # The embedding (or loss) layer cannot function as a standalone transformer layer
    # Reduce the number of layers to construct by 1 on the first (or last) stage if the
    # embedding (or loss) layer is included in the pipeline parallelism partition and placement.
    if config.account_for_embedding_in_pipeline_split:
        if is_vp_first_stage(vp_stage, vp_size) and is_first_pp_stage:
            num_layers_to_build -= 1
            assert num_layers_to_build >= 0, "Not enough layers in the first virtual pipeline stage"
    if config.account_for_loss_in_pipeline_split:
        if is_vp_last_stage(vp_stage, vp_size) and is_last_pp_stage:
            num_layers_to_build -= 1
            assert num_layers_to_build >= 0, "Not enough layers in the last virtual pipeline stage"
    return num_layers_to_build
def is_moe_layer(tf_config, layer_idx):
    """Return whether the transformer layer at global index ``layer_idx`` is an MoE layer.

    ``moe_layer_freq`` semantics:
      - None / unset: every layer is an MoE layer (matches the contract documented
        by ``get_moe_num_layers_to_build``: "1 or unset" means all-MoE).
      - int N: layers whose global index is a multiple of N are MoE layers.
      - list of 0/1 flags: one entry per layer, 1 marks an MoE layer.

    Args:
        tf_config: Transformer config carrying ``moe_layer_freq``.
        layer_idx: Global (absolute) transformer layer index.

    Returns:
        bool: True if the layer is an MoE layer.

    Raises:
        ValueError: If ``moe_layer_freq`` has an unsupported type.
    """
    moe_layer_freq = getattr(tf_config, "moe_layer_freq", None)
    if moe_layer_freq is None:
        # Previously this raised ValueError; an unset frequency means an
        # all-MoE model, so treat every layer as MoE instead of crashing.
        return True
    if isinstance(moe_layer_freq, int):
        return layer_idx % moe_layer_freq == 0
    elif isinstance(moe_layer_freq, list):
        return moe_layer_freq[layer_idx] == 1
    else:
        raise ValueError(f"Unsupported moe_layer_freq type: {type(moe_layer_freq)}")
def get_moe_num_layers_to_build(
    config: TransformerConfig, vp_stage: Optional[int] = None, pp_rank: Optional[int] = None
) -> int:
    """Count the number of MoE layers assigned to the current rank.
    When ``moe_layer_freq`` is 1 or unset, every transformer layer is an MoE
    layer, so the count equals the total layer count. Otherwise only layers
    whose global index satisfies the frequency predicate are counted.
    Args:
        config: Megatron TransformerConfig providing layer layout information.
        vp_stage: Virtual-pipeline stage index (None defaults to current).
        pp_rank: Pipeline-parallel rank (None defaults to current).
    Returns:
        Number of MoE layers on the specified rank/stage.
    """
    # Local layer span: [first_global_idx, first_global_idx + local_count).
    local_count = get_num_layers_to_build(config, vp_stage=vp_stage, pp_rank=pp_rank)
    first_global_idx = get_transformer_layer_offset(config, vp_stage=vp_stage)
    num_moe = 0
    for global_idx in range(first_global_idx, first_global_idx + local_count):
        if is_moe_layer(config, global_idx):
            num_moe += 1
    return num_moe
def merge_router_topk_indices(attention_mask, input_ids, mini_layer_topk_idx_list, tf_config, vp_rank=None):
    """
    Merge recorded router top-k indices across sequence-parallel ranks for all router instances,
    then pack/unpack them to align with the original (batch, seq_len) layout and append the result.
    Args:
        attention_mask (torch.Tensor): Attention mask of shape [batch_size, seq_len]. Used to determine
            the valid token positions during pack/unpack.
        input_ids (torch.Tensor): Input token IDs of shape [batch_size, seq_len]. Used together with
            attention_mask for sequence packing/unpacking.
        mini_layer_topk_idx_list (list): A Python list to which the merged top-k indices tensor will be appended.
        tf_config: Megatron/Transformer engine configuration object. Used to locate router instances for
            the current micro-batch.
        vp_rank (Optional[int]): Virtual pipeline stage rank override. If None, the current VP rank from
            Megatron parallel state will be used.
    Returns:
        None: The function has side effects only; it appends a tensor of shape
            [1, dynamic_bs_all, layer_num, topk] to mini_layer_topk_idx_list.
    """
    with torch.no_grad():
        router_instances_list = RouterReplayHelper.get_micro_batch_router_list(tf_config, vp_rank)
        layers_topk_idx = []
        for router in router_instances_list:
            # uint8 keeps the gathered payload small; assumes expert ids fit in
            # one byte (num_experts <= 255) -- TODO confirm for large MoE models.
            layers_topk_idx.append(router.recorded_topk_idx.to(torch.uint8))  # dynamic_bs, topk
        # layer_num, dynamic_bs, topk -> dynamic_bs, layer_num, topk
        layers_topk_idx = torch.stack(layers_topk_idx).permute(1, 0, 2).to(device_name)
        # dynamic_bs, layer_num, topk -> 1, dynamic_bs_all, layer_num, topk
        layers_topk_idx = (
            gather_from_sequence_parallel_region(layers_topk_idx, tensor_parallel_output_grad=False)
            .unsqueeze(0)
            .contiguous()
        )
        batch_size, seq_len = attention_mask.shape[:2]
        # Re-derive packing metadata, then unpack back to the padded
        # [batch, seq_len] layout so the result aligns with sample order.
        _, packed_seq_params = preprocess_packed_seqs(input_ids, attention_mask, pre_process=True)
        layers_topk_idx = postprocess_packed_seqs(
            layers_topk_idx, packed_seq_params, attention_mask, batch_size, seq_len, post_process=True
        )
        mini_layer_topk_idx_list.append(layers_topk_idx.cpu())
def set_router_replay_data(layers_topk_idx, attention_mask, tf_config, vp_rank=None):
    """
    Scatter the packed router top-k indices back to sequence-parallel ranks and update each local
    RouterReplay instance with target indices for replay mode.
    This function prepares the per-layer, per-sample top-k routing decisions (recorded during an earlier
    forward) so that subsequent replay passes can follow exactly the same routing.
    Args:
        layers_topk_idx (torch.Tensor): Router top-k indices with shape [bs, max_seq_len, layer_num, topk].
            This should be the merged output produced by merge_router_topk_indices.
        attention_mask (torch.Tensor): Attention mask [batch_size, seq_len] used for pack/unpack alignment.
        tf_config: Megatron/Transformer engine configuration object.
        vp_rank (Optional[int]): Virtual pipeline stage rank override. If None, the current VP rank from
            Megatron parallel state will be used.
    Returns:
        None: The function updates internal RouterReplay instances in-place.
    """
    with torch.no_grad():
        # Nested tensors come from the THD (no-padding) path and need their own preprocessing.
        if layers_topk_idx.is_nested:
            layers_topk_idx_rmpad, _ = preprocess_thd_no_padding(layers_topk_idx, pre_process=True)
        else:
            layers_topk_idx_rmpad, _ = preprocess_packed_seqs(layers_topk_idx, attention_mask, pre_process=True)
        layers_topk_idx_rmpad = layers_topk_idx_rmpad.contiguous()  # 1, dynamic_bs_all, layer_num, topk
        # 1, dynamic_bs_split, layer_num, topk
        layers_topk_idx_rmpad_split = scatter_to_sequence_parallel_region(
            layers_topk_idx_rmpad.to(device_name).squeeze(dim=0)
        ).unsqueeze(dim=0)
        # dynamic_bs_split, layer_num, topk -> layer_num, dynamic_bs_split, topk
        layers_topk_idx_reshape = layers_topk_idx_rmpad_split.permute(0, 2, 1, 3).squeeze(
            dim=0
        )  # layer_num, dynamic_bs_all, topk
        local_rank_info = get_current_rank_layer_info(tf_config, vp_rank)
        offset, end = local_rank_info["start"], local_rank_info["end"]
        router_instances_list = RouterReplayHelper.get_micro_batch_router_list(tf_config, vp_rank)
        # When dim-0 covers all layers (e.g. R3, or R2 with all-MoE models),
        # index by absolute layer_idx; otherwise (R2 with mixed dense/MoE),
        # dim-0 only contains MoE layers, index by MoE-layer ordinal.
        index_by_layer = len(layers_topk_idx_reshape) == tf_config.num_layers
        # For R2: count MoE layers before `offset` as the starting position.
        moe_idx = sum(1 for i in range(offset) if is_moe_layer(tf_config, i))
        router_offset = 0
        for layer_idx in range(offset, end):
            # Dense layers have no router; skip them without consuming a router slot.
            if not is_moe_layer(tf_config, layer_idx):
                continue
            router = router_instances_list[router_offset]
            idx = layer_idx if index_by_layer else moe_idx
            # Routers replay with int64 indices regardless of the uint8 transport dtype.
            router.set_target_indices(layers_topk_idx_reshape[idx].to(torch.int64))
            router_offset += 1
            moe_idx += 1
def reorder_and_merge_vpp_layers(
    micro_batch_tensor_list,
    num_microbatches: int,
    vpp_size: int,
    microbatch_group_size_per_vp_stage: int,
) -> torch.Tensor:
    """
    Reorder and merge per-VPP layer blocks into a contiguous layer dimension.

    Given per-virtual-microbatch tensors shaped [bs_piece, max_token_len,
    layer_num_per_vpp, topk], this function:
    1) Builds the virtual-microbatch schedule table and groups the tensors so
       that entries belonging to the same model chunk (VPP stage) become
       contiguous along the batch dimension.
    2) Concatenates the per-chunk results along the layer dimension, producing
       [bs, max_token_len, layer_num, topk].

    Args:
        micro_batch_tensor_list: the list of input tensors, one per virtual microbatch.
        num_microbatches (int): Number of microbatches per pipeline stage (bs).
        vpp_size (int): Virtual pipeline parallel size (number of model chunks).
        microbatch_group_size_per_vp_stage (int): Number of consecutive microbatches
            processed per VPP stage.

    Returns:
        torch.Tensor: Output tensor of shape [bs, max_token_len, layer_num, topk].
    """
    # Schedule table maps virtual_microbatch_id -> (microbatch_id, model_chunk_id).
    schedule_table = get_schedule_table(num_microbatches, vpp_size, microbatch_group_size_per_vp_stage)
    # Bucket each virtual microbatch's tensor under its model chunk.
    chunk_buckets = [[] for _ in range(vpp_size)]
    for virtual_idx, (_microbatch_id, chunk_id) in enumerate(schedule_table):
        chunk_buckets[chunk_id].append(micro_batch_tensor_list[virtual_idx])
    # Concatenate within each chunk along batch (dim 0), then merge chunks along layers (dim 2).
    per_chunk_tensors = [torch.cat(bucket, dim=0) for bucket in chunk_buckets]
    return torch.cat(per_chunk_tensors, dim=2)
def get_current_rank_layer_info(tf_config, vp_rank=None):
    """Return the local transformer-layer range for the current pipeline rank.
    Args:
        tf_config: Megatron TransformerConfig used to derive the layer layout.
        vp_rank (Optional[int]): Virtual pipeline stage to query. If None,
            stage 0 is used.
    Returns:
        dict: {"start": first global layer index, "end": one-past-last global
            layer index, "count": number of local layers} for the queried
            (pp_rank, vp_stage).
    """
    if vp_rank is None:
        vp_rank = 0
    num_layers_to_build = get_num_layers_to_build(tf_config, vp_stage=vp_rank)
    offset = get_transformer_layer_offset(tf_config, vp_stage=vp_rank)
    local = {}
    local["start"] = offset
    local["end"] = offset + num_layers_to_build
    local["count"] = num_layers_to_build
    return local
def pp_gather(local_layers_router_map, tf_config):
    # TODO: Consider non-uniform layer allocation cases.
    """
    Gather local router maps from all PP ranks into a global router map.
    Args:
        local_layers_router_map (torch.Tensor): Local router map of shape
            [bs, max_seq_len, local_num_layers, topk].
        tf_config: Configuration providing pipeline_model_parallel_size.
    Returns:
        torch.Tensor: Global router map of shape [bs, max_seq_len, num_layers, topk] placed on CPU.
    """
    pp_size = tf_config.pipeline_model_parallel_size
    # Without pipeline parallelism the local map already covers all layers.
    if pp_size <= 1:
        return local_layers_router_map
    pp_group = mpu.get_pipeline_model_parallel_group()
    world_size = torch.distributed.get_world_size(pp_group)
    local_layers_router_map = local_layers_router_map.to(device_name)
    # Assumes every PP rank contributes the same shape -- see TODO above.
    layers_topk_idx_global_list = [
        torch.empty(
            size=local_layers_router_map.shape,
            dtype=local_layers_router_map.dtype,
            device=local_layers_router_map.device,
        )
        for _ in range(world_size)
    ]
    torch.distributed.all_gather(
        tensor=local_layers_router_map,
        tensor_list=layers_topk_idx_global_list,
        group=pp_group,
        async_op=False,
    )
    vp_size = tf_config.virtual_pipeline_model_parallel_size
    if vp_size is not None:
        # With interleaved VPP each rank's layer dim holds vp_size chunks;
        # compute per-(pp, vp) offsets so chunks can be re-ordered globally.
        vpp_router_map_offset = [[] for _ in range(pp_size)]
        for pp_stage in range(pp_size):
            vpp_router_map_offset[pp_stage].append(0)
            for vp_stage in range(vp_size):
                num_layers_to_build = get_moe_num_layers_to_build(tf_config, vp_stage, pp_stage)
                vpp_router_map_offset[pp_stage].append(num_layers_to_build + vpp_router_map_offset[pp_stage][-1])
        layers_topk_idx_global = []
        # Global layer order interleaves: vp stage outer, pp stage inner.
        for vp_stage in range(vp_size):
            for pp_stage in range(pp_size):
                piece = slice(vpp_router_map_offset[pp_stage][vp_stage], vpp_router_map_offset[pp_stage][vp_stage + 1])
                layers_topk_idx_global.append(layers_topk_idx_global_list[pp_stage][:, :, piece, :])
        global_router_map = torch.cat(layers_topk_idx_global, dim=2).to("cpu")
    else:
        # Without VPP, rank order along the layer dim is already the global order.
        global_router_map = torch.cat(layers_topk_idx_global_list, dim=2).to("cpu")
    return global_router_map
class RouterReplayHelper:
    """Helper class to query router replay state and locate local RouterReplay instances."""

    @staticmethod
    def get_micro_batch_router_list(tf_config, vp_rank=None):
        """
        Return the list of RouterReplay instances corresponding to the current micro-batch and local
        (pp_rank, vp_stage) layer range.

        When virtual pipeline (VPP) is enabled, the offset of this VP stage is computed by summing
        the layer counts of all preceding VP stages. The returned slice is taken from the global
        RouterReplay.router_instances list.

        Args:
            tf_config: Configuration object used to compute layer assignments.
            vp_rank (Optional[int]): Explicit virtual pipeline stage to query. Defaults to stage 0
                when VPP is enabled and no stage is given.

        Returns:
            list: A contiguous sublist of RouterReplay.router_instances for the local layer range.
        """
        vp_size = tf_config.virtual_pipeline_model_parallel_size
        if vp_size is not None:
            vp_rank = 0 if vp_rank is None else vp_rank
            offset = 0
            # Accumulate the layer counts of all VP stages that precede vp_rank to
            # locate where this stage's routers start in the global instance list.
            for pre_vp_stage in range(vp_size):
                if pre_vp_stage == vp_rank:
                    break
                offset += get_moe_num_layers_to_build(tf_config, pre_vp_stage)
        else:
            offset = 0
        # NOTE(review): when VPP is disabled, vp_rank may still be None here —
        # assumes get_num_layers_to_build accepts None; confirm against its signature.
        num_layers_to_build = get_num_layers_to_build(tf_config, vp_rank)
        router_instances_list = RouterReplay.router_instances[offset : offset + num_layers_to_build]
        return router_instances_list

    @staticmethod
    def is_r2_record_action(tf_config, vp_rank=None) -> bool:
        """Return True if the current router_replay_action is RECORD (R2) for the local router instances.

        This inspects the first local RouterReplay instance's router_replay_action and compares it to
        RouterReplayAction.RECORD.
        """
        router_instances_list = RouterReplayHelper.get_micro_batch_router_list(tf_config, vp_rank)
        # bool(...) ensures an empty instance list yields False rather than leaking [] as the result.
        return (
            bool(router_instances_list)
            and router_instances_list[0].router_replay_action == RouterReplayAction.RECORD
        )

    @staticmethod
    def is_replay_forward_action(tf_config, vp_rank=None) -> bool:
        """Return True if the current router_replay_action is REPLAY_FORWARD for the local router instances.

        This inspects the first local RouterReplay instance's router_replay_action and compares it to
        RouterReplayAction.REPLAY_FORWARD.
        """
        router_instances_list = RouterReplayHelper.get_micro_batch_router_list(tf_config, vp_rank)
        # bool(...) ensures an empty instance list yields False rather than leaking [] as the result.
        return (
            bool(router_instances_list)
            and router_instances_list[0].router_replay_action == RouterReplayAction.REPLAY_FORWARD
        )

    @staticmethod
    def is_replay_backward_action(tf_config, vp_rank=None) -> bool:
        """Return True if the current router_replay_action is REPLAY_BACKWARD for the local router instances.

        This inspects the first local RouterReplay instance's router_replay_action and compares it to
        RouterReplayAction.REPLAY_BACKWARD.
        """
        router_instances_list = RouterReplayHelper.get_micro_batch_router_list(tf_config, vp_rank)
        # bool(...) ensures an empty instance list yields False rather than leaking [] as the result.
        return (
            bool(router_instances_list)
            and router_instances_list[0].router_replay_action == RouterReplayAction.REPLAY_BACKWARD
        )
| {
"repo_id": "verl-project/verl",
"file_path": "verl/utils/megatron/router_replay_utils.py",
"license": "Apache License 2.0",
"lines": 416,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/trainer/sft_trainer_ray.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from functools import partial
from tensordict.tensorclass import NonTensorData
os.environ["NCCL_DEBUG"] = "WARN"
os.environ["TOKENIZERS_PARALLELISM"] = "true"
import logging
import hydra
import ray
import torch
import torch.distributed
from omegaconf import OmegaConf
from torch.utils.data import DistributedSampler
from torchdata.stateful_dataloader import StatefulDataLoader
from tqdm import tqdm
from verl.utils import tensordict_utils as tu
from verl.utils.checkpoint import CheckpointHandler, OrchestrationMode
from verl.utils.dataset.dataset_utils import SFTTensorCollator
from verl.utils.dataset.multiturn_sft_dataset import MultiTurnSFTDataset
from verl.utils.device import auto_set_device, get_device_name
from verl.utils.logger import log_with_rank
from verl.utils.tracking import Tracking
from verl.workers.engine_workers import TrainingWorker
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_SFT_LOGGING_LEVEL", "WARN"))
class SFTTrainer:
    """Ray-orchestrated supervised fine-tuning trainer.

    Builds typed configs, datasets, dataloaders, a Ray worker group running the
    training engine, and a checkpoint handler, then drives the
    train / validate / save loop in :meth:`fit`.
    """

    def __init__(
        self,
        config,
    ):
        self.config = config

        self._build_config()
        self._build_dataset()
        self._build_dataloader()
        self._build_engine()
        self._build_ckpt_handler()

        # Initialize resume-related variables (returns 0 when starting fresh).
        self.resume_global_step = self.ckpt_handler.load_checkpoint()

        self.device_name = self.config.trainer.device

        print(self.config)

    def _build_ckpt_handler(self):
        """Create the CheckpointHandler that saves/restores engine and dataloader state."""
        resume_mode = getattr(self.config.trainer, "resume_mode", "auto")
        resume_from_path = getattr(self.config.trainer, "resume_from_path", None)
        max_ckpt_to_keep = getattr(self.config.trainer, "max_ckpt_to_keep", None)
        default_hdfs_dir = getattr(self.config.trainer, "default_hdfs_dir", None)

        self.ckpt_handler = CheckpointHandler(
            engine=self.training_client,
            train_dataloader=self.train_dataloader,
            default_local_dir=self.config.trainer.default_local_dir,
            max_ckpt_to_keep=max_ckpt_to_keep,
            default_hdfs_dir=default_hdfs_dir,
            resume_mode=resume_mode,
            resume_from_path=resume_from_path,
            mode=OrchestrationMode.RAY,
        )

    def _build_config(self):
        """Convert OmegaConf config sections into typed dataclass configs."""
        from verl.utils.config import omega_conf_to_dataclass

        self.model_config = omega_conf_to_dataclass(self.config.model)
        self.engine_config = omega_conf_to_dataclass(self.config.engine)
        self.optimizer_config = omega_conf_to_dataclass(self.config.optim)
        self.checkpoint_config = omega_conf_to_dataclass(self.config.checkpoint)
        self.profiler_config = omega_conf_to_dataclass(self.config.profiler)

        # check profile interval
        self.profiler_interval = self.config.trainer.profile_interval
        self._validate_profiler_interval()

    def _validate_profiler_interval(self):
        """Validate the [start, end] profile steps; a negative start implies profiling is disabled."""
        assert len(self.profiler_interval) == 2
        self.start_profile_step = self.profiler_interval[0]
        self.end_profile_step = self.profiler_interval[1]
        assert self.end_profile_step >= self.start_profile_step
        if self.start_profile_step < 0:
            assert self.end_profile_step < 0

    def _build_engine(self):
        """Spin up the Ray worker group that runs the training engine."""
        from verl.workers.engine_workers import TrainingWorkerConfig
        from verl.workers.utils.losses import sft_loss

        self.loss_fn = partial(sft_loss, config=None)

        config = TrainingWorkerConfig(
            model_type="language_model",
            model_config=self.model_config,
            engine_config=self.engine_config,
            optimizer_config=self.optimizer_config,
            checkpoint_config=self.checkpoint_config,
            profiler_config=self.profiler_config,
        )

        # create resource pool and worker group
        from verl.single_controller.ray import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup

        n_gpus_per_node = self.config.trainer.n_gpus_per_node
        nnodes = self.config.trainer.nnodes

        self.resource_pool = RayResourcePool(process_on_nodes=[n_gpus_per_node] * nnodes)
        ray_cls_with_init = RayClassWithInitArgs(ray.remote(TrainingWorker), config=config)
        self.training_client = RayWorkerGroup(
            resource_pool=self.resource_pool,
            ray_cls_with_init=ray_cls_with_init,
            device_name=self.config.trainer.device,
        )

        self.training_client.set_loss_fn(loss_fn=self.loss_fn)
        self.training_client.reset()

    def _build_dataset(self):
        """Construct the train dataset and, when val_files is set, the validation dataset."""
        config = self.config
        tokenizer = self.model_config.tokenizer
        processor = self.model_config.processor
        train_dataset = create_sft_dataset(
            config.data.train_files,
            config.data,
            tokenizer,
            processor=processor,
            max_samples=config.data.get("train_max_samples", -1),
        )
        if config.data.val_files:
            val_dataset = create_sft_dataset(
                config.data.val_files,
                config.data,
                tokenizer,
                processor=processor,
                max_samples=config.data.get("val_max_samples", -1),
            )
        else:
            val_dataset = None

        self.train_dataset, self.val_dataset = train_dataset, val_dataset

    def _build_dataloader(self):
        """Build stateful train/val dataloaders and derive step/save/test schedules."""
        # build dataset
        config = self.config
        # build dataloader
        # Use data parallel rank and size instead of global rank and world size
        # Set pin_memory_device when pin_memory is enabled.
        device_name = get_device_name()

        # The single-controller driver sees the data as one DP replica; the worker
        # group handles sharding internally.
        dp_rank = 0
        dp_size = 1

        self.train_sampler = DistributedSampler(
            self.train_dataset, shuffle=True, num_replicas=dp_size, rank=dp_rank, drop_last=True
        )

        self.global_batch_size = config.data.train_batch_size
        self.train_batch_size_per_dp = self.global_batch_size // dp_size

        self.collate_fn = SFTTensorCollator(config.data.pad_mode)

        self.train_dataloader = StatefulDataLoader(
            dataset=self.train_dataset,
            batch_size=self.train_batch_size_per_dp,
            sampler=self.train_sampler,
            collate_fn=self.collate_fn,
            num_workers=8,
            pin_memory=False,
            drop_last=True,
            pin_memory_device=device_name,
        )

        if self.val_dataset:
            self.val_sampler = DistributedSampler(
                self.val_dataset, shuffle=False, num_replicas=dp_size, rank=dp_rank, drop_last=True
            )
            self.val_dataloader = StatefulDataLoader(
                dataset=self.val_dataset,
                batch_size=self.train_batch_size_per_dp,
                sampler=self.val_sampler,
                collate_fn=self.collate_fn,
                num_workers=8,
                pin_memory=False,
                drop_last=True,
                pin_memory_device=device_name,
            )
        else:
            self.val_dataloader = None

        # update total training steps (explicit config wins over epochs * steps_per_epoch)
        if self.config.trainer.total_training_steps is not None:
            self.total_training_steps = self.config.trainer.total_training_steps
        else:
            self.total_training_steps = len(self.train_dataloader) * self.config.trainer.total_epochs

        self.optimizer_config.total_training_steps = self.total_training_steps
        self.steps_per_epoch = len(self.train_dataloader)

        # manage save and test frequency ("after_each_epoch" resolves to steps_per_epoch)
        self.save_freq = self.config.trainer.save_freq
        if self.save_freq == "after_each_epoch":
            self.save_freq = self.steps_per_epoch

        self.test_freq = self.config.trainer.test_freq
        if self.test_freq == "after_each_epoch":
            self.test_freq = self.steps_per_epoch

    def _get_batch_seqlens(self, data):
        """Return per-sample sequence lengths for a (possibly nested) batch tensor."""
        # Nested tensors encode lengths in their offsets; padded batches use the mask.
        is_nested = data["input_ids"].is_nested
        if is_nested:
            batch_seqlens: torch.Tensor = data["input_ids"].offsets().diff()
        else:
            batch_seqlens: torch.Tensor = data["attention_mask"].sum(dim=-1)
        return batch_seqlens

    def fit(self):
        """Run the full training loop: train, periodically validate, and checkpoint."""
        tracking = Tracking(
            project_name=self.config.trainer.project_name,
            experiment_name=self.config.trainer.experiment_name,
            default_backend=self.config.trainer.logger,
            config=OmegaConf.to_container(self.config, resolve=True),
        )

        global_step = self.resume_global_step  # Start from resumed step
        last_valid_metric = None

        log_with_rank(
            f"Total training steps: {self.total_training_steps},",
            logger=logger,
            rank=0,
            log_only_rank_0=True,
        )

        # With StatefulDataLoader, we don't need to manually calculate epochs and steps
        # The dataloader will automatically resume from where it left off
        if global_step > 0:
            log_with_rank(
                f"StatefulDataLoader will automatically resume from global step: {global_step}",
                logger=logger,
                rank=0,
                log_only_rank_0=True,
            )

        # Calculate which epoch we're starting from for sampler.set_epoch()
        start_epoch = global_step // self.steps_per_epoch

        meta_info = {
            "use_remove_padding": self.config.model.use_remove_padding,
            "use_dynamic_bsz": self.config.data.use_dynamic_bsz,
            "max_token_len_per_gpu": self.config.data.max_token_len_per_gpu,
            "micro_batch_size_per_gpu": self.config.data.micro_batch_size_per_gpu,
            "temperature": 1.0,
            "global_batch_size": self.global_batch_size,
            "pad_mode": self.config.data.pad_mode,
            "pad_token_id": self.model_config.tokenizer.pad_token_id,
        }

        # NOTE(review): train_time is reported at the end but never accumulated.
        train_time = 0
        total_tokens = 0

        for epoch in range(start_epoch, self.config.trainer.total_epochs):
            self.train_sampler.set_epoch(epoch=epoch)

            for step_in_epoch, data in enumerate(
                tqdm(
                    self.train_dataloader,
                    initial=global_step % self.steps_per_epoch if epoch == start_epoch else 0,
                    total=self.steps_per_epoch,
                    desc=f"Epoch {epoch + 1}/{self.config.trainer.total_epochs}",
                )
            ):
                global_step += 1
                # construct tensordict
                data = tu.get_tensordict(tensor_dict=data, non_tensor_dict=meta_info)
                batch_seqlens = self._get_batch_seqlens(data=data).tolist()
                # this is necessary. Otherwise, it is interpreted as NonTensorStack
                batch_seqlens_ntd = NonTensorData(batch_seqlens)
                tu.assign_non_tensor(data, update_lr_scheduler=True, global_token_num=batch_seqlens_ntd)

                # start profile in SPMD mode
                if global_step == self.start_profile_step:
                    self.training_client.start_profile()

                # train on one batch
                output = self.training_client.train_batch(data)
                output = output.get()

                if global_step == self.end_profile_step:
                    self.training_client.stop_profile()

                metrics = tu.get(output, "metrics")
                # TODO: we can actually accumulate metrics for N steps and perform aggregate metrics
                metrics["train/loss"] = metrics.pop("loss")
                metrics["train/grad_norm"] = metrics.pop("grad_norm")
                metrics["train/lr"] = metrics.pop("lr")
                metrics["train/mfu"] = metrics.pop("mfu")
                metrics["train/global_tokens"] = torch.sum(torch.tensor(batch_seqlens, device=self.device_name)).item()
                total_tokens += metrics["train/global_tokens"]
                metrics["train/total_tokens(B)"] = total_tokens / 1e9

                tracking.log(data=metrics, step=global_step)

                is_last_step = global_step >= self.total_training_steps
                is_valid_step = global_step % self.test_freq == 0
                is_save_step = global_step % self.save_freq == 0

                # Validate at the last step or every `test_freq` steps, but only when a
                # validation dataloader exists. (Previously, a test_freq-triggered
                # validation with no val set would iterate over None and crash.)
                if self.val_dataloader is not None and (is_last_step or (self.test_freq > 0 and is_valid_step)):
                    # Perform validation
                    val_losses = []
                    for val_data in self.val_dataloader:
                        val_data = tu.get_tensordict(tensor_dict=val_data, non_tensor_dict=meta_info)
                        output = self.training_client.infer_batch(val_data)
                        output = output.get()
                        metrics = tu.get(output, "metrics")
                        val_losses.append(metrics["loss"])
                    val_loss = torch.mean(torch.tensor(val_losses, device=self.device_name))
                    metric = {"val/loss": val_loss.detach().item()}
                    tracking.log(data=metric, step=global_step)
                    last_valid_metric = metric

                if is_last_step or (self.save_freq > 0 and is_save_step):
                    self.ckpt_handler.save_checkpoint(step=global_step)

                if is_last_step:
                    print(f"Total time for train steps: {train_time:.2f}s")
                    print(f"Final validation metrics: {last_valid_metric}")
                    return
def run_sft(config):
    """Initialize Ray, then construct an :class:`SFTTrainer` and run it to completion."""
    ray.init()
    SFTTrainer(config=config).fit()
@hydra.main(config_path="config", config_name="sft_trainer_engine", version_base=None)
def main(config):
    """Hydra CLI entry point: normalize the device setting, then launch SFT training."""
    # Automatically set `config.trainer.device = npu` when running on Ascend NPU.
    auto_set_device(config)
    run_sft(config)
def create_sft_dataset(data_paths, data_config, tokenizer, processor, max_samples=-1):
    """Instantiate the SFT dataset described by *data_config*.

    A user-supplied dataset class is loaded when ``data_config.custom_cls.path``
    is set; otherwise the default multi-turn SFT dataset is used.
    """
    custom_path = data_config.custom_cls.get("path", None)
    if custom_path:
        from verl.utils.import_utils import load_extern_type

        dataset_cls = load_extern_type(custom_path, data_config.custom_cls.name)
    else:
        # Default to multi-turn dataset
        dataset_cls = MultiTurnSFTDataset

    return dataset_cls(
        parquet_files=data_paths,
        tokenizer=tokenizer,
        config=data_config,
        processor=processor,
        max_samples=max_samples,
    )
# Script entry point: defer to the Hydra-decorated main().
if __name__ == "__main__":
    main()
| {
"repo_id": "verl-project/verl",
"file_path": "verl/trainer/sft_trainer_ray.py",
"license": "Apache License 2.0",
"lines": 316,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:tests/special_distributed/test_torch_functional.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
from verl.utils.torch_functional import allgather_dict_into_dict
if __name__ == "__main__":
    # Launched via torchrun; gloo backend so the test runs without GPUs.
    torch.distributed.init_process_group(backend="gloo")
    local_rank = int(os.environ["LOCAL_RANK"])
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])
    # Each rank contributes rank-shifted values; the gather should stack them rank-major.
    metrics_dict = {"loss": [0 + rank, 1 + rank, 2 + rank], "grad_norm": rank}
    result = allgather_dict_into_dict(data=metrics_dict, group=None)
    # NOTE(review): expected values assume WORLD_SIZE == 4 — confirm the launcher config.
    assert result["loss"] == [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5]]
    assert result["grad_norm"] == [0, 1, 2, 3]
    print(result)
| {
"repo_id": "verl-project/verl",
"file_path": "tests/special_distributed/test_torch_functional.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:verl/utils/chat_template.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
import logging
import os
from verl.utils.tokenizer import normalize_token_ids
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
def initialize_system_prompt(tokenizer, **apply_chat_template_kwargs) -> list[int]:
    """
    Initialize system prompt tokens for chat templates that support them.

    The system prompt is recovered by diffing the tokenization of one user turn
    against two user turns: the extra turn contributes ``len(token2) - len(token1)``
    tokens, so everything in ``token1`` before that per-turn suffix is the system prompt.

    Args:
        tokenizer: The tokenizer with a chat template
        **apply_chat_template_kwargs: Additional arguments forwarded to apply_chat_template
            (previously accepted but silently ignored — now actually forwarded)

    Returns:
        List of token IDs for the system prompt, or empty list if not supported
    """
    token1 = normalize_token_ids(
        tokenizer.apply_chat_template(
            [{"role": "user", "content": ""}],
            add_generation_prompt=False,
            tokenize=True,
            **apply_chat_template_kwargs,
        )
    )
    token2 = normalize_token_ids(
        tokenizer.apply_chat_template(
            [{"role": "user", "content": ""}] * 2,
            add_generation_prompt=False,
            tokenize=True,
            **apply_chat_template_kwargs,
        )
    )
    # get system prompt tokens: strip one user turn's worth of tokens from the end.
    # NOTE(review): if the template adds no per-turn tokens (len(token2) == len(token1)),
    # this slice is token1[:0] == [] — matches the documented "empty list" fallback.
    system_prompt = token1[: -(len(token2) - len(token1))]
    return system_prompt
def extract_system_prompt_and_generation(tokenizer, **apply_chat_template_kwargs):
    """Return ``(system_prompt, generate_prompt)`` token lists for *tokenizer*'s chat template.

    ``system_prompt`` is recovered by diffing one user turn against two user turns
    (the per-turn token count is ``len(token2) - len(token1)``); ``generate_prompt``
    is the suffix appended by ``add_generation_prompt=True``.

    Args:
        tokenizer: The tokenizer with a chat template.
        **apply_chat_template_kwargs: Additional arguments forwarded to
            ``apply_chat_template`` (backward-compatible addition; empty by default).

    Returns:
        tuple[list[int], list[int]]: (system prompt token IDs, generation prompt token IDs)
    """
    token1 = normalize_token_ids(
        tokenizer.apply_chat_template(
            [{"role": "user", "content": ""}],
            add_generation_prompt=False,
            tokenize=True,
            **apply_chat_template_kwargs,
        )
    )
    token2 = normalize_token_ids(
        tokenizer.apply_chat_template(
            [{"role": "user", "content": ""}] * 2,
            add_generation_prompt=False,
            tokenize=True,
            **apply_chat_template_kwargs,
        )
    )
    # get system prompt tokens
    system_prompt = token1[: -(len(token2) - len(token1))]

    # get generate prompt tokens
    token3 = normalize_token_ids(
        tokenizer.apply_chat_template(
            [{"role": "user", "content": ""}],
            add_generation_prompt=True,
            tokenize=True,
            **apply_chat_template_kwargs,
        )
    )
    generate_prompt = token3[len(token1) :]
    return system_prompt, generate_prompt
| {
"repo_id": "verl-project/verl",
"file_path": "verl/utils/chat_template.py",
"license": "Apache License 2.0",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
verl-project/verl:tests/single_controller/test_split_resource_pool.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import ray
import torch
from verl import DataProto
from verl.single_controller.base import Worker
from verl.single_controller.base.decorator import Dispatch, register
from verl.single_controller.ray.base import (
RayClassWithInitArgs,
RayResourcePool,
RayWorkerGroup,
split_resource_pool,
)
from verl.utils.device import get_device_name, get_nccl_backend
@ray.remote
class Actor(Worker):
    """Toy data-parallel worker: offsets a batch tensor by (rank + worker_id)."""

    def __init__(self, worker_id) -> None:
        super().__init__()
        self.worker_id = worker_id
        # Allocate a throwaway tensor so each worker actually claims device memory.
        self.temp_tensor = torch.rand(4096, 4096).to(get_device_name())
        if not torch.distributed.is_initialized():
            env = os.environ
            rank = int(env.get("RANK", 0))
            world_size = int(env.get("WORLD_SIZE", 1))
            torch.distributed.init_process_group(backend=get_nccl_backend(), world_size=world_size, rank=rank)

    @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
    def add(self, data: DataProto):
        """Add this worker's (rank + worker_id) offset to data.batch["a"] in place and return it."""
        data.batch["a"] += self.rank + self.worker_id
        return data
def test_split_resource_pool_with_split_size():
    """Splitting an 8-GPU pool into equal halves yields two independent 4-worker groups."""
    ray.init()
    device = get_device_name()
    # assume we have 2 nodes, with 4 GPUs each
    pool = RayResourcePool(process_on_nodes=[4, 4])
    pool.get_placement_groups(device_name=device)
    # first 4 gpus for actor_1, last 4 gpus for actor_2
    pool_a, pool_b = split_resource_pool(resource_pool=pool, split_size=4)

    group_a = RayWorkerGroup(
        resource_pool=pool_a,
        ray_cls_with_init=RayClassWithInitArgs(cls=Actor, worker_id=0),
        device_name=device,
    )
    group_b = RayWorkerGroup(
        resource_pool=pool_b,
        ray_cls_with_init=RayClassWithInitArgs(cls=Actor, worker_id=100),
        device_name=device,
    )

    assert group_a.world_size == 4
    assert group_b.world_size == 4

    data = DataProto.from_dict({"a": torch.zeros(8)})
    out_a = group_a.add(data)
    out_b = group_b.add(data)

    assert out_a.batch["a"].tolist() == [0, 0, 1, 1, 2, 2, 3, 3]
    assert out_b.batch["a"].tolist() == [100, 100, 101, 101, 102, 102, 103, 103]

    ray.shutdown()
def test_split_resource_pool_with_split_size_list():
    """An uneven [2, 6] split over 4 x 2-GPU nodes yields groups of 2 and 6 workers."""
    ray.init()
    device = get_device_name()
    # assume we have 4 nodes, with 2 GPUs each
    pool = RayResourcePool(process_on_nodes=[2, 2, 2, 2])
    pool.get_placement_groups(device_name=device)
    # first 2 gpus for actor_1, last 6 gpus for actor_2
    pool_a, pool_b = split_resource_pool(
        resource_pool=pool,
        split_size=[2, 6],
    )

    group_a = RayWorkerGroup(
        resource_pool=pool_a,
        ray_cls_with_init=RayClassWithInitArgs(cls=Actor, worker_id=0),
        device_name=device,
    )
    group_b = RayWorkerGroup(
        resource_pool=pool_b,
        ray_cls_with_init=RayClassWithInitArgs(cls=Actor, worker_id=100),
        device_name=device,
    )

    assert group_a.world_size == 2
    assert group_b.world_size == 6

    out_a = group_a.add(DataProto.from_dict({"a": torch.zeros(4)}))
    out_b = group_b.add(DataProto.from_dict({"a": torch.zeros(6)}))

    print(out_a.batch["a"].tolist())
    print(out_b.batch["a"].tolist())
    assert out_a.batch["a"].tolist() == [0, 0, 1, 1]
    assert out_b.batch["a"].tolist() == [100, 101, 102, 103, 104, 105]

    ray.shutdown()
def test_split_resource_pool_with_split_size_list_cross_nodes():
    """Split sizes that do not align with node boundaries: [2, 6] over 2 nodes x 4 GPUs."""
    ray.init()
    # assume we have 2 nodes, with 4 GPUs each (so the [2, 6] split crosses a node boundary)
    global_resource_pool = RayResourcePool(process_on_nodes=[4, 4])
    global_resource_pool.get_placement_groups(device_name=get_device_name())
    # first 2 gpus for actor_1, last 6 gpus for actor_2
    actor_1_resource_pool, actor_2_resource_pool = split_resource_pool(
        resource_pool=global_resource_pool,
        split_size=[2, 6],
    )
    actor_cls_1 = RayClassWithInitArgs(cls=Actor, worker_id=0)
    actor_cls_2 = RayClassWithInitArgs(cls=Actor, worker_id=100)
    actor_worker_1 = RayWorkerGroup(
        resource_pool=actor_1_resource_pool, ray_cls_with_init=actor_cls_1, device_name=get_device_name()
    )
    actor_worker_2 = RayWorkerGroup(
        resource_pool=actor_2_resource_pool, ray_cls_with_init=actor_cls_2, device_name=get_device_name()
    )
    assert actor_worker_1.world_size == 2
    assert actor_worker_2.world_size == 6
    data_1 = DataProto.from_dict({"a": torch.zeros(4)})
    data_2 = DataProto.from_dict({"a": torch.zeros(6)})
    actor_output_1 = actor_worker_1.add(data_1)
    actor_output_2 = actor_worker_2.add(data_2)
    print(actor_output_1.batch["a"].tolist())
    print(actor_output_2.batch["a"].tolist())
    assert actor_output_1.batch["a"].tolist() == [0, 0, 1, 1]
    assert actor_output_2.batch["a"].tolist() == [100, 101, 102, 103, 104, 105]
    ray.shutdown()
def test_split_resource_pool_with_split_twice():
    """A resource pool can be split, and the resulting sub-pools split again."""
    ray.init()
    # assume we have 4 nodes, with 2 GPUs each
    global_resource_pool = RayResourcePool(process_on_nodes=[2, 2, 2, 2])
    global_resource_pool.get_placement_groups(device_name=get_device_name())
    # actors with [2, 1, 1, 1, 1, 2] (split twice)
    rp_1, rp_2, rp_3 = split_resource_pool(
        resource_pool=global_resource_pool,
        split_size=[2, 4, 2],
    )
    # split the middle 4-GPU pool again into four 1-GPU pools
    rp_2_1, rp_2_2, rp_2_3, rp_2_4 = split_resource_pool(
        resource_pool=rp_2,
        split_size=1,
    )
    fp_list = [rp_1, rp_2_1, rp_2_2, rp_2_3, rp_2_4, rp_3]
    correct_world_size = [2, 1, 1, 1, 1, 2]
    # Expected outputs: each group adds (rank + idx*100); 4-element batches are
    # chunked across the group's workers by DP_COMPUTE_PROTO dispatch.
    correct_output = [
        [0.0, 0.0, 1.0, 1.0],  # 2 worker
        [100.0, 100.0, 100.0, 100.0],  # 1 worker
        [200.0, 200.0, 200.0, 200.0],  # 1 worker
        [300.0, 300.0, 300.0, 300.0],  # 1 worker
        [400.0, 400.0, 400.0, 400.0],  # 1 worker
        [500.0, 500.0, 501.0, 501.0],  # 2 worker
    ]
    for idx, rp in enumerate(fp_list):
        actor_cls = RayClassWithInitArgs(cls=Actor, worker_id=idx * 100)
        actor_worker = RayWorkerGroup(resource_pool=rp, ray_cls_with_init=actor_cls, device_name=get_device_name())
        data = DataProto.from_dict({"a": torch.zeros(4)})
        actor_output = actor_worker.add(data)
        assert actor_worker.world_size == correct_world_size[idx]
        assert actor_output.batch["a"].tolist() == correct_output[idx]
    ray.shutdown()
| {
"repo_id": "verl-project/verl",
"file_path": "tests/single_controller/test_split_resource_pool.py",
"license": "Apache License 2.0",
"lines": 153,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:verl/models/mcore/bridge.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from megatron.bridge import AutoBridge
from megatron.bridge.models.conversion.param_mapping import AutoMapping
from megatron.bridge.peft.canonical_lora import CanonicalLoRA
from megatron.bridge.peft.dora import DoRA
from megatron.bridge.peft.lora import LoRA, VLMLoRA
except ImportError:
    # Install Megatron-Bridge, e.g. via `pip install verl[mcore]` or `pip install megatron-bridge`.
    print("Megatron-Bridge package not found. Please install Megatron-Bridge with `pip install megatron-bridge`")
raise
import torch
from megatron.core import tensor_parallel
def _ensure_model_list(model):
return model if isinstance(model, list) else [model]
class LinearForLastLayer(torch.nn.Linear):
    """Bias-free linear head used as the final layer of a transformer model.

    Extends ``torch.nn.Linear`` so the output can be gathered across sequence
    parallel regions when sequence parallelism is enabled.

    Attributes:
        sequence_parallel: Boolean indicating whether sequence parallelism is enabled
    """

    def __init__(
        self,
        input_size,
        output_size,
        *,
        sequence_parallel: bool,
    ):
        """Initialize the head.

        Args:
            input_size: The size of the input features
            output_size: The size of the output features
            sequence_parallel (bool): Whether sequence parallelism is enabled
        """
        super().__init__(in_features=input_size, out_features=output_size, bias=False)
        self.sequence_parallel = sequence_parallel
        if sequence_parallel:
            # Tag the weight so Megatron treats its gradient as sequence-parallel.
            self.weight.sequence_parallel = True

    def forward(
        self,
        input_,
        weight=None,
        runtime_gather_output=None,
    ):
        """Compute logits in float32 and return them as ``(logits, None)``.

        Args:
            input_: Input tensor
            weight: Placeholder for compatibility
            runtime_gather_output: Placeholder for compatibility

        Returns:
            tuple: (logits, None) where logits is the output of the linear transformation
        """
        logits = super().forward(input_).float()
        if self.sequence_parallel:
            # Reassemble the full sequence from the sequence-parallel shards.
            logits = tensor_parallel.gather_from_sequence_parallel_region(logits, tensor_parallel_output_grad=False)
        return logits, None
# Make Megatron-Bridge's AutoMapping treat the custom last layer as replicated.
AutoMapping.register_module_type("LinearForLastLayer", "replicated")
def make_value_model(hidden_size, sequence_parallel):
    """Creates a pre-wrap hook that replaces the output layer with a value head.

    Args:
        hidden_size (int): The hidden size of the model's transformer layers.
        sequence_parallel (bool): Whether sequence parallelism is enabled.

    Returns:
        A hook function that can be used as a `pre_wrap_hook` in Megatron-Bridge.
        The hook itself takes the model as input and prepares it for value head activation.
    """
    from megatron.core import parallel_state

    def hook(model):
        # Determine, per model chunk, whether it owns the pipeline's last stage —
        # only last-stage chunks carry the output layer that must be replaced.
        model_post_process = []
        if (
            parallel_state.get_pipeline_model_parallel_world_size() > 1
            and parallel_state.get_virtual_pipeline_model_parallel_world_size() is not None
        ):
            # With virtual pipeline parallelism each rank holds one chunk per VP stage.
            for i in range(parallel_state.get_virtual_pipeline_model_parallel_world_size()):
                model_post_process.append(parallel_state.is_pipeline_last_stage(ignore_virtual=False, vp_stage=i))
        else:
            model_post_process.append(parallel_state.is_pipeline_last_stage())

        model_list = _ensure_model_list(model)
        assert len(model_post_process) == len(model_list), "Model list length and post process list length must match."
        for index, model_chunk in enumerate(model_list):
            if not model_post_process[index]:
                continue
            # Swap the LM output head for a scalar (output_size=1) value head.
            model_chunk.output_layer = LinearForLastLayer(
                input_size=hidden_size,
                output_size=1,
                sequence_parallel=sequence_parallel,
            )

    return hook
def freeze_moe_router(model):
    """Pre-wrap hook to freeze MoE router parameters.

    Disables gradients on each layer's router weight/bias and on shared-expert
    gate parameters, when present.

    Args:
        model: List of MegatronModule instances or single module

    Returns:
        The model with frozen router parameters
    """
    for chunk in _ensure_model_list(model):
        if not (hasattr(chunk, "decoder") and hasattr(chunk.decoder, "layers")):
            continue
        for layer in chunk.decoder.layers:
            mlp = layer.mlp
            if hasattr(mlp, "router"):
                router = mlp.router
                if hasattr(router, "weight"):
                    router.weight.requires_grad = False
                if hasattr(router, "bias"):
                    router.bias.requires_grad = False
            if hasattr(mlp, "shared_experts"):
                shared = mlp.shared_experts
                if getattr(shared, "gate_weight", None) is not None:
                    shared.gate_weight.requires_grad = False
                if getattr(shared, "gate_bias", None) is not None:
                    shared.gate_bias.requires_grad = False
    return model
__all__ = [
"AutoBridge",
"make_value_model",
"freeze_moe_router",
"LoRA",
"VLMLoRA",
"DoRA",
"CanonicalLoRA",
]
| {
"repo_id": "verl-project/verl",
"file_path": "verl/models/mcore/bridge.py",
"license": "Apache License 2.0",
"lines": 145,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/utils/megatron_peft_utils.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for PEFT (Parameter-Efficient Fine-Tuning) of Megatron in VERL."""
import os
from pathlib import Path
from typing import Iterator
import torch
# Map megatron lora target modules to HF-style module names for vLLM.
# One fused megatron projection may expand to several HF projections
# (e.g. "linear_qkv" covers q/k/v), hence list-valued entries.
MEGATRON_TO_HF_MODULES = {
    "linear_qkv": ["q_proj", "k_proj", "v_proj"],
    "linear_proj": ["o_proj"],
    "linear_fc1": ["gate_proj", "up_proj"],
    "linear_fc2": ["down_proj"],
    "router": ["gate"],
    # Canonical LoRA mappings (per-projection, unfused)
    "linear_q": ["q_proj"],
    "linear_k": ["k_proj"],
    "linear_v": ["v_proj"],
    "linear_fc1_up": ["up_proj"],
    "linear_fc1_gate": ["gate_proj"],
    # MLA mappings
    "linear_kv_down_proj": ["kv_a_proj_with_mqa"],
    "linear_kv_up_proj": ["kv_b_proj"],
    "linear_q_down_proj": ["q_a_proj"],
    "linear_q_up_proj": ["q_b_proj"],
    "linear_q_proj": ["q_proj"],
    # DSA indexer mappings
    "linear_wq_b": ["wq_b"],
    "linear_wk": ["wk"],
    "linear_weights_proj": ["weights_proj"],
}
# Modules with stacked parameters that need a ".base_layer" suffix in vLLM.
# Entries are parameter-name *suffixes*, matched with str.endswith in
# add_base_layer_suffix; the suffix list order decides match priority.
STACKED_PARAMS = [
    ".q_proj.weight",
    ".q_proj.bias",
    ".k_proj.weight",
    ".k_proj.bias",
    ".v_proj.weight",
    ".v_proj.bias",
    ".o_proj.weight",
    ".o_proj.bias",
    ".gate_proj.weight",
    ".up_proj.weight",
    ".down_proj.weight",
    ".mlp.gate.weight",
    ".mlp.gate.bias",
    ".mlp.gate.e_score_correction_bias",
    ".kv_a_proj_with_mqa.weight",
    ".kv_b_proj.weight",
    ".q_a_proj.weight",
    ".q_b_proj.weight",
    ".wq_b.weight",
    ".wk.weight",
    ".weights_proj.weight",
]
def _get_rank_checkpoint_path(base_path: str) -> str:
    """Build the rank-specific checkpoint path following Megatron's convention.

    The path component always encodes the tensor-parallel rank, plus the
    pipeline-parallel rank when PP > 1 and the expert-parallel rank when
    EP > 1, i.e. ``mp_rank_{tp:02d}``, ``mp_rank_{tp:02d}_{pp:03d}`` or
    with a trailing ``_{ep:03d}``.

    Args:
        base_path: Base checkpoint directory.

    Returns:
        Rank-specific path under ``base_path``.
    """
    from megatron.core import mpu
    # The tensor-parallel component is always present.
    component = f"mp_rank_{mpu.get_tensor_model_parallel_rank():02d}"
    # Pipeline rank is only encoded when pipeline parallelism is active.
    if mpu.get_pipeline_model_parallel_world_size() > 1:
        component += f"_{mpu.get_pipeline_model_parallel_rank():03d}"
    # Expert rank is only encoded when expert parallelism is active.
    if mpu.get_expert_model_parallel_world_size() > 1:
        component += f"_{mpu.get_expert_model_parallel_rank():03d}"
    return os.path.join(base_path, component)
def get_adapter_state_dict(model):
    """Collect only the adapter parameters of a model.

    Args:
        model: PyTorch model (possibly wrapped in DDP/Float16Module).

    Returns:
        Dict mapping adapter parameter names to cloned tensors.
    """
    from verl.utils.megatron_utils import unwrap_model
    # Strip DDP/Float16Module wrappers; unwrap_model may hand back a list.
    module = unwrap_model(model)
    if isinstance(module, list):
        module = module[0]
    # Adapter parameters are identified by the ".adapter." marker in their name.
    return {
        name: param.data.clone()
        for name, param in module.named_parameters()
        if ".adapter." in name.lower()
    }
def save_adapter_checkpoint(
    model: torch.nn.Module | list[torch.nn.Module],
    checkpoint_path: str,
    rank: int = 0,
):
    """Save only adapter parameters to checkpoint.

    This is much more efficient than saving the full model when using PEFT,
    as adapters typically represent <1% of total parameters.

    Each model-parallel rank writes one file directly under ``checkpoint_path``
    named ``mp_rank_{tp:02d}[_{pp:03d}][_{ep:03d}]_adapter.pt`` (see
    ``_get_rank_checkpoint_path``); no per-rank subdirectory is created.

    Args:
        model: Model or list of models; adapter state is taken from the first.
        checkpoint_path: Base directory to save into (created if missing).
        rank: Process rank (used for logging only).
    """
    if isinstance(model, list):
        models = model
    else:
        models = [model]
    # Get adapter state from first model
    adapter_state = get_adapter_state_dict(models[0])
    if not adapter_state:
        # Nothing to persist (e.g. PEFT not enabled on this model).
        if rank == 0:
            print("Warning: No adapter parameters found to save")
        return
    # Rank-specific file prefix inside the (newly created) checkpoint directory.
    Path(checkpoint_path).mkdir(parents=True, exist_ok=True)
    rank_path = _get_rank_checkpoint_path(checkpoint_path)
    adapter_file = rank_path + "_adapter.pt"
    torch.save(
        {
            "adapter_state_dict": adapter_state,
        },
        adapter_file,
    )
    if rank == 0:
        print(f"Saved {len(adapter_state)} adapter parameters to {checkpoint_path} (distributed)")
def load_adapter_checkpoint(
    model: torch.nn.Module | list[torch.nn.Module],
    checkpoint_path: str,
    strict: bool = True,
):
    """Load adapter parameters from checkpoint.

    Reads the per-rank file written by :func:`save_adapter_checkpoint`, i.e.
    ``checkpoint_path/mp_rank_{tp:02d}[_{pp:03d}][_{ep:03d}]_adapter.pt``.

    Args:
        model: Model or list of models (for VPP, a list of chunks).
        checkpoint_path: Base path to the checkpoint directory.
        strict: If True, raise when the checkpoint contains keys the model
            does not have. Missing model keys are always tolerated, since the
            checkpoint only holds adapter weights.

    Raises:
        FileNotFoundError: If this rank's adapter file does not exist.
        RuntimeError: If ``strict`` and unexpected keys are encountered.
    """
    from megatron.core import mpu
    from verl.utils.megatron_utils import unwrap_model
    # Get rank-specific path
    rank_path = _get_rank_checkpoint_path(checkpoint_path)
    adapter_file = rank_path + "_adapter.pt"
    if not os.path.isfile(adapter_file):
        raise FileNotFoundError(f"Adapter checkpoint not found: {adapter_file}")
    checkpoint = torch.load(adapter_file, map_location="cpu")
    adapter_state = checkpoint.get("adapter_state_dict", {})
    if not adapter_state:
        print("Warning: No adapter parameters found in checkpoint")
        return
    if isinstance(model, list):
        models = model
    else:
        models = [model]
    # Load adapter parameters into each model (for VPP, models may have multiple chunks)
    # NOTE(review): loaded_count is accumulated but never used — candidate for removal.
    loaded_count = 0
    for m in models:
        unwrapped = unwrap_model(m)
        if isinstance(unwrapped, list):
            unwrapped = unwrapped[0]
        # strict=False here because the model has many non-adapter params that
        # are intentionally absent from the checkpoint; strictness is applied
        # only to unexpected (checkpoint-only) keys below.
        _, unexpected = unwrapped.load_state_dict(adapter_state, strict=False)
        if strict and unexpected:
            raise RuntimeError(f"Error loading adapter checkpoint:\nUnexpected keys: {unexpected}")
        loaded_count += len(adapter_state)
    # Log once from the (dp=0, tp=0, pp=0) rank only.
    if (
        mpu.get_data_parallel_rank() == 0
        and mpu.get_tensor_model_parallel_rank() == 0
        and mpu.get_pipeline_model_parallel_rank() == 0
    ):
        print(f"Loaded {len(adapter_state)} adapter parameters from {checkpoint_path}")
def count_adapter_parameters(model):
    """Count the number of trainable adapter parameters.

    Args:
        model: PyTorch model.

    Returns:
        Tuple of (adapter_params, total_params, percentage).
    """
    from verl.utils.megatron_utils import unwrap_model
    module = unwrap_model(model)
    if isinstance(module, list):
        module = module[0]
    total = 0
    adapter = 0
    for name, param in module.named_parameters():
        count = param.numel()
        total += count
        lowered = name.lower()
        # LoRA/adapter params are only counted when actually trainable.
        if ("lora" in lowered or "adapter" in lowered) and param.requires_grad:
            adapter += count
    percentage = 100 * adapter / total if total > 0 else 0
    return adapter, total, percentage
def print_adapter_info(model):
    """Print a summary of adapter vs. total parameter counts for the model."""
    adapter_params, total_params, percentage = count_adapter_parameters(model)
    separator = "=" * 60
    print(f"\n{separator}")
    print("PEFT Adapter Information:")
    print(f"  Total parameters: {total_params:,}")
    print(f"  Adapter parameters: {adapter_params:,}")
    print(f"  Trainable percentage: {percentage:.2f}%")
    print(f"{separator}\n")
def convert_megatron_to_hf_target_modules(megatron_modules: list[str]) -> list[str]:
    """Translate megatron lora target module names into HF-style names.

    Names without a known mapping are passed through unchanged.

    Args:
        megatron_modules: List of megatron-style module names.

    Returns:
        List of HF-style module names, deduplicated with order preserved.
    """
    converted: list[str] = []
    for name in megatron_modules:
        # Fall back to the original name when no mapping entry exists.
        converted.extend(MEGATRON_TO_HF_MODULES.get(name, [name]))
    # dict.fromkeys keeps first-seen order while dropping duplicates.
    return list(dict.fromkeys(converted))
def build_peft_config_for_vllm(lora_config: dict) -> dict:
    """Build a peft_config dict compatible with vLLM's PEFTHelper from a megatron lora config.

    Args:
        lora_config: Megatron lora configuration dictionary.

    Returns:
        A dictionary compatible with vLLM's PEFTHelper.from_dict().
    """
    from peft import TaskType
    default_targets = ["linear_qkv", "linear_proj", "linear_fc1", "linear_fc2"]
    megatron_targets = lora_config.get("target_modules", default_targets)
    megatron_excludes = lora_config.get("exclude_modules", [])
    return {
        "task_type": TaskType.CAUSAL_LM,
        "r": lora_config.get("rank", 0),
        "lora_alpha": lora_config.get("alpha", 32),
        # vLLM expects HF-style module names, so translate the megatron ones.
        "target_modules": convert_megatron_to_hf_target_modules(megatron_targets),
        "exclude_modules": convert_megatron_to_hf_target_modules(megatron_excludes),
        "bias": "none",
        "lora_dropout": lora_config.get("dropout", 0.0),
    }
# vLLM needs to target all-linear no matter about specific LoRA config
def add_base_layer_suffix(
    params: Iterator[tuple[str, torch.Tensor]],
    model_type: str,
) -> Iterator[tuple[str, torch.Tensor]]:
    """Yield (name, tensor) pairs with ``base_layer.`` inserted for stacked params.

    For parameter names ending in one of the STACKED_PARAMS suffixes, the
    final component is rewritten, e.g. ``...q_proj.weight`` becomes
    ``...q_proj.base_layer.weight``; other names pass through untouched.

    Args:
        params: Iterator of (param_name, tensor).
        model_type: The type of the model (e.g., "llama").
    """
    candidates = STACKED_PARAMS
    # TODO: other models may have more special treatment, or integrate this into Megatron-Bridge
    if model_type == "llama":
        candidates = [".embed_tokens.weight", *STACKED_PARAMS]
    for name, param in params:
        matched = next((s for s in candidates if name.endswith(s)), "")
        if matched:
            # Insert "base_layer." before the leaf component (e.g. "weight").
            leaf = matched.rsplit(".", 1)[-1]
            name = f"{name[: -len(leaf)]}base_layer.{leaf}"
        yield name, param
# Public API of this module.
__all__ = [
    "get_adapter_state_dict",
    "save_adapter_checkpoint",
    "load_adapter_checkpoint",
    "count_adapter_parameters",
    "print_adapter_info",
    "convert_megatron_to_hf_target_modules",
    "build_peft_config_for_vllm",
    "add_base_layer_suffix",
]
| {
"repo_id": "verl-project/verl",
"file_path": "verl/utils/megatron_peft_utils.py",
"license": "Apache License 2.0",
"lines": 286,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/workers/config/megatron_peft.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PEFT configuration of Megatron for VERL."""
def get_peft_cls(model_config, bridge, provider, dtype=None):
    """Get PEFT class from model config.

    Args:
        model_config: Model configuration object.
        bridge: Megatron-Bridge AutoBridge instance.
        provider: Provider instance.
        dtype: Fallback dtype used when the lora config does not set "dtype".

    Returns:
        PEFT configuration object (LoRA, VLMLoRA, CanonicalLoRA, DoRA), or
        None when PEFT is not configured (no ``lora`` section, or rank <= 0).

    Raises:
        ValueError: If ``lora.type`` is not a recognized PEFT type.
    """
    # PEFT is disabled when there is no lora section at all.
    if not hasattr(model_config, "lora"):
        return None
    lora_cfg = model_config.lora
    # Only enable if rank > 0
    if lora_cfg.get("rank", 0) <= 0:
        return None
    assert bridge is not None and provider is not None, "LoRA/PEFT only supported via Megatron-Bridge"
    from verl.models.mcore.bridge import CanonicalLoRA, DoRA, LoRA, VLMLoRA
    lora_dtype = lora_cfg.get("dtype", dtype)
    if lora_dtype is not None:
        from verl.utils.torch_dtypes import PrecisionType
        lora_dtype = PrecisionType.to_dtype(lora_dtype)
    lora_type = lora_cfg.get("type", "lora")
    if lora_type == "lora":
        peft_cls = LoRA(
            target_modules=lora_cfg.get("target_modules", ["linear_qkv", "linear_proj", "linear_fc1", "linear_fc2"]),
            dim=lora_cfg.get("rank"),
            alpha=lora_cfg.get("alpha", 32),
            dropout=lora_cfg.get("dropout", 0.0),
            dropout_position=lora_cfg.get("dropout_position", "pre"),
            lora_A_init_method=lora_cfg.get("lora_A_init_method", "xavier"),
            lora_B_init_method=lora_cfg.get("lora_B_init_method", "zero"),
            a2a_experimental=lora_cfg.get("a2a_experimental", False),
            lora_dtype=lora_dtype,
            exclude_modules=lora_cfg.get("exclude_modules", []),
        )
    # was a bare `if`, breaking the elif chain's consistency (behavior was
    # unchanged since the conditions are mutually exclusive, but it invited bugs)
    elif lora_type == "vlm_lora":
        peft_cls = VLMLoRA(
            target_modules=lora_cfg.get("target_modules", ["linear_qkv", "linear_proj", "linear_fc1", "linear_fc2"]),
            dim=lora_cfg.get("rank"),
            alpha=lora_cfg.get("alpha", 32),
            dropout=lora_cfg.get("dropout", 0.0),
            dropout_position=lora_cfg.get("dropout_position", "pre"),
            lora_A_init_method=lora_cfg.get("lora_A_init_method", "xavier"),
            lora_B_init_method=lora_cfg.get("lora_B_init_method", "zero"),
            a2a_experimental=lora_cfg.get("a2a_experimental", False),
            lora_dtype=lora_dtype,
            freeze_vision_model=lora_cfg.get("freeze_vision_model", True),
            freeze_vision_projection=lora_cfg.get("freeze_vision_projection", True),
            freeze_language_model=lora_cfg.get("freeze_language_model", True),
            exclude_modules=lora_cfg.get("exclude_modules", []),
        )
    elif lora_type == "canonical_lora":
        peft_cls = CanonicalLoRA(
            target_modules=lora_cfg.get(
                "target_modules",
                [
                    "linear_q",
                    "linear_k",
                    "linear_v",
                    "linear_proj",
                    "linear_fc1_up",
                    "linear_fc1_gate",
                    "linear_fc2",
                ],
            ),
            dim=lora_cfg.get("rank"),
            alpha=lora_cfg.get("alpha", 32),
            dropout=lora_cfg.get("dropout", 0.0),
            dropout_position=lora_cfg.get("dropout_position", "pre"),
            lora_A_init_method=lora_cfg.get("lora_A_init_method", "xavier"),
            lora_B_init_method=lora_cfg.get("lora_B_init_method", "zero"),
            exclude_modules=lora_cfg.get("exclude_modules", []),
        )
    elif lora_type == "dora":
        peft_cls = DoRA(
            target_modules=lora_cfg.get("target_modules", ["linear_qkv", "linear_proj", "linear_fc1", "linear_fc2"]),
            dim=lora_cfg.get("rank"),
            alpha=lora_cfg.get("alpha", 32),
            dropout=lora_cfg.get("dropout", 0.0),
            dropout_position=lora_cfg.get("dropout_position", "pre"),
            lora_A_init_method=lora_cfg.get("lora_A_init_method", "xavier"),
            lora_B_init_method=lora_cfg.get("lora_B_init_method", "zero"),
            exclude_modules=lora_cfg.get("exclude_modules", []),
        )
    else:
        # Previously an unknown type fell through, printed a misleading
        # "Enabling ..." message and silently returned None (disabling PEFT
        # despite rank > 0). Fail loudly on misconfiguration instead.
        raise ValueError(
            f"Unknown lora type: {lora_type!r}; expected one of 'lora', 'vlm_lora', 'canonical_lora', 'dora'"
        )
    print(
        f"Enabling {lora_type.upper()} with rank={lora_cfg.get('rank')}, "
        f"alpha={lora_cfg.get('alpha')}, dropout={lora_cfg.get('dropout')}"
    )
    return peft_cls
# Public API of this module.
__all__ = [
    "get_peft_cls",
]
| {
"repo_id": "verl-project/verl",
"file_path": "verl/workers/config/megatron_peft.py",
"license": "Apache License 2.0",
"lines": 107,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:tests/experimental/vla/test_sim_envs.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import numpy as np
import pytest
from omegaconf import OmegaConf
# @pytest.mark.parametrize("simulator_type", ["libero", "isaac"])
@pytest.mark.parametrize("simulator_type", ["isaac"])
def test_sim_env_creation_and_step(simulator_type):
    """Smoke-test simulator env creation, stepping, and video flushing.

    Drives a vectorized simulator with a fixed action sequence (replayed
    identically in all envs), checks step-output types/shapes, then verifies
    that flush_video writes one mp4 per task directory.
    """
    num_envs = 8
    # Fixed 7-dim action sequence; presumably end-effector deltas plus a
    # gripper command — TODO confirm against the env's action spec.
    actions = np.array(
        [
            [5.59112417e-01, 8.06460073e-02, 1.36817226e-02, -4.64279854e-04, -1.72158767e-02, -6.57548380e-04, -1],
            [2.12711899e-03, -3.13366604e-01, 3.41386353e-04, -4.64279854e-04, -8.76528812e-03, -6.57548380e-04, -1],
            [7.38182960e-02, -4.64548351e-02, -6.63602950e-02, -4.64279854e-04, -2.32520114e-02, -6.57548380e-04, -1],
            [7.38182960e-02, -1.60845593e-01, 3.41386353e-04, -4.64279854e-04, 1.05503430e-02, -6.57548380e-04, -1],
            [7.38182960e-02, -3.95982152e-01, -7.97006313e-02, -5.10713711e-03, 3.22804279e-02, -6.57548380e-04, -1],
            [2.41859427e-02, -3.64206941e-01, -6.63602950e-02, -4.64279854e-04, 1.05503430e-02, -6.57548380e-04, -1],
            [4.62447664e-02, -5.16727952e-01, -7.97006313e-02, -4.64279854e-04, 1.05503430e-02, 8.73740975e-03, -1],
            [4.62447664e-02, -5.73923331e-01, 3.41386353e-04, -4.64279854e-04, 6.92866212e-03, -6.57548380e-04, -1],
        ]
    )
    cfg = OmegaConf.create(
        {
            "max_episode_steps": 512,
            "only_eval": False,
            "reward_coef": 1.0,
            "init_params": {
                "camera_names": ["agentview"],
            },
            "video_cfg": {
                "save_video": True,
                "video_base_dir": "/tmp/test_sim_env_creation_and_step",
            },
            "task_suite_name": "libero_10",
            "num_envs": num_envs,
            "num_group": 1,
            "group_size": num_envs,
            "seed": 0,
        },
    )
    sim_env = None
    # Import lazily so only the selected simulator backend needs to be installed.
    if simulator_type == "isaac":
        from verl.experimental.vla.envs.isaac_env.isaac_env import IsaacEnv
        sim_env = IsaacEnv(cfg, rank=0, world_size=1)
    elif simulator_type == "libero":
        from verl.experimental.vla.envs.libero_env.libero_env import LiberoEnv
        sim_env = LiberoEnv(cfg, rank=0, world_size=1)
    else:
        raise ValueError(f"simulator_type {simulator_type} is not supported")
    video_count = 0
    # Only task id 0 is exercised here.
    for i in [0]:
        # The first call to step with actions=None will reset the environment
        # NOTE(review): `step` is counted but never used — candidate for removal.
        step = 0
        sim_env.reset_envs_to_state_ids([0] * num_envs, [i] * num_envs)
        for action in actions:
            # Broadcast the same action to every env in the vectorized batch.
            obs_venv, reward_venv, terminated_venv, truncated_venv, info_venv = sim_env.step(
                np.array([action] * num_envs)
            )
            assert isinstance(obs_venv, dict)
            assert reward_venv.shape == (num_envs,)
            assert terminated_venv.shape == (num_envs,)
            assert truncated_venv.shape == (num_envs,)
            assert isinstance(info_venv, dict)
            if terminated_venv.any() or truncated_venv.any():
                break
            step += 1
        sim_env.flush_video(video_sub_dir=f"task_{i}")
        # Video files are expected under <base>/rank_0/task_<i>/<count>.mp4.
        assert os.path.exists(os.path.join(cfg.video_cfg.video_base_dir, f"rank_0/task_{i}/{video_count}.mp4"))
        os.remove(os.path.join(cfg.video_cfg.video_base_dir, f"rank_0/task_{i}/{video_count}.mp4"))
        video_count += 1
    print("test passed")
    sim_env.close()
if __name__ == "__main__":
    # Allow running this file directly without the pytest runner.
    unittest.main()
| {
"repo_id": "verl-project/verl",
"file_path": "tests/experimental/vla/test_sim_envs.py",
"license": "Apache License 2.0",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:tests/single_controller/test_get_set_dispatch_collect_cpu.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from verl.single_controller.base import Worker
def test_get_set_dispatch_collect_cpu():
    """Dispatch/collect info copies between workers; duplicate mesh names are rejected."""
    # Minimal single-process distributed environment for Worker construction.
    os.environ.update(
        {
            "RANK": "0",
            "LOCAL_RANK": "0",
            "WORLD_SIZE": "2",
            "MASTER_ADDR": "localhost",
            "MASTER_PORT": "12345",
        }
    )
    ref_worker = Worker()
    ref_worker._register_dispatch_collect_info(mesh_name="actor", dp_rank=0, is_collect=True)
    actor_worker = Worker()
    actor_worker._register_dispatch_collect_info(mesh_name="actor", dp_rank=1, is_collect=False)
    # A third worker imports both sets of dispatch info under distinct mesh names.
    combined = Worker()
    combined.set_dispatch_collect(mesh_name="ref", **ref_worker.get_dispatch_collect())
    combined.set_dispatch_collect(mesh_name="actor", **actor_worker.get_dispatch_collect())
    assert combined._query_dispatch_info("ref") == 0
    assert combined._query_collect_info("ref")
    assert combined._query_dispatch_info("actor") == 1
    assert not combined._query_collect_info("actor")
    # Registering the same mesh_name twice must be rejected.
    duplicate = Worker()
    duplicate._register_dispatch_collect_info(mesh_name="actor", dp_rank=1, is_collect=False)
    with pytest.raises(AssertionError):
        combined.set_dispatch_collect(mesh_name="actor", **duplicate.get_dispatch_collect())
| {
"repo_id": "verl-project/verl",
"file_path": "tests/single_controller/test_get_set_dispatch_collect_cpu.py",
"license": "Apache License 2.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:verl/workers/engine_workers.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logging
import os
from contextlib import nullcontext
from functools import partial
from itertools import chain
import torch
from codetiming import Timer
from omegaconf import DictConfig, open_dict
from tensordict import NonTensorData, TensorDict
from torch.distributed.device_mesh import init_device_mesh
try:
from verl.workers.engine.mindspeed.transformer_impl import repatch
except ImportError:
repatch = None
from verl.checkpoint_engine import CheckpointEngineRegistry
from verl.single_controller.base import Worker
from verl.single_controller.base.decorator import Dispatch, make_nd_compute_dataproto_dispatch_fn, register
from verl.utils import tensordict_utils as tu
from verl.utils.config import omega_conf_to_dataclass
from verl.utils.device import get_device_name, set_expandable_segments
from verl.utils.distributed import initialize_global_process_group_ray
from verl.utils.flops_counter import FlopsCounter
from verl.utils.memory_utils import aggressive_empty_cache
from verl.utils.metric.utils import Metric
from verl.utils.profiler import DistProfiler, DistProfilerExtension, ProfilerConfig, log_gpu_memory_usage
from verl.utils.py_functional import append_to_dict
from verl.utils.tensordict_utils import maybe_fix_3d_position_ids
from verl.utils.torch_functional import allgather_dict_into_dict
from verl.workers.config import ActorConfig, HFModelConfig, RolloutConfig, TrainingWorkerConfig
from verl.workers.rollout.base import BaseRollout, get_rollout_class
from verl.workers.utils.losses import ppo_loss
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
def _with_routing_replay_flag(enabled: bool):
    """Decorator factory that stamps 'enable_routing_replay' onto the data TensorDict.

    The flag is written only when the worker instance itself has
    ``enable_routing_replay`` set; otherwise the wrapped method runs untouched.
    """
    def _decorate(func):
        @functools.wraps(func)
        def _wrapped(self, data: TensorDict, *args, **kwargs):
            # Only annotate the batch when routing replay is active on this worker.
            if self.enable_routing_replay:
                tu.assign_non_tensor_data(data, "enable_routing_replay", enabled)
            return func(self, data, *args, **kwargs)
        return _wrapped
    return _decorate
class TrainingWorker(Worker, DistProfilerExtension):
"""
TrainingWorker provides a Tinker-like API (https://thinkingmachines.ai/tinker/) as a RayWorkerGroup
to a single controller. Currently, we only provide more coarse grained APIs,
and do not provide exact APIs as Tinker does. But this can be added in the future.
"""
    def __init__(self, config: TrainingWorkerConfig):
        """Initialize the training worker.

        Builds the training engine from the (possibly auto-selected) engine
        and optimizer configs, applies the optional NPU MindSpeed patch, sets
        up profiling, and registers dispatch/collect info for the "train" mesh.

        Args:
            config: Aggregated worker configuration holding model, engine,
                optimizer, checkpoint and profiler configs.
        """
        Worker.__init__(self)
        from verl.workers.engine import BaseEngine, EngineRegistry
        initialize_global_process_group_ray(timeout_second=None)
        self.config = config
        self.model_config = self.config.model_config
        self.engine_config = self.config.engine_config
        self.optimizer_config = self.config.optimizer_config
        self.checkpoint_config = self.config.checkpoint_config
        self.device_name = get_device_name()
        if self.engine_config is None:
            # Without an explicit engine config the optimizer config must also
            # be absent — both are produced together by the selector below.
            assert self.optimizer_config is None
            if self.config.auto_select_engine_optim_fn is None:
                raise ValueError(
                    "engine_config is not provided and auto_select_engine_optim_fn is not set. "
                    "Cannot determine engine backend."
                )
            # Support automatically select engine backend given model config
            self.engine_config, self.optimizer_config = self.config.auto_select_engine_optim_fn(
                self.model_config, self.device_name
            )
        # we use the one defined in model
        # TODO: this is not elegant and should refactor later
        self.engine_config.use_remove_padding = self.model_config.use_remove_padding
        self.engine_config.use_fused_kernels = self.model_config.use_fused_kernels
        if repatch is not None:
            # NPU MindSpeed patch, will be refactored with MindSpeedEngine.
            repatch(self.engine_config.get("override_transformer_config", {}))
        # TODO: add DistProfilerExtension
        self.profiler_config = self.config.profiler_config
        if self.profiler_config is not None:
            self.profiler_tool_config = self.profiler_config.tool_config.get(self.profiler_config.tool, {})
        else:
            self.profiler_tool_config = None
        DistProfilerExtension.__init__(
            self, DistProfiler(rank=self.rank, config=self.profiler_config, tool_config=self.profiler_tool_config)
        )
        self.engine: BaseEngine = EngineRegistry.new(
            model_type=self.config.model_type,
            backend=self.engine_config.strategy,
            model_config=self.model_config,
            engine_config=self.engine_config,
            optimizer_config=self.optimizer_config,
            checkpoint_config=self.checkpoint_config,
        )
        # build dispatch info
        self._register_dispatch_collect_info(
            mesh_name="train",
            dp_rank=self.engine.get_data_parallel_rank(),
            is_collect=self.engine.is_mp_src_rank_with_outputs(),
        )
        self.flops_counter = FlopsCounter(self.model_config.hf_config)
        # Must be set via set_loss_fn before train_batch can be called.
        self.loss_fn = None
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def to(self, device, model=True, optimizer=True, grad=True):
"""Manual control of load/offload"""
assert device in ["cpu", "device"]
if device == "device":
device = get_device_name()
self.engine.to(device=device, model=model, optimizer=optimizer, grad=grad)
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def set_loss_fn(self, loss_fn):
        """Register the loss function used by train_batch/infer_batch.

        Must be called before train_batch (which asserts loss_fn is not None).
        """
        self.loss_fn = loss_fn
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def reset(self):
        """Reset the model engine to its initial state.

        If the engine has not been initialized yet, this initializes it;
        otherwise it reloads the checkpoint and resets internal state
        (both behaviors are delegated to ``engine.initialize``).
        """
        self.engine.initialize()
    def _postprocess_output(self, output, *, global_token_num, delta_time, forward_only, images_seqlens):
        """Reduce per-rank engine output into a metrics TensorDict.

        Averages the loss across the DP group, all-gathers the remaining
        metrics, aggregates mtp losses, and computes MFU from the global
        token counts and wall-clock time of the step.

        Args:
            output: Dict containing "loss", "model_output" and "metrics".
            global_token_num: Per-sequence token counts for the global batch,
                or None to skip MFU computation.
            delta_time: Wall-clock duration of the step in seconds.
            forward_only: True for inference-only passes (scales MFU down).
            images_seqlens: Optional image sequence lengths for FLOPs estimation.

        Returns:
            TensorDict of model outputs with a "metrics" non-tensor entry.
        """
        # TODO: whether to log memory
        # metrics["perf/max_memory_allocated_gb"] = get_torch_device().max_memory_allocated() / (1024 ** 3)
        # metrics["perf/max_memory_reserved_gb"] = get_torch_device().max_memory_reserved() / (1024 ** 3)
        # metrics["perf/cpu_memory_used_gb"] = psutil.virtual_memory().used / (1024 ** 3)
        metrics: dict = output.pop("metrics")
        # perform all gather in dp group to ensure that it's correct.
        # Here each metric in metrics can be a list (micro-batch metrics) or a singleton
        # we should always sum the loss of each micro-batch as we scale by global_bsz/global_token
        loss = torch.sum(torch.tensor(output.pop("loss"), device=self.device_name))
        dp_group = self.engine.get_data_parallel_group()
        if dp_group is not None:
            torch.distributed.all_reduce(loss, op=torch.distributed.ReduceOp.AVG, group=dp_group)
        loss = loss.item()
        # For grad_norm, we do not perform all reduce because it is already been done when clipping grad
        grad_norm = metrics.pop("grad_norm", None)
        lr = metrics.pop("lr", None)
        # For other metrics, we perform all gather in dp group (only if DP > 1)
        if dp_group is not None:
            final_metrics = allgather_dict_into_dict(data=metrics, group=dp_group)
        else:
            final_metrics = metrics
        final_metrics["loss"] = loss
        if grad_norm is not None:
            final_metrics["grad_norm"] = grad_norm
        if lr is not None:
            final_metrics["lr"] = lr
        # TODO: confirm the mtp loss IS same across dp
        for k, v in final_metrics.items():
            if k.startswith("mtp_losses"):
                flatten_v = [sublist[0] for sublist in v]  # sublist should be single element
                final_metrics[k] = sum(flatten_v) / len(flatten_v)
        # compute mfu
        if global_token_num is not None:
            estimated_flops, promised_flops = self.flops_counter.estimate_flops(
                global_token_num, delta_time, images_seqlens=images_seqlens
            )
            final_metrics["mfu"] = estimated_flops / promised_flops / torch.distributed.get_world_size()
            if forward_only:
                # presumably because a forward pass is ~1/3 the FLOPs of a full
                # fwd+bwd step assumed by the estimator — TODO confirm.
                final_metrics["mfu"] /= 3.0
        # model outputs
        model_output = output.pop("model_output", {})
        # We only return final_metrics
        final_output = tu.get_tensordict(tensor_dict=model_output, non_tensor_dict={"metrics": final_metrics})
        return final_output
@register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="train"), blocking=False)
    def train_mini_batch(self, data: TensorDict) -> TensorDict:
        """Split a batch into mini-batches and run multiple epochs of updates.

        Consumes the following control keys from ``data`` (popped before use):
        ``mini_batch_size`` or ``num_mini_batch`` (exactly one of the two is
        required), ``epochs``, ``seed``, ``dataloader_kwargs`` and
        ``disable_auto_offload``. Each mini-batch is forwarded to
        :meth:`train_batch`; the LR scheduler is stepped on the last iteration.

        Args:
            data: Batch of training samples for this DP rank, as a TensorDict.

        Returns:
            TensorDict holding aggregated training metrics on the model-parallel
            source rank, or None on other ranks.
        """
        maybe_fix_3d_position_ids(data)
        batch_size_per_dp = data.shape[0]
        disable_auto_offload = tu.pop(data, key="disable_auto_offload", default=False)
        mini_batch_size = tu.pop(data, key="mini_batch_size", default=None)
        num_mini_batch = tu.pop(data, key="num_mini_batch", default=None)
        epochs = tu.pop(data, key="epochs", default=1)
        seed = tu.pop(data, key="seed", default=42)
        dataloader_kwargs = tu.pop(data, key="dataloader_kwargs", default={})
        assert mini_batch_size is not None or num_mini_batch is not None
        if mini_batch_size is None:
            # Derive the per-GPU mini-batch size from the requested count.
            assert batch_size_per_dp % num_mini_batch == 0, f"Got {batch_size_per_dp=} and {num_mini_batch=}"
            mini_batch_size_per_gpu = batch_size_per_dp // num_mini_batch
        else:
            # mini_batch_size is global; divide it across the DP group.
            assert mini_batch_size % self.engine.get_data_parallel_size() == 0, (
                f"Got {mini_batch_size=} and {self.engine.get_data_parallel_size()=}"
            )
            mini_batch_size_per_gpu = mini_batch_size // self.engine.get_data_parallel_size()
        # make iterator; per-rank seed offset decorrelates shuffling across DP ranks
        dataloader = tu.make_iterator(
            data,
            mini_batch_size=mini_batch_size_per_gpu,
            epochs=epochs,
            seed=seed + self.engine.get_data_parallel_rank(),
            dataloader_kwargs=dataloader_kwargs,
        )
        with (
            self.engine.train_mode(disable_auto_offload=disable_auto_offload),
            Timer(name="train_batch", logger=None),
        ):
            # update
            output_lst = []
            total_num_iterations = data.shape[0] // mini_batch_size_per_gpu * epochs
            for batch_idx, mini_batch_td in enumerate(dataloader):
                # add global token num
                global_token_num = mini_batch_td["input_ids"].offsets().diff().tolist()  # (total_nnz,)
                # allgather from dp rank
                global_token_num_output = [None] * self.engine.get_data_parallel_size()
                torch.distributed.all_gather_object(
                    global_token_num_output, global_token_num, self.engine.get_data_parallel_group()
                )
                global_token_num = [x for xs in global_token_num_output for x in xs]
                # Step the LR scheduler only on the final mini-batch; offload is
                # disabled per-mini-batch since the outer context handles it.
                tu.assign_non_tensor(
                    mini_batch_td,
                    global_token_num=NonTensorData(global_token_num),
                    update_lr_scheduler=batch_idx == total_num_iterations - 1,
                    disable_auto_offload=True,
                )
                actor_output = self.train_batch(mini_batch_td)
                output_lst.append(actor_output)
        if self.engine.is_mp_src_rank_with_outputs():
            actor_output = [tu.get(output, "metrics") for output in output_lst]
            metrics = {}
            for output in actor_output:
                for key, val in output.items():
                    # flatten dp and micro batch
                    if isinstance(val, list):
                        output[key] = (
                            Metric.aggregate_dp(val)
                            if isinstance(val[0], Metric)
                            else list(chain.from_iterable(val))
                        )
                append_to_dict(metrics, output)
            output = tu.get_tensordict(tensor_dict={}, non_tensor_dict={"metrics": metrics}).cpu()
        else:
            # Non-source model-parallel ranks produce no output.
            output = None
        return output
@register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="train"), blocking=False)
    def train_batch(self, data: TensorDict) -> TensorDict:
        """Run one optimizer update on a single (mini-)batch.

        Requires a loss function registered via :meth:`set_loss_fn`. Reads the
        non-tensor keys ``global_token_num``, ``disable_auto_offload``,
        ``images_seqlens`` and ``update_lr_scheduler`` from ``data``, and fills
        in engine defaults (padding removal, dynamic batch sizing, etc.) for
        any missing engineering keys.

        Args:
            data: Batch of training samples for this DP rank, as a TensorDict.

        Returns:
            TensorDict of post-processed metrics on the model-parallel source
            rank, or None on other ranks.
        """
        assert self.loss_fn is not None, "loss function can't be None when calling train_batch"
        assert not self.engine_config.forward_only, "Can't run `train_batch` when forward_only is in the engine config."
        # global_token_num should be a list of number of tokens of each seq in this batch
        global_token_num = tu.get(data, key="global_token_num")
        disable_auto_offload = tu.get(data, key="disable_auto_offload", default=False)
        images_seqlens = tu.get(data, key="images_seqlens", default=None)
        # inject engineering parameters if not specified
        default_keys = dict(
            use_remove_padding=self.model_config.use_remove_padding,
            use_dynamic_bsz=self.engine_config.use_dynamic_bsz,
            max_token_len_per_gpu=self.engine_config.max_token_len_per_gpu,
            micro_batch_size_per_gpu=self.engine_config.micro_batch_size_per_gpu,
            use_fused_kernels=self.engine_config.use_fused_kernels,
        )
        for key, val in default_keys.items():
            if key not in data.keys():
                tu.assign_non_tensor(data, **{key: val})
        with (
            self.engine.train_mode(disable_auto_offload=disable_auto_offload),
            Timer(name="train_batch", logger=None) as timer,
        ):
            output = self.engine.train_batch(data, loss_function=self.loss_fn)
        # containing loss, model_output and metrics
        # for training, we only care about loss and metrics
        delta_time = timer.last
        update_lr_scheduler = tu.get(data, key="update_lr_scheduler", default=False)
        # update lr scheduler
        if update_lr_scheduler:
            lr = self.engine.lr_scheduler_step()
        else:
            lr = None
        if self.engine.is_mp_src_rank_with_outputs():
            # we don't need model_output in training. Maybe we change our mind later
            output.pop("model_output")
            if lr is not None:
                output["metrics"]["lr"] = lr
            final_output = self._postprocess_output(
                output,
                global_token_num=global_token_num,
                delta_time=delta_time,
                forward_only=False,
                images_seqlens=images_seqlens,
            ).cpu()
        else:
            final_output = None
        return final_output
@register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="train"), blocking=False)
def infer_batch(self, data: TensorDict) -> TensorDict:
    """Run a forward-only pass on ``data`` (optionally computing the loss).

    Returns:
        Post-processed TensorDict on the model-parallel source rank (CPU);
        ``None`` on every other rank.
    """
    # add mfu calculator
    global_token_num = tu.get(data, key="global_token_num")
    compute_loss = tu.get(data, key="compute_loss", default=True)
    disable_auto_offload = tu.get(data, key="disable_auto_offload", default=False)
    no_lora_adapter = tu.pop(data, key="no_lora_adapter", default=False)
    images_seqlens = tu.get(data, key="images_seqlens", default=None)
    # Inference uses the infer_* engine defaults rather than the training ones.
    default_keys = dict(
        use_remove_padding=self.model_config.use_remove_padding,
        use_dynamic_bsz=self.engine_config.use_dynamic_bsz,
        max_token_len_per_gpu=self.engine_config.infer_max_token_len_per_gpu,
        micro_batch_size_per_gpu=self.engine_config.infer_micro_batch_size_per_gpu,
        use_fused_kernels=self.engine_config.use_fused_kernels,
    )
    for key, val in default_keys.items():
        if key not in data.keys():
            tu.assign_non_tensor(data, **{key: val})
    # for sft training, we need to compute loss in eval
    loss_function = self.loss_fn if compute_loss else None
    with (
        self.engine.eval_mode(disable_auto_offload=disable_auto_offload),
        Timer(name="eval_batch", logger=None) as timer,
    ):
        # Optionally bypass the LoRA adapter to score with the base model.
        adapter_ctx = self.engine.disable_adapter() if no_lora_adapter else nullcontext()
        with adapter_ctx:
            output = self.engine.infer_batch(data, loss_function=loss_function)
    delta_time = timer.last
    if self.engine.is_mp_src_rank_with_outputs():
        final_output = self._postprocess_output(
            output,
            global_token_num=global_token_num,
            delta_time=delta_time,
            forward_only=True,
            images_seqlens=images_seqlens,
        ).cpu()
    else:
        final_output = None
    return final_output
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def save_checkpoint(self, local_path, hdfs_path=None, global_step=0, max_ckpt_to_keep=None):
    """Delegate checkpoint saving to the underlying engine on every rank."""
    result = self.engine.save_checkpoint(local_path, hdfs_path, global_step, max_ckpt_to_keep)
    return result
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def load_checkpoint(self, local_path, hdfs_path=None, del_local_after_load=False):
    """Delegate checkpoint loading to the underlying engine on every rank."""
    result = self.engine.load_checkpoint(local_path, hdfs_path, del_local_after_load)
    return result
class ActorRolloutRefWorker(Worker, DistProfilerExtension):
    """Hybrid worker that includes actor model, rollout and optional ref model.

    For standalone actor or rollout, use ActorWorker or BaseRollout respectively.

    NOTE: ActorRolloutRefWorker no longer supports SPMD mode; it runs in native
    server mode.
    """

    def __init__(self, config: DictConfig, role: str, **kwargs):
        Worker.__init__(self)
        self.config = config
        self.role = role
        # Sub-workers are created lazily in init_model().
        self.actor: TrainingWorker = None
        self.ref: TrainingWorker = None
        self.rollout: BaseRollout = None

        assert self.role in ["actor", "rollout", "ref", "actor_rollout", "actor_rollout_ref"]
        self._is_actor = self.role in ["actor", "actor_rollout", "actor_rollout_ref"]
        self._is_rollout = self.role in ["rollout", "actor_rollout", "actor_rollout_ref"]
        self._is_ref = self.role in ["ref", "actor_rollout_ref"]

        # Pick the profiler config from whichever sub-role takes precedence
        # (actor > rollout > ref).
        if self._is_actor:
            omega_profiler_config = config.actor.get("profiler", {})
        elif self._is_rollout:
            # NOTE: In colocation mode, rollout config may not take effect (follow the actor config)
            # This is for extendability in AsyncRL cases
            omega_profiler_config = config.rollout.get("profiler", {})
        else:
            omega_profiler_config = config.ref.get("profiler", {})
        profiler_config = omega_conf_to_dataclass(omega_profiler_config, dataclass_type=ProfilerConfig)
        # Only these tools carry a tool-specific config section.
        if omega_profiler_config.get("tool", None) in ["npu", "nsys", "torch", "torch_memory"]:
            tool_config = omega_conf_to_dataclass(
                omega_profiler_config.get("tool_config", {}).get(omega_profiler_config.get("tool"))
            )
        else:
            tool_config = None

        # NOTE(review): this reads config.actor unconditionally, even for pure
        # rollout/ref roles — assumes config.actor is always present; confirm
        # against callers.
        self.enable_routing_replay = (
            self.config.actor.strategy == "megatron" and self.config.actor.megatron.router_replay.mode != "disabled"
        )
        DistProfilerExtension.__init__(
            self, DistProfiler(rank=self.rank, config=profiler_config, tool_config=tool_config)
        )

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def set_loss_fn(self, loss_fn):
        """Install the training loss function on the actor sub-worker."""
        self.actor.set_loss_fn(loss_fn=loss_fn)

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def to(self, device, model=True, optimizer=True, grad=True):
        """Manual control of load/offload"""
        self.actor.to(device=device, model=model, optimizer=optimizer, grad=grad)

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def init_model(self):
        """Build the ref/actor/rollout sub-workers declared by ``self.role``."""
        model_config: HFModelConfig = omega_conf_to_dataclass(self.config.model)

        # 1. build reference model
        if "ref" in self.role:
            # TODO: align ref config with actor config
            # The ref model reuses the actor schema; map the log_prob_* keys
            # onto the ppo_* keys it expects.
            with open_dict(self.config.ref):
                self.config.ref.ppo_mini_batch_size = self.config.actor.ppo_mini_batch_size
                self.config.ref.ppo_micro_batch_size = self.config.ref.pop("log_prob_micro_batch_size", None)
                self.config.ref.ppo_micro_batch_size_per_gpu = self.config.ref.pop(
                    "log_prob_micro_batch_size_per_gpu", None
                )
                self.config.ref.use_dynamic_bsz = self.config.ref.pop("log_prob_use_dynamic_bsz", False)
                self.config.ref.ppo_max_token_len_per_gpu = self.config.ref.pop("log_prob_max_token_len_per_gpu", None)
            ref_config: ActorConfig = omega_conf_to_dataclass(self.config.ref)
            ref_config.model_config = model_config
            # construct TrainingWorkerConfig
            ref_training_config = TrainingWorkerConfig(
                model_type="language_model",
                model_config=ref_config.model_config,
                engine_config=ref_config.engine,
                optimizer_config=ref_config.optim,
                checkpoint_config=ref_config.checkpoint,
            )
            # assign engine configs (ref only ever runs inference)
            ref_training_config.engine_config.use_dynamic_bsz = self.config.ref.use_dynamic_bsz
            ref_training_config.engine_config.infer_max_token_len_per_gpu = self.config.ref.ppo_max_token_len_per_gpu
            ref_training_config.engine_config.infer_micro_batch_size_per_gpu = (
                self.config.ref.ppo_micro_batch_size_per_gpu
            )
            ref_training_config.engine_config.use_remove_padding = model_config.use_remove_padding
            self.ref = TrainingWorker(config=ref_training_config)
            self.ref.reset()
            self.set_dispatch_collect(mesh_name="ref", **self.ref.get_dispatch_collect())

        # 2. build actor model
        if "actor" in self.role:
            actor_config: ActorConfig = omega_conf_to_dataclass(self.config.actor)
            actor_config.model_config = model_config
            actor_training_config = TrainingWorkerConfig(
                model_type="language_model",
                model_config=actor_config.model_config,
                engine_config=actor_config.engine,
                optimizer_config=actor_config.optim,
                checkpoint_config=actor_config.checkpoint,
            )
            # Training and log-prob passes must agree on the batching scheme.
            assert self.config.actor.use_dynamic_bsz == self.config.rollout.log_prob_use_dynamic_bsz
            # assign engine configs: infer_* comes from the rollout log-prob
            # settings, train settings come from the actor PPO settings.
            actor_training_config.engine_config.use_dynamic_bsz = self.config.actor.use_dynamic_bsz
            actor_training_config.engine_config.infer_max_token_len_per_gpu = (
                self.config.rollout.log_prob_max_token_len_per_gpu
            )
            actor_training_config.engine_config.infer_micro_batch_size_per_gpu = (
                self.config.rollout.log_prob_micro_batch_size_per_gpu
            )
            actor_training_config.engine_config.max_token_len_per_gpu = self.config.actor.ppo_max_token_len_per_gpu
            actor_training_config.engine_config.micro_batch_size_per_gpu = (
                self.config.actor.ppo_micro_batch_size_per_gpu
            )
            actor_training_config.engine_config.use_remove_padding = model_config.use_remove_padding
            # Exactly one of the two batching schemes must be fully specified.
            if self.config.actor.use_dynamic_bsz:
                assert self.config.rollout.log_prob_max_token_len_per_gpu is not None
                assert self.config.actor.ppo_max_token_len_per_gpu is not None
            else:
                assert self.config.rollout.log_prob_micro_batch_size_per_gpu is not None
                assert self.config.actor.ppo_micro_batch_size_per_gpu is not None
            self.loss_fn = partial(ppo_loss, config=actor_config)
            self.actor = TrainingWorker(config=actor_training_config)
            self.actor.reset()
            self.actor.set_loss_fn(self.loss_fn)
            self.set_dispatch_collect(mesh_name="actor", **self.actor.get_dispatch_collect())

        # 3. build rollout engine
        if "rollout" in self.role:
            rollout_config: RolloutConfig = omega_conf_to_dataclass(self.config.rollout)
            # TODO: move rollout_device_mesh into ServerAdapter
            # 3.1 build rollout device mesh (sglang need only)
            infer_tp = rollout_config.tensor_model_parallel_size * rollout_config.data_parallel_size
            infer_pp = rollout_config.pipeline_model_parallel_size
            infer_world_size = infer_tp * infer_pp
            dp = self.world_size // infer_world_size
            assert self.world_size % infer_world_size == 0, (
                f"rollout world_size: {self.world_size} is not divisible by infer_world_size: {infer_world_size}"
            )
            rollout_device_mesh = init_device_mesh(
                get_device_name(), mesh_shape=(dp, infer_tp, infer_pp), mesh_dim_names=["dp", "infer_tp", "infer_pp"]
            )
            # 3.2 initialize rollout engine
            rollout_cls: type[BaseRollout] = get_rollout_class(rollout_config.name, rollout_config.mode)
            self.rollout = rollout_cls(
                config=rollout_config, model_config=model_config, device_mesh=rollout_device_mesh
            )
            # used for LoRA: a "dummy" load_format means real weights still
            # need a full base sync before the first rollout.
            self.base_sync_done: bool = "dummy" not in self.config.rollout.load_format
            self.layered_summon = self.config.rollout.get("layered_summon", False)
            self.peft_merge: bool = model_config.lora.get("merge", False)

        # 4. build checkpoint engine (used to ship weights to disaggregated rollouts)
        if "actor" in self.role:
            checkpoint_engine_config = omega_conf_to_dataclass(self.config.rollout.checkpoint_engine)
            backend = checkpoint_engine_config.backend
            # megabytes -> bytes
            bucket_size = checkpoint_engine_config.update_weights_bucket_megabytes << 20
            engine_kwargs = checkpoint_engine_config.engine_kwargs.get(backend, {})
            self.checkpoint_engine = CheckpointEngineRegistry.new(
                backend, is_master=(torch.distributed.get_rank() == 0), bucket_size=bucket_size, **engine_kwargs
            )
        # Free cached GPU memory so colocated vLLM processes can see it via cudaMemGetInfo
        aggressive_empty_cache(force_sync=True)

    @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="ref"))
    @DistProfiler.annotate(color="olive", role="ref_compute_log_prob")
    @_with_routing_replay_flag(enabled=False)
    def compute_ref_log_prob(self, data: TensorDict) -> TensorDict:
        """Forward pass on the reference model; returns log-probs on CPU (or None)."""
        output = self.ref.infer_batch(data=data)
        return output.cpu() if output is not None else None

    @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="actor"))
    @DistProfiler.annotate(color="blue", role="actor_compute_log_prob")
    @_with_routing_replay_flag(enabled=True)
    def compute_log_prob(self, data: TensorDict) -> TensorDict:
        """Forward pass on the actor model; returns log-probs on CPU (or None)."""
        output = self.actor.infer_batch(data)
        return output.cpu() if output is not None else None

    @register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="actor"))
    @DistProfiler.annotate(color="red", role="actor_update")
    @_with_routing_replay_flag(enabled=True)
    def update_actor(self, data: TensorDict) -> TensorDict:
        """PPO update of the actor over one mini-batch; returns metrics on CPU (or None)."""
        output = self.actor.train_mini_batch(data=data)
        return output.cpu() if output is not None else None

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def load_checkpoint(self, local_path, hdfs_path=None, del_local_after_load=False):
        """Load an actor checkpoint on every rank."""
        assert "actor" in self.role, "load_checkpoint only support actor role"
        self.actor.load_checkpoint(local_path, hdfs_path, del_local_after_load)

    @register(dispatch_mode=Dispatch.ONE_TO_ALL)
    def save_checkpoint(self, local_path, hdfs_path=None, global_step=0, max_ckpt_to_keep=None):
        """Save an actor checkpoint on every rank."""
        assert "actor" in self.role, "save_checkpoint only support actor role"
        self.actor.save_checkpoint(local_path, hdfs_path, global_step, max_ckpt_to_keep)

    @register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=False)
    async def update_weights(self, global_steps: int = None):
        """Update weights from trainer to rollout.

        1. For sync training with colocated trainer and rollout, update rollout directly from model engine.
           - before update_weights: rollout should be in sleep mode.
           - after update_weights: rollout should be in wake_up mode.
        2. For async training with disaggregated trainer and rollout, send_weights only by checkpoint engine.
        """
        # 0. send_weights only for async training with disaggregated trainer and rollout
        if self.config.rollout.checkpoint_engine.backend != "naive":
            per_tensor_param, _ = self.actor.engine.get_per_tensor_param()
            await self.checkpoint_engine.send_weights(per_tensor_param)
            return

        set_expandable_segments(False)
        log_gpu_memory_usage("Before resume weights", logger=logger)
        # 1. resume weights and update weights
        if self.config.rollout.free_cache_engine:
            await self.rollout.resume(tags=["weights"])
        log_gpu_memory_usage("After resume weights", logger=logger)

        # 2. get per tensor generator from engine, this will load model to gpu
        per_tensor_param, peft_config = self.actor.engine.get_per_tensor_param(
            layered_summon=self.layered_summon, base_sync_done=True
        )
        await self.rollout.update_weights(
            per_tensor_param, peft_config=peft_config, base_sync_done=True, global_steps=global_steps
        )
        do_lora_base_sync = False
        if not self.peft_merge and peft_config is not None:
            # set sleep level for LoRA adapter weights only sync
            # TODO: make this configurable so that users with small
            # main memory can trade sync time to avoid OOM
            self.rollout.sleep_level = 1
            do_lora_base_sync = (not self.base_sync_done) or (
                self.rollout.sleep_level != 1 and self.config.rollout.free_cache_engine
            )
        if do_lora_base_sync:
            # Base weights also need a (one-time) sync when LoRA is unmerged.
            per_tensor_base_params, _ = self.actor.engine.get_per_tensor_param(
                layered_summon=self.layered_summon, base_sync_done=False
            )
            await self.rollout.update_weights(per_tensor_base_params, peft_config=peft_config, base_sync_done=False)
        log_gpu_memory_usage("After update_weights", logger=logger)

        # 3. offload model to cpu
        self.actor.engine.to("cpu", model=True, optimizer=False, grad=False)
        aggressive_empty_cache(force_sync=True)

        # 4. resume kv_cache
        if self.config.rollout.free_cache_engine:
            await self.rollout.resume(tags=["kv_cache"])
        log_gpu_memory_usage("After resume kv_cache", logger=logger)
        self.base_sync_done = True
        set_expandable_segments(True)

    @register(dispatch_mode=Dispatch.DP_COMPUTE, blocking=False)
    def execute_checkpoint_engine(self, method: str, *args, **kwargs):
        """Execute checkpoint engine method.

        Args:
            method (str): Checkpoint engine method name.
            *args: Variable length argument list.
            **kwargs: Arbitrary keyword arguments.
        """
        return getattr(self.checkpoint_engine, method)(*args, **kwargs)
| {
"repo_id": "verl-project/verl",
"file_path": "verl/workers/engine_workers.py",
"license": "Apache License 2.0",
"lines": 583,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/utils/vllm/vllm_fp8_utils.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from dataclasses import dataclass, field
from unittest.mock import patch
import torch
import vllm
from packaging import version
try:
from vllm.model_executor.layers.fused_moe.layer import FusedMoE
from vllm.model_executor.layers.linear import LinearBase
except ImportError as e:
raise ImportError("FP8 quantization not available") from e
from verl.utils.kernel.fp8_kernel import scaled_fp8_blockwise
logger = logging.getLogger(__name__)
# Ref: https://github.com/NVIDIA-NeMo/RL/commit/bc24887c72a6e1b2699a228bc87c588546dfe6b7
@dataclass()
class FP8State:
    """Process-wide bookkeeping for FP8 weight refit.

    Caches which parameter names correspond to FP8 weights so the (module
    traversal based) classification in ``is_fp8_weight`` runs at most once
    per parameter, and holds the active vLLM monkey-patches.
    """

    # Every parameter name already classified (FP8 or not).
    seen_params: set = field(default_factory=set)
    # Subset of seen_params that are FP8 weights.
    fp8_param_names: set = field(default_factory=set)
    # unittest.mock patchers applied to vLLM internals.
    vllm_patches: list = field(default_factory=list)


fp8_state: FP8State = FP8State()
def is_fp8_model(vllm_config):
    """Return True if *vllm_config* declares FP8 quantization.

    Args:
        vllm_config: A vLLM config object; may lack ``quant_config`` entirely.
    """
    from vllm.model_executor.layers.quantization.fp8 import Fp8Config

    # hasattr guard: some configs do not carry a quant_config attribute.
    return hasattr(vllm_config, "quant_config") and isinstance(vllm_config.quant_config, Fp8Config)
def get_module_from_param_name(model, name: str):
    """Resolve the submodule that owns the parameter called *name*.

    The trailing component of *name* (e.g. ``weight``) is dropped; the last
    module component is mapped through the model's ``packed_modules_mapping``
    (e.g. ``q_proj`` -> its fused projection). Traversal stops early at a
    FusedMoE module, since its expert weights live on the MoE layer itself.
    """
    # Split the dotted path and drop the parameter's own name.
    parts = name.split(".")
    path = parts[:-1]
    # Invert packed_modules_mapping: original projection name -> fused name.
    fused_by_original = {}
    for fused_name, original_names in model.packed_modules_mapping.items():
        for original_name in original_names:
            fused_by_original[original_name] = fused_name
    if path[-1] in fused_by_original.keys():
        path[-1] = fused_by_original[path[-1]]
    current = model
    try:
        for component in path:
            if isinstance(current, FusedMoE):
                return current
            elif isinstance(current, torch.nn.ModuleList):
                current = current[int(component)]
            else:
                current = getattr(current, component)
    except (AttributeError, IndexError, ValueError) as e:
        # Best-effort: warn and fall through with the deepest module reached.
        print(f"Warning: Could not find module for parameter '{name}'. Error: {e}")
    return current
def is_fp8_weight(name, model):
    """Return True if parameter *name* is an FP8-quantized weight.

    The verdict is cached in the module-level ``fp8_state`` so the module
    traversal happens only on the first query for each name.
    """
    if name not in fp8_state.seen_params:
        fp8_state.seen_params.add(name)
        # Bias params never end with "weight" and are skipped outright.
        if name.endswith("weight"):
            module = get_module_from_param_name(model, name)
            # We currently only quantize linear and fused-MoE layers.
            fp8_linear = isinstance(module, LinearBase) and module.weight.dtype == torch.float8_e4m3fn
            fp8_moe = (
                isinstance(module, FusedMoE)
                and module.w13_weight.dtype == torch.float8_e4m3fn
                and module.w2_weight.dtype == torch.float8_e4m3fn
            )
            if fp8_linear or fp8_moe:
                fp8_state.fp8_param_names.add(name)
    return name in fp8_state.fp8_param_names
def quant_weights(weights, model, quant_config, dtype=torch.bfloat16):
    """Quantize weights to FP8 format using a memory-efficient generator.

    Args:
        weights: Generator or iterable of (name, tensor) pairs
        model: The model to check for FP8 weight names
        quant_config: Quantization configuration with weight_block_size
        dtype: Data type for intermediate computation (default: bfloat16)

    Yields:
        Tuples of (name, tensor) for each weight and its scale
    """
    if quant_config.weight_block_size is None:
        raise ValueError("Currently only support blockwise quantization, please set weight_block_size in quant_config")
    # vLLM >= 0.11 renamed most scale tensors from *_scale_inv to *_scale.
    is_vllm_11_or_later = version.parse(vllm.__version__) >= version.parse("0.11.0")
    for k, v in weights:
        if not is_fp8_weight(k, model):
            # Non-FP8 params (biases, norms, embeddings, ...) pass through unchanged.
            yield (k, v)
            continue
        # Cast the weight into fp8 and its scale factor
        if torch.distributed.get_rank() == 0:
            logger.debug(f"Quantizing to FP8 blockwise: {k}")
        param_lp, param_scale = scaled_fp8_blockwise(
            v.to(dtype),
            weight_block_size=quant_config.weight_block_size,
        )
        param_scale = param_scale.squeeze(-1)
        # Yield the quantized weight
        yield (k, param_lp)
        # Yield the scale with appropriate naming based on vLLM version.
        # NOTE(review): expert (MoE) weights appear to keep the `_scale_inv`
        # suffix even on vLLM >= 0.11 — confirm against vLLM's MoE loader.
        if is_vllm_11_or_later:
            if "expert" in k:
                yield (k + "_scale_inv", param_scale)
            else:
                yield (k + "_scale", param_scale)
        else:
            yield (k + "_scale_inv", param_scale)
        # Explicitly delete original tensor reference to help GC
        del v, param_lp, param_scale
def load_quanted_weights(weights, model_runner):
    """Quantize *weights* to FP8 on the fly and load them into the vLLM model.

    Returns the set of parameter names reported loaded by ``model.load_weights``.
    """
    model = model_runner.model
    vllm_config = model_runner.vllm_config
    quantized_stream = quant_weights(
        weights, model, vllm_config.quant_config, dtype=vllm_config.model_config.dtype
    )
    # Temporarily restore the original vLLM parameter subclass on patched
    # params so model.load_weights dispatches to the right weight loader.
    for _, param in model.named_parameters():
        if hasattr(param, "subclass_type"):
            param.orig_type = param.__class__
            param.__class__ = param.subclass_type
    # Finally load the (lazily quantized) weights into vllm.
    loaded_params = model.load_weights(quantized_stream)
    # Undo the class swap so the params are plain again.
    for _, param in model.named_parameters():
        if hasattr(param, "subclass_type"):
            param.__class__ = param.orig_type
    return loaded_params
def process_weights_after_loading_for_vllm10(self, layer) -> None:
    """This function is used to process the weights after loading for a Linear layer, it is used for vllm v0.10

    Compared to the original process_weights_after_loading in vllm, we just avoid creation of
    new torch.nn.Parameter objects, because that removes the weight_loader attribute which we need for refit.
    """
    logger.debug("Applying patch process_weights_after_loading")
    try:
        from vllm.model_executor.parameter import (
            BlockQuantScaleParameter,
            ModelWeightParameter,
        )
    except ImportError as e:
        # Previously this swallowed the error (print("error")) and then crashed
        # later with a confusing NameError; fail fast with context instead.
        raise ImportError("vllm.model_executor.parameter is required for the FP8 refit patch") from e
    from torch.nn import Parameter

    def _create_param_from_subclass_attributes(custom_param):
        # Re-wrap the data in a plain Parameter, copying over every attribute
        # the vLLM parameter subclass added (weight_loader, output_dim, ...),
        # and remember the subclass so refit can restore it later.
        param = Parameter(custom_param.data, requires_grad=False)
        base_param_dir = dir(torch.nn.Parameter)
        custom_param_dir = dir(custom_param)
        # Find the attributes that are unique to the custom parameter
        custom_attributes = [
            attr for attr in custom_param_dir if attr not in base_param_dir and not attr.startswith("__")
        ]
        # Set the custom attributes into the base parameter object
        for attr in custom_attributes:
            setattr(param, attr, getattr(custom_param, attr))
        param.subclass_type = type(custom_param)
        return param

    # Only dynamic-activation, blockwise FP8 checkpoints are supported.
    assert self.block_quant and self.quant_config.is_checkpoint_fp8_serialized
    assert self.quant_config.activation_scheme == "dynamic"

    weight = layer.weight.data
    weight_scale_inv = layer.weight_scale_inv.data
    weight = self._maybe_pad_weight(weight)
    layer.weight = _create_param_from_subclass_attributes(
        ModelWeightParameter(
            data=weight,
            output_dim=0,
            input_dim=1,
            weight_loader=layer.weight.weight_loader,
        )
    )
    layer.weight_scale_inv = _create_param_from_subclass_attributes(
        BlockQuantScaleParameter(
            data=weight_scale_inv,
            output_dim=0,
            input_dim=1,
            weight_loader=layer.weight_scale_inv.weight_loader,
        )
    )
def process_weights_after_loading_for_vllm11(self, layer) -> None:
    """This function is used to process the weights after loading for a Linear layer, it is used for vllm 0.11

    Compared to the original process_weights_after_loading in vllm, we just avoid creation of
    new torch.nn.Parameter objects, because that removes the weight_loader attribute which we need for refit.
    """
    from torch.nn import Parameter
    from vllm.model_executor.layers.quantization.utils.fp8_utils import (
        maybe_post_process_fp8_weight_block,
        process_fp8_weight_block_strategy,
    )
    from vllm.model_executor.parameter import (
        BlockQuantScaleParameter,
        ModelWeightParameter,
    )

    # Only dynamic-activation, blockwise FP8 checkpoints are supported.
    assert self.block_quant and self.quant_config.is_checkpoint_fp8_serialized
    assert self.quant_config.activation_scheme == "dynamic"

    def _create_param_from_subclass_attributes(custom_param):
        # Re-wrap the data in a plain Parameter, copying over every attribute
        # the vLLM parameter subclass added (weight_loader, output_dim, ...),
        # and remember the subclass so refit can restore it later.
        param = Parameter(custom_param.data, requires_grad=False)
        base_param_dir = dir(torch.nn.Parameter)
        custom_param_dir = dir(custom_param)
        # Find the attributes that are unique to the custom parameter
        custom_attributes = [
            attr for attr in custom_param_dir if attr not in base_param_dir and not attr.startswith("__")
        ]
        # Set the custom attributes into the base parameter object
        for attr in custom_attributes:
            setattr(param, attr, getattr(custom_param, attr))
        param.subclass_type = type(custom_param)
        return param

    weight_scale = layer.weight_scale_inv if hasattr(layer, "weight_scale_inv") else layer.weight_scale
    weight, weight_scale = process_fp8_weight_block_strategy(layer.weight, weight_scale)
    layer.weight = _create_param_from_subclass_attributes(
        ModelWeightParameter(
            data=weight.data,
            output_dim=0,
            input_dim=1,
            weight_loader=layer.weight.weight_loader,
        )
    )
    # NOTE(review): the two accesses below assume `layer.weight_scale_inv`
    # exists; if the `layer.weight_scale` fallback branch above was taken,
    # this lookup and the `del` would raise AttributeError — confirm.
    layer.weight_scale = _create_param_from_subclass_attributes(
        BlockQuantScaleParameter(
            data=weight_scale.data,
            output_dim=0,
            input_dim=1,
            weight_loader=layer.weight_scale_inv.weight_loader,
        )
    )
    del layer.weight_scale_inv
    # vLLM 0.11.0 still takes the cutlass support flag; later versions dropped it.
    if version.parse(vllm.__version__) == version.parse("0.11.0"):
        maybe_post_process_fp8_weight_block(layer, self.cutlass_block_fp8_supported)
    else:
        maybe_post_process_fp8_weight_block(layer)
def process_weights_after_loading_moe_for_vllm10(self, layer) -> None:
    """This function is used to process the weights after loading for a FusedMoE layer, it is used for vllm v0.10

    As with the Linear patch, parameters are re-wrapped as plain
    torch.nn.Parameter objects with the subclass attributes (weight_loader,
    ...) copied over, so refit keeps working after loading.
    """
    from vllm.model_executor.layers.fused_moe.rocm_aiter_fused_moe import is_rocm_aiter_moe_enabled
    from vllm.model_executor.layers.quantization.fp8 import _is_col_major, _swap_w13_to_w31
    from vllm.model_executor.layers.quantization.utils.fp8_utils import (
        get_col_major_tma_aligned_tensor,
        requant_weight_ue8m0_inplace,
    )
    from vllm.utils.deep_gemm import is_blackwell_deep_gemm_used

    self.rocm_aiter_moe_enabled = is_rocm_aiter_moe_enabled()
    assert self.quant_config.activation_scheme == "dynamic"

    # FlashInfer expects the w1/w3 halves swapped.
    if self.flashinfer_moe_enabled:
        w13_weight = _swap_w13_to_w31(layer.w13_weight.data)
        w13_weight_scale_inv = _swap_w13_to_w31(layer.w13_weight_scale_inv.data)
        w2_weight = layer.w2_weight.data
        w2_weight_scale_inv = layer.w2_weight_scale_inv.data
    else:
        w13_weight = layer.w13_weight.data
        w13_weight_scale_inv = layer.w13_weight_scale_inv.data
        # NOTE(review): this branch passes the Parameter objects themselves
        # (no `.data`), unlike the lines above — confirm the asymmetry is intended.
        w2_weight = layer.w2_weight
        w2_weight_scale_inv = layer.w2_weight_scale_inv
    from torch.nn import Parameter

    def _create_param_from_subclass_attributes(custom_data, custom_weight):
        # Wrap custom_data in a plain Parameter and copy over the attributes
        # that the vLLM parameter subclass added. Unlike the Linear patch,
        # this variant does not record subclass_type.
        param = Parameter(custom_data, requires_grad=False)
        base_param_dir = dir(torch.nn.Parameter)
        custom_weight_dir = dir(custom_weight)
        # Find the attributes that are unique to the custom parameter
        custom_attributes = [
            attr for attr in custom_weight_dir if attr not in base_param_dir and not attr.startswith("__")
        ]
        # Set the custom attributes into the base parameter object
        for attr in custom_attributes:
            setattr(param, attr, getattr(custom_weight, attr))
        return param

    layer.w13_weight = _create_param_from_subclass_attributes(w13_weight, layer.w13_weight)
    layer.w13_weight_scale_inv = _create_param_from_subclass_attributes(
        w13_weight_scale_inv, layer.w13_weight_scale_inv
    )
    layer.w2_weight = _create_param_from_subclass_attributes(w2_weight, layer.w2_weight)
    layer.w2_weight_scale_inv = _create_param_from_subclass_attributes(w2_weight_scale_inv, layer.w2_weight_scale_inv)

    # DeepGemm scales need to be transposed and aligned. We try to do
    # it ahead of time for performance reasons.
    if self.allow_deep_gemm and not is_blackwell_deep_gemm_used():
        # Lazy import to avoid CUDA initialization problems.
        if _is_col_major(layer.w13_weight_scale_inv):
            layer.w13_weight_scale_inv = get_col_major_tma_aligned_tensor(layer.w13_weight_scale_inv).contiguous()
        if _is_col_major(layer.w2_weight_scale_inv):
            layer.w2_weight_scale_inv = get_col_major_tma_aligned_tensor(layer.w2_weight_scale_inv).contiguous()
    if is_blackwell_deep_gemm_used():
        assert layer.weight_block_size is not None
        # Re-quantise the expert weights so their scales are UE8M0.
        block_sz = tuple(layer.weight_block_size)
        requant_weight_ue8m0_inplace(
            layer.w13_weight.data,
            layer.w13_weight_scale_inv.data,
            block_sz,
        )
        requant_weight_ue8m0_inplace(
            layer.w2_weight.data,
            layer.w2_weight_scale_inv.data,
            block_sz,
        )
        # Ensure column-major TMA alignment after requantization as well.
        if _is_col_major(layer.w13_weight_scale_inv):
            layer.w13_weight_scale_inv = get_col_major_tma_aligned_tensor(layer.w13_weight_scale_inv).contiguous()
        if _is_col_major(layer.w2_weight_scale_inv):
            layer.w2_weight_scale_inv = get_col_major_tma_aligned_tensor(layer.w2_weight_scale_inv).contiguous()
def process_weights_after_loading_moe_for_vllm11(self, layer) -> None:
    """This function is used to process the weights after loading for a FusedMoE layer, it is used for vllm 0.11

    Unlike the v0.10 variant, this one mutates the existing parameters'
    ``.data`` in place instead of re-wrapping them.
    """
    from vllm.model_executor.layers.quantization.utils.flashinfer_utils import (
        swap_w13_to_w31,
    )
    from vllm.model_executor.layers.quantization.utils.fp8_utils import (
        expert_weight_is_col_major,
        requant_weight_ue8m0_inplace,
    )
    from vllm.utils.deep_gemm import (
        get_col_major_tma_aligned_tensor,
        is_deep_gemm_e8m0_used,
    )

    # The ROCm AITER probe moved between vLLM releases; support both locations.
    try:
        from vllm.model_executor.layers.fused_moe.rocm_aiter_fused_moe import is_rocm_aiter_moe_enabled

        self.rocm_aiter_moe_enabled = is_rocm_aiter_moe_enabled()
    except ImportError:
        from vllm._aiter_ops import rocm_aiter_ops

        self.rocm_aiter_moe_enabled = rocm_aiter_ops.is_fused_moe_enabled()

    # Only dynamic-activation, blockwise FP8 checkpoints are supported.
    assert self.block_quant and self.quant_config.is_checkpoint_fp8_serialized
    assert self.quant_config.activation_scheme == "dynamic"

    # FlashInfer expects the w1/w3 halves swapped.
    if self.flashinfer_moe_backend is not None:
        layer.w13_weight.data = swap_w13_to_w31(layer.w13_weight.data)
        layer.w13_weight_scale_inv.data = swap_w13_to_w31(layer.w13_weight_scale_inv.data)
    # DeepGemm scales need to be transposed and TMA-aligned ahead of time.
    if self.allow_deep_gemm and not is_deep_gemm_e8m0_used():
        if expert_weight_is_col_major(layer.w13_weight_scale_inv):
            layer.w13_weight_scale_inv = get_col_major_tma_aligned_tensor(layer.w13_weight_scale_inv)
        if expert_weight_is_col_major(layer.w2_weight_scale_inv):
            layer.w2_weight_scale_inv = get_col_major_tma_aligned_tensor(layer.w2_weight_scale_inv)
    if is_deep_gemm_e8m0_used():
        assert layer.weight_block_size is not None
        # Re-quantise the expert weights so their scales are UE8M0.
        block_sz = tuple(layer.weight_block_size)
        requant_weight_ue8m0_inplace(
            layer.w13_weight.data,
            layer.w13_weight_scale_inv.data,
            block_sz,
        )
        requant_weight_ue8m0_inplace(
            layer.w2_weight.data,
            layer.w2_weight_scale_inv.data,
            block_sz,
        )
        # Ensure column-major TMA alignment expected by DeepGEMM.
        if expert_weight_is_col_major(layer.w13_weight_scale_inv):
            layer.w13_weight_scale_inv = get_col_major_tma_aligned_tensor(layer.w13_weight_scale_inv)
        if expert_weight_is_col_major(layer.w2_weight_scale_inv):
            layer.w2_weight_scale_inv = get_col_major_tma_aligned_tensor(layer.w2_weight_scale_inv)
def apply_vllm_fp8_patches():
    """Monkey-patch vLLM's FP8 ``process_weights_after_loading`` hooks.

    Replaces both the Linear (Fp8LinearMethod) and MoE (Fp8MoEMethod) hooks
    with the refit-friendly variants defined above, picking the v0.10 or
    v0.11 implementation based on the installed vLLM version. The started
    patchers are recorded in ``fp8_state.vllm_patches`` so they can be
    stopped later if needed.
    """
    logger.info("Applying vllm fp8 patches for blockwise quantization")
    # Resolve the version cut-over once; both patches key off the same check.
    is_vllm_11_or_later = version.parse(vllm.__version__) >= version.parse("0.11.0")

    func1_path = "vllm.model_executor.layers.quantization.fp8.Fp8LinearMethod.process_weights_after_loading"
    patcher1 = patch(
        func1_path,
        process_weights_after_loading_for_vllm11 if is_vllm_11_or_later else process_weights_after_loading_for_vllm10,
    )
    patcher1.start()

    func2_path = "vllm.model_executor.layers.quantization.fp8.Fp8MoEMethod.process_weights_after_loading"
    patcher2 = patch(
        func2_path,
        process_weights_after_loading_moe_for_vllm11
        if is_vllm_11_or_later
        else process_weights_after_loading_moe_for_vllm10,
    )
    patcher2.start()
    # Keep track of the active patchers (the field existed but was never used).
    fp8_state.vllm_patches.extend([patcher1, patcher2])
| {
"repo_id": "verl-project/verl",
"file_path": "verl/utils/vllm/vllm_fp8_utils.py",
"license": "Apache License 2.0",
"lines": 369,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/trainer/ppo/rollout_corr_helper.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Rollout Correction Helper Module
This module provides a complete pipeline to address **off-policy issues** in RL training,
including:
1. Policy mismatch between rollout and training implementations (e.g., vLLM BFloat16 vs FSDP FP32)
2. Model update staleness (training on trajectories from older checkpoints)
3. General distribution shifts between data collection and training
Its core capabilities include computing importance sampling (IS) weights,
filtering outlier samples via rejection sampling (RS), and
tracking metrics to diagnose and correct off-policy issues.
## Core Capabilities
1. **Multi-Granularity Aggregation**:
- Importance Sampling (IS):
Token-level
Sequence-level
- Rejection Sampling (RS):
Divergence-based filters (token_k*, seq_sum_k*, seq_mean_k*, seq_max_k*)
2. **Memory-Efficient Design**:
- Log-space computations to avoid numerical overflow/underflow.
- Fixed safety bounds (exp(±20)) for stable exponentiation.
- Metrics calculated without large intermediate tensors (prevents CUDA OOM).
3. **Comprehensive Metrics Tracking**:
- IS/RS statistics (mean/max/min, effective sample size ESS, rejection rate).
- Off-policy diagnostics (KL divergence, perplexity PPL, log PPL difference, χ² divergence).
- Sequence-level breakdowns (deviation from ideal weights, outlier fraction).
## Key Interfaces & Usage
- compute_rollout_correction_and_rejection_mask(): compute IS weights + rejection mask.
- compute_rollout_correction_weights(): only compute truncated IS weights (for variance
reduction, no outlier rejection).
- compute_rollout_rejection_mask(): only filter outliers (for sample cleaning, no IS weight
computation).
- compute_offpolicy_metrics(): called by core functions to calculate off-policy diagnostics
(KL/PPL/χ²) — no direct external calls needed.
### Integration Notes
- Used in `ray_trainer.py` via `compute_rollout_correction_and_add_to_batch()` (batch training pipeline).
- Used in `dp_actor.py` for distributed worker computations (distributed training scenarios).
- All functions support batch inputs and valid token masking (via `response_mask`).
## References
- "When Speed Kills Stability: Demystifying RL Collapse from the Training-Inference Mismatch": https://richardli.xyz/rl-collapse
- Off-policy RL (theoretical basis for IS): https://fengyao.notion.site/off-policy-rl
"""
import math
from typing import Any, Optional
import torch
import verl.utils.torch_functional as verl_F
from verl.protocol import DataProto
from verl.trainer.config.algorithm import RolloutCorrectionConfig
from verl.workers.config.actor import PolicyLossConfig
# Safety bound to prevent numerical overflow/underflow when exponentiating
# exp(20) ≈ 485 million (upper limit for stable weights), exp(-20) ≈ 2e-9 (lower limit)
SAFETY_BOUND = 20.0

# Rejection-sampling (RS) criteria accepted by compute_rollout_rejection_mask.
# Naming is <aggregation>_<estimator>: aggregation is token / seq_sum /
# seq_mean / seq_max, estimator is the k1/k2/k3 divergence approximation.
SUPPORTED_ROLLOUT_RS_OPTIONS: set[str] = {
    "token_k1",
    "token_k2",
    "token_k3",
    "seq_sum_k1",
    "seq_sum_k2",
    "seq_sum_k3",
    "seq_mean_k1",
    "seq_mean_k2",
    "seq_mean_k3",
    "seq_max_k2",
    "seq_max_k3",
}

# Options whose keep/reject decision is made per token; all other supported
# options decide per sequence.
TOKEN_LEVEL_ROLLOUT_RS_OPTIONS: set[str] = {"token_k1", "token_k2", "token_k3"}
def _parse_rollout_rs_thresholds(
options: list[str], threshold_spec: Optional[str | float]
) -> dict[str, dict[str, Optional[float]]]:
if threshold_spec is None:
raise ValueError("rollout_rs_threshold must be provided for rejection sampling.")
if isinstance(threshold_spec, int | float):
raw_specs: list[str] = [str(threshold_spec)]
elif isinstance(threshold_spec, str):
raw_specs = [part.strip() for part in threshold_spec.split(",") if part.strip()]
else:
raise TypeError("rollout_rs_threshold must be a string or numeric value specifying per-option thresholds.")
if not raw_specs:
raise ValueError("rollout_rs_threshold must contain at least one threshold value.")
if len(raw_specs) not in (1, len(options)):
raise ValueError(
f"rollout_rs_threshold expects either one threshold shared by all options or exactly "
f"{len(options)} thresholds to match the provided rollout_rs options."
)
if len(raw_specs) == 1 and len(options) > 1:
raw_specs = raw_specs * len(options)
thresholds: dict[str, dict[str, Optional[float]]] = {}
for option, spec in zip(options, raw_specs, strict=False):
if option.endswith("k1"):
if "_" in spec:
lower_str, upper_str = spec.split("_", 1)
else:
upper_str = spec
lower_str = str(1.0 / float(upper_str))
try:
lower = float(lower_str)
upper = float(upper_str)
except ValueError as exc:
raise ValueError(f"Invalid numeric threshold '{spec}' for option '{option}'.") from exc
if lower <= 0 or upper <= 0:
raise ValueError(f"Thresholds for option '{option}' must be positive, got {spec}.")
thresholds[option] = {
"lower": lower,
"upper": upper,
}
else:
if "_" in spec:
raise ValueError(
f"rollout_rs_threshold for option '{option}' must provide a single upper bound "
f"without '_'. Received '{spec}'."
)
try:
upper = float(spec)
except ValueError as exc:
raise ValueError(f"Invalid numeric threshold '{spec}' for option '{option}'.") from exc
if upper <= 0:
raise ValueError(f"Threshold for option '{option}' must be positive, got {spec}.")
thresholds[option] = {
"lower": None,
"upper": upper,
}
return thresholds
def compute_rollout_rejection_mask(
    log_ratio: torch.Tensor,
    response_mask: torch.Tensor,
    rollout_rs: str = "token_k1",
    rollout_rs_threshold: Optional[str | float] = None,
) -> tuple[torch.Tensor, dict[str, float]]:
    """Compute hard trust region mask using divergence estimators.

    This function enforces a hard trust region constraint by masking tokens/sequences
    where the estimated divergence (between training and rollout policies) exceeds
    a threshold. Unlike PPO's soft clipping, this provides a hard boundary.

    Multiple rejection criteria can be supplied via a comma separated `rollout_rs` string.
    All requested options must pass for a token/sequence to remain valid.

    Supported KL divergence-based modes (ideal = 0.0 unless noted):
    - "token_k{1,2,3}": Token-level divergences.
    - "seq_sum_k{1,2,3}": Sum of token divergences per sequence.
    - "seq_mean_k{1,2,3}": Mean of token divergences per sequence.
    - "seq_max_k{2,3}": Maximum token divergence per sequence.

    Args:
        log_ratio: Log ratio of training policy probability to rollout policy probability,
            shape (batch_size, seq_length).
        response_mask: Binary mask for valid tokens (1=valid, 0=padding),
            shape (batch_size, seq_length).
        rollout_rs: Comma separated rejection sampling options (e.g. "token_k1,seq_sum_k3").
        rollout_rs_threshold: Threshold specification string (required). Provide one entry per
            rollout_rs option separated by commas. Each entry must be a positive number.
            For K1-style options (``*k1``), specify ``lower_upper`` (e.g. ``"0.1_1.2"``)
            to denote lower/upper ratio bounds; other options accept a single upper bound.

    Returns:
        Tuple containing:
            modified_response_mask: Response mask with trust region violations masked (0=rejected),
                shape (batch_size, seq_length).
            metrics: Dictionary of trust region metrics (all scalars).
    """
    if rollout_rs is None or not isinstance(rollout_rs, str):
        raise ValueError("rollout_rs must be a non-empty string (comma separated for multiple options).")
    if rollout_rs_threshold is None:
        raise ValueError("rollout_rs_threshold must be provided for rejection sampling.")
    # Empty batch: nothing to reject, nothing to report.
    if log_ratio.shape[0] == 0:
        return response_mask, {}
    # rollout_rs supports chained criteria via comma separation (e.g. "token_k1,seq_mean_k3").
    # Every listed option must pass; combined_mask aggregates them via logical AND.
    option_modes = [opt.strip() for opt in rollout_rs.split(",") if opt.strip()]
    if not option_modes:
        raise ValueError("rollout_rs must contain at least one valid option.")
    # Deduplicate options while preserving the user's ordering.
    normalized_options: list[str] = []
    seen: set[str] = set()
    for opt in option_modes:
        if opt not in SUPPORTED_ROLLOUT_RS_OPTIONS:
            raise ValueError(
                f"Invalid rollout_rs option: {opt}. Must be one of {sorted(SUPPORTED_ROLLOUT_RS_OPTIONS)}."
            )
        if opt not in seen:
            normalized_options.append(opt)
            seen.add(opt)
    threshold_specs = _parse_rollout_rs_thresholds(normalized_options, rollout_rs_threshold)
    # Clamp the log ratio so the exp() below cannot overflow/underflow.
    log_ratio_safe: torch.Tensor = torch.clamp(log_ratio, min=-SAFETY_BOUND, max=SAFETY_BOUND)
    # Per-token divergence estimators, with r = pi_train / pi_rollout:
    #   k1 = -log r,  k2 = 0.5 * (log r)^2,  k3 = r - 1 - log r.
    token_k1: torch.Tensor = -log_ratio_safe
    token_k2: torch.Tensor = 0.5 * log_ratio_safe**2
    token_k3: torch.Tensor = torch.exp(log_ratio_safe) - 1.0 - log_ratio_safe
    response_mask_bool: torch.Tensor = response_mask.bool()
    # Sequences with at least one valid token; all-padding rows are excluded
    # from the sequence-level statistics below.
    seq_valid_mask: torch.Tensor = response_mask.sum(dim=-1) > 0
    # combined_mask accumulates per-option passes; any failure flips tokens to 0.
    combined_mask: torch.Tensor = torch.ones_like(response_mask, dtype=log_ratio.dtype)
    metrics: dict[str, float] = {}

    def _sequence_sum(values: torch.Tensor) -> torch.Tensor:
        # Masked sum of the token statistic per sequence.
        return verl_F.masked_sum(values, response_mask, axis=-1)

    def _sequence_mean(values: torch.Tensor) -> torch.Tensor:
        # Masked mean of the token statistic per sequence.
        return verl_F.masked_mean(values, response_mask, axis=-1)

    def _sequence_max(values: torch.Tensor) -> torch.Tensor:
        # Masked max per sequence; all-padding rows yield 0 instead of -inf.
        mask_bool = response_mask.bool()
        neg_inf = torch.tensor(float("-inf"), device=values.device, dtype=values.dtype)
        masked_values = values.masked_fill(~mask_bool, neg_inf)
        max_values = masked_values.max(dim=-1).values
        return torch.where(max_values == neg_inf, torch.zeros_like(max_values), max_values)

    for option_name in normalized_options:
        thresholds_info = threshold_specs[option_name]
        is_k1_option = option_name.endswith("k1")
        upper_value = thresholds_info["upper"]
        lower_value = thresholds_info["lower"]
        # Only k1-style options enforce (and report) a lower bound.
        apply_lower_threshold = is_k1_option
        lower_log: Optional[float] = None
        upper_log: Optional[float] = None
        if is_k1_option:
            if lower_value is None or upper_value is None:
                raise ValueError(
                    f"rollout_rs_threshold for option '{option_name}' must specify both lower and upper bounds."
                )
            # K1 thresholds are ratio bounds; comparisons happen in log space.
            lower_log = math.log(lower_value)
            upper_log = math.log(upper_value)
        else:
            if upper_value is None:
                raise ValueError(f"rollout_rs_threshold for option '{option_name}' must specify an upper bound.")
        level = "sequence" if option_name not in TOKEN_LEVEL_ROLLOUT_RS_OPTIONS else "token"
        per_token_stat: torch.Tensor
        per_sequence_stat: Optional[torch.Tensor] = None
        token_keep_bool: torch.Tensor
        if option_name == "token_k1":
            if lower_log is None:
                raise ValueError("Threshold specification for token_k1 must include lower and upper bounds.")
            per_token_stat = token_k1
            # NOTE(review): token_k1 = -log_ratio, so this band keeps
            # pi_rollout/pi_train within [lower, upper] — the inverse of the
            # ratio named in the docstring. Equivalent for symmetric bounds
            # (lower = 1/upper); confirm intent for asymmetric specs.
            token_keep_bool = (per_token_stat >= lower_log) & (per_token_stat <= upper_log)
        elif option_name == "token_k2":
            per_token_stat = token_k2
            token_keep_bool = per_token_stat <= upper_value
        elif option_name == "token_k3":
            per_token_stat = token_k3
            token_keep_bool = per_token_stat <= upper_value
        elif option_name.startswith("seq_sum"):
            if option_name.endswith("k1"):
                if lower_log is None:
                    raise ValueError(
                        f"Threshold specification for option '{option_name}' must include lower and upper bounds."
                    )
                seq_stat = _sequence_sum(token_k1)
                seq_keep_bool_direct = (seq_stat >= lower_log) & (seq_stat <= upper_log)
            elif option_name.endswith("k2"):
                seq_stat = _sequence_sum(token_k2)
                seq_keep_bool_direct = seq_stat <= upper_value
            elif option_name.endswith("k3"):
                seq_stat = _sequence_sum(token_k3)
                seq_keep_bool_direct = seq_stat <= upper_value
            else:
                raise ValueError(f"Unsupported rollout_rs option: {option_name}.")
            per_sequence_stat = seq_stat
            # A rejected sequence masks every one of its tokens.
            token_keep_bool = seq_keep_bool_direct.unsqueeze(-1).expand_as(response_mask_bool)
            per_token_stat = seq_stat.unsqueeze(-1).expand_as(response_mask)
        elif option_name.startswith("seq_mean"):
            if option_name.endswith("k1"):
                if lower_log is None:
                    raise ValueError(
                        f"Threshold specification for option '{option_name}' must include lower and upper bounds."
                    )
                seq_stat = _sequence_mean(token_k1)
                seq_keep_bool_direct = (seq_stat >= lower_log) & (seq_stat <= upper_log)
            elif option_name.endswith("k2"):
                seq_stat = _sequence_mean(token_k2)
                seq_keep_bool_direct = seq_stat <= upper_value
            elif option_name.endswith("k3"):
                seq_stat = _sequence_mean(token_k3)
                seq_keep_bool_direct = seq_stat <= upper_value
            else:
                raise ValueError(f"Unsupported rollout_rs option: {option_name}.")
            per_sequence_stat = seq_stat
            token_keep_bool = seq_keep_bool_direct.unsqueeze(-1).expand_as(response_mask_bool)
            per_token_stat = seq_stat.unsqueeze(-1).expand_as(response_mask)
        elif option_name.startswith("seq_max"):
            if option_name.endswith("k2"):
                seq_stat = _sequence_max(token_k2)
                seq_keep_bool_direct = seq_stat <= upper_value
            elif option_name.endswith("k3"):
                seq_stat = _sequence_max(token_k3)
                seq_keep_bool_direct = seq_stat <= upper_value
            else:
                raise ValueError(f"Unsupported rollout_rs option: {option_name}.")
            per_sequence_stat = seq_stat
            token_keep_bool = seq_keep_bool_direct.unsqueeze(-1).expand_as(response_mask_bool)
            per_token_stat = seq_stat.unsqueeze(-1).expand_as(response_mask)
        else:
            raise ValueError(f"Unsupported rollout_rs option: {option_name}.")
        # Metrics for k1 options are reported in log space to match the keep test.
        metrics_upper_threshold = upper_log if is_k1_option else upper_value
        metrics_lower_threshold = lower_log if (is_k1_option and lower_log is not None) else 0.0
        token_keep_mask = token_keep_bool.to(dtype=log_ratio.dtype)
        combined_mask = combined_mask * token_keep_mask
        # A sequence "keeps" only if none of its *valid* tokens was rejected.
        seq_keep_bool_tensor = (~((~token_keep_bool) & response_mask_bool)).all(dim=-1)
        option_metrics = compute_rs_metrics(
            option_name=option_name,
            rs_statistic=per_token_stat,
            response_mask=response_mask,
            seq_valid_mask=seq_valid_mask,
            level=level,
            per_sequence_values=per_sequence_stat,
            rollout_rs_threshold=metrics_upper_threshold,
            rollout_rs_threshold_lower=metrics_lower_threshold,
            apply_lower_threshold=apply_lower_threshold,
        )
        metrics.update(option_metrics)
        # Per-option rejection rates, at token and sequence granularity.
        token_masked_fraction = verl_F.masked_mean(1 - token_keep_mask, response_mask).item()
        seq_valid_float = seq_valid_mask.float()
        if seq_valid_float.sum() > 0:
            seq_keep_float = seq_keep_bool_tensor.to(dtype=log_ratio.dtype)
            seq_masked_fraction = (((1.0 - seq_keep_float) * seq_valid_float).sum() / seq_valid_float.sum()).item()
        else:
            seq_masked_fraction = 0.0
        metrics[f"rollout_rs_{option_name}_masked_fraction"] = token_masked_fraction
        metrics[f"rollout_rs_{option_name}_seq_masked_fraction"] = seq_masked_fraction
    final_mask = combined_mask
    # Overall fraction of valid tokens rejected by any option.
    metrics["rollout_rs_masked_fraction"] = verl_F.masked_mean(1 - final_mask, response_mask).item()
    final_keep_bool = (final_mask > 0.5) & response_mask_bool
    # A sequence counts as masked if any of its valid tokens was rejected.
    seq_has_masked: torch.Tensor = (~final_keep_bool & response_mask_bool).any(dim=-1)
    metrics["rollout_rs_seq_masked_fraction"] = seq_has_masked.float().mean().item()
    modified_response_mask: torch.Tensor = (response_mask * final_mask).to(dtype=response_mask.dtype)
    return modified_response_mask, metrics
def compute_rs_metrics(
    option_name: str,
    rs_statistic: torch.Tensor,
    response_mask: torch.Tensor,
    seq_valid_mask: torch.Tensor,
    *,
    level: str,
    per_sequence_values: Optional[torch.Tensor],
    rollout_rs_threshold: float,
    rollout_rs_threshold_lower: float,
    apply_lower_threshold: bool,
) -> dict[str, float]:
    """Build the per-option metric dictionary for hard trust-region enforcement.

    Args:
        option_name: Original option string supplied by the user.
        rs_statistic: Trust region statistic (per token) used for thresholding.
        response_mask: Binary mask for valid tokens (1=valid, 0=padding).
        seq_valid_mask: Boolean mask of sequences with at least one valid token.
        level: "token" or "sequence" describing aggregation level.
        per_sequence_values: Optional per-sequence statistic (same semantics as rs_statistic).
        rollout_rs_threshold: Upper threshold.
        rollout_rs_threshold_lower: Lower threshold (ignored when
            ``apply_lower_threshold`` is False).
        apply_lower_threshold: Whether to report metrics for values below the
            lower threshold.

    Returns:
        Dictionary of scalar metrics, all keyed ``rollout_rs_<option_name>_*``.

    Raises:
        ValueError: If ``response_mask`` contains no valid token.
    """
    if not response_mask.any():
        raise ValueError("response_mask must contain at least one valid token (1).")
    key = f"rollout_rs_{option_name}"
    valid_tokens = response_mask.bool()
    out: dict[str, float] = {}
    # Per-sequence statistic: reuse the caller-provided values when available,
    # otherwise fall back to a masked token mean per sequence.
    per_seq = (
        per_sequence_values
        if per_sequence_values is not None
        else verl_F.masked_mean(rs_statistic, response_mask, axis=-1)
    )
    if per_seq.dim() > 1:
        per_seq = per_seq.squeeze(-1)
    per_seq_valid = per_seq[seq_valid_mask]
    # Masked mean of the raw statistic is always reported.
    out[f"{key}_mean"] = verl_F.masked_mean(rs_statistic, response_mask).item()
    # Max/min and over/under-threshold fractions: sequence-level options use
    # the per-sequence values; token-level options use the token statistic.
    if level == "sequence" and per_seq_valid.numel() > 0:
        out[f"{key}_max"] = per_seq_valid.max().item()
        out[f"{key}_min"] = per_seq_valid.min().item()
        out[f"{key}_fraction_high"] = (per_seq_valid > rollout_rs_threshold).float().mean().item()
        out[f"{key}_fraction_low"] = (
            (per_seq_valid < rollout_rs_threshold_lower).float().mean().item() if apply_lower_threshold else 0.0
        )
    else:
        out[f"{key}_max"] = rs_statistic.masked_fill(~valid_tokens, float("-inf")).max().item()
        out[f"{key}_min"] = rs_statistic.masked_fill(~valid_tokens, float("inf")).min().item()
        out[f"{key}_fraction_high"] = verl_F.masked_mean(
            (rs_statistic > rollout_rs_threshold).float(), response_mask
        ).item()
        out[f"{key}_fraction_low"] = (
            verl_F.masked_mean((rs_statistic < rollout_rs_threshold_lower).float(), response_mask).item()
            if apply_lower_threshold
            else 0.0
        )
    # Standard deviation over the clamped statistic; clamping keeps extreme
    # outliers from dominating the variance estimate.
    n_valid = response_mask.sum()
    if n_valid > 1:
        floor = rollout_rs_threshold_lower if apply_lower_threshold else 0.0
        clamped = rs_statistic.clamp(min=floor, max=rollout_rs_threshold)
        clamped_mean = verl_F.masked_mean(clamped, response_mask)
        variance = verl_F.masked_mean(clamped.square(), response_mask) - clamped_mean.square()
        out[f"{key}_std"] = torch.sqrt(torch.clamp(variance, min=0.0)).item()
    else:
        out[f"{key}_std"] = 0.0
    # Sequence-level summary over sequences that contain valid tokens.
    if per_seq_valid.numel() > 0:
        out[f"{key}_seq_mean"] = per_seq_valid.mean().item()
        out[f"{key}_seq_std"] = per_seq_valid.std().item() if per_seq_valid.numel() > 1 else 0.0
        out[f"{key}_seq_max"] = per_seq_valid.max().item()
        out[f"{key}_seq_min"] = per_seq_valid.min().item()
        # Deviation from the ideal statistic value of 0.0.
        out[f"{key}_seq_max_deviation"] = per_seq_valid.abs().max().item()
        out[f"{key}_seq_fraction_high"] = (per_seq_valid > rollout_rs_threshold).float().mean().item()
        if apply_lower_threshold:
            out[f"{key}_seq_fraction_low"] = (per_seq_valid < rollout_rs_threshold_lower).float().mean().item()
    else:
        for suffix in (
            "seq_mean",
            "seq_std",
            "seq_max",
            "seq_min",
            "seq_max_deviation",
            "seq_fraction_high",
            "seq_fraction_low",
        ):
            out[f"{key}_{suffix}"] = 0.0
    return out
def compute_rollout_correction_weights(
    log_ratio: torch.Tensor,
    response_mask: torch.Tensor,
    rollout_is: str = "token",
    rollout_is_threshold: float = 2.0,
    rollout_is_batch_normalize: bool = False,
) -> tuple[torch.Tensor, dict[str, float]]:
    """Compute importance sampling weights to correct for off-policy distribution shifts.

    This function calculates IS weights (π_train / π_rollout) using log ratios for numerical stability.
    It supports multiple aggregation levels and truncates extreme weights to prevent training instability.

    Key design:
    - Log-space computations to avoid overflow
    - Truncation of extreme weights (TIS: Truncated Importance Sampling)
    - Optional batch normalization (normalize to mean=1.0)
    - Metrics tracking for weight distribution analysis

    Args:
        log_ratio: Log ratio of training policy probability to rollout policy probability,
            shape (batch_size, seq_length).
        response_mask: Binary mask for valid tokens (1=valid, 0=padding),
            shape (batch_size, seq_length).
        rollout_is: IS weight aggregation level, must be one of:
            - "token": Per-token weights (biased, low variance)
            - "sequence": Per-sequence weight (product of tokens; unbiased, high variance)
        rollout_is_threshold: Upper threshold for truncating extreme weights (e.g., 2.0),
            default 2.0.
        rollout_is_batch_normalize: Whether to normalize IS weights to have mean=1.0 per batch,
            default False.

    Returns:
        Tuple containing:
            rollout_is_weights: Truncated IS weights (masked to zero for padding tokens),
                shape (batch_size, seq_length). If batch_normalize=True, normalized to mean=1.0.
            metrics: Dictionary of IS weight metrics (all scalars), including:
                - rollout_is_mean/max/min: Statistic of weights (before batch normalization)
                - rollout_is_eff_sample_size: Effective sample size (ESS)
                - rollout_is_seq_*: Sequence-level weight statistics
                - rollout_is_batch_norm_factor: Normalization factor (only if batch_normalize=True)

    Raises:
        ValueError: If ``rollout_is`` is not a supported level or
            ``rollout_is_threshold`` is not positive.
    """
    # Validate input parameters
    valid_is_levels = {"token", "sequence"}
    if rollout_is not in valid_is_levels:
        raise ValueError(f"Invalid rollout_is: {rollout_is}. Must be one of {valid_is_levels}.")
    if rollout_is_threshold <= 0:
        raise ValueError(f"rollout_is_threshold must be positive, got {rollout_is_threshold}.")
    # Compute IS weights from log ratio (handles different aggregation levels)
    if rollout_is == "token":
        # Per-token IS weight: exp(log(π_train/π_rollout)) with safety clamp
        log_ratio_for_metrics: torch.Tensor = log_ratio
        log_ratio_safe: torch.Tensor = torch.clamp(log_ratio, min=-SAFETY_BOUND, max=SAFETY_BOUND)
        rollout_is_weights: torch.Tensor = torch.exp(log_ratio_safe)
    elif rollout_is == "sequence":
        # Sequence-level IS weight: product of token ratios (exp(sum(log ratios)))
        log_ratio_sum: torch.Tensor = verl_F.masked_sum(log_ratio, response_mask, axis=-1).unsqueeze(
            -1
        )  # Shape: (batch_size, 1)
        # Metrics use the unclamped per-sequence sum so threshold checks stay exact.
        log_ratio_for_metrics = log_ratio_sum
        log_ratio_sum_safe: torch.Tensor = torch.clamp(log_ratio_sum, min=-SAFETY_BOUND, max=SAFETY_BOUND)
        # Every token in a sequence shares that sequence's single weight.
        rollout_is_weights = torch.exp(log_ratio_sum_safe).expand_as(log_ratio)  # Broadcast to sequence length
    else:
        raise ValueError(f"Unsupported rollout_is: {rollout_is}")
    # Zero out weights for padding tokens using response mask
    rollout_is_weights = rollout_is_weights * response_mask
    # Compute IS weight metrics (BEFORE truncation to get accurate fraction_high/low)
    metrics: dict[str, float] = compute_is_metrics(
        rollout_is_weights=rollout_is_weights,
        log_ratio_for_metrics=log_ratio_for_metrics,
        response_mask=response_mask,
        rollout_is=rollout_is,
        rollout_is_threshold=rollout_is_threshold,
    )
    # Truncate extreme weights (TIS: Truncated Importance Sampling)
    rollout_is_weights = rollout_is_weights.clamp(max=rollout_is_threshold)
    # Detach weights to prevent gradient flow (mathematically required by IS theory)
    # IS weights change the measure, not the objective. See §3.2.2 in docs/algo/rollout_corr_math.md
    rollout_is_weights = rollout_is_weights.detach()
    # Apply batch normalization if requested
    if rollout_is_batch_normalize:
        # Compute mean based on aggregation level
        mask_float = response_mask.to(dtype=rollout_is_weights.dtype)
        if rollout_is == "token":
            # Token-level: normalize over all token weights
            # Use the all-rank mean when running under torch.distributed so
            # every worker applies the same normalization factor.
            if torch.distributed.is_available() and torch.distributed.is_initialized():
                weights_mean = verl_F.distributed_masked_mean(rollout_is_weights, mask_float)
            else:
                weights_mean = verl_F.masked_mean(rollout_is_weights, response_mask)
        elif rollout_is == "sequence":
            # Sequence-level: normalize over sequence weights (one weight per sequence)
            # For each sequence, compute mean over valid tokens (they all have the same weight)
            # then average across sequences
            seq_weights = verl_F.masked_mean(rollout_is_weights, response_mask, axis=-1)  # (batch_size,)
            seq_mask = (response_mask.sum(dim=-1) > 0).to(dtype=rollout_is_weights.dtype)
            if torch.distributed.is_available() and torch.distributed.is_initialized():
                weights_mean = verl_F.distributed_masked_mean(seq_weights, seq_mask)
            else:
                # clamp_min guards against an all-padding batch (division by zero).
                weights_mean = (seq_weights * seq_mask).sum() / seq_mask.sum().clamp_min(1e-8)
        else:
            raise ValueError(f"Unsupported rollout_is: {rollout_is}")
        # Normalize to mean=1.0 (avoid division by zero)
        if weights_mean > 1e-8:
            rollout_is_weights = rollout_is_weights / weights_mean
            metrics["rollout_is_batch_norm_factor"] = weights_mean.item()
        else:
            # Degenerate mean: skip normalization and record the identity factor.
            metrics["rollout_is_batch_norm_factor"] = 1.0
    return rollout_is_weights, metrics
def compute_is_metrics(
    rollout_is_weights: torch.Tensor,
    log_ratio_for_metrics: torch.Tensor,
    response_mask: torch.Tensor,
    rollout_is: str,
    rollout_is_threshold: float,
) -> dict[str, float]:
    """Compute comprehensive metrics for truncated importance sampling weights.

    This function calculates statistics for truncated IS weights (TIS), using log-space
    for accurate threshold checks and clamped weights for stable mean/std calculations.

    Args:
        rollout_is_weights: Truncated IS weights (π_train / π_rollout),
            shape (batch_size, seq_length).
        log_ratio_for_metrics: Log ratio of training to rollout probabilities (unclamped),
            shape varies by aggregation level.
        response_mask: Binary mask for valid tokens (1=valid, 0=padding),
            shape (batch_size, seq_length).
        rollout_is: IS weight aggregation level (matches compute_rollout_correction_weights).
        rollout_is_threshold: Upper threshold for truncated IS weights.

    Returns:
        Dictionary of IS weight metrics (all scalars).

    Raises:
        ValueError: If ``response_mask`` contains no valid token.
    """
    if not response_mask.any():
        raise ValueError("response_mask must contain at least one valid token (1).")
    metrics: dict[str, float] = {}
    device: torch.device = rollout_is_weights.device
    # Default lower threshold (reciprocal of upper threshold)
    rollout_is_threshold_lower: float = 1.0 / rollout_is_threshold
    # Precompute log thresholds for accurate checks
    log_threshold_upper: torch.Tensor = torch.log(torch.tensor(rollout_is_threshold, device=device))
    log_threshold_lower: torch.Tensor = torch.log(torch.tensor(rollout_is_threshold_lower, device=device))
    # Compute metrics based on aggregation level
    if rollout_is == "sequence":
        # Sequence-level aggregation: use log-space for unclamped stats
        log_max: torch.Tensor = log_ratio_for_metrics.max()
        log_min: torch.Tensor = log_ratio_for_metrics.min()
        # Clamp only the max before exponentiating; exp of a very negative min
        # underflows harmlessly toward 0.
        metrics["rollout_is_max"] = torch.exp(torch.clamp(log_max, max=SAFETY_BOUND)).item()
        metrics["rollout_is_min"] = torch.exp(log_min).item()
        # Mean uses truncated weights to avoid overflow
        metrics["rollout_is_mean"] = verl_F.masked_mean(rollout_is_weights, response_mask).item()
        # Fraction of weights exceeding thresholds (log-space for accuracy)
        exceeds_upper: torch.Tensor = log_ratio_for_metrics > log_threshold_upper
        below_lower: torch.Tensor = log_ratio_for_metrics < log_threshold_lower
        metrics["rollout_is_ratio_fraction_high"] = exceeds_upper.float().mean().item()
        metrics["rollout_is_ratio_fraction_low"] = below_lower.float().mean().item()
    else:  # token-level
        # Token-level aggregation: compute directly from truncated weights
        metrics["rollout_is_mean"] = verl_F.masked_mean(rollout_is_weights, response_mask).item()
        # Fraction of tokens exceeding thresholds
        rollout_is_above_threshold: torch.Tensor = rollout_is_weights > rollout_is_threshold
        rollout_is_below_threshold: torch.Tensor = rollout_is_weights < rollout_is_threshold_lower
        metrics["rollout_is_ratio_fraction_high"] = verl_F.masked_mean(
            rollout_is_above_threshold.float(), response_mask
        ).item()
        metrics["rollout_is_ratio_fraction_low"] = verl_F.masked_mean(
            rollout_is_below_threshold.float(), response_mask
        ).item()
        # Max/min (mask out padding tokens)
        mask_bool: torch.Tensor = response_mask.bool()
        metrics["rollout_is_max"] = rollout_is_weights.masked_fill(~mask_bool, float("-inf")).max().item()
        metrics["rollout_is_min"] = rollout_is_weights.masked_fill(~mask_bool, float("inf")).min().item()
    # Compute standard deviation (using clamped weights for stability)
    mask_count: torch.Tensor = response_mask.sum()
    if mask_count > 1:
        weights_for_std: torch.Tensor = rollout_is_weights.clamp(
            min=rollout_is_threshold_lower, max=rollout_is_threshold
        )
        mean_clamped: torch.Tensor = verl_F.masked_mean(weights_for_std, response_mask)
        # Var = E[w^2] - E[w]^2 over valid tokens.
        rollout_is_var: torch.Tensor = (
            verl_F.masked_mean(weights_for_std.square(), response_mask) - mean_clamped.square()
        )
        metrics["rollout_is_std"] = torch.sqrt(torch.clamp(rollout_is_var, min=0.0)).item()
    else:
        metrics["rollout_is_std"] = 0.0
    # Compute Effective Sample Size (ESS) for truncated weights
    # ESS = 1 / E[(w / mean(w))^2]; equals 1.0 when all weights are identical.
    weights_for_ess: torch.Tensor = rollout_is_weights.clamp(min=rollout_is_threshold_lower, max=rollout_is_threshold)
    mean_for_ess: torch.Tensor = verl_F.masked_mean(weights_for_ess, response_mask)
    is_weights_normalized: torch.Tensor = weights_for_ess / (mean_for_ess + 1e-8)  # Avoid division by zero
    metrics["rollout_is_eff_sample_size"] = (
        1.0 / verl_F.masked_mean(is_weights_normalized.square(), response_mask).item()
    )
    # Add sequence-level metrics if weights have batch dimension
    if rollout_is_weights.dim() > 1:
        seq_mean_weights: torch.Tensor = verl_F.masked_mean(rollout_is_weights, response_mask, axis=-1)
        metrics["rollout_is_seq_mean"] = seq_mean_weights.mean().item()
        metrics["rollout_is_seq_std"] = seq_mean_weights.std().item() if seq_mean_weights.numel() > 1 else 0.0
        metrics["rollout_is_seq_max"] = seq_mean_weights.max().item()
        metrics["rollout_is_seq_min"] = seq_mean_weights.min().item()
        # Sequence deviation from ideal weight (1.0)
        seq_deviation: torch.Tensor = (seq_mean_weights - 1.0).abs()
        metrics["rollout_is_seq_max_deviation"] = seq_deviation.max().item()
        # Fraction of sequences with extreme weights
        metrics["rollout_is_seq_fraction_high"] = (seq_mean_weights > rollout_is_threshold).float().mean().item()
        metrics["rollout_is_seq_fraction_low"] = (seq_mean_weights < rollout_is_threshold_lower).float().mean().item()
    return metrics
def compute_rollout_correction_and_rejection_mask(
    old_log_prob: torch.Tensor,
    rollout_log_prob: torch.Tensor,
    response_mask: torch.Tensor,
    rollout_is: Optional[str] = None,
    rollout_is_threshold: Optional[float] = 2.0,
    rollout_is_batch_normalize: bool = False,
    rollout_rs: Optional[str] = None,
    rollout_rs_threshold: Optional[str | float] = None,
) -> tuple[Optional[DataProto], torch.Tensor, dict[str, float]]:
    """Compute IS correction weights and/or a rejection mask in one pass.

    Orchestrates three steps on the training/rollout log-probability gap:
    (1) truncated importance-sampling weights via
    ``compute_rollout_correction_weights`` when ``rollout_is`` is set,
    (2) a rejection-sampling mask via ``compute_rollout_rejection_mask`` when
    ``rollout_rs`` is set, and (3) off-policy diagnostics via
    ``compute_offpolicy_metrics`` (always). All metrics are flattened to
    floats and prefixed with ``rollout_corr/``.

    Args:
        old_log_prob: Training-policy log probs, shape (batch_size, seq_length).
        rollout_log_prob: Rollout-policy log probs, same shape.
        response_mask: Binary valid-token mask (1=valid, 0=padding), same shape.
        rollout_is: IS aggregation level ("token"/"sequence"), or None to skip
            IS weight computation.
        rollout_is_threshold: Upper truncation bound for IS weights, default 2.0.
        rollout_is_batch_normalize: Whether to normalize IS weights to mean=1.0
            per batch. Default: False.
        rollout_rs: Comma separated rejection sampling options, or None to skip
            rejection sampling.
        rollout_rs_threshold: Per-option threshold spec (see
            compute_rollout_rejection_mask); required when rollout_rs is set.

    Returns:
        Tuple containing:
            rollout_is_weights_proto: DataProto holding "rollout_is_weights"
                (None when rollout_is is None).
            modified_response_mask: Response mask with rejections applied.
            metrics: Scalar metrics keyed "rollout_corr/...".

    Raises:
        ValueError: On an empty response_mask, a shape mismatch between the
            inputs, or a missing rollout_rs_threshold while rollout_rs is set.
    """
    # --- Input validation -------------------------------------------------
    if not response_mask.any():
        raise ValueError("response_mask must contain at least one valid token (1).")
    if old_log_prob.shape != rollout_log_prob.shape:
        raise ValueError(
            f"old_log_prob shape {old_log_prob.shape} does not match rollout_log_prob shape {rollout_log_prob.shape}."
        )
    if old_log_prob.shape != response_mask.shape:
        raise ValueError(
            f"log_prob shape {old_log_prob.shape} does not match response_mask shape {response_mask.shape}."
        )
    # log(pi_train / pi_rollout): the common input to both IS and RS.
    log_ratio: torch.Tensor = old_log_prob - rollout_log_prob
    collected: dict[str, float] = {}
    # --- Importance sampling (optional) -----------------------------------
    weights: Optional[torch.Tensor] = None
    if rollout_is is not None and rollout_is_threshold is not None:
        weights, is_stats = compute_rollout_correction_weights(
            log_ratio=log_ratio,
            response_mask=response_mask,
            rollout_is=rollout_is,
            rollout_is_threshold=rollout_is_threshold,
            rollout_is_batch_normalize=rollout_is_batch_normalize,
        )
        collected.update(is_stats)
    # --- Rejection sampling (optional) ------------------------------------
    final_response_mask: torch.Tensor = response_mask.clone()
    if rollout_rs is not None:
        if rollout_rs_threshold is None:
            raise ValueError(
                "rollout_rs_threshold must be explicitly provided when rollout_rs is enabled. "
                "Set rollout_rs_threshold to the desired threshold value."
            )
        final_response_mask, rs_stats = compute_rollout_rejection_mask(
            log_ratio=log_ratio,
            response_mask=response_mask,
            rollout_rs=rollout_rs,
            rollout_rs_threshold=rollout_rs_threshold,
        )
        collected.update(rs_stats)
    # --- Off-policy diagnostics (always computed) -------------------------
    collected.update(
        compute_offpolicy_metrics(
            old_log_prob=old_log_prob,
            rollout_log_prob=rollout_log_prob,
            response_mask=response_mask,
        )
    )
    # Flatten any tensor values to floats and prefix every key for logging.
    metrics_scalar: dict[str, float] = {
        f"rollout_corr/{name}": (value.item() if isinstance(value, torch.Tensor) else value)
        for name, value in collected.items()
    }
    # Wrap the IS weights in a DataProto so callers receive a uniform container.
    weights_proto: Optional[DataProto] = None
    if weights is not None:
        weights_proto = DataProto.from_dict(tensors={"rollout_is_weights": weights})
    return weights_proto, final_response_mask, metrics_scalar
def compute_offpolicy_metrics(
    old_log_prob: torch.Tensor,
    rollout_log_prob: Optional[torch.Tensor],
    response_mask: torch.Tensor,
) -> dict[str, Any]:
    """Compute off-policy diagnostic metrics between training and rollout policies.

    Used internally by compute_rollout_correction_and_rejection_mask() and by the
    rollout-correction tests. The metrics quantify the gap between the rollout
    policy (e.g. vLLM BF16) and the training policy (e.g. FSDP FP32), which can
    stem from precision mismatch, checkpoint staleness, or distribution shift.

    Reported metrics:
        - training_ppl / training_log_ppl: perplexity of the training policy
        - kl: direct estimator of KL(pi_rollout || pi_training)
        - k3_kl: K3 estimator E[r - log r - 1], more stable for small KL
        - rollout_ppl / rollout_log_ppl: perplexity of the rollout policy
        - log_ppl_diff (+ abs/max/min): per-sequence log-perplexity gap
        - ppl_ratio: E[exp(log_ppl_diff)] = mean per-sequence training/rollout PPL ratio
        - chi2_token: token-level chi-squared divergence E[rho^2] - 1
        - chi2_seq: sequence-level chi-squared divergence E[(prod rho_t)^2] - 1

    Args:
        old_log_prob: Training-policy log probs, shape (batch_size, seq_length).
        rollout_log_prob: Rollout-policy log probs, same shape, or None.
        response_mask: Valid-token mask, same shape.

    Returns:
        Dict of scalar metrics (no prefix). Rollout-dependent entries are
        omitted when rollout_log_prob is None.
    """
    assert response_mask.any(), "Expected at least one valid token in response_mask"
    metrics: dict[str, Any] = {}

    # Per-sequence mean training log-prob; PPL = exp(-mean log-prob), then batch mean.
    train_seq_logp = verl_F.masked_mean(old_log_prob, response_mask, axis=-1)  # (batch_size,)
    metrics["training_ppl"] = torch.exp(-train_seq_logp).mean().detach().item()
    # Log-scale variant avoids the exponential range during analysis.
    metrics["training_log_ppl"] = (-train_seq_logp).mean().detach().item()

    # Rollout-dependent diagnostics require rollout log-probs.
    if rollout_log_prob is None:
        return metrics

    # Direct KL(pi_rollout || pi_training): positive when rollout is more confident.
    metrics["kl"] = verl_F.masked_mean(rollout_log_prob - old_log_prob, response_mask).detach().item()

    # K3 estimator: E[r - log(r) - 1] with r = pi_training / pi_rollout.
    ratio_log = old_log_prob - rollout_log_prob
    k3_matrix = torch.exp(ratio_log) - ratio_log - 1
    metrics["k3_kl"] = verl_F.masked_mean(k3_matrix, response_mask).detach().item()

    # Rollout-policy perplexity, mirroring the training-policy computation above.
    rollout_seq_logp = verl_F.masked_mean(rollout_log_prob, response_mask, axis=-1)  # (batch_size,)
    metrics["rollout_ppl"] = torch.exp(-rollout_seq_logp).mean().detach().item()
    metrics["rollout_log_ppl"] = (-rollout_seq_logp).mean().detach().item()

    # Since ppl = exp(-logp), log(training_ppl / rollout_ppl) equals this difference;
    # positive means training assigns lower probability (higher PPL) than rollout.
    logp_gap = rollout_seq_logp - train_seq_logp
    metrics["log_ppl_diff"] = logp_gap.mean().detach().item()
    metrics["log_ppl_abs_diff"] = logp_gap.abs().mean().detach().item()
    metrics["log_ppl_diff_max"] = logp_gap.max().detach().item()
    metrics["log_ppl_diff_min"] = logp_gap.min().detach().item()
    # Per-sequence PPL ratio in log space for numerical stability; this is the
    # inverse of the per-sequence geometric IS weight.
    metrics["ppl_ratio"] = torch.exp(logp_gap).mean().detach().item()

    # Chi-squared divergence chi^2(pi_training || pi_rollout) = E_mu[rho^2] - 1,
    # i.e. the variance of the importance weights. Clamp exponents for safety.
    token_ratio_log = torch.clamp(ratio_log, min=-SAFETY_BOUND, max=SAFETY_BOUND)
    token_rho_sq = torch.exp(token_ratio_log).square()
    metrics["chi2_token"] = (verl_F.masked_mean(token_rho_sq, response_mask) - 1.0).detach().item()

    # Sequence level: E[(prod rho_t)^2] - 1 = E[exp(2 * sum log rho_t)] - 1.
    seq_ratio_log = verl_F.masked_sum(ratio_log, response_mask, axis=-1)
    seq_ratio_log = torch.clamp(seq_ratio_log, min=-SAFETY_BOUND, max=SAFETY_BOUND)
    metrics["chi2_seq"] = (torch.exp(2.0 * seq_ratio_log).mean() - 1.0).detach().item()
    return metrics
def compute_rollout_correction_and_add_to_batch(
    batch: DataProto, rollout_corr_config: RolloutCorrectionConfig
) -> tuple[DataProto, dict]:
    """Apply rollout correction (IS weights + rejection sampling) to a batch.

    Corrects the off-policy gap between rollout and training policies. The
    response_mask is ALWAYS rewritten with rejection-sampling exclusions
    removed, while rollout_is_weights is merged into the batch ONLY when the
    ``rollout_is`` option is set. This split lets rejection run and metrics be
    monitored independently of IS-weight correction.

    Args:
        batch: DataProto carrying old_log_probs, rollout_log_probs, response_mask.
        rollout_corr_config: Rollout-correction options (rollout_is, thresholds, ...).

    Returns:
        Tuple of (updated_batch, metrics) where metrics carry the
        "rollout_corr/" prefix.

    Note:
        The implementation is copied from szrlee <szrlee@gmail.com>.
    """
    tensors = batch.batch
    # Delegate the heavy lifting; config values are read with their documented defaults.
    is_weights_proto, updated_mask, corr_metrics = compute_rollout_correction_and_rejection_mask(
        old_log_prob=tensors["old_log_probs"],
        rollout_log_prob=tensors["rollout_log_probs"],
        response_mask=tensors["response_mask"],
        rollout_is=rollout_corr_config.get("rollout_is", None),
        rollout_is_threshold=rollout_corr_config.get("rollout_is_threshold", 2.0),
        rollout_is_batch_normalize=rollout_corr_config.get("rollout_is_batch_normalize", False),
        rollout_rs=rollout_corr_config.get("rollout_rs", None),
        rollout_rs_threshold=rollout_corr_config.get("rollout_rs_threshold", None),
    )
    # Rejection is applied unconditionally via the response mask.
    batch.batch["response_mask"] = updated_mask
    # IS weights exist only when rollout_is was enabled.
    if is_weights_proto is not None:
        batch = batch.union(is_weights_proto)
    return batch, corr_metrics
def compute_rollout_corr_metrics_from_logprobs(
    log_prob: torch.Tensor,
    rollout_log_prob: torch.Tensor,
    response_mask: torch.Tensor,
) -> dict[str, float]:
    """Compute prefixed rollout-correction metrics during training.

    Used in the actor to compare the CURRENT policy's log probabilities against
    the rollout policy's, tracking the off-policy gap (KL, PPL, chi-squared)
    as training progresses.

    Args:
        log_prob: Current-policy log probs, shape (batch_size, seq_length).
        rollout_log_prob: Rollout-policy log probs, same shape.
        response_mask: Valid-token mask, same shape.

    Returns:
        Metrics dict with every key prefixed by "rollout_corr/".
    """
    raw_metrics = compute_offpolicy_metrics(
        old_log_prob=log_prob,
        rollout_log_prob=rollout_log_prob,
        response_mask=response_mask,
    )
    # Prefix for logging consistency; unwrap any tensor values to Python scalars.
    prefixed: dict[str, float] = {}
    for name, value in raw_metrics.items():
        prefixed[f"rollout_corr/{name}"] = value.item() if isinstance(value, torch.Tensor) else value
    return prefixed
def apply_bypass_mode(
    batch: DataProto,
    rollout_corr_config: Optional[RolloutCorrectionConfig] = None,
    policy_loss_config: PolicyLossConfig = None,
) -> None:
    """Configure bypass mode: reuse rollout log-probs as old log-probs.

    Skips the expensive actor forward pass for old_log_prob by substituting
    rollout_log_probs (two policies involved instead of three), then routes
    loss computation to compute_policy_loss_bypass_mode(), which supports:

    - loss_type="ppo_clip" (default): PPO clipped objective (IS handled by ratio)
    - loss_type="reinforce": REINFORCE with explicit IS weights

    Both loss types benefit from rejection sampling (RS), which masks
    out-of-distribution samples.

    Args:
        batch: DataProto that must contain rollout_log_probs; mutated in place.
        rollout_corr_config: Rollout-correction options forwarded to the actor.
        policy_loss_config: Policy-loss config; mutated in place.

    Raises:
        ValueError: If the batch lacks rollout_log_probs.

    Note:
        The implementation is copied from szrlee <szrlee@gmail.com>.
    """
    from omegaconf import open_dict

    tensors = batch.batch
    if "rollout_log_probs" not in tensors:
        raise ValueError(
            "bypass_mode=True requires rollout_log_probs in batch. "
            "Ensure rollout worker is configured to calculate_log_probs=true."
        )
    # Zero-cost substitution: rollout log-probs stand in for old log-probs.
    tensors["old_log_probs"] = tensors["rollout_log_probs"]
    with open_dict(policy_loss_config):
        # Hand the rollout-correction config to the actor for loss + metrics,
        # and force the bypass-mode loss function (handles both loss_types).
        policy_loss_config["rollout_correction"] = rollout_corr_config
        policy_loss_config["loss_mode"] = "bypass_mode"
| {
"repo_id": "verl-project/verl",
"file_path": "verl/trainer/ppo/rollout_corr_helper.py",
"license": "Apache License 2.0",
"lines": 920,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:tests/workers/test_fsdp_attn_implementation.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test for attn_implementation override configuration in FSDP workers.
This test verifies that the fix for honoring attn_implementation override config
works correctly in the ActorRolloutRefWorker._build_model_optimizer method.
"""
from unittest.mock import Mock, patch
import pytest
import torch
from omegaconf import OmegaConf
from transformers import AutoConfig, AutoModelForCausalLM
# Only run these tests if we can import verl components
try:
from verl.workers.config import FSDPEngineConfig # noqa: F401
from verl.workers.fsdp_workers import (
ActorRolloutRefWorker, # noqa: F401
CriticWorker, # noqa: F401
)
VERL_AVAILABLE = True
except ImportError:
VERL_AVAILABLE = False
@pytest.mark.skipif(not VERL_AVAILABLE, reason="VERL components not available")
class TestFSDPAttnImplementation:
    """Test cases for attn_implementation override in FSDP workers.

    These tests exercise the extraction logic (override_config.get with a
    "flash_attention_2" default) and, via mocks, verify the value is forwarded
    to both AutoConfig.from_pretrained and AutoModelForCausalLM.from_pretrained.
    They simulate the worker logic rather than invoking the workers directly.
    """

    def test_attn_implementation_extraction_logic(self):
        """Test the core logic for extracting attn_implementation from override config."""
        # Test case 1: Default behavior
        override_config = {}
        attn_impl = override_config.get("attn_implementation", "flash_attention_2")
        assert attn_impl == "flash_attention_2"
        # Test case 2: Override to eager
        override_config = {"attn_implementation": "eager"}
        attn_impl = override_config.get("attn_implementation", "flash_attention_2")
        assert attn_impl == "eager"
        # Test case 3: Override to sdpa
        override_config = {"attn_implementation": "sdpa"}
        attn_impl = override_config.get("attn_implementation", "flash_attention_2")
        assert attn_impl == "sdpa"
        # Test case 4: Other configs don't affect attn_implementation
        override_config = {"other_setting": "value", "dropout": 0.1}
        attn_impl = override_config.get("attn_implementation", "flash_attention_2")
        assert attn_impl == "flash_attention_2"

    # NOTE: with stacked @patch decorators the BOTTOM decorator supplies the
    # FIRST mock argument, so mock_model_from_pretrained precedes
    # mock_config_from_pretrained in the signatures below.
    @patch("transformers.AutoConfig.from_pretrained")
    @patch("transformers.AutoModelForCausalLM.from_pretrained")
    def test_attn_implementation_passed_to_autoconfig(self, mock_model_from_pretrained, mock_config_from_pretrained):
        """Test that attn_implementation is correctly passed to AutoConfig.from_pretrained."""
        # Mock the AutoConfig return value
        mock_config = Mock()
        mock_config.tie_word_embeddings = False
        mock_config.architectures = ["LlamaForCausalLM"]
        mock_config_from_pretrained.return_value = mock_config
        # Mock the model return value
        mock_model = Mock()
        mock_model_from_pretrained.return_value = mock_model
        # Test data: (override_config, expected attn_implementation)
        test_cases = [
            ({}, "flash_attention_2"),  # Default
            ({"attn_implementation": "eager"}, "eager"),  # Override to eager
            ({"attn_implementation": "sdpa"}, "sdpa"),  # Override to sdpa
        ]
        for override_config, expected_attn_impl in test_cases:
            # Reset mocks so assert_called_once_with holds per iteration
            mock_config_from_pretrained.reset_mock()
            mock_model_from_pretrained.reset_mock()
            # Simulate the logic from FSDP workers
            attn_implementation = override_config.get("attn_implementation", "flash_attention_2")
            # This simulates what happens in _build_model_optimizer
            AutoConfig.from_pretrained("test_path", trust_remote_code=False, attn_implementation=attn_implementation)
            # Verify AutoConfig.from_pretrained was called with correct attn_implementation
            mock_config_from_pretrained.assert_called_once_with(
                "test_path", trust_remote_code=False, attn_implementation=expected_attn_impl
            )

    @patch("transformers.AutoConfig.from_pretrained")
    @patch("transformers.AutoModelForCausalLM.from_pretrained")
    def test_attn_implementation_passed_to_model(self, mock_model_from_pretrained, mock_config_from_pretrained):
        """Test that attn_implementation is correctly passed to model.from_pretrained."""
        # Mock the AutoConfig return value
        mock_config = Mock()
        mock_config.tie_word_embeddings = False
        mock_config.architectures = ["LlamaForCausalLM"]
        mock_config_from_pretrained.return_value = mock_config
        # Mock the model return value
        mock_model = Mock()
        mock_model_from_pretrained.return_value = mock_model
        # Test with override config
        override_config = {"attn_implementation": "eager"}
        attn_implementation = override_config.get("attn_implementation", "flash_attention_2")
        # This simulates what happens in _build_model_optimizer
        AutoModelForCausalLM.from_pretrained(
            pretrained_model_name_or_path="test_path",
            torch_dtype=torch.bfloat16,
            config=mock_config,
            trust_remote_code=False,
            attn_implementation=attn_implementation,
        )
        # Verify AutoModelForCausalLM.from_pretrained was called with correct attn_implementation
        mock_model_from_pretrained.assert_called_once_with(
            pretrained_model_name_or_path="test_path",
            torch_dtype=torch.bfloat16,
            config=mock_config,
            trust_remote_code=False,
            attn_implementation="eager",
        )

    def test_override_config_integration(self):
        """Test that override_config from Hydra configuration works correctly."""
        # Simulate the OmegaConf configuration structure used in VERL
        config_dict = {
            "model": {"path": "/test/path", "override_config": {"attn_implementation": "eager", "dropout": 0.1}}
        }
        # Convert to OmegaConf structure
        omegaconf = OmegaConf.create(config_dict)
        # Simulate what happens in the FSDP worker
        override_model_config = OmegaConf.to_container(OmegaConf.create(omegaconf.model.get("override_config", {})))
        # Test extraction
        attn_implementation = override_model_config.get("attn_implementation", "flash_attention_2")
        assert attn_implementation == "eager"
        # Test that other configs are preserved
        assert override_model_config.get("dropout") == 0.1

    def test_hydra_plus_prefix_config(self):
        """Test that Hydra +prefix configurations work correctly."""
        # This simulates the configuration when user specifies:
        # +actor_rollout_ref.model.override_config.attn_implementation=eager
        # The + prefix in Hydra adds new keys to the config
        config_dict = {
            "actor_rollout_ref": {
                "model": {
                    "path": "/test/path",
                    "override_config": {
                        "attn_implementation": "eager"  # This gets added via +prefix
                    },
                }
            }
        }
        omegaconf = OmegaConf.create(config_dict)
        # Extract override config as done in FSDP workers
        override_model_config = OmegaConf.to_container(
            OmegaConf.create(omegaconf.actor_rollout_ref.model.get("override_config", {}))
        )
        # Verify extraction works
        attn_implementation = override_model_config.get("attn_implementation", "flash_attention_2")
        assert attn_implementation == "eager"

    def test_backward_compatibility(self):
        """Test that the fix maintains backward compatibility."""
        # Test case 1: No override_config at all (old behavior)
        config_without_override = {}
        attn_implementation = config_without_override.get("attn_implementation", "flash_attention_2")
        assert attn_implementation == "flash_attention_2"
        # Test case 2: Empty override_config
        config_with_empty_override = {"override_config": {}}
        override_config = config_with_empty_override.get("override_config", {})
        attn_implementation = override_config.get("attn_implementation", "flash_attention_2")
        assert attn_implementation == "flash_attention_2"
        # Test case 3: override_config with other settings but no attn_implementation
        config_with_other_overrides = {"override_config": {"dropout": 0.1, "hidden_size": 1024}}
        override_config = config_with_other_overrides.get("override_config", {})
        attn_implementation = override_config.get("attn_implementation", "flash_attention_2")
        assert attn_implementation == "flash_attention_2"

    def test_critic_attn_implementation_extraction_logic(self):
        """Test the core logic for extracting attn_implementation from override config for CriticWorker."""
        # Test case 1: Default behavior for critic
        override_config = {}
        attn_impl = override_config.get("attn_implementation", "flash_attention_2")
        assert attn_impl == "flash_attention_2"
        # Test case 2: Override to eager for critic
        override_config = {"attn_implementation": "eager"}
        attn_impl = override_config.get("attn_implementation", "flash_attention_2")
        assert attn_impl == "eager"
        # Test case 3: Override to sdpa for critic
        override_config = {"attn_implementation": "sdpa"}
        attn_impl = override_config.get("attn_implementation", "flash_attention_2")
        assert attn_impl == "sdpa"
        # Test case 4: Other configs don't affect attn_implementation for critic
        override_config = {"other_setting": "value", "dropout": 0.1}
        attn_impl = override_config.get("attn_implementation", "flash_attention_2")
        assert attn_impl == "flash_attention_2"

    @patch("transformers.AutoConfig.from_pretrained")
    def test_critic_attn_implementation_passed_to_autoconfig(self, mock_config_from_pretrained):
        """Test that attn_implementation is correctly passed to AutoConfig.from_pretrained in CriticWorker."""
        # Mock the AutoConfig return value
        mock_config = Mock()
        mock_config.tie_word_embeddings = False
        mock_config.architectures = ["LlamaForCausalLM"]
        mock_config.num_labels = 1
        mock_config_from_pretrained.return_value = mock_config
        # Test data for critic model: (override_config, expected attn_implementation)
        test_cases = [
            ({}, "flash_attention_2"),  # Default
            ({"attn_implementation": "eager"}, "eager"),  # Override to eager
            ({"attn_implementation": "sdpa"}, "sdpa"),  # Override to sdpa
        ]
        for override_config, expected_attn_impl in test_cases:
            # Reset mocks
            mock_config_from_pretrained.reset_mock()
            # Simulate the logic from CriticWorker _build_critic_model_optimizer
            attn_implementation = override_config.get("attn_implementation", "flash_attention_2")
            # This simulates what should happen in CriticWorker._build_critic_model_optimizer
            # (This is where the fix needs to be applied in the actual implementation)
            AutoConfig.from_pretrained(
                "test_path",
                attn_implementation=attn_implementation,
                trust_remote_code=False,
            )
            # Verify AutoConfig.from_pretrained was called with correct attn_implementation
            mock_config_from_pretrained.assert_called_once_with(
                "test_path",
                attn_implementation=expected_attn_impl,
                trust_remote_code=False,
            )

    def test_critic_override_config_integration(self):
        """Test that override_config from Hydra configuration works correctly for CriticWorker."""
        # Simulate the OmegaConf configuration structure used in VERL for critic
        config_dict = {
            "critic": {
                "model": {"path": "/test/path", "override_config": {"attn_implementation": "eager", "dropout": 0.1}}
            }
        }
        # Convert to OmegaConf structure
        omegaconf = OmegaConf.create(config_dict)
        # Simulate what happens in the CriticWorker
        override_model_config = OmegaConf.to_container(
            OmegaConf.create(omegaconf.critic.model.get("override_config", {}))
        )
        # Test extraction for critic
        attn_implementation = override_model_config.get("attn_implementation", "flash_attention_2")
        assert attn_implementation == "eager"
        # Test that other configs are preserved for critic
        assert override_model_config.get("dropout") == 0.1

    def test_critic_hydra_plus_prefix_config(self):
        """Test that Hydra +prefix configurations work correctly for CriticWorker."""
        # This simulates the configuration when user specifies:
        # +critic.model.override_config.attn_implementation=eager
        # The + prefix in Hydra adds new keys to the config
        config_dict = {
            "critic": {
                "model": {
                    "path": "/test/path",
                    "override_config": {
                        "attn_implementation": "eager"  # This gets added via +prefix for critic
                    },
                }
            }
        }
        omegaconf = OmegaConf.create(config_dict)
        # Extract override config as done in CriticWorker
        override_model_config = OmegaConf.to_container(
            OmegaConf.create(omegaconf.critic.model.get("override_config", {}))
        )
        # Verify extraction works for critic
        attn_implementation = override_model_config.get("attn_implementation", "flash_attention_2")
        assert attn_implementation == "eager"

    def test_both_actor_and_critic_configuration(self):
        """Test that both actor and critic can have different attn_implementation overrides simultaneously."""
        # This simulates a complete training configuration with both actor and critic overrides
        config_dict = {
            "actor_rollout_ref": {"model": {"override_config": {"attn_implementation": "eager"}}},
            "critic": {"model": {"override_config": {"attn_implementation": "sdpa"}}},
        }
        omegaconf = OmegaConf.create(config_dict)
        # Extract actor override config
        actor_override_config = OmegaConf.to_container(
            OmegaConf.create(omegaconf.actor_rollout_ref.model.get("override_config", {}))
        )
        actor_attn_implementation = actor_override_config.get("attn_implementation", "flash_attention_2")
        # Extract critic override config
        critic_override_config = OmegaConf.to_container(
            OmegaConf.create(omegaconf.critic.model.get("override_config", {}))
        )
        critic_attn_implementation = critic_override_config.get("attn_implementation", "flash_attention_2")
        # Verify both can be configured independently
        assert actor_attn_implementation == "eager"
        assert critic_attn_implementation == "sdpa"

    def test_critic_backward_compatibility(self):
        """Test that the CriticWorker fix maintains backward compatibility."""
        # Test case 1: No override_config at all for critic (old behavior)
        config_without_override = {}
        attn_implementation = config_without_override.get("attn_implementation", "flash_attention_2")
        assert attn_implementation == "flash_attention_2"
        # Test case 2: Empty override_config for critic
        config_with_empty_override = {"override_config": {}}
        override_config = config_with_empty_override.get("override_config", {})
        attn_implementation = override_config.get("attn_implementation", "flash_attention_2")
        assert attn_implementation == "flash_attention_2"
        # Test case 3: override_config with other settings but no attn_implementation for critic
        config_with_other_overrides = {"override_config": {"dropout": 0.1, "num_labels": 1}}
        override_config = config_with_other_overrides.get("override_config", {})
        attn_implementation = override_config.get("attn_implementation", "flash_attention_2")
        assert attn_implementation == "flash_attention_2"
def test_attn_implementation_fix_integration():
    """Integration test to verify the entire fix works as expected."""
    # Simulates the Hydra command-line override:
    #   +actor_rollout_ref.model.override_config.attn_implementation=eager
    raw_cfg = {"actor_rollout_ref": {"model": {"override_config": {"attn_implementation": "eager"}}}}
    cfg = OmegaConf.create(raw_cfg)
    # Extract override_model_config exactly as the FSDP workers do.
    extracted = OmegaConf.to_container(
        OmegaConf.create(cfg.actor_rollout_ref.model.get("override_config", {}))
    )
    # Apply the fix logic and verify the override wins over the default.
    attn_impl = extracted.get("attn_implementation", "flash_attention_2")
    assert attn_impl == "eager"
    # The same value would be passed to both AutoConfig and model creation;
    # verify the parameter preparation on both paths.
    for kwargs in ({"attn_implementation": attn_impl}, {"attn_implementation": attn_impl}):
        assert kwargs["attn_implementation"] == "eager"
def test_critic_attn_implementation_fix_integration():
    """Integration test to verify the entire fix works as expected for CriticWorker."""
    # Simulates the Hydra command-line override:
    #   +critic.model.override_config.attn_implementation=sdpa
    cfg = OmegaConf.create({"critic": {"model": {"override_config": {"attn_implementation": "sdpa"}}}})
    # Extract override_model_config as should be done in CriticWorker.
    extracted = OmegaConf.to_container(OmegaConf.create(cfg.critic.model.get("override_config", {})))
    # Apply the fix logic (what needs to be implemented in CriticWorker).
    attn_impl = extracted.get("attn_implementation", "flash_attention_2")
    assert attn_impl == "sdpa"
    # Verify the value would flow into the AutoConfig kwargs for the critic.
    autoconfig_kwargs = {"attn_implementation": attn_impl}
    assert autoconfig_kwargs["attn_implementation"] == "sdpa"
def test_complete_training_configuration():
    """Integration test for a complete training configuration with both actor and critic overrides."""
    # Realistic setup: actor and critic share a checkpoint but use different
    # attention implementations (and carry unrelated extra overrides).
    raw_cfg = {
        "actor_rollout_ref": {
            "model": {
                "path": "/shared/models/llama-7b",
                "override_config": {"attn_implementation": "eager", "torch_dtype": "bfloat16"},
            }
        },
        "critic": {
            "model": {
                "path": "/shared/models/llama-7b",
                "override_config": {"attn_implementation": "sdpa", "num_labels": 1},
            }
        },
    }
    cfg = OmegaConf.create(raw_cfg)

    def extract_overrides(model_cfg):
        # Mirrors the extraction performed inside the workers.
        return OmegaConf.to_container(OmegaConf.create(model_cfg.get("override_config", {})))

    actor_overrides = extract_overrides(cfg.actor_rollout_ref.model)
    critic_overrides = extract_overrides(cfg.critic.model)
    # Each worker resolves its attention implementation independently.
    assert actor_overrides.get("attn_implementation", "flash_attention_2") == "eager"
    assert critic_overrides.get("attn_implementation", "flash_attention_2") == "sdpa"
    # Unrelated override keys survive extraction untouched.
    assert actor_overrides.get("torch_dtype") == "bfloat16"
    assert critic_overrides.get("num_labels") == 1
# Script entry point: allows running the suite directly without pytest.
# NOTE(review): the mock-decorated methods (test_*_passed_to_autoconfig /
# test_attn_implementation_passed_to_model) are intentionally not invoked
# here; they rely on pytest/unittest running them with the patches applied.
if __name__ == "__main__":
    # Run basic tests
    test_attn_implementation_fix_integration()
    test_critic_attn_implementation_fix_integration()
    test_complete_training_configuration()
    if VERL_AVAILABLE:
        # Run class-based tests
        test_class = TestFSDPAttnImplementation()
        test_class.test_attn_implementation_extraction_logic()
        test_class.test_override_config_integration()
        test_class.test_hydra_plus_prefix_config()
        test_class.test_backward_compatibility()
        # Run new critic tests
        test_class.test_critic_attn_implementation_extraction_logic()
        test_class.test_critic_override_config_integration()
        test_class.test_critic_hydra_plus_prefix_config()
        test_class.test_both_actor_and_critic_configuration()
        test_class.test_critic_backward_compatibility()
        print("✓ All FSDP attn_implementation tests passed!")
        print("✓ All CriticWorker attn_implementation tests passed!")
    else:
        print("⚠ VERL components not available, skipping VERL-specific tests")
        print("✓ Integration tests passed!")
        print("✓ Critic integration tests passed!")
| {
"repo_id": "verl-project/verl",
"file_path": "tests/workers/test_fsdp_attn_implementation.py",
"license": "Apache License 2.0",
"lines": 395,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:verl/models/mcore/model_forward_1f1b_overlap.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Optional
import torch
from megatron.core.models.common.model_chunk_schedule_plan import TransformerModelChunkSchedulePlan
from megatron.core.models.gpt.gpt_model import GPTModel
from megatron.core.utils import make_viewless_tensor
from torch import Tensor
from verl.models.mcore.util import preprocess_packed_seqs
from verl.utils.kernel.linear_cross_entropy import linear_cross_entropy
from verl.utils.megatron_utils import unwrap_model
from verl.utils.model import CausalLMOutputForPPO
from .util import postprocess_packed_seqs, postprocess_packed_seqs_for_dict_output
def gptmodel_forward_1f1b_overlap(
    model: GPTModel,
    input_ids: Tensor,
    position_ids: Tensor,
    attention_mask: Tensor,
    labels: Tensor = None,
    labels_mask: Tensor = None,
    multi_modal_inputs: Optional[dict] = None,
    logits_processor: Optional[Callable] = None,
    logits_processor_args: Optional[dict] = None,
    temperature: float = 1.0,
) -> TransformerModelChunkSchedulePlan:
    """Build a Megatron 1F1B-overlap schedule plan for a GPT model forward pass.

    Packs (rm-pads) the input sequences, builds the model's chunk schedule plan,
    and — on the post-process (last pipeline) stage — monkey-patches both the
    schedule plan's post-process node and the unwrapped model's ``_postprocess``
    so that log-probs/entropy are computed with the fused
    ``linear_cross_entropy`` kernel instead of materializing full logits.

    Args:
        model: (possibly wrapped) Megatron ``GPTModel``.
        input_ids: padded token ids, shape ``(batch, seq)``.
        position_ids: position ids matching ``input_ids``.
        attention_mask: padding mask used for sequence packing; its first two
            dims define ``(batch_size, seq_len)`` for un-packing outputs.
        labels: target token ids (consumed by the fused-kernel path).
        labels_mask: mask over ``labels`` for loss computation.
        multi_modal_inputs: NOTE(review): currently unused in this overlap
            path — confirm whether multimodal inputs are supported here.
        logits_processor: must be ``None``; only the fused kernel is supported.
        logits_processor_args: unused while ``logits_processor`` is ``None``.
        temperature: softmax temperature forwarded to ``linear_cross_entropy``.

    Returns:
        The patched ``TransformerModelChunkSchedulePlan``.
    """
    pre_process: bool = unwrap_model(model).pre_process
    post_process: bool = unwrap_model(model).post_process
    assert logits_processor is None, "only support fused kernel"
    batch_size, seq_len = attention_mask.shape[:2]
    # Remove padding so the model only computes over real tokens.
    input_ids_rmpad, packed_seq_params = preprocess_packed_seqs(input_ids, attention_mask, pre_process=pre_process)
    input_ids_rmpad = input_ids_rmpad.contiguous()
    schedule_plan = model.build_schedule_plan(
        input_ids=input_ids_rmpad,
        attention_mask=attention_mask,
        labels=labels,
        position_ids=position_ids,
        packed_seq_params=packed_seq_params,
    )
    if post_process:
        # Keep a closure reference to the *padded* mask for un-packing outputs;
        # the `attention_mask` parameter of `_postprocess` below shadows it.
        attention_mask_out = attention_mask
        def _postprocess(
            self,
            hidden_states,
            input_ids,
            position_ids,
            labels,
            rotary_pos_emb,
            rotary_pos_cos,
            rotary_pos_sin,
            mtp_in_postprocess=None,
            loss_mask=None,
            decoder_input=None,
            attention_mask=None,
            inference_params=None,
            packed_seq_params=None,
            sequence_len_offset=None,
            runtime_gather_output=None,
            extra_block_kwargs=None,
            inference_context=None,
        ):
            """patched from https://github.com/NVIDIA/Megatron-LM/blob/core_r0.14.0/megatron/core/models/gpt/gpt_model.py#L412"""
            # The bare string below is retained from Megatron's original
            # docstring; it is a no-op expression statement at runtime.
            """Postprocesses decoder hidden states to generate logits or compute loss.
            Applies Multi-Token Prediction if enabled, generates output logits through
            the output layer, and computes language model loss when labels are provided.
            """
            from megatron.core import parallel_state
            from megatron.core.tensor_parallel import gather_from_sequence_parallel_region
            in_inference_mode = inference_context is not None and not self.training
            if in_inference_mode:
                assert runtime_gather_output, "Inference must always gather TP logits"
            # logits and loss
            output_weight = None
            if self.share_embeddings_and_output_weights:
                output_weight = self.shared_embedding_or_output_weight()
            if mtp_in_postprocess:
                hidden_states = self.mtp(
                    input_ids=input_ids,
                    position_ids=position_ids,
                    hidden_states=hidden_states,
                    attention_mask=attention_mask,
                    inference_params=inference_params,
                    rotary_pos_emb=rotary_pos_emb,
                    rotary_pos_cos=rotary_pos_cos,
                    rotary_pos_sin=rotary_pos_sin,
                    packed_seq_params=packed_seq_params,
                    sequence_len_offset=sequence_len_offset,
                    embedding=self.embedding,
                    **(extra_block_kwargs or {}),
                )
            if not self.post_process:
                return hidden_states
            if self.mtp_process:
                from megatron.core.transformer.multi_token_prediction import (
                    MTPLossAutoScaler,
                    MTPLossLoggingHelper,
                    roll_tensor,
                )
                mtp_labels = labels.clone()
                # Chunk 0 is the main model's hidden states; chunks 1..N belong
                # to the N MTP layers.
                hidden_states_list = torch.chunk(hidden_states, 1 + self.config.mtp_num_layers, dim=0)
                hidden_states = hidden_states_list[0]
                if loss_mask is None:
                    # if loss_mask is not provided, use all ones as loss_mask
                    loss_mask = torch.ones_like(mtp_labels)
                for mtp_layer_number in range(self.config.mtp_num_layers):
                    # output
                    mtp_logits, _ = self.output_layer(
                        hidden_states_list[mtp_layer_number + 1],
                        weight=output_weight,
                        runtime_gather_output=runtime_gather_output,
                    )
                    # Calc loss for the current Multi-Token Prediction (MTP) layers.
                    mtp_labels, _ = roll_tensor(mtp_labels, shifts=-1, dims=-1, cp_group=self.cp_group)
                    loss_mask, num_tokens = roll_tensor(loss_mask, shifts=-1, dims=-1, cp_group=self.cp_group)
                    mtp_loss = self.compute_language_model_loss(mtp_labels, mtp_logits)
                    mtp_loss = loss_mask * mtp_loss
                    if self.training:
                        # TODO(shifangx): remove the use of parallel_state here
                        # after moving loss logging to loss_func in pretrain_gpt.py
                        MTPLossLoggingHelper.save_loss_to_tracker(
                            torch.sum(mtp_loss) / num_tokens,
                            mtp_layer_number,
                            self.config.mtp_num_layers,
                            avg_group=parallel_state.get_data_parallel_group(with_context_parallel=True),
                        )
                    mtp_loss_scale = self.config.mtp_loss_scaling_factor / self.config.mtp_num_layers
                    if self.config.calculate_per_token_loss:
                        hidden_states = MTPLossAutoScaler.apply(hidden_states, mtp_loss_scale * mtp_loss)
                    else:
                        hidden_states = MTPLossAutoScaler.apply(hidden_states, mtp_loss_scale * mtp_loss / num_tokens)
            # `logits_processor` is captured from the outer function, which
            # asserts it is None — so the fused-kernel `else` branch below is
            # the one that actually runs today; this branch is kept for parity
            # with the non-overlap forward.
            if logits_processor is not None:
                logits, _ = self.output_layer(
                    hidden_states, weight=output_weight, runtime_gather_output=runtime_gather_output
                )
                output_orig = logits.transpose(0, 1).contiguous()
                args = {
                    k: preprocess_packed_seqs(v, attention_mask_out, pre_process=True)[0]
                    for k, v in logits_processor_args.items()
                }
                output_dict = logits_processor(output_orig, **args)
                output = {
                    k: postprocess_packed_seqs(
                        v, packed_seq_params, attention_mask_out, batch_size, seq_len, post_process=post_process
                    )
                    for k, v in output_dict.items()
                }
            else:
                # fused kernel
                labels_rmpad, _ = preprocess_packed_seqs(labels, attention_mask, pre_process=True)
                labels_mask_rmpad, _ = preprocess_packed_seqs(labels_mask, attention_mask, pre_process=True)
                labels_rmpad = labels_rmpad.contiguous()
                labels_mask_rmpad = labels_mask_rmpad.contiguous()
                output = CausalLMOutputForPPO(
                    loss=None,
                    logits=None,
                    past_key_values=None,
                    hidden_states=hidden_states,
                    attentions=None,
                )
                if self.config.sequence_parallel:
                    hidden_states = gather_from_sequence_parallel_region(hidden_states)
                # Fused log-prob/entropy computation: never materializes the
                # full (tokens, vocab) logits tensor.
                logprobs, entropy = linear_cross_entropy(
                    hidden_states,
                    self.output_layer.weight,
                    labels_rmpad,
                    temperature,
                    "none",
                    parallel_state.get_tensor_model_parallel_group(),
                )
                output.entropy = entropy
                output.log_probs = logprobs
                output = postprocess_packed_seqs_for_dict_output(
                    labels_mask_rmpad,
                    output,
                    packed_seq_params,
                    attention_mask,
                    batch_size,
                    seq_len,
                    post_process=post_process,
                )
            output_ = [output["log_probs"]]
            # TODO NOW 1f1b overlap only support one tensor output
            # if "entropy" in output:
            # output_.append(output["entropy"])
            output_ = tuple(output_)
            return output_
    def _custom_post_process_node_forward_impl(self, hidden_states):
        # Replacement forward_impl for the schedule plan's post-process node:
        # apply the final layernorm, then delegate to the (patched)
        # GPTModel._postprocess using state captured on the chunk.
        if self.gpt_model.decoder.final_layernorm and not self.gpt_model.mtp_process:
            hidden_states = self.gpt_model.decoder.final_layernorm(hidden_states)
        # TENorm produces a "viewed" tensor. This will result in schedule.py's
        # deallocate_output_tensor() throwing an error, so a viewless tensor is
        # created to prevent this.
        hidden_states = make_viewless_tensor(inp=hidden_states, requires_grad=True, keep_graph=True)
        # Run GPTModel._postprocess
        output = self.gpt_model._postprocess(
            hidden_states=hidden_states,
            input_ids=self.chunk_state.input_ids,
            position_ids=self.chunk_state.position_ids,
            labels=self.chunk_state.labels,
            decoder_input=self.chunk_state.decoder_input,
            rotary_pos_emb=self.chunk_state.rotary_pos_emb,
            rotary_pos_cos=self.chunk_state.rotary_pos_cos,
            rotary_pos_sin=self.chunk_state.rotary_pos_sin,
            mtp_in_postprocess=False,
            loss_mask=self.chunk_state.loss_mask,
            attention_mask=self.chunk_state.attention_mask,
            packed_seq_params=self.chunk_state.packed_seq_params,
            sequence_len_offset=self.chunk_state.sequence_len_offset,
            runtime_gather_output=self.chunk_state.runtime_gather_output,
            extra_block_kwargs=self.chunk_state.extra_block_kwargs,
        )
        return output
    # Bind the custom forward as a method on the post-process node.
    # NOTE(review): `_postprocess` is only defined inside the `if post_process:`
    # block above, yet it is referenced unconditionally below — if this function
    # is ever called on a stage with post_process=False this raises NameError
    # (and `schedule_plan.post_process` may not exist either). Presumably this
    # path is only exercised on the last pipeline stage — confirm with callers.
    schedule_plan.post_process.forward_impl = _custom_post_process_node_forward_impl.__get__(
        schedule_plan.post_process, schedule_plan.post_process.__class__
    )
    unwrap_model(model)._postprocess = _postprocess.__get__(unwrap_model(model), unwrap_model(model).__class__)
    return schedule_plan
| {
"repo_id": "verl-project/verl",
"file_path": "verl/models/mcore/model_forward_1f1b_overlap.py",
"license": "Apache License 2.0",
"lines": 229,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/experimental/agent_loop/utils.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Any
def resolve_config_path(config_path: str) -> str:
"""Resolve agent loop configuration file path.
In multi-node Ray training, relative paths may not resolve correctly
because the working directory on remote nodes can differ from the driver node.
This function resolves relative paths by checking multiple locations in order:
1. If already absolute, return as-is
2. Try current working directory
3. Try relative to verl package installation (project root)
Args:
config_path: Configuration file path (relative or absolute)
Returns:
Absolute path to the configuration file
Raises:
FileNotFoundError: If the configuration file cannot be found
"""
# Return absolute paths unchanged
if os.path.isabs(config_path):
return config_path
# Try current working directory first
cwd = os.path.abspath(os.getcwd())
cwd_path = os.path.abspath(os.path.join(cwd, config_path))
if (cwd_path == cwd or cwd_path.startswith(cwd + os.sep)) and os.path.exists(cwd_path):
return cwd_path
# Try relative to verl project root (where verl package is installed)
try:
import verl
verl_package_dir = os.path.abspath(os.path.dirname(verl.__file__))
# Strategy 1: For development/editable installs.
project_root = os.path.dirname(verl_package_dir)
dev_path = os.path.abspath(os.path.join(project_root, config_path))
if (dev_path == project_root or dev_path.startswith(project_root + os.sep)) and os.path.exists(dev_path):
return dev_path
# Strategy 2: For standard package installations.
install_path = os.path.abspath(os.path.join(verl_package_dir, config_path))
if (install_path == verl_package_dir or install_path.startswith(verl_package_dir + os.sep)) and os.path.exists(
install_path
):
return install_path
except (ImportError, AttributeError):
pass # verl not installed or __file__ not available
# File not found - raise clear error
raise FileNotFoundError(
f"Agent loop configuration file not found: {config_path}. Tried current directory and verl project root."
)
# tokenizer.apply_chat_template is not working properly for gpt-oss model.
# Because the chat template requires tool call messages to parse tool response messages
# so we need to format the tool response manually.
def format_gpt_oss_tool_response_manually(tool_response: str, tool_call_name: str) -> str:
    """Wrap a tool response in the gpt-oss channel markup by hand.

    ``tokenizer.apply_chat_template`` cannot format a tool response without
    the preceding tool-call message, so the wrapping is done manually here.

    Args:
        tool_response: Tool response string.
        tool_call_name: Name of the tool that was called.

    Returns:
        Formatted tool response string.
    """
    header = f"<|start|>functions.{tool_call_name} to=assistant<|channel|>commentary<|message|>"
    return header + tool_response + "<|end|>"
def add_generation_prompt_for_gpt_oss(message_content: str) -> str:
    """Append the gpt-oss assistant generation prompt to a message string.

    Args:
        message_content: Message content string.

    Returns:
        Message content string with the generation prompt appended.
    """
    return f"{message_content}<|start|>assistant"
def build_gpt_oss_tool_response_text(messages: list[dict[str, Any]], tool_call_names: list[str]) -> str:
    """Build gpt-oss tool response text (manual formatting + generation prompt).

    Args:
        messages: Tool response messages; each must carry a ``"content"`` key.
        tool_call_names: Tool name for each message, positionally aligned.

    Returns:
        The concatenated formatted responses followed by the generation prompt.
    """
    # Positional indexing (not zip) so a missing tool name still raises
    # IndexError, matching the original behavior on mismatched inputs.
    formatted = "".join(
        format_gpt_oss_tool_response_manually(message["content"], tool_call_names[index])
        for index, message in enumerate(messages)
    )
    return add_generation_prompt_for_gpt_oss(formatted)
| {
"repo_id": "verl-project/verl",
"file_path": "verl/experimental/agent_loop/utils.py",
"license": "Apache License 2.0",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/workers/rollout/vllm_rollout/utils.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ctypes
import json
import logging
import os
import platform
import signal
import threading
from types import MethodType
from typing import Any, Literal, get_args
import torch
from verl.utils.device import is_npu_available
from verl.utils.vllm import TensorLoRARequest, VLLMHijack
from verl.utils.vllm.patch import patch_vllm_moe_model_weight_loader
from verl.utils.vllm.vllm_fp8_utils import apply_vllm_fp8_patches, is_fp8_model, load_quanted_weights
# NOTE(review): the logger is keyed by __file__ (an absolute path) rather than
# the conventional __name__ — confirm whether logging configs rely on this.
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
# magic numbers that ensure we are using the same LoRA adapter during the rollout and training process
VLLM_LORA_INT_ID = 123
VLLM_LORA_NAME = "123"
VLLM_LORA_PATH = "simon_lora_path"
# Env-var defaults required by older vllm-ascend versions; applied in
# vLLMColocateWorkerExtension.__new__ only when not already set by the user.
VLLM_ASCEND_REQUIRED_ENV_VARS = {"VLLM_ALL2ALL_BACKEND": "flashinfer_all2allv", "VLLM_ASCEND_ENABLE_NZ": "0"}
def set_death_signal():
    """Kill the current process when the parent process exits."""
    if platform.system() != "Linux":
        return  # prctl(2) is Linux-only; no-op elsewhere
    # PR_SET_PDEATHSIG == 1: ask the kernel to deliver SIGKILL to this
    # process when its parent dies.
    PR_SET_PDEATHSIG = 1
    ctypes.CDLL("libc.so.6").prctl(PR_SET_PDEATHSIG, signal.SIGKILL)
    if os.getppid() == 1:
        # The parent died before prctl took effect (we were reparented to
        # init), so the death signal will never fire — kill ourselves now.
        os.kill(os.getpid(), signal.SIGKILL)
def get_device_uuid(device_id: int) -> str:
    """Return a stable identifier string for the local accelerator `device_id`."""
    from vllm.platforms import current_platform

    if not is_npu_available:
        return current_platform.get_device_uuid(device_id)
    # Convert torch.npu.current_device to its corresponding ASCEND_RT_VISIBLE_DEVICES.
    visible = os.getenv("ASCEND_RT_VISIBLE_DEVICES")
    if visible is None:
        return f"NPU-{device_id}"
    npu_visible_devices = visible.split(",")
    assert device_id < len(npu_visible_devices), f"device_id {device_id} must less than {npu_visible_devices}"
    return "NPU-" + npu_visible_devices[device_id]
def get_vllm_max_lora_rank(lora_rank: int):
    """
    For vLLM, automatically adjusts the `max_lora_rank` to the nearest allowed value.
    The allowed values are retrieved from vLLM's MaxLoRARanks type definition.
    """
    assert lora_rank > 0, f"lora_rank must be greater than 0, get {lora_rank}"
    try:
        from vllm.config.lora import MaxLoRARanks
    except Exception:
        # FIXME: migrate vllm version https://github.com/vllm-project/vllm/blob/main/vllm/config/lora.py#L25
        MaxLoRARanks = Literal[1, 8, 16, 32, 64, 128, 256, 320, 512]
    allowed_ranks = sorted(get_args(MaxLoRARanks))
    largest = allowed_ranks[-1]
    if lora_rank > largest:
        raise ValueError(f"lora_rank must be less than or equal to {largest}, but got {lora_rank}")
    # Smallest allowed rank that can accommodate the requested one; guaranteed
    # to exist because lora_rank <= largest was checked above.
    return next(rank for rank in allowed_ranks if lora_rank <= rank)
# https://github.com/vllm-project/vllm/issues/13175
def monkey_patch_compute_logits(model, vocab_size: int):
    """Patch `model.compute_logits` so out-of-vocabulary slots are never sampled.

    vLLM may pad the vocabulary dimension; slots at index >= `vocab_size` are
    forced to -inf after every logits computation.
    See https://github.com/vllm-project/vllm/issues/13175.
    """
    original_compute_logits = model.compute_logits

    def compute_logits(self, *args, **kwargs) -> torch.Tensor:
        # Delegate to the already-bound original, then mask the padded tail.
        logits = original_compute_logits(*args, **kwargs)
        logits[..., vocab_size:] = float("-inf")
        return logits

    model.compute_logits = MethodType(compute_logits, model)
class vLLMColocateWorkerExtension:
    """
    The class for vLLM's worker to inherit from, in the colocate setting.
    By defining an extension class, the code can work no matter what is
    the underlying worker class. This way, the code can be compatible
    with both vLLM V0 and V1.
    NOTE: we define this class in a separate module, and the main module
    should pass the full qualified name as `worker_extension_cls` argument.
    Feature support:
    1. LoRA
    2. Online FP8 quantization
    """
    def __new__(cls, **kwargs):
        # All patching happens in __new__ (not __init__) so it runs before the
        # mixed-in vLLM worker base class initializes in the subprocess.
        set_death_signal()
        # 1. patch for Lora
        VLLMHijack.hijack()
        # 2. patch online fp8 quant
        if os.environ.get("VERL_VLLM_FP8_QUANT_ENABLED", "0") == "1":
            apply_vllm_fp8_patches()
        # 3. patch QAT (compressed-tensors NVFP4) for dynamic weight loading
        vllm_config = kwargs.get("vllm_config")
        quant_config = getattr(vllm_config, "quant_config", None) if vllm_config else None
        _is_qat_model = getattr(quant_config, "quant_format", None) == "nvfp4-pack-quantized"
        if _is_qat_model:
            from verl.utils.qat import apply_qat_patches
            apply_qat_patches()
            logger.info("Applied QAT patches in vLLM worker subprocess")
        # TODO: For ascend NPU, when the corresponding vllm-ascend version is upgraded to v0.13.0,
        # please remove the VLLM_ASCEND_REQUIRED_ENV_VARS variable replacement action.
        # This is only a fix for vllm version < v0.13.0.
        if is_npu_available:
            for k in VLLM_ASCEND_REQUIRED_ENV_VARS:
                if k not in os.environ:
                    os.environ[k] = VLLM_ASCEND_REQUIRED_ENV_VARS[k]
        instance = super().__new__(cls)
        # Stash the QAT flag on the instance for update_weights_from_ipc.
        instance._is_qat_model = _is_qat_model
        return instance
    def monkey_patch_model(self, vocab_size: int):
        """Apply per-model patches after the model is built in the worker."""
        # patch compute_logits to avoid sampling OOV token
        monkey_patch_compute_logits(self.model_runner.model, vocab_size)
        # patch weight loader to support MoE model
        patch_vllm_moe_model_weight_loader(self.model_runner.model)
    def update_weights_from_ipc(self, peft_config: dict = None, base_sync_done=False, use_shm: bool = False):
        """Update the weights of the rollout model."""
        from vllm.platforms import current_platform
        from verl.workers.rollout.vllm_rollout.bucketed_weight_transfer import BucketedWeightReceiver
        if current_platform.device_type == "npu" and self.device is None:
            self.device = torch.device(f"npu:{self.local_rank}")
        # In async mode, make sure the old lora is removed before adding the new one
        if peft_config and base_sync_done:
            self.remove_lora(VLLM_LORA_INT_ID)
        # Standard path = neither a LoRA delta-sync nor an FP8 model.
        use_standard_weight_load = not (peft_config and base_sync_done) and not is_fp8_model(
            self.model_runner.vllm_config
        )
        if self._is_qat_model:
            # QAT: Prepare for weight loading BEFORE receiving any buckets
            from verl.utils.qat import prepare_qat_for_load_weights
            prepare_qat_for_load_weights(self.model_runner.model, device=self.device)
            logger.info("QAT: prepare_qat_for_load_weights completed")
        elif use_standard_weight_load:
            # Re-apply here because async IPC weight sync can happen long after init and lose MoE weight_loader attrs.
            patch_vllm_moe_model_weight_loader(self.model_runner.model)
        assert self.device is not None
        receiver = BucketedWeightReceiver(
            zmq_handle=self._get_zmq_handle(),
            device=self.device,
            use_shm=use_shm,
        )
        # Weights arrive in buckets over ZMQ; each bucket is loaded as it lands.
        receiver.receive_weights(
            on_bucket_received=lambda weights: self._update_weights(
                weights, peft_config=peft_config, base_sync_done=base_sync_done
            )
        )
        if self._is_qat_model:
            # QAT: call process_weights_after_loading AFTER all buckets are received
            from verl.utils.qat import manual_process_weights_after_loading
            manual_process_weights_after_loading(self.model_runner.model)
            logger.info("QAT: process_weights_after_loading completed")
        elif use_standard_weight_load:
            # Some post-load transforms are non-idempotent; run once after all buckets.
            from vllm.model_executor.model_loader.utils import process_weights_after_loading
            model = self.model_runner.model
            model_config = self.model_runner.vllm_config.model_config
            process_weights_after_loading(model, model_config, self.device)
    def _update_weights(self, weights: list[tuple[str, torch.Tensor]], peft_config: dict, base_sync_done: bool):
        # Per-bucket load callback: LoRA delta, FP8 online-quantized load, or
        # the plain load_weights path.
        if peft_config and base_sync_done:
            weights = dict(weights)
            lora_request = TensorLoRARequest(
                lora_name=VLLM_LORA_NAME,
                lora_int_id=VLLM_LORA_INT_ID,
                lora_path=VLLM_LORA_PATH,
                peft_config=peft_config,
                lora_tensors=weights,
            )
            self.add_lora(lora_request)
            logger.info(f"vLLM load weights, loaded_params: {len(weights)}")
        else:
            # Add the FP8 related logic here as sharding manager has been deprecated.
            # Check if FP8 quantization is enabled and apply appropriate weight loading
            if is_fp8_model(self.model_runner.vllm_config):
                logger.info(f"FP8 model detected (async): {self.model_runner.vllm_config.quant_config}")
                # Convert bf16 weights to fp8 format before loading
                loaded_params = load_quanted_weights(weights, self.model_runner)
                logger.info(f"FP8 weights loaded (async), loaded_params: {len(loaded_params)}")
            else:
                logger.info("Loading standard weights (non-FP8, async)")
                self.model_runner.model.load_weights(weights)
    def _get_zmq_handle(self) -> str:
        """Get ZMQ handle for communication."""
        # Cached after the first call; keyed by the device UUID so colocated
        # trainer and rollout processes on the same GPU share one socket path.
        if not hasattr(self, "device_uuid") or not self.device_uuid:
            self.device_uuid = get_device_uuid(self.device.index)
        return f"ipc:///tmp/rl-colocate-zmq-{self.device_uuid}.sock"
class SuppressSignalInThread:
    """Context manager that makes ``signal.signal`` a no-op outside the main thread.

    CPython only permits installing signal handlers from the main thread, so
    code that registers handlers while running in a worker thread would raise.
    While active, registration attempts from non-main threads are ignored
    (with a printed notice); main-thread calls pass through to the real
    ``signal.signal``, which is restored on exit.
    """

    def __enter__(self):
        self.original_signal = signal.signal

        def guarded_signal(sig, action):
            in_main = threading.current_thread() is threading.main_thread()
            if in_main:
                return self.original_signal(sig, action)
            print(f"Ignored signal {sig} in thread {threading.current_thread().name}")
            return None

        signal.signal = guarded_signal
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always restore the real signal.signal, even on exception.
        signal.signal = self.original_signal
def build_cli_args_from_config(config: dict[str, Any]) -> list[str]:
    """
    Convert a config dictionary to CLI arguments for vLLM server.
    Handles different value types appropriately:
    - None: skipped
    - bool True: adds '--key'
    - bool False: skipped
    - list: expands to '--key item1 item2 ...'
    - empty list: skipped (vLLM uses nargs="+" which requires at least one value)
    - dict: JSON serialized
    - other: string converted
    Args:
        config: Dictionary of configuration key-value pairs
    Returns:
        List of CLI argument strings
    """
    args: list[str] = []
    for key, value in config.items():
        flag = f"--{key}"
        if value is None:
            continue  # unset option — omit entirely
        # bool is checked before the generic branch: bool is a subclass of int
        # and must render as a bare flag, not '--key True'.
        if isinstance(value, bool):
            if value:
                args.append(flag)
        elif isinstance(value, list):
            # vLLM parses list options with nargs="+", which requires at least
            # one value — drop empty lists entirely.
            if value:
                args.append(flag)
                args.extend(str(item) for item in value)
        elif isinstance(value, dict):
            # Dicts are passed as a single JSON-encoded argument.
            args.extend((flag, json.dumps(value)))
        else:
            args.extend((flag, str(value)))
    return args
| {
"repo_id": "verl-project/verl",
"file_path": "verl/workers/rollout/vllm_rollout/utils.py",
"license": "Apache License 2.0",
"lines": 243,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:tests/utils/test_mlflow_key_sanitization.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest.mock import patch
from verl.utils.tracking import _MlflowLoggingAdapter
class TestMlflowLoggingAdapter(unittest.TestCase):
    """Unit tests for _MlflowLoggingAdapter's metric-key sanitization."""

    def test_sanitize_key_and_warning(self):
        """Test key sanitization for invalid characters and consecutive slashes with warnings."""
        adapter = _MlflowLoggingAdapter()
        # Mix of already-valid keys, keys with characters MLflow rejects, and
        # keys with runs of '/' that must collapse to a single separator.
        data = {
            "valid_key": 1.0,
            "invalid@key!": 2.0,
            "another/valid-key": 3.0,
            "bad key#": 4.0,
            "val-aux//reward/mean_at_1": 5.0,
            "val-core///acc/best_at_5": 6.0,
            "metric////with/many////slashes": 7.0,
        }
        # Patch mlflow.log_metrics to capture the metrics actually sent
        with (
            patch("mlflow.log_metrics") as mock_log_metrics,
            patch.object(adapter, "logger") as mock_logger,
        ):
            adapter.log(data, step=5)
            # Check that invalid characters are sanitized
            sent_metrics = mock_log_metrics.call_args[1]["metrics"]
            self.assertIn("invalid_at_key_", sent_metrics)  # @ becomes _at_, ! becomes _
            self.assertIn("bad key_", sent_metrics)  # # becomes _, space remains
            self.assertNotIn("invalid@key!", sent_metrics)
            self.assertNotIn("bad key#", sent_metrics)
            # Check that consecutive slashes are collapsed to single slashes
            self.assertIn("val-aux/reward/mean_at_1", sent_metrics)
            self.assertIn("val-core/acc/best_at_5", sent_metrics)
            self.assertIn("metric/with/many/slashes", sent_metrics)
            self.assertNotIn("val-aux//reward/mean_at_1", sent_metrics)
            self.assertNotIn("val-core///acc/best_at_5", sent_metrics)
            # Check that warnings were logged for all sanitized keys
            warning_msgs = [str(call) for call in mock_logger.warning.call_args_list]
            # Warnings for invalid characters
            self.assertTrue(any("invalid@key!" in msg and "invalid_at_key_" in msg for msg in warning_msgs))
            self.assertTrue(any("bad key#" in msg and "bad key_" in msg for msg in warning_msgs))
            # Warnings for consecutive slashes
            self.assertTrue(any("val-aux//reward/mean_at_1" in msg for msg in warning_msgs))
            self.assertTrue(any("val-core///acc/best_at_5" in msg for msg in warning_msgs))
            self.assertTrue(any("metric////with/many////slashes" in msg for msg in warning_msgs))
# Allow running this test module directly (outside a pytest invocation).
if __name__ == "__main__":
    unittest.main()
| {
"repo_id": "verl-project/verl",
"file_path": "tests/utils/test_mlflow_key_sanitization.py",
"license": "Apache License 2.0",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:examples/tutorial/agent_loop_get_started/sandbox.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import aiohttp
from transformers.utils import get_json_schema
from verl.tools.base_tool import BaseTool, OpenAIFunctionToolSchema, ToolResponse
class SandboxTool(BaseTool):
    """Tool that executes model-written Python code in a sandbox-fusion service.

    Config keys used: ``sandbox_fusion_url`` — HTTP endpoint of the sandbox
    code-execution service.
    """

    def __init__(self, config: dict, tool_schema: OpenAIFunctionToolSchema):
        super().__init__(config, tool_schema)
        # Different model may use different code pattern, e.g. python, py, etc.
        self.code_pattern = re.compile(r"```py(.*?)```", re.DOTALL)
    async def code_interpreter(self, code: str) -> str:
        """Execute the code in the sandbox.
        Args:
            code: The code to be executed.
        Returns:
            str: The output of the code execution.
        """
        async with aiohttp.ClientSession() as session:
            async with session.post(
                self.config.get("sandbox_fusion_url"),
                json={"code": code},
            ) as resp:
                resp.raise_for_status()
                result = await resp.json()
                # Combined stdout+stderr is returned so the model sees errors too.
                stdout, stderr = result["run_result"]["stdout"], result["run_result"]["stderr"]
                return stdout + stderr
    def get_openai_tool_schema(self) -> OpenAIFunctionToolSchema:
        # Derive the OpenAI tool schema from code_interpreter's docstring/signature.
        schema = get_json_schema(self.code_interpreter)
        return OpenAIFunctionToolSchema(**schema)
    async def execute(self, instance_id: str, parameters: dict, **kwargs) -> tuple[ToolResponse, float, dict]:
        """Run the model-provided code and return (response, reward, metrics)."""
        code = parameters["code"]
        # Strip a surrounding ```py ... ``` fence if present.
        matches = self.code_pattern.findall(code)
        if matches:
            code = matches[0].strip()
        # NOTE: Some script may not explicitly print result, we need to add a print statement to the end of the script.
        # More better way is to SFT the model to make it print result by default, we skip SFT stage in this tutorial.
        lines = code.split("\n")
        # Wrap the last non-empty line in print(...) unless it already prints.
        # NOTE(review): this assumes the last line is a bare expression;
        # wrapping a statement (e.g. an assignment) would produce invalid code.
        for i, line in reversed(list(enumerate(lines))):
            if line == "":
                continue
            if not lines[i].startswith("print"):
                lines[i] = f"print({line})"
            break
        code = "\n".join(lines)
        result = await self.code_interpreter(code)
        return ToolResponse(text=result), 0.0, {}
| {
"repo_id": "verl-project/verl",
"file_path": "examples/tutorial/agent_loop_get_started/sandbox.py",
"license": "Apache License 2.0",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/trainer/main_generation_server.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generate responses given a dataset of prompts
"""
import os
import aiohttp
import hydra
import numpy as np
import ray
# Quiet NCCL logging and keep tokenizer parallelism enabled in worker processes.
# Set before the heavyweight imports below so they observe these values.
os.environ["NCCL_DEBUG"] = "WARN"
os.environ["TOKENIZERS_PARALLELISM"] = "true"
# os.environ['TORCH_COMPILE_DISABLE'] = '1'
import asyncio
from pprint import pprint
import pandas as pd
from omegaconf import OmegaConf
from openai.types.chat import ChatCompletion
from verl.utils.hdfs_io import makedirs
from verl.workers.rollout.replica import get_rollout_replica_class
async def start_server(config):
    """Spin up standalone rollout server replicas and return their handles/addresses.

    One replica is created per tensor-parallel group:
    num_replicas = (n_gpus_per_node * nnodes) // tensor_model_parallel_size.

    Returns:
        (server_handles, server_addresses): one entry per replica.
    """
    tp_size = config.actor_rollout_ref.rollout.tensor_model_parallel_size
    num_replicas = (config.trainer.n_gpus_per_node * config.trainer.nnodes) // tp_size
    rollout_config = config.actor_rollout_ref.rollout
    model_config = config.actor_rollout_ref.model
    # create standalone rollout server
    rollout_server_class = get_rollout_replica_class(config.actor_rollout_ref.rollout.name)
    rollout_servers = [
        rollout_server_class(
            replica_rank=replica_rank,
            config=rollout_config,
            model_config=model_config,
            gpus_per_node=config.trainer.n_gpus_per_node,
        )
        for replica_rank in range(num_replicas)
    ]
    # Initialize all replicas concurrently.
    await asyncio.gather(*[server.init_standalone() for server in rollout_servers])
    server_handles = [server._server_handle for server in rollout_servers]
    server_addresses = [server._server_address for server in rollout_servers]
    assert len(server_handles) == num_replicas
    assert len(server_addresses) == num_replicas
    return server_handles, server_addresses
async def submit_request(server_address, **chat_complete_request):
    """POST one chat-completion request to a rollout server and parse the reply.

    Raw aiohttp is used (instead of the OpenAI client) to avoid hangs when a
    large number of requests are in flight.

    Args:
        server_address: "host:port" of the rollout server.
        **chat_complete_request: JSON payload for /v1/chat/completions; an
            optional "extra_headers" entry is popped and sent as HTTP headers.

    Returns:
        ChatCompletion: the parsed response.
    """
    extra_headers = chat_complete_request.pop("extra_headers", {})
    # No total timeout: generation can take arbitrarily long.
    timeout = aiohttp.ClientTimeout(total=None)
    # `async with` guarantees the session is closed on any exit path. The
    # previous try/finally bound `session` inside the try, so a failure in
    # ClientSession() itself raised NameError in the finally block, masking
    # the original error.
    async with aiohttp.ClientSession(timeout=timeout) as session:
        async with session.post(
            url=f"http://{server_address}/v1/chat/completions",
            headers={"Authorization": "Bearer token-abc123", **extra_headers},
            json=chat_complete_request,
        ) as resp:
            data = await resp.json()
            return ChatCompletion(**data)
async def generate_per_replica(server_address, model_path: str, n_samples: int, sampling_params: dict, chat_lst: list):
    """Sample `n_samples` completions for every chat on a single replica.

    Requests go through `submit_request` (raw aiohttp) rather than AsyncOpenAI,
    which can hang when the number of in-flight requests is large.

    Args:
        server_address: "host:port" of the replica's server.
        model_path: model identifier sent in each request.
        n_samples: completions per chat.
        sampling_params: extra sampling fields merged into each request.
        chat_lst: list of message lists (one per prompt).

    Returns:
        list[ChatCompletion]: n_samples results per chat, in chat order.
    """
    payloads = [
        {"model": model_path, "messages": messages, **sampling_params}
        for messages in chat_lst
        for _ in range(n_samples)
    ]
    return await asyncio.gather(*(submit_request(server_address, **payload) for payload in payloads))
async def generate(
    server_addresses: list, model_path: str, n_samples: int, sampling_params: dict, chat_numpy: np.ndarray
):
    """Shard the chats across replicas and run generation on all of them.

    Args:
        server_addresses: one "host:port" per replica.
        model_path: model identifier for the requests.
        n_samples: completions per chat.
        sampling_params: extra sampling fields merged into each request.
        chat_numpy: array of chats; split into len(server_addresses) shards.

    Returns:
        list of per-replica result lists (same order as server_addresses).
    """
    num_replicas = len(server_addresses)
    # np.array_split tolerates uneven division, yielding exactly one shard
    # per replica.
    shards = [shard.tolist() for shard in np.array_split(chat_numpy, num_replicas)]
    assert len(shards) == num_replicas
    tasks = [
        generate_per_replica(address, model_path, n_samples, sampling_params, shard)
        for address, shard in zip(server_addresses, shards)
    ]
    return await asyncio.gather(*tasks)
@hydra.main(config_path="config", config_name="ppo_trainer", version_base=None)
def main(config):
    """Generate n responses per prompt from parquet dataset(s) via rollout servers.

    Reads prompts from config.data.train_files, starts standalone rollout
    server replicas, fans out chat-completion requests, and writes the input
    dataframe with a new "responses" column to config.data.output_path.
    """
    ray.init(runtime_env={"env_vars": {"TOKENIZERS_PARALLELISM": "true", "NCCL_DEBUG": "WARN", "VLLM_USE_V1": "1"}})
    pprint(OmegaConf.to_container(config, resolve=True))  # resolve=True will eval symbol values
    OmegaConf.resolve(config)
    n_samples = config.actor_rollout_ref.rollout.n
    # Greedy decoding (temperature 0) is deterministic, so multiple samples
    # per prompt would be identical.
    if config.actor_rollout_ref.rollout.temperature == 0.0:
        assert n_samples == 1, "When temperature=0, n_samples must be 1."
    assert n_samples >= 1, "n_samples should always >= 1"
    sampling_params = {
        "temperature": config.actor_rollout_ref.rollout.temperature,
        "top_p": config.actor_rollout_ref.rollout.top_p,
        # "top_k": config.actor_rollout_ref.rollout.top_k,
        "max_tokens": config.actor_rollout_ref.rollout.response_length,
    }
    from omegaconf import ListConfig
    train_files = config.data.train_files
    if not isinstance(train_files, list | ListConfig):
        train_files = [train_files]
    # read dataset. Note that the dataset should directly contain chat template format (e.g., a list of dictionary)
    datasets = []
    for train_file in train_files:
        dataset = pd.read_parquet(train_file)
        datasets.append(dataset)
    # concat dataset
    dataset = pd.concat(datasets, axis=0, ignore_index=True)
    chat_lst = dataset[config.data.prompt_key].tolist()
    chat_lst = [chat.tolist() for chat in chat_lst]
    chat_numpy = np.array(chat_lst)
    # start native server
    server_handles, server_addresses = asyncio.run(start_server(config))
    # run generate
    gen_results = asyncio.run(
        generate(server_addresses, config.actor_rollout_ref.model.path, n_samples, sampling_params, chat_numpy)
    )
    # reshape results into a numpy array
    import itertools
    # gen_results is a list of per-replica lists; flatten preserves the
    # original prompt order because shards were split in order.
    results = list(itertools.chain.from_iterable(gen_results))
    # extract content from results
    results = np.array([result.choices[0].message.content for result in results])
    results = np.reshape(results, (-1, n_samples))
    assert results.shape == (len(chat_lst), n_samples)
    results = results.tolist()
    # add to the data frame
    dataset["responses"] = results
    # write to a new parquet
    output_dir = os.path.dirname(config.data.output_path)
    makedirs(output_dir, exist_ok=True)
    print(f"Saving results to {config.data.output_path}")
    dataset.to_parquet(config.data.output_path)
if __name__ == "__main__":
main()
| {
"repo_id": "verl-project/verl",
"file_path": "verl/trainer/main_generation_server.py",
"license": "Apache License 2.0",
"lines": 152,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:tests/experimental/agent_loop/test_gpt_oss_tool_parser.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from transformers import AutoTokenizer
from verl.experimental.agent_loop.tool_parser import GptOssToolParser
@pytest.mark.asyncio
@pytest.mark.skip(reason="local test only")
async def test_gpt_oss_tool_parser():
    """GptOssToolParser should recover the single function call from a gpt-oss transcript."""
    example_text = """
<|start|>assistant<|channel|>commentary to=functions.get_current_weather \
<|constrain|>json<|message|>{"location": "Tokyo"}<|call|>
<|start|>functions.get_current_weather to=assistant<|channel|>commentary<|message|>\
{ "temperature": 20, "sunny": true }<|end|>"""

    tok = AutoTokenizer.from_pretrained("openai/gpt-oss-20b")
    parser = GptOssToolParser(tok)
    _, calls = await parser.extract_tool_calls(tok.encode(example_text))

    assert len(calls) == 1
    assert calls[0].name == "get_current_weather"
    assert calls[0].arguments == '{"location": "Tokyo"}'
| {
"repo_id": "verl-project/verl",
"file_path": "tests/experimental/agent_loop/test_gpt_oss_tool_parser.py",
"license": "Apache License 2.0",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:verl/models/transformers/qwen3_vl.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logging
import os
from dataclasses import dataclass
from typing import Optional
import torch
from transformers.models.qwen3_vl.modeling_qwen3_vl import (
Qwen3VLCausalLMOutputWithPast,
Qwen3VLForConditionalGeneration,
)
# Module logger; verbosity is controlled by the VERL_LOGGING_LEVEL env var (default WARN).
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
def get_rope_index(
    processor,
    input_ids: torch.Tensor,
    image_grid_thw: Optional[torch.Tensor] = None,
    video_grid_thw: Optional[torch.Tensor] = None,
    attention_mask: Optional[torch.Tensor] = None,
    **kwargs,
) -> torch.Tensor:
    """
    Gets the position ids for Qwen3-VL, it should be generated before sharding the sequence.
    The batch dim has been removed and the input_ids should be a 1D tensor representing a single example.
    https://github.com/huggingface/transformers/blob/v4.57.0/src/transformers/models/qwen3_vl/modeling_qwen3_vl.py#L916

    Args:
        processor: HF processor; provides the spatial merge size and the image / video /
            vision-start token ids used to locate vision blocks in the sequence.
        input_ids: 1-D token tensor for a single example (no batch dim).
        image_grid_thw: per-image (t, h, w) patch grid, shape (num_images, 3).
        video_grid_thw: per-video (t, h, w) patch grid, shape (num_videos, 3).
        attention_mask: optional mask; only positions where it is 1 get computed ids,
            masked positions keep the initial placeholder value 1.

    Returns:
        (3, seq_len) tensor of (temporal, height, width) M-RoPE position ids.
    """
    spatial_merge_size = processor.image_processor.merge_size
    image_token_id = processor.image_token_id
    video_token_id = processor.video_token_id
    vision_start_token_id = processor.vision_start_token_id
    # Since we use timestamps to separate videos,
    # like <t1> <vision_start> <frame1> <vision_end> <t2> <vision_start> <frame2> <vision_end>,
    # the video_grid_thw should also be split
    if video_grid_thw is not None:
        # Expand each video row into one row per frame, each with temporal extent 1.
        video_grid_thw = torch.repeat_interleave(video_grid_thw, video_grid_thw[:, 0], dim=0)
        video_grid_thw[:, 0] = 1
    if input_ids is not None and (image_grid_thw is not None or video_grid_thw is not None):
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        # Start from placeholder value 1 everywhere; real ids are scattered in below.
        position_ids = torch.ones(3, input_ids.shape[0], dtype=input_ids.dtype, device=input_ids.device)
        image_index, video_index = 0, 0
        attention_mask = attention_mask.to(input_ids.device)
        input_ids = input_ids[attention_mask == 1]
        image_nums, video_nums = 0, 0
        # The token right after each <vision_start> tells whether the block is image or video.
        vision_start_indices = torch.argwhere(input_ids == vision_start_token_id)
        vision_tokens = input_ids[vision_start_indices + 1]
        image_nums = (vision_tokens == image_token_id).sum()
        video_nums = (vision_tokens == video_token_id).sum()
        input_tokens = input_ids.tolist()
        llm_pos_ids_list: list = []
        st = 0
        remain_images, remain_videos = image_nums, video_nums
        # Walk the sequence, alternating text spans and vision blocks.
        for _ in range(image_nums + video_nums):
            if image_token_id in input_tokens and remain_images > 0:
                ed_image = input_tokens.index(image_token_id, st)
            else:
                ed_image = len(input_tokens) + 1
            if video_token_id in input_tokens and remain_videos > 0:
                ed_video = input_tokens.index(video_token_id, st)
            else:
                ed_video = len(input_tokens) + 1
            # Whichever vision token comes first determines the next block's modality.
            if ed_image < ed_video:
                t, h, w = (
                    image_grid_thw[image_index][0],
                    image_grid_thw[image_index][1],
                    image_grid_thw[image_index][2],
                )
                image_index += 1
                remain_images -= 1
                ed = ed_image
            else:
                t, h, w = (
                    video_grid_thw[video_index][0],
                    video_grid_thw[video_index][1],
                    video_grid_thw[video_index][2],
                )
                video_index += 1
                remain_videos -= 1
                ed = ed_video
            # Vision patches are merged spatial_merge_size x spatial_merge_size into LLM tokens.
            llm_grid_t, llm_grid_h, llm_grid_w = (
                t.item(),
                h.item() // spatial_merge_size,
                w.item() // spatial_merge_size,
            )
            # Text span before the vision block: all three axes advance together.
            text_len = ed - st
            st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
            llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
            # t_index is always 0 because llm_grid_t is always 1
            # (we use timestamps to encode the temporal information for videos)
            t_index = torch.arange(llm_grid_t).view(-1, 1).expand(-1, llm_grid_h * llm_grid_w).flatten()
            h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(llm_grid_t, -1, llm_grid_w).flatten()
            w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(llm_grid_t, llm_grid_h, -1).flatten()
            llm_pos_ids_list.append(torch.stack([t_index, h_index, w_index]) + text_len + st_idx)
            st = ed + llm_grid_t * llm_grid_h * llm_grid_w
        # Trailing text after the last vision block.
        if st < len(input_tokens):
            st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
            text_len = len(input_tokens) - st
            llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
        llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1)
        # Scatter computed ids back into the unmasked positions only.
        position_ids[..., attention_mask == 1] = llm_positions.to(position_ids.device)
    else:
        # Text-only path: plain cumulative positions, identical across the three axes.
        if attention_mask is not None:
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            position_ids = position_ids.unsqueeze(0).expand(3, -1).to(attention_mask.device)
        else:
            position_ids = torch.arange(input_ids.shape[1], device=input_ids.device).view(1, -1).expand(3, -1)
    return position_ids
def _get_input_embeds(
    model: "Qwen3VLForConditionalGeneration",
    input_ids: torch.LongTensor,
    attention_mask: Optional[torch.Tensor] = None,
    pixel_values: Optional[torch.FloatTensor] = None,
    pixel_values_videos: Optional[torch.FloatTensor] = None,
    image_grid_thw: Optional[torch.LongTensor] = None,
    video_grid_thw: Optional[torch.LongTensor] = None,
):
    """Build language-model inputs by splicing vision-tower embeddings into the text embeddings.

    Image/video placeholder tokens in ``input_ids`` are replaced (via masked_scatter) with
    the corresponding visual features, and the per-layer "deepstack" embeddings are
    aggregated over all visual positions.

    Returns:
        dict with keys ``inputs_embeds``, ``attention_mask``, ``visual_pos_masks`` and
        ``deepstack_visual_embeds``, matching the language model's expected kwargs.

    Raises:
        ValueError: if the number of placeholder tokens does not match the number of
            visual features produced by the vision tower.
    """
    inputs_embeds = model.get_input_embeddings()(input_ids)
    image_mask, video_mask = None, None
    if pixel_values is not None:
        pixel_values = pixel_values.type(model.visual.dtype)
        image_embeds, deepstack_image_embeds = model.visual(pixel_values, grid_thw=image_grid_thw)
        n_image_tokens = (input_ids == model.config.image_token_id).sum().item()
        n_image_features = image_embeds.shape[0]
        if n_image_tokens != n_image_features:
            raise ValueError(
                f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}"
            )
        # Expand the placeholder-token mask to embedding width for masked_scatter.
        mask = input_ids == model.config.image_token_id
        mask_unsqueezed = mask.unsqueeze(-1)
        mask_expanded = mask_unsqueezed.expand_as(inputs_embeds)
        image_mask = mask_expanded.to(inputs_embeds.device)
        image_embeds = image_embeds.to(inputs_embeds.device, inputs_embeds.dtype)
        inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds)
    if pixel_values_videos is not None:
        pixel_values_videos = pixel_values_videos.type(model.visual.dtype)
        video_embeds, deepstack_video_embeds = model.visual(pixel_values_videos, grid_thw=video_grid_thw)
        n_video_tokens = (input_ids == model.config.video_token_id).sum().item()
        n_video_features = video_embeds.shape[0]
        if n_video_tokens != n_video_features:
            raise ValueError(
                f"Video features and video tokens do not match: tokens: {n_video_tokens}, features {n_video_features}"
            )
        mask = input_ids == model.config.video_token_id
        mask_unsqueezed = mask.unsqueeze(-1)
        mask_expanded = mask_unsqueezed.expand_as(inputs_embeds)
        video_mask = mask_expanded.to(inputs_embeds.device)
        video_embeds = video_embeds.to(inputs_embeds.device, inputs_embeds.dtype)
        inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds)
    visual_pos_masks = None
    deepstack_visual_embeds = None
    if image_mask is not None and video_mask is not None:
        # aggregate visual_pos_masks and deepstack_visual_embeds
        # Drop the embedding dim: (batch, seq, dim) masks -> (batch, seq) position masks.
        image_mask = image_mask[..., 0]
        video_mask = video_mask[..., 0]
        visual_pos_masks = image_mask | video_mask
        deepstack_visual_embeds = []
        # Per visual position, mark whether it came from an image or a video, then
        # interleave the per-layer deepstack embeddings accordingly.
        image_mask_joint = image_mask[visual_pos_masks]
        video_mask_joint = video_mask[visual_pos_masks]
        for img_embed, vid_embed in zip(deepstack_image_embeds, deepstack_video_embeds, strict=False):
            embed_joint = img_embed.new_zeros(visual_pos_masks.sum(), img_embed.shape[-1]).to(img_embed.device)
            embed_joint[image_mask_joint, :] = img_embed
            embed_joint[video_mask_joint, :] = vid_embed
            deepstack_visual_embeds.append(embed_joint)
    elif image_mask is not None:
        image_mask = image_mask[..., 0]
        visual_pos_masks = image_mask
        deepstack_visual_embeds = deepstack_image_embeds
    elif video_mask is not None:
        video_mask = video_mask[..., 0]
        visual_pos_masks = video_mask
        deepstack_visual_embeds = deepstack_video_embeds
    if pixel_values is None and pixel_values_videos is None:
        # No vision inputs in this batch: run the vision tower on a tiny dummy image and
        # add exactly zero (0.0 * mean) to the embeddings — presumably to keep the visual
        # tower parameters in the autograd graph (avoids unused-parameter issues); confirm.
        config = model.config.vision_config
        patch_dim = config.in_channels * config.temporal_patch_size * config.patch_size**2
        pixel_values = torch.zeros((16, patch_dim), dtype=inputs_embeds.dtype, device=inputs_embeds.device)
        image_grid_thw = torch.tensor([[1, 4, 4]], dtype=torch.long, device=inputs_embeds.device)
        image_embeds, dummy_deepstack_image_embeds = model.visual(pixel_values, grid_thw=image_grid_thw)
        inputs_embeds += 0.0 * image_embeds.mean()
        for emb in dummy_deepstack_image_embeds or []:
            inputs_embeds += 0.0 * emb.mean()
    if attention_mask is not None:
        attention_mask = attention_mask.to(inputs_embeds.device)
    return {
        "inputs_embeds": inputs_embeds,
        "attention_mask": attention_mask,
        "visual_pos_masks": visual_pos_masks,
        "deepstack_visual_embeds": deepstack_visual_embeds,
    }
@dataclass
class Qwen3VLCausalLMOutputForPPO(Qwen3VLCausalLMOutputWithPast):
    """Qwen3-VL output extended with the per-token quantities PPO training consumes."""

    # Per-token log-probabilities produced by the fused/triton backends.
    log_probs: Optional[torch.FloatTensor] = None
    # Per-token entropy produced by the fused/triton backends.
    entropy: Optional[torch.FloatTensor] = None
def qwen3_vl_base_forward(
    self: "Qwen3VLForConditionalGeneration",
    input_ids: torch.LongTensor,
    attention_mask: Optional[torch.Tensor] = None,
    pixel_values: Optional[torch.FloatTensor] = None,
    pixel_values_videos: Optional[torch.FloatTensor] = None,
    image_grid_thw: Optional[torch.LongTensor] = None,
    video_grid_thw: Optional[torch.LongTensor] = None,
    **kwargs,
):
    """Run the language model on embeddings that already contain the spliced visual features.

    The multimodal inputs are folded into a single kwargs dict first so that lora-wrapped
    modules do not receive multiple keyword arguments.
    """
    embed_kwargs = _get_input_embeds(
        self,
        input_ids,
        attention_mask,
        pixel_values,
        pixel_values_videos,
        image_grid_thw,
        video_grid_thw,
    )
    kwargs.update(embed_kwargs)
    # input_ids=None because inputs_embeds (inside kwargs) already carries the tokens.
    return self.language_model(input_ids=None, **kwargs)
def forward_with_normal_backend(
    self: "Qwen3VLForConditionalGeneration",
    input_ids: torch.LongTensor = None,
    labels: Optional[torch.LongTensor] = None,
    temperature: float = 1.0,
    **kwargs,
) -> "Qwen3VLCausalLMOutputForPPO":
    """Plain forward returning raw logits.

    ``labels`` and ``temperature`` are accepted for signature parity with the fused
    backends but are not used on this path.
    """
    model_out = self.model(input_ids, **kwargs)
    last_hidden = model_out[0]
    return Qwen3VLCausalLMOutputForPPO(
        logits=self.lm_head(last_hidden),
        hidden_states=model_out.hidden_states,
    )
def forward_with_torch_backend(
    self: "Qwen3VLForConditionalGeneration",
    input_ids: torch.LongTensor = None,
    labels: Optional[torch.LongTensor] = None,
    temperature: float = 1.0,
    **kwargs,
) -> "Qwen3VLCausalLMOutputForPPO":
    """Forward pass computing per-token log-probs and entropy via the fused torch kernel.

    Targets are the labels (or, failing that, the input ids) rolled left by one so each
    position is scored against the next token.
    """
    from verl.utils.experimental.torch_functional import FusedLinearForPPO

    model_out = self.model(input_ids, **kwargs)
    last_hidden = model_out[0]

    # Loss calculations
    target = labels if labels is not None else input_ids
    if target is None:
        raise RuntimeError("To use forward_with_torch_backend, either labels or input_ids must be provided.")
    shifted_targets = torch.roll(target, shifts=-1, dims=-1)

    log_probs, entropy = FusedLinearForPPO().forward(
        hidden_states=last_hidden,
        vocab_weights=self.lm_head.weight,
        input_ids=shifted_targets,
        temperature=temperature,
    )
    return Qwen3VLCausalLMOutputForPPO(
        log_probs=log_probs,
        entropy=entropy,
        hidden_states=model_out.hidden_states,
    )
def forward_with_triton_backend(
    self: "Qwen3VLForConditionalGeneration",
    input_ids: torch.LongTensor = None,
    labels: Optional[torch.LongTensor] = None,
    temperature: float = 1.0,
    **kwargs,
) -> "Qwen3VLCausalLMOutputForPPO":
    """Forward pass computing per-token log-probs and entropy via the Triton kernel.

    Mirrors forward_with_torch_backend but dispatches to linear_cross_entropy.
    """
    from verl.utils.kernel.linear_cross_entropy import linear_cross_entropy

    model_out = self.model(input_ids, **kwargs)
    last_hidden = model_out[0]

    # Loss calculations
    target = labels if labels is not None else input_ids
    if target is None:
        raise RuntimeError("To use forward_with_triton_backend, either labels or input_ids must be provided.")
    shifted_targets = torch.roll(target, shifts=-1, dims=-1)

    log_probs, entropy = linear_cross_entropy(
        last_hidden,
        self.lm_head.weight,
        shifted_targets,
        temperature,
        "none",
    )
    return Qwen3VLCausalLMOutputForPPO(
        log_probs=log_probs,
        entropy=entropy,
        hidden_states=model_out.hidden_states,
    )
def patch_qwen3_vl_moe_sparse_moe_block_forward():
    """
    Monkey patch to fix a bug in transformers 4.57.3 where Qwen3VLMoeTextSparseMoeBlock.forward
    incorrectly uses torch.zeros_like(hidden_states) instead of torch.zeros_like(router_logits)
    when creating router_weights (line 148 in modeling_qwen3_vl_moe.py).
    This is a minimal fix that only changes the problematic line while keeping the rest of the
    original implementation intact.
    """
    try:
        from transformers.models.qwen3_vl_moe.modeling_qwen3_vl_moe import Qwen3VLMoeTextSparseMoeBlock
    except ImportError:
        # Model not available, skip patching
        return

    # Store the original forward method for reference
    original_forward = Qwen3VLMoeTextSparseMoeBlock.forward

    @functools.wraps(original_forward)
    def patched_forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Flatten (batch, seq, hidden) -> (batch*seq, hidden) for routing.
        batch_size = hidden_states.shape[0]
        hidden_states = hidden_states.reshape(-1, self.hidden_size)
        router_logits = self.gate(hidden_states)
        # Softmax in float32 for numerical stability, then keep the top-k experts.
        routing_weights = torch.nn.functional.softmax(router_logits, dim=-1, dtype=torch.float)
        routing_weights, router_indices = torch.topk(routing_weights, self.top_k, dim=-1)
        # Renormalize so the selected experts' weights sum to 1.
        routing_weights = routing_weights / routing_weights.sum(dim=-1, keepdim=True)
        # BUG FIX: Original code incorrectly uses hidden_states here, should use router_logits
        routing_weights = routing_weights.to(router_logits.dtype)
        router_weights = torch.zeros_like(router_logits).scatter_(1, router_indices, routing_weights)
        hidden_states = hidden_states.reshape(batch_size, -1, self.hidden_size)
        routed_out = self.experts(hidden_states, router_weights, router_indices)
        return routed_out

    # Apply the patch
    Qwen3VLMoeTextSparseMoeBlock.forward = patched_forward
    logger.info("Monkey patched Qwen3VLMoeTextSparseMoeBlock.forward to fix router_weights bug")
| {
"repo_id": "verl-project/verl",
"file_path": "verl/models/transformers/qwen3_vl.py",
"license": "Apache License 2.0",
"lines": 327,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:tests/utils/test_groupwise.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
# Set before any verl import: verl's device resolution reads VERL_FORCE_DEVICE at call
# time (see verl.utils.groupwise._resolve_device), so this forces CPU for the tests.
os.environ.setdefault("VERL_FORCE_DEVICE", "cpu")  # ensure CPU for tests
import numpy as np
import pytest
import torch
from verl.utils import as_torch_index, group_mean_std
def test_as_torch_index_basic_integers():
    """Integer labels map to contiguous long group ids on CPU."""
    idx = as_torch_index([2, 2, 5, 7, 5, 2])
    assert idx.dtype == torch.long
    assert idx.device.type == "cpu"
    # Values should be contiguous 0..G-1, keeping equal labels equal
    values = idx.tolist()
    assert values[0] == values[1]
    assert len(torch.unique(idx)) == 3  # {2,5,7} -> 3 groups
def test_as_torch_index_near_integer_floats():
    """Floats within 1e-6 of an integer are rounded before factorization."""
    labels = np.array([1.0000001, 2.0, 1.0, 3.0000000001], dtype=np.float64)
    idx = as_torch_index(labels)
    assert idx.dtype == torch.long
    assert len(torch.unique(idx)) == 3  # {1,2,3}
def test_as_torch_index_factorization_mixed():
    """Mixed-type labels factorize without coercing strings to numbers."""
    idx = as_torch_index(["a", "b", "a", "c", "0042", 42])
    groups = idx.tolist()
    # "0042" and 42 should NOT be the same group (strings are not coerced here)
    assert groups[4] != groups[5]
    assert len(torch.unique(idx)) == 5
def test_group_mean_std_simple():
    """Per-group mean/std/count on a two-group example, incl. the singleton fallback."""
    # groups: 0 -> [1, 3], 1 -> [2]
    scores = torch.tensor([1.0, 2.0, 3.0], dtype=torch.float32)
    gidx = as_torch_index([0, 1, 0])
    mean_g, std_g, cnt_g = group_mean_std(scores, gidx)
    # group 0: mean = (1+3)/2 = 2
    # sample std (unbiased) = sqrt( (sum(x^2) - (sum(x)^2)/n) / (n-1) )
    #                       = sqrt( ((1^2+3^2) - (1+3)^2/2) / (2-1) ) = sqrt((10 - 8) / 1) = sqrt(2)
    assert torch.allclose(mean_g, torch.tensor([2.0, 0.0]))
    assert torch.allclose(cnt_g, torch.tensor([2.0, 1.0]))
    # singleton group -> std = 1.0 (and mean = 0.0 by convention)
    assert mean_g[1].item() == 0.0
    assert std_g[1].item() == 1.0
    assert pytest.approx(std_g[0].item(), rel=1e-6) == (2.0**0.5)
def test_group_mean_std_empty():
    """Empty inputs yield empty per-group statistics."""
    mean_g, std_g, cnt_g = group_mean_std(
        torch.tensor([], dtype=torch.float32),
        torch.tensor([], dtype=torch.long),
    )
    assert mean_g.numel() == 0
    assert std_g.numel() == 0
    assert cnt_g.numel() == 0
def test_group_mean_std_default_device_no_force_env(monkeypatch):
    """
    Regression test:
    - group_mean_std(device=None) must not pass a device *module* (e.g., torch.cuda)
      into Tensor.to(device=...), which crashes with:
        TypeError: to() received an invalid combination of arguments - got (..., device=module, ...)
    """
    # Simulate a non-pytest environment (training code path) while keeping the test CPU-only.
    # Both env vars must be gone so _resolve_device falls through to get_device_name().
    monkeypatch.delenv("VERL_FORCE_DEVICE", raising=False)
    monkeypatch.delenv("PYTEST_CURRENT_TEST", raising=False)

    # Force device selection to CPU even if CUDA is available on the test machine.
    import verl.utils.device as device_mod

    monkeypatch.setattr(device_mod, "is_cuda_available", False)
    monkeypatch.setattr(device_mod, "is_npu_available", False)

    scores = torch.tensor([1.0, 2.0, 3.0], dtype=torch.float32)
    gidx = torch.tensor([0, 1, 0], dtype=torch.long)
    mean_g, std_g, cnt_g = group_mean_std(scores, gidx)
    # All outputs must land on CPU (and the call itself must not raise).
    assert mean_g.device.type == "cpu"
    assert std_g.device.type == "cpu"
    assert cnt_g.device.type == "cpu"
| {
"repo_id": "verl-project/verl",
"file_path": "tests/utils/test_groupwise.py",
"license": "Apache License 2.0",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:verl/utils/groupwise.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Group-wise helpers for RL training utilities.
Public API:
- as_torch_index(index, device=None) -> torch.LongTensor
- group_mean_std(scores, gidx, eps=1e-6, device=None) -> (mean_g, std_g, count_g)
Default device policy:
- If `device` is None:
* In pytest (detected by env "PYTEST_CURRENT_TEST"): use CPU.
* Else if CUDA is available: use CUDA.
* Else: use CPU.
- You can override via env "VERL_FORCE_DEVICE" (e.g., "cuda:0" / "cpu").
Notes:
- as_torch_index: canonicalizes arbitrary group labels to a contiguous 1-D torch.long
tensor in range [0..G-1]. Robust to torch/numpy/list/tuple, ints/floats/bools,
numeric strings, UUIDs, mixed object arrays. Near-integer floats (|x-round(x)|<=1e-6)
are rounded; otherwise factorization is applied.
- group_mean_std: pure-PyTorch per-group mean/std with Bessel correction for variance
(denominator max(count-1, 1)). Singleton groups fallback to mean=0, std=1 for
compatibility with common “native” conventions.
"""
from __future__ import annotations
import os
from typing import Any, Optional
import numpy as np
import torch
from verl.utils.device import get_device_name
__all__ = ["as_torch_index", "group_mean_std"]
def _resolve_device(explicit: Optional[torch.device | str]) -> torch.device:
    """
    Pick the torch device per the module-docstring policy.

    Order of precedence:
      1) the `explicit` argument
      2) the VERL_FORCE_DEVICE environment variable
      3) CPU when running under pytest (PYTEST_CURRENT_TEST is set)
      4) whatever get_device_name() reports
    """
    if explicit is not None:
        return torch.device(explicit)
    env_override = os.getenv("VERL_FORCE_DEVICE")
    if env_override:
        return torch.device(env_override)
    # pytest exposes the current test via this env var — treat its presence as "in tests".
    if os.environ.get("PYTEST_CURRENT_TEST") is not None:
        return torch.device("cpu")
    return torch.device(get_device_name())
def _to_1d_numpy_object_array(x: Any) -> np.ndarray:
    """Coerce arbitrary input to a 1-D numpy array, degrading to object dtype on failure."""
    try:
        out = np.asarray(x)
    except Exception:
        try:
            out = np.array(list(x), dtype=object)
        except Exception:
            # Not even iterable: wrap the single value.
            out = np.array([x], dtype=object)
    return out if out.ndim == 1 else out.reshape(-1)
def as_torch_index(index: Any, device: torch.device | str | None = None) -> torch.Tensor:
    """
    Convert arbitrary group labels to a contiguous 1-D torch.long tensor (0..G-1).

    Args:
        index: Any iterable of labels or tensor/ndarray.
        device: Target device; if None, resolved via _resolve_device().

    Returns:
        torch.LongTensor with shape (N,)
    """
    target = _resolve_device(device)

    # ---------- Fast path: torch.Tensor ----------
    if isinstance(index, torch.Tensor):
        t = index.reshape(-1)
        if t.dtype in (
            torch.int64,
            torch.int32,
            torch.int16,
            torch.int8,
            getattr(torch, "uint8", torch.uint8),
            torch.bool,
        ):
            return t.to(device=target, dtype=torch.long)
        if t.dtype in (torch.float16, torch.float32, torch.float64, torch.bfloat16):
            # Near-integer floats (|x - round(x)| <= 1e-6) are rounded to integer labels.
            t64 = t.to(dtype=torch.float64)
            rounded = torch.round(t64)
            if torch.allclose(t64, rounded, rtol=0.0, atol=1e-6):
                return rounded.to(device=target, dtype=torch.long)
            # Genuinely fractional floats: stringify and fall through to factorization.
            arr = np.array([str(x.item()) for x in t], dtype=object)
        else:
            # Exotic tensor dtypes: stringify element-wise, then factorize.
            arr = np.array([str(x.item()) if hasattr(x, "item") else str(x) for x in t], dtype=object)
    else:
        # ---------- Non-torch: go through numpy ----------
        arr = _to_1d_numpy_object_array(index)

        # Pure integer dtypes pass straight through.
        # (Note: numpy bool is NOT np.issubdtype(..., np.integer); bool arrays are
        # actually handled by the astype(np.int64) coercion below.)
        if arr.dtype != object and np.issubdtype(arr.dtype, np.integer):
            return torch.from_numpy(arr.astype(np.int64, copy=False)).to(device=target)

        # Floats nearly equal to integers
        if arr.dtype != object and np.issubdtype(arr.dtype, np.floating):
            arr64 = arr.astype(np.float64, copy=False)
            rounded = np.rint(arr64)
            if np.allclose(arr64, rounded, rtol=0.0, atol=1e-6):
                return torch.from_numpy(rounded.astype(np.int64)).to(device=target)
            # fall through

        # Try numeric string coercion (e.g., ["1", "2"] -> [1, 2])
        try:
            coerced = arr.astype(np.int64)
            return torch.from_numpy(coerced).to(device=target)
        except Exception:
            pass

        if arr.dtype != object:
            arr = arr.astype(object)

    # ---------- Factorization (UUIDs / mixed types / arbitrary labels) ----------
    try:
        _, inv = np.unique(arr, return_inverse=True)
    except Exception:
        # Mixed, non-comparable types: compare by string representation instead.
        sarr = np.array([str(x) for x in arr], dtype=object)
        _, inv = np.unique(sarr, return_inverse=True)
    inv = inv.astype(np.int64, copy=False)
    return torch.from_numpy(inv).to(device=target)
@torch.no_grad()
def group_mean_std(
    scores: torch.Tensor,
    gidx: torch.Tensor,
    eps: float = 1e-6,
    device: torch.device | str | None = None,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    Compute per-group mean/std/count in pure PyTorch.

    mean_g = sum / count
    std_g  = sqrt( max( (sum2 - sum^2/count) / max(count-1, 1), eps ) )
    Singleton groups fallback to mean=0, std=1.

    Args:
        scores: (N,) float tensor.
        gidx  : (N,) long/int tensor with group indices (0..G-1).
        eps   : Numerical floor for variance.
        device: Target device; if None, resolved via _resolve_device().

    Returns:
        mean_g: (G,) float32
        std_g : (G,) float32
        count : (G,) float32

    Raises:
        ValueError: if scores and gidx have different lengths.
    """
    target = _resolve_device(device)
    scores = scores.reshape(-1).to(device=target, dtype=torch.float32)
    gidx = gidx.reshape(-1).to(device=target, dtype=torch.long)
    if scores.numel() != gidx.numel():
        raise ValueError(f"scores and gidx length mismatch: {scores.numel()} vs {gidx.numel()}")
    # Number of groups is inferred from the max index (assumes contiguous 0..G-1 ids).
    G = int(torch.max(gidx).item()) + 1 if gidx.numel() > 0 else 0
    if G == 0:
        # Return empty tensors on the selected device
        empty = torch.empty(0, device=target, dtype=torch.float32)
        return empty, empty, empty
    # Accumulate count, sum, and sum of squares per group via index_add_.
    ones = torch.ones_like(scores, dtype=torch.float32)
    count = torch.zeros(G, device=target, dtype=torch.float32).index_add_(0, gidx, ones)
    s1 = torch.zeros(G, device=target, dtype=torch.float32).index_add_(0, gidx, scores)
    s2 = torch.zeros(G, device=target, dtype=torch.float32).index_add_(0, gidx, scores * scores)
    # clamp_min(1.0) guards empty groups (count 0) against division by zero.
    mean = s1 / count.clamp_min(1.0)
    var_num = s2 - (s1 * s1) / count.clamp_min(1.0)
    # Bessel correction: divide by max(count-1, 1).
    denom = (count - 1.0).clamp_min(1.0)
    var = var_num / denom
    std = torch.sqrt(torch.clamp(var, min=eps))
    # Singleton groups: mean=0, std=1
    single = count <= 1.0
    if torch.any(single):
        mean = mean.clone()
        std = std.clone()
        mean[single] = 0.0
        std[single] = 1.0
    return mean, std, count
| {
"repo_id": "verl-project/verl",
"file_path": "verl/utils/groupwise.py",
"license": "Apache License 2.0",
"lines": 180,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/models/transformers/glm4v.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import itertools
import logging
import os
from dataclasses import dataclass
from typing import Optional
import torch
import torch.distributed as dist
from transformers.modeling_flash_attention_utils import _flash_attention_forward, fa_peft_integration_check
from transformers.models.glm4v.modeling_glm4v import (
Glm4vCausalLMOutputWithPast,
Glm4vForConditionalGeneration,
Glm4vTextAttention,
)
from transformers.utils import is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10
from verl.utils.device import is_npu_available
from verl.utils.ulysses import (
gather_heads_scatter_seq,
gather_seq_scatter_heads,
get_ulysses_sequence_parallel_group,
get_ulysses_sequence_parallel_world_size,
validate_ulysses_config,
)
# Module logger; verbosity controlled by the VERL_LOGGING_LEVEL env var (default WARN).
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))

# Resolve flash-attention entry points once at import time.
if is_flash_attn_2_available():
    from flash_attn import flash_attn_func, flash_attn_varlen_func

    # Feature-detect optional kwargs so later calls can adapt to the installed flash-attn version.
    _flash_supports_window_size = "window_size" in inspect.signature(flash_attn_func).parameters
    _flash_supports_deterministic = "deterministic" in inspect.signature(flash_attn_func).parameters
    _flash_use_top_left_mask = not is_flash_attn_greater_or_equal_2_10()

# On Ascend NPU, rebind the same names to the NPU flash-attention implementations
# (takes precedence over the CUDA flash-attn symbols imported above, if any).
if is_npu_available:
    from transformers.integrations.npu_flash_attention import npu_flash_attn_func as flash_attn_func
    from transformers.integrations.npu_flash_attention import npu_flash_attn_varlen_func as flash_attn_varlen_func
    from transformers.modeling_flash_attention_utils import flash_attn_supports_top_left_mask

    _flash_supports_window_size = "window_size" in inspect.signature(flash_attn_func).parameters
    _flash_supports_deterministic = "deterministic" in inspect.signature(flash_attn_func).parameters
    _flash_use_top_left_mask = flash_attn_supports_top_left_mask()

# Opt-in deterministic flash-attention via environment variable.
_flash_deterministic_enabled = os.getenv("FLASH_ATTENTION_DETERMINISTIC", "0") == "1"
def get_rope_index(
    processor,
    input_ids: torch.Tensor,
    image_grid_thw: Optional[torch.LongTensor] = None,
    video_grid_thw: Optional[torch.LongTensor] = None,
    attention_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    """
    Gets the 3D (temporal/height/width) rope position ids for GLM4V in padding-free format.

    The batch dim has been removed and the input_ids should be a 1D tensor representing
    a single example.

    Args:
        processor: HF processor; ``image_processor.merge_size`` and the special
            image/video tokens are read from it.
        input_ids: 1D token ids of shape ``(seqlen,)``.
        image_grid_thw: per-image ``(t, h, w)`` patch grids, shape ``(num_images, 3)``.
        video_grid_thw: per-video ``(t, h, w)`` patch grids, shape ``(num_videos, 3)``.
        attention_mask: optional mask over ``input_ids``; positions where it is 0
            keep their initialized value of 1.

    Returns:
        Position ids of shape ``(3, seqlen)``.
    """
    spatial_merge_size = processor.image_processor.merge_size
    image_token_id = processor.tokenizer.convert_tokens_to_ids("<|image|>")
    video_start_token_id = processor.tokenizer.convert_tokens_to_ids("<|begin_of_video|>")
    video_end_token_id = processor.tokenizer.convert_tokens_to_ids("<|end_of_video|>")
    if input_ids is not None and (image_grid_thw is not None or video_grid_thw is not None):
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        position_ids = torch.ones(3, input_ids.size(0), dtype=input_ids.dtype, device=input_ids.device)  # (3, seqlen)
        image_index, video_index = 0, 0
        video_group_index = 0
        input_ids_filtered = input_ids[attention_mask == 1]
        input_tokens = input_ids_filtered.tolist()
        # Classify every (unmasked) token as "image", "video", or "text".
        # Image placeholder tokens between <|begin_of_video|> and <|end_of_video|>
        # count as video tokens.
        input_token_type = []
        video_check_flg = False
        for token in input_tokens:
            if token == video_start_token_id:
                video_check_flg = True
            elif token == video_end_token_id:
                video_check_flg = False
            if token == image_token_id and not video_check_flg:
                input_token_type.append("image")
            elif token == image_token_id and video_check_flg:
                input_token_type.append("video")
            else:
                input_token_type.append("text")
        # Collapse the per-token labels into contiguous (type, start, end) runs.
        input_type_group = []
        for key, group in itertools.groupby(enumerate(input_token_type), lambda x: x[1]):
            group = list(group)
            start_index = group[0][0]
            end_index = group[-1][0] + 1
            input_type_group.append((key, start_index, end_index))
        llm_pos_ids_list = []
        video_frame_num = 1
        for modality_type, start_idx, end_idx in input_type_group:
            # Each run continues numbering from one past the max position id so far.
            st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
            if modality_type == "image":
                t, h, w = (
                    image_grid_thw[image_index][0],
                    image_grid_thw[image_index][1],
                    image_grid_thw[image_index][2],
                )
                # h/w grids shrink by the spatial merge factor of the vision tower.
                llm_grid_t, llm_grid_h, llm_grid_w = (
                    t.item(),
                    h.item() // spatial_merge_size,
                    w.item() // spatial_merge_size,
                )
                # Broadcast each axis index across the flattened (t, h, w) patch grid.
                t_index = torch.arange(llm_grid_t).view(-1, 1).expand(-1, llm_grid_h * llm_grid_w).flatten()
                h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(llm_grid_t, -1, llm_grid_w).flatten()
                w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(llm_grid_t, llm_grid_h, -1).flatten()
                llm_pos_ids_list.append(torch.stack([t_index, h_index, w_index]) + st_idx)
                image_index += 1
                video_frame_num = 1
            elif modality_type == "video":
                # Each contiguous "video" run corresponds to one frame; the temporal
                # index comes from video_frame_num, not from the grid's t dimension.
                t, h, w = (
                    video_frame_num,
                    video_grid_thw[video_index][1],
                    video_grid_thw[video_index][2],
                )
                llm_grid_t, llm_grid_h, llm_grid_w = (
                    t,
                    h.item() // spatial_merge_size,
                    w.item() // spatial_merge_size,
                )
                for t_idx in range(llm_grid_t):
                    t_index = torch.tensor(t_idx).view(-1, 1).expand(-1, llm_grid_h * llm_grid_w).flatten()
                    h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(1, -1, llm_grid_w).flatten()
                    w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(1, llm_grid_h, -1).flatten()
                    llm_pos_ids_list.append(torch.stack([t_index, h_index, w_index]) + st_idx)
                # Advance to the next video once all of its frame groups are consumed.
                video_group_index += 1
                if video_group_index >= video_grid_thw[video_index][0]:
                    video_index += 1
                    video_group_index = 0
                video_frame_num += 1
            else:
                # Text run: all three axes share ordinary sequential position ids.
                text_len = end_idx - start_idx
                llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
                video_frame_num = 1
        # Scatter the computed positions back into the unmasked slots.
        llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1)
        position_ids[..., attention_mask == 1] = llm_positions.to(position_ids.device)
    else:
        # Text-only fallback: plain cumulative positions replicated on all 3 axes.
        if attention_mask is not None:
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            position_ids = position_ids.unsqueeze(0).expand(3, -1).to(input_ids.device)
        else:
            position_ids = torch.arange(input_ids.shape[0], device=input_ids.device).view(1, -1).expand(3, -1)
    return position_ids
def prepare_fa2_from_position_ids(
    query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, position_ids: torch.Tensor
):
    """Convert padded (batch, seq, heads, dim) q/k/v into flash-attn varlen layout.

    Packed sequences are delimited by positions where ``position_ids`` restarts
    at 0; those boundaries become the ``cu_seqlens`` offsets.

    Returns:
        (query, key, value, (cu_seqlens_q, cu_seqlens_k), (max_seqlen_q, max_seqlen_k))
        with q/k/v flattened to (total_tokens, heads, dim).
    """
    assert position_ids.ndim == 2  # (batch_size, seq_length)
    # Collapse batch and sequence dims into a single token axis.
    query, key, value = (t.contiguous().view(-1, t.size(-2), t.size(-1)) for t in (query, key, value))
    flat_positions = position_ids.view(-1)
    # Sequence starts are wherever the position counter resets to zero;
    # append the total token count to close the final sequence.
    seq_starts = (flat_positions == 0).nonzero().view(-1).to(torch.int32)
    total_tokens = torch.tensor(flat_positions.size(), device=flat_positions.device, dtype=torch.int32)
    cu_seqlens = torch.cat((seq_starts, total_tokens))
    # Longest packed segment; needed by flash_attn_varlen_func.
    max_length = cu_seqlens.diff().max()
    return (query, key, value, (cu_seqlens, cu_seqlens), (max_length, max_length))
def _custom_flash_attention_forward(
    query_states: torch.Tensor,
    key_states: torch.Tensor,
    value_states: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    query_length: int,
    is_causal: bool = True,
    position_ids: Optional[torch.Tensor] = None,
    use_top_left_mask: bool = False,
    deterministic: Optional[bool] = None,
    **kwargs,
):
    """
    Patches flash attention forward to handle 2D mrope position ids and Ulysses
    sequence parallelism.

    Args:
        query_states/key_states/value_states: (batch, seq/sp, num_heads, head_dim).
        attention_mask: optional padding mask forwarded to HF's flash-attention path.
        query_length: query sequence length of this rank's shard.
        is_causal: whether to apply a causal mask.
        position_ids: (batch, seq/sp) position ids; non-monotonic ids indicate
            packed sequences and select the varlen flash-attention path.
        use_top_left_mask: compatibility flag for flash-attn < 2.1 causal masking.
        deterministic: force deterministic backward; defaults to the
            FLASH_ATTENTION_DETERMINISTIC environment switch.

    Returns:
        attn_output of shape (batch, seq/sp, num_heads, head_dim).
    """
    flash_kwargs = {}
    if _flash_supports_deterministic:
        flash_kwargs["deterministic"] = deterministic if deterministic is not None else _flash_deterministic_enabled
    if kwargs.get("softcap") is not None:
        flash_kwargs["softcap"] = kwargs.pop("softcap")
    query_states, key_states, value_states = fa_peft_integration_check(
        query_states, key_states, value_states, target_dtype=torch.bfloat16
    )
    # Fix: sp_size was previously bound only inside the `position_ids is not None`
    # branch, so the `if sp_size > 1` check after attention raised NameError when
    # position_ids was None. Default to 1 (no sequence parallelism applied).
    sp_size = 1
    if position_ids is not None:
        assert position_ids.ndim == 2  # (batch_size, seq_length / sp_size)
        sp_size = get_ulysses_sequence_parallel_world_size()
        if sp_size > 1:
            # qkv: (batch_size, seq_length / sp_size, num_head, head_size)
            validate_ulysses_config(query_states.size(2), sp_size)
            query_states = gather_seq_scatter_heads(query_states, seq_dim=1, head_dim=2)
            key_states = gather_seq_scatter_heads(key_states, seq_dim=1, head_dim=2)
            value_states = gather_seq_scatter_heads(value_states, seq_dim=1, head_dim=2)
            # all_gather fills position_ids_lst in place; its return value is not
            # the gathered tensors, so do not assign it.
            position_ids_lst = [torch.empty_like(position_ids) for _ in range(sp_size)]
            dist.all_gather(position_ids_lst, position_ids, group=get_ulysses_sequence_parallel_group())
            position_ids = torch.cat(position_ids_lst, dim=-1)  # (batch_size, seq_length)
    if position_ids is not None and query_length != 1 and not (torch.diff(position_ids, dim=-1) >= 0).all():
        # Non-monotonic position ids signal packed (varlen) sequences.
        batch_size = query_states.size(0)
        q, k, v, (cu_seqlens_q, cu_seqlens_k), (max_seqlen_q, max_seqlen_k) = prepare_fa2_from_position_ids(
            query_states, key_states, value_states, position_ids
        )
        attn_output = flash_attn_varlen_func(
            q=q,
            k=k,
            v=v,
            cu_seqlens_q=cu_seqlens_q,
            cu_seqlens_k=cu_seqlens_k,
            max_seqlen_q=max_seqlen_q,
            max_seqlen_k=max_seqlen_k,
            dropout_p=kwargs.pop("dropout", 0.0),
            softmax_scale=kwargs.pop("softmax_scale", None),
            causal=is_causal,
            **flash_kwargs,
        )
        attn_output = attn_output.view(batch_size, -1, attn_output.size(-2), attn_output.size(-1))
    else:
        attn_output = _flash_attention_forward(
            query_states,
            key_states,
            value_states,
            attention_mask,
            query_length,
            is_causal=is_causal,
            use_top_left_mask=use_top_left_mask,
            deterministic=deterministic,
            **kwargs,
        )  # do not pass position_ids to old flash_attention_forward
    if sp_size > 1:
        # Undo the Ulysses all-to-all: back to (batch_size, seq/sp, num_head, head_size).
        attn_output = gather_heads_scatter_seq(attn_output, head_dim=2, seq_dim=1)
    return attn_output
def glm4v_attn_forward(
    self: "Glm4vTextAttention",
    hidden_states: torch.Tensor,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,  # will become mandatory in v4.46
    **kwargs,
) -> tuple[torch.Tensor, None]:
    """Flash-attention forward for GLM4V text attention.

    Projects hidden states to q/k/v, applies multimodal (mrope) rotary embeddings,
    and delegates to _custom_flash_attention_forward, which handles Ulysses
    sequence parallelism and packed (varlen) sequences via position_ids.

    Returns:
        (attn_output, None): the function returns a 2-tuple; attention weights
        are never computed, so the second element is always None.
    """
    from transformers.models.glm4v.modeling_glm4v import apply_multimodal_rotary_pos_emb, repeat_kv
    bsz, q_len, _ = hidden_states.size()  # q_len = seq_length / sp_size
    query_states = self.q_proj(hidden_states)  # (batch_size, seq_length / sp_size, num_heads * head_size)
    key_states = self.k_proj(hidden_states)
    value_states = self.v_proj(hidden_states)
    # Split into heads: (batch, heads, seq/sp, head_dim).
    query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
    key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
    value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
    # Because the input can be padded, the absolute sequence length depends on the max position id.
    cos, sin = position_embeddings
    query_states, key_states = apply_multimodal_rotary_pos_emb(
        query_states, key_states, cos, sin, self.rope_scaling["mrope_section"]
    )
    # Replicate KV heads so grouped-query heads match the query head count.
    key_states = repeat_kv(key_states, self.num_key_value_groups)
    value_states = repeat_kv(value_states, self.num_key_value_groups)
    dropout_rate = 0.0 if not self.training else self.attention_dropout
    # This is before the transpose
    q_len = query_states.shape[2]
    # FA2 uses non-transposed inputs
    query_states = query_states.transpose(1, 2)
    key_states = key_states.transpose(1, 2)
    value_states = value_states.transpose(1, 2)
    attn_output = _custom_flash_attention_forward(
        query_states,
        key_states,
        value_states,
        attention_mask,
        query_length=q_len,
        is_causal=getattr(self, "is_causal", True),
        dropout=dropout_rate,
        use_top_left_mask=_flash_use_top_left_mask,
        position_ids=position_ids,  # important: pass position ids
    )  # (batch_size, seq_length / sp_size, num_head, head_size)
    attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
    attn_output = self.o_proj(attn_output)
    return attn_output, None
def _get_input_embeds(
    model: "Glm4vForConditionalGeneration",
    input_ids: torch.LongTensor,
    attention_mask: Optional[torch.Tensor] = None,
    pixel_values: Optional[torch.FloatTensor] = None,
    pixel_values_videos: Optional[torch.FloatTensor] = None,
    image_grid_thw: Optional[torch.LongTensor] = None,
    video_grid_thw: Optional[torch.LongTensor] = None,
):
    """Build input embeddings with image/video features scattered into the
    placeholder token positions.

    Returns:
        (inputs_embeds, attention_mask), with attention_mask moved to the
        embedding device when provided.

    Raises:
        ValueError: if the number of image/video placeholder tokens does not
            match the number of visual feature rows from the vision tower.
    """
    inputs_embeds = model.get_input_embeddings()(input_ids)
    if pixel_values is not None:
        pixel_values = pixel_values.type(model.visual.dtype)
        image_embeds = model.visual(pixel_values, grid_thw=image_grid_thw)
        n_image_tokens = (input_ids == model.config.image_token_id).sum().item()
        n_image_features = image_embeds.shape[0]
        if n_image_tokens != n_image_features:
            raise ValueError(
                f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}"
            )
        # Scatter each image feature row into the corresponding image-token slot.
        mask = input_ids == model.config.image_token_id
        mask_unsqueezed = mask.unsqueeze(-1)
        mask_expanded = mask_unsqueezed.expand_as(inputs_embeds)
        image_mask = mask_expanded.to(inputs_embeds.device)
        image_embeds = image_embeds.to(inputs_embeds.device, inputs_embeds.dtype)
        inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds)
    if pixel_values_videos is not None:
        pixel_values_videos = pixel_values_videos.type(model.visual.dtype)
        video_embeds = model.visual(pixel_values_videos, grid_thw=video_grid_thw)
        n_video_tokens = (input_ids == model.config.video_token_id).sum().item()
        n_video_features = video_embeds.shape[0]
        if n_video_tokens != n_video_features:
            raise ValueError(
                f"Video features and video tokens do not match: tokens: {n_video_tokens}, features {n_video_features}"
            )
        # Same scatter scheme for video placeholder tokens.
        mask = input_ids == model.config.video_token_id
        mask_unsqueezed = mask.unsqueeze(-1)
        mask_expanded = mask_unsqueezed.expand_as(inputs_embeds)
        video_mask = mask_expanded.to(inputs_embeds.device)
        video_embeds = video_embeds.to(inputs_embeds.device, inputs_embeds.dtype)
        inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds)
    if pixel_values is None and pixel_values_videos is None:  # handle mixed text-image data
        # Run the vision tower on a tiny dummy patch and add a zero-weighted term,
        # presumably so the visual parameters still participate in autograd for
        # text-only batches — confirm against the trainer's DDP/FSDP setup.
        pixel_values = torch.zeros((16, 1176), dtype=inputs_embeds.dtype, device=inputs_embeds.device)
        image_grid_thw = torch.tensor([[1, 4, 4]], dtype=torch.long, device=inputs_embeds.device)
        image_embeds = model.visual(pixel_values, grid_thw=image_grid_thw)
        inputs_embeds += 0.0 * image_embeds.mean()
    if attention_mask is not None:
        attention_mask = attention_mask.to(inputs_embeds.device)
    return inputs_embeds, attention_mask
def process_position_ids(position_ids: torch.Tensor) -> torch.Tensor:
    """Validate and pass through GLM4V position ids.

    The expected layout is (4, batch_size, seq_length): the text position ids
    concatenated with the 3D vision position ids by default
    (see https://github.com/huggingface/transformers/pull/39447).

    Raises:
        ValueError: if the tensor is not rank-3 with leading dimension 4.
    """
    has_expected_layout = position_ids.ndim == 3 and position_ids.size(0) == 4
    if not has_expected_layout:
        raise ValueError("position_ids should be a 3D tensor of shape (4, batch_size, seq_length).")
    return position_ids
@dataclass
class Glm4vCausalLMOutputForPPO(Glm4vCausalLMOutputWithPast):
    """GLM4V model output extended with per-token log-probs and entropy for PPO."""

    # Log-probabilities of the next-token targets under the model.
    log_probs: Optional[torch.FloatTensor] = None
    # Entropy of the model's output distribution at each position.
    entropy: Optional[torch.FloatTensor] = None
def glm4v_base_forward(
    self: "Glm4vForConditionalGeneration",
    input_ids: torch.LongTensor,
    attention_mask: Optional[torch.Tensor] = None,
    labels: Optional[torch.LongTensor] = None,
    pixel_values: Optional[torch.FloatTensor] = None,
    pixel_values_videos: Optional[torch.FloatTensor] = None,
    image_grid_thw: Optional[torch.LongTensor] = None,
    video_grid_thw: Optional[torch.LongTensor] = None,
    **kwargs,
):
    """Run the language model on precomputed multimodal input embeddings.

    Visual features are merged into the token embeddings first, then the text
    backbone is invoked with ``input_ids=None``. Everything is routed through
    ``kwargs`` so LoRA-wrapped modules see a single keyword-argument dict.
    ``labels`` is accepted for interface parity but not forwarded.
    """
    embeds, merged_mask = _get_input_embeds(
        self, input_ids, attention_mask, pixel_values, pixel_values_videos, image_grid_thw, video_grid_thw
    )
    kwargs["inputs_embeds"] = embeds
    kwargs["attention_mask"] = merged_mask
    return self.language_model(
        input_ids=None,
        **kwargs,
    )
def glm4v_forward(
    self: "Glm4vForConditionalGeneration",
    input_ids: torch.LongTensor,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    pixel_values: Optional[torch.FloatTensor] = None,
    pixel_values_videos: Optional[torch.FloatTensor] = None,
    image_grid_thw: Optional[torch.LongTensor] = None,
    video_grid_thw: Optional[torch.LongTensor] = None,
    **kwargs,
):
    """Thin wrapper over ``self.model`` that validates the 4-row position ids
    (text + t/h/w) via process_position_ids before delegating the forward pass."""
    named_inputs = {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "position_ids": process_position_ids(position_ids),
        "pixel_values": pixel_values,
        "pixel_values_videos": pixel_values_videos,
        "image_grid_thw": image_grid_thw,
        "video_grid_thw": video_grid_thw,
    }
    return self.model(**named_inputs, **kwargs)
def forward_with_normal_backend(
    self: Glm4vForConditionalGeneration,
    input_ids: torch.LongTensor = None,
    labels: Optional[torch.LongTensor] = None,
    temperature: float = 1.0,
    **kwargs,
) -> "Glm4vCausalLMOutputWithPast":
    """Standard forward: run the multimodal backbone and project to full logits.

    ``labels`` and ``temperature`` are accepted for interface parity with the
    fused backends but are not used here.
    """
    model_out = glm4v_forward(self, input_ids, **kwargs)
    last_hidden = model_out[0]
    return Glm4vCausalLMOutputWithPast(
        logits=self.lm_head(last_hidden),
        hidden_states=model_out.hidden_states,
    )
def forward_with_torch_backend(
    self: Glm4vForConditionalGeneration,
    input_ids: torch.LongTensor = None,
    labels: Optional[torch.LongTensor] = None,
    temperature: float = 1.0,
    **kwargs,
) -> tuple | Glm4vCausalLMOutputForPPO:
    """PPO forward using the fused torch LM-head kernel.

    Computes per-token log-probs and entropy directly from hidden states
    without materializing the full logits tensor.

    Raises:
        RuntimeError: if neither ``labels`` nor ``input_ids`` is provided.
    """
    from verl.utils.experimental.torch_functional import FusedLinearForPPO

    outputs = glm4v_forward(self, input_ids, **kwargs)
    last_hidden = outputs[0]
    # Targets are next-token ids: prefer labels, fall back to input_ids.
    target_ids = labels if labels is not None else input_ids
    if target_ids is None:
        raise RuntimeError("To use forward_with_torch_backend, either labels or input_ids must be provided.")
    rolled_labels = torch.roll(target_ids, shifts=-1, dims=-1)
    fused_linear_for_ppo = FusedLinearForPPO()
    log_probs, entropy = fused_linear_for_ppo.forward(
        hidden_states=last_hidden,
        vocab_weights=self.lm_head.weight,
        input_ids=rolled_labels,
        temperature=temperature,
    )
    return Glm4vCausalLMOutputForPPO(
        log_probs=log_probs,
        entropy=entropy,
        hidden_states=outputs.hidden_states,
    )
def forward_with_triton_backend(
    self: Glm4vForConditionalGeneration,
    input_ids: torch.LongTensor = None,
    labels: Optional[torch.LongTensor] = None,
    temperature: float = 1.0,
    **kwargs,
) -> tuple | Glm4vCausalLMOutputForPPO:
    """PPO forward using the Triton linear-cross-entropy kernel.

    Like forward_with_torch_backend but dispatches to the fused Triton kernel
    for log-prob/entropy computation.

    Raises:
        RuntimeError: if neither ``labels`` nor ``input_ids`` is provided.
    """
    from verl.utils.kernel.linear_cross_entropy import linear_cross_entropy

    outputs = glm4v_forward(self, input_ids, **kwargs)
    last_hidden = outputs[0]
    # Next-token targets: prefer labels, fall back to input_ids.
    target_ids = labels if labels is not None else input_ids
    if target_ids is None:
        raise RuntimeError("To use forward_with_triton_backend, either labels or input_ids must be provided.")
    rolled_labels = torch.roll(target_ids, shifts=-1, dims=-1)
    log_probs, entropy = linear_cross_entropy(
        last_hidden,
        self.lm_head.weight,
        rolled_labels,
        temperature,
        "none",
    )
    return Glm4vCausalLMOutputForPPO(
        log_probs=log_probs,
        entropy=entropy,
        hidden_states=outputs.hidden_states,
    )
| {
"repo_id": "verl-project/verl",
"file_path": "verl/models/transformers/glm4v.py",
"license": "Apache License 2.0",
"lines": 449,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/utils/attention_utils.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable
# Lazily-resolved backend implementations; populated on first call to
# _get_attention_functions().
_index_first_axis, _pad_input, _rearrange, _unpad_input = None, None, None, None
def _get_attention_functions() -> tuple[Callable, Callable, Callable, Callable]:
    """Dynamically import attention functions based on available hardware.

    Returns (index_first_axis, pad_input, rearrange, unpad_input).

    Fix: the module-level cache globals were written but never read, so the
    device check and import-resolution ran on every call. Reuse the cached
    implementations once they have been resolved.
    """
    global _index_first_axis, _pad_input, _rearrange, _unpad_input
    # Fast path: functions already resolved on a previous call.
    if _index_first_axis is not None:
        return _index_first_axis, _pad_input, _rearrange, _unpad_input
    from verl.utils.device import is_torch_npu_available

    if is_torch_npu_available(check_device=False):
        from verl.utils.npu_flash_attn_utils import index_first_axis, pad_input, rearrange, unpad_input
    else:
        from flash_attn.bert_padding import index_first_axis, pad_input, rearrange, unpad_input
    _index_first_axis, _pad_input, _rearrange, _unpad_input = index_first_axis, pad_input, rearrange, unpad_input
    return _index_first_axis, _pad_input, _rearrange, _unpad_input
def index_first_axis(*args, **kwargs):
    """Device-dispatching wrapper for ``index_first_axis``.

    Resolves the backend implementation on first use — flash-attn's
    ``bert_padding.index_first_axis`` on CUDA, the transformers NPU
    integration on Ascend (with a fallback to
    ``transformers.modeling_flash_attention_utils._index_first_axis``
    in newer transformers) — and forwards all arguments to it, so callers
    never need to care about the underlying device.
    """
    return _get_attention_functions()[0](*args, **kwargs)
def pad_input(*args, **kwargs):
    """Device-dispatching wrapper for ``pad_input``.

    Resolves the backend implementation on first use — flash-attn's
    ``bert_padding.pad_input`` on CUDA, the transformers NPU integration on
    Ascend (with a fallback to
    ``transformers.modeling_flash_attention_utils._pad_input`` in newer
    transformers) — and forwards all arguments to it, so callers never need
    to care about the underlying device.
    """
    return _get_attention_functions()[1](*args, **kwargs)
def rearrange(*args, **kwargs):
    """Device-dispatching wrapper for ``rearrange``.

    Resolves the backend implementation on first use — flash-attn's
    ``bert_padding.rearrange`` on CUDA, the transformers NPU integration on
    Ascend (falling back to ``einops.rearrange`` when no dedicated NPU
    implementation exists) — and forwards all arguments to it, so callers
    never need to care about the underlying device.
    """
    return _get_attention_functions()[2](*args, **kwargs)
def unpad_input(*args, **kwargs):
    """Device-dispatching wrapper for ``unpad_input``.

    Resolves the backend implementation on first use — flash-attn's
    ``bert_padding.unpad_input`` on CUDA, the transformers NPU integration on
    Ascend (with a fallback to
    ``transformers.modeling_flash_attention_utils._unpad_input`` in newer
    transformers) — and forwards all arguments to it, so callers never need
    to care about the underlying device.
    """
    return _get_attention_functions()[3](*args, **kwargs)
# Public API of this module: only the device-dispatching wrappers.
__all__ = ["index_first_axis", "pad_input", "rearrange", "unpad_input"]
| {
"repo_id": "verl-project/verl",
"file_path": "verl/utils/attention_utils.py",
"license": "Apache License 2.0",
"lines": 73,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/workers/engine/mindspeed/transformer_impl.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
# MindSpeed (Ascend NPU adaptor for Megatron) is an optional dependency;
# `repatch` stays None when it is not installed.
try:
    from mindspeed.megatron_adaptor import repatch
except ImportError:
    repatch = None
from verl.trainer.config import CheckpointConfig
from verl.workers.config import HFModelConfig, McoreEngineConfig, McoreOptimizerConfig
from ..base import EngineRegistry
from ..megatron import MegatronEngineWithLMHead
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
@EngineRegistry.register(model_type="language_model", backend="megatron", device="npu")
class MindspeedEngineWithLMHead(MegatronEngineWithLMHead):
    """Megatron LM-head engine for Ascend NPU, applying MindSpeed repatching."""

    def __init__(
        self,
        model_config: HFModelConfig,
        engine_config: McoreEngineConfig,
        optimizer_config: McoreOptimizerConfig,
        checkpoint_config: CheckpointConfig,
    ):
        super().__init__(model_config, engine_config, optimizer_config, checkpoint_config)
        # Fix: `repatch` is None when mindspeed failed to import at module load;
        # raise an actionable error instead of the opaque
        # "'NoneType' object is not callable" TypeError.
        if repatch is None:
            raise ImportError(
                "mindspeed is required for MindspeedEngineWithLMHead, but "
                "`mindspeed.megatron_adaptor.repatch` could not be imported."
            )
        repatch_config = {"use_flash_attn": True}
        if self.engine_config.context_parallel_size > 1:
            repatch_config["context_parallel_size"] = self.engine_config.context_parallel_size
        repatch(repatch_config)
| {
"repo_id": "verl-project/verl",
"file_path": "verl/workers/engine/mindspeed/transformer_impl.py",
"license": "Apache License 2.0",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/utils/dataset/dataset_utils.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
import torch
from tensordict.tensorclass import NonTensorData
class DatasetPadMode(str, Enum):
    """Padding mode for dataset"""

    RIGHT = "right"
    LEFT_RIGHT = "left_right"
    # No padding; variable-length samples are collated as nested tensors.
    NO_PADDING = "no_padding"
class SFTTensorCollator:
    """
    A custom collate_fn that handles batching of sequences.
    1. for variable-length sequences, convert them into NestedTensors.
    2. for fixed-length sequences, use default_collate.
    """

    def __init__(self, pad_mode: DatasetPadMode = DatasetPadMode.LEFT_RIGHT):
        # Padding strategy decides which collation path __call__ takes.
        self.pad_mode = pad_mode

    def __call__(self, batch: list[dict[str, any]]) -> dict[str, any]:
        """Collate a list of samples according to the configured pad mode."""
        if self.pad_mode == DatasetPadMode.NO_PADDING:
            return self.collate_variable_batch(batch)
        elif self.pad_mode in [DatasetPadMode.RIGHT, DatasetPadMode.LEFT_RIGHT]:
            from torch.utils.data import default_collate
            return default_collate(batch)
        else:
            raise NotImplementedError(f"pad_mode {self.pad_mode} not implemented")

    def collate_variable_batch(self, batch: list[dict[str, any]]) -> dict[str, any]:
        """
        Collates a list of samples into a single batch.
        Args:
            batch: A list of dictionary samples from the dataset.
        Returns:
            A dictionary representing the batched data, with variable-length
            sequences converted to NestedTensors.
        """
        final_batch = {}
        # Union of keys across all samples.
        tensor_keys = set().union(*(d.keys() for d in batch))
        # Handle tensor values by creating a NestedTensor.
        for key in tensor_keys:
            # NOTE(review): keys are unioned across samples, but dispatch inspects
            # batch[0][key] — a key absent from the first sample raises KeyError,
            # and item.get(key) below yields None for samples lacking it.
            # Presumably all samples share the same keys upstream — confirm.
            if isinstance(batch[0][key], torch.Tensor):
                tensors = [item[key] for item in batch]
                final_batch[key] = torch.nested.as_nested_tensor(tensors, layout=torch.jagged)
            else:
                # Non-tensor values are wrapped in NonTensorData so torch.stack
                # produces a tensordict-compatible stacked container.
                tensors = [NonTensorData(item.get(key)) for item in batch]
                final_batch[key] = torch.stack(tensors, dim=0)
        return final_batch
| {
"repo_id": "verl-project/verl",
"file_path": "verl/utils/dataset/dataset_utils.py",
"license": "Apache License 2.0",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/utils/checkpoint/checkpoint_handler.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: add unit tests
import json
import logging
import os
import re
from enum import Enum
import torch
import verl.utils.hdfs_io as hdfs_io
from verl.single_controller import WorkerGroup
from verl.utils.checkpoint.checkpoint_manager import find_latest_ckpt_path, get_checkpoint_tracker_filename
from verl.utils.logger import log_with_rank
from verl.workers.engine import BaseEngine
def extract_step(path):
    """Return the integer step encoded as ``global_step_<N>`` in *path*, or None."""
    found = re.search(r"global_step_(\d+)", path)
    return int(found.group(1)) if found else None
# Module logger; verbosity controlled by the VERL_SFT_LOGGING_LEVEL env var.
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_SFT_LOGGING_LEVEL", "WARN"))
class OrchestrationMode(Enum):
    """How the checkpoint handler is driven."""

    # Every rank runs the handler (e.g. torchrun); distributed state is queried.
    SPMD = 0
    # Driver-side orchestration via Ray; behaves as a single logical rank 0.
    RAY = 1
class CheckpointHandler:
"""
Checkpoint handler handles the path, global_step of a checkpoint folder.
Currently, it only works with a single model.
We can expand it to support multiple models. It is expected to be used with SPMD style (e.g., torchrun)
"""
def __init__(
self,
engine: BaseEngine | WorkerGroup,
train_dataloader,
*,
default_local_dir,
max_ckpt_to_keep=None,
default_hdfs_dir=None,
resume_mode="auto",
resume_from_path=None,
mode=OrchestrationMode.SPMD,
lora_train_meta=None,
):
self.default_local_dir = default_local_dir
self.max_ckpt_to_keep = max_ckpt_to_keep
self.default_hdfs_dir = default_hdfs_dir
self.resume_mode = resume_mode
self.resume_from_path = resume_from_path
self.engine = engine
self.train_dataloader = train_dataloader
self.mode = mode
self.lora_train_meta = lora_train_meta
if self.mode == OrchestrationMode.SPMD:
self.rank = torch.distributed.get_rank()
self.is_mp_src_rank_with_outputs = self.engine.is_mp_src_rank_with_outputs()
self.dp_rank = self.engine.get_data_parallel_rank()
elif self.mode == OrchestrationMode.RAY:
self.rank = 0
self.is_mp_src_rank_with_outputs = True
self.dp_rank = 0
else:
raise ValueError(f"Unknown {self.mode=}")
def save_checkpoint(self, step):
"""Save checkpoint using FSDPCheckpointManager with improved tracking"""
from verl.utils.fs import local_mkdir_safe
# Determine checkpoint path
local_global_step_folder = os.path.join(self.default_local_dir, f"global_step_{step}")
if self.rank == 0:
print(f"Saving checkpoint to: {local_global_step_folder}")
# Get max checkpoints to keep
max_ckpt_to_keep = self.max_ckpt_to_keep
# Use checkpoint manager to save
self.engine.save_checkpoint(
local_path=local_global_step_folder, global_step=step, max_ckpt_to_keep=max_ckpt_to_keep
)
# Save dataloader state. Note that we only save the iterator in the train_dataloader.
# So it's identical in each dp rank.
if self.rank == 0 and self.lora_train_meta is not None:
local_mkdir_safe(local_global_step_folder)
lora_meta_path = os.path.join(local_global_step_folder, "lora_train_meta.json")
with open(lora_meta_path, "w", encoding="utf-8") as f:
json.dump(self.lora_train_meta, f, ensure_ascii=False, indent=4)
print(f"Saved LoRA rank/alpha metadata to: {lora_meta_path}")
if self.is_mp_src_rank_with_outputs:
dp_rank = self.dp_rank
local_mkdir_safe(local_global_step_folder)
dataloader_local_path = os.path.join(local_global_step_folder, f"data_{dp_rank}.pt")
# Use StatefulDataLoader's built-in state dict functionality
dataloader_state_dict = self.train_dataloader.state_dict()
torch.save(dataloader_state_dict, dataloader_local_path)
print(f"Saved dataloader state to: {dataloader_local_path}")
if self.rank == 0:
# Update latest checkpoint tracker (atomic write)
tracker_file = get_checkpoint_tracker_filename(self.default_local_dir)
temp_tracker_file = tracker_file + ".tmp"
with open(temp_tracker_file, "w") as f:
f.write(str(step))
os.rename(temp_tracker_file, tracker_file)
print(f"Updated checkpoint tracker: {tracker_file}")
# Copy to HDFS if configured
if self.rank == 0 and self.default_hdfs_dir:
hdfs_io.makedirs(self.default_hdfs_dir, exist_ok=True)
hdfs_io.copy(src=local_global_step_folder, dst=self.default_hdfs_dir, dirs_exist_ok=True)
if self.mode == OrchestrationMode.SPMD:
torch.distributed.barrier()
def load_checkpoint(self):
# Determine resume path based on configuration
checkpoint_path = self._determine_resume_path()
if checkpoint_path is None:
return 0
# extract resume step from checkpoint path
resume_step = extract_step(checkpoint_path)
if resume_step is None:
log_with_rank(
f"Warning: Could not extract step number from {checkpoint_path}, starting from step 0",
logger=logger,
rank=self.rank,
level=logging.WARNING,
log_only_rank_0=True,
)
return 0
self.resume_global_step = resume_step
# Use checkpoint manager to load model state
self.engine.load_checkpoint(checkpoint_path)
# Always load dataloader state for StatefulDataLoader
self._load_dataloader_state(checkpoint_path)
return resume_step
def _load_dataloader_state(self, checkpoint_path: str):
"""Load dataloader state from checkpoint"""
dp_rank = self.dp_rank
dataloader_path = os.path.join(checkpoint_path, f"data_{dp_rank}.pt")
if os.path.exists(dataloader_path):
# Use StatefulDataLoader's built-in state dict functionality
dataloader_state_dict = torch.load(dataloader_path, map_location="cpu", weights_only=False)
self.train_dataloader.load_state_dict(dataloader_state_dict)
log_with_rank(
f"Successfully loaded dataloader state from {dataloader_path}",
logger=logger,
rank=self.rank,
log_only_rank_0=True,
)
else:
log_with_rank(
f"Warning: No dataloader state found at {dataloader_path}, will start from scratch",
logger=logger,
rank=self.rank,
level=logging.WARNING,
log_only_rank_0=True,
)
def _determine_resume_path(self):
"""Determine the path to resume from based on resume_mode configuration"""
resume_mode = self.resume_mode
resume_from_path = self.resume_from_path
if resume_mode == "disable":
return None
elif resume_mode == "auto":
if resume_from_path is not None:
assert os.path.exists(resume_from_path), (
"resume_from_path must be null or an existing path when resume_mode is 'auto'"
)
assert "global_step_" in resume_from_path, "resume_from_path must specify the global_steps"
return resume_from_path
# Try to find the latest checkpoint in the default directory
return self._find_latest_checkpoint()
elif resume_mode == "resume_path":
assert os.path.exists(resume_from_path), (
"resume_from_path must be an existing path when resume_mode is 'resume_path'"
)
assert "global_step_" in resume_from_path, "resume_from_path must specify the global_steps"
return resume_from_path
else:
raise ValueError(f"Invalid resume_mode: {resume_mode}. Must be 'auto', 'disable', or 'resume_path'")
def _find_latest_checkpoint(self):
    """Locate the newest checkpoint under the default local directory.

    Returns None when the directory does not exist or holds no checkpoint.
    Only rank 0 prints the discovery message to avoid duplicated output.
    """
    ckpt_root = self.default_local_dir
    if not os.path.exists(ckpt_root):
        return None
    latest = find_latest_ckpt_path(ckpt_root)
    if latest and self.rank == 0:
        print(f"Found latest checkpoint: {latest} (step {extract_step(latest)})")
    return latest
| {
"repo_id": "verl-project/verl",
"file_path": "verl/utils/checkpoint/checkpoint_handler.py",
"license": "Apache License 2.0",
"lines": 193,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:tests/experimental/agent_loop/test_standalone_rollout.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import os
import pytest
import ray
from omegaconf import DictConfig
from openai import AsyncOpenAI, OpenAI
from tests.experimental.agent_loop.agent_utils import init_agent_loop_manager
from verl.checkpoint_engine import CheckpointEngineManager
from verl.utils import omega_conf_to_dataclass
from verl.workers.rollout.replica import get_rollout_replica_class
@pytest.fixture
def init_config() -> DictConfig:
    """Compose a ppo_trainer config tuned for this test suite.

    Uses 2 nodes x 4 GPUs (8 GPUs total) and an async rollout backend chosen
    by the ROLLOUT_NAME environment variable (raises KeyError when unset).
    """
    from hydra import compose, initialize_config_dir

    with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config")):
        config = compose(config_name="ppo_trainer")

    config.trainer.n_gpus_per_node = 4
    config.trainer.nnodes = 2
    config.actor_rollout_ref.actor.use_dynamic_bsz = True
    # Local model checkpoint expected under ~/models.
    config.actor_rollout_ref.model.path = os.path.expanduser("~/models/Qwen/Qwen2.5-1.5B-Instruct")
    config.actor_rollout_ref.rollout.name = os.environ["ROLLOUT_NAME"]
    config.actor_rollout_ref.rollout.mode = "async"
    config.actor_rollout_ref.rollout.skip_tokenizer_init = False
    return config
@pytest.mark.asyncio
@pytest.mark.parametrize("tp_size", [2, 4])
async def test_standalone_rollout(init_config, tp_size):
    """Test standalone rollout single node and multi nodes.

    With gpus_per_node=2 below, tp_size=4 forces a replica to span two
    "nodes", exercising the multi-node server launch path.
    """
    ray.init(
        runtime_env={
            "env_vars": {
                "TOKENIZERS_PARALLELISM": "true",
                "NCCL_DEBUG": "WARN",
                "VLLM_LOGGING_LEVEL": "INFO",
                "VLLM_USE_V1": "1",
                "NCCL_P2P_DISABLE": "1",  # disable p2p in L20
            }
        }
    )

    init_config.actor_rollout_ref.rollout.tensor_model_parallel_size = tp_size
    # One replica per tp_size GPUs across the whole cluster.
    num_replicas = (init_config.trainer.n_gpus_per_node * init_config.trainer.nnodes) // tp_size
    rollout_config = init_config.actor_rollout_ref.rollout
    model_config = init_config.actor_rollout_ref.model

    # create standalone rollout server
    rollout_server_class = get_rollout_replica_class(init_config.actor_rollout_ref.rollout.name)
    rollout_servers = [
        rollout_server_class(
            replica_rank=replica_rank, config=rollout_config, model_config=model_config, gpus_per_node=2
        )
        for replica_rank in range(num_replicas)
    ]
    # Launch all replicas concurrently; each creates its own resource pool.
    await asyncio.gather(*[server.init_standalone() for server in rollout_servers])
    server_handles = [server._server_handle for server in rollout_servers]
    server_addresses = [server._server_address for server in rollout_servers]
    assert len(server_handles) == num_replicas
    assert len(server_addresses) == num_replicas

    # Drop proxy env vars so the local HTTP request is not routed through a proxy.
    os.environ.pop("HTTPS_PROXY", None)
    os.environ.pop("HTTP_PROXY", None)
    os.environ.pop("NO_PROXY", None)

    client = AsyncOpenAI(
        api_key="123-abc",
        base_url=f"http://{server_addresses[0]}/v1",
    )
    completion = await client.chat.completions.create(
        model=init_config.actor_rollout_ref.model.path,
        messages=[{"role": "user", "content": "What can you do?"}],
    )
    print(completion.choices[0].message.content)

    ray.shutdown()
@pytest.mark.skip(reason="local test only")
def test_hybrid_rollout_with_ep(init_config):
    """Test hybrid rollout with expert parallelism, DP=2, TP=4, EP=8.

    Marked skip: requires a local 8-GPU machine and a large MoE checkpoint.
    Fix: renamed the misspelled local ``smapling_params`` to ``sampling_params``.
    """
    ray.init(
        runtime_env={
            "env_vars": {
                "TOKENIZERS_PARALLELISM": "true",
                "NCCL_DEBUG": "WARN",
                "VLLM_LOGGING_LEVEL": "INFO",
                "VLLM_USE_V1": "1",
            }
        }
    )

    model_path = os.path.expanduser("~/models/Qwen/Qwen3-30B-A3B-Instruct-2507")
    init_config.actor_rollout_ref.model.path = model_path
    # parallelism config
    init_config.actor_rollout_ref.rollout.tensor_model_parallel_size = 2
    init_config.actor_rollout_ref.rollout.data_parallel_size = 4
    init_config.actor_rollout_ref.rollout.expert_parallel_size = 8

    # 1. init hybrid worker: FSDP+rollout
    # - build FSDP model and optimizer
    # - offload FSDP model and optimizer, build rollout
    # - sleep rollout and load FSDP model and optimizer
    agent_loop_manager = init_agent_loop_manager(init_config)

    # 2. sync weights from trainer to rollout replicas via the checkpoint engine
    checkpoint_manager = CheckpointEngineManager(
        config=omega_conf_to_dataclass(init_config.actor_rollout_ref.rollout.checkpoint_engine),
        trainer=agent_loop_manager.worker_group,
        replicas=agent_loop_manager.rollout_replicas,
    )
    checkpoint_manager.sleep_replicas()
    checkpoint_manager.update_weights()

    # 3. test async openai call
    server_address = agent_loop_manager.server_addresses[0]
    client = OpenAI(
        api_key="123-abc",
        base_url=f"http://{server_address}/v1",
    )
    sampling_params = {
        "temperature": 1.0,
        "top_p": 1.0,
        "max_tokens": 512,
    }
    response = client.chat.completions.create(
        model=model_path,
        messages=[{"role": "user", "content": "What can you do?"}],
        **sampling_params,
    )
    completion = response.choices[0].message.content
    print(f"response: {completion}")
    print("Test passed!")

    ray.shutdown()
| {
"repo_id": "verl-project/verl",
"file_path": "tests/experimental/agent_loop/test_standalone_rollout.py",
"license": "Apache License 2.0",
"lines": 132,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:verl/workers/rollout/replica.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
import os
from abc import ABC, abstractmethod
from enum import Enum
from typing import Any, Callable, Optional
import ray
from omegaconf import DictConfig
from pydantic import BaseModel
from ray.actor import ActorHandle
from verl.single_controller.ray import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup, ResourcePoolManager
from verl.utils.config import omega_conf_to_dataclass
from verl.utils.device import is_torch_npu_available
from verl.workers.config import HFModelConfig, RolloutConfig
logger = logging.getLogger(__file__)
# Max number of concurrent calls to the methods of Rollout,
# excluding calls to generate method.
CONTROL_METHOD_CONCURRENCY = 16
class TokenOutput(BaseModel):
    """Result of token-in-token-out generation returned by a rollout server."""

    token_ids: list[int]
    """response token ids"""
    log_probs: Optional[list[float]] = None
    """logprobs of response token ids"""
    routed_experts: Optional[Any] = None
    """routed experts of response token ids"""
    stop_reason: Optional[str] = None
    """stop reason: 'completed', 'aborted', or None for unknown"""
    num_preempted: Optional[int] = None
    """number of preempted times for metric calculation"""
    # NOTE(review): a mutable {} default is safe on a pydantic model — pydantic
    # copies field defaults per instance, unlike plain class attributes.
    extra_info: dict[str, Any] = {}
    """extra info for rollout"""
class RolloutMode(Enum):
    """Deployment topology of a rollout replica relative to the trainer."""

    # Rollout engine and training engine(fsdp/megatron) fused in same process.
    # Rollout and trainer share GPUs, switch context with weight synchronization.
    # Usage scenarios: on-policy training.
    HYBRID = "hybrid"

    # Rollout engine colocated with hybrid engine in same ray placement group but in separate process.
    # Rollout and hybrid processes share GPUs, switch context without weight synchronization.
    # Usage scenarios: GRM (LLM as a judge).
    COLOCATED = "colocated"

    # Standalone rollout server with separate GPU resource, disaggregated architecture.
    # Usage scenarios: off-policy training.
    STANDALONE = "standalone"
class RolloutReplica(ABC):
    """Rollout replica is an individual server instance, which may be deployed on single or multiple nodes.

    It is equivalent to launch server in each node with command line:
    SGLang:
    ```
    python -m sglang.launch_server --node-rank 0 --nnode 2 ...
    python -m sglang.launch_server --node-rank 1 --nnode 2 ...
    ```
    vLLM:
    ```
    vllm serve --data-parallel-size 16 --data-parallel-size-local 8 --data-parallel-start-rank 0 ...
    vllm serve --data-parallel-size 16 --data-parallel-size-local 8 --data-parallel-start-rank 8 ...
    ```

    Args:
        replica_rank: int, rank of this rollout replica.
        config: RolloutConfig, full config.
        model_config: DictConfig, model config.
        gpus_per_node: int, number of gpus per node.
        is_reward_model: bool, whether this replica serves a reward model
            (only affects naming of ray resource pools / worker groups).
    """

    def __init__(
        self,
        replica_rank: int,
        config: RolloutConfig,
        model_config: DictConfig,
        gpus_per_node: int = 8,
        is_reward_model: bool = False,
    ) -> None:
        self.replica_rank = replica_rank
        # Normalize the (possibly OmegaConf) config into the RolloutConfig dataclass.
        self.config: RolloutConfig = omega_conf_to_dataclass(config)
        self.model_config: HFModelConfig = model_config
        # Total GPU workers backing this replica: TP x DP x PP.
        self.world_size = (
            self.config.tensor_model_parallel_size
            * self.config.data_parallel_size
            * self.config.pipeline_model_parallel_size
        )
        self.gpus_per_node = gpus_per_node
        # A replica smaller than one node occupies only part of that node.
        self.gpus_per_replica_node = min(gpus_per_node, self.world_size)
        assert self.world_size % self.gpus_per_replica_node == 0, (
            f"world_size {self.world_size} must be divisible by gpus_per_node {self.gpus_per_replica_node}"
        )
        self.nnodes = self.world_size // self.gpus_per_replica_node
        self.is_reward_model = is_reward_model

        # Populated by one of the init_* methods / launch_servers below.
        self.rollout_mode: RolloutMode = None
        self.workers: list[ActorHandle] = []
        self.resource_pool: RayResourcePool = None
        self.bundle_indices: list[int] = []
        self.servers: list[ActorHandle] = []
        self._server_address: str = None
        self._server_handle: ActorHandle = None

    async def init_hybrid(self, worker_group: RayWorkerGroup):
        """Init hybrid rollout server, rollout engine and training engine(fsdp/megatron) fused in same process.

        Args:
            worker_group: RayWorkerGroup, fused workers where training engine(fsdp/megatron) have been initialized.
        """
        self.rollout_mode = RolloutMode.HYBRID
        # Take this replica's contiguous slice of the fused worker list.
        self.workers = worker_group.workers[
            self.world_size * self.replica_rank : self.world_size * (self.replica_rank + 1)
        ]
        await self.launch_servers()

    async def init_hybrid_colocated(self, worker_group: RayWorkerGroup, resource_pool: RayResourcePool):
        """Init hybrid rollout server, rollout engine and training engine(fsdp/megatron) fused in same process.

        Args:
            worker_group: RayWorkerGroup, fused workers where training engine(fsdp/megatron) have been initialized.
            resource_pool: RayResourcePool, ray placement group where hybrid engine processes have been launched.
        """
        self.rollout_mode = RolloutMode.HYBRID
        self.workers = worker_group.workers[
            self.world_size * self.replica_rank : self.world_size * (self.replica_rank + 1)
        ]
        self.resource_pool = resource_pool
        # Placement-group bundle indices owned by this replica.
        self.bundle_indices = [self.replica_rank * self.world_size + idx for idx in range(self.world_size)]
        await self.launch_servers()

    # TODO(sgm): this should be the default solution, but need to make the RolloutMode more clear.
    async def init_colocated(self, resource_pool: RayResourcePool):
        """Init colocated rollout server, rollout engine and hybrid engine colocated in same ray placement group
        but in separate processes.

        Args:
            resource_pool: RayResourcePool, ray placement group where hybrid engine processes have been launched.
        """
        self.rollout_mode = RolloutMode.COLOCATED
        self.resource_pool = resource_pool
        use_gpu = self.rollout_worker_use_gpu()
        worker_group = RayWorkerGroup(
            resource_pool=self.resource_pool,
            ray_cls_with_init=self.get_ray_class_with_init_args(),
            bin_pack=False,
            name_prefix=f"rollout_colocate_{self.replica_rank}"
            if not self.is_reward_model
            else f"rollout_reward_colocate_{self.replica_rank}",
            use_gpu=use_gpu,
            device_name="cuda" if not is_torch_npu_available(check_device=False) else "npu",
        )
        self.workers = worker_group.workers
        await self.launch_servers()

    async def init_standalone(self):
        """Init standalone rollout server, create new resource pool for this rollout."""
        # create resource pool for this rollout
        self.rollout_mode = RolloutMode.STANDALONE
        resource_pool_name = (
            f"rollout_pool_{self.replica_rank}"
            if not self.is_reward_model
            else f"rollout_pool_reward_{self.replica_rank}"
        )
        resource_pool_spec = {
            resource_pool_name: [self.gpus_per_replica_node] * self.nnodes,
        }
        resource_pool_manager = ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=None)
        resource_pool_manager.create_resource_pool()
        self.resource_pool = resource_pool_manager.resource_pool_dict[resource_pool_name]

        # create worker group for this rollout
        use_gpu = self.rollout_worker_use_gpu()
        worker_group = RayWorkerGroup(
            resource_pool=self.resource_pool,
            ray_cls_with_init=self.get_ray_class_with_init_args(),
            bin_pack=False,
            name_prefix=f"rollout_standalone_{self.replica_rank}"
            if not self.is_reward_model
            else f"rollout_reward_standalone_{self.replica_rank}",
            use_gpu=use_gpu,
            device_name="cuda" if not is_torch_npu_available(check_device=False) else "npu",
        )
        self.workers = worker_group.workers
        await self.launch_servers()

    def get_ray_class_with_init_args(self) -> RayClassWithInitArgs:
        """Get rollout worker actor class for colocated and standalone mode."""
        from verl.checkpoint_engine.base import CheckpointEngineWorker

        rollout_worker_actor_cls = ray.remote(CheckpointEngineWorker)
        return RayClassWithInitArgs(
            cls=rollout_worker_actor_cls,
            rollout_config=self.config,
            model_config=self.model_config,
            replica_rank=self.replica_rank,
        )

    @abstractmethod
    async def launch_servers(self):
        """Launch http server in each node."""
        raise NotImplementedError

    @property
    def server_address(self) -> str:
        """Get rollout server address for OpenAI chat completion."""
        return self._server_address

    @property
    def server_handle(self) -> ActorHandle:
        """Get rollout server handle for Token-in-token-out generation."""
        return self._server_handle

    @property
    def max_concurrency(self) -> int:
        """Max concurrent ray actor calls: at least Ray's 1000 default, plus headroom for control calls."""
        # 1000 is Ray's default max_concurrency for async execution.
        # Add some margin to account for control method call.
        return max(1000, self.config.max_num_seqs + CONTROL_METHOD_CONCURRENCY)

    def rollout_worker_use_gpu(self) -> bool:
        # Subclasses may override (e.g. CPU-only engines).
        return True

    async def wake_up(self):
        """Wake up each rollout server."""
        await asyncio.gather(*[server.wake_up.remote() for server in self.servers])

    async def sleep(self):
        """Sleep each rollout server."""
        await asyncio.gather(*[server.sleep.remote() for server in self.servers])

    async def abort_all_requests(self):
        """Partial rollout: abort and save all unfinished requests in each rollout server."""
        await asyncio.gather(*[server.abort_all_requests.remote() for server in self.servers])

    async def resume_generation(self):
        """Resume generation on all servers after abort_all_requests."""
        await asyncio.gather(*[server.resume_generation.remote() for server in self.servers])

    async def clear_kv_cache(self):
        """reset kv cache in each rollout server."""
        await asyncio.gather(*[server.clear_kv_cache.remote() for server in self.servers])

    async def start_profile(self, **kwargs):
        """Start profiling on the replica."""
        await asyncio.gather(*[server.start_profile.remote(**kwargs) for server in self.servers])

    async def stop_profile(self):
        """Stop profiling on the replica."""
        await asyncio.gather(*[server.stop_profile.remote() for server in self.servers])
class RolloutReplicaRegistry:
    """Factory for managing rollout replica implementations.

    Implementations register a lazy loader so heavy backend imports only
    happen when a backend is actually requested.
    """

    _registry: dict[str, Callable[[], type[RolloutReplica]]] = {}

    @classmethod
    def register(cls, name: str, loader: Callable[[], type[RolloutReplica]]) -> None:
        """Register a new rollout replica type."""
        cls._registry[name] = loader

    @classmethod
    def get(cls, name: str) -> type[RolloutReplica]:
        """Get a rollout replica class by name."""
        try:
            loader = cls._registry[name]
        except KeyError:
            raise ValueError(f"Unknown rollout mode: {name}. Available: {list(cls._registry.keys())}") from None
        return loader()
# Loader functions for built-in types
def _load_vllm():
    """Lazy loader: import and return the vLLM rollout replica class."""
    from verl.workers.rollout.vllm_rollout.vllm_async_server import vLLMReplica

    return vLLMReplica
def _load_sglang():
    """Lazy loader: import and return the SGLang rollout replica class.

    SGLANG_USE_CPU_ENGINE=1 is set only for the duration of the import and is
    always removed afterwards (fix: previously it leaked if any import raised).
    When vllm is not installed, a minimal mock module tree is installed into
    sys.modules so sglang's optional vllm imports resolve.
    """
    os.environ["SGLANG_USE_CPU_ENGINE"] = "1"
    try:
        try:
            import vllm  # noqa: F401
        except ImportError:
            import sys
            import types
            from unittest.mock import Mock

            # Build just enough of the vllm package surface that sglang touches.
            mock_vllm = types.ModuleType("vllm")
            mock_custom_ops = types.ModuleType("vllm._custom_ops")
            mock_custom_ops.scaled_fp8_quant = Mock()
            mock_vllm._custom_ops = mock_custom_ops
            mock_model_executor = types.ModuleType("vllm.model_executor")
            mock_layers = types.ModuleType("vllm.model_executor.layers")
            mock_activation = types.ModuleType("vllm.model_executor.layers.activation")

            class GeluAndMul:  # noqa: N801
                pass

            class SiluAndMul:  # noqa: N801
                pass

            mock_activation.GeluAndMul = GeluAndMul
            mock_activation.SiluAndMul = SiluAndMul
            mock_layers.activation = mock_activation
            mock_model_executor.layers = mock_layers
            mock_vllm.model_executor = mock_model_executor
            sys.modules["vllm"] = mock_vllm
            sys.modules["vllm._custom_ops"] = mock_custom_ops
            sys.modules["vllm.model_executor"] = mock_model_executor
            sys.modules["vllm.model_executor.layers"] = mock_layers
            sys.modules["vllm.model_executor.layers.activation"] = mock_activation

        from verl.workers.rollout.sglang_rollout.async_sglang_server import SGLangReplica
    finally:
        # Always clean up the env var, even on import failure, so it cannot
        # leak into unrelated code paths in the same process.
        os.environ.pop("SGLANG_USE_CPU_ENGINE", None)

    return SGLangReplica
def _load_trtllm():
    """Lazy loader: import and return the TensorRT-LLM rollout replica class."""
    from verl.workers.rollout.trtllm_rollout.trtllm_async_server import TRTLLMReplica

    return TRTLLMReplica
# Register built-in backends. Loaders are lazy, so the heavy vllm/sglang/trtllm
# imports only run when that backend is actually requested.
RolloutReplicaRegistry.register("vllm", _load_vllm)
RolloutReplicaRegistry.register("sglang", _load_sglang)
RolloutReplicaRegistry.register("trtllm", _load_trtllm)
# Original function for backward compatibility
def get_rollout_replica_class(rollout: str) -> type[RolloutReplica]:
    """Look up a rollout replica class by backend name (e.g. "vllm")."""
    return RolloutReplicaRegistry.get(rollout)
| {
"repo_id": "verl-project/verl",
"file_path": "verl/workers/rollout/replica.py",
"license": "Apache License 2.0",
"lines": 289,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/workers/rollout/utils.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
import uvicorn
from fastapi import FastAPI
logger = logging.getLogger(__file__)
def get_max_position_embeddings(hf_config) -> int:
    """Return ``max_position_embeddings`` from a HuggingFace config.

    Checks the top-level config first, then falls back to a nested
    ``text_config`` (multimodal models). Raises ValueError when neither
    carries a non-None value.
    """
    for candidate in (hf_config, getattr(hf_config, "text_config", None)):
        if candidate is None:
            continue
        value = getattr(candidate, "max_position_embeddings", None)
        if value is not None:
            return int(value)
    raise ValueError("max_position_embeddings not found in HFModelConfig!")
class _UvicornServerAutoPort(uvicorn.Server):
    """Uvicorn Server that reports the system-assigned port when port=0."""

    def __init__(self, config: uvicorn.Config) -> None:
        super().__init__(config)
        # Filled in by startup(); remains None if startup fails.
        self.actual_port: int | None = None
        self._startup_done: asyncio.Event = asyncio.Event()

    async def startup(self, sockets=None) -> None:
        """Start the server and record the actual bound port.

        The event is set in a finally block so that get_port() never hangs,
        even when super().startup() raises.
        """
        try:
            await super().startup(sockets=sockets)
            if self.servers and self.config.port == 0:
                # port=0 asked the OS to choose a free port; read it back
                # from the first listening socket.
                sock = self.servers[0].sockets[0]
                self.actual_port = sock.getsockname()[1]
            else:
                self.actual_port = self.config.port
        finally:
            self._startup_done.set()

    async def get_port(self) -> int | None:
        """Wait until startup completes and return the bound port (None on failure)."""
        await self._startup_done.wait()
        return self.actual_port
async def run_uvicorn(app: FastAPI, server_args, server_address) -> tuple[int, asyncio.Task]:
    """Serve *app* on an OS-assigned port and return (port, serving task).

    The server runs in a background asyncio task; callers keep the returned
    task alive for as long as the server should run. server_args is stashed
    on the app object for request handlers to read.

    Raises whatever server.serve() raised when startup fails.
    """
    app.server_args = server_args
    # port=0 lets the OS pick a free port, avoiding races on busy hosts.
    config = uvicorn.Config(app, host=server_address, port=0, log_level="warning")
    server = _UvicornServerAutoPort(config)
    server_task = asyncio.create_task(server.serve())
    server_port = await server.get_port()
    if server_port is None:
        # server.startup() failed. await the task to re-raise exception from server.serve()
        await server_task
        # Fails on unexpected situation.
        raise RuntimeError("Unexpected: HTTP server started without reporting listened port")
    logger.info(f"HTTP server started on port {server_port}")
    return server_port, server_task
async def ensure_async_iterator(iterable):
    """Adapt any iterable — sync or async — into an async iterator.

    Async iterables are forwarded item by item; plain iterables are yielded
    directly (each item is still delivered through the async protocol).
    """
    if not hasattr(iterable, "__aiter__"):
        for element in iterable:
            yield element
        return
    async for element in iterable:
        yield element
| {
"repo_id": "verl-project/verl",
"file_path": "verl/workers/rollout/utils.py",
"license": "Apache License 2.0",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/models/transformers/apertus.py | # Copyright 2025 The SwissAI Initiative
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from typing import Callable, Optional
import torch
if sys.version_info >= (3, 11):
pass
else:
pass
from transformers.cache_utils import Cache
from transformers.models.apertus.modeling_apertus import apply_rotary_pos_emb
from transformers.utils import logging
# Import compatibility wrapper for flash_attn_supports_top_left_mask
from verl.utils.ulysses import (
gather_heads_scatter_seq,
gather_seq_scatter_heads,
get_ulysses_sequence_parallel_world_size,
validate_ulysses_config,
)
logger = logging.get_logger(__name__)
def apertus_attn_forward(
    self,
    hidden_states: torch.Tensor,
    position_embeddings: tuple[torch.Tensor, torch.Tensor],
    attention_mask: Optional[torch.Tensor],
    past_key_value: Optional[Cache] = None,
    cache_position: Optional[torch.LongTensor] = None,
    **kwargs,
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
    """
    Adapted from transformers 4.49.0 to support Ulysses sequence parallelism for transformers >= 4.48.0.

    Key differences from Llama attention:
    - QK normalization applied after Q/K projections

    NOTE: This function has been tested only on transformers versions between 4.48.0 and 4.50.0.
    """
    from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS
    from transformers.models.apertus.modeling_apertus import eager_attention_forward

    bsz, q_len, _ = hidden_states.shape

    # Project to (bsz, heads, q_len, head_dim); -1 infers the head count so the
    # same code handles both full and sequence-parallel head layouts.
    query_states = self.q_proj(hidden_states).view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
    key_states = self.k_proj(hidden_states).view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
    value_states = self.v_proj(hidden_states).view(bsz, q_len, -1, self.head_dim).transpose(1, 2)

    # Apertus-specific: QK normalization after the projections.
    query_states = self.q_norm(query_states)
    key_states = self.k_norm(key_states)

    ########## AlltoAll for Ulysses ##########
    # Swap sharding from sequence dim to head dim so each rank attends over
    # the full sequence with a subset of heads. Must happen BEFORE RoPE so
    # rotary embeddings see absolute positions over the whole sequence.
    ulysses_sp_size = get_ulysses_sequence_parallel_world_size()

    if ulysses_sp_size > 1:
        validate_ulysses_config(self.config.num_attention_heads, ulysses_sp_size)

        query_states = gather_seq_scatter_heads(query_states, seq_dim=2, head_dim=1)
        key_states = gather_seq_scatter_heads(key_states, seq_dim=2, head_dim=1)
        value_states = gather_seq_scatter_heads(value_states, seq_dim=2, head_dim=1)

    # Full (gathered) sequence length; equals q_len when SP is off.
    full_q_len = query_states.size(2)

    cos, sin = position_embeddings
    query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
    if past_key_value is not None:
        # sin and cos are specific to RoPE models; cache_position needed for the static cache
        cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
        key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

    # Pick the attention kernel configured on the model (sdpa/flash/eager...),
    # falling back to eager when attention weights must be returned.
    attention_interface: Callable = eager_attention_forward
    if self.config._attn_implementation != "eager":
        if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
            logger.warning_once(
                "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. "
                "Falling back to eager attention. This warning can be removed using the argument "
                '`attn_implementation="eager"` when loading the model.'
            )
        else:
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

    attn_output, attn_weights = attention_interface(
        self,
        query_states,
        key_states,
        value_states,
        attention_mask,
        dropout=0.0 if not self.training else self.attention_dropout,
        scaling=self.scaling,
        **kwargs,
    )

    attn_output = attn_output.reshape(bsz, full_q_len, -1, self.head_dim).contiguous()

    ########## AlltoAll for Ulysses ##########
    # Reverse the earlier exchange: back to sequence-sharded, all heads local.
    if ulysses_sp_size > 1:
        attn_output = gather_heads_scatter_seq(attn_output, seq_dim=1, head_dim=2)

    attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
    attn_output = self.o_proj(attn_output)
    return attn_output, attn_weights
| {
"repo_id": "verl-project/verl",
"file_path": "verl/models/transformers/apertus.py",
"license": "Apache License 2.0",
"lines": 96,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:tests/special_sanity/check_dataproto_usage.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This CI test is used for checking whether DataProto is used in the code of some directory
"""
import os
from argparse import ArgumentParser
from pathlib import Path
# Path fragments exempt from the check (currently none).
SEARCH_WHITELIST = []
# Forbidden symbols; any occurrence in a scanned file fails CI.
SEARCH_KEYWORDS = ["DataProto"]

if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("--directory", "-d", required=True, type=str)
    args = parser.parse_args()
    directory_in_str = args.directory
    pathlist = Path(directory_in_str).glob("**/*.py")
    for path in pathlist:
        path_in_str = str(path.absolute())
        # judge whether current path is in pre-defined search whitelist or not.
        path_in_whitelist = False
        for sw in SEARCH_WHITELIST:
            # for easy debugging in non-linux system
            sw = sw.replace("/", os.sep)
            if sw in path_in_str:
                print(f"[SKIP] File {path_in_str} is in device api usage check whitelist, checking is skipped.")
                path_in_whitelist = True
                break
        if path_in_whitelist:
            continue
        with open(path_in_str, encoding="utf-8") as f:
            file_content = f.read()
        # Plain substring scan — stops at the first matching keyword.
        find_invalid_device_management = False
        for sk in SEARCH_KEYWORDS:
            if sk in file_content:
                find_invalid_device_management = True
                break
        # NOTE(review): the failure message interpolates `sk`, which is the
        # keyword that triggered the break; when nothing matched, `sk` is the
        # last keyword but the 'failed' branch is not taken anyway.
        print(
            f"[CHECK] File {path_in_str} is detected for DataProto usage check, check result: "
            f"{'success' if not find_invalid_device_management else f'failed, because detect {sk}'}."
        )
        assert not find_invalid_device_management, (
            f"file {path_in_str} contains DataProto usage, please use TensorDict directly!"
        )
| {
"repo_id": "verl-project/verl",
"file_path": "tests/special_sanity/check_dataproto_usage.py",
"license": "Apache License 2.0",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:tests/test_protocol_v2_on_cpu.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Replace DataProto with raw TensorDict
"""
import copy
import random
import numpy as np
import pytest
import torch
from tensordict.tensorclass import NonTensorData, NonTensorStack
from verl.utils import tensordict_utils as tu
def test_union_tensor_dict():
    """union_tensor_dict merges TensorDicts sharing identical entries and
    raises AssertionError on conflicting tensor or non-tensor values."""
    obs = torch.randn(100, 10)
    meta_info1 = {"top_p": 0.8}
    meta_info2 = {"top_p": 0.9}
    data1 = {"obs": obs, "act": torch.randn(100, 3), "data_sources": ["gsm8k"] * 100}
    data2 = {"obs": obs, "next_obs": torch.randn(100, 10), "rew": torch.randn(100), "data_sources": ["gsm8k"] * 100}
    # Same values but a different tensor object/storage for "obs".
    data_with_copied_obs = {"obs": obs.clone(), "next_obs": torch.randn(100, 10), "rew": torch.randn(100)}

    data1 = tu.get_tensordict(tensor_dict=data1)
    data2 = tu.get_tensordict(tensor_dict=data2)
    data_with_copied_obs = tu.get_tensordict(data_with_copied_obs)

    tu.union_tensor_dict(data1, data2)
    with pytest.raises(AssertionError):
        # conflict in tensor values
        tu.union_tensor_dict(data1, data_with_copied_obs)

    data1 = tu.assign_non_tensor(data1, **meta_info1)
    tu.union_tensor_dict(data1, data2)  # works ok
    data2 = tu.assign_non_tensor(data2, **meta_info2)
    with pytest.raises(AssertionError):
        # conflict in NonTensorData
        tu.union_tensor_dict(data1, data2)

    data1.pop("top_p")
    data2.pop("top_p")

    # Diverging per-sample non-tensor entry must also be rejected.
    data2["data_sources"][0] = "math"
    with pytest.raises(AssertionError):
        # conflict in NonTensorData
        tu.union_tensor_dict(data1, data2)
def test_tensor_dict_constructor():
    """get_tensordict infers batch size and supports int/slice indexing of
    tensor, per-sample non-tensor, and global non-tensor entries."""
    obs = torch.ones(100, 10)
    act = torch.zeros(100, 10, 3)
    data_source = ["gsm8k"] * 100
    non_tensor_dict = {"name": "abdce"}
    data = tu.get_tensordict(
        tensor_dict={"obs": obs, "act": act, "data_source": data_source}, non_tensor_dict=non_tensor_dict
    )

    assert data.batch_size == torch.Size([100])

    # test slicing
    assert torch.all(torch.eq(data[0]["obs"], torch.ones(10))).item()
    assert torch.all(torch.eq(data[0]["act"], torch.zeros(10, 3))).item()
    assert data[0]["data_source"] == "gsm8k"

    assert torch.all(torch.eq(data[0:2]["obs"], torch.ones(2, 10))).item()
    assert torch.all(torch.eq(data[0:2]["act"], torch.zeros(2, 10, 3))).item()
    assert data[0:2]["data_source"] == ["gsm8k"] * 2

    # test non tensor data (global metadata, not indexed by batch)
    assert data["name"] == "abdce"
def test_index_select_tensor_dict():
    """index_select_tensor_dict picks rows by index from both jagged (nested)
    and regular tensors while carrying global non-tensor data through."""
    vocab_size = 128
    a = torch.randint(low=0, high=vocab_size, size=(11,))
    b = torch.randint(low=0, high=vocab_size, size=(13,))
    c = torch.randint(low=0, high=vocab_size, size=(12,))
    d = torch.randint(low=0, high=vocab_size, size=(15,))

    # Variable-length sequences stored as a jagged nested tensor.
    input_ids = [a, b, c, d]
    input_ids = torch.nested.as_nested_tensor(input_ids, layout=torch.jagged)

    padded_tensor = torch.randn(4, 10)

    non_tensor_dict = {"global_batch_size": "4"}

    data = tu.get_tensordict(
        tensor_dict={
            "input_ids": input_ids,
            "padded_tensor": padded_tensor,
        },
        non_tensor_dict=non_tensor_dict,
    )

    assert data.batch_size == torch.Size([4])

    # test index select
    indices = torch.tensor([1, 3])
    selected_data = tu.index_select_tensor_dict(data, indices)

    assert selected_data.batch_size == torch.Size([2])

    # Build the expected result by hand and compare structurally.
    target_input_ids = torch.nested.as_nested_tensor([input_ids[idx] for idx in indices], layout=torch.jagged)
    target_select_data = tu.get_tensordict(
        tensor_dict={
            "input_ids": target_input_ids,
            "padded_tensor": padded_tensor[indices],
        },
        non_tensor_dict=non_tensor_dict,
    )

    tu.assert_tensordict_eq(selected_data, target_select_data)
def test_tensordict_with_images():
    """Each sample holds a jagged token sequence plus a list of differently sized images."""
    vocab = 128
    seq_a = torch.randint(low=0, high=vocab, size=(11,))
    seq_b = torch.randint(low=0, high=vocab, size=(13,))
    token_ids = torch.nested.as_nested_tensor([seq_a, seq_b], layout=torch.jagged)

    def _rand_image(side):
        # images must currently be numpy arrays
        # TODO(vermouth1992). We may use nested tensor too. But this requires nested over nested
        return torch.randint(low=0, high=255, size=(3, side, side), dtype=torch.uint8).numpy()

    images_a = [_rand_image(256), _rand_image(128)]
    images_b = [_rand_image(256), _rand_image(128), _rand_image(64)]
    td = tu.get_tensordict({"input_ids": token_ids, "images": [images_a, images_b]})
    # per-sample indexing returns the original image arrays and token rows
    assert np.all(np.equal(td[0]["images"][0], images_a[0]))
    assert torch.equal(td[0]["input_ids"], seq_a)
def test_tensordict_with_packing():
    """Jagged input_ids expose cu_seqlens via offsets() and index like regular batches."""
    vocab = 128
    first = torch.randint(low=0, high=vocab, size=(11,))
    second = torch.randint(low=0, high=vocab, size=(13,))
    packed = torch.nested.as_nested_tensor([first, second], layout=torch.jagged)
    td = tu.get_tensordict({"input_ids": packed})
    # offsets of the jagged layout are exactly the cumulative sequence lengths
    assert torch.equal(td["input_ids"].offsets(), torch.tensor([0, 11, 24]))
    # indexing through the tensor or through the tensordict yields the same rows
    for idx, expected in enumerate((first, second)):
        assert torch.equal(td["input_ids"][idx], expected)
        assert torch.equal(td[idx]["input_ids"], expected)
    # chunking splits the jagged batch along dim 0
    left, right = td.chunk(2)
    assert torch.equal(left["input_ids"][0], first)
    assert torch.equal(right["input_ids"][0], second)
def test_tensordict_eq():
    """assert_tensordict_eq detects equality and any single-field mismatch across
    tensors, NonTensorStacks, nested metadata dicts and jagged tensors."""
    obs = torch.tensor([1, 2, 3, 4, 5, 6])
    data_sources = ["abc", "def", "abc", "def", "pol", "klj"]
    non_tensor_dict = {"train_sample_kwargs": {"top_p": 1.0}, "val_sample_kwargs": {"top_p": 0.7}}
    data = tu.get_tensordict({"obs": obs, "data_sources": data_sources}, non_tensor_dict=non_tensor_dict)
    # rebuild identical content from fresh objects: must compare equal
    obs = torch.tensor([1, 2, 3, 4, 5, 6])
    data_sources = ["abc", "def", "abc", "def", "pol", "klj"]
    non_tensor_dict = {"train_sample_kwargs": {"top_p": 1.0}, "val_sample_kwargs": {"top_p": 0.7}}
    data1 = tu.get_tensordict({"obs": obs, "data_sources": data_sources}, non_tensor_dict=non_tensor_dict)
    tu.assert_tensordict_eq(data, data1)
    # a single changed tensor element must fail
    data2 = copy.deepcopy(data1)
    data2["obs"][0] += 1
    with pytest.raises(AssertionError):
        tu.assert_tensordict_eq(data, data2)
    # a single changed NonTensorStack entry must fail
    data2 = copy.deepcopy(data1)
    data2["data_sources"][0] = "math"
    with pytest.raises(AssertionError):
        tu.assert_tensordict_eq(data, data2)
    # a changed value inside nested NonTensorData metadata must fail
    data2 = copy.deepcopy(data1)
    data2["train_sample_kwargs"]["top_p"] = 0.9
    with pytest.raises(AssertionError):
        tu.assert_tensordict_eq(data, data2)
    # jagged tensors: two nested tensors with equal rows compare equal ...
    tensor_list = [
        torch.tensor([1, 2, 3, 3, 2]),
        torch.tensor([4, 5]),
        torch.tensor([7, 8, 10, 14]),
        torch.tensor([10, 11, 12]),
        torch.tensor([13, 14, 15, 18]),
        torch.tensor([16, 17]),
    ]
    obs = torch.nested.as_nested_tensor(tensor_list, layout=torch.jagged)
    data_sources = ["abc", "def", "abc", "def", "pol", "klj"]
    non_tensor_dict = {"train_sample_kwargs": {"top_p": 1.0}, "val_sample_kwargs": {"top_p": 0.7}}
    data3 = tu.get_tensordict({"obs": obs, "data_sources": data_sources}, non_tensor_dict=non_tensor_dict)
    # ... even when the first row is a freshly allocated (but equal) tensor
    tensor_list[0] = torch.tensor([1, 2, 3, 3, 2])
    obs = torch.nested.as_nested_tensor(tensor_list, layout=torch.jagged)
    data4 = tu.get_tensordict({"obs": obs, "data_sources": data_sources}, non_tensor_dict=non_tensor_dict)
    tu.assert_tensordict_eq(data3, data4)
    # a row with different length/values must fail
    tensor_list[0] = torch.tensor([1, 2, 4])
    obs = torch.nested.as_nested_tensor(tensor_list, layout=torch.jagged)
    data5 = tu.get_tensordict({"obs": obs, "data_sources": data_sources}, non_tensor_dict=non_tensor_dict)
    with pytest.raises(AssertionError):
        tu.assert_tensordict_eq(data3, data5)
    # swapping the first two rows must fail even though the set of rows matches
    tensor_list[0] = torch.tensor([4, 5])
    tensor_list[1] = torch.tensor([1, 2, 3, 3, 2])
    obs = torch.nested.as_nested_tensor(tensor_list, layout=torch.jagged)
    data6 = tu.get_tensordict({"obs": obs, "data_sources": data_sources}, non_tensor_dict=non_tensor_dict)
    with pytest.raises(AssertionError):
        tu.assert_tensordict_eq(data3, data6)
def test_tensor_dict_make_iterator():
    """make_iterator yields mini-batches in order when shuffle is off, and is
    reproducible across iterators when seeded with shuffle on.

    Covers regular tensors, jagged (nested) tensors and NonTensorStack columns.
    Fix over the previous version: the unshuffled check used a manual counter,
    so an iterator that yielded too few batches passed silently; zip(strict=True)
    now enforces the exact batch count.
    """
    obs = torch.tensor([1, 2, 3, 4, 5, 6])
    input_ids = torch.nested.as_nested_tensor(
        [
            torch.tensor([0, 1]),
            torch.tensor([2]),
            torch.tensor([3, 4]),
            torch.tensor([5]),
            torch.tensor([6, 7, 8]),
            torch.tensor([9]),
        ],
        layout=torch.jagged,
    )
    data_sources = ["abc", "def", "abc", "def", "pol", "klj"]
    non_tensor_dict = {"train_sample_kwargs": {"top_p": 1.0}, "val_sample_kwargs": {"top_p": 0.7}}
    dataset = tu.get_tensordict(
        {"obs": obs, "data_sources": data_sources, "input_ids": input_ids}, non_tensor_dict=non_tensor_dict
    )
    # without shuffling, two epochs must replay the dataset in order, batch by batch
    dataloader = tu.make_iterator(
        dataset, mini_batch_size=2, epochs=2, seed=0, dataloader_kwargs={"shuffle": False, "drop_last": False}
    )
    expected_tensor_dict = [
        tu.index_select_tensor_dict(dataset, indices=list(range(start, start + 2)))
        for _ in range(2)  # two epochs
        for start in (0, 2, 4)
    ]
    # strict=True also verifies the iterator yields exactly len(expected_tensor_dict) batches
    for d, expected in zip(dataloader, expected_tensor_dict, strict=True):
        tu.assert_tensordict_eq(d, expected)
    # with shuffling, the same seed must produce the identical batch sequence
    data_list_1 = list(
        tu.make_iterator(dataset, mini_batch_size=3, epochs=1, seed=1, dataloader_kwargs={"shuffle": True})
    )
    data_list_2 = list(
        tu.make_iterator(dataset, mini_batch_size=3, epochs=1, seed=1, dataloader_kwargs={"shuffle": True})
    )
    for data1, data2 in zip(data_list_1, data_list_2, strict=True):
        tu.assert_tensordict_eq(data1, data2)
def test_reorder():
    """Integer-tensor indexing permutes tensor and NonTensorStack columns together."""
    td = tu.get_tensordict(
        tensor_dict={"obs": torch.tensor([1, 2, 3, 4, 5, 6]), "labels": ["a", "b", "c", "d", "e", "f"]},
        non_tensor_dict={"name": "abdce"},
    )
    permutation = torch.tensor([3, 4, 2, 0, 1, 5])
    reordered = td[permutation]
    assert torch.equal(reordered["obs"], torch.tensor([4, 5, 3, 1, 2, 6]))
    assert np.all(reordered["labels"] == np.array(["d", "e", "c", "a", "b", "f"]))
    # scalar NonTensorData is unaffected by reordering
    assert reordered["name"] == "abdce"
def test_chunk_concat():
    """tensor_split/chunk partition a tensordict and torch.cat reassembles it."""
    obs = torch.tensor([1, 2, 3, 4, 5, 6])
    labels = ["a", "b", "c", "d", "e", "f"]
    data = tu.get_tensordict({"obs": obs, "labels": labels}, non_tensor_dict={"name": "abcde"})
    # tensor_split front-loads the remainder when sections do not divide evenly
    expected_indices = ([0, 1], [2], [3], [4], [5])
    for piece, idx in zip(data.tensor_split(indices_or_sections=5, dim=0), expected_indices, strict=False):
        tu.assert_tensordict_eq(piece, data[idx])
    # chunk into two equal halves
    halves = data.chunk(2)
    assert len(halves) == 2
    expected_halves = [
        (torch.tensor([1, 2, 3]), ["a", "b", "c"]),
        (torch.tensor([4, 5, 6]), ["d", "e", "f"]),
    ]
    for half, (want_obs, want_labels) in zip(halves, expected_halves, strict=False):
        assert torch.equal(half["obs"], want_obs)
        assert np.all(half["labels"] == np.array(want_labels))
        assert half["name"] == "abcde"
    # concatenating the halves restores the original content
    rebuilt = torch.cat(halves, dim=0)
    assert torch.equal(rebuilt["obs"], data["obs"])
    assert np.all(rebuilt["labels"] == data["labels"])
    assert rebuilt["name"] == data["name"]
    # concat with conflicting NonTensorData keeps the first value
    parts = [
        tu.get_tensordict(tensor_dict={"obs": obs, "labels": labels}, non_tensor_dict={"name": name})
        for name in ("abcde", "def", "cfg")
    ]
    assert torch.cat(parts, dim=0)["name"] == "abcde"
def test_pop():
    """pop_keys/pop remove entries from the tensordict and return them."""
    obs = torch.randn(3, 10)
    act = torch.randn(3, 3)
    labels = ["a", ["b"], []]
    dataset = tu.get_tensordict({"obs": obs, "act": act, "labels": labels}, non_tensor_dict={"2": 2, "1": 1})
    untouched = copy.deepcopy(dataset)
    # popping a mix of tensor and NonTensorData keys moves them into a new tensordict
    popped = tu.pop_keys(dataset, keys=["obs", "2"])
    assert popped.batch_size[0] == 3
    assert popped.keys() == {"obs", "2"}
    assert torch.equal(popped["obs"], obs)
    assert popped["2"] == 2
    assert dataset.keys() == {"act", "1", "labels"}
    # popping again must fail: the keys are gone
    with pytest.raises(KeyError):
        tu.pop_keys(dataset, keys=["obs", "2"])
    # single-key pop works for NonTensorData, NonTensorStack and plain tensors
    assert tu.pop(untouched, key="2") == 2
    assert tu.pop(untouched, key="labels") == ["a", ["b"], []]
    assert torch.equal(tu.pop(untouched, key="obs"), obs)
def test_get():
    """get_keys/get retrieve values without removing them, with optional defaults."""
    obs = torch.randn(3, 10)
    act = torch.randn(3, 3)
    labels = ["a", ["b"], []]
    dataset = tu.get_tensordict({"obs": obs, "act": act, "labels": labels}, non_tensor_dict={"2": 2, "1": 1})
    # fetching a subset keeps the batch size and the values
    subset = tu.get_keys(dataset, keys=["obs", "2"])
    assert subset.batch_size[0] == 3
    assert torch.equal(subset["obs"], dataset["obs"])
    assert subset["2"] == dataset["2"]
    # requesting a missing key raises
    with pytest.raises(KeyError):
        tu.get_keys(dataset, keys=["obs", "3"])
    # single-key get covers NonTensorData, NonTensorStack and tensors
    assert tu.get(dataset, key="2") == 2
    assert tu.get(dataset, key="labels") == ["a", ["b"], []]
    assert torch.equal(tu.get(dataset, key="obs"), obs)
    # a missing key falls back to the provided default
    assert tu.get(dataset, key="3", default=3) == 3
def test_repeat():
    """repeat_interleave duplicates adjacent rows; repeat tiles the whole batch."""
    obs = torch.tensor([[1, 2], [3, 4], [5, 6]])
    data = tu.get_tensordict({"obs": obs, "labels": ["a", "b", "c"]}, non_tensor_dict={"info": "test_info"})
    # element-wise repetition: a a b b c c
    interleaved = data.repeat_interleave(repeats=2)
    assert torch.equal(interleaved["obs"], torch.tensor([[1, 2], [1, 2], [3, 4], [3, 4], [5, 6], [5, 6]]))
    assert interleaved["labels"] == ["a", "a", "b", "b", "c", "c"]
    assert interleaved["info"] == "test_info"
    # whole-batch tiling: a b c a b c
    tiled = data.repeat(2)
    assert torch.equal(tiled["obs"], torch.tensor([[1, 2], [3, 4], [5, 6], [1, 2], [3, 4], [5, 6]]))
    assert tiled["labels"] == ["a", "b", "c", "a", "b", "c"]
    assert tiled["info"] == "test_info"
def test_dataproto_pad_unpad():
    """pad_to_divisor wraps rows from the front until the batch is a multiple of
    size_divisor, and unpad restores the original batch exactly.

    Covers divisors smaller than, equal to, and larger than the batch size.
    The previous version repeated the same verification block three times;
    it is now table-driven.
    """
    obs = torch.tensor([[1, 2], [3, 4], [5, 6]])
    labels = ["a", "b", "c"]
    data = tu.get_tensordict(tensor_dict={"obs": obs, "labels": labels}, non_tensor_dict={"info": "test_info"})
    # (size_divisor, expected pad size, expected padded rows as indices into the batch)
    cases = [
        (2, 1, [0, 1, 2, 0]),
        (3, 0, [0, 1, 2]),
        (7, 4, [0, 1, 2, 0, 1, 2, 0]),
    ]
    for size_divisor, expected_pad, row_indices in cases:
        padded_data, pad_size = tu.pad_to_divisor(data, size_divisor=size_divisor)
        assert pad_size == expected_pad, f"size_divisor={size_divisor}"
        assert torch.equal(padded_data["obs"], obs[row_indices])
        assert padded_data["labels"] == [labels[i] for i in row_indices]
        # NonTensorData is untouched by padding
        assert padded_data["info"] == "test_info"
        # unpad must restore the original batch
        unpadded = tu.unpad(padded_data, pad_size=pad_size)
        assert torch.equal(unpadded["obs"], obs)
        assert unpadded["labels"] == labels
        assert unpadded["info"] == "test_info"
def test_torch_save_data_proto():
    """A tensordict with tensor, NonTensorStack and NonTensorData entries
    round-trips through torch.save/torch.load.

    Fix: the previous version wrote `test_data.pt` into the current working
    directory and leaked it whenever an assertion failed before `os.remove`;
    a temporary directory now guarantees cleanup.
    """
    import os
    import tempfile

    obs = torch.tensor([[1, 2], [3, 4], [5, 6]])
    labels = ["a", "b", "c"]
    data = tu.get_tensordict({"obs": obs, "labels": labels}, non_tensor_dict={"info": "test_info"})
    with tempfile.TemporaryDirectory() as tmpdir:
        filename = os.path.join(tmpdir, "test_data.pt")
        torch.save(data, filename)
        # weights_only=False: tensordict pickles non-tensor python objects
        loaded_data = torch.load(filename, weights_only=False)
    assert torch.equal(loaded_data["obs"], data["obs"])
    assert loaded_data["labels"] == data["labels"]
    assert loaded_data["info"] == data["info"]
def test_len():
    """len() reflects the leading batch dimension, including batch-less items."""
    labels = ["a", "b", "c"]
    with_tensor = tu.get_tensordict(
        {"obs": torch.tensor([[1, 2], [3, 4], [5, 6]]), "labels": labels},
        non_tensor_dict={"info": "test_info"},
    )
    assert len(with_tensor) == 3
    # NonTensorStack alone still defines a batch dimension
    only_stack = tu.get_tensordict({"labels": labels}, non_tensor_dict={"info": "test_info"})
    assert len(only_stack) == 3
    # a single item has an empty batch size
    assert len(only_stack[0]) == 0
    # NonTensorData alone has no batch dimension at all
    empty = tu.get_tensordict({}, non_tensor_dict={"info": "test_info"})
    assert len(empty) == 0
def test_dataproto_index():
    """Indexing a tensordict with integer (numpy / torch / list) and boolean
    torch indices selects matching rows from tensors and NonTensorStacks alike.

    NOTE(review): numpy bool arrays and python bool lists used to be covered by
    commented-out cases here — they are currently unsupported as indices;
    re-add coverage once tensordict supports them.
    """
    data_len = 100
    idx_num = 10
    obs = torch.randn(data_len, 10)
    labels = [random.choice(["abc", "cde"]) for _ in range(data_len)]
    data = tu.get_tensordict({"obs": obs, "labels": labels})
    labels_np = np.array(labels)

    def _check_rows(result, row_idx):
        # selected rows must match the source in keys, size and content
        assert result.keys() == data.keys()
        assert result["obs"].shape[0] == len(row_idx)
        assert len(result["labels"]) == len(row_idx)
        assert np.array_equal(result["obs"].cpu().numpy(), obs[row_idx].cpu().numpy())
        assert np.array_equal(result["labels"], labels_np[row_idx])

    # integer indices in three flavors
    idx_np_int = np.random.randint(0, data_len, size=(idx_num,))
    _check_rows(data[idx_np_int], idx_np_int)
    idx_torch_int = torch.randint(0, data_len, size=(idx_num,))
    _check_rows(data[idx_torch_int], idx_torch_int.cpu().numpy())
    idx_list_int = [np.random.randint(0, data_len) for _ in range(idx_num)]
    _check_rows(data[idx_list_int], idx_list_int)

    # boolean torch mask: size equals the mask's population count
    idx_torch_bool = torch.randint(0, 2, size=(data_len,), dtype=torch.bool)
    result_torch_bool = data[idx_torch_bool]
    assert result_torch_bool.keys() == data.keys()
    assert result_torch_bool["obs"].shape[0] == idx_torch_bool.sum().item()
    assert len(result_torch_bool["labels"]) == idx_torch_bool.sum().item()
    assert np.array_equal(result_torch_bool["obs"].cpu().numpy(), obs[idx_torch_bool].cpu().numpy())
    assert np.array_equal(result_torch_bool["labels"], labels_np[idx_torch_bool])
def test_select():
    """select keeps only the requested tensor and non-tensor keys."""
    dataset = tu.get_tensordict(
        {"obs": torch.randn(100, 10), "act": torch.randn(100, 3)}, non_tensor_dict={"2": 2, "1": 1}
    )
    subset = dataset.select("obs", "2")
    assert torch.equal(subset["obs"], dataset["obs"])
    assert subset["2"] == dataset["2"]
    # everything not selected is absent
    for dropped in ("act", "1"):
        assert dropped not in subset.keys()
def test_dataproto_no_batch():
    """select and pop_keys work on tensordicts that hold no regular tensors."""
    labels = ["a", "b", "c"]
    data = tu.get_tensordict(tensor_dict={"labels": labels}, non_tensor_dict={"info": "test_info"})
    assert data.select("labels")["labels"] == labels
    popped = tu.pop_keys(data, keys=["labels"])
    assert popped["labels"] == labels
    # pop removes the key from the source tensordict
    assert "labels" not in data
def test_sample_level_repeat():
    """repeat_interleave with a per-sample repeat tensor expands each row its own
    number of times, for tensors, NonTensorStacks and NonTensorData alike."""
    obs = torch.tensor([[1, 2], [3, 4], [5, 6]])
    labels = ["a", "b", "c"]
    data = tu.get_tensordict({"obs": obs, "labels": labels}, non_tensor_dict={"info": "test_info"})
    # (per-sample repeats, expected output expressed as source-row indices)
    cases = [
        (torch.tensor([3, 1, 2]), [0, 0, 0, 1, 2, 2]),
        (torch.tensor([1, 2, 3]), [0, 1, 1, 2, 2, 2]),
    ]
    for repeats, expanded_rows in cases:
        repeated = data.repeat_interleave(repeats=repeats)
        assert torch.equal(repeated["obs"], obs[expanded_rows])
        assert repeated["labels"] == [labels[i] for i in expanded_rows]
        assert repeated["info"] == "test_info"
def test_dataproto_chunk_after_index():
    """After indexing with any supported mask/index type, batch_size stays a
    well-formed torch.Size of plain ints (so subsequent chunking works).

    The previous version spelled out six cases, two of which were byte-for-byte
    duplicates (the torch bool/int masks appeared twice); they are now
    table-driven with one entry per distinct index type.
    """
    data_len = 4
    obs = torch.randn(data_len, 4)
    labels = [f"label_{i}" for i in range(data_len)]
    data = tu.get_tensordict(tensor_dict={"obs": obs, "labels": labels}, non_tensor_dict={"name": "abc"})
    indexers = [
        torch.tensor([True, False, True, False]),  # boolean tensor mask
        torch.tensor([0, 2]),  # integer tensor index
        [True, False, True, False],  # boolean list mask
        [0, 2],  # integer list index
    ]
    for indexer in indexers:
        selected = data[indexer]
        assert isinstance(selected.batch_size, torch.Size)
        assert all(isinstance(dim, int) for dim in selected.batch_size)
def test_concat_nested_tensor():
    """concat_nested_tensors joins jagged batches along dim 0 while preserving
    per-sample shapes, for components of rank 1, 2 and 3."""
    # rank-1 components (variable length): concatenated flat values must equal
    # the concatenation of all component rows in order
    vocab_size = 128
    a = torch.randint(low=0, high=vocab_size, size=(11,))
    b = torch.randint(low=0, high=vocab_size, size=(13,))
    c = torch.randint(low=0, high=vocab_size, size=(12,))
    d = torch.randint(low=0, high=vocab_size, size=(15,))
    nested_a_b = torch.nested.as_nested_tensor([a, b], layout=torch.jagged)
    nested_c_d = torch.nested.as_nested_tensor([c, d], layout=torch.jagged)
    output = tu.concat_nested_tensors([nested_a_b, nested_c_d])
    output_values = output.values()
    expected = torch.cat([a, b, c, d], dim=0)
    assert torch.all(torch.eq(output_values, expected)).item()
    # rank-2 components (fixed leading dim, jagged trailing dim): verify via
    # unbind that each sample round-trips unchanged
    a_3d = torch.randint(low=0, high=vocab_size, size=(4, 4))
    b_3d = torch.randint(low=0, high=vocab_size, size=(4, 5))
    c_3d = torch.randint(low=0, high=vocab_size, size=(4, 6))
    d_3d = torch.randint(low=0, high=vocab_size, size=(4, 7))
    nested_a_b_3d = torch.nested.as_nested_tensor([a_3d, b_3d], layout=torch.jagged)
    nested_c_d_3d = torch.nested.as_nested_tensor([c_3d, d_3d], layout=torch.jagged)
    output_3d = tu.concat_nested_tensors([nested_a_b_3d, nested_c_d_3d])
    assert output_3d.shape[0] == 4
    output_3d_unbind = output_3d.unbind(0)
    assert torch.all(torch.eq(output_3d_unbind[0], a_3d)).item()
    assert torch.all(torch.eq(output_3d_unbind[1], b_3d)).item()
    assert torch.all(torch.eq(output_3d_unbind[2], c_3d)).item()
    assert torch.all(torch.eq(output_3d_unbind[3], d_3d)).item()
    # rank-3 components: same round-trip check one rank higher
    a_4d = torch.randint(low=0, high=vocab_size, size=(2, 3, 4))
    b_4d = torch.randint(low=0, high=vocab_size, size=(2, 3, 5))
    c_4d = torch.randint(low=0, high=vocab_size, size=(2, 3, 3))
    d_4d = torch.randint(low=0, high=vocab_size, size=(2, 3, 6))
    nested_a_b_4d = torch.nested.as_nested_tensor([a_4d, b_4d], layout=torch.jagged)
    nested_c_d_4d = torch.nested.as_nested_tensor([c_4d, d_4d], layout=torch.jagged)
    output_4d = tu.concat_nested_tensors([nested_a_b_4d, nested_c_d_4d])
    assert output_4d.shape[0] == 4
    output_4d_unbind = output_4d.unbind(0)
    assert torch.all(torch.eq(output_4d_unbind[0], a_4d)).item()
    assert torch.all(torch.eq(output_4d_unbind[1], b_4d)).item()
    assert torch.all(torch.eq(output_4d_unbind[2], c_4d)).item()
    assert torch.all(torch.eq(output_4d_unbind[3], d_4d)).item()
def test_concat_tensordict():
    """concat_tensordict joins jagged tensors, NonTensorStacks and NonTensorData
    without mutating its inputs; conflicting NonTensorData keeps the first value."""
    vocab = 128
    seqs = [torch.randint(low=0, high=vocab, size=(n,)) for n in (11, 13, 12, 15)]
    first_half = torch.nested.as_nested_tensor(seqs[:2], layout=torch.jagged)
    second_half = torch.nested.as_nested_tensor(seqs[2:], layout=torch.jagged)
    td_left = tu.get_tensordict(
        tensor_dict={"input_ids": first_half, "labels": ["a", "b"]}, non_tensor_dict={"temp": 1.0}
    )
    td_right = tu.get_tensordict(
        tensor_dict={"input_ids": second_half, "labels": ["c", "d"]}, non_tensor_dict={"temp": 2.0}
    )
    left_snapshot = copy.deepcopy(td_left)
    right_snapshot = copy.deepcopy(td_right)
    merged = tu.concat_tensordict([td_left, td_right])
    assert torch.equal(merged["input_ids"].values(), torch.cat(seqs))
    assert merged["labels"] == ["a", "b", "c", "d"]
    assert merged["temp"] == 1.0
    # inputs must be left untouched by the concat
    tu.assert_tensordict_eq(td_left, left_snapshot)
    tu.assert_tensordict_eq(td_right, right_snapshot)
    # NonTensorStack + NonTensorData only
    stack_left = tu.get_tensordict(tensor_dict={"labels": ["a", "b"]}, non_tensor_dict={"temp": 1.0})
    stack_right = tu.get_tensordict(tensor_dict={"labels": ["c", "d"]}, non_tensor_dict={"temp": 2.0})
    merged = tu.concat_tensordict([stack_left, stack_right])
    assert merged["labels"] == ["a", "b", "c", "d"]
    assert merged["temp"] == 1.0
    assert merged.batch_size[0] == 4
    # NonTensorData only: result carries no batch dimension
    meta_left = tu.get_tensordict(tensor_dict={}, non_tensor_dict={"temp": 1.0})
    meta_right = tu.get_tensordict(tensor_dict={}, non_tensor_dict={"temp": 2.0})
    merged = tu.concat_tensordict([meta_left, meta_right])
    assert len(merged.batch_size) == 0
    assert merged["temp"] == 1.0
def test_chunk_tensordict():
    """chunk_tensordict splits a batch of jagged tensors and stacked NonTensorData
    (e.g. optional multi-modal inputs) into equal chunks that preserve content."""
    # Qwen-VL 3d position_ids: per-sample (4, seq_len) with jagged seq_len
    position_ids = torch.nested.as_nested_tensor(
        [
            torch.arange(4).expand(4, 4),
            torch.arange(5).expand(4, 5),
            torch.arange(6).expand(4, 6),
            torch.arange(7).expand(4, 7),
        ],
        layout=torch.jagged,
    )
    input_ids = torch.nested.as_nested_tensor(
        [torch.arange(4), torch.arange(5), torch.arange(6), torch.arange(7)], layout=torch.jagged
    )
    attention_mask = torch.nested.as_nested_tensor(
        [
            torch.randint(low=0, high=2, size=[3, 4]),
            torch.randint(low=0, high=2, size=[3, 5]),
            torch.randint(low=0, high=2, size=[3, 6]),
            torch.randint(low=0, high=2, size=[3, 7]),
        ],
        layout=torch.jagged,
    )
    # per-sample dict payloads; None marks a text-only sample with no images
    multi_modal_inputs = torch.stack(
        [
            NonTensorData({"pixel_values": torch.randn(3, 224, 224)}),
            NonTensorData(None),
            NonTensorData({"pixel_values": torch.randn(3, 128, 128)}),
            NonTensorData({"pixel_values": torch.randn(3, 128, 128)}),
        ]
    )
    td = tu.get_tensordict(
        {
            "input_ids": input_ids,
            "position_ids": position_ids,
            "attention_mask": attention_mask,
            "multi_modal_inputs": multi_modal_inputs,
        },
    )
    assert len(td) == 4
    chunks = tu.chunk_tensordict(td, chunks=2)
    # chunk i must carry samples [2*i, 2*i+2) of every key
    for i, chunk in enumerate(chunks):
        assert len(chunk) == 2
        for key, val in chunk.items():
            if isinstance(val, torch.Tensor) and val.is_nested:
                # rebuild the expected jagged slice from the source and compare flat values
                tensors = td[key].unbind(dim=0)
                expected = torch.nested.as_nested_tensor(tensors[i * 2 : (i + 1) * 2], layout=torch.jagged)
                assert torch.all(torch.eq(val.values(), expected.values())).item()
            else:
                # stacked NonTensorData: compare payload dicts element-wise, None stays None
                expected = td[key][i * 2 : (i + 1) * 2]
                for tensor, expect in zip(val, expected, strict=False):
                    if tensor.data is None:
                        assert expect is None
                    else:
                        assert torch.all(torch.eq(tensor.data["pixel_values"], expect["pixel_values"])).item()
def test_assign_non_tensor_stack_with_nested_lists():
    """assign_non_tensor_stack stores ragged lists of lists (e.g. per-turn scores)."""
    td = tu.get_tensordict({"obs": torch.randn(3, 4)}, non_tensor_dict={})
    per_sample_scores = [[], [0.5, 0.8], [0.9]]
    tu.assign_non_tensor_stack(td, "turn_scores", per_sample_scores)
    # every entry round-trips with its original (possibly empty) contents
    assert len(td["turn_scores"]) == 3
    for idx, want in enumerate(per_sample_scores):
        assert list(td["turn_scores"][idx]) == want
def test_assign_non_tensor_stack_with_nested_dicts():
    """assign_non_tensor_stack stores a list of dicts (e.g. per-sample reward info)."""
    td = tu.get_tensordict({"obs": torch.randn(3, 4)}, non_tensor_dict={})
    infos = [{"acc": 1.0, "loss": 0.1}, {"acc": 0.0, "loss": 0.9}, {"acc": 1.0, "loss": 0.05}]
    tu.assign_non_tensor_stack(td, "reward_extra_info", infos)
    # every dict round-trips per sample
    assert len(td["reward_extra_info"]) == 3
    for idx, want in enumerate(infos):
        assert dict(td["reward_extra_info"][idx]) == want
def test_assign_non_tensor_stack_with_complex_nested():
    """assign_non_tensor_stack handles lists of lists of dicts (chat-style prompts)."""
    td = tu.get_tensordict({"obs": torch.randn(2, 4)}, non_tensor_dict={})
    chats = [
        [{"content": "Question 1", "role": "user"}],
        [{"content": "Question 2", "role": "user"}, {"content": "Answer 2", "role": "assistant"}],
    ]
    tu.assign_non_tensor_stack(td, "raw_prompt", chats)
    # per-sample conversation lengths and message contents survive the round trip
    assert len(td["raw_prompt"]) == 2
    assert len(td["raw_prompt"][0]) == 1
    assert dict(td["raw_prompt"][0][0]) == {"content": "Question 1", "role": "user"}
    assert len(td["raw_prompt"][1]) == 2
    assert dict(td["raw_prompt"][1][0]) == {"content": "Question 2", "role": "user"}
def test_assign_non_tensor_handles_wrappers():
    """assign_non_tensor accepts plain values, NonTensorData and NonTensorStack."""
    td = tu.get_tensordict({"obs": torch.randn(3, 4)}, non_tensor_dict={})
    # plain scalar via kwargs expansion
    tu.assign_non_tensor(td, **{"top_p": 0.8})
    assert td["top_p"] == 0.8
    # pre-wrapped values are unwrapped transparently
    pre_wrapped = NonTensorData(0.3)
    pre_stacked = NonTensorStack.from_list([NonTensorData(1.0), NonTensorData(2.0), NonTensorData(3.0)])
    tu.assign_non_tensor(td, wrapped=pre_wrapped, stack=pre_stacked)
    assert td["wrapped"] == 0.3
    assert td["stack"] == [1.0, 2.0, 3.0]
def test_assign_non_tensor_stack_batch_size_check():
    """A NonTensorStack whose length mismatches the batch size is rejected."""
    td = tu.get_tensordict({"obs": torch.randn(3, 4)}, non_tensor_dict={})
    # 2-element stack against a batch of 3 must fail
    too_short = NonTensorStack.from_list([NonTensorData(v) for v in (1.0, 2.0)])
    with pytest.raises(RuntimeError):
        tu.assign_non_tensor(td, stack=too_short)
def test_assign_non_tensor_with_auto_detection():
    """assign_non_tensor auto-detects scalars, ragged lists, dict lists and flat lists."""
    td = tu.get_tensordict({"obs": torch.randn(3, 4)}, non_tensor_dict={})
    payload = {
        "metadata": "experiment_1",  # scalar -> NonTensorData
        "turn_scores": [[], [0.5, 0.8], [0.9]],  # ragged list of lists
        "reward_extra_info": [{"acc": 1.0}, {"acc": 0.0}, {"acc": 1.0}],  # list of dicts
        "simple_list": ["a", "b", "c"],  # flat list also becomes a NonTensorStack
    }
    tu.assign_non_tensor(td, **payload)
    # all variants are readable afterwards
    assert td["metadata"] == "experiment_1"
    assert len(td["turn_scores"]) == 3
    assert list(td["turn_scores"][1]) == [0.5, 0.8]
    assert len(td["reward_extra_info"]) == 3
    assert dict(td["reward_extra_info"][0]) == {"acc": 1.0}
    assert len(td["simple_list"]) == 3
    assert td["simple_list"][0] == "a"
def test_get_tensordict_with_nested_lists():
    """get_tensordict converts ragged lists of lists into NonTensorStacks automatically."""
    obs = torch.randn(3, 4)
    scores = [[], [0.5, 0.8], [0.9]]
    td = tu.get_tensordict({"obs": obs, "turn_scores": scores})
    # tensors and nested data coexist
    assert torch.equal(td["obs"], obs)
    assert len(td["turn_scores"]) == 3
    assert list(td["turn_scores"][0]) == []
    assert list(td["turn_scores"][1]) == [0.5, 0.8]
def test_get_tensordict_with_nested_dicts():
    """get_tensordict converts a list of dicts into a NonTensorStack automatically."""
    obs = torch.randn(3, 4)
    infos = [{"acc": 1.0}, {"acc": 0.0}, {"acc": 1.0}]
    td = tu.get_tensordict({"obs": obs, "reward_extra_info": infos})
    assert torch.equal(td["obs"], obs)
    assert len(td["reward_extra_info"]) == 3
    assert dict(td["reward_extra_info"][0]) == {"acc": 1.0}
def test_get_tensordict_with_complex_nested_structures():
    """get_tensordict handles lists of lists of dicts (chat transcripts)."""
    obs = torch.randn(2, 4)
    chats = [
        [{"content": "Q1", "role": "user"}],
        [{"content": "Q2", "role": "user"}, {"content": "A2", "role": "assistant"}],
    ]
    td = tu.get_tensordict({"obs": obs, "raw_prompt": chats})
    assert torch.equal(td["obs"], obs)
    assert len(td["raw_prompt"]) == 2
    assert dict(td["raw_prompt"][0][0]) == {"content": "Q1", "role": "user"}
def test_get_tensordict_agent_loop_scenario():
    """End-to-end agent-loop payload: tensors plus every nested non-tensor shape.

    Mirrors what agent loops actually produce:
    - turn_scores / tool_rewards: ragged lists of lists
    - reward_extra_info: list of dicts
    - raw_prompt: list of chat-message lists
    """
    prompts = torch.randn(2, 10)
    responses = torch.randn(2, 5)
    data_source = ["lighteval/MATH", "lighteval/MATH"]
    uid = ["uuid-1", "uuid-2"]
    turn_scores = [[], [0.5, 0.8]]
    reward_extra_info = [{"acc": 1.0, "loss": 0.1}, {"acc": 0.0, "loss": 0.9}]
    raw_prompt = [
        [{"content": "Compute 4 @ 2", "role": "user"}],
        [{"content": "Compute 8 @ 7", "role": "user"}],
    ]
    tool_rewards = [[0.0], []]
    # all nested structures are handled without explicit wrapping
    td = tu.get_tensordict(
        tensor_dict={
            "prompts": prompts,
            "responses": responses,
            "data_source": data_source,
            "uid": uid,
            "turn_scores": turn_scores,
            "reward_extra_info": reward_extra_info,
            "raw_prompt": raw_prompt,
            "tool_rewards": tool_rewards,
        },
        non_tensor_dict={"global_steps": 42},
    )
    # tensors round-trip exactly
    for key, expected in (("prompts", prompts), ("responses", responses)):
        assert torch.equal(td[key], expected)
    # flat string lists round-trip as NonTensorStacks
    for key, expected in (("data_source", data_source), ("uid", uid)):
        assert td[key] == expected
    # ragged / nested structures keep their per-sample shapes
    assert len(td["turn_scores"]) == 2
    assert list(td["turn_scores"][0]) == []
    assert list(td["turn_scores"][1]) == [0.5, 0.8]
    assert len(td["reward_extra_info"]) == 2
    assert dict(td["reward_extra_info"][0]) == {"acc": 1.0, "loss": 0.1}
    assert len(td["raw_prompt"]) == 2
    assert dict(td["raw_prompt"][0][0]) == {"content": "Compute 4 @ 2", "role": "user"}
    assert len(td["tool_rewards"]) == 2
    assert list(td["tool_rewards"][0]) == [0.0]
    assert list(td["tool_rewards"][1]) == []
    # scalar metadata stays NonTensorData
    assert td["global_steps"] == 42
def test_contiguous():
    """tu.contiguous() must yield an equal tensordict whose consolidate() works.

    Builds a tensordict mixing: a contiguous tensor, a non-contiguous tensor
    view, a jagged nested tensor, a batched list-of-dicts column holding
    tensors, and a NonTensorData holding a tensor. consolidate() must fail on
    the original (because of the non-contiguous view) and succeed after
    tu.contiguous(), without changing any values.
    """
    a = torch.randn(3, 4)  # contiguous tensor
    b = torch.randn(3, 4)[:, :-1]  # non contiguous tensor (column-sliced view)
    c = torch.nested.as_nested_tensor([torch.randn(3), torch.randn(4), torch.randn(5)], layout=torch.jagged)
    d = torch.randn(10, 12)
    e = torch.randn(11, 12)
    f = torch.randn(13, 12)
    data = tu.get_tensordict(
        tensor_dict={"a": a, "b": b, "c": c, "nt": [{"pixel": d}, {"pixel": e}, {"pixel": f}]},
        non_tensor_dict={"ntd": a.clone()},
    )
    with pytest.raises(RuntimeError):
        # b is not contiguous, so consolidation must fail on the original
        data.consolidate()
    # Deep-copy so `data` stays pristine as the reference for equality checks.
    data1 = copy.deepcopy(data)
    data_cont = tu.contiguous(data1)
    tu.assert_tensordict_eq(data_cont, data)
    # After contiguous(), consolidation succeeds and the values are unchanged.
    data_cont.consolidate()
    tu.assert_tensordict_eq(data_cont, data)
| {
"repo_id": "verl-project/verl",
"file_path": "tests/test_protocol_v2_on_cpu.py",
"license": "Apache License 2.0",
"lines": 815,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:verl/utils/tensordict_utils.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Any, Iterable
import torch
from tensordict import TensorDict
from tensordict.tensorclass import NonTensorData, NonTensorStack
def assign_non_tensor_data(tensor_dict: TensorDict, key, val):
    """Store one non-tensor value in a TensorDict under ``key``.

    The value is wrapped in NonTensorData so that scalar metadata (strings,
    ints, dicts, ...) can live next to tensors inside the TensorDict.

    Args:
        tensor_dict: Target TensorDict to assign into.
        key: Key under which the value is stored.
        val: Arbitrary non-tensor payload (e.g., string, int, dict).

    Raises:
        AssertionError: If ``tensor_dict`` is not a TensorDict.

    Example:
        >>> td = TensorDict({"obs": torch.randn(3, 4)}, batch_size=[3])
        >>> assign_non_tensor_data(td, "experiment_name", "run_001")
    """
    assert isinstance(tensor_dict, TensorDict), "input dict must be a TensorDict"
    tensor_dict[key] = NonTensorData(val)
def assign_non_tensor_stack(tensor_dict: TensorDict, key, val: list):
    """Store a list with potentially nested structures in a TensorDict.

    Handles complex nested payloads such as:
    - Lists of lists: [[], [0.5, 0.8], [0.9]]
    - Lists of dicts: [{"acc": 1.0}, {"acc": 0.0}]
    - Lists of lists of dicts: [[{"content": "...", "role": "user"}]]

    Each element is wrapped in NonTensorData and the whole list becomes a
    NonTensorStack, so TensorDict treats it as a batched non-tensor column.

    Args:
        tensor_dict: The TensorDict to assign to.
        key: The key to assign the value under.
        val: A list containing potentially nested structures.

    Example:
        >>> td = TensorDict({}, batch_size=[])
        >>> assign_non_tensor_stack(td, "turn_scores", [[], [0.5, 0.8], [0.9]])
    """
    assert isinstance(tensor_dict, TensorDict), "input dict must be a TensorDict"
    # TODO(petersh6): can convert back to val directly if we are not accessing
    # .data from the NonTensorStack
    wrapped = [NonTensorData(item) for item in val]
    tensor_dict[key] = NonTensorStack.from_list(wrapped)
def assign_non_tensor(tensor_dict: TensorDict, **kwargs):
    """Assign non-tensor data to a TensorDict, dispatching by value type.

    Pre-wrapped values are stored verbatim; lists become NonTensorStack
    (supporting nested lists/dicts); everything else is stored as a scalar
    NonTensorData entry.

    Args:
        tensor_dict: The TensorDict to assign to (modified in place).
        **kwargs: Key-value pairs where each value can be:
            - a NonTensorData / NonTensorStack (stored as-is)
            - a list, possibly nested (stored as NonTensorStack)
            - any other value (stored as NonTensorData)

    Returns:
        The same ``tensor_dict``, for call chaining.

    Example:
        >>> td = TensorDict({"obs": torch.randn(3, 4)}, batch_size=[3])
        >>> assign_non_tensor(
        ...     tensor_dict=td,
        ...     metadata="experiment_1",            # simple value
        ...     turn_scores=[[], [0.5, 0.8], [0.9]]  # nested list
        ... )
    """
    assert isinstance(tensor_dict, TensorDict), "input dict must be a TensorDict"
    for name, value in kwargs.items():
        if isinstance(value, (NonTensorData, NonTensorStack)):
            # Already wrapped: store verbatim.
            tensor_dict[name] = value
        elif isinstance(value, list):
            # Lists (possibly nested) become a NonTensorStack.
            assign_non_tensor_stack(tensor_dict=tensor_dict, key=name, val=value)
        else:
            # Everything else is scalar-like metadata.
            assign_non_tensor_data(tensor_dict=tensor_dict, key=name, val=value)
    return tensor_dict
def unwrap_non_tensor_data(data):
    """Return the payload of a NonTensorData, or the input unchanged.

    Args:
        data: Either a NonTensorData wrapper or any other value.

    Returns:
        ``data.data`` when ``data`` is a NonTensorData; otherwise ``data``
        itself, untouched.

    Example:
        >>> unwrap_non_tensor_data(NonTensorData("hello"))
        'hello'
        >>> unwrap_non_tensor_data(42)  # non-wrapped value passes through
        42
    """
    return data.data if isinstance(data, NonTensorData) else data
def get_non_tensor_data(data: TensorDict, key: str, default):
    """Fetch ``key`` from a TensorDict and unwrap NonTensorData if present.

    Args:
        data: The TensorDict to retrieve from.
        key: The key to look up.
        default: Value returned when the key is absent.

    Returns:
        The unwrapped value when the entry is a NonTensorData, the raw value
        when it is not wrapped, or ``default`` when the key is missing.

    Example:
        >>> td = TensorDict({}, batch_size=[])
        >>> assign_non_tensor_data(td, "config", {"lr": 0.01})
        >>> get_non_tensor_data(td, "config", None)
        {'lr': 0.01}
        >>> get_non_tensor_data(td, "missing", "default_value")
        'default_value'
    """
    return unwrap_non_tensor_data(data.get(key, default))
def concat_nested_tensors(tensors: list[torch.Tensor]) -> torch.Tensor:
"""Concatenate multiple nested tensors along the batch dimension.
Takes a list of nested tensors with jagged layout and concatenates them
into a single nested tensor. Each input tensor must have 2 or more dimensions and be contiguous.
Args:
tensors: List of nested tensors to concatenate. All tensors must
be nested, contiguous, and have 2 or more dimensions.
Returns:
A new nested tensor with jagged layout containing all rows from
the input tensors concatenated along dimension 0.
Raises:
AssertionError: If any tensor is not nested, not contiguous, or
doesn't have 2 or more dimensions.
Example:
>>> t1 = torch.nested.as_nested_tensor([torch.randn(3), torch.randn(5)], layout=torch.jagged)
>>> t2 = torch.nested.as_nested_tensor([torch.randn(2), torch.randn(4)], layout=torch.jagged)
>>> result = concat_nested_tensors([t1, t2])
>>> # result contains 4 rows: lengths [3, 5, 2, 4]
"""
for tensor in tensors:
assert tensor.is_nested and tensor.is_contiguous()
unbind_tensors = []
for tensor in tensors:
assert len(tensor.shape) >= 2, f"nested tensor must have 2 or more dimensions. Got {tensor.shape}"
unbind_tensor = tensor.unbind(0)
unbind_tensors.extend(list(unbind_tensor))
tensor = torch.nested.as_nested_tensor(unbind_tensors, layout=torch.jagged)
return tensor
def concat_tensordict_with_none_bsz(data: list[TensorDict]):
    """Concatenate metadata-only TensorDicts (empty batch_size).

    TensorDicts with ``batch_size == []`` carry only NonTensorData, so
    "concatenation" degenerates to keeping the first instance.

    Args:
        data: List of TensorDicts, each with empty batch_size.

    Returns:
        The first TensorDict in ``data``.

    Raises:
        AssertionError: If any TensorDict has a non-empty batch_size.

    Note:
        Used internally by ``concat_tensordict`` for metadata-only inputs.
    """
    assert all(len(td.batch_size) == 0 for td in data)
    # Metadata is assumed identical across entries; keep the first one.
    return data[0]
def concat_tensordict(data: list[TensorDict]) -> TensorDict:
    """Concatenate multiple TensorDicts along dimension zero.

    Combines a list of TensorDicts into a single TensorDict by concatenating
    all tensors along the batch dimension (dim=0). Handles nested tensors
    specially by unbinding and rebinding them.

    Args:
        data: List of TensorDicts to concatenate. All TensorDicts must have
            the same keys and the same set of nested tensor keys.

    Returns:
        A new TensorDict containing concatenated tensors from all inputs.

    Raises:
        AssertionError: If data is empty or if TensorDicts have inconsistent
            nested tensor keys.

    Note:
        - Metadata-only TensorDicts (empty batch_size) return the first one
        - Nested tensors are handled specially via concat_nested_tensors
        - Regular tensors use TensorDict.cat for efficient concatenation
    """
    assert len(data) > 0, "Must have at least one tensordict"

    # Find nested tensor keys from the first tensordict; the remaining
    # tensordicts are validated against this set below.
    nested_tensor_keys = {key for key, value in data[0].items() if isinstance(value, torch.Tensor) and value.is_nested}

    if not nested_tensor_keys:
        # If batch size is empty (the tensordicts only contain NonTensorData),
        # concatenation degenerates to returning the first metadata dict.
        if len(data[0].batch_size) == 0:
            return concat_tensordict_with_none_bsz(data)
        # No nested tensors: TensorDict.cat handles everything directly.
        return TensorDict.cat(data, dim=0)

    # Create a list of tensordicts containing only non-nested tensors for concatenation
    regular_tds = []
    for td in data:
        current_nested_keys = {k for k, v in td.items() if isinstance(v, torch.Tensor) and v.is_nested}
        assert current_nested_keys == nested_tensor_keys, "All tensordicts must have the same set of nested tensors."
        # Create a new TensorDict with non-nested items without modifying the original
        regular_items = {k: v for k, v in td.items() if k not in nested_tensor_keys}
        regular_tds.append(TensorDict(regular_items, batch_size=td.batch_size, device=td.device))

    # Concatenate the regular tensordicts
    output = TensorDict.cat(regular_tds, dim=0)

    # Concatenate and add nested tensors to the output
    for key in nested_tensor_keys:
        nested_tensors_to_concat = [td[key] for td in data]
        output[key] = concat_nested_tensors(nested_tensors_to_concat)

    return output
def chunk_tensordict(td: TensorDict, chunks: int) -> list[TensorDict]:
    """Split a TensorDict into equal-sized chunks with special nested tensor handling.

    Divides a TensorDict into the specified number of chunks along the batch
    dimension. Handles 3D+ nested tensors specially since torch.chunk() doesn't
    support jagged tensors with 3 or more dimensions.

    Args:
        td: The TensorDict to split.
        chunks: Number of chunks to create. Must evenly divide len(td).

    Returns:
        List of TensorDicts, each containing a portion of the original data.

    Raises:
        AssertionError: If td is not a TensorDict or if its length is not
            evenly divisible by chunks.

    Note:
        This is a workaround for PyTorch issue #153238 where torch.chunk()
        doesn't support 3D jagged tensors (e.g., MRoPE position_ids).
        See: https://github.com/pytorch/pytorch/issues/153238
    """
    assert isinstance(td, TensorDict) and len(td) % chunks == 0, (
        f"expecting td with length divisible by chunks, but got {len(td)} and {chunks}"
    )
    chunk_size = len(td) // chunks
    # 3D+ jagged tensors can't go through TensorDict.chunk (PyTorch #153238),
    # so they are split off here and re-attached chunk by chunk below.
    keys = {key for key, val in td.items() if isinstance(val, torch.Tensor) and val.is_nested and val.dim() >= 3}
    new_td = TensorDict({k: v for k, v in td.items() if k not in keys}, batch_size=td.batch_size, device=td.device)
    tds = new_td.chunk(chunks=chunks)
    for key in keys:
        # Unbind the jagged tensor once, then rebuild one jagged tensor per chunk.
        tensors = td[key].unbind(dim=0)
        for i, chunk_td in enumerate(tds):
            chunk_td[key] = torch.nested.as_nested_tensor(
                tensors[i * chunk_size : (i + 1) * chunk_size], layout=torch.jagged
            )
    return tds
def get_tensordict(tensor_dict: dict[str, torch.Tensor | list], non_tensor_dict: dict = None) -> TensorDict:
    """Create a TensorDict from tensors and non-tensor data.

    Automatically handles nested structures in lists by converting them to NonTensorStack.
    This enables support for:
    - Lists of lists: [[], [0.5, 0.8], [0.9]]
    - Lists of dicts: [{"acc": 1.0}, {"acc": 0.0}]
    - Lists of lists of dicts: [[{"content": "...", "role": "user"}]]

    Args:
        tensor_dict: Dictionary of tensors and lists to include in the TensorDict.
            All entries must share the same leading (batch) dimension.
        non_tensor_dict: Dictionary of batch-independent metadata stored as
            NonTensorData. Keys must not collide with tensor_dict keys.

    Returns:
        TensorDict with proper handling of nested structures.

    Example:
        >>> td = get_tensordict(
        ...     tensor_dict={
        ...         "obs": torch.randn(3, 4),
        ...         "turn_scores": [[], [0.5, 0.8], [0.9]]  # Nested list
        ...     },
        ...     non_tensor_dict={"experiment": "test"}
        ... )
    """
    # Shallow copy so the list -> NonTensorStack conversion below doesn't
    # mutate the caller's dictionary.
    tensor_dict = tensor_dict.copy()
    if non_tensor_dict is None:
        non_tensor_dict = {}

    # Inferred from the first batched entry; every later entry must match.
    batch_size = None

    for key, val in tensor_dict.items():
        if isinstance(val, torch.Tensor) and val.is_nested:
            assert val.is_contiguous(), "Nested tensors must be contiguous. Try setting layout=torch.jagged"
            assert val.layout == torch.jagged, "Nested tensors must be jagged."

        # Skip validation for NonTensorStack as it's already properly formatted
        if isinstance(val, NonTensorStack):
            if batch_size is None:
                batch_size = len(val)
            else:
                assert len(val) == batch_size, (
                    f"Batch size of NonTensorStack {key} is not consistent with other tensors. "
                    f"Expected {batch_size}, got {len(val)}"
                )
            continue

        if isinstance(val, list):
            for v in val:
                assert not isinstance(v, torch.Tensor), (
                    "Passing a list makes the data NonTensorStack, "
                    "which doesn't support torch.Tensor. Please convert to numpy first"
                )
            # Convert to NonTensorStack to handle nested structures
            tensor_dict[key] = NonTensorStack.from_list([NonTensorData(item) for item in val])

        assert isinstance(val, torch.Tensor | list)

        # Note: `val` (the original tensor/list, not the converted stack) is
        # what carries the batch dimension used for the consistency check.
        if batch_size is None:
            batch_size = val.size(0) if isinstance(val, torch.Tensor) else len(val)
        else:
            val_batch_size = val.size(0) if isinstance(val, torch.Tensor) else len(val)
            assert val_batch_size == batch_size, (
                f"Batch size of tensor {key} is not consistent with other tensors. "
                f"Expected {batch_size}, got {val_batch_size}"
            )

    # No batched entries at all -> scalar (empty) batch size.
    if batch_size is None:
        batch_size = []
    else:
        batch_size = [batch_size]

    for key, val in non_tensor_dict.items():
        assert key not in tensor_dict
        tensor_dict[key] = NonTensorData(val)

    return TensorDict(source=tensor_dict, batch_size=batch_size)
def index_select_tensor_dict(batch: TensorDict, indices: torch.Tensor | list[int]) -> TensorDict:
    """Select rows from a TensorDict using indices.

    Builds a new TensorDict containing only the rows named by ``indices``.
    Regular tensors are indexed directly; nested tensors are unbound,
    gathered, and rebound; NonTensorStack is indexed along the batch dim;
    NonTensorData (scalar metadata) is carried over unchanged.

    Args:
        batch: The TensorDict to index into. May be None.
        indices: 1D tensor or list of ints naming the rows to keep.

    Returns:
        A new TensorDict with the selected rows, or None when batch is None.

    Raises:
        AssertionError: If indices is not 1-dimensional.
    """
    if isinstance(indices, list):
        indices = torch.tensor(indices)
    assert indices.dim() == 1, "indices must be a 1D tensor"

    if batch is None:
        return None

    selected = {}
    for key, value in batch.items():
        if isinstance(value, torch.Tensor):
            if value.is_nested:
                # Unbind once up-front for performance, then gather rows.
                rows = value.unbind()
                selected[key] = torch.nested.as_nested_tensor(
                    [rows[idx] for idx in indices], layout=torch.jagged
                )
            else:
                selected[key] = value[indices]
        elif value.shape:
            # NonTensorStack: indexable along the batch dimension.
            selected[key] = value[indices]
        else:
            # NonTensorData: scalar metadata, preserved unchanged.
            selected[key] = value

    return TensorDict(source=selected, batch_size=indices.shape[0])
def union_tensor_dict(tensor_dict1: TensorDict, tensor_dict2: TensorDict) -> TensorDict:
    """Merge two TensorDicts, adding keys from the second to the first.

    Performs an in-place union of two TensorDicts. Keys from tensor_dict2
    that don't exist in tensor_dict1 are added. Keys that exist in both
    must have equal values.

    Args:
        tensor_dict1: The base TensorDict to merge into (modified in-place).
        tensor_dict2: The TensorDict whose keys will be added to tensor_dict1.

    Returns:
        The modified tensor_dict1 containing the union of both TensorDicts.

    Raises:
        AssertionError: If batch sizes don't match, or if a key exists in
            both TensorDicts with different values.

    Example:
        >>> td1 = TensorDict({"a": torch.tensor([1, 2])}, batch_size=[2])
        >>> td2 = TensorDict({"b": torch.tensor([3, 4])}, batch_size=[2])
        >>> result = union_tensor_dict(td1, td2)
        >>> list(result.keys())
        ['a', 'b']
    """
    assert tensor_dict1.batch_size == tensor_dict2.batch_size, (
        f"Two tensor dict must have identical batch size. Got {tensor_dict1.batch_size} and {tensor_dict2.batch_size}"
    )
    for key in tensor_dict2.keys():
        if key not in tensor_dict1.keys():
            # Use .get() rather than [] so wrapped non-tensor entries are
            # transferred as-is instead of being unwrapped first.
            tensor_dict1[key] = tensor_dict2.get(key)
        else:
            # Duplicated keys must agree. These are equality checks, not
            # identity checks, so the messages say "different values".
            if isinstance(tensor_dict2[key], torch.Tensor):
                assert tensor_dict1[key].equal(tensor_dict2[key]), (
                    f"{key} in tensor_dict1 and tensor_dict2 contain different values"
                )
            else:
                # non-tensor entries are compared with ==
                assert tensor_dict1[key] == tensor_dict2[key], (
                    f"{key} in tensor_dict1 and tensor_dict2 contain different values"
                )
    return tensor_dict1
def make_iterator(tensordict: TensorDict, mini_batch_size, epochs, seed=None, dataloader_kwargs=None):
    """Create an iterator that yields mini-batches from a TensorDict.

    Internally builds a DataLoader over row indices and selects the matching
    rows from the TensorDict for each mini-batch, repeating for ``epochs``
    full passes.

    Args:
        tensordict: The TensorDict to iterate over.
        mini_batch_size: Size of each mini-batch; must evenly divide the
            TensorDict's batch size.
        epochs: Number of full passes over the data.
        seed: Optional random seed for reproducible shuffling.
        dataloader_kwargs: Optional extra kwargs forwarded to the underlying
            DataLoader (e.g., shuffle=True, num_workers=4).

    Returns:
        An iterator yielding TensorDict mini-batches of size mini_batch_size.

    Raises:
        AssertionError: If batch size is not divisible by mini_batch_size.

    Example:
        >>> td = TensorDict({"obs": torch.randn(100, 4)}, batch_size=[100])
        >>> for batch in make_iterator(td, mini_batch_size=10, epochs=2):
        ...     pass  # batch is a TensorDict with batch_size=[10]
    """
    from torch.utils.data import DataLoader

    total = tensordict.batch_size[0]
    assert total % mini_batch_size == 0, f"{total} % {mini_batch_size} != 0"

    if dataloader_kwargs is None:
        dataloader_kwargs = {}

    # Seeded generator gives reproducible shuffling when shuffle=True is
    # passed through dataloader_kwargs.
    if seed is not None:
        generator = torch.Generator()
        generator.manual_seed(seed)
    else:
        generator = None

    assert isinstance(dataloader_kwargs, dict)
    # Iterate over indices, not rows, so selection goes through
    # index_select_tensor_dict (which understands nested/non-tensor entries).
    index_list = torch.arange(tensordict.shape[0])
    index_loader = DataLoader(
        dataset=index_list,
        batch_size=mini_batch_size,
        collate_fn=lambda x: x,
        generator=generator,
        **dataloader_kwargs,
    )

    def _batches():
        for _ in range(epochs):
            for batch_indices in index_loader:
                yield index_select_tensor_dict(tensordict, batch_indices)

    return _batches()
def assert_tensordict_eq(tensordict1: TensorDict, tensordict2: TensorDict):
    """Assert that two TensorDicts are equal.

    Performs a deep equality check between two TensorDicts, verifying that
    they have the same keys with identical values. Handles nested tensors
    by comparing their unbound components.

    Args:
        tensordict1: First TensorDict to compare.
        tensordict2: Second TensorDict to compare.

    Raises:
        AssertionError: If the TensorDicts differ in keys, value types, or
            value contents. The error message indicates what differs.

    Note:
        - Regular tensors are compared element-wise
        - Nested tensors are unbound and compared component by component
        - Non-tensor values are compared with standard equality
    """
    tensordict1_key_set = set(tensordict1.keys())
    tensordict2_key_set = set(tensordict2.keys())
    assert tensordict1_key_set == tensordict2_key_set, (
        f"key set diffs. Got {tensordict2_key_set=} vs {tensordict1_key_set=}"
    )
    for key in tensordict1.keys():
        val = tensordict1[key]
        val2 = tensordict2[key]
        # Types must match exactly (e.g. tensor vs list) before comparing values.
        assert type(val) is type(val2), f"The type of {key} must be the same. Got {type(val)} vs {type(val2)}"
        if isinstance(val, torch.Tensor):
            if val.is_nested:
                assert val.is_nested and val2.is_nested, (
                    f"Both tensors must be nested tensors. {val.is_nested=}, {val2.is_nested=}"
                )
                # Jagged tensors can't be compared directly; compare row by row.
                t1, t2 = val.unbind(), val2.unbind()
                assert len(t1) == len(t2), f"Nested tensor should have the same lengths. {len(t1)=} vs {len(t2)=}"
                for c1, c2 in zip(t1, t2, strict=True):
                    assert torch.equal(c1, c2), f"Nested tensor components have different values. {c1=} vs {c2=}"
            else:
                assert torch.all(torch.eq(val, val2)).item()
        else:
            # Non-tensor values (lists, dicts, scalars) use plain == equality.
            assert val == val2
def get(tensordict: TensorDict, key: str, default=None) -> Any:
    """Get a value from a TensorDict with automatic unwrapping.

    Converts the stored entry to a Python-native form:
    - Tensors come back as-is
    - NonTensorStack becomes a Python list
    - NonTensorData is unwrapped to its payload

    Args:
        tensordict: The TensorDict to retrieve from.
        key: The key to look up.
        default: Value returned when the key doesn't exist. Defaults to None.

    Returns:
        The value in its native format, or ``default`` if not found.

    Example:
        >>> td = get_tensordict({"obs": torch.randn(3, 4), "labels": ["a", "b", "c"]})
        >>> get(td, "obs")       # torch.Tensor
        >>> get(td, "labels")    # ["a", "b", "c"]
        >>> get(td, "missing", "default")  # "default"
    """
    if key not in tensordict:
        return default
    value = tensordict.get(key)
    if isinstance(value, torch.Tensor):
        return value
    if isinstance(value, NonTensorStack):
        return value.tolist()
    assert isinstance(value, NonTensorData)
    return value.data
def get_keys(tensordict: TensorDict, keys: Iterable[str]) -> TensorDict:
    """Extract a subset of keys from a TensorDict into a new TensorDict.

    Values are routed into the right bucket (batched vs. metadata) and the
    result is rebuilt through ``get_tensordict``.

    Args:
        tensordict: The source TensorDict.
        keys: Iterable of key names to extract.

    Returns:
        A new TensorDict containing only the requested keys.

    Raises:
        KeyError: If a requested key is absent from the tensordict.

    Example:
        >>> td = get_tensordict({"a": torch.randn(3), "b": torch.randn(3), "c": torch.randn(3)})
        >>> subset = get_keys(td, ["a", "c"])
        >>> list(subset.keys())
        ['a', 'c']
    """
    tensors = {}
    metadata = {}
    for name in keys:
        if name not in tensordict.keys():
            raise KeyError(f"key {name} not in tensordict")
        value = tensordict.get(name)
        if isinstance(value, torch.Tensor):
            tensors[name] = value
        elif isinstance(value, NonTensorStack):
            # Stacks round-trip through a plain list so get_tensordict
            # re-wraps them consistently.
            tensors[name] = value.tolist()
        else:
            assert isinstance(value, NonTensorData)
            metadata[name] = value.data
    return get_tensordict(tensors, metadata)
def pop(tensordict: TensorDict, key: str, default=None) -> Any:
    """Remove and return a value from a TensorDict with automatic unwrapping.

    Removes ``key`` from the TensorDict and returns its value converted to a
    Python-native form (same conversions as ``get()``).

    Args:
        tensordict: The TensorDict to pop from.
        key: The key to remove and return.
        default: Value returned when the key doesn't exist. Defaults to None.

    Returns:
        The value in its native format, or ``default`` if not found.
        The key is removed from the TensorDict.

    Example:
        >>> td = get_tensordict({"obs": torch.randn(3, 4), "labels": ["a", "b", "c"]})
        >>> labels = pop(td, "labels")  # ["a", "b", "c"], removed from td
        >>> "labels" in td.keys()
        False
    """
    # Sentinel distinguishes "missing key" from a stored None.
    _missing = object()
    value = tensordict.pop(key, _missing)
    if value is _missing:
        return default
    if isinstance(value, torch.Tensor):
        return value
    if isinstance(value, NonTensorStack):
        return value.tolist()
    assert isinstance(value, NonTensorData)
    return value.data
def pop_keys(tensordict: TensorDict, keys: Iterable[str]) -> TensorDict:
    """Remove multiple keys from a TensorDict and return them as a new TensorDict.

    Removes the specified keys from the source TensorDict and creates a new
    TensorDict containing those keys and their values.

    Args:
        tensordict: The source TensorDict to pop from (modified in-place).
        keys: Iterable of key names to remove and return.

    Returns:
        A new TensorDict containing the popped keys and their values.

    Raises:
        KeyError: If any key in keys doesn't exist in the tensordict.

    Example:
        >>> td = get_tensordict({"a": torch.randn(3), "b": torch.randn(3), "c": torch.randn(3)})
        >>> popped = pop_keys(td, ["a", "c"])
        >>> list(td.keys())  # Only 'b' remains
        ['b']
        >>> list(popped.keys())
        ['a', 'c']
    """
    tensor_output = {}
    non_tensor_output = {}
    for key in keys:
        if key not in tensordict.keys():
            raise KeyError(f"key {key} not in tensordict")
        # Peek first to decide the bucket, then pop for real.
        output = tensordict.get(key)
        if isinstance(output, torch.Tensor):
            tensor_output[key] = tensordict.pop(key)
        elif isinstance(output, NonTensorStack):
            tensor_output[key] = tensordict.pop(key).tolist()
        else:
            assert isinstance(output, NonTensorData)
            # NOTE(review): .pop() appears to return the NonTensorData wrapper
            # here (cf. pop() above, which unwraps via .data), and
            # get_tensordict wraps it again in NonTensorData. This relies on
            # NonTensorData flattening nested wrappers — verify against the
            # tensordict version in use.
            non_tensor_output[key] = tensordict.pop(key)
    return get_tensordict(tensor_output, non_tensor_output)
def pad_to_divisor(data: TensorDict, size_divisor: int):
    """Pad a TensorDict's batch dimension to be divisible by a given divisor.

    If the TensorDict's length is not evenly divisible by size_divisor,
    pads the batch dimension by repeating elements from the beginning.
    Useful for ensuring even distribution across workers in distributed training.

    Args:
        data: The TensorDict to pad.
        size_divisor: The divisor that the padded length must be divisible by.

    Returns:
        tuple: A tuple containing:
            - data (TensorDict): The padded TensorDict (or original if no padding needed)
            - pad_size (int): Number of elements added as padding (0 if none)

    Raises:
        AssertionError: If data is not a TensorDict.

    Example:
        >>> td = TensorDict({"obs": torch.randn(10, 4)}, batch_size=[10])
        >>> padded, pad_size = pad_to_divisor(td, 4)
        >>> len(padded)  # 12 (next multiple of 4 after 10)
        12
        >>> pad_size
        2
    """
    assert isinstance(data, TensorDict), "data must be a TensorDict"
    if len(data) % size_divisor != 0:
        pad_size = size_divisor - len(data) % size_divisor
        padding_protos = []
        remaining_pad = pad_size
        # Repeat chunks from the front until enough padding rows exist;
        # the loop handles pad_size > len(data).
        while remaining_pad > 0:
            take_size = min(remaining_pad, len(data))
            padding_protos.append(data[:take_size])
            remaining_pad -= take_size
        data_padded = torch.cat([data] + padding_protos)
    else:
        if len(data) == 0:
            # Fixed message: previously referenced the removed "DataProto"
            # class and had a typo ("no changed made").
            logging.warning("padding an empty TensorDict; no change made")
        pad_size = 0
        data_padded = data
    return data_padded, pad_size
def unpad(data: TensorDict, pad_size):
    """Remove padding from a TensorDict.

    Reverses ``pad_to_divisor`` by dropping the last ``pad_size`` rows.
    A ``pad_size`` of 0 returns the input unchanged.

    Args:
        data: The padded TensorDict.
        pad_size: Number of padding rows to remove.

    Returns:
        ``data[:-pad_size]`` when pad_size is nonzero, else ``data``.

    Example:
        >>> td = TensorDict({"obs": torch.randn(12, 4)}, batch_size=[12])
        >>> len(unpad(td, pad_size=2))
        10
    """
    return data if pad_size == 0 else data[:-pad_size]
def contiguous(data: TensorDict) -> TensorDict:
    """Return a new tensordict whose tensor entries are contiguous.

    The tensordict library's own ``contiguous`` would turn entries into
    NonTensorStack, so this helper instead rebuilds the tensordict via
    ``get_tensordict``: each plain tensor gets ``.contiguous()`` applied,
    while NonTensorStack and NonTensorData entries pass through unchanged.
    Always returns a new tensordict.

    Args:
        data: The input tensordict.

    Returns:
        A tensordict with the same content whose tensors are contiguous.
    """
    tensors = {}
    metadata = {}
    for name in data.keys():
        value = data.get(name)
        if isinstance(value, NonTensorData):
            metadata[name] = value
        elif isinstance(value, NonTensorStack):
            tensors[name] = value
        else:
            assert isinstance(value, torch.Tensor), f"Expect val to be a torch.Tensor. Got {type(value)}"
            tensors[name] = value.contiguous()
    return get_tensordict(tensor_dict=tensors, non_tensor_dict=metadata)
def maybe_fix_3d_position_ids(data: TensorDict):
    """Work around a ragged-index glitch for 3D nested position_ids.

    Note for tensordict with pickle/unpickle: a nested tensor in a tensordict
    that went through consolidate and pickle/unpickle will incur an indexing
    error for the ragged tensor. This only happens when using 3D position ids
    in VLMs, and is likely a bug in tensordict. As a workaround, we manually
    set the ragged index back to dim 2.
    """
    if "position_ids" in data.keys() and data["position_ids"].dim() == 3 and data["position_ids"].is_nested:
        # NOTE: touches the private `_ragged_idx` attribute of the nested
        # tensor; remove once the upstream tensordict issue is fixed.
        data["position_ids"]._ragged_idx = 2
| {
"repo_id": "verl-project/verl",
"file_path": "verl/utils/tensordict_utils.py",
"license": "Apache License 2.0",
"lines": 674,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.