| sample_id (string, 21-196 chars) | text (string, 105-936k chars) | metadata (dict) | category (string, 6 classes) |
|---|---|---|---|
verl-project/verl:tests/special_e2e/sft/compare_sft_engine_results.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import torch
def get_result(file):
file = os.path.expanduser(file)
result = []
with open(file) as f:
lines = f.readlines()
for line in lines:
result.append(json.loads(line))
return result
def compare_results(golden_results, other_result):
golden_loss = golden_results[0]["data"]["train/loss"]
golden_grad_norm = golden_results[0]["data"]["train/grad_norm"]
loss = other_result[0]["data"]["train/loss"]
grad_norm = other_result[0]["data"]["train/grad_norm"]
torch.testing.assert_close(golden_loss, loss, atol=1e-2, rtol=1e-2)
torch.testing.assert_close(golden_grad_norm, grad_norm, atol=1e-4, rtol=3e-2)
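# A sketch of the JSONL line format these helpers assume; only the
# "data" -> "train/loss" and "train/grad_norm" fields are read above,
# and the values shown here are purely illustrative:
# {"data": {"train/loss": 1.23, "train/grad_norm": 0.45}}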
if __name__ == "__main__":
golden_results = get_result("~/verl/test/log/golden.jsonl")
# get all other results
other_results = {}
# walk through all result files in ~/verl/test/log/verl_sft_test
for file in os.listdir(os.path.expanduser("~/verl/test/log/verl_sft_test")):
if file.endswith(".jsonl"):
other_results[file] = get_result(os.path.join(os.path.expanduser("~/verl/test/log/verl_sft_test"), file))
# compare results
for file, other_result in other_results.items():
print(f"compare results {file}")
compare_results(golden_results, other_result)
print(f"compare results {file} done")
print("All results are close to golden results")
| {
"repo_id": "verl-project/verl",
"file_path": "tests/special_e2e/sft/compare_sft_engine_results.py",
"license": "Apache License 2.0",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:examples/data_preprocess/gsm8k_multiturn_sft.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Preprocess the GSM8k dataset to parquet format
"""
import argparse
import os
import re
import datasets
from verl.utils.hdfs_io import copy, makedirs
def extract_solution(solution_str):
solution = re.search("#### (\\-?[0-9\\.\\,]+)", solution_str)
assert solution is not None
final_solution = solution.group(0)
final_solution = final_solution.split("#### ")[1].replace(",", "")
return final_solution
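# Example (assumed GSM8K-style answer string; the number is illustrative):
#   extract_solution("... so she sells 1,234 clips.\n#### 1,234") -> "1234"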
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--local_dir", default=None)
parser.add_argument("--local_dataset_path", default=None, help="The local path to the raw dataset, if it exists.")
parser.add_argument(
"--local_save_dir", default="~/data/gsm8k_sft", help="The save directory for the preprocessed dataset."
)
parser.add_argument("--hdfs_dir", default=None)
args = parser.parse_args()
local_dataset_path = args.local_dataset_path
data_source = "openai/gsm8k"
if local_dataset_path is not None:
dataset = datasets.load_dataset(local_dataset_path, "main")
else:
dataset = datasets.load_dataset(data_source, "main")
train_dataset = dataset["train"]
test_dataset = dataset["test"]
instruction_following = 'Let\'s think step by step and output the final answer after "####".'
# convert each data item into a chat-style messages list for SFT
def make_map_fn(split):
def process_fn(example, idx):
question_raw = example.pop("question")
question = question_raw + " " + instruction_following
answer_raw = example.pop("answer")
data = {
"messages": [
{
"role": "user",
"content": question,
},
{
"role": "assistant",
"content": answer_raw,
},
],
}
return data
return process_fn
train_dataset = train_dataset.map(function=make_map_fn("train"), with_indices=True)
test_dataset = test_dataset.map(function=make_map_fn("test"), with_indices=True)
hdfs_dir = args.hdfs_dir
local_save_dir = args.local_dir
if local_save_dir is not None:
print("Warning: Argument 'local_dir' is deprecated. Please use 'local_save_dir' instead.")
else:
local_save_dir = args.local_save_dir
local_save_dir = os.path.expanduser(local_save_dir)
train_dataset.to_parquet(os.path.join(local_save_dir, "train.parquet"))
test_dataset.to_parquet(os.path.join(local_save_dir, "test.parquet"))
if hdfs_dir is not None:
makedirs(hdfs_dir)
copy(src=local_save_dir, dst=hdfs_dir)
| {
"repo_id": "verl-project/verl",
"file_path": "examples/data_preprocess/gsm8k_multiturn_sft.py",
"license": "Apache License 2.0",
"lines": 79,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/trainer/sft_trainer.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from functools import partial
from tensordict.tensorclass import NonTensorData
os.environ["NCCL_DEBUG"] = "WARN"
os.environ["TOKENIZERS_PARALLELISM"] = "true"
import logging
import hydra
import torch
import torch.distributed
from omegaconf import OmegaConf
from torch.utils.data import DistributedSampler
from torchdata.stateful_dataloader import StatefulDataLoader
from tqdm import tqdm
from verl.utils import tensordict_utils as tu
from verl.utils.checkpoint import CheckpointHandler
from verl.utils.dataset.dataset_utils import SFTTensorCollator
from verl.utils.dataset.multiturn_sft_dataset import MultiTurnSFTDataset
from verl.utils.device import auto_set_device, get_device_name
from verl.utils.distributed import destroy_global_process_group
from verl.utils.logger import log_with_rank
from verl.utils.memory_utils import aggressive_empty_cache
from verl.utils.profiler import log_gpu_memory_usage
from verl.utils.tracking import Tracking
from verl.workers.engine_workers import TrainingWorker
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_SFT_LOGGING_LEVEL", "WARN"))
class SFTTrainer:
def __init__(
self,
config,
):
self.config = config
log_gpu_memory_usage(f"rank {torch.distributed.get_rank()}: Before SFTTrainer init", logger=logger)
self.rank = torch.distributed.get_rank()
self._build_config()
self._build_dataset()
self._build_engine()
self._build_dataloader()
self._init_engine()
self._build_ckpt_handler()
# Initialize resume-related variables
self.resume_global_step = self.ckpt_handler.load_checkpoint()
self.device_name = self.config.trainer.device
if self.rank == 0:
print(self.config)
log_gpu_memory_usage(f"rank {self.rank}: After SFTTrainer init", logger=logger)
def _build_ckpt_handler(self):
resume_mode = getattr(self.config.trainer, "resume_mode", "auto")
resume_from_path = getattr(self.config.trainer, "resume_from_path", None)
max_ckpt_to_keep = getattr(self.config.trainer, "max_ckpt_to_keep", None)
default_hdfs_dir = getattr(self.config.trainer, "default_hdfs_dir", None)
lora_train_meta = self._get_lora_train_meta()
self.ckpt_handler = CheckpointHandler(
engine=self.engine,
train_dataloader=self.train_dataloader,
default_local_dir=self.config.trainer.default_local_dir,
max_ckpt_to_keep=max_ckpt_to_keep,
default_hdfs_dir=default_hdfs_dir,
resume_mode=resume_mode,
resume_from_path=resume_from_path,
lora_train_meta=lora_train_meta,
)
def _get_lora_train_meta(self):
lora_adapter_path = self.config.model.get("lora_adapter_path", None)
lora_rank = int(getattr(self.config.model, "lora_rank", 0) or 0)
if lora_adapter_path is None and lora_rank <= 0:
return None
raw_lora_alpha = self.config.model.get("lora_alpha", None)
if raw_lora_alpha is None:
log_with_rank(
"LoRA is enabled but `model.lora_alpha` is not set; fallback to 0 in checkpoint metadata.",
logger=logger,
rank=self.rank,
level=logging.WARNING,
log_only_rank_0=True,
)
lora_alpha = 0
else:
lora_alpha = int(raw_lora_alpha)
if lora_alpha == 0:
log_with_rank(
"LoRA is enabled but `model.lora_alpha` is 0; this may lead to ineffective LoRA scaling.",
logger=logger,
rank=self.rank,
level=logging.WARNING,
log_only_rank_0=True,
)
task_type = self.config.model.get("task_type", None)
if task_type is None:
task_type = "CAUSAL_LM"
return {
"r": lora_rank,
"lora_alpha": int(lora_alpha or 0),
"task_type": str(task_type),
}
def _build_config(self):
from verl.utils.config import omega_conf_to_dataclass
self.model_config = omega_conf_to_dataclass(self.config.model)
self.engine_config = omega_conf_to_dataclass(self.config.engine)
self.optimizer_config = omega_conf_to_dataclass(self.config.optim)
self.checkpoint_config = omega_conf_to_dataclass(self.config.checkpoint)
self.profiler_config = omega_conf_to_dataclass(self.config.profiler)
# check profile interval
self.profiler_interval = self.config.trainer.profile_interval
self._validate_profiler_interval()
def _validate_profiler_interval(self):
assert len(self.profiler_interval) == 2
self.start_profile_step = self.profiler_interval[0]
self.end_profile_step = self.profiler_interval[1]
assert self.end_profile_step >= self.start_profile_step
if self.start_profile_step < 0:
assert self.end_profile_step < 0
def _build_engine(self):
from verl.workers.engine_workers import TrainingWorkerConfig
from verl.workers.utils.losses import sft_loss
self.loss_fn = partial(sft_loss, config=None)
config = TrainingWorkerConfig(
model_type="language_model",
model_config=self.model_config,
engine_config=self.engine_config,
optimizer_config=self.optimizer_config,
checkpoint_config=self.checkpoint_config,
profiler_config=self.profiler_config,
)
self.training_client = TrainingWorker(config=config)
self.training_client.set_loss_fn(loss_fn=self.loss_fn)
# Note that in the SPMD world, this abstraction has to break
self.engine = self.training_client.engine
def _init_engine(self):
# patch optimizer config
if self.config.trainer.total_training_steps is not None:
self.total_training_steps = self.config.trainer.total_training_steps
else:
self.total_training_steps = len(self.train_dataloader) * self.config.trainer.total_epochs
self.optimizer_config.total_training_steps = self.total_training_steps
self.steps_per_epoch = len(self.train_dataloader)
# manage save and test frequency
self.save_freq = self.config.trainer.save_freq
if self.save_freq == "after_each_epoch":
self.save_freq = self.steps_per_epoch
self.test_freq = self.config.trainer.test_freq
if self.test_freq == "after_each_epoch":
self.test_freq = self.steps_per_epoch
self.training_client.reset()
def _build_dataset(self):
config = self.config
tokenizer = self.model_config.tokenizer
processor = self.model_config.processor
train_dataset = create_sft_dataset(
config.data.train_files,
config.data,
tokenizer,
processor,
max_samples=config.data.get("train_max_samples", -1),
)
if config.data.val_files:
val_dataset = create_sft_dataset(
config.data.val_files,
config.data,
tokenizer,
processor,
max_samples=config.data.get("val_max_samples", -1),
)
else:
val_dataset = None
self.train_dataset, self.val_dataset = train_dataset, val_dataset
def _build_dataloader(self):
# build dataset
config = self.config
# build dataloader
# Use data parallel rank and size instead of global rank and world size
# Set pin_memory_device when pin_memory is enabled.
device_name = get_device_name()
dp_rank = self.engine.get_data_parallel_rank()
dp_size = self.engine.get_data_parallel_size()
self.train_sampler = DistributedSampler(
self.train_dataset, shuffle=True, num_replicas=dp_size, rank=dp_rank, drop_last=True
)
self.global_batch_size = config.data.train_batch_size
self.train_batch_size_per_dp = self.global_batch_size // dp_size
self.collate_fn = SFTTensorCollator(config.data.pad_mode)
self.train_dataloader = StatefulDataLoader(
dataset=self.train_dataset,
batch_size=self.train_batch_size_per_dp,
sampler=self.train_sampler,
collate_fn=self.collate_fn,
num_workers=self.config.data.num_workers,
pin_memory=False,
drop_last=True,
pin_memory_device=device_name,
)
if self.val_dataset:
self.val_sampler = DistributedSampler(
self.val_dataset, shuffle=False, num_replicas=dp_size, rank=dp_rank, drop_last=True
)
self.val_dataloader = StatefulDataLoader(
dataset=self.val_dataset,
batch_size=self.train_batch_size_per_dp,
sampler=self.val_sampler,
collate_fn=self.collate_fn,
num_workers=self.config.data.num_workers,
pin_memory=False,
drop_last=True,
pin_memory_device=device_name,
)
else:
self.val_dataloader = None
def _get_batch_seqlens(self, data):
# gather per-sample sequence lengths across the dp group
is_nested = data["input_ids"].is_nested
if is_nested:
batch_seqlens: torch.Tensor = data["input_ids"].offsets().diff()
else:
batch_seqlens: torch.Tensor = data["attention_mask"].sum(dim=-1)
batch_seqlens = batch_seqlens.to(self.device_name) # (global_bsz // dp)
dp_group = self.engine.get_data_parallel_group()
dp_size = self.engine.get_data_parallel_size()
if dp_size == 1 or dp_group is None:
return batch_seqlens.tolist()
output_tensor = torch.empty(
(batch_seqlens.shape[0] * dp_size,),
dtype=batch_seqlens.dtype,
device=self.device_name,
) # (global_bsz,)
torch.distributed.all_gather_into_tensor(
output_tensor=output_tensor,
input_tensor=batch_seqlens,
group=dp_group,
)
batch_seqlens = output_tensor.tolist()
return batch_seqlens
def fit(self):
is_logging = self.engine.is_mp_src_rank_with_outputs() and self.engine.get_data_parallel_rank() == 0
# TODO: add a unified tracking
if is_logging:
tracking = Tracking(
project_name=self.config.trainer.project_name,
experiment_name=self.config.trainer.experiment_name,
default_backend=self.config.trainer.logger,
config=OmegaConf.to_container(self.config, resolve=True),
)
global_step = self.resume_global_step # Start from resumed step
last_valid_metric = None
log_with_rank(
f"Total training steps: {self.total_training_steps},",
logger=logger,
rank=0,
log_only_rank_0=True,
)
# With StatefulDataLoader, we don't need to manually calculate epochs and steps
# The dataloader will automatically resume from where it left off
if global_step > 0:
log_with_rank(
f"StatefulDataLoader will automatically resume from global step: {global_step}",
logger=logger,
rank=0,
log_only_rank_0=True,
)
# Calculate which epoch we're starting from for sampler.set_epoch()
start_epoch = global_step // self.steps_per_epoch
meta_info = {
"use_remove_padding": self.config.model.use_remove_padding,
"use_dynamic_bsz": self.config.data.use_dynamic_bsz,
"max_token_len_per_gpu": self.config.data.max_token_len_per_gpu,
"micro_batch_size_per_gpu": self.config.data.micro_batch_size_per_gpu,
"temperature": 1.0,
"global_batch_size": self.global_batch_size,
"pad_mode": self.config.data.pad_mode,
"pad_token_id": self.model_config.tokenizer.pad_token_id,
}
train_time = 0
total_tokens = 0
for epoch in range(start_epoch, self.config.trainer.total_epochs):
self.train_sampler.set_epoch(epoch=epoch)
aggressive_empty_cache(force_sync=True)
log_gpu_memory_usage(f"rank {self.rank}: At start of epoch {epoch}", logger=logger)
for step_in_epoch, data in enumerate(
tqdm(
self.train_dataloader,
initial=global_step % self.steps_per_epoch if epoch == start_epoch else 0,
total=self.steps_per_epoch,
desc=f"Epoch {epoch + 1}/{self.config.trainer.total_epochs}",
disable=not is_logging,
)
):
global_step += 1
# construct tensordict
data = tu.get_tensordict(tensor_dict=data, non_tensor_dict=meta_info)
batch_seqlens = self._get_batch_seqlens(data=data)
# this is necessary. Otherwise, it is interpreted as NonTensorStack
batch_seqlens_ntd = NonTensorData(batch_seqlens)
tu.assign_non_tensor(data, update_lr_scheduler=True, global_token_num=batch_seqlens_ntd)
# start profile in SPMD mode
if global_step == self.start_profile_step:
self.training_client.start_profile()
# train on one batch
output = self.training_client.train_batch(data=data)
if global_step == self.end_profile_step:
self.training_client.stop_profile()
if self.engine.is_mp_src_rank_with_outputs():
metrics = tu.get(output, "metrics")
# TODO: we could accumulate metrics for N steps and then aggregate them
for k in ["loss", "grad_norm", "lr", "mfu"]:
if k in metrics.keys():
value = metrics.pop(k)
metrics[f"train/{k}"] = value
metrics["train/global_tokens"] = torch.sum(
torch.tensor(batch_seqlens, device=self.device_name)
).item()
total_tokens += metrics["train/global_tokens"]
metrics["train/total_tokens(B)"] = total_tokens / 1e9
if self.engine.get_data_parallel_rank() == 0:
tracking.log(data=metrics, step=global_step)
is_last_step = global_step >= self.total_training_steps
is_valid_step = global_step % self.test_freq == 0
is_save_step = global_step % self.save_freq == 0
# early exit or validation step
if is_last_step and self.val_dataloader is not None or (self.test_freq > 0 and is_valid_step):
# Perform validation
val_losses = []
for val_data in self.val_dataloader:
val_data = tu.get_tensordict(tensor_dict=val_data, non_tensor_dict=meta_info)
output = self.training_client.infer_batch(val_data)
if self.engine.is_mp_src_rank_with_outputs():
metrics = tu.get(output, "metrics")
val_losses.append(metrics["loss"])
if self.engine.is_mp_src_rank_with_outputs():
val_loss = torch.mean(torch.tensor(val_losses, device=self.device_name))
# average over data parallel group
dp_group = self.engine.get_data_parallel_group()
if dp_group is not None:
torch.distributed.all_reduce(val_loss, op=torch.distributed.ReduceOp.AVG, group=dp_group)
if is_logging:
metric = {"val/loss": val_loss.detach().item()}
tracking.log(data=metric, step=global_step)
last_valid_metric = metric
torch.distributed.barrier()
if is_last_step or (self.save_freq > 0 and is_save_step):
aggressive_empty_cache(force_sync=True)
self.ckpt_handler.save_checkpoint(step=global_step)
if is_last_step:
if is_logging:
print(f"Total time for train steps: {train_time:.2f}s")
print(f"Final validation metrics: {last_valid_metric}")
return
def run_sft(config):
from verl.utils.distributed import initialize_global_process_group
initialize_global_process_group()
trainer = SFTTrainer(config=config)
trainer.fit()
destroy_global_process_group()
@hydra.main(config_path="config", config_name="sft_trainer_engine", version_base=None)
def main(config):
# Automatically set `config.trainer.device = npu` when running on Ascend NPU.
auto_set_device(config)
run_sft(config)
def create_sft_dataset(data_paths, data_config, tokenizer, processor, max_samples=-1):
"""Create a dataset."""
# build dataset
# First check if a custom dataset class is specified
if data_config.custom_cls.get("path", None):
from verl.utils.import_utils import load_extern_object
dataset_cls = load_extern_object(data_config.custom_cls.path, data_config.custom_cls.name)
else:
# Default to multi-turn dataset
dataset_cls = MultiTurnSFTDataset
# Create datasets based on the selected class
dataset = dataset_cls(
parquet_files=data_paths, tokenizer=tokenizer, config=data_config, processor=processor, max_samples=max_samples
)
return dataset
if __name__ == "__main__":
main()
| {
"repo_id": "verl-project/verl",
"file_path": "verl/trainer/sft_trainer.py",
"license": "Apache License 2.0",
"lines": 386,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/workers/engine/utils.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import numpy as np
import torch
from tensordict import TensorDict
from verl.utils import tensordict_utils as tu
from verl.utils.dataset.dataset_utils import DatasetPadMode
from verl.utils.device import is_npu_available
from verl.utils.py_functional import append_to_dict
from verl.utils.seqlen_balancing import rearrange_micro_batches, restore_dynamic_batch
def enable_full_determinism(seed: int):
"""
Helper function for reproducibility in distributed training.
See https://pytorch.org/docs/stable/notes/randomness.html for details.
"""
os.environ["PYTHONHASHSEED"] = str(seed)
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8"
os.environ["NCCL_DETERMINISTIC"] = "1"
os.environ["FLASH_ATTENTION_DETERMINISTIC"] = "1"
if is_npu_available:
# The environment variable required to enable deterministic mode on Ascend NPUs.
os.environ["NCCL_DETERMINISTIC"] = "true"
os.environ["CLOSE_MATMUL_K_SHIFT"] = "1"
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.use_deterministic_algorithms(True, warn_only=True)
# Enable CUDNN deterministic mode
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.enabled = False
if is_npu_available:
torch.npu.manual_seed(seed)
torch.npu.manual_seed_all(seed)
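# Usage sketch (assumption: called once per process, before model construction,
# so that weight init, dropout and CUDA kernels behave reproducibly):
#   enable_full_determinism(seed=42)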
def prepare_micro_batches(
data: TensorDict,
dp_group=None,
num_batches_divided_by=None,
same_micro_num_in_dp=True,
min_num_micro_batch=None,
use_dynamic_bsz_balance=True,
):
"""
Prepare micro batches from data, using either dynamic token-length-based batching or a fixed micro-batch size per GPU.
"""
use_dynamic_bsz = tu.get_non_tensor_data(data=data, key="use_dynamic_bsz", default=True)
sp_size = tu.get_non_tensor_data(data=data, key="sp_size", default=1)
force_group_size = tu.get_non_tensor_data(data=data, key="force_group_size", default=1)
if use_dynamic_bsz:
assert force_group_size == 1, "force_group_size is not supported when use_dynamic_bsz is True"
assert "max_token_len_per_gpu" in data.keys(), "max_token_len_per_gpu must be set when use_dynamic_bsz is True"
max_token_len_per_gpu = data["max_token_len_per_gpu"]
max_token_len = max_token_len_per_gpu * sp_size
micro_batches, batch_idx_list = rearrange_micro_batches(
data,
max_token_len=max_token_len,
dp_group=dp_group,
num_batches_divided_by=num_batches_divided_by,
same_micro_num_in_dp=same_micro_num_in_dp,
min_num_micro_batch=min_num_micro_batch,
use_dynamic_bsz_balance=use_dynamic_bsz_balance,
)
else:
total_data_size = len(data)
micro_batch_size_per_gpu = data["micro_batch_size_per_gpu"]
assert total_data_size % (force_group_size * micro_batch_size_per_gpu) == 0, (
"data size must be divisible by force_group_size * micro_batch_size_per_gpu"
)
micro_batches = tu.chunk_tensordict(data, total_data_size // (micro_batch_size_per_gpu * force_group_size))
batch_idx_list = None
return micro_batches, batch_idx_list
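# Call sketch (the non-tensor keys mirror those read above; how the TensorDict is
# built is up to the caller, e.g. via verl.utils.tensordict_utils in the trainer):
#   the input TensorDict carries "use_dynamic_bsz", "sp_size" and either
#   "max_token_len_per_gpu" (dynamic) or "micro_batch_size_per_gpu" (fixed), then
#   micro_batches, batch_idx_list = prepare_micro_batches(data, dp_group=dp_group)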
def postprocess_batch_func(output_lst, indices, data: TensorDict):
"""postprocess the output of a forward_backward_batch.
output_lst is a list of dict containing outputs for each micro-batch
reorder entropy and outputs. Return None for other pp ranks
only on last rank. It should be on every tp rank
each losses_reduced contains 1. model_output, 2. loss, 3. metrics.
"""
use_dynamic_bsz = tu.get_non_tensor_data(data=data, key="use_dynamic_bsz", default=True)
pad_mode = tu.get_non_tensor_data(data=data, key="pad_mode", default=DatasetPadMode.NO_PADDING)
assert pad_mode == DatasetPadMode.NO_PADDING, "postprocess_batch_func only support NO_PADDING pad_mode"
# Each per-micro-batch dict may carry 1. model_output, 2. loss, 3. metrics.
# Below we concatenate the model outputs and, when dynamic batching was used,
# restore the original sample order.
model_output = {}
losses = []
aggregated_metrics = {}
# model output
for o in output_lst:
if "model_output" in o:
for key, val in o["model_output"].items():
if key not in model_output:
model_output[key] = []
model_output[key].append(val)
# concat results from micro batches
for key, val in model_output.items():
if pad_mode == DatasetPadMode.NO_PADDING:
tensors = [tensor for nt in model_output[key] for tensor in nt.unbind()]
model_output[key] = torch.nested.as_nested_tensor(tensors, layout=torch.jagged)
else:
raise NotImplementedError(f"pad_mode {pad_mode} not implemented")
# reverse with dynamic bsz
if use_dynamic_bsz:
model_output[key] = restore_dynamic_batch(model_output[key], indices)
# loss
for o in output_lst:
if "loss" in o:
losses.append(o["loss"])
# metrics
for o in output_lst:
if "metrics" in o:
metrics = o["metrics"]
append_to_dict(aggregated_metrics, metrics)
output = {
"model_output": model_output,
"loss": losses,
"metrics": aggregated_metrics,
}
return output
| {
"repo_id": "verl-project/verl",
"file_path": "verl/workers/engine/utils.py",
"license": "Apache License 2.0",
"lines": 136,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/interactions/weather_interaction.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from typing import Any, Optional
from uuid import uuid4
from .base import BaseInteraction
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
class WeatherInteraction(BaseInteraction):
"""A demo interaction for handling weather-related queries.
- `start_interaction`: start an interaction instance for a trajectory.
- `generate_response`: generate the response of the assistant.
- `calculate_score`: calculate the score of the interaction.
- `finalize_interaction`: finalize the interaction instance.
"""
def __init__(self, config: dict):
super().__init__(config)
self._instance_dict = {}
async def start_interaction(
self, instance_id: Optional[str] = None, ground_truth: Optional[str] = None, **kwargs
) -> str:
if instance_id is None:
instance_id = str(uuid4())
self._instance_dict[instance_id] = {
"response": "",
"ground_truth": ground_truth,
"reward": 0.0,
}
return instance_id
async def generate_response(
self, instance_id: str, messages: list[dict[str, Any]], **kwargs
) -> tuple[bool, str, float, dict]:
content = "no tool call"
for i in range(len(messages) - 1, -1, -1):
item = messages[i]
if item.get("role") == "tool":
content = item.get("content")
break
self._instance_dict[instance_id]["response"] = content
reward = await self.calculate_score(instance_id)
if reward == 1.0:
response = "Thank you for your weather query!"
should_terminate_sequence = True
else:
response = "Please use the weather tool to get the weather information."
should_terminate_sequence = True
return should_terminate_sequence, response, reward, {}
async def calculate_score(self, instance_id: str, **kwargs) -> float:
# For the weather interaction, a more complex scoring logic could be implemented.
# For now, return 1.0 when a tool response was found and 0.0 otherwise.
if self._instance_dict[instance_id]["response"] == "no tool call":
return 0.0
return 1.0
async def finalize_interaction(self, instance_id: str, **kwargs) -> None:
del self._instance_dict[instance_id]
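# A minimal driver sketch for the interaction lifecycle (the config contents and
# the direct asyncio usage are assumptions; in practice these hooks are invoked by
# the rollout machinery rather than called by hand):
#
#   import asyncio
#
#   async def _demo():
#       interaction = WeatherInteraction(config={})
#       instance_id = await interaction.start_interaction()
#       messages = [{"role": "tool", "content": "Sunny, 25 degrees C"}]
#       done, reply, reward, _ = await interaction.generate_response(instance_id, messages)
#       await interaction.finalize_interaction(instance_id)
#       return done, reply, reward
#
#   # asyncio.run(_demo())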
| {
"repo_id": "verl-project/verl",
"file_path": "verl/interactions/weather_interaction.py",
"license": "Apache License 2.0",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/workers/engine/megatron/utils.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from verl.utils.device import get_torch_device
def set_random_seed(seed):
import random
import numpy as np
import torch
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
if get_torch_device().device_count() > 0:
from megatron.core import tensor_parallel
tensor_parallel.model_parallel_cuda_manual_seed(seed)
# FIXME: torch cumsum not support deterministic (used in vllm sampler),
# https://github.com/pytorch/pytorch/issues/89492
# torch.use_deterministic_algorithms(True, warn_only=True)
# os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'
| {
"repo_id": "verl-project/verl",
"file_path": "verl/workers/engine/megatron/utils.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:tests/workers/rollout/rollout_sglang/test_http_server_engine.py | # Copyright 2025 z.ai
# Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file is adapted from multiple sources:
# 1. THUDM/slime project
# Original source: https://github.com/THUDM/slime/blob/main/slime/backends/sglang_utils/http_server_engine.py
# Copyright 2025 z.ai
# Licensed under the Apache License, Version 2.0
# 2. SGLang project
# Original source: https://github.com/sgl-project/sglang/blob/main/python/sglang/srt/entrypoints/http_server_engine.py
# Copyright 2023-2024 SGLang Team
# Licensed under the Apache License, Version 2.0
#
# Modifications made by z.ai and ModelBest Inc. include but are not limited to:
# - Enhanced error handling and retry logic
# - Added async support with connection pooling
# - Extended functionality for distributed weight updates
# - Improved logging and monitoring capabilities
# - Additional configuration options and optimizations
"""Complete unit tests for HTTP Server Engine Adapters.
This module contains comprehensive unit tests for both HttpServerEngineAdapter
and AsyncHttpServerEngineAdapter classes, covering all public methods,
error handling scenarios, edge cases, and boundary conditions using pytest and mock frameworks.
Tests use real SGLang modules for integration testing while mocking external dependencies.
"""
import asyncio
from unittest.mock import AsyncMock, Mock, patch
import aiohttp
import pytest
import requests
from sglang.srt.managers.io_struct import (
UpdateWeightsFromTensorReqInput,
)
from sglang.srt.utils import MultiprocessingSerializer
# Import the module under test
from verl.workers.rollout.sglang_rollout.http_server_engine import (
AsyncHttpServerAdapter,
HttpServerAdapter,
launch_server_process,
)
@pytest.fixture(scope="session")
def event_loop():
"""Create an event loop for the entire test session."""
loop = asyncio.new_event_loop()
yield loop
loop.close()
@pytest.fixture
def basic_adapter_kwargs():
"""Provide basic kwargs for creating HTTP server adapters."""
return {
"host": "localhost",
"port": 8000,
"node_rank": 0,
"model_path": "/tmp/test_model",
}
@pytest.fixture
def router_adapter_kwargs():
"""Provide kwargs for creating adapters with router configuration."""
return {
"router_ip": "192.168.1.1",
"router_port": 8080,
"host": "localhost",
"port": 8000,
"node_rank": 0,
"model_path": "/tmp/test_model",
}
@pytest.fixture
def non_master_adapter_kwargs():
"""Provide kwargs for creating non-master node adapters."""
return {
"host": "localhost",
"port": 8000,
"node_rank": 1, # Non-master
"model_path": "/tmp/test_model",
}
@pytest.fixture
def mock_launch_server_process():
"""Mock the launch_server_process function for testing without actual server startup."""
from unittest.mock import patch
with patch("verl.workers.rollout.sglang_rollout.http_server_engine.launch_server_process") as mock_launch:
mock_process = Mock()
mock_process.is_alive.return_value = True
mock_process.pid = 12345
mock_launch.return_value = mock_process
yield mock_launch
@pytest.fixture
def mock_multiprocessing_process():
"""Create mock multiprocessing.Process for testing without actual process creation."""
from unittest.mock import patch
with patch("verl.workers.rollout.sglang_rollout.http_server_engine.multiprocessing.Process") as mock_process_class:
mock_process = Mock()
mock_process.is_alive.return_value = True
mock_process.pid = 12345
mock_process_class.return_value = mock_process
yield mock_process
@pytest.fixture
def mock_requests_session():
"""Create mock requests.Session for testing HTTP interactions."""
from unittest.mock import patch
with patch("verl.workers.rollout.sglang_rollout.http_server_engine.requests.Session") as mock_session_class:
mock_session = Mock()
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {"status": "success"}
mock_session.get.return_value = mock_response
mock_session.post.return_value = mock_response
mock_session_class.return_value.__enter__.return_value = mock_session
yield mock_session
@pytest.fixture
def mock_requests_post():
"""Mock requests.post for testing HTTP POST requests."""
from unittest.mock import patch
with patch("verl.workers.rollout.sglang_rollout.http_server_engine.requests.post") as mock_post:
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {"status": "success"}
mock_post.return_value = mock_response
yield mock_post
@pytest.fixture
def mock_requests_get():
"""Mock requests.get for testing HTTP GET requests."""
from unittest.mock import patch
with patch("verl.workers.rollout.sglang_rollout.http_server_engine.requests.get") as mock_get:
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {"status": "success"}
mock_get.return_value = mock_response
yield mock_get
@pytest.fixture
def mock_aiohttp_session():
"""Create mock aiohttp.ClientSession for testing async HTTP interactions."""
mock_session = AsyncMock()
mock_session.closed = False
# Mock response
mock_response = AsyncMock()
mock_response.status = 200
mock_response.json = AsyncMock(return_value={"status": "success"})
mock_response.raise_for_status = Mock()
# Mock context managers
mock_session.get.return_value.__aenter__.return_value = mock_response
mock_session.post.return_value.__aenter__.return_value = mock_response
return mock_session
@pytest.fixture
def mock_kill_process_tree():
"""Mock kill_process_tree function for testing cleanup without actual process termination."""
from unittest.mock import patch
with patch("verl.workers.rollout.sglang_rollout.http_server_engine.kill_process_tree") as mock_kill:
yield mock_kill
# Test environment fixtures for real SGLang testing
@pytest.fixture(scope="session")
def sglang_test_model_path():
"""Provide a test model path for SGLang tests.
This can be overridden by environment variable SGLANG_TEST_MODEL_PATH
for tests that need a real model.
"""
import os
return os.getenv("SGLANG_TEST_MODEL_PATH", "/tmp/test_model")
@pytest.fixture
def real_adapter_kwargs(sglang_test_model_path):
"""Provide kwargs for creating adapters with real SGLang integration."""
return {
"host": "localhost",
"port": 8000,
"node_rank": 0,
"model_path": sglang_test_model_path,
}
@pytest.fixture(autouse=True)
def mock_server_args_post_init():
"""Mock ServerArgs.__post_init__ to skip model path validation."""
from unittest.mock import patch
with patch(
"verl.workers.rollout.sglang_rollout.http_server_engine.ServerArgs.__post_init__", return_value=None
) as mock_post_init:
yield mock_post_init
class TestLaunchServerProcess:
"""Test cases for launch_server_process function."""
def test_launch_server_process_success(
self, mock_multiprocessing_process, mock_requests_session, real_adapter_kwargs
):
"""Test successful server process launch and health check."""
# Import real SGLang ServerArgs
from sglang.srt.server_args import ServerArgs
# Create server args using real ServerArgs
server_args = ServerArgs(**real_adapter_kwargs)
# Test
with patch(
"verl.workers.rollout.sglang_rollout.http_server_engine.multiprocessing.Process"
) as mock_process_class:
mock_process_class.return_value = mock_multiprocessing_process
with patch("verl.workers.rollout.sglang_rollout.http_server_engine.requests.Session") as mock_session_class:
mock_session_class.return_value.__enter__.return_value = mock_requests_session
result = launch_server_process(server_args, first_rank_in_node=True)
# Assertions
assert result == mock_multiprocessing_process
mock_multiprocessing_process.start.assert_called_once()
assert mock_requests_session.get.call_count >= 2 # health_generate and flush_cache
def test_launch_server_process_non_master(self, mock_multiprocessing_process, non_master_adapter_kwargs):
"""Test server launch for non-master nodes (should return immediately)."""
from sglang.srt.server_args import ServerArgs
server_args = ServerArgs(**non_master_adapter_kwargs)
with patch(
"verl.workers.rollout.sglang_rollout.http_server_engine.multiprocessing.Process"
) as mock_process_class:
mock_process_class.return_value = mock_multiprocessing_process
result = launch_server_process(server_args, first_rank_in_node=True)
assert result == mock_multiprocessing_process
mock_multiprocessing_process.start.assert_not_called()
def test_launch_server_process_timeout(self, mock_multiprocessing_process, real_adapter_kwargs):
"""Test timeout during server health check."""
from sglang.srt.server_args import ServerArgs
server_args = ServerArgs(**real_adapter_kwargs)
with patch(
"verl.workers.rollout.sglang_rollout.http_server_engine.multiprocessing.Process"
) as mock_process_class:
mock_process_class.return_value = mock_multiprocessing_process
with patch("verl.workers.rollout.sglang_rollout.http_server_engine.requests.Session") as mock_session_class:
mock_session = Mock()
mock_session.get.side_effect = requests.RequestException("Connection failed")
mock_session_class.return_value.__enter__.return_value = mock_session
import itertools
with patch(
"verl.workers.rollout.sglang_rollout.http_server_engine.time.time",
side_effect=itertools.chain([0], itertools.repeat(400)),  # first call returns 0, every later call returns 400
):
with pytest.raises(TimeoutError):
launch_server_process(server_args, first_rank_in_node=True)
mock_multiprocessing_process.terminate.assert_called_once()
def test_launch_server_process_died(self, real_adapter_kwargs):
"""Test server process dies during startup."""
from sglang.srt.server_args import ServerArgs
server_args = ServerArgs(**real_adapter_kwargs)
with patch(
"verl.workers.rollout.sglang_rollout.http_server_engine.multiprocessing.Process"
) as mock_process_class:
mock_process = Mock()
mock_process.is_alive.return_value = False
mock_process_class.return_value = mock_process
with pytest.raises(RuntimeError, match="Server process terminated unexpectedly"):
launch_server_process(server_args, first_rank_in_node=True)
class TestHttpServerEngineAdapter:
"""Test cases for HttpServerEngineAdapter class."""
def test_init_with_router_registration(self, mock_launch_server_process, mock_requests_post, router_adapter_kwargs):
"""Test initialization with router registration."""
adapter = HttpServerAdapter(**router_adapter_kwargs)
assert adapter.router_ip == "192.168.1.1"
assert adapter.router_port == 8080
assert adapter.process == mock_launch_server_process.return_value
mock_requests_post.assert_called_once()
def test_init_without_router(self, mock_launch_server_process, basic_adapter_kwargs):
"""Test initialization without router registration."""
adapter = HttpServerAdapter(**basic_adapter_kwargs)
assert adapter.router_ip is None
assert adapter.router_port is None
assert adapter.process == mock_launch_server_process.return_value
def test_register_with_router_failure(self, mock_launch_server_process, router_adapter_kwargs):
"""Test router registration failure handling."""
with patch("verl.workers.rollout.sglang_rollout.http_server_engine.requests.post") as mock_post:
mock_post.side_effect = requests.RequestException("Connection failed")
# Should not raise exception, just log error
adapter = HttpServerAdapter(**router_adapter_kwargs)
assert adapter.router_ip == "192.168.1.1"
mock_post.assert_called_once()
def test_make_request_success(self, mock_launch_server_process, basic_adapter_kwargs):
"""Test successful HTTP request."""
adapter = HttpServerAdapter(**basic_adapter_kwargs)
with patch("verl.workers.rollout.sglang_rollout.http_server_engine.requests.post") as mock_post:
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {"status": "success"}
mock_post.return_value = mock_response
result = adapter._make_request("test_endpoint", {"param": "value"})
assert result == {"status": "success"}
mock_post.assert_called_with(
"http://localhost:8000/test_endpoint",
json={"param": "value"},
timeout=adapter.timeout,
)
def test_make_request_get_method(self, mock_launch_server_process, basic_adapter_kwargs):
"""Test HTTP GET request."""
adapter = HttpServerAdapter(**basic_adapter_kwargs)
with patch("verl.workers.rollout.sglang_rollout.http_server_engine.requests.get") as mock_get:
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {"data": "test"}
mock_get.return_value = mock_response
result = adapter._make_request("test_endpoint", method="GET")
assert result == {"data": "test"}
mock_get.assert_called_with("http://localhost:8000/test_endpoint", timeout=adapter.timeout)
def test_make_request_non_master(self, mock_launch_server_process):
"""Test request from non-master node returns empty dict."""
kwargs = {"host": "localhost", "port": 8000, "node_rank": 1, "model_path": "/tmp/test_model"}
adapter = HttpServerAdapter(**kwargs)
result = adapter._make_request("test_endpoint")
assert result == {}
def test_make_request_retry_logic(self, mock_launch_server_process, basic_adapter_kwargs):
"""Test retry logic for failed requests."""
adapter = HttpServerAdapter(max_attempts=3, **basic_adapter_kwargs)
with patch("verl.workers.rollout.sglang_rollout.http_server_engine.requests.post") as mock_post:
with patch("time.sleep") as mock_sleep:
# First two calls fail, third succeeds
mock_post.side_effect = [
requests.exceptions.Timeout(),
requests.exceptions.ConnectionError(),
Mock(status_code=200, json=lambda: {"success": True}),
]
result = adapter._make_request("test_endpoint")
assert result == {"success": True}
assert mock_post.call_count == 3
assert mock_sleep.call_count == 2
def test_make_request_http_error(self, mock_launch_server_process, basic_adapter_kwargs):
"""Test HTTP error handling."""
adapter = HttpServerAdapter(**basic_adapter_kwargs)
with patch("verl.workers.rollout.sglang_rollout.http_server_engine.requests.post") as mock_post:
mock_response = Mock()
mock_response.raise_for_status.side_effect = requests.exceptions.HTTPError("404 Not Found")
mock_post.return_value = mock_response
with pytest.raises(requests.exceptions.HTTPError):
adapter._make_request("test_endpoint")
def test_make_request_max_attempts_exceeded(self, mock_launch_server_process, basic_adapter_kwargs):
"""Test max retries exceeded."""
adapter = HttpServerAdapter(max_attempts=1, **basic_adapter_kwargs)
with patch("verl.workers.rollout.sglang_rollout.http_server_engine.requests.post") as mock_post:
with patch("time.sleep"):
mock_post.side_effect = requests.exceptions.Timeout()
with pytest.raises(RuntimeError, match="Failed to complete request"):
adapter._make_request("test_endpoint")
assert mock_post.call_count == 1  # only the initial attempt, no retries
def test_update_weights_from_tensor_strict(self, mock_launch_server_process, basic_adapter_kwargs):
import base64
from sglang.srt.managers.io_struct import UpdateWeightsFromTensorReqInput
from verl.workers.rollout.sglang_rollout.http_server_engine import HttpServerAdapter
basic_adapter_kwargs.setdefault("node_rank", 0)
adapter = HttpServerAdapter(**basic_adapter_kwargs)
with patch.object(adapter, "_make_request") as mock_request:
mock_request.return_value = {"status": "updated"}
req = UpdateWeightsFromTensorReqInput(
serialized_named_tensors=[b"tensor1", b"tensor2"],
load_format="safetensors",
flush_cache=True,
)
result = adapter.update_weights_from_tensor(req)
assert result == {"status": "updated"}
expected_b64_1 = base64.b64encode(b"tensor1").decode("utf-8")
expected_b64_2 = base64.b64encode(b"tensor2").decode("utf-8")
mock_request.assert_called_once_with(
"update_weights_from_tensor",
{
"serialized_named_tensors": [expected_b64_1, expected_b64_2],
"load_format": "safetensors",
"flush_cache": True,
},
)
def test_update_weights_from_tensor_empty(self, mock_launch_server_process, basic_adapter_kwargs):
from sglang.srt.managers.io_struct import UpdateWeightsFromTensorReqInput
from verl.workers.rollout.sglang_rollout.http_server_engine import HttpServerAdapter
basic_adapter_kwargs.setdefault("node_rank", 0)
adapter = HttpServerAdapter(**basic_adapter_kwargs)
with patch.object(adapter, "_make_request") as mock_request:
mock_request.return_value = {"status": "updated"}
req = UpdateWeightsFromTensorReqInput(
serialized_named_tensors=[],
load_format="safetensors",
flush_cache=True,
)
result = adapter.update_weights_from_tensor(req)
assert result == {"status": "updated"}
mock_request.assert_called_once_with(
"update_weights_from_tensor",
{
"serialized_named_tensors": [],
"load_format": "safetensors",
"flush_cache": True,
},
)
def test_update_weights_from_tensor_none(self, mock_launch_server_process, basic_adapter_kwargs):
from sglang.srt.managers.io_struct import UpdateWeightsFromTensorReqInput
from verl.workers.rollout.sglang_rollout.http_server_engine import HttpServerAdapter
basic_adapter_kwargs.setdefault("node_rank", 0)
adapter = HttpServerAdapter(**basic_adapter_kwargs)
with patch.object(adapter, "_make_request") as mock_request:
mock_request.return_value = {"status": "updated"}
req = UpdateWeightsFromTensorReqInput(
serialized_named_tensors=None,
load_format="safetensors",
flush_cache=True,
)
result = adapter.update_weights_from_tensor(req)
assert result == {"status": "updated"}
mock_request.assert_called_once_with(
"update_weights_from_tensor",
{
"serialized_named_tensors": [],
"load_format": "safetensors",
"flush_cache": True,
},
)
def test_generate(self, mock_launch_server_process, basic_adapter_kwargs):
"""Test generate method."""
adapter = HttpServerAdapter(**basic_adapter_kwargs)
with patch.object(adapter, "_make_request") as mock_request:
mock_request.return_value = {"text": "Generated text"}
result = adapter.generate(
prompt="Hello world",
sampling_params={"temperature": 0.7},
return_logprob=True,
)
assert result == {"text": "Generated text"}
mock_request.assert_called_once_with(
"generate",
{
"text": "Hello world",
"sampling_params": {"temperature": 0.7},
"return_logprob": True,
},
only_master=False,
)
def test_flush_cache(self, mock_launch_server_process, basic_adapter_kwargs):
"""Test flush_cache method."""
adapter = HttpServerAdapter(**basic_adapter_kwargs)
with patch("verl.workers.rollout.sglang_rollout.http_server_engine.requests.get") as mock_get:
with patch("time.sleep") as mock_sleep:
# First call fails, second succeeds
mock_responses = [
Mock(status_code=503), # Service unavailable
Mock(status_code=200, json=lambda: {"cache_flushed": True}),
]
mock_get.side_effect = mock_responses
result = adapter.flush_cache()
assert result == {"cache_flushed": True}
assert mock_get.call_count == 2
mock_sleep.assert_called_once()
def test_flush_cache_non_master(self, mock_launch_server_process):
"""Test flush_cache for non-master node."""
kwargs = {"host": "localhost", "port": 8000, "node_rank": 1, "model_path": "/tmp/test_model"}
adapter = HttpServerAdapter(**kwargs)
result = adapter.flush_cache()
assert result == {}
def test_memory_management_methods(self, mock_launch_server_process, basic_adapter_kwargs):
"""Test memory release and resume methods."""
adapter = HttpServerAdapter(**basic_adapter_kwargs)
with patch.object(adapter, "_make_request") as mock_request:
mock_request.return_value = {"status": "success"}
# Test release_memory_occupation
result = adapter.release_memory_occupation(["weights", "kv_cache"])
assert result == {"status": "success"}
mock_request.assert_called_with("release_memory_occupation", {"tags": ["weights", "kv_cache"]})
# Test resume_memory_occupation
result = adapter.resume_memory_occupation(["weights"])
assert result == {"status": "success"}
mock_request.assert_called_with("resume_memory_occupation", {"tags": ["weights"]})
def test_generation_control_methods(self, mock_launch_server_process, basic_adapter_kwargs):
"""Test generation control methods."""
adapter = HttpServerAdapter(**basic_adapter_kwargs)
with patch.object(adapter, "_make_request") as mock_request:
mock_request.return_value = {"status": "success"}
def test_shutdown(self, mock_launch_server_process, mock_kill_process_tree, router_adapter_kwargs):
"""Test shutdown method."""
with patch("verl.workers.rollout.sglang_rollout.http_server_engine.requests.post") as mock_post:
mock_response = Mock()
mock_response.status_code = 200
mock_post.return_value = mock_response
adapter = HttpServerAdapter(**router_adapter_kwargs)
adapter.shutdown()
# Should unregister from router
assert mock_post.call_count == 2 # Once for registration, once for unregistration
# Should kill process
mock_kill_process_tree.assert_called_once_with(mock_launch_server_process.return_value.pid)
def test_shutdown_with_errors(self, mock_launch_server_process, mock_kill_process_tree, router_adapter_kwargs):
"""Test shutdown method with errors."""
with patch("verl.workers.rollout.sglang_rollout.http_server_engine.requests.post") as mock_post:
# Mock registration success but unregistration failure
mock_post.side_effect = [
Mock(status_code=200), # Registration success
requests.RequestException("Unregistration failed"), # Unregistration failure
]
# Mock process kill failure
mock_kill_process_tree.side_effect = Exception("Kill failed")
adapter = HttpServerAdapter(**router_adapter_kwargs)
# Should not raise exceptions
adapter.shutdown()
assert mock_post.call_count == 2
mock_kill_process_tree.assert_called_once_with(mock_launch_server_process.return_value.pid)
# Edge cases for HttpServerEngineAdapter
def test_empty_and_none_parameters(self, mock_launch_server_process, basic_adapter_kwargs):
"""Test handling of empty and None parameters."""
adapter = HttpServerAdapter(**basic_adapter_kwargs)
with patch.object(adapter, "_make_request") as mock_request:
mock_request.return_value = {"status": "success"}
req = UpdateWeightsFromTensorReqInput(
serialized_named_tensors=None,
load_format=None,
flush_cache=None,
)
# Test generate with all None parameters
result = adapter.generate()
assert result == {"status": "success"}
# Test with empty lists
result = adapter.update_weights_from_tensor(req)
assert result == {"status": "success"}
# Test with empty tags
result = adapter.release_memory_occupation(req)
assert result == {"status": "success"}
def test_large_payload_handling(self, mock_launch_server_process, basic_adapter_kwargs):
"""Test handling of large payloads."""
adapter = HttpServerAdapter(**basic_adapter_kwargs)
with patch.object(adapter, "_make_request") as mock_request:
mock_request.return_value = {"status": "success"}
# Test with large tensor list
large_tensor_list = [MultiprocessingSerializer.serialize(f"tensor_{i}") for i in range(1000)]
req = UpdateWeightsFromTensorReqInput(
serialized_named_tensors=large_tensor_list,
load_format="safetensors",
flush_cache=True,
)
result = adapter.update_weights_from_tensor(req)
assert result == {"status": "success"}
# Test with large prompt
large_prompt = "A" * 10000
result = adapter.generate(prompt=large_prompt)
assert result == {"status": "success"}
def test_timeout_edge_cases(self, mock_launch_server_process):
"""Test various timeout scenarios."""
# Test with very small timeout
kwargs = {"host": "localhost", "port": 8000, "node_rank": 0, "model_path": "/tmp/test_model", "timeout": 0.001}
adapter = HttpServerAdapter(**kwargs)
with patch("verl.workers.rollout.sglang_rollout.http_server_engine.requests.post") as mock_post:
mock_post.side_effect = requests.exceptions.Timeout()
with pytest.raises(RuntimeError, match="Failed to complete request"):
adapter._make_request("test_endpoint")
def test_extreme_configuration_values(self, mock_launch_server_process):
"""Test extreme configuration values."""
# Test with extreme values
kwargs = {
"host": "localhost",
"port": 8000,
"node_rank": 0,
"model_path": "/tmp/test_model",
"timeout": 0.001, # Very small
"max_attempts": 100, # Very large
"retry_delay": 0.001, # Very small
}
adapter = HttpServerAdapter(**kwargs)
assert adapter.timeout == 0.001
assert adapter.max_attempts == 100
assert adapter.retry_delay == 0.001
class TestAsyncHttpServerEngineAdapter:
"""Test cases for AsyncHttpServerEngineAdapter class."""
def test_init(self, mock_launch_server_process, basic_adapter_kwargs):
"""Test async adapter initialization."""
adapter = AsyncHttpServerAdapter(max_connections=50, **basic_adapter_kwargs)
assert adapter.max_connections == 50
@pytest.mark.asyncio
async def test_make_async_request_success(self, mock_launch_server_process, basic_adapter_kwargs):
"""Test successful async HTTP request."""
# Instantiate adapter
adapter = AsyncHttpServerAdapter(**basic_adapter_kwargs)
mock_response = AsyncMock()
mock_response.status = 200
mock_response.json = AsyncMock(return_value={"status": "success"})
mock_response.raise_for_status = Mock()
mock_post_context_manager = AsyncMock()
mock_post_context_manager.__aenter__.return_value = mock_response
mock_session = AsyncMock(spec=aiohttp.ClientSession)
mock_session.closed = False
mock_session.post.return_value = mock_post_context_manager
mock_session_cm = AsyncMock()
mock_session_cm.__aenter__.return_value = mock_session
with patch.object(adapter, "_get_session", return_value=mock_session_cm):
result = await adapter._make_async_request("test_endpoint", {"param": "value"})
# Assert result is correct
assert result == {"status": "success"}
# Verify post was called
mock_session.post.assert_called_once_with(
"http://localhost:8000/test_endpoint", json={"param": "value"}, timeout=adapter.timeout
)
@pytest.mark.asyncio
async def test_make_async_request_get_method(self, mock_launch_server_process, basic_adapter_kwargs):
"""Test async GET request using aiohttp and proper context mocking."""
# Instantiate the async adapter
adapter = AsyncHttpServerAdapter(**basic_adapter_kwargs)
mock_response = AsyncMock()
mock_response.status = 200
mock_response.json = AsyncMock(return_value={"data": "test"})
mock_response.raise_for_status = Mock()
mock_get_context_manager = AsyncMock()
mock_get_context_manager.__aenter__.return_value = mock_response
mock_session = AsyncMock(spec=aiohttp.ClientSession)
mock_session.closed = False
mock_session.get.return_value = mock_get_context_manager
mock_session_cm = AsyncMock()
mock_session_cm.__aenter__.return_value = mock_session
with patch.object(adapter, "_get_session", return_value=mock_session_cm):
result = await adapter._make_async_request("test_endpoint", method="GET")
# Validate
assert result == {"data": "test"}
mock_session.get.assert_called_once_with("http://localhost:8000/test_endpoint", timeout=adapter.timeout)
@pytest.mark.asyncio
async def test_make_async_request_non_master(self, mock_launch_server_process):
"""Test async request from non-master node."""
kwargs = {"host": "localhost", "port": 8000, "node_rank": 1, "model_path": "/tmp/test_model"}
adapter = AsyncHttpServerAdapter(**kwargs)
result = await adapter._make_async_request("test_endpoint")
assert result == {}
@pytest.mark.asyncio
async def test_async_generate(self, mock_launch_server_process, basic_adapter_kwargs):
"""Test async generate method."""
adapter = AsyncHttpServerAdapter(**basic_adapter_kwargs)
with patch.object(adapter, "_make_async_request", new_callable=AsyncMock) as mock_request:
mock_request.return_value = {"text": "Generated text"}
result = await adapter.generate(
prompt="Hello world",
sampling_params={"temperature": 0.7},
return_logprob=True,
)
assert result == {"text": "Generated text"}
mock_request.assert_called_once()
@pytest.mark.asyncio
async def test_async_memory_management(self, mock_launch_server_process, basic_adapter_kwargs):
"""Test async memory management methods."""
adapter = AsyncHttpServerAdapter(**basic_adapter_kwargs)
with patch.object(adapter, "_make_async_request", new_callable=AsyncMock) as mock_request:
mock_request.return_value = {"status": "success"}
# Test release_memory_occupation
result = await adapter.release_memory_occupation(["weights"])
assert result == {"status": "success"}
mock_request.assert_called_with("release_memory_occupation", {"tags": ["weights"]})
# Test resume_memory_occupation
result = await adapter.resume_memory_occupation(["weights"])
assert result == {"status": "success"}
mock_request.assert_called_with("resume_memory_occupation", {"tags": ["weights"]})
assert (
mock_request.call_count == 2
)  # two calls in total: one for release_memory_occupation and one for resume_memory_occupation
class TestErrorRecovery:
"""Test error recovery mechanisms."""
def test_flush_cache_recovery(self, mock_launch_server_process, basic_adapter_kwargs):
"""Test flush cache recovery from failures."""
adapter = HttpServerAdapter(max_attempts=2, **basic_adapter_kwargs)
with patch("verl.workers.rollout.sglang_rollout.http_server_engine.requests.get") as mock_get:
# Simulate multiple failures then success
mock_get.side_effect = [
requests.exceptions.ConnectionError(),
requests.exceptions.Timeout(),
Mock(status_code=503), # Service unavailable
Mock(status_code=200, json=lambda: {"cache_flushed": True}),
]
with patch("time.sleep"):
result = adapter.flush_cache()
assert result == {"cache_flushed": True}
def test_flush_cache_max_attempts(self, mock_launch_server_process, basic_adapter_kwargs):
"""Test flush cache max retries exceeded."""
adapter = HttpServerAdapter(max_attempts=1, **basic_adapter_kwargs)
with patch("verl.workers.rollout.sglang_rollout.http_server_engine.requests.get") as mock_get:
# All attempts fail
mock_get.side_effect = requests.exceptions.ConnectionError()
with patch("time.sleep"):
result = adapter.flush_cache()
assert result == {} # Should return empty dict on failure
def test_network_partition_recovery(self, mock_launch_server_process, basic_adapter_kwargs):
"""Test recovery from network partition scenarios."""
adapter = HttpServerAdapter(max_attempts=3, **basic_adapter_kwargs)
with patch("verl.workers.rollout.sglang_rollout.http_server_engine.requests.post") as mock_post:
# Simulate network partition then recovery
mock_post.side_effect = [
requests.exceptions.ConnectionError("Network unreachable"),
requests.exceptions.ConnectionError("Network unreachable"),
Mock(status_code=200, json=lambda: {"recovered": True}),
]
with patch("time.sleep"):
result = adapter._make_request("test_endpoint")
assert result == {"recovered": True}
class TestResourceManagement:
"""Test resource management and cleanup."""
def test_resource_cleanup_on_exception(
self, mock_launch_server_process, mock_kill_process_tree, basic_adapter_kwargs
):
"""Test resource cleanup when exceptions occur."""
adapter = HttpServerAdapter(**basic_adapter_kwargs)
# Simulate exception during operation
with patch.object(adapter, "_make_request", side_effect=Exception("Test error")):
try:
adapter.generate(prompt="test")
except Exception:
pass
# Cleanup should still work
adapter.shutdown()
mock_kill_process_tree.assert_called_once_with(mock_launch_server_process.return_value.pid)
def test_multiple_shutdown_calls(self, mock_launch_server_process, basic_adapter_kwargs):
"""Test multiple shutdown calls are safe."""
adapter = HttpServerAdapter(**basic_adapter_kwargs)
# Multiple shutdown calls should be safe
adapter.shutdown()
adapter.shutdown()
adapter.shutdown()
class TestDataTypeHandling:
"""Test handling of various data types."""
def test_complex_data_structures(self, mock_launch_server_process, basic_adapter_kwargs):
"""Test handling of complex data structures."""
adapter = HttpServerAdapter(**basic_adapter_kwargs)
with patch.object(adapter, "_make_request") as mock_request:
mock_request.return_value = {"status": "success"}
# Test with complex sampling params
complex_sampling_params = {
"temperature": 0.7,
"top_p": 0.9,
"top_k": 50,
"repetition_penalty": 1.1,
"stop_sequences": ["</s>", "\n\n"],
"max_tokens": 100,
"logit_bias": {"token_123": 0.5, "token_456": -0.5},
"nested_config": {
"beam_search": True,
"num_beams": 4,
"early_stopping": True,
},
}
result = adapter.generate(
prompt="Test prompt",
sampling_params=complex_sampling_params,
)
assert result == {"status": "success"}
# Verify the complex structure was passed through
call_args = mock_request.call_args[0][1]
assert call_args["sampling_params"] == complex_sampling_params
class TestIntegration:
"""Integration tests for both adapters."""
def test_error_scenarios(self, mock_launch_server_process, basic_adapter_kwargs):
"""Test various error scenarios."""
adapter = HttpServerAdapter(**basic_adapter_kwargs)
# Test with None payload
with patch.object(adapter, "_make_request") as mock_request:
mock_request.return_value = {}
result = adapter.generate()
assert result == {}
# Test with empty parameters
with patch.object(adapter, "_make_request") as mock_request:
mock_request.return_value = {}
req = UpdateWeightsFromTensorReqInput(
serialized_named_tensors=None,
load_format=None,
flush_cache=None,
)
result = adapter.update_weights_from_tensor(req)
assert result == {}
| {
"repo_id": "verl-project/verl",
"file_path": "tests/workers/rollout/rollout_sglang/test_http_server_engine.py",
"license": "Apache License 2.0",
"lines": 757,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:verl/workers/rollout/sglang_rollout/http_server_engine.py | # Copyright 2025 z.ai
# Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file is adapted from multiple sources:
# 1. THUDM/slime project
# Original source: https://github.com/THUDM/slime/blob/main/slime/backends/sglang_utils/http_server_engine.py
# Copyright 2025 z.ai
# Licensed under the Apache License, Version 2.0
# 2. SGLang project
# Original source: https://github.com/sgl-project/sglang/blob/main/python/sglang/srt/entrypoints/http_server_engine.py
# Copyright 2023-2024 SGLang Team
# Licensed under the Apache License, Version 2.0
#
# Modifications made by z.ai and ModelBest Inc. include but are not limited to:
# - Enhanced error handling and retry logic
# - Added async support with connection pooling
# - Extended functionality for distributed weight updates
# - Improved logging and monitoring capabilities
# - Additional configuration options and optimizations
"""HTTP Server Engine Adapter for SGLang.
This module provides HTTP-based adapters for SGLang engines, allowing communication
with SGLang servers through HTTP requests instead of direct engine calls.
Classes:
HttpServerAdapter: Synchronous HTTP adapter for SGLang engines
AsyncHttpServerAdapter: Asynchronous HTTP adapter for SGLang engines
Functions:
launch_server_process: Launch and initialize an SGLang HTTP server process
"""
import asyncio
import logging
import multiprocessing
import os
import time
from contextlib import asynccontextmanager
from typing import Any, Callable, Optional
import aiohttp
import requests
from sglang.srt.entrypoints.EngineBase import EngineBase
from sglang.srt.entrypoints.http_server import launch_server
from sglang.srt.managers.io_struct import (
UpdateWeightsFromTensorReqInput,
)
from sglang.srt.server_args import ServerArgs
from sglang.srt.utils import kill_process_tree
# Configure logger
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
# Default configuration constants
DEFAULT_TIMEOUT = 60.0
DEFAULT_MAX_ATTEMPTS = 3
DEFAULT_RETRY_DELAY = 2.0
DEFAULT_MAX_CONNECTIONS = 2000
DEFAULT_MAX_WAIT_TIME = 300.0
def _read_response(response: requests.Response):
if response.status_code == 204 or not response.content:
return {}
try:
return response.json()
except ValueError:
return {
"content_type": response.headers.get("Content-Type", ""),
"text": response.text,
}
async def _read_async_response(resp: aiohttp.ClientResponse) -> dict[str, Any]:
if resp.status == 204 or (resp.content_length == 0):
return {}
try:
return await resp.json(content_type=None)
except Exception:
try:
text = await resp.text()
except Exception:
return {}
return {
"content_type": (resp.headers.get("Content-Type") or ""),
"text": text,
}
def launch_server_process(
server_args: ServerArgs,
timeout: float = DEFAULT_TIMEOUT,
max_wait_time: float = DEFAULT_MAX_WAIT_TIME,
first_rank_in_node: bool = False,
) -> multiprocessing.Process:
"""Launch an SGLang HTTP server process and wait for it to be ready.
This function starts a new process running an SGLang HTTP server, then waits
for the server to become ready by polling its health endpoints. It ensures
the server is fully operational before returning.
Args:
server_args (ServerArgs): Server configuration arguments including host, port, and other settings
timeout (float, optional): Timeout for individual HTTP requests during health checks.
Defaults to DEFAULT_TIMEOUT.
max_wait_time (float, optional): Overall time budget for the server to become healthy.
Defaults to DEFAULT_MAX_WAIT_TIME.
first_rank_in_node (bool, optional): Whether this caller is the first rank on its node.
The server is only started and health-checked when this is True and node_rank == 0.
Defaults to False.
Returns:
multiprocessing.Process: The launched multiprocessing.Process instance
Raises:
RuntimeError: If the server process terminates unexpectedly during startup or cache flush
TimeoutError: If the server fails to become ready within max_wait_time seconds
requests.RequestException: If health check requests fail repeatedly
Note:
This function returns immediately for non-master nodes (node_rank != 0) and for callers
that are not the first rank in the node; in that case the Process object is created but
not started. This is for interface consistency; except for the process obtained by
node_rank = 0, the returned processes have no actual effect.
"""
p = multiprocessing.Process(target=launch_server, args=(server_args,))
if server_args.node_rank != 0 or not first_rank_in_node:
logger.info(f"Server process started with PID {p.pid} for node rank {server_args.node_rank}", flush=True)
return p
p.start()
base_url = server_args.url()
headers = {
"Content-Type": "application/json; charset=utf-8",
"Authorization": f"Bearer {server_args.api_key}",
}
# Health check with overall timeout
start_time = time.time()
with requests.Session() as session:
while time.time() - start_time < max_wait_time:
if not p.is_alive():
raise RuntimeError("Server process terminated unexpectedly during startup")
try:
if server_args.is_embedding:
response = session.get(f"{base_url}/health", headers=headers, timeout=timeout)
else:
response = session.get(f"{base_url}/health_generate", headers=headers, timeout=timeout)
if response.status_code == 200:
break
except requests.RequestException as e:
logger.debug(f"Health check failed: {e}")
time.sleep(2)
else:
p.terminate()
logger.error(f"Server in {base_url} failed to become healthy within timeout period")
raise TimeoutError("Server failed to become healthy within timeout period")
# Ensure cache is ready
while time.time() - start_time < max_wait_time:
if not p.is_alive():
raise RuntimeError("Server process terminated unexpectedly during cache flush")
try:
response = session.get(f"{base_url}/flush_cache", headers=headers, timeout=timeout)
if response.status_code == 200:
break
except requests.RequestException as e:
logger.debug(f"Cache flush check failed: {e}")
time.sleep(2)
else:
p.terminate()
raise TimeoutError("Server cache flush failed within timeout period")
return p
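# Editor's sketch (commented out): calling launch_server_process directly. The
# model_path/port values are placeholders; within verl this is normally done by
# HttpServerAdapter.__init__ rather than by user code.
#
#     args = ServerArgs(model_path="/path/to/model", host="127.0.0.1", port=30000)
#     proc = launch_server_process(args, timeout=60.0, first_rank_in_node=True)
#     ...  # interact with the server over HTTP
#     kill_process_tree(proc.pid)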
class HttpServerAdapter(EngineBase):
"""HTTP-based adapter for SGLang engines.
This adapter allows interaction with SGLang engines through HTTP requests
instead of direct engine calls. It launches an HTTP server process and
provides methods to communicate with it via REST API calls.
You can use this class to launch a server from an HttpServerAdapter instance.
We recommend using this class only when you need an HTTP server;
otherwise, you can use Engine directly.
Attributes:
router_ip (Optional[str]): IP address of the router for worker registration
router_port (Optional[int]): Port of the router for worker registration
server_args (ServerArgs): Server configuration arguments
node_rank (int): Rank of this node in distributed setup
process (multiprocessing.Process): The launched server process
timeout (float): HTTP request timeout in seconds
max_attempts (int): Maximum number of attempts for requests
retry_delay (float): Base delay between retries in seconds
"""
def __init__(
self,
router_ip: Optional[str] = None,
router_port: Optional[int] = None,
timeout: float = DEFAULT_TIMEOUT,
max_attempts: int = DEFAULT_MAX_ATTEMPTS,
retry_delay: float = DEFAULT_RETRY_DELAY,
first_rank_in_node: bool = False,
max_start_wait_time: float = DEFAULT_MAX_WAIT_TIME,
launch_server: bool = True,
**kwargs: Any,
) -> None:
"""Initialize the HTTP server engine adapter.
Args:
router_ip (Optional[str], optional): IP address of router for worker registration.
Defaults to None.
router_port (Optional[int], optional): Port of router for worker registration.
Defaults to None.
timeout (float, optional): HTTP request timeout in seconds.
Defaults to DEFAULT_TIMEOUT.
max_attempts (int, optional): Maximum number of retry attempts for failed requests.
Defaults to DEFAULT_MAX_ATTEMPTS.
retry_delay (float, optional): Base delay between retries in seconds.
Defaults to DEFAULT_RETRY_DELAY.
first_rank_in_node (bool, optional): Whether this worker is the first rank on its node.
The server process is only started when this is True and node_rank == 0.
Defaults to False.
max_start_wait_time (float, optional): Overall time budget for server startup.
Defaults to DEFAULT_MAX_WAIT_TIME.
launch_server (bool, optional): Whether to launch the server process.
Defaults to True.
**kwargs (Any): Additional arguments passed to ServerArgs
Note:
TODO: @ChangyiYang Enable SGLang router for this http server engine
If both router_ip and router_port are provided and this is the master node
(node_rank == 0), the adapter will automatically register with the router.
"""
self.router_ip: Optional[str] = router_ip
self.router_port: Optional[int] = router_port
self.timeout: float = timeout
self.max_attempts: int = max_attempts
self.retry_delay: float = retry_delay
self.server_args: ServerArgs = ServerArgs(**kwargs)
self.node_rank: int = self.server_args.node_rank
self.max_start_wait_time: float = max_start_wait_time
logger.info(
f"Launch HttpServerAdapter at: {self.server_args.host}:{self.server_args.port} with {first_rank_in_node}"
)
if launch_server:
self.process: multiprocessing.Process = launch_server_process(
self.server_args, self.timeout, self.max_start_wait_time, first_rank_in_node
)
if self.node_rank == 0 and self.router_ip and self.router_port:
self._register_with_router()
def _register_with_router(self) -> None:
"""Register worker with router with error handling.
This method attempts to register the current worker with a router service.
If registration fails, it logs an error but does not raise an exception,
allowing the server to continue operating without router integration.
Raises:
Does not raise exceptions - all errors are logged and handled gracefully.
"""
try:
url = f"http://{self.router_ip}:{self.router_port}/add_worker"
params = {"url": f"http://{self.server_args.host}:{self.server_args.port}"}
response = requests.post(url, params=params, timeout=self.timeout)
response.raise_for_status()
logger.info("Successfully registered with router")
except Exception as e:
logger.error(f"Failed to register with router: {e}")
# Don't raise here - server can still work without router
def _make_request(
self,
endpoint: str,
payload: Optional[dict[str, Any]] = None,
method: str = "POST",
timeout: float = DEFAULT_TIMEOUT,
only_master: bool = True,
) -> dict[str, Any]:
"""Make a HTTP request with retry logic and consistent error handling.
Args:
endpoint (str): The API endpoint to call (without leading slash)
payload (Optional[Dict[str, Any]], optional): The JSON payload to send.
Defaults to empty dict if None.
method (str, optional): HTTP method to use. Defaults to "POST".
Returns:
Dict[str, Any]: The JSON response from the server
Raises:
requests.HTTPError: If the HTTP request fails with a client/server error
RuntimeError: If all retry attempts are exhausted
Note:
- For non-master nodes (node_rank != 0), returns empty dict immediately
- Uses exponential backoff for retries
- Logs warnings for timeout and connection errors, errors for HTTP errors
"""
if only_master and self.node_rank != 0:
return {}
url = f"http://{self.server_args.host}:{self.server_args.port}/{endpoint}"
for attempt in range(self.max_attempts):
try:
if method.upper() == "GET":
response = requests.get(url, timeout=self.timeout)
else:
response = requests.post(url, json=payload or {}, timeout=self.timeout)
response.raise_for_status()
return _read_response(response)
except requests.exceptions.Timeout:
logger.warning(f"Request to {endpoint} timed out (attempt {attempt + 1})")
except requests.exceptions.ConnectionError:
logger.warning(f"Connection error for {endpoint} (attempt {attempt + 1})")
except requests.exceptions.HTTPError as e:
logger.error(f"HTTP error for {endpoint}: {e}")
raise
except Exception as e:
logger.error(f"Unexpected error for {endpoint}: {e}")
if attempt == self.max_attempts - 1:
raise
if attempt < self.max_attempts - 1:
time.sleep(self.retry_delay * (2**attempt))
raise RuntimeError(f"Failed to complete request to {endpoint} after {self.max_attempts} attempts")
def update_weights_from_tensor(self, req: UpdateWeightsFromTensorReqInput) -> dict[str, Any]:
"""Update model weights from tensor data.
Only metadata is posted to the HTTP server; the real weights are
copied directly from GPUs.
Args:
req (UpdateWeightsFromTensorReqInput): Request object with the fields
serialized_named_tensors (List[str]): List of serialized tensor data
load_format (Optional[str], optional): Format specification for loading weights.
Defaults to None.
flush_cache (bool, optional): Whether to flush cache after updating weights.
Defaults to False.
Returns:
Dict[str, Any]: Server response containing update status
Note:
The model should be on GPUs rather than CPU for this functionality to work properly.
If you encounter issues, ensure your model is loaded on GPU devices rather than CPU.
"""
import base64
named_tensors = req.serialized_named_tensors
load_format = req.load_format
flush_cache = req.flush_cache
if named_tensors:
serialized_named_tensors = [
base64.b64encode(named_tensor).decode("utf-8") for named_tensor in named_tensors
]
else:
serialized_named_tensors = []
return self._make_request(
"update_weights_from_tensor",
{
"serialized_named_tensors": serialized_named_tensors,
"load_format": load_format,
"flush_cache": flush_cache,
},
)
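# Editor's sketch (commented out): a typical call, where the byte string below stands in
# for real serialized tensor data produced by the trainer:
#
#     req = UpdateWeightsFromTensorReqInput(
#         serialized_named_tensors=[b"<serialized-tensor-bytes>"],
#         load_format=None,
#         flush_cache=True,
#     )
#     status = adapter.update_weights_from_tensor(req)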
def shutdown(self) -> None:
"""Shutdown the HTTP server and clean up resources.
This method performs the following cleanup operations:
1. Unregisters the worker from the router (if configured)
2. Terminates the server process tree
All operations are performed with error handling to ensure graceful shutdown
even if individual steps fail.
Note:
This method should be called when the adapter is no longer needed
to ensure proper cleanup of resources and processes.
"""
# Unregister from router
if self.router_ip and self.router_port:
try:
url = f"http://{self.router_ip}:{self.router_port}/remove_worker"
params = {"url": f"http://{self.server_args.host}:{self.server_args.port}"}
requests.post(url, params=params, timeout=5.0) # Short timeout for shutdown
logger.info("Successfully unregistered from router")
except Exception as e:
logger.warning(f"Failed to unregister from router: {e}")
# Kill server process
if hasattr(self, "process") and self.process is not None:
try:
kill_process_tree(self.process.pid)
logger.info("Server process terminated")
except Exception as e:
logger.error(f"Failed to terminate server process: {e}")
def generate(
self,
prompt: Optional[str] = None,
sampling_params: Optional[dict[str, Any]] = None,
input_ids: Optional[list[int]] = None,
image_data: Optional[Any] = None,
return_logprob: bool = False,
logprob_start_len: Optional[int] = None,
top_logprobs_num: Optional[int] = None,
token_ids_logprob: Optional[list[int]] = None,
lora_path: Optional[str] = None,
custom_logit_processor: Optional[Callable] = None,
) -> dict[str, Any]:
"""Generate text using the SGLang server.
Args:
prompt (Optional[str], optional): Text prompt for generation. Defaults to None.
sampling_params (Optional[Dict[str, Any]], optional): Parameters controlling
text generation sampling. Defaults to None.
input_ids (Optional[List[int]], optional): Alternative to prompt, direct token IDs input.
Defaults to None.
image_data (Optional[Any], optional): Image data for multimodal generation.
Defaults to None.
return_logprob (bool, optional): Whether to return log probabilities.
Defaults to False.
logprob_start_len (Optional[int], optional): Starting length for log probability calculation.
Defaults to None.
top_logprobs_num (Optional[int], optional): Number of top log probabilities to return.
Defaults to None.
token_ids_logprob (Optional[List[int]], optional): Specific token IDs for
log probability calculation. Defaults to None.
lora_path (Optional[str], optional): Path to LoRA adapter weights. Defaults to None.
custom_logit_processor (Optional[Callable], optional): Custom logit processing function.
Defaults to None.
Returns:
Dict[str, Any]: Generated text and associated metadata from the server
Note:
Either prompt or input_ids should be provided, but not both.
The response format depends on the server configuration and parameters.
"""
payload = {
"text": prompt,
"sampling_params": sampling_params,
"input_ids": input_ids,
"image_data": image_data,
"return_logprob": return_logprob,
"logprob_start_len": logprob_start_len,
"top_logprobs_num": top_logprobs_num,
"token_ids_logprob": token_ids_logprob,
"lora_path": lora_path,
"custom_logit_processor": custom_logit_processor,
}
# Filter out None values
payload = {k: v for k, v in payload.items() if v is not None}
return self._make_request("generate", payload, only_master=False)
def reward_score(
self,
prompt: Optional[str] = None,
input_ids: Optional[list[int]] = None,
image_data: Optional[Any] = None,
lora_path: Optional[str] = None,
) -> dict[str, Any]:
assert self.server_args.is_embedding, "Score is only supported for embedding models"
payload = {
"text": prompt,
"input_ids": input_ids,
"image_data": image_data,
"lora_path": lora_path,
}
# Filter out None values
payload = {k: v for k, v in payload.items() if v is not None}
return self._make_request("classify", payload, only_master=False)
def flush_cache(self) -> dict[str, Any]:
"""Flush the cache of the server.
This method repeatedly attempts to flush the server cache until successful.
The flush operation will not return status 200 when there are pending requests.
Returns:
Dict[str, Any]: Server response indicating cache flush status.
For non-master nodes, returns empty dict.
Note:
Uses retry logic with limited attempts (max_attempts * 2) to avoid infinite loops.
Each retry includes a delay to allow pending requests to complete.
"""
if self.node_rank != 0:
return {}
# Use retry logic with limited attempts to avoid infinite loops
for attempt in range(self.max_attempts * 2): # Allow more retries for cache flush
try:
response = requests.get(
f"http://{self.server_args.host}:{self.server_args.port}/flush_cache", timeout=self.timeout
)
if response.status_code == 200:
return _read_response(response)
except Exception as e:
logger.warning(f"Error flushing cache (attempt {attempt + 1}): {e}")
time.sleep(self.retry_delay)
logger.error("Failed to flush cache after maximum attempts")
return {}
def release_memory_occupation(self, tags: Optional[list[str]] = None) -> dict[str, Any]:
"""Release GPU memory occupation temporarily.
Args:
tags (Optional[List[str]], optional): List of tags to specify which memory to release.
If None, releases all memory. Typical tags are ["weights", "kv_cache"]. Defaults to None.
Returns:
Dict[str, Any]: Server response indicating memory release status
"""
return self._make_request("release_memory_occupation", {"tags": tags})
def resume_memory_occupation(self, tags: Optional[list[str]] = None) -> dict[str, Any]:
"""Resume GPU memory occupation.
Args:
tags (Optional[List[str]], optional): List of tags to specify which memory to resume.
If None, resumes all memory. Typical tags are ["weights", "kv_cache"]. Defaults to None.
Returns:
Dict[str, Any]: Server response indicating memory resume status
"""
return self._make_request("resume_memory_occupation", {"tags": tags})
def abort_request(self, rid: str = "", abort_all: bool = False) -> dict[str, Any]:
"""Abort a request.
Args:
rid (str): The ID of the request to abort
abort_all (bool, optional): Whether to abort all requests. Defaults to False.
Returns:
Dict[str, Any]: Server response indicating abort status
"""
return self._make_request("abort_request", {"rid": rid, "abort_all": abort_all})
class AsyncHttpServerAdapter(HttpServerAdapter):
"""Asynchronous HTTP-based adapter for SGLang engines.
This class inherits from HttpServerAdapter and adds async capabilities
for non-blocking HTTP requests to the SGLang server. It provides the same
functionality as the synchronous version but with async/await support.
The async adapter is useful when you need to make multiple concurrent requests
or integrate with async frameworks. It uses aiohttp for efficient async HTTP
communication and maintains connection pooling for better performance.
Attributes:
max_connections (int): Maximum number of connections in the connection pool
"""
def __init__(
self,
router_ip: Optional[str] = None,
router_port: Optional[int] = None,
timeout: float = DEFAULT_TIMEOUT,
max_attempts: int = DEFAULT_MAX_ATTEMPTS,
retry_delay: float = DEFAULT_RETRY_DELAY,
max_connections: int = DEFAULT_MAX_CONNECTIONS,
first_rank_in_node: bool = False,
launch_server: bool = True,
**kwargs: Any,
) -> None:
"""Initialize the async HTTP server engine adapter.
Args:
router_ip (Optional[str], optional): IP address of router for worker registration.
Defaults to None.
router_port (Optional[int], optional): Port of router for worker registration.
Defaults to None.
timeout (float, optional): HTTP request timeout in seconds.
Defaults to DEFAULT_TIMEOUT.
max_attempts (int, optional): Maximum number of retry attempts for failed requests.
Defaults to DEFAULT_MAX_ATTEMPTS.
retry_delay (float, optional): Base delay between retries in seconds.
Defaults to DEFAULT_RETRY_DELAY.
max_connections (int, optional): Maximum number of connections in the connection pool.
Defaults to DEFAULT_MAX_CONNECTIONS.
launch_server (bool, optional): Whether to launch the server process.
Defaults to True.
**kwargs (Any): Additional arguments passed to ServerArgs
"""
super().__init__(
router_ip,
router_port,
timeout,
max_attempts,
retry_delay,
first_rank_in_node,
launch_server=launch_server,
**kwargs,
)
self.max_connections: int = max_connections
@asynccontextmanager
async def _get_session(self) -> aiohttp.ClientSession:
"""Context manager for safe session access with proper connection pooling.
Yields:
aiohttp.ClientSession: Session instance for making HTTP requests
Note:
This method creates a new session (with its own connector) for each request to avoid
resource competition; connection reuse is therefore scoped to a single request.
"""
# Create a new session for each request to avoid resource competition
connector = aiohttp.TCPConnector(
limit=self.max_connections,
limit_per_host=self.max_connections // 4,
ttl_dns_cache=300,
use_dns_cache=True,
)
timeout = aiohttp.ClientTimeout(total=self.timeout)
session = aiohttp.ClientSession(connector=connector, timeout=timeout)
try:
yield session
finally:
# Always close the session to free up resources
if not session.closed:
await session.close()
async def _make_async_request(
self,
endpoint: str,
payload: Optional[dict[str, Any]] = None,
method: str = "POST",
timeout: float = DEFAULT_TIMEOUT,
only_master: bool = True,
) -> dict[str, Any]:
"""Make an async HTTP request with retry logic and consistent error handling.
Args:
endpoint (str): The API endpoint to call (without leading slash)
payload (Optional[Dict[str, Any]], optional): The JSON payload to send.
Defaults to empty dict if None.
method (str, optional): HTTP method to use. Defaults to "POST".
Returns:
Dict[str, Any]: The JSON response from the server
Raises:
aiohttp.ClientResponseError: If the HTTP request fails with a client/server error
RuntimeError: If all retry attempts are exhausted
Note:
- For non-master nodes (node_rank != 0), returns empty dict immediately
- Uses exponential backoff for retries
- Logs warnings for timeout and connection errors, errors for HTTP errors
"""
if only_master and self.node_rank != 0:
return {}
url = f"http://{self.server_args.host}:{self.server_args.port}/{endpoint}"
for attempt in range(self.max_attempts):
try:
async with self._get_session() as session:
if method.upper() == "GET":
async with session.get(url, timeout=timeout) as response:
response.raise_for_status()
return await _read_async_response(response)
else:
async with session.post(url, json=payload or {}, timeout=timeout) as response:
response.raise_for_status()
return await _read_async_response(response)
except asyncio.TimeoutError:
logger.warning(f"Async request to {endpoint} timed out (attempt {attempt + 1})")
except aiohttp.ClientConnectorError:
logger.warning(f"Connection error for {endpoint} (attempt {attempt + 1})")
except aiohttp.ClientResponseError as e:
logger.error(f"HTTP error for {endpoint}: {e}")
raise
except Exception as e:
logger.error(f"Unexpected error for {endpoint}: {e}")
if attempt == self.max_attempts - 1:
raise
if attempt < self.max_attempts - 1:
await asyncio.sleep(self.retry_delay * (2**attempt))
raise RuntimeError(f"Failed to complete async request to {endpoint} after {self.max_attempts} attempts")
async def release_memory_occupation(self, tags: Optional[list[str]] = None) -> dict[str, Any]:
"""Release GPU memory occupation temporarily (async version).
Args:
tags (Optional[List[str]], optional): List of tags to specify which memory to release.
If None, releases all memory. Typical tags are ["weights", "kv_cache"]. Defaults to None.
Returns:
Dict[str, Any]: Server response indicating memory release status
"""
return await self._make_async_request("release_memory_occupation", {"tags": tags})
async def resume_memory_occupation(self, tags: Optional[list[str]] = None) -> dict[str, Any]:
"""Resume GPU memory occupation (async version).
This adapter forwards the request directly to the server's
resume_memory_occupation endpoint.
Args:
tags (Optional[List[str]], optional): List of tags to specify which memory to resume.
If None, resumes all memory. Typical tags are ["weights", "kv_cache"]. Defaults to None.
Returns:
Dict[str, Any]: Server response indicating memory resume status
"""
return await self._make_async_request("resume_memory_occupation", {"tags": tags})
async def update_weights_from_tensor(
self,
req: UpdateWeightsFromTensorReqInput,
) -> dict[str, Any]:
"""Update model weights from tensor data asynchronously.
Args:
req (UpdateWeightsFromTensorReqInput): Request object with the fields
serialized_named_tensors (List[str]): List of serialized tensor data
load_format (Optional[str], optional): Format specification for loading weights.
Defaults to None.
flush_cache (bool, optional): Whether to flush cache after updating weights.
Defaults to True.
Returns:
Dict[str, Any]: Server response containing update status
"""
import base64
named_tensors = req.serialized_named_tensors
load_format = req.load_format
flush_cache = req.flush_cache
serialized_named_tensors = [base64.b64encode(named_tensor).decode("utf-8") for named_tensor in named_tensors]
return await self._make_async_request(
"update_weights_from_tensor",
{
"serialized_named_tensors": serialized_named_tensors,
"load_format": load_format,
"flush_cache": flush_cache,
},
)
async def flush_cache(self) -> dict[str, Any]:
"""Flush the cache of the server asynchronously.
Similar to the sync version, this method retries until the cache
is successfully flushed. It uses async sleep between retries.
Returns:
Dict[str, Any]: Server response indicating cache flush status.
For non-master nodes, returns empty dict.
Note:
Uses retry logic with limited attempts (max_attempts * 4) to avoid infinite loops.
Each retry includes an async delay to allow pending requests to complete.
"""
if self.node_rank != 0:
return {}
# Use retry logic with limited attempts to avoid infinite loops
for attempt in range(self.max_attempts * 4): # Allow more retries for cache flush
try:
async with self._get_session() as session:
url = f"http://{self.server_args.host}:{self.server_args.port}/flush_cache"
async with session.get(url) as response:
if response.status == 200:
return await _read_async_response(response)
except Exception as e:
logger.warning(f"Error flushing cache (attempt {attempt + 1}): {e}")
await asyncio.sleep(self.retry_delay)
logger.error("Failed to flush cache after maximum attempts")
return {}
async def generate(
self,
prompt: Optional[str] = None,
sampling_params: Optional[dict[str, Any]] = None,
input_ids: Optional[list[int]] = None,
image_data: Optional[Any] = None,
return_logprob: bool = False,
logprob_start_len: Optional[int] = None,
top_logprobs_num: Optional[int] = None,
token_ids_logprob: Optional[list[int]] = None,
lora_path: Optional[str] = None,
custom_logit_processor: Optional[Callable] = None,
) -> dict[str, Any]:
"""Generate text using the SGLang server asynchronously."""
logger.info("generate() started")
payload = {
"text": prompt,
"sampling_params": sampling_params,
"input_ids": input_ids,
"image_data": image_data,
"return_logprob": return_logprob,
"logprob_start_len": logprob_start_len,
"top_logprobs_num": top_logprobs_num,
"token_ids_logprob": token_ids_logprob,
"lora_path": lora_path,
"custom_logit_processor": custom_logit_processor,
}
# Filter out None values
payload = {k: v for k, v in payload.items() if v is not None}
# Send request
response = await self._make_async_request("generate", payload, timeout=self.timeout, only_master=False)
return response
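# Editor's sketch (commented out): issuing concurrent generations from async code;
# the prompts are placeholders:
#
#     async def _demo(adapter: "AsyncHttpServerAdapter") -> list[dict]:
#         return await asyncio.gather(
#             adapter.generate(prompt="prompt A"),
#             adapter.generate(prompt="prompt B"),
#         )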
async def async_generate(
self,
prompt: Optional[str] = None,
sampling_params: Optional[dict[str, Any]] = None,
input_ids: Optional[list[int]] = None,
image_data: Optional[Any] = None,
return_logprob: bool = False,
logprob_start_len: Optional[int] = None,
top_logprobs_num: Optional[int] = None,
token_ids_logprob: Optional[list[int]] = None,
lora_path: Optional[str] = None,
custom_logit_processor: Optional[Callable] = None,
) -> dict[str, Any]:
"""Async generate method that mirrors AsyncEngine.async_generate interface.
This method provides compatibility with AsyncEngine's async_generate method
by forwarding the call to the generate method. It ensures API consistency
between direct engine usage and HTTP-based engine usage.
Args:
prompt (Optional[str], optional): Text prompt for generation. Defaults to None.
sampling_params (Optional[Dict[str, Any]], optional): Parameters controlling
text generation sampling. Defaults to None.
input_ids (Optional[List[int]], optional): Alternative to prompt, direct token IDs input.
Defaults to None.
image_data (Optional[Any], optional): Image data for multimodal generation.
Defaults to None.
return_logprob (bool, optional): Whether to return log probabilities.
Defaults to False.
logprob_start_len (Optional[int], optional): Starting length for log probability calculation.
Defaults to None.
top_logprobs_num (Optional[int], optional): Number of top log probabilities to return.
Defaults to None.
token_ids_logprob (Optional[List[int]], optional): Specific token IDs for
log probability calculation. Defaults to None.
lora_path (Optional[str], optional): Path to LoRA adapter weights. Defaults to None.
custom_logit_processor (Optional[Callable], optional): Custom logit processing function.
Defaults to None.
Returns:
Dict[str, Any]: Generated text and associated metadata from the server
Note:
This method is provided for API compatibility with AsyncEngine.
It forwards all calls to the generate method.
"""
return await self.generate(
prompt=prompt,
sampling_params=sampling_params,
input_ids=input_ids,
image_data=image_data,
return_logprob=return_logprob,
logprob_start_len=logprob_start_len,
top_logprobs_num=top_logprobs_num,
token_ids_logprob=token_ids_logprob,
lora_path=lora_path,
custom_logit_processor=custom_logit_processor,
)
async def reward_score(
self,
prompt: Optional[str] = None,
input_ids: Optional[list[int]] = None,
image_data: Optional[Any] = None,
lora_path: Optional[str] = None,
) -> dict[str, Any]:
logger.info("reward_score() started")
payload = {
"text": prompt,
"input_ids": input_ids,
"image_data": image_data,
"lora_path": lora_path,
}
# Filter out None values
payload = {k: v for k, v in payload.items() if v is not None}
# Send request
response = await self._make_async_request("classify", payload, timeout=self.timeout, only_master=False)
return response
async def async_reward_score(
self,
prompt: Optional[str] = None,
input_ids: Optional[list[int]] = None,
image_data: Optional[Any] = None,
lora_path: Optional[str] = None,
) -> dict[str, Any]:
return await self.reward_score(
prompt=prompt,
input_ids=input_ids,
image_data=image_data,
lora_path=lora_path,
)
async def abort_request(self, rid: str = "", abort_all: bool = False) -> dict[str, Any]:
"""Abort a request asynchronously.
Args:
rid (str): The ID of the request to abort
abort_all (bool, optional): Whether to abort all requests. Defaults to False.
Returns:
Dict[str, Any]: Server response indicating abort status
"""
return await self._make_async_request("abort_request", {"rid": rid, "abort_all": abort_all})
| {
"repo_id": "verl-project/verl",
"file_path": "verl/workers/rollout/sglang_rollout/http_server_engine.py",
"license": "Apache License 2.0",
"lines": 807,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/trainer/ppo/utils.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from enum import Enum
from omegaconf import DictConfig
from verl.single_controller.base import Worker
from verl.trainer.ppo.core_algos import AdvantageEstimator
WorkerType = type[Worker]
class Role(Enum):
"""
To create more roles dynamically, you can subclass Role and add new members
"""
Actor = 0
Rollout = 1
ActorRollout = 2
Critic = 3
RefPolicy = 4
RewardModel = 5
ActorRolloutRef = 6
Env = 7
def __str__(self):
return self._get_role_string()
def _get_role_string(self):
role_mapping = {
Role.Actor: "actor",
Role.Rollout: "rollout",
Role.ActorRollout: "actor_rollout",
Role.Critic: "critic",
Role.RefPolicy: "ref",
Role.RewardModel: "rm",
Role.ActorRolloutRef: "actor_rollout_ref",
}
return role_mapping.get(self, self.name.lower())
@classmethod
def from_string(cls, name: str):
string_mapping = {
"actor": cls.Actor,
"rollout": cls.Rollout,
"actor_rollout": cls.ActorRollout,
"critic": cls.Critic,
"ref": cls.RefPolicy,
"rm": cls.RewardModel,
"actor_rollout_ref": cls.ActorRolloutRef,
}
role = string_mapping.get(name.lower())
if role is None:
raise ValueError(f"No Role found for string: {name}")
return role
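# Editor's note: the mapping above gives a simple round trip, e.g.
# Role.from_string("actor_rollout") is Role.ActorRollout and
# str(Role.ActorRollout) == "actor_rollout".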
def need_reference_policy(
config: DictConfig,
) -> bool:
"""Given the config, do we need ref policy."""
return config.algorithm.use_kl_in_reward or config.actor_rollout_ref.actor.use_kl_loss
def need_reward_model(
config: DictConfig,
) -> bool:
"""Given the config, do we need reward model."""
return config.reward.reward_model.enable
def need_critic(config: DictConfig) -> bool:
"""Given a config, do we need critic."""
if config.critic.enable is not None:
return bool(config.critic.enable)
elif config.algorithm.adv_estimator == AdvantageEstimator.GAE:
return True
else:
warnings.warn(
"Disabled critic as algorithm.adv_estimator != gae. If it is not intended, please set critic.enable=True",
stacklevel=2,
)
return False
| {
"repo_id": "verl-project/verl",
"file_path": "verl/trainer/ppo/utils.py",
"license": "Apache License 2.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:tests/utils/test_special_mstx_profile.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest.mock import MagicMock, patch
from verl.utils.profiler.config import NPUToolConfig, ProfilerConfig
from verl.utils.profiler.mstx_profile import NPUProfiler
from verl.utils.profiler.profile import DistProfiler
class TestNPUProfilerInitialization(unittest.TestCase):
def setUp(self):
NPUProfiler._define_count = 0
def test_init_with_default_config(self):
tool_config = NPUToolConfig()
config = ProfilerConfig(tool="npu")
profiler = DistProfiler(rank=0, config=config, tool_config=tool_config)
self.assertFalse(profiler.check_enable())
def test_init_with_disabled_config(self):
config = ProfilerConfig(enable=False, tool="npu")
tool_config = NPUToolConfig()
profiler = DistProfiler(rank=0, config=config, tool_config=tool_config)
self.assertFalse(profiler.check_enable())
def test_init_with_all_ranks_true(self):
config = ProfilerConfig(enable=True, all_ranks=True, tool="npu")
tool_config = NPUToolConfig()
profiler = DistProfiler(rank=0, config=config, tool_config=tool_config)
self.assertTrue(profiler.check_this_rank())
def test_init_with_ranks_list(self):
config = ProfilerConfig(enable=True, ranks=[1, 2], tool="npu")
tool_config = NPUToolConfig()
profiler = DistProfiler(rank=1, config=config, tool_config=tool_config)
self.assertTrue(profiler.check_this_rank())
def test_init_with_rank_not_in_ranks(self):
config = ProfilerConfig(enable=True, ranks=[1, 2], tool="npu")
tool_config = NPUToolConfig()
profiler = DistProfiler(rank=3, config=config, tool_config=tool_config)
self.assertFalse(profiler.check_this_rank())
class TestNPUProfilerStart(unittest.TestCase):
def setUp(self):
NPUProfiler._define_count = 0
self.config = ProfilerConfig(enable=True, ranks=[0], tool="npu")
self.tool_config = NPUToolConfig(discrete=False)
@patch("verl.utils.profiler.mstx_profile.get_npu_profiler")
def test_start_when_enabled_and_this_rank(self, mock_get_profiler):
profiler = DistProfiler(rank=0, config=self.config, tool_config=self.tool_config)
profiler.start(role="worker", profile_step="1")
self.assertTrue(profiler.check_this_step())
self.assertEqual(NPUProfiler._define_count, 1)
mock_get_profiler.assert_called_once()
@patch("verl.utils.profiler.mstx_profile.get_npu_profiler")
def test_start_when_not_this_rank(self, mock_get_profiler):
profiler = DistProfiler(rank=1, config=self.config, tool_config=self.tool_config)
profiler.start()
self.assertFalse(profiler.check_this_step())
self.assertEqual(NPUProfiler._define_count, 0)
mock_get_profiler.assert_not_called()
@patch("verl.utils.profiler.mstx_profile.get_npu_profiler")
def test_start_discrete_mode_does_not_increase_count(self, mock_get_profiler):
tool_config = NPUToolConfig(discrete=True)
profiler = DistProfiler(rank=0, config=self.config, tool_config=tool_config)
profiler.start()
self.assertEqual(NPUProfiler._define_count, 0)
mock_get_profiler.assert_not_called()
@patch("verl.utils.profiler.mstx_profile.get_npu_profiler")
def test_multiple_start_calls_do_not_increase_count(self, mock_get_profiler):
profiler = DistProfiler(rank=0, config=self.config, tool_config=self.tool_config)
profiler.start()
profiler.start()
self.assertEqual(NPUProfiler._define_count, 1)
mock_get_profiler.assert_called_once()
class TestNPUProfilerStartStopInteraction(unittest.TestCase):
def setUp(self):
NPUProfiler._define_count = 0
self.config = ProfilerConfig(enable=True, ranks=[0], tool="npu")
self.tool_config = NPUToolConfig(discrete=False)
@patch("verl.utils.profiler.mstx_profile.get_npu_profiler")
def test_start_stop_cycle(self, mock_get_profiler):
mock_profile_npu = MagicMock()
mock_get_profiler.return_value = mock_profile_npu
profiler = DistProfiler(rank=0, config=self.config, tool_config=self.tool_config)
profiler.start()
self.assertEqual(NPUProfiler._define_count, 1)
self.assertEqual(mock_profile_npu.start.call_count, 1)
profiler.stop()
self.assertEqual(NPUProfiler._define_count, 0)
self.assertEqual(mock_profile_npu.step.call_count, 1)
self.assertEqual(mock_profile_npu.stop.call_count, 1)
@patch("verl.utils.profiler.mstx_profile.get_npu_profiler")
def test_multiple_instances_share_define_count(self, mock_get_profiler):
mock_profile_npu = MagicMock()
mock_get_profiler.return_value = mock_profile_npu
profiler1 = DistProfiler(rank=0, config=self.config, tool_config=self.tool_config)
profiler2 = DistProfiler(rank=0, config=self.config, tool_config=self.tool_config)
profiler1.start()
profiler2.start()
self.assertEqual(NPUProfiler._define_count, 1)
self.assertEqual(mock_profile_npu.start.call_count, 1)
profiler1.stop()
self.assertEqual(NPUProfiler._define_count, 0)
class TestNPUProfilerAnnotate(unittest.TestCase):
def setUp(self):
self.config = ProfilerConfig(enable=True, all_ranks=True, tool="npu")
self.tool_config = NPUToolConfig(discrete=False)
self.rank = 0
def test_annotate_decorator_applied_correctly(self):
mock_worker = MagicMock()
mock_worker.profiler = DistProfiler(rank=self.rank, config=self.config, tool_config=self.tool_config)
# Manually set private attribute for testing annotation in active step
mock_worker.profiler._this_step = True
mock_mark_range = "mocked_range_handle"
with (
patch("verl.utils.profiler.mstx_profile.mark_start_range") as mock_start_patch,
patch("verl.utils.profiler.mstx_profile.mark_end_range") as mock_end_patch,
):
mock_start_patch.return_value = mock_mark_range
with patch("verl.utils.profiler.mstx_profile.get_npu_profiler") as mock_get_profiler:
decorator = mock_worker.profiler.annotate(message="test")
@decorator
def test_func(self, *args, **kwargs):
return "result"
result = test_func(mock_worker)
self.assertEqual(result, "result")
mock_start_patch.assert_called_once_with(message="test")
mock_end_patch.assert_called_once_with(mock_mark_range)
mock_get_profiler.assert_not_called()
def test_annotate_when_profiler_disabled(self):
disabled_config = ProfilerConfig(enable=False, tool="npu")
mock_worker = MagicMock()
mock_worker.profiler = DistProfiler(rank=self.rank, config=disabled_config, tool_config=self.tool_config)
with (
patch("verl.utils.profiler.mstx_profile.mark_start_range") as mock_start_patch,
patch("verl.utils.profiler.mstx_profile.mark_end_range") as mock_end_patch,
patch("verl.utils.profiler.mstx_profile.get_npu_profiler") as mock_get_profiler,
):
decorator = mock_worker.profiler.annotate(message="test")
@decorator
def test_func(self, *args, **kwargs):
return "result"
result = test_func(mock_worker)
self.assertEqual(result, "result")
mock_start_patch.assert_not_called()
mock_end_patch.assert_not_called()
mock_get_profiler.assert_not_called()
def test_annotate_when_this_step_disabled(self):
mock_worker = MagicMock()
mock_worker.profiler = DistProfiler(rank=self.rank, config=self.config, tool_config=self.tool_config)
mock_worker.profiler._this_step = False
with (
patch("verl.utils.profiler.mstx_profile.mark_start_range") as mock_start_patch,
patch("verl.utils.profiler.mstx_profile.mark_end_range") as mock_end_patch,
patch("verl.utils.profiler.mstx_profile.get_npu_profiler") as mock_get_profiler,
):
decorator = mock_worker.profiler.annotate(message="test")
@decorator
def test_func(self, *args, **kwargs):
return "result"
result = test_func(mock_worker)
self.assertEqual(result, "result")
mock_start_patch.assert_not_called()
mock_end_patch.assert_not_called()
mock_get_profiler.assert_not_called()
def test_annotate_discrete_mode_enabled(self):
discrete_tool_config = NPUToolConfig(discrete=True)
mock_worker = MagicMock()
mock_worker.profiler = DistProfiler(rank=self.rank, config=self.config, tool_config=discrete_tool_config)
mock_worker.profiler._this_step = True
mock_mark_range = "mocked_range_handle"
mock_profile_npu = MagicMock()
with (
patch("verl.utils.profiler.mstx_profile.mark_start_range") as mock_start_patch,
patch("verl.utils.profiler.mstx_profile.mark_end_range") as mock_end_patch,
patch("verl.utils.profiler.mstx_profile.get_npu_profiler") as mock_get_profiler,
):
mock_start_patch.return_value = mock_mark_range
mock_get_profiler.return_value = mock_profile_npu
decorator = mock_worker.profiler.annotate(message="test", role="test_role")
@decorator
def test_func(self, *args, **kwargs):
return "result"
result = test_func(mock_worker)
self.assertEqual(result, "result")
mock_start_patch.assert_called_once_with(message="test")
mock_end_patch.assert_called_once_with(mock_mark_range)
mock_get_profiler.assert_called_once_with(
contents=mock_worker.profiler._impl.profile_contents,
profile_level=mock_worker.profiler._impl.profile_level,
profile_save_path=mock_worker.profiler._impl.profile_save_path,
analysis=mock_worker.profiler._impl.analysis,
role="test_role",
)
mock_profile_npu.start.assert_called_once()
mock_profile_npu.step.assert_called_once()
mock_profile_npu.stop.assert_called_once()
def test_annotate_with_default_message(self):
mock_worker = MagicMock()
mock_worker.profiler = DistProfiler(rank=self.rank, config=self.config, tool_config=self.tool_config)
mock_worker.profiler._this_step = True
mock_mark_range = "mocked_range_handle"
with (
patch("verl.utils.profiler.mstx_profile.mark_start_range") as mock_start_patch,
patch("verl.utils.profiler.mstx_profile.mark_end_range") as mock_end_patch,
):
mock_start_patch.return_value = mock_mark_range
decorator = mock_worker.profiler.annotate()
@decorator
def test_func(self, *args, **kwargs):
return "result"
test_func(mock_worker)
mock_start_patch.assert_called_once_with(message="test_func")
mock_end_patch.assert_called_once_with(mock_mark_range)
if __name__ == "__main__":
unittest.main()
| {
"repo_id": "verl-project/verl",
"file_path": "tests/utils/test_special_mstx_profile.py",
"license": "Apache License 2.0",
"lines": 221,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:tests/single_controller/test_nested_worker.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ray
from verl.single_controller.base.decorator import Dispatch, register
from verl.single_controller.base.worker import Worker
from verl.single_controller.ray.base import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup
from verl.utils.device import get_device_name
class TestActor(Worker):
# TODO: passing *args and **kwargs is bug-prone and not very convincing
def __init__(self, x) -> None:
super().__init__()
self.a = x
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def get(self):
return self.a + self.rank
class TestHighLevelActor(Worker):
def __init__(self, x=None) -> None:
super().__init__()
self.test_actor = TestActor(x=x)
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def get(self):
return self.test_actor.get()
def test_nested_worker():
ray.init(num_cpus=100)
# create 4 workers, each hold a GPU
resource_pool = RayResourcePool([4], use_gpu=True)
class_with_args = RayClassWithInitArgs(cls=ray.remote(TestActor), x=2)
worker_group = RayWorkerGroup(
resource_pool=resource_pool,
ray_cls_with_init=class_with_args,
name_prefix="worker_group_basic",
device_name=get_device_name(),
)
output = worker_group.get()
assert output == [2, 3, 4, 5]
class_with_args = RayClassWithInitArgs(cls=ray.remote(TestHighLevelActor), x=2)
high_level_worker_group = RayWorkerGroup(
resource_pool=resource_pool,
ray_cls_with_init=class_with_args,
name_prefix="worker_group_basic_2",
device_name=get_device_name(),
)
output_1 = high_level_worker_group.get()
assert output_1 == [2, 3, 4, 5]
ray.shutdown()
| {
"repo_id": "verl-project/verl",
"file_path": "tests/single_controller/test_nested_worker.py",
"license": "Apache License 2.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:verl/workers/config/model.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from typing import Any, Optional
from omegaconf import MISSING
from transformers import AutoConfig
from verl.base_config import BaseConfig
from verl.utils import hf_processor, hf_tokenizer
from verl.utils.fs import copy_to_local
from verl.utils.import_utils import import_external_libs
from verl.utils.model import get_generation_config, update_model_config
__all__ = ["HFModelConfig", "MtpConfig"]
@dataclass
class MtpConfig(BaseConfig):
"""
Configuration for MTP model.
enable: Enable loading and saving of MTP parameters, but do not use them
enable_train: Whether to enable using MTP parameters during training
enable_rollout: Whether to enable using MTP parameters during rollout
Training parameters:
detach_encoder: Whether to detach encoder parameters during MTP training
mtp_loss_scaling_factor: Loss scaling factor during MTP training
vLLM rollout parameters:
method: "mtp"
num-speculative-tokens: 1
SGLang rollout parameters:
speculative-algorithm: EAGLE
speculative-num-steps: 3
speculative-eagle-topk: 1
speculative-num-draft-tokens: 4
"""
enable: bool = False
enable_train: bool = False
enable_rollout: bool = False
detach_encoder: bool = False
mtp_loss_scaling_factor: float = 0.1
speculative_algorithm: str = "EAGLE"
speculative_num_steps: int = 3
speculative_eagle_topk: int = 1
speculative_num_draft_tokens: int = 4
method: str = "mtp"
num_speculative_tokens: int = 1
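# Editor's sketch (commented out): enabling MTP for training only, with an assumed
# loss scale; all other fields keep their defaults:
#
#     mtp_cfg = MtpConfig(enable=True, enable_train=True, mtp_loss_scaling_factor=0.1)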
@dataclass
class HFModelConfig(BaseConfig):
# note that we separate model_path, model_config_path and tokenizer_path in case they are different
_mutable_fields = {
"hf_config_path",
"tokenizer_path",
"hf_config",
"generation_config",
"tokenizer",
"processor",
"local_path",
"architectures",
"local_hf_config_path",
"local_tokenizer_path",
}
path: str = MISSING
local_path: Optional[str] = None
hf_config_path: Optional[str] = None
local_hf_config_path: Optional[str] = None
tokenizer_path: Optional[str] = None
local_tokenizer_path: Optional[str] = None
    # whether to load the tokenizer. This is useful when we only want to load the model config
load_tokenizer: bool = True
hf_config: Any = None
generation_config: Any = None
tokenizer: Any = None
processor: Any = None
# whether to use shared memory
use_shm: bool = False
trust_remote_code: bool = False
# custom chat template for the model
custom_chat_template: Optional[str] = None
external_lib: Optional[str] = None
override_config: dict = field(default_factory=dict)
enable_gradient_checkpointing: bool = True
enable_activation_offload: bool = False
use_remove_padding: bool = True
# TODO: unify fsdp and megatron lora config
    # fsdp lora related. We may set up a separate config later
lora_rank: int = 0
lora_alpha: int = 16
target_modules: Optional[Any] = "all-linear" # allow both "all-linear" and ["q_proj","k_proj"]
target_parameters: Optional[list[str]] = None # for lora adapter on nn.Parameter
exclude_modules: Optional[str] = None
# megatron lora config
lora: dict[str, Any] = field(default_factory=dict)
# path to pre-trained LoRA adapter to load for continued training
lora_adapter_path: Optional[str] = None
use_liger: bool = False
use_fused_kernels: bool = False
fused_kernel_options: dict = field(default_factory=dict)
# TiledMLP configuration for memory-efficient MLP computation
tiled_mlp: dict = field(default_factory=lambda: {"enabled": False, "num_shards": 4})
architectures: Optional[list[str]] = None
mtp: MtpConfig = field(default_factory=MtpConfig)
def __post_init__(self):
import_external_libs(self.external_lib)
if self.hf_config_path is None:
self.hf_config_path = self.path
if self.tokenizer_path is None:
self.tokenizer_path = self.path
self.local_path = copy_to_local(self.path, use_shm=self.use_shm)
# construct tokenizer
if self.load_tokenizer:
self.local_tokenizer_path = copy_to_local(self.tokenizer_path, use_shm=self.use_shm)
self.tokenizer = hf_tokenizer(self.local_tokenizer_path, trust_remote_code=self.trust_remote_code)
self.processor = hf_processor(self.local_tokenizer_path, trust_remote_code=self.trust_remote_code)
if self.custom_chat_template is not None:
if self.processor is not None:
self.processor.chat_template = self.custom_chat_template
else:
self.tokenizer.chat_template = self.custom_chat_template
self.local_hf_config_path = copy_to_local(self.hf_config_path, use_shm=self.use_shm)
self.generation_config = get_generation_config(
self.local_hf_config_path, trust_remote_code=self.trust_remote_code
)
# construct hf_config
attn_implementation = self.override_config.get("attn_implementation", "flash_attention_2")
self.hf_config = AutoConfig.from_pretrained(
self.local_hf_config_path, trust_remote_code=self.trust_remote_code, attn_implementation=attn_implementation
)
override_config_kwargs = {}
if self.tokenizer is not None:
override_config_kwargs.update(
{
"bos_token_id": self.tokenizer.bos_token_id,
"eos_token_id": self.tokenizer.eos_token_id,
"pad_token_id": self.tokenizer.pad_token_id,
}
)
# TODO: (vermouth1992). self.config.model in megatron differs from that of fsdp in the override_config.
override_config = (
self.override_config["model_config"] if "model_config" in self.override_config else self.override_config
)
override_config_kwargs.update(override_config)
update_model_config(self.hf_config, override_config_kwargs=override_config_kwargs)
self.share_embeddings_and_output_weights = getattr(self.hf_config, "tie_word_embeddings", False)
# get model architectures
self.architectures = getattr(self.hf_config, "architectures", None)
assert self.architectures is not None and len(self.architectures) == 1, (
"Expect only one architecture, got {}".format(self.architectures)
)
# per model patch
if getattr(self.hf_config, "model_type", None) == "kimi_vl":
self.hf_config.text_config.topk_method = "greedy"
# Ensure target_modules is a str or list[str] (only if not None)
if self.target_modules is not None:
            if not isinstance(self.target_modules, (str, list)):
raise TypeError(
"target_modules must be a string or a list of strings, "
f"but got {type(self.target_modules).__name__}"
)
if isinstance(self.target_modules, list):
for x in self.target_modules:
if not isinstance(x, str):
raise TypeError(
f"All elements in target_modules list must be strings, but found {type(x).__name__}"
)
def get_processor(self):
return self.processor if self.processor is not None else self.tokenizer
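# Hedged usage sketch (editor's addition): constructing an HFModelConfig for config-only use.
# The model id below is illustrative; __post_init__ resolves the HF config (downloading from
# the Hub if the path is a model id), so this helper is intentionally left uncalled here.
def _example_hf_model_config() -> HFModelConfig:
    return HFModelConfig(path="Qwen/Qwen2.5-0.5B-Instruct", load_tokenizer=False)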
| {
"repo_id": "verl-project/verl",
"file_path": "verl/workers/config/model.py",
"license": "Apache License 2.0",
"lines": 173,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:tests/experimental/agent_loop/test_multi_modal.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from typing import Any
import numpy as np
import pytest
import ray
from omegaconf import DictConfig
from PIL import Image
from transformers.utils import get_json_schema
from tests.experimental.agent_loop.agent_utils import init_agent_loop_manager
from verl.protocol import DataProto
from verl.tools.base_tool import BaseTool, OpenAIFunctionToolSchema
from verl.tools.schemas import ToolResponse
from verl.utils import hf_tokenizer
def parse_multi_modal_type(messages: list[dict]) -> str:
message = messages[-1]
if isinstance(message["content"], str):
return "text"
for content in message["content"]:
if content["type"] == "image":
return "image"
elif content["type"] == "video":
return "video"
return "text"
@pytest.fixture
def init_config() -> DictConfig:
from hydra import compose, initialize_config_dir
with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config")):
config = compose(
config_name="ppo_trainer",
overrides=[
"actor_rollout_ref.actor.use_dynamic_bsz=true",
# test sleep/wake_up with fsdp offload
"actor_rollout_ref.actor.fsdp_config.param_offload=True",
"actor_rollout_ref.actor.fsdp_config.optimizer_offload=True",
],
)
model_path = os.path.expanduser("~/models/Qwen/Qwen2.5-VL-3B-Instruct")
config.actor_rollout_ref.model.path = model_path
config.actor_rollout_ref.rollout.name = os.environ["ROLLOUT_NAME"]
config.actor_rollout_ref.rollout.mode = "async"
config.actor_rollout_ref.rollout.enforce_eager = True
config.actor_rollout_ref.rollout.prompt_length = 10240
config.actor_rollout_ref.rollout.response_length = 4096
config.actor_rollout_ref.rollout.n = 4
config.actor_rollout_ref.rollout.agent.num_workers = 2
config.actor_rollout_ref.rollout.skip_tokenizer_init = True
return config
class ImageGeneratorTool(BaseTool):
def generate_image(self, description: str, size: str = "256x256"):
"""Generate a simple image based on description.
Args:
description: The description of the image to generate.
size: The size of the image. Defaults to "256x256". (choices: ["256x256", "512x512"])
Returns:
A generated image
"""
print(f"[DEBUG] generate_image: {description}, {size}")
# Create a simple colored image for testing
width, height = map(int, size.split("x"))
# Create different colors based on description
if "red" in description.lower():
color = (255, 0, 0)
elif "blue" in description.lower():
color = (0, 0, 255)
elif "green" in description.lower():
color = (0, 255, 0)
else:
color = (128, 128, 128) # gray
# Create image
image = Image.new("RGB", (width, height), color)
# Add some pattern to make it more interesting
for i in range(0, width, 50):
for j in range(0, height, 50):
# Add white squares in a grid pattern
for x in range(i, min(i + 20, width)):
for y in range(j, min(j + 20, height)):
image.putpixel((x, y), (255, 255, 255))
return image
def get_openai_tool_schema(self) -> OpenAIFunctionToolSchema:
schema = get_json_schema(self.generate_image)
return OpenAIFunctionToolSchema(**schema)
async def execute(self, instance_id: str, parameters: dict[str, Any], **kwargs) -> tuple[ToolResponse, float, dict]:
try:
image = self.generate_image(**parameters)
# Return the PIL Image directly - the framework should handle the conversion
return ToolResponse(image=[image]), 0, {}
except Exception as e:
return ToolResponse(text=str(e)), 0, {}
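# Hedged note (editor's addition): generate_image keys its base color off the description
# ("red" -> (255, 0, 0), "blue" -> (0, 0, 255), "green" -> (0, 255, 0), anything else -> gray)
# and then stamps a white grid on top, so the returned PIL image is deterministic for a given
# description and size.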
@pytest.mark.flaky(reruns=3)
def test_multimodal_tool_agent(init_config):
"""Test agent loop with multimodal tool that returns images using Qwen VL model."""
ray.shutdown()
ray.init(
runtime_env={
"env_vars": {
"TOKENIZERS_PARALLELISM": "true",
"NCCL_DEBUG": "WARN",
"VLLM_LOGGING_LEVEL": "INFO",
"VLLM_USE_V1": "1",
}
},
ignore_reinit_error=True,
)
# Add custom chat template to enable tool calling support (same as recipe/deepeyes)
template_path = os.path.join(os.path.dirname(__file__), "qwen_vl_tool_chat_template.jinja2")
with open(template_path, encoding="utf-8") as f:
custom_chat_template = f.read()
init_config.actor_rollout_ref.model.custom_chat_template = custom_chat_template
# =========================== 1. Init rollout manager with image tool ===========================
tool_config = {
"tools": [
{
"class_name": "tests.experimental.agent_loop.test_multi_modal.ImageGeneratorTool",
"config": {"type": "native"},
},
]
}
tool_config_path = "/tmp/multimodal_tool_config.json"
with open(tool_config_path, "w") as f:
json.dump(tool_config, f)
n = 2
init_config.actor_rollout_ref.rollout.n = n
init_config.actor_rollout_ref.rollout.multi_turn.tool_config_path = tool_config_path
init_config.actor_rollout_ref.rollout.multi_turn.max_parallel_calls = 1
init_config.actor_rollout_ref.rollout.multi_turn.max_user_turns = 1
agent_loop_manager = init_agent_loop_manager(init_config)
# =========================== 2. Generate sequences with multimodal prompts ===========================
raw_prompts = [
[
{"role": "user", "content": "How are you?"},
],
[
{
"role": "user",
"content": [
{
"type": "video",
"video": os.path.expanduser("~/models/hf_data/test-videos/space_woaudio.mp4"),
"min_pixels": 4 * 32 * 32,
"max_pixels": 256 * 32 * 32,
"total_pixels": 4096 * 32 * 32,
},
{
"type": "text",
"text": "Describe this video. Then you must call the "
"image generator tool to generate a green image for me.",
},
],
},
],
[
{"role": "user", "content": "Please generate a red image for me."},
],
[
{"role": "user", "content": "Can you create a blue picture with size 512x512?"},
],
[
{
"role": "system",
"content": (
"You are Qwen VL, created by Alibaba Cloud. You are a helpful "
"assistant that can generate and analyze images."
),
},
{"role": "user", "content": "Generate a green landscape image and describe what you see in it."},
],
]
batch = DataProto(
non_tensor_batch={
"raw_prompt": np.array([np.array(prompt) for prompt in raw_prompts], dtype=object),
"agent_name": np.array(["tool_agent"] * len(raw_prompts)),
"data_source": np.array(["openai/gsm8k"] * len(raw_prompts)),
"reward_model": np.array([{"style": "rule", "ground_truth": "1.0"}] * len(raw_prompts)),
},
)
batch = batch.repeat(n)
result = agent_loop_manager.generate_sequences(prompts=batch)
assert len(result) == len(raw_prompts) * n
# Check turns
num_turns = result.non_tensor_batch["__num_turns__"]
multi_modal_inputs = result.non_tensor_batch["multi_modal_inputs"]
print(f"num_turns: {num_turns}")
for i in range(len(num_turns)):
multi_modal_type = parse_multi_modal_type(raw_prompts[i // n])
if multi_modal_type == "video":
assert "pixel_values_videos" in multi_modal_inputs[i], f"Sample {i} should have pixel_values_videos"
assert "video_grid_thw" in multi_modal_inputs[i], f"Sample {i} should have video_grid_thw"
if i // n <= 1:
            # TODO: prompts with video do not trigger a tool call as expected, so the first two
            # prompts ("How are you?" and the video prompt) are expected to have 2 turns [user, assistant]
            assert num_turns[i] == 2, f"Expected 2 turns but got {num_turns[i]} for sample {i}"
else:
# Tool-calling prompts should have 4 turns [user, assistant, tool, assistant]
assert num_turns[i] == 4, f"Expected 4 turns but got {num_turns[i]} for sample {i}"
assert "pixel_values" in multi_modal_inputs[i], f"Sample {i} should have pixel_values"
assert "image_grid_thw" in multi_modal_inputs[i], f"Sample {i} should have image_grid_thw"
# Check that images were properly returned in the tool responses
tokenizer = hf_tokenizer(init_config.actor_rollout_ref.model.path)
responses = result.batch["responses"]
response_mask = result.batch["response_mask"]
attention_mask = result.batch["attention_mask"]
assert responses.size() == response_mask.size(), f"{responses.size()} != {response_mask.size()}"
response_length = response_mask.size(1)
image_found_count = 0
for i in range(len(responses)):
# response with tool response (including images)
valid_tokens = responses[i][attention_mask[i][-response_length:].bool()]
response_with_obs = tokenizer.decode(valid_tokens)
# response without tool response
valid_tokens = responses[i][response_mask[i].bool()]
response_without_obs = tokenizer.decode(valid_tokens)
# Check that tool responses were properly masked out from training
assert "<tool_response>" not in response_without_obs, (
f"found <tool_response> in response: {response_without_obs}"
)
assert "</tool_response>" not in response_without_obs, (
f"found </tool_response> in response: {response_without_obs}"
)
# Check that images were included in the full response
if "<image>" in response_with_obs or "image" in response_with_obs.lower():
image_found_count += 1
print("=========================")
print("Response with tool observations:")
print(response_with_obs)
print("---")
print("Response without tool observations:")
print(response_without_obs)
# Verify that tool-calling responses contained image-related content
print(f"Found {image_found_count} responses with image content out of {len(responses)}")
# We should have at least some image content from the tool-calling prompts
# Note: First prompt might not use tools, so we don't expect 100% image content
expected_tool_calls = sum(1 for i in range(len(num_turns)) if num_turns[i] == 4)
assert image_found_count >= 0, (
f"No image-related content found, but expected at least some from {expected_tool_calls} tool calls"
)
print("Multimodal tool test passed!")
ray.shutdown()
def test_multimodal_single_turn_agent(init_config):
"""Test single turn agent loop with multimodal inputs using Qwen VL model."""
ray.init(
runtime_env={
"env_vars": {
"TOKENIZERS_PARALLELISM": "true",
"NCCL_DEBUG": "WARN",
"VLLM_LOGGING_LEVEL": "INFO",
"VLLM_USE_V1": "1",
}
},
ignore_reinit_error=True,
)
# =========================== 1. Init rollout manager ===========================
n = 2
init_config.actor_rollout_ref.rollout.n = n
init_config.actor_rollout_ref.rollout.multi_turn.max_parallel_calls = 1
init_config.actor_rollout_ref.rollout.multi_turn.max_user_turns = 1
agent_loop_manager = init_agent_loop_manager(init_config)
# =========================== 2. Generate sequences with multimodal prompts ===========================
# Create a simple test image
test_image = Image.new("RGB", (256, 256), (100, 150, 200))
test_image2 = Image.new("RGB", (512, 512), (100, 150, 200))
raw_prompts = [
# text
[
{"role": "user", "content": "Hello, how are you?"},
],
# image
[
{
"role": "user",
"content": [
{"type": "image", "image": test_image},
{"type": "text", "text": "What color is this image?"},
],
},
],
# system + image
[
{
"role": "system",
"content": "You are Qwen VL, created by Alibaba Cloud. You are a helpful assistant.",
},
{
"role": "user",
"content": [
{"type": "image", "image": test_image2},
{"type": "text", "text": "Describe this image in detail."},
],
},
],
# video
[
{
"role": "user",
"content": [
{
"type": "video",
"video": os.path.expanduser("~/models/hf_data/test-videos/space_woaudio.mp4"),
"min_pixels": 4 * 32 * 32,
"max_pixels": 256 * 32 * 32,
"total_pixels": 4096 * 32 * 32,
},
{"type": "text", "text": "Describe this video."},
],
},
],
]
batch = DataProto(
non_tensor_batch={
"raw_prompt": np.array([np.array(prompt) for prompt in raw_prompts], dtype=object),
"agent_name": np.array(["single_turn_agent"] * len(raw_prompts)),
"data_source": np.array(["openai/gsm8k"] * len(raw_prompts)),
"reward_model": np.array([{"style": "rule", "ground_truth": "1.0"}] * len(raw_prompts)),
},
)
batch = batch.repeat(n)
result = agent_loop_manager.generate_sequences(prompts=batch)
assert len(result) == len(raw_prompts) * n
# Check turns - all should be single turn (2: user + assistant)
num_turns = result.non_tensor_batch["__num_turns__"]
print(f"num_turns: {num_turns}")
for i in range(len(num_turns)):
assert num_turns[i] == 2, f"Expected 2 turns but got {num_turns[i]} for sample {i}"
# Verify responses
tokenizer = hf_tokenizer(init_config.actor_rollout_ref.model.path)
prompts = result.batch["prompts"]
responses = result.batch["responses"]
response_mask = result.batch["response_mask"]
input_ids = result.batch["input_ids"]
position_ids = result.batch["position_ids"]
multi_modal_inputs = result.non_tensor_batch["multi_modal_inputs"]
assert responses.size() == response_mask.size(), f"{responses.size()} != {response_mask.size()}"
assert position_ids.size() == (input_ids.size(0), 4, input_ids.size(1)) # (batch_size, 4, seq_len)
# Check for image pads in prompts
image_pad_count = 0
for i in range(len(prompts)):
prompt_ids = prompts[i][prompts[i] != tokenizer.pad_token_id].tolist()
prompt_text = tokenizer.decode(prompt_ids)
# Check if this sample should have image pads (samples with index 1 and 2 in each repeat have images)
sample_idx = i // n
has_image_pad = "<|image_pad|>" in prompt_text or "<|vision_start|>" in prompt_text
print("=========================")
print(f"Sample {i} (original prompt index: {sample_idx}):")
print(f"Prompt length: {len(prompt_ids)} tokens")
print(f"Has image_pad: {has_image_pad}")
# Check multi-modal type
multi_modal_type = parse_multi_modal_type(raw_prompts[sample_idx])
if multi_modal_type == "text":
assert len(multi_modal_inputs[i]) == 0, f"Sample {i} should not have multi-modal inputs"
elif multi_modal_type == "image":
assert "pixel_values" in multi_modal_inputs[i], f"Sample {i} should have pixel_values"
assert "image_grid_thw" in multi_modal_inputs[i], f"Sample {i} should have image_grid_thw"
else:
assert "pixel_values_videos" in multi_modal_inputs[i], f"Sample {i} should have pixel_values_videos"
assert "video_grid_thw" in multi_modal_inputs[i], f"Sample {i} should have video_grid_thw"
# Show first 200 chars of prompt
print(f"Prompt text (first 200 chars): {prompt_text[:200]}...")
for i in range(len(responses)):
valid_tokens = responses[i][response_mask[i].bool()]
response_text = tokenizer.decode(valid_tokens)
print(f"Sample {i} response: {response_text[:100]}...")
# Verify that we found image pads in multimodal samples
expected_multimodal_samples = 2 * n # 2 prompts with images, repeated n times
print(f"\nFound {image_pad_count} samples with image_pad out of {expected_multimodal_samples} expected")
print("Single turn multimodal test passed!")
ray.shutdown()
def test_multimodal_partial_single_turn_agent(init_config):
"""Test partial single turn agent loop with multimodal inputs using Qwen VL model."""
# TODO(baiyan):
# see verl/recipe/fully_async_policy/agent_loop/partial_single_turn_agent_loop.py for more details.
# if use_correct_processor=True, the test will pass but the async training will hang, so I disable this test
# for now
return
ray.init(
runtime_env={
"env_vars": {
"TOKENIZERS_PARALLELISM": "true",
"NCCL_DEBUG": "WARN",
"VLLM_LOGGING_LEVEL": "INFO",
"VLLM_USE_V1": "1",
}
},
ignore_reinit_error=True,
)
from verl.experimental.fully_async_policy.agent_loop import FullyAsyncAgentLoopManager
# =========================== 1. Init rollout manager ===========================
n = 2
init_config.actor_rollout_ref.rollout.n = n
init_config.actor_rollout_ref.rollout.multi_turn.max_parallel_calls = 1
init_config.actor_rollout_ref.rollout.multi_turn.max_user_turns = 1
import asyncio
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
agent_loop_manager = loop.run_until_complete(FullyAsyncAgentLoopManager.create(init_config))
# =========================== 2. Generate sequences with multimodal prompts ===========================
# Create a simple test image
test_image = Image.new("RGB", (256, 256), (200, 100, 50))
test_image2 = Image.new("RGB", (512, 512), (100, 150, 200))
raw_prompts = [
[
{"role": "user", "content": "What is the capital of France?"},
],
[
{
"role": "user",
"content": [
{"type": "image", "image": test_image},
{"type": "text", "text": "What do you see in this image?"},
],
},
],
[
{
"role": "system",
"content": "You are Qwen VL, a helpful multimodal assistant.",
},
{
"role": "user",
"content": [
{"type": "image", "image": test_image2},
{"type": "text", "text": "Analyze the colors in this image."},
],
},
],
]
batch = DataProto(
non_tensor_batch={
"raw_prompt": np.array([np.array(prompt) for prompt in raw_prompts], dtype=object),
"agent_name": np.array(["partial_single_turn_agent"] * len(raw_prompts)),
"data_source": np.array(["openai/gsm8k"] * len(raw_prompts)),
"reward_model": np.array([{"style": "rule", "ground_truth": "1.0"}] * len(raw_prompts)),
},
)
batch = batch.repeat(n)
result = agent_loop_manager.generate_sequences(prompts=batch)
assert len(result) == len(raw_prompts) * n
# Check turns - all should be single turn (2: user + assistant)
num_turns = result.non_tensor_batch["__num_turns__"]
print(f"num_turns: {num_turns}")
for i in range(len(num_turns)):
assert num_turns[i] == 2, f"Expected 2 turns but got {num_turns[i]} for sample {i}"
# Verify responses
tokenizer = hf_tokenizer(init_config.actor_rollout_ref.model.path)
prompts = result.batch["prompts"]
responses = result.batch["responses"]
response_mask = result.batch["response_mask"]
assert responses.size() == response_mask.size(), f"{responses.size()} != {response_mask.size()}"
# Check for image pads in prompts
image_pad_count = 0
for i in range(len(prompts)):
prompt_ids = prompts[i][prompts[i] != tokenizer.pad_token_id].tolist()
prompt_text = tokenizer.decode(prompt_ids)
# Check if this sample should have image pads (samples with index 1 and 2 in each repeat have images)
sample_idx = i // n
has_image_pad = "<|image_pad|>" in prompt_text or "<|vision_start|>" in prompt_text
print("=========================")
print(f"Sample {i} (original prompt index: {sample_idx}):")
print(f"Prompt length: {len(prompt_ids)} tokens")
print(f"Has image_pad: {has_image_pad}")
if sample_idx != 0: # Samples 1 and 2 should have images
if has_image_pad:
image_pad_count += 1
# Count the number of image_pad tokens
num_image_pads = prompt_text.count("<|image_pad|>")
print(f"Number of <|image_pad|> tokens: {num_image_pads}")
else:
print("WARNING: Expected image_pad but not found!")
# Show first 200 chars of prompt
print(f"Prompt text (first 200 chars): {prompt_text[:200]}...")
for i in range(len(responses)):
valid_tokens = responses[i][response_mask[i].bool()]
response_text = tokenizer.decode(valid_tokens)
print(f"Sample {i} response: {response_text[:100]}...")
# Verify that we found image pads in multimodal samples
expected_multimodal_samples = 2 * n # 2 prompts with images, repeated n times
print(f"\nFound {image_pad_count} samples with image_pad out of {expected_multimodal_samples} expected")
assert image_pad_count > 0, "No image_pad tokens found in multimodal samples!"
print("Partial single turn multimodal test passed!")
ray.shutdown()
| {
"repo_id": "verl-project/verl",
"file_path": "tests/experimental/agent_loop/test_multi_modal.py",
"license": "Apache License 2.0",
"lines": 492,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:verl/tools/image_zoom_in_tool.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import threading
from contextlib import ExitStack
from enum import Enum
from math import ceil, floor
from typing import Any, Callable, Optional, TypeVar
from uuid import uuid4
import ray
import ray.actor
from qwen_vl_utils import fetch_image
from .base_tool import BaseTool
from .schemas import OpenAIFunctionToolSchema, ToolResponse
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
T = TypeVar("T")
# Adapted from verl/tools/sandbox_fusion_tools.py
class PoolMode(Enum):
"""Execution pool mode enumeration."""
ThreadMode = 1
ProcessMode = 2
@ray.remote(concurrency_groups={"acquire": 1, "release": 10})
class TokenBucketWorker:
"""Ray actor for rate limiting using token bucket algorithm."""
def __init__(self, rate_limit: int):
self.rate_limit = rate_limit
self.current_count = 0 # For observability
self._semaphore = threading.Semaphore(rate_limit)
@ray.method(concurrency_group="acquire")
def acquire(self):
"""Acquire a token from the bucket."""
self._semaphore.acquire()
self.current_count += 1
@ray.method(concurrency_group="release")
def release(self):
"""Release a token back to the bucket."""
self._semaphore.release()
self.current_count -= 1
def get_current_count(self):
"""Get current number of acquired tokens."""
return self.current_count
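# Hedged usage sketch (editor's addition): acquiring and releasing a single token from the
# bucket. It assumes an initialized Ray runtime; the actor name below is illustrative only.
def _example_token_bucket_usage():
    limiter = TokenBucketWorker.options(name="rate-limiter-example", get_if_exists=True).remote(rate_limit=2)
    ray.get(limiter.acquire.remote())
    try:
        pass  # rate-limited work would go here
    finally:
        limiter.release.remote()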
class VisualExecutionWorker:
"""Worker for executing visual processing operations with optional rate limiting."""
def __init__(self, enable_global_rate_limit=True, rate_limit=10):
self.rate_limit_worker = self._init_rate_limit(rate_limit) if enable_global_rate_limit else None
def _init_rate_limit(self, rate_limit):
"""Initialize singleton rate limiter."""
return TokenBucketWorker.options(name="rate-limiter", get_if_exists=True).remote(rate_limit)
def ping(self):
"""Health check method."""
return True
def execute(self, fn: Callable[..., T], *fn_args, **fn_kwargs) -> T:
"""Execute function with optional rate limiting."""
if self.rate_limit_worker:
with ExitStack() as stack:
stack.callback(self.rate_limit_worker.release.remote)
ray.get(self.rate_limit_worker.acquire.remote())
try:
return fn(*fn_args, **fn_kwargs)
except Exception as e:
# TODO we should make this available to the tool caller
logger.warning(f"Error when executing visual processing: {e}")
else:
return fn(*fn_args, **fn_kwargs)
def init_visual_execution_pool(
num_workers: int, enable_global_rate_limit=True, rate_limit=10, mode: PoolMode = PoolMode.ThreadMode
):
"""Initialize visual execution pool."""
if mode == PoolMode.ThreadMode:
return (
ray.remote(VisualExecutionWorker)
.options(max_concurrency=num_workers)
.remote(enable_global_rate_limit=enable_global_rate_limit, rate_limit=rate_limit)
)
else:
raise NotImplementedError("Process mode is not implemented yet")
class ImageZoomInTool(BaseTool):
"""A tool for zooming in on an image by cropping it based on a bounding box.
    This tool provides zoom-in functionality by cropping a region from an image,
    with rate limiting and concurrent execution support through Ray.
Methods:
get_openai_tool_schema: Return the tool schema in OpenAI format
create: Create a tool instance for a trajectory
execute: Execute the zoom-in operation
calc_reward: Calculate the reward with respect to tool state
release: Release the tool instance
"""
MIN_DIMENSION = 28
def __init__(self, config: dict, tool_schema: OpenAIFunctionToolSchema):
"""
_tool_schema = OpenAIFunctionToolSchema.model_validate({
"type": "function",
"function": {
"name": "image_zoom_in_tool",
"description": (
"Zoom in on a specific region of an image by cropping it based on a bounding box (bbox) and an "
"optional object label."
),
"parameters": {
"type": "object",
"properties": {
"bbox_2d": {
"type": "array",
"items":{"type":"number"},
"minItems":4,
"maxItems":4,
"description": (
"The bounding box of the region to zoom in, as [x1, y1, x2, y2], where (x1, y1) is "
"the top-left corner and (x2, y2) is the bottom-right corner."
),
},
"label": {
"type": "string",
"description": "The name or label of the object in the specified bounding box (optional).",
},
},
"required": ["bbox_2d"],
},
}
})
"""
super().__init__(config, tool_schema)
self._instance_dict = {}
# Worker and rate limiting configuration
self.num_workers = config.get("num_workers", 20)
self.rate_limit = config.get("rate_limit", 50)
self.timeout = config.get("timeout", 30)
self.enable_global_rate_limit = config.get("enable_global_rate_limit", True)
self.execution_pool = init_visual_execution_pool(
num_workers=self.num_workers,
enable_global_rate_limit=self.enable_global_rate_limit,
rate_limit=self.rate_limit,
mode=PoolMode.ThreadMode,
)
logger.info(f"Initialized ImageZoomInTool with config: {config}")
def _validate_bbox(self, left: float, top: float, right: float, bottom: float) -> bool:
"""Validate the bounding box dimensions and aspect ratio."""
try:
if not (left < right and top < bottom):
logger.warning(f"Invalid bbox shape: left={left}, top={top}, right={right}, bottom={bottom}")
return False
height = bottom - top
width = right - left
# Prevent division by zero for zero-sized boxes
if min(height, width) == 0:
logger.warning(f"Bbox has zero width or height: left={left}, top={top}, right={right}, bottom={bottom}")
return False
if max(height, width) / min(height, width) > 100:
logger.warning(f"Bbox aspect ratio > 100: left={left}, top={top}, right={right}, bottom={bottom}")
return False
return True
except Exception as e:
logger.warning(f"Bbox validation error: {e}")
return False
def _maybe_resize_bbox(self, bbox_2d: list[float], image_width: int, image_height: int) -> Optional[list[float]]:
"""
Clamp, validate, and potentially resize a bounding box.
This function ensures the final bounding box is within image bounds and meets the minimum
dimension requirements. If the initial box is too small, it attempts to expand it
from its center. It performs a final check to guarantee the output dimensions are valid.
Returns:
A valid bounding box as a list of coordinates, or None if validation fails.
"""
left, top, right, bottom = bbox_2d
# 1. Clamp the initial bounding box to the image dimensions.
left = max(0.0, float(left))
top = max(0.0, float(top))
right = min(float(image_width), float(right))
bottom = min(float(image_height), float(bottom))
# 2. If clamped bbox is invalid, return immediately.
if not self._validate_bbox(left, top, right, bottom):
return None
current_bbox = [left, top, right, bottom]
height = bottom - top
width = right - left
# 3. If the box is too small, attempt to resize it.
if height < self.MIN_DIMENSION or width < self.MIN_DIMENSION:
logger.info(f"Bbox {width}x{height} is smaller than {self.MIN_DIMENSION}, attempting resize.")
center_x = (left + right) / 2.0
center_y = (top + bottom) / 2.0
min_dim = min(height, width)
if min_dim == 0: # Safeguard for zero-area boxes
return None
# 1. Calculate the target dimensions to make the smallest side MIN_DIMENSION.
ratio = self.MIN_DIMENSION / min_dim
target_width = width * ratio
target_height = height * ratio
# 2. If the target size is larger than the image, scale it down to fit.
# This preserves the aspect ratio while respecting image boundaries.
if target_width > image_width:
scale_down = image_width / target_width
target_width = image_width
target_height *= scale_down
if target_height > image_height:
scale_down = image_height / target_height
target_height = image_height
target_width *= scale_down
# 3. Determine the coordinates for the box centered on the original center.
new_half_width = target_width / 2.0
new_half_height = target_height / 2.0
new_left = center_x - new_half_width
new_top = center_y - new_half_height
# 4. Shift the box if it extends beyond the image boundaries to keep its size.
if new_left < 0:
new_left = 0
if new_top < 0:
new_top = 0
if new_left + target_width > image_width:
new_left = image_width - target_width
if new_top + target_height > image_height:
new_top = image_height - target_height
new_right = new_left + target_width
new_bottom = new_top + target_height
# Use floor and ceil for final integer coordinates.
current_bbox = [floor(new_left), floor(new_top), ceil(new_right), ceil(new_bottom)]
# 4. Final validation on the resulting bounding box (either original or resized).
final_left, final_top, final_right, final_bottom = current_bbox
if not self._validate_bbox(final_left, final_top, final_right, final_bottom):
logger.warning(f"Final bbox is invalid after processing: {current_bbox}")
return None
final_height = floor(final_bottom) - floor(final_top)
final_width = floor(final_right) - floor(final_left)
if final_height < self.MIN_DIMENSION or final_width < self.MIN_DIMENSION:
logger.warning(
f"Final bbox size ({final_width}x{final_height}) are still smaller than minimum ({self.MIN_DIMENSION})."
f"Original bbox: {bbox_2d}, original image size: {image_width}x{image_height}"
)
return None
return current_bbox
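    # Hedged worked example (editor's addition): for a 640x480 image, a 10x10 request such as
    # [100, 100, 110, 110] is smaller than MIN_DIMENSION, so it is expanded around its center
    # (105, 105) into the 28x28 box [91, 91, 119, 119] by the method above.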
def get_openai_tool_schema(self) -> OpenAIFunctionToolSchema:
return self.tool_schema
async def create(self, instance_id: Optional[str] = None, **kwargs) -> tuple[str, ToolResponse]:
"""
Creates a new instance for image zoom-in tool.
This method initializes a new session for an image, which can then be used
for operations like zooming. It fetches the image from various sources
and stores it internally.
Args:
instance_id: An optional unique identifier for the instance. If not
provided, a new UUID will be generated.
**kwargs: Should contain 'image' key with image data, or 'create_kwargs'
containing {'image': image_data}. Image can be one of the following:
- A PIL.Image.Image object.
- A string containing an HTTP or HTTPS URL.
- A string containing a local file path.
- A string containing a file URI (e.g., "file:///path/to/image.jpg").
- A string containing a base64-encoded image in the format of "data:image/jpeg;base64,..."
Returns:
Tuple of (instance_id, ToolResponse)
"""
if instance_id is None:
instance_id = str(uuid4())
# Handle create_kwargs parameter if passed
create_kwargs = kwargs.get("create_kwargs", {})
if create_kwargs:
kwargs.update(create_kwargs)
# Get image from kwargs
image = kwargs.get("image")
if image is None:
raise ValueError("Missing required 'image' parameter in kwargs")
img = fetch_image({"image": image})
self._instance_dict[instance_id] = {
"image": img,
"response": "",
"reward": 0.0,
}
return instance_id, ToolResponse()
async def execute(self, instance_id: str, parameters: dict[str, Any], **kwargs) -> tuple[ToolResponse, float, dict]:
bbox_2d = parameters.get("bbox_2d")
label = parameters.get("label", "")
if not bbox_2d or len(bbox_2d) != 4:
return (
ToolResponse(text="Error: bbox_2d parameter is missing or not a list of 4 numbers."),
-0.05,
{"success": False},
)
instance_data = self._instance_dict[instance_id]
image = instance_data["image"]
image_width, image_height = image.size
try:
resized_bbox = self._maybe_resize_bbox(bbox_2d, image_width=image_width, image_height=image_height)
if resized_bbox is None:
error_msg = (
f"Error: The specified bounding box {bbox_2d} is invalid or results in a crop smaller than "
f"the minimum size of {self.MIN_DIMENSION}x{self.MIN_DIMENSION}."
)
logger.warning(f"Tool execution failed: {error_msg}")
return ToolResponse(text=error_msg), -0.05, {"success": False}
cropped_image = image.crop(resized_bbox)
logger.info(f"Cropped image size: {cropped_image.size}")
except Exception as e:
logger.error(f"Error processing image zoom-in: {e}")
return ToolResponse(text=f"Error processing image zoom-in: {e}"), -0.05, {"success": False}
response_text = f"Zoomed in on the image to the region {bbox_2d}."
if label:
response_text = f"Zoomed in on the image to the region {bbox_2d} with label {label}."
return (
ToolResponse(
image=[cropped_image],
text=response_text,
),
0.0,
{"success": True},
)
async def release(self, instance_id: str, **kwargs) -> None:
if instance_id in self._instance_dict:
del self._instance_dict[instance_id]
| {
"repo_id": "verl-project/verl",
"file_path": "verl/tools/image_zoom_in_tool.py",
"license": "Apache License 2.0",
"lines": 321,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/utils/transformers_compat.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Compatibility utilities for different versions of transformers library.
"""
import importlib.metadata
from functools import lru_cache
from typing import Optional
from packaging import version
# Handle version compatibility for flash_attn_supports_top_left_mask
# This function was added in newer versions of transformers
try:
from transformers.modeling_flash_attention_utils import flash_attn_supports_top_left_mask
except ImportError:
# For older versions of transformers that don't have this function
# Default to False as a safe fallback for older versions
def flash_attn_supports_top_left_mask():
"""Fallback implementation for older transformers versions.
Returns False to disable features that require this function.
"""
return False
@lru_cache
def is_transformers_version_in_range(min_version: Optional[str] = None, max_version: Optional[str] = None) -> bool:
try:
# Get the installed version of the transformers library
transformers_version_str = importlib.metadata.version("transformers")
except importlib.metadata.PackageNotFoundError as e:
raise ModuleNotFoundError("The `transformers` package is not installed.") from e
transformers_version = version.parse(transformers_version_str)
lower_bound_check = True
if min_version is not None:
lower_bound_check = version.parse(min_version) <= transformers_version
upper_bound_check = True
if max_version is not None:
upper_bound_check = transformers_version <= version.parse(max_version)
return lower_bound_check and upper_bound_check
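# Hedged usage sketch (editor's addition): gate a code path on the installed transformers
# version. Both bounds are inclusive, and either bound may be omitted; the version strings
# below are illustrative only.
def _example_version_gate() -> bool:
    return is_transformers_version_in_range(min_version="4.45.0", max_version="4.55.0")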
@lru_cache
def get_auto_model_for_vision2seq():
"""Return the available VL auto model class across transformers versions."""
try:
# Prefer the newer class when available. In transformers 4.x this class has
# a broader mapping than AutoModelForVision2Seq, and AutoModelForVision2Seq
# is deprecated for removal in v5.
from transformers import AutoModelForImageTextToText
except ImportError:
from transformers import AutoModelForVision2Seq
return AutoModelForVision2Seq
return AutoModelForImageTextToText
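# Hedged usage sketch (editor's addition): load a vision-language model through whichever auto
# class the installed transformers version provides. The model id below is illustrative only.
def _example_load_vl_model(model_path: str = "Qwen/Qwen2.5-VL-3B-Instruct"):
    auto_cls = get_auto_model_for_vision2seq()
    return auto_cls.from_pretrained(model_path)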
| {
"repo_id": "verl-project/verl",
"file_path": "verl/utils/transformers_compat.py",
"license": "Apache License 2.0",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:tests/special_distributed/test_mcore_config_converter.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import megatron.core.parallel_state as mpu
import torch
from megatron.core.transformer import MLATransformerConfig, TransformerConfig
from transformers import AutoConfig, PretrainedConfig
from verl.models.mcore import hf_to_mcore_config
from verl.utils.distributed import destroy_global_process_group, initialize_global_process_group
TEST_MODELS = [
"Qwen/Qwen2.5-7B", # Qwen2 dense
"Qwen/Qwen3-8B", # Qwen3 dense
"deepseek-ai/deepseek-coder-1.3b-instruct", # deepseek dense
"Qwen/Qwen2-57B-A14B", # Qwen2 moe
"Qwen/Qwen3-30B-A3B", # Qwen3 moe
# "mistralai/Mixtral-8x7B-v0.1", # Mixtral # require authentication
"deepseek-ai/DeepSeek-V3-Base", # Deepseek V3
]
def check_config_converter_results(tf_config: TransformerConfig | MLATransformerConfig, hf_config: PretrainedConfig):
assert tf_config.num_layers == hf_config.num_hidden_layers, (
f"Number of layers mismatch: {tf_config.num_layers} != {hf_config.num_hidden_layers}"
)
assert tf_config.hidden_size == hf_config.hidden_size, (
f"Hidden size mismatch: {tf_config.hidden_size} != {hf_config.hidden_size}"
)
assert tf_config.num_attention_heads == hf_config.num_attention_heads, (
f"Number of attention heads mismatch: {tf_config.num_attention_heads} != {hf_config.num_attention_heads}"
)
assert tf_config.num_query_groups == hf_config.num_key_value_heads, (
f"Number of query groups mismatch: {tf_config.num_query_groups} != {hf_config.num_key_value_heads}"
)
assert tf_config.ffn_hidden_size == hf_config.intermediate_size, (
f"FFN hidden size mismatch: {tf_config.ffn_hidden_size} != {hf_config.intermediate_size}"
)
assert tf_config.attention_dropout == hf_config.attention_dropout, (
f"Attention dropout mismatch: {tf_config.attention_dropout} != {hf_config.attention_dropout}"
)
assert tf_config.hidden_dropout == getattr(hf_config, "hidden_dropout", 0.0), (
f"Hidden dropout mismatch: {tf_config.hidden_dropout} != {getattr(hf_config, 'hidden_dropout', 0.0)}"
)
if getattr(hf_config, "head_dim", None) is not None:
assert tf_config.kv_channels == getattr(hf_config, "head_dim", None), (
f"Head dim mismatch: {tf_config.kv_channels} != {getattr(hf_config, 'head_dim', None)}"
)
assert tf_config.layernorm_epsilon == hf_config.rms_norm_eps, (
f"Layernorm epsilon mismatch: {tf_config.layernorm_epsilon} != {hf_config.rms_norm_eps}"
)
def modify_hf_config(name: str, hf_config: PretrainedConfig):
if name == "deepseek-ai/DeepSeek-V3-Base":
hf_config.num_nextn_predict_layers = 0
hf_config.quantization_config = None
return hf_config
def test_mcore_config_converter():
"""
Test the conversion of Hugging Face model configurations to MCore configurations.
"""
local_rank, rank, world_size = initialize_global_process_group()
mpu.initialize_model_parallel(
tensor_model_parallel_size=2,
pipeline_model_parallel_size=2,
virtual_pipeline_model_parallel_size=None,
use_sharp=False,
context_parallel_size=2,
expert_model_parallel_size=1,
expert_tensor_parallel_size=None,
nccl_communicator_config_path=None,
)
for model_name in TEST_MODELS:
print(f"testing {model_name}")
hf_config = AutoConfig.from_pretrained(os.path.expanduser(f"~/models/configs/{model_name}/config.json"))
hf_config = modify_hf_config(model_name, hf_config)
tf_config = hf_to_mcore_config(hf_config, torch.bfloat16)
check_config_converter_results(tf_config, hf_config)
destroy_global_process_group()
if __name__ == "__main__":
test_mcore_config_converter()
| {
"repo_id": "verl-project/verl",
"file_path": "tests/special_distributed/test_mcore_config_converter.py",
"license": "Apache License 2.0",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:tests/single_controller/test_device_mesh_register.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import ray
import torch
from tensordict import TensorDict
import verl.utils.tensordict_utils as tu
from verl import DataProto
from verl.single_controller.base import Worker
from verl.single_controller.base.decorator import make_nd_compute_dataproto_dispatch_fn, register
from verl.utils.device import get_device_name, get_nccl_backend
@ray.remote
class TestActor(Worker):
def __init__(self):
super().__init__()
import torch.distributed
torch.distributed.init_process_group(backend=get_nccl_backend())
self.infer_device_mesh = torch.distributed.device_mesh.init_device_mesh(
device_type=get_device_name(), mesh_shape=[2, 4], mesh_dim_names=["dp", "tp"]
)
self.train_device_mesh = torch.distributed.device_mesh.init_device_mesh(
device_type=get_device_name(), mesh_shape=[2, 2, 2], mesh_dim_names=["pp", "dp", "tp"]
)
self._register_dispatch_collect_info(
"infer",
dp_rank=self.infer_device_mesh["dp"].get_local_rank(),
is_collect=self.infer_device_mesh["tp"].get_local_rank() == 0,
)
self._register_dispatch_collect_info(
"train",
dp_rank=self.train_device_mesh["dp"].get_local_rank(),
is_collect=self.train_device_mesh["tp"].get_local_rank() == 0
and self.train_device_mesh["pp"].get_local_rank() == 1,
)
@register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="infer"))
def generate_data_proto(self, data: DataProto):
tp_rank = self.infer_device_mesh["tp"].get_local_rank()
dp_rank = self.infer_device_mesh["dp"].get_local_rank()
data.batch["a"] += (tp_rank + 1) * dp_rank
return data
@register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="infer"))
def generate_tensordict(self, data: TensorDict):
tp_rank = self.infer_device_mesh["tp"].get_local_rank()
dp_rank = self.infer_device_mesh["dp"].get_local_rank()
data["a"] += (tp_rank + 1) * dp_rank
return data
@register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="train"))
def train_data_proto(self, data: DataProto):
tp_rank = self.train_device_mesh["tp"].get_local_rank()
dp_rank = self.train_device_mesh["dp"].get_local_rank()
pp_rank = self.train_device_mesh["pp"].get_local_rank()
data.batch["a"] += (tp_rank + 1) * (dp_rank + 2) * (pp_rank + 3)
# tp rank 0, pp rank 1, dp rank 0, output data added: 8 + 3 = 11
# tp rank 0, pp rank 1, dp rank 1, output data added: 12 + 4 = 16
return data
@register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="train"))
def train_tensordict(self, data: TensorDict):
tp_rank = self.train_device_mesh["tp"].get_local_rank()
dp_rank = self.train_device_mesh["dp"].get_local_rank()
pp_rank = self.train_device_mesh["pp"].get_local_rank()
data["a"] += (tp_rank + 1) * (dp_rank + 2) * (pp_rank + 3)
# tp rank 0, pp rank 1, dp rank 0, output data added: 8 + 3 = 11
# tp rank 0, pp rank 1, dp rank 1, output data added: 12 + 4 = 16
return data
@register(dispatch_mode=make_nd_compute_dataproto_dispatch_fn(mesh_name="infer"))
def generate_nested_tensor(self, data: TensorDict):
tp_rank = self.infer_device_mesh["tp"].get_local_rank()
dp_rank = self.infer_device_mesh["dp"].get_local_rank()
assert data.shape[0] == 8
data["input_ids"] += tp_rank + dp_rank
print(data)
return data
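# Hedged note (editor's addition): for the [2, 4] "infer" mesh above, ranks 0-3 share dp_rank 0
# and ranks 4-7 share dp_rank 1, so make_nd_compute_dataproto_dispatch_fn splits the two-sample
# batch one sample per dp group and collects outputs only from the tp_rank-0 workers, which is
# what the asserted values in test_dist_global_info_wg encode.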
def test_dist_global_info_wg():
# create a worker group with size 8
# register a infer dist info with tp=4, dp=2
# register a train dist info with tp=2, dp=2, pp=2
# test the correctness of data dispatch and computation
from verl.single_controller.ray import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup
ray.init()
ray_cls = RayClassWithInitArgs(TestActor)
resource_pool = RayResourcePool(process_on_nodes=[8])
wg = RayWorkerGroup(resource_pool=resource_pool, ray_cls_with_init=ray_cls, device_name=get_device_name())
infer_input_data_proto = DataProto.from_single_dict(data={"a": torch.tensor([1, 2])})
infer_output_data_proto = wg.generate_data_proto(infer_input_data_proto)
assert wg._dispatch_info["infer"] == [0, 0, 0, 0, 1, 1, 1, 1]
assert torch.all(torch.eq(infer_output_data_proto.batch["a"], torch.tensor([1, 3])))
infer_input_tensordict = infer_input_data_proto.to_tensordict()
infer_output_tensordict = wg.generate_tensordict(infer_input_tensordict)
assert torch.all(torch.eq(infer_output_tensordict["a"], torch.tensor([1, 3])))
train_input_data_proto = DataProto.from_single_dict(data={"a": torch.tensor([3, 4])})
train_output_data_proto = wg.train_data_proto(train_input_data_proto)
assert wg._dispatch_info["train"] == [0, 0, 1, 1, 0, 0, 1, 1]
assert torch.all(torch.eq(train_output_data_proto.batch["a"], torch.tensor([11, 16])))
train_input_tensordict = train_input_data_proto.to_tensordict()
train_output_tensordict = wg.train_tensordict(train_input_tensordict)
assert torch.all(torch.eq(train_output_tensordict["a"], torch.tensor([11, 16])))
# create a batch size of input_ids
input_ids = [
torch.randint(low=0, high=128, size=(np.random.randint(low=1, high=10, dtype=np.int64),)) for _ in range(16)
]
input_ids = torch.nested.as_nested_tensor(input_ids, layout=torch.jagged)
data = tu.get_tensordict(tensor_dict={"input_ids": input_ids})
output = wg.generate_nested_tensor(data)
input_ids_chunked = list(input_ids.chunk(2))
print(input_ids_chunked)
input_ids_chunked[0] += 0
input_ids_chunked[1] += 1
expected = tu.concat_nested_tensors(input_ids_chunked)
assert torch.all(torch.eq(output["input_ids"].values(), expected.values()))
ray.shutdown()
if __name__ == "__main__":
test_dist_global_info_wg()
| {
"repo_id": "verl-project/verl",
"file_path": "tests/single_controller/test_device_mesh_register.py",
"license": "Apache License 2.0",
"lines": 123,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:tests/utils/test_rollout_skip_on_cpu.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import tempfile
from pathlib import Path
from unittest.mock import MagicMock
import pytest
import torch
from verl.utils.rollout_skip import DataProto, RolloutSkip
len_prompt = 50
len_response = 100
def temp_dir():
# Create a temporary directory
temp_dir = Path(tempfile.mkdtemp())
yield temp_dir
# Cleanup
shutil.rmtree(temp_dir)
def build_generate_fn(gen_bs, n):
len_tokenizer = 1024
def iterate():
while True:
prompt = torch.randint(len_tokenizer, size=(gen_bs, len_prompt)).repeat_interleave(n, dim=0)
generate = torch.randint(len_tokenizer, size=(gen_bs * n, len_response))
data = DataProto.from_dict(tensors={"prompt": prompt, "response": generate})
yield data
mock_infer_engine = iterate()
def fn(batch, **kwargs):
# Simulate the inference engine returning the next batch
return next(mock_infer_engine)
return fn
@pytest.fixture(params=[(32, 4), (64, 4), (64, 8)])
def mock_rollout_wg(request):
gen_bs, n = request.param
rollout_wg = MagicMock()
config = MagicMock()
    # Reuse a single temp_dir() generator so cleanup removes the directory actually used
    tmp_dirs = temp_dir()
    config.actor_rollout_ref.rollout = {
        "n": n,
        "skip_dump_dir": next(tmp_dirs),
    }
    config.data = {"gen_batch_size": gen_bs}
    rollout_wg.generate_sequences = build_generate_fn(gen_bs, n)
    yield config, rollout_wg
    # Cleanup: advancing the generator triggers the rmtree inside temp_dir()
    next(tmp_dirs, None)
class TestRolloutSkip:
def test_initialization(self, capsys):
"""Test that RolloutSkip initializes correctly"""
config = MagicMock()
config.actor_rollout_ref.rollout = {
"n": 16,
"skip_dump_dir": "tmp/rollout_dump",
}
config.data = {"gen_batch_size": 128}
mock_rollout_wg = MagicMock()
skip = RolloutSkip(config, mock_rollout_wg)
assert skip.n == 16
assert skip.gbs == 128
assert str(skip.dumped_dir) == "tmp/rollout_dump"
assert skip._rollout_wg == mock_rollout_wg
skip.wrap_generate_sequences()
captured = capsys.readouterr()
assert "Successfully patched" in captured.out
def test_generate_without_wrap(self, mock_rollout_wg):
"""Test that generate_sequences works without wrapping"""
config, rollout_wg = mock_rollout_wg
_ = RolloutSkip(config, rollout_wg)
_result = rollout_wg.generate_sequences(MagicMock())
for _ in range(10):
result = rollout_wg.generate_sequences(MagicMock())
assert isinstance(result, DataProto)
# * make sure the data is different
assert torch.abs(_result.batch["prompt"] - result.batch["prompt"]).sum() > 0
assert torch.abs(_result.batch["response"] - result.batch["response"]).sum() > 0
_result = result
def test_dump(self, mock_rollout_wg, capsys):
config, rollout_wg = mock_rollout_wg
skip = RolloutSkip(config, rollout_wg)
skip.wrap_generate_sequences()
result = rollout_wg.generate_sequences(MagicMock())
# * check if dump is OK
assert skip.curr_path_dump.exists()
captured = capsys.readouterr()
assert "Successfully dump data in" in captured.out
# * get file size, estimate file size
file_size = skip.curr_path_dump.stat().st_size
est_file_size = (len_prompt + len_response) * skip.gbs * skip.n * result.batch["prompt"].dtype.itemsize
assert file_size >= est_file_size, "Dumped file size is smaller than expected"
def test_generate_with_wrap(self, mock_rollout_wg, capsys):
"""Test that generate_sequences works without wrapping"""
config, rollout_wg = mock_rollout_wg
skip = RolloutSkip(config, rollout_wg)
skip.wrap_generate_sequences()
_result = rollout_wg.generate_sequences(MagicMock())
for _ in range(10):
result = rollout_wg.generate_sequences(MagicMock())
assert isinstance(result, DataProto)
# * make sure the data is different
assert torch.abs(_result.batch["prompt"] - result.batch["prompt"]).sum() == 0
assert torch.abs(_result.batch["response"] - result.batch["response"]).sum() == 0
captured = capsys.readouterr()
assert "Successfully load pre-generated data from" in captured.out
_result = result
| {
"repo_id": "verl-project/verl",
"file_path": "tests/utils/test_rollout_skip_on_cpu.py",
"license": "Apache License 2.0",
"lines": 113,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:verl/utils/rollout_skip.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from verl.protocol import DataProto
class RolloutSkip:
"""
RolloutSkip skips sequence generation during rollout by attempting to load previously dumped data.
If no dumped data is found, it generates new sequences and saves them to disk.
Args:
config: The configuration object containing rollout settings.
rollout_wg: The worker group that handles the rollout process.
Note:
        When rollout.n or data.gen_batch_size differs from previous runs,
        new sequences will be generated and saved under a different filename.
"""
print_mark = "[RolloutSkip()]"
def __init__(self, config, rollout_wg):
self.rollout_config = config.actor_rollout_ref.rollout
self.exp_name = config.data.get("experiment_name", "")
self.project_name = config.data.get("project_name", "")
self.n = int(self.rollout_config.get("n", 0))
self.gbs = int(config.data.get("gen_batch_size", config.data.get("train_batch_size", 0)))
self.dumped_dir = Path(self.rollout_config.get("skip_dump_dir", "/tmp/verl/rollout_dump"))
self.dumped_dir.mkdir(parents=True, exist_ok=True)
# Check if path is in Ray temporary directory
if str(self.dumped_dir.absolute()).startswith("/tmp/ray/session"):
print(
f"\033[33m{self.print_mark} Warning: \nUsing dump path ",
f"'{self.dumped_dir.absolute()}' is not recommended ",
"as it's located in /tmp/ray/session*\033[0m",
flush=True,
)
print(
f"{self.print_mark} Rollout skip dump path set to: ",
f"{self.dumped_dir.absolute()}",
flush=True,
)
self._rollout_wg = rollout_wg
@property
def curr_path_dump(self):
return self.dumped_dir.joinpath(f"{self.exp_name}_{self.project_name}_GBS{self.gbs}__N{self.n}").absolute()
def wrap_generate_sequences(self):
try:
self._rollout_wg.generate_sequences = wrap_generate_sequences(self, self._rollout_wg)
print(
f"{self.print_mark} Successfully patched `actor_rollout_wg.generate_sequences()`",
flush=True,
)
        except Exception as e:
            raise RuntimeError(
                f"{self.print_mark} Failed to patch `actor_rollout_wg.generate_sequences()`"
            ) from e
def try_load(self):
if not self.curr_path_dump.exists():
print(
f"{self.print_mark} No data dump found at {self.curr_path_dump}.",
"The trainer will generate and automatically dump the data for this first run.",
flush=True,
)
return None
try:
# * Load
ret_batch = DataProto.load_from_disk(self.curr_path_dump)
print(
f"\033[32m{self.print_mark} Successfully load pre-generated data from {self.curr_path_dump}\033[0m",
flush=True,
)
return ret_batch
except Exception as e:
print(
f"\033[31m{self.print_mark} Failed to load pre-generated data from {self.curr_path_dump}",
f"Error: {str(e)}\033[0m",
flush=True,
)
return None
def dump(self, outputs: DataProto):
try:
outputs.save_to_disk(self.curr_path_dump)
print(
f"\033[32m{self.print_mark} Successfully dump data in {self.curr_path_dump}\033[0m",
flush=True,
)
except Exception as e:
print(
f"\033[31m{self.print_mark} Failed to dump data in {self.curr_path_dump}: {e}\033[0m",
flush=True,
)
def wrap_generate_sequences(rolloutskip: RolloutSkip, rollout_wg):
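    """Return a drop-in replacement for ``rollout_wg.generate_sequences`` that first tries to
    load a previously dumped batch and, failing that, generates a fresh one and dumps it."""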
generate_sequences = rollout_wg.generate_sequences
    def wrap_fn(batch, **kwargs):
gen_batch_output = rolloutskip.try_load()
if gen_batch_output is None:
# * 1. Generation
gen_batch_output = generate_sequences(batch, **kwargs)
# * 2. Dump
rolloutskip.dump(gen_batch_output)
return gen_batch_output
    return wrap_fn
| {
"repo_id": "verl-project/verl",
"file_path": "verl/utils/rollout_skip.py",
"license": "Apache License 2.0",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:tests/workers/test_fsdp_workers.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from omegaconf import OmegaConf
from verl.workers.fsdp_workers import ActorRolloutRefWorker
def test_actor_rollout_ref_worker_actor_ref_model():
"""Test specifying different reference/actor model"""
os.environ["RANK"] = "0"
os.environ["WORLD_SIZE"] = "1"
os.environ["MASTER_ADDR"] = "127.0.0.1"
os.environ["MASTER_PORT"] = "8888"
actor_model_path = os.path.expanduser("~/models/Qwen/Qwen2.5-0.5B-Instruct")
ref_model_path = os.path.expanduser("~/models/Qwen/Qwen2.5-1.5B-Instruct")
config_str = f"""
model:
path: {actor_model_path}
actor:
_target_: verl.workers.config.FSDPActorConfig
strategy: fsdp
fsdp_config:
_target_: verl.workers.config.FSDPEngineConfig
fsdp_size: -1
forward_prefetch: false
profiler:
tool: torch_memory
save_path: ./mem_snapshots
tool_config:
torch_memory:
_target_: verl.utils.profiler.config.TorchMemoryToolConfig
trace_alloc_max_entries: 100000
stack_depth: 32
ref:
model:
path: {ref_model_path}
fsdp_config:
_target_: verl.workers.config.FSDPEngineConfig
fsdp_size: -1
profiler:
tool: torch_memory
save_path: ./mem_snapshots
tool_config:
torch_memory:
_target_: verl.utils.profiler.config.TorchMemoryToolConfig
trace_alloc_max_entries: 100000
stack_depth: 32
log_prob_micro_batch_size: 1
ulysses_sequence_parallel_size: 1
entropy_from_logits_with_chunking: false
"""
dict_conf = OmegaConf.create(config_str)
actor_rollout_ref_worker = ActorRolloutRefWorker(dict_conf, role="ref")
actor_rollout_ref_worker.init_model()
model_config = actor_rollout_ref_worker.ref_module_fsdp._fsdp_wrapped_module.config
assert model_config.hidden_size == 1536
# set ref.model to null, fallback to default case where actor is the same as reference
dict_conf["ref"]["model"] = None
actor_rollout_ref_worker = ActorRolloutRefWorker(dict_conf, role="ref")
actor_rollout_ref_worker.init_model()
model_config = actor_rollout_ref_worker.ref_module_fsdp._fsdp_wrapped_module.config
assert model_config.hidden_size == 896
| {
"repo_id": "verl-project/verl",
"file_path": "tests/workers/test_fsdp_workers.py",
"license": "Apache License 2.0",
"lines": 71,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:tests/utils/debug/test_metrics.py | # Copyright 2025 Individual Contributor: TomQunChaoA
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from verl.protocol import DataProto
from verl.utils.debug.metrics import calculate_debug_metrics
class TestMetrics(unittest.TestCase):
def test_calculate_debug_metrics(self):
data = DataProto.from_dict(
{
"rollout_log_probs": torch.tensor(
[
[-1.5085, -0.1200, -0.6650, -0.4823, -0.1426, -1.5557, -2.8532, -0.3919, -0.4294, -0.4700],
[-0.0585, -0.0573, -0.4681, -0.5187, -0.7451, -1.2737, -0.0682, -0.4284, -0.5754, -0.0611],
]
),
"old_log_probs": torch.tensor(
[
[-1.8636, -0.7863, -0.2136, -0.4376, -2.0257, -0.2579, -1.1547, -0.5203, -0.3802, -0.9872],
[-0.3507, -0.5426, -0.2725, -0.4637, -0.3577, -0.3733, -1.7560, -1.9542, -0.4229, -1.3098],
]
),
"loss_mask": torch.tensor([[1, 0, 0, 0, 1, 1, 0, 1, 1, 0], [1, 0, 1, 0, 1, 1, 1, 0, 1, 1]]),
"responses": torch.zeros((2, 10)),
}
)
metrics = calculate_debug_metrics(data)
print(metrics)
assert metrics["training/rollout_probs_diff_valid"] == 1
if __name__ == "__main__":
unittest.main()
| {
"repo_id": "verl-project/verl",
"file_path": "tests/utils/debug/test_metrics.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:verl/utils/debug/metrics.py | # Copyright 2025 Individual Contributor: TomQunChaoA
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import torch
from verl.protocol import DataProto
logger = logging.getLogger(__file__)
def calculate_token_list_diff(tensor1: torch.Tensor, tensor2: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
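    """Count, for each row, how many token ids differ between ``tensor1`` and ``tensor2`` at positions where ``mask`` == 1."""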
# verify inputs
if tensor1.numel() == 0 or tensor2.numel() == 0:
return torch.zeros(tensor1.shape[0], dtype=torch.long, device=tensor1.device)
if tensor1.shape != tensor2.shape or mask.shape != tensor1.shape or mask.shape != tensor2.shape:
print(
f"<WARN> dim of tensor1, tensor2, mask is not equal, {(tensor1.shape)=},{(tensor2.shape)=}, {(mask.shape)=}"
)
return torch.ones_like(tensor1)
# transfer to same device
if tensor2.device != tensor1.device:
tensor2 = tensor2.to(tensor1.device)
if mask.device != tensor1.device:
mask = mask.to(tensor1.device)
# calculate diff
diff_mask = tensor1 != tensor2
valid_diff_mask = diff_mask & (mask == 1)
diff_counts = valid_diff_mask.sum(dim=1)
return diff_counts
def pearson_correlation_coefficient(tensor1: torch.Tensor, tensor2: torch.Tensor, mask: torch.Tensor) -> float:
    """Pearson correlation between the masked elements of two tensors; returns 0.0 on shape mismatch."""
    # implementation of https://arxiv.org/pdf/2506.13585
    if tensor1.shape != tensor2.shape or mask.shape != tensor1.shape or mask.shape != tensor2.shape:
        return 0.0
mt1 = torch.masked_select(tensor1, mask)
mt2 = torch.masked_select(tensor2, mask)
result = torch.corrcoef(torch.stack([mt1, mt2], dim=0))
return result[0][1].detach().item()
def calculate_log_prob_diff(log_probs1: torch.Tensor, log_probs2: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
full_diff = torch.abs(log_probs1 - log_probs2)
return torch.masked_select(full_diff, mask)
def calculate_debug_metrics(data: DataProto) -> dict:
"""
calculate rollout vs actor logprobs diff, for debugging purpose
Args:
data: DataProto
the data batch to calculate
rollout_log_probs: log_probs record when rollout forward tokens
old_log_probs(actor log probs): log_probs record when actor forward tokens
            response_mask or attention_mask: used to mask out unrelated tokens
responses: the response tokens, for calculating size
Returns:
dict: metrics
"training/rollout_probs_diff_valid": 1->input is valid, 0->input is invalid
"training/rollout_probs_diff_max": max value of logprob diff of rollout vs. actor
"training/rollout_probs_diff_mean": mean value of logprob diff of rollout vs. actor
"training/rollout_probs_diff_std": std value of logprob diff of rollout vs. actor
"training/rollout_actor_probs_pearson_corr": logprob's pearson corrcoef of rollout vs. actor, reference to https://arxiv.org/pdf/2506.13585
"""
rollout_old_log_probs = data.batch["rollout_log_probs"]
actor_old_log_probs = data.batch["old_log_probs"]
if "response_mask" in data.batch:
logger.debug("response mask found, use it to mask log probs")
log_prob_mask = data.batch["response_mask"]
elif "attention_mask" in data.batch:
log_prob_mask = data.batch["attention_mask"]
else:
logger.warning(f"no mask info found, use all log probs, {(data.batch.keys())=}")
log_prob_mask = torch.ones_like(rollout_old_log_probs)
responses = data.batch["responses"]
response_length = responses.size(1)
response_mask = log_prob_mask[:, -response_length:]
# calculate pearson corrcoef
actor_probs = torch.exp(actor_old_log_probs)
rollout_probs = torch.exp(rollout_old_log_probs)
response_mask_bool = response_mask.bool()
pearson_corrcoef = pearson_correlation_coefficient(actor_probs, rollout_probs, response_mask_bool)
rollout_probs_diff = calculate_log_prob_diff(actor_probs, rollout_probs, response_mask_bool)
return {
"training/rollout_probs_diff_valid": 1,
"training/rollout_probs_diff_max": torch.max(rollout_probs_diff).detach().item(),
"training/rollout_probs_diff_mean": torch.mean(rollout_probs_diff).detach().item(),
"training/rollout_probs_diff_std": torch.std(rollout_probs_diff).detach().item(),
"training/rollout_actor_probs_pearson_corr": pearson_corrcoef,
}
| {
"repo_id": "verl-project/verl",
"file_path": "verl/utils/debug/metrics.py",
"license": "Apache License 2.0",
"lines": 90,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/workers/reward_manager/abstract.py | # Copyright 2023-2025 SGLang Team
# Copyright Amazon.com, Inc. or its affiliates.
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import Any, Callable
import torch
from verl.protocol import DataProto
RawRewardFn = Callable[..., Any]
class AbstractRewardManager(ABC):
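    """Abstract interface for reward managers, which map a rollout batch (``DataProto``) to a
    per-token reward tensor (optionally together with extra reward info)."""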
@abstractmethod
def __init__(
self,
tokenizer: Any,
num_examine: int,
compute_score: RawRewardFn | None,
reward_fn_key: str = "data_source",
**kwargs: Any,
):
pass
@abstractmethod
def __call__(
self,
data: DataProto,
return_dict: bool = False,
) -> torch.Tensor | dict[str, Any]:
pass
def _extract_reward_from_rm_scores(
self, data: DataProto, return_dict: bool = False
) -> torch.Tensor | dict[str, Any] | None:
"""
Extract reward from already-computed rm_scores if available.
This has been deprecated.
Args:
data: DataProto object containing the batch data
return_dict: Whether to return a dictionary with reward_tensor and reward_extra_info
Returns:
If rm_scores exists:
- If return_dict=True: dict with "reward_tensor" and "reward_extra_info"
- If return_dict=False: torch.Tensor of rm_scores
If rm_scores doesn't exist: None
"""
if "rm_scores" not in data.batch.keys():
return None
if return_dict:
reward_extra_keys = data.meta_info.get("reward_extra_keys", [])
reward_extra_info = {key: data.non_tensor_batch[key] for key in reward_extra_keys}
return {"reward_tensor": data.batch["rm_scores"], "reward_extra_info": reward_extra_info}
else:
return data.batch["rm_scores"]
| {
"repo_id": "verl-project/verl",
"file_path": "verl/workers/reward_manager/abstract.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/utils/vllm/patch.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# To support different vLLM versions, each model is added to SUPPORTED_MOE_MODELS in its own
# try/except block, so that models missing from a given vLLM version do not break the import.
SUPPORTED_MOE_MODELS = []
try:
from vllm.model_executor.models.deepseek_v2 import DeepseekV2ForCausalLM, DeepseekV3ForCausalLM
SUPPORTED_MOE_MODELS.append(DeepseekV2ForCausalLM)
SUPPORTED_MOE_MODELS.append(DeepseekV3ForCausalLM)
except ImportError:
pass
try:
from vllm.model_executor.models.mixtral import MixtralForCausalLM
SUPPORTED_MOE_MODELS.append(MixtralForCausalLM)
except ImportError:
pass
try:
from vllm.model_executor.models.qwen2_moe import Qwen2MoeForCausalLM
SUPPORTED_MOE_MODELS.append(Qwen2MoeForCausalLM)
except ImportError:
pass
try:
from vllm.model_executor.models.qwen3_moe import Qwen3MoeForCausalLM
SUPPORTED_MOE_MODELS.append(Qwen3MoeForCausalLM)
except ImportError:
pass
try:
from vllm.model_executor.models.qwen3_vl_moe import Qwen3MoeLLMForCausalLM
SUPPORTED_MOE_MODELS.append(Qwen3MoeLLMForCausalLM)
except ImportError:
pass
try:
from vllm.model_executor.models.qwen3_next import Qwen3NextForCausalLM
SUPPORTED_MOE_MODELS.append(Qwen3NextForCausalLM)
except ImportError:
pass
try:
from vllm.model_executor.models.kimi_vl import KimiVLForConditionalGeneration
SUPPORTED_MOE_MODELS.append(KimiVLForConditionalGeneration)
except ImportError:
pass
def patch_vllm_moe_model_weight_loader(model):
    # This is a workaround for loading the weights of vLLM fused MoE models.
    # It stems from a bug in vLLM 0.8.2: every weight is supposed to carry a
    # weight_loader, but the fused MoE expert weights do not, so we patch one in here.
# (True, 'model.embed_tokens.weight')
# (True, 'model.layers.0.self_attn.qkv_proj.weight')
# (True, 'model.layers.0.self_attn.qkv_proj.bias')
# (True, 'model.layers.0.self_attn.o_proj.weight')
# (True, 'model.layers.0.mlp.gate.weight')
# (True, 'model.layers.0.mlp.shared_expert.gate_up_proj.weight')
# (True, 'model.layers.0.mlp.shared_expert.down_proj.weight')
# (False, 'model.layers.0.mlp.shared_expert_gate.weight') use default
# (False, 'model.layers.0.input_layernorm.weight') use default
# (False, 'model.layers.0.post_attention_layernorm.weight') use default
# (False, 'model.layers.0.mlp.experts.w13_weight') use mlp.experts.weight_loader
# (False, 'model.layers.0.mlp.experts.w2_weight') use mlp.experts.weight_loader
# Early return if no MOE models are supported
if not SUPPORTED_MOE_MODELS:
return
original_model_type = type(model)
if hasattr(model, "runnable") and "ACLGraphWrapper" in str(original_model_type):
model = model.runnable
original_model_type = type(model)
# Define MLP attribute mapping for different model types
MLP_ATTR_MAPPING = {}
try:
from vllm.model_executor.models.mixtral import MixtralForCausalLM
MLP_ATTR_MAPPING[MixtralForCausalLM] = "block_sparse_moe"
except ImportError:
pass
DEFAULT_MLP_ATTR = "mlp"
# Get inner model (either model.model or model.language_model)
inner_model = getattr(model, "model", None) or getattr(model, "language_model", None)
if inner_model is None:
raise ValueError("The provided model does not have a valid 'model' or 'language_model' attribute.")
if not isinstance(model, tuple(SUPPORTED_MOE_MODELS)) and not isinstance(inner_model, tuple(SUPPORTED_MOE_MODELS)):
return
# TODO(@leisuzz): class Qwen3MoeLLMForCausalLM is not available if VLLM version < 0.11.0,
# will update the 'if statement' with 'isinstance' when verl commonly use VLLM version >= 0.11.0
if type(inner_model).__name__ == "Qwen3MoeLLMForCausalLM":
inner_model = inner_model.model # Reassign inner_model in Qwen3-vl
for layer_idx, layer in enumerate(inner_model.layers):
mlp_attr = MLP_ATTR_MAPPING.get(original_model_type, DEFAULT_MLP_ATTR)
mlp = getattr(layer, mlp_attr, None)
if not mlp:
continue
experts = getattr(mlp, "experts", None)
if not experts or not hasattr(experts, "weight_loader"):
continue
# Patch the weight loaders
for name, param in mlp.named_parameters():
if "w13_weight" in name or "w2_weight" in name:
param.weight_loader = experts.weight_loader
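# Illustrative usage (a sketch only; the attribute path from a vLLM engine to the underlying
# nn.Module varies across vLLM versions, so `rollout_model` below stands for whatever model
# handle the integration already holds):
#
#     patch_vllm_moe_model_weight_loader(rollout_model)
#     # After patching, the fused MoE expert parameters (w13_weight / w2_weight) expose the
#     # experts' weight_loader, so a regular named-parameter weight sync can load them.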
| {
"repo_id": "verl-project/verl",
"file_path": "verl/utils/vllm/patch.py",
"license": "Apache License 2.0",
"lines": 106,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:tests/utils/dataset/test_rl_collate_fn_on_cpu.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
def test_rl_collate_fn():
from verl.utils.dataset.rl_dataset import collate_fn
max_prompt_length = 5
test_data = [
{
# test tensor
"input_ids": torch.randint(0, 10, (max_prompt_length,)),
# test fixed length (1) list within a batch
"messages": [{"role": "user", "content": "Hi."}],
# test variable length list within a batch
"raw_prompt_ids": [1, 2, 3, 4],
# test string
"ability": "math",
# test dict
"reward_model": {"ground_truth": 5, "style": "rule"},
# test empty dict
"tools_kwargs": {},
},
{
"input_ids": torch.randint(0, 10, (max_prompt_length,)),
"messages": [{"role": "user", "content": "Hello."}],
"raw_prompt_ids": [1, 2, 3],
"ability": "toolcall",
"reward_model": {
"ground_truth": '[{"name": "rgb_to_cmyk", "arguments": {"r": 0, "g": 0, "b": 255}}]',
"style": "rule",
},
"tools_kwargs": {},
},
]
batch_size = len(test_data)
batch = collate_fn(test_data)
# Tensor part
assert batch["input_ids"].shape == (batch_size, max_prompt_length)
assert isinstance(batch["input_ids"], torch.Tensor)
# Non-tensor parts
expected_types = {
"messages": list,
"raw_prompt_ids": list,
"ability": str,
"reward_model": dict,
"tools_kwargs": dict,
}
for key, dtype in expected_types.items():
assert batch[key].shape == (batch_size,), (
f"Expected shape {(batch_size,)} for '{key}', but got {batch[key].shape}"
)
assert isinstance(batch[key][0], dtype), (
f"'{key}' should contain elements of type {dtype}, but got {type(batch[key][0])}"
)
| {
"repo_id": "verl-project/verl",
"file_path": "tests/utils/dataset/test_rl_collate_fn_on_cpu.py",
"license": "Apache License 2.0",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:verl/utils/memory_utils.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import inspect
import logging
import os
from datetime import datetime
from pathlib import Path
import torch
from verl.utils.device import get_torch_device, is_cuda_available
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
def aggressive_empty_cache(force_sync: bool = True, max_retries: int = 3) -> None:
"""
More aggressive GPU memory cleanup function, tries to release PyTorch reserved
but unallocated memory.
Args:
force_sync: Whether to force device synchronization
max_retries: Maximum number of retries
"""
device = get_torch_device()
if not device.is_available():
return
for attempt in range(max_retries):
# Record memory status before cleanup
before_reserved = device.memory_reserved()
before_allocated = device.memory_allocated()
# Run garbage collection
gc.collect()
# Clear PyTorch cache
device.empty_cache()
# Force synchronization (optional)
if force_sync:
device.synchronize()
# Record memory status after cleanup
after_reserved = device.memory_reserved()
after_allocated = device.memory_allocated()
# Calculate freed memory
reserved_freed = before_reserved - after_reserved
allocated_freed = before_allocated - after_allocated
logger.info(
f"Memory cleanup attempt {attempt + 1}: Freed {reserved_freed / 1024**3:.2f} GB reserved, "
f"{allocated_freed / 1024**3:.2f} GB allocated"
)
# Stop retrying if little memory was freed
if reserved_freed < 1024**3: # less than 1GB
break
def reset_memory_stats() -> None:
"""Reset GPU memory statistics"""
if get_torch_device().is_available():
device = get_torch_device()
device.reset_peak_memory_stats()
device.reset_accumulated_memory_stats()
def get_memory_info() -> dict:
"""Get detailed GPU memory information"""
if not get_torch_device().is_available():
return {}
device = get_torch_device()
device_id = device.current_device()
return {
"total_memory_gb": device.get_device_properties(device_id).total_memory / 1024**3,
"reserved_memory_gb": device.memory_reserved() / 1024**3,
"allocated_memory_gb": device.memory_allocated() / 1024**3,
"cached_memory_gb": (device.memory_reserved() - device.memory_allocated()) / 1024**3,
"max_memory_allocated_gb": device.max_memory_allocated() / 1024**3,
"max_memory_reserved_gb": device.max_memory_reserved() / 1024**3,
}
def log_memory_usage(stage: str = "current") -> None:
"""Log GPU memory usage"""
if not get_torch_device().is_available():
return
info = get_memory_info()
logger.info(
f"Memory usage [{stage}]: "
f"Total: {info['total_memory_gb']:.2f} GB, "
f"Allocated: {info['allocated_memory_gb']:.2f} GB, "
f"Reserved: {info['reserved_memory_gb']:.2f} GB, "
f"Cached: {info['cached_memory_gb']:.2f} GB"
)
def optimize_memory_for_inference() -> None:
"""Optimize GPU memory usage for inference"""
if not get_torch_device().is_available():
return
# Set a more aggressive memory allocation policy
get_torch_device().set_per_process_memory_fraction(0.95) # Use 95% of GPU memory
# Clear cache
aggressive_empty_cache(force_sync=True)
logger.info("Optimized GPU memory usage for inference")
def optimize_memory_for_training() -> None:
"""Optimize GPU memory usage for training"""
if not get_torch_device().is_available():
return
# Set a moderate memory allocation policy
get_torch_device().set_per_process_memory_fraction(0.9) # Use 90% of GPU memory
# Clear cache
aggressive_empty_cache(force_sync=False)
logger.info("Optimized GPU memory usage for training")
def enable_memory_visualize(
trace_alloc_max_entries: int = 200_000,
stack_depth: int = 32,
context: str = "all",
stacks: str = "all",
devices=None,
record_context: bool = True,
):
"""
Enables memory history recording for CUDA allocations. This function
should be called before any large-scale CUDA allocations. For DDP or
multi-process setups, it must be called on each rank.
Args:
trace_alloc_max_entries (int): Maximum number of allocation entries
to record.
stack_depth (int): The depth of the call stack to capture for each
allocation. (Supported by some PyTorch versions).
context (str): The type of memory events to record.
'alloc': records only allocation events.
'state': records memory state changes.
'all': records both.
stacks (str): The type of call stacks to record.
'python': records Python stacks.
'cpp': records C++ stacks (available in some versions).
'all': records both.
devices (Union[int, list[int], None]): The device for which to enable
memory history. `None` enables it for the current default device.
record_context (bool): Whether to record context information for
allocations. Required by older PyTorch versions.
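    Example:
        A minimal sketch (call once per rank, before the first large allocation):

            enable_memory_visualize(trace_alloc_max_entries=100_000, stack_depth=32)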
"""
# Memory history recording is CUDA-specific functionality
if not is_cuda_available:
logger.warning("[memory_visualize] Memory history recording is only available on CUDA devices")
return
f = get_torch_device().memory._record_memory_history
params = set(inspect.signature(f).parameters.keys())
def _one_call(dev_kw=None):
kwargs = {}
if "context" in params:
kwargs["context"] = context
if "stacks" in params:
kwargs["stacks"] = stacks
if "max_entries" in params:
kwargs["max_entries"] = trace_alloc_max_entries
elif "trace_alloc_max_entries" in params:
kwargs["trace_alloc_max_entries"] = trace_alloc_max_entries
if "stack_depth" in params:
kwargs["stack_depth"] = stack_depth
if dev_kw is not None:
if "device" in params:
kwargs["device"] = dev_kw
elif "devices" in params:
kwargs["devices"] = dev_kw if isinstance(dev_kw, list) else [dev_kw]
if "record_context" in params:
kwargs["record_context"] = record_context
try:
f(**kwargs)
return "native", kwargs
except TypeError:
try:
if "trace_alloc_max_entries" in params and "record_context" in params:
f(enabled=True, trace_alloc_max_entries=trace_alloc_max_entries, record_context=True)
return "legacy", {
"enabled": True,
"trace_alloc_max_entries": trace_alloc_max_entries,
"record_context": True,
}
else:
f(enabled=True)
return "legacy-min", {"enabled": True}
except Exception:
raise
if devices is None or isinstance(devices, str | int | torch.device):
mode, used = _one_call(devices if devices is not None else None)
else:
mode, used = "multi-device", {}
for d in list(devices):
_mode, _used = _one_call(d)
used[f"dev{d}"] = _used
device = get_torch_device()
if device.is_available():
device.reset_peak_memory_stats()
device.synchronize()
rank = int(os.environ.get("RANK", "0") or 0)
logger.info(f"[memory_visualize][rank {rank}] recording enabled ({mode}); args={used}")
class MemorySnapshotSampler:
"""
A utility class that dumps GPU memory snapshots.
This is useful for monitoring memory usage over a long-running process.
The dumped files can be visualized with https://docs.pytorch.org/memory_viz
Args:
out_dir (str): The directory where the snapshots will be saved.
tag (str): A tag for the snapshot filenames.
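    Example:
        A minimal sketch (paths and tags are illustrative):

            sampler = MemorySnapshotSampler(out_dir="./mem_snapshots", tag="periodic")
            sampler.dump_memory_snapshot(out_dir="./mem_snapshots", tag="after_rollout")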
"""
def __init__(self, out_dir: str = "./mem_snapshots", tag: str = "periodic"):
self.out_dir = out_dir
self.tag = tag
def dump_memory_snapshot(self, out_dir: str = "./mem_snapshots", tag: str = "snapshot", sub_dir: str = None):
"""
Generates a memory snapshot and saves it as a pickle file in a specified directory.
The files are organized by timestamp in subdirectories, with all ranks' files
placed in the same timestamp subdirectory.
Args:
out_dir (str): The directory where the snapshot file will be saved.
The directory is created if it does not exist.
tag (str): A string tag to prepend to the filename for easier identification.
sub_dir (str): A subdirectory to place the snapshot file in.
"""
if sub_dir is None:
timestamp = datetime.now().strftime("%Y%m%d-%H%M")
out_path = Path(out_dir) / timestamp
else:
out_path = Path(out_dir) / sub_dir
out_path.mkdir(parents=True, exist_ok=True)
# get the GPU rank on the current process
rank = os.environ.get("RANK", "0")
pid = os.getpid()
        # todo(chenyang): check whether we need to sync all ranks before dump
fname = f"{tag}_rank{rank}_pid{pid}.pickle"
path = out_path / fname
device = get_torch_device()
if not device.is_available():
logger.warning("[memory_visualize] is only available on CUDA devices.")
return
try:
device.synchronize()
# Memory snapshot is CUDA-specific functionality
device.memory._dump_snapshot(str(path))
logger.info(f"[memory_visualize] dumped: {path}")
except Exception as e:
logger.info(f"[memory_visualize][warn] dump failed: {e}")
| {
"repo_id": "verl-project/verl",
"file_path": "verl/utils/memory_utils.py",
"license": "Apache License 2.0",
"lines": 240,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:tests/workers/critic/test_special_dp_critic.py | #!/usr/bin/env python3
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
from unittest.mock import Mock, patch
import torch
import torch.distributed
from omegaconf import OmegaConf
from tensordict import TensorDict
from transformers import AutoConfig
from verl import DataProto
from verl.workers.config import FSDPCriticConfig, FSDPOptimizerConfig
from verl.workers.config.critic import FSDPCriticModelCfg
from verl.workers.config.engine import FSDPEngineConfig
from verl.workers.fsdp_workers import CriticWorker
class TestCriticWorker(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Set up distributed environment"""
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(
backend="nccl" if torch.cuda.is_available() else "gloo", init_method="env://"
)
cls.rank = torch.distributed.get_rank()
cls.world_size = torch.distributed.get_world_size()
if torch.cuda.is_available():
torch.cuda.set_device(cls.rank)
cls.device = torch.device(f"cuda:{cls.rank}")
else:
cls.device = torch.device("cpu")
@classmethod
def tearDownClass(cls):
"""Clean up distributed environment"""
if torch.distributed.is_initialized():
torch.distributed.destroy_process_group()
def setUp(self):
"""Set up test fixtures"""
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.temp_dir = tempfile.mkdtemp()
model_path = os.path.expanduser("~/models/Qwen/Qwen2.5-0.5B-Instruct")
config = AutoConfig.from_pretrained(model_path)
config.save_pretrained(self.temp_dir)
self.config = FSDPCriticConfig(
strategy="fsdp2",
ppo_mini_batch_size=4,
ppo_micro_batch_size_per_gpu=2,
forward_micro_batch_size_per_gpu=2,
ppo_epochs=1,
cliprange_value=0.5,
grad_clip=1.0,
use_dynamic_bsz=False,
ulysses_sequence_parallel_size=1,
rollout_n=1,
optim=FSDPOptimizerConfig(lr=1e-6),
model=FSDPCriticModelCfg(
path=model_path,
tokenizer_path=model_path,
fsdp_config=FSDPEngineConfig(fsdp_size=-1),
use_remove_padding=False,
),
)
assert self.world_size <= 4 // 2
def tearDown(self):
"""Clean up test fixtures"""
import shutil
shutil.rmtree(self.temp_dir, ignore_errors=True)
def _create_test_data_for_compute_values(self, batch_size=2, seq_len=10, response_len=5):
"""Create test data for compute_values method"""
input_ids = torch.randint(0, 1000, (batch_size, seq_len), dtype=torch.long)
attention_mask = torch.ones(batch_size, seq_len, dtype=torch.long)
position_ids = torch.arange(seq_len).unsqueeze(0).expand(batch_size, -1)
responses = torch.randint(0, 1000, (batch_size, response_len), dtype=torch.long)
response_mask = torch.ones(batch_size, response_len, dtype=torch.float)
batch = TensorDict(
{
"input_ids": input_ids,
"attention_mask": attention_mask,
"position_ids": position_ids,
"responses": responses,
"response_mask": response_mask,
},
batch_size=[batch_size],
)
data = DataProto(
batch=batch, meta_info={"micro_batch_size": 2, "max_token_len": seq_len, "use_dynamic_bsz": False}
)
return data
def _create_test_data_for_update_critic(self, batch_size=2, seq_len=10, response_len=5):
"""Create test data for update_critic method"""
input_ids = torch.randint(0, 1000, (batch_size, seq_len), dtype=torch.long)
attention_mask = torch.ones(batch_size, seq_len, dtype=torch.long)
position_ids = torch.arange(seq_len).unsqueeze(0).expand(batch_size, -1)
responses = torch.randint(0, 1000, (batch_size, response_len), dtype=torch.long)
response_mask = torch.ones(batch_size, response_len, dtype=torch.float)
values = torch.randn(batch_size, response_len, dtype=torch.float)
returns = torch.randn(batch_size, response_len, dtype=torch.float)
batch = TensorDict(
{
"input_ids": input_ids,
"attention_mask": attention_mask,
"position_ids": position_ids,
"responses": responses,
"response_mask": response_mask,
"values": values,
"returns": returns,
},
batch_size=[batch_size],
)
data = DataProto(
batch=batch,
meta_info={"global_token_num": [response_len] * batch_size, "batch_seqlens": [response_len] * batch_size},
)
return data
def test_init_model(self):
"""Test CriticWorker.init_model() method"""
worker = CriticWorker(self.config)
worker.init_model()
self.assertIsNotNone(worker.critic_module)
self.assertIsNotNone(worker.critic_optimizer)
self.assertIsNotNone(worker.critic)
self.assertIsNotNone(worker.checkpoint_manager)
def test_compute_values(self):
"""Test CriticWorker.compute_values() method"""
worker = CriticWorker(self.config)
worker.init_model()
data = self._create_test_data_for_compute_values()
result = worker.compute_values(data)
self.assertIsInstance(result, DataProto)
self.assertIn("values", result.batch)
values = result.batch["values"]
batch_size, response_len = 2, 5
self.assertEqual(values.shape, (batch_size, response_len))
self.assertTrue(torch.isfinite(values).all())
def test_update_critic(self):
"""Test CriticWorker.update_critic() method"""
worker = CriticWorker(self.config)
worker.init_model()
data = self._create_test_data_for_update_critic()
result = worker.update_critic(data)
self.assertIsInstance(result, DataProto)
self.assertIn("metrics", result.meta_info)
metrics = result.meta_info["metrics"]
expected_keys = ["critic/vf_loss", "critic/vf_clipfrac", "critic/vpred_mean", "critic/grad_norm"]
for key in expected_keys:
self.assertIn(key, metrics)
for key, value in metrics.items():
if isinstance(value, list | tuple):
for v in value:
self.assertTrue(torch.isfinite(torch.tensor(v)).all())
else:
self.assertTrue(torch.isfinite(torch.tensor(value)).all())
@patch("transformers.AutoConfig.from_pretrained")
def test_critic_attn_implementation_override_functionality(self, mock_config_from_pretrained):
"""Test that CriticWorker correctly uses attn_implementation from override_config"""
# Mock the AutoConfig return value
mock_config = Mock()
mock_config.tie_word_embeddings = False
mock_config.architectures = ["LlamaForCausalLM"]
mock_config.num_labels = 1
mock_config_from_pretrained.return_value = mock_config
# Test different attn_implementation values
test_cases = [
("eager", "eager"),
("sdpa", "sdpa"),
("flash_attention_2", "flash_attention_2"),
(None, "flash_attention_2"), # Default case
]
for override_value, expected_value in test_cases:
mock_config_from_pretrained.reset_mock()
# Create config with override_config
config_dict = {
"model": {
"path": "/test/model/path",
"tokenizer_path": "/test/tokenizer/path",
"fsdp_config": {
"fsdp_size": 1,
"param_offload": False,
"optimizer_offload": False,
},
},
"optim": {"lr": 1e-4, "type": "AdamW"},
"strategy": "fsdp",
"ppo_mini_batch_size": 1,
"ppo_epochs": 1,
"rollout_n": 1,
"checkpoint": {"save_contents": [], "load_contents": []},
}
# Add override_config with attn_implementation if specified
if override_value is not None:
config_dict["model"]["override_config"] = {"attn_implementation": override_value}
# Convert to OmegaConf
test_config = OmegaConf.create(config_dict)
# Test the extraction logic that should happen in CriticWorker._build_critic_model_optimizer
override_config = OmegaConf.to_container(OmegaConf.create(test_config.model.get("override_config", {})))
extracted_attn_implementation = override_config.get("attn_implementation", "flash_attention_2")
# Verify the extraction works correctly
self.assertEqual(
extracted_attn_implementation,
expected_value,
f"Expected {expected_value}, got {extracted_attn_implementation} for override_value {override_value}",
)
def test_critic_model_config_structure(self):
"""Test that critic model config properly incorporates override settings"""
# Test configuration scenarios
test_scenarios = [
{"name": "default_flash_attention", "override_config": {}, "expected_attn": "flash_attention_2"},
{"name": "eager_override", "override_config": {"attn_implementation": "eager"}, "expected_attn": "eager"},
{"name": "sdpa_override", "override_config": {"attn_implementation": "sdpa"}, "expected_attn": "sdpa"},
{
"name": "mixed_config",
"override_config": {"attn_implementation": "eager", "dropout": 0.1, "num_labels": 1},
"expected_attn": "eager",
},
]
for scenario in test_scenarios:
with self.subTest(scenario=scenario["name"]):
# Simulate the config processing logic from CriticWorker
override_config = scenario["override_config"]
# Test the extraction logic
extracted_attn = override_config.get("attn_implementation", "flash_attention_2")
# Verify correct extraction
self.assertEqual(extracted_attn, scenario["expected_attn"], f"Failed for scenario {scenario['name']}")
# Verify other configs are preserved
if "dropout" in override_config:
self.assertEqual(override_config["dropout"], 0.1)
def test_critic_hydra_config_compatibility(self):
"""Test that Hydra +prefix configurations work correctly for CriticWorker"""
# Simulate Hydra configuration with +prefix for critic
# This would come from: +critic.model.override_config.attn_implementation=eager
hydra_config_dict = {
"critic": {"model": {"path": "/test/model/path", "override_config": {"attn_implementation": "eager"}}}
}
omegaconf = OmegaConf.create(hydra_config_dict)
# Extract override config as would be done in CriticWorker
override_model_config = OmegaConf.to_container(
OmegaConf.create(omegaconf.critic.model.get("override_config", {}))
)
# Test extraction
attn_implementation = override_model_config.get("attn_implementation", "flash_attention_2")
self.assertEqual(attn_implementation, "eager")
def test_critic_backward_compatibility(self):
"""Test that CriticWorker maintains backward compatibility with existing configurations"""
# Test cases for backward compatibility
compatibility_tests = [
{"name": "no_override_config", "config": {}, "expected": "flash_attention_2"},
{"name": "empty_override_config", "config": {"override_config": {}}, "expected": "flash_attention_2"},
{
"name": "other_overrides_only",
"config": {"override_config": {"dropout": 0.1, "hidden_size": 768}},
"expected": "flash_attention_2",
},
]
for test in compatibility_tests:
with self.subTest(test=test["name"]):
override_config = test["config"].get("override_config", {})
attn_implementation = override_config.get("attn_implementation", "flash_attention_2")
self.assertEqual(
attn_implementation, test["expected"], f"Backward compatibility failed for {test['name']}"
)
def test_critic_and_actor_independent_configuration(self):
"""Test that critic and actor can have independent attention implementation configurations"""
# Simulate a complete training configuration with both actor and critic
complete_config = {
"actor_rollout_ref": {"model": {"override_config": {"attn_implementation": "eager"}}},
"critic": {"model": {"override_config": {"attn_implementation": "sdpa"}}},
}
omegaconf = OmegaConf.create(complete_config)
# Extract actor config
actor_override = OmegaConf.to_container(
OmegaConf.create(omegaconf.actor_rollout_ref.model.get("override_config", {}))
)
actor_attn = actor_override.get("attn_implementation", "flash_attention_2")
# Extract critic config
critic_override = OmegaConf.to_container(OmegaConf.create(omegaconf.critic.model.get("override_config", {})))
critic_attn = critic_override.get("attn_implementation", "flash_attention_2")
# Verify independent configuration
self.assertEqual(actor_attn, "eager")
self.assertEqual(critic_attn, "sdpa")
self.assertNotEqual(actor_attn, critic_attn) # Ensure they are indeed different
if __name__ == "__main__":
unittest.main()
| {
"repo_id": "verl-project/verl",
"file_path": "tests/workers/critic/test_special_dp_critic.py",
"license": "Apache License 2.0",
"lines": 294,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:tests/workers/actor/test_special_dp_actor.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
import torch.nn as nn
from tensordict import TensorDict
from transformers import AutoModelForCausalLM, Qwen3Config
from verl import DataProto
from verl.utils.device import get_device_name
from verl.workers.actor.dp_actor import DataParallelPPOActor
from verl.workers.config import FSDPActorConfig, OptimizerConfig
class MockTransformerModel(nn.Module):
"""Mock transformer model for testing DataParallelPPOActor"""
def __init__(self, vocab_size=1000, hidden_size=64):
super().__init__()
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.embedding = nn.Embedding(vocab_size, hidden_size)
self.transformer = nn.TransformerEncoder(
nn.TransformerEncoderLayer(d_model=hidden_size, nhead=4, batch_first=True), num_layers=2
)
self.lm_head = nn.Linear(hidden_size, vocab_size)
def forward(self, input_ids, attention_mask=None, position_ids=None, use_cache=False, **kwargs):
batch_size, seq_len = input_ids.shape
embeddings = self.embedding(input_ids)
hidden_states = self.transformer(embeddings)
logits = self.lm_head(hidden_states)
class MockOutput:
def __init__(self, logits):
self.logits = logits
return MockOutput(logits)
class TestDataParallelPPOActor(unittest.TestCase):
"""Test DataParallelPPOActor compute_log_prob and update_policy methods"""
@classmethod
def setUpClass(cls):
"""Set up distributed environment"""
if get_device_name() == "cuda":
backend_name = "nccl"
elif get_device_name() == "npu":
backend_name = "hccl"
else:
backend_name = "gloo"
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend=backend_name, init_method="env://")
cls.rank = torch.distributed.get_rank()
cls.world_size = torch.distributed.get_world_size()
if get_device_name() == "cuda":
torch.cuda.set_device(cls.rank)
cls.device = torch.device(f"cuda:{cls.rank}")
elif get_device_name() == "npu":
torch.npu.set_device(cls.rank)
cls.device = torch.device(f"npu:{cls.rank}")
else:
cls.device = torch.device("cpu")
def setUp(self):
"""Set up test fixtures"""
self.config = FSDPActorConfig(
strategy="fsdp2",
ppo_mini_batch_size=4,
ppo_micro_batch_size_per_gpu=2,
ppo_epochs=1,
clip_ratio=0.2,
entropy_coeff=0.01,
grad_clip=1.0,
use_dynamic_bsz=False,
use_torch_compile=False, # Disable torch.compile for testing
ulysses_sequence_parallel_size=1,
optim=OptimizerConfig(lr=1e-6),
rollout_n=1,
)
self.mock_model = MockTransformerModel(vocab_size=1000, hidden_size=64).to(self.device)
self.mock_optimizer = torch.optim.Adam(self.mock_model.parameters(), lr=1e-4)
self.actor = DataParallelPPOActor(
config=self.config, actor_module=self.mock_model, actor_optimizer=self.mock_optimizer
)
@classmethod
def tearDownClass(cls):
"""Clean up distributed environment"""
if torch.distributed.is_initialized():
torch.distributed.destroy_process_group()
def _create_test_data_for_compute_log_prob(self):
"""Create test DataProto for compute_log_prob method"""
batch_size = 2
prompt_length = 8
response_length = 4
total_length = prompt_length + response_length
vocab_size = 1000
input_ids = torch.randint(0, vocab_size, (batch_size, total_length)).to(self.device)
attention_mask = torch.ones(batch_size, total_length).to(self.device)
position_ids = torch.arange(total_length).unsqueeze(0).expand(batch_size, -1).to(self.device)
responses = input_ids[:, -response_length:] # Last part is the response
tensor_dict = TensorDict(
{
"input_ids": input_ids,
"attention_mask": attention_mask,
"position_ids": position_ids,
"responses": responses,
},
batch_size=[batch_size],
)
meta_info = {"micro_batch_size": batch_size, "temperature": 1.0, "use_dynamic_bsz": False}
return DataProto(batch=tensor_dict, meta_info=meta_info)
def _create_test_data_for_update_policy(self):
"""Create test DataProto for update_policy method"""
batch_size = 4 # Must match ppo_mini_batch_size
prompt_length = 8
response_length = 4
total_length = prompt_length + response_length
vocab_size = 1000
input_ids = torch.randint(0, vocab_size, (batch_size, total_length)).to(self.device)
attention_mask = torch.ones(batch_size, total_length).to(self.device)
position_ids = torch.arange(total_length).unsqueeze(0).expand(batch_size, -1).to(self.device)
responses = input_ids[:, -response_length:]
response_mask = torch.ones(batch_size, response_length).to(self.device)
old_log_probs = torch.randn(batch_size, response_length).to(self.device) * 0.1 # Small values
advantages = torch.randn(batch_size, response_length).to(self.device) * 0.5
tensor_dict = TensorDict(
{
"input_ids": input_ids,
"attention_mask": attention_mask,
"position_ids": position_ids,
"responses": responses,
"response_mask": response_mask,
"old_log_probs": old_log_probs,
"advantages": advantages,
},
batch_size=[batch_size],
)
meta_info = {"temperature": 1.0}
return DataProto(batch=tensor_dict, meta_info=meta_info)
def test_compute_log_prob(self):
"""Test compute_log_prob method"""
data = self._create_test_data_for_compute_log_prob()
outputs = self.actor.compute_log_prob(data, calculate_entropy=True)
log_probs = outputs["log_probs"]
entropys = outputs["entropys"]
batch_size = data.batch["responses"].shape[0]
response_length = data.batch["responses"].shape[1]
self.assertIsInstance(log_probs, torch.Tensor)
self.assertEqual(log_probs.shape, (batch_size, response_length))
self.assertTrue(torch.all(torch.isfinite(log_probs)))
self.assertIsInstance(entropys, torch.Tensor)
self.assertEqual(entropys.shape, (batch_size, response_length))
self.assertTrue(torch.all(torch.isfinite(entropys)))
self.assertTrue(torch.all(entropys >= 0)) # Entropy should be non-negative
def test_compute_log_prob_without_entropy(self):
"""Test compute_log_prob method without entropy calculation"""
data = self._create_test_data_for_compute_log_prob()
outputs = self.actor.compute_log_prob(data, calculate_entropy=False)
log_probs = outputs["log_probs"]
entropys = outputs.get("entropys", None)
batch_size = data.batch["responses"].shape[0]
response_length = data.batch["responses"].shape[1]
self.assertIsInstance(log_probs, torch.Tensor)
self.assertEqual(log_probs.shape, (batch_size, response_length))
self.assertTrue(torch.all(torch.isfinite(log_probs)))
self.assertIsNone(entropys)
def test_update_policy(self):
"""Test update_policy method"""
data = self._create_test_data_for_update_policy()
metrics = self.actor.update_policy(data)
self.assertIsInstance(metrics, dict)
expected_metric_keys = [
"actor/pg_loss",
"actor/pg_clipfrac",
"actor/ppo_kl",
"actor/pg_clipfrac_lower",
"actor/grad_norm",
]
for key in expected_metric_keys:
self.assertIn(key, metrics)
if isinstance(metrics[key], list):
self.assertTrue(all(torch.isfinite(torch.tensor(v)) for v in metrics[key]))
else:
self.assertIsInstance(metrics[key], (float, int))
self.assertTrue(torch.isfinite(torch.tensor(metrics[key])))
def test_dataparallelppoactor_initialization(self):
"""Test DataParallelPPOActor initialization"""
self.assertIsNotNone(self.actor.actor_module)
self.assertIsNotNone(self.actor.actor_optimizer)
self.assertEqual(self.actor.config, self.config)
self.assertEqual(self.actor.config.strategy, "fsdp2")
self.assertEqual(self.actor.config.ppo_mini_batch_size, 4)
self.assertEqual(self.actor.config.clip_ratio, 0.2)
def test_dataparallelppoactor_with_qwen3_model(self):
"""Test DataParallelPPOActor with real Qwen3ForCausalLM model"""
qwen_config = Qwen3Config(
vocab_size=1000,
hidden_size=64,
intermediate_size=128,
num_hidden_layers=2,
num_attention_heads=4,
num_key_value_heads=2,
max_position_embeddings=512,
torch_dtype=torch.float32,
use_cache=False,
)
with torch.device(self.device):
qwen_model = AutoModelForCausalLM.from_config(config=qwen_config, torch_dtype=torch.float32).to(self.device)
qwen_optimizer = torch.optim.Adam(qwen_model.parameters(), lr=1e-4)
qwen_actor = DataParallelPPOActor(config=self.config, actor_module=qwen_model, actor_optimizer=qwen_optimizer)
data = self._create_test_data_for_compute_log_prob()
outputs = qwen_actor.compute_log_prob(data, calculate_entropy=True)
log_probs = outputs["log_probs"]
entropys = outputs["entropys"]
batch_size = data.batch["responses"].shape[0]
response_length = data.batch["responses"].shape[1]
self.assertIsInstance(log_probs, torch.Tensor)
self.assertEqual(log_probs.shape, (batch_size, response_length))
self.assertTrue(torch.all(torch.isfinite(log_probs)))
self.assertIsInstance(entropys, torch.Tensor)
self.assertEqual(entropys.shape, (batch_size, response_length))
self.assertTrue(torch.all(torch.isfinite(entropys)))
self.assertTrue(torch.all(entropys >= 0))
policy_data = self._create_test_data_for_update_policy()
metrics = qwen_actor.update_policy(policy_data)
self.assertIsInstance(metrics, dict)
expected_metric_keys = [
"actor/pg_loss",
"actor/pg_clipfrac",
"actor/ppo_kl",
"actor/pg_clipfrac_lower",
"actor/grad_norm",
]
for key in expected_metric_keys:
self.assertIn(key, metrics)
if isinstance(metrics[key], list):
self.assertTrue(all(torch.isfinite(torch.tensor(v)) for v in metrics[key]))
else:
self.assertIsInstance(metrics[key], (float, int))
self.assertTrue(torch.isfinite(torch.tensor(metrics[key])))
if __name__ == "__main__":
unittest.main()
| {
"repo_id": "verl-project/verl",
"file_path": "tests/workers/actor/test_special_dp_actor.py",
"license": "Apache License 2.0",
"lines": 244,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:tests/workers/config/test_actor_config_on_cpu.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from verl.utils.config import omega_conf_to_dataclass
from verl.workers.config import (
ActorConfig,
FSDPActorConfig,
McoreActorConfig,
OptimizerConfig,
)
class TestActorConfig(unittest.TestCase):
"""Test the ActorConfig dataclass and its variants."""
def test_config_inheritance(self):
"""Test that the inheritance hierarchy works correctly."""
megatron_dict = {
"_target_": "verl.workers.config.McoreActorConfig",
"strategy": "megatron",
"ppo_mini_batch_size": 256,
"ppo_micro_batch_size_per_gpu": 256,
"clip_ratio": 0.2,
"optim": {
"_target_": "verl.workers.config.McoreOptimizerConfig",
"lr": 0.1,
},
"rollout_n": 1,
}
fsdp_dict = {
"_target_": "verl.workers.config.FSDPActorConfig",
"strategy": "fsdp",
"ppo_mini_batch_size": 256,
"ppo_micro_batch_size_per_gpu": 256,
"clip_ratio": 0.2,
"optim": {
"_target_": "verl.workers.config.FSDPOptimizerConfig",
"lr": 0.1,
},
"rollout_n": 1,
}
megatron_config = omega_conf_to_dataclass(megatron_dict)
fsdp_config = omega_conf_to_dataclass(fsdp_dict)
self.assertIsInstance(megatron_config, ActorConfig)
self.assertIsInstance(fsdp_config, ActorConfig)
self.assertEqual(megatron_config.ppo_mini_batch_size, fsdp_config.ppo_mini_batch_size)
self.assertEqual(megatron_config.clip_ratio, fsdp_config.clip_ratio)
def test_actor_config_from_yaml(self):
"""Test creating ActorConfig from YAML file."""
from hydra import compose, initialize_config_dir
with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config/actor")):
cfg = compose(config_name="actor", overrides=["strategy=fsdp", "ppo_micro_batch_size_per_gpu=128"])
config = omega_conf_to_dataclass(cfg)
self.assertIsInstance(config, ActorConfig)
self.assertEqual(config.strategy, "fsdp")
def test_fsdp_actor_config_from_yaml(self):
"""Test creating FSDPActorConfig from YAML file."""
from hydra import compose, initialize_config_dir
with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config/actor")):
cfg = compose(config_name="dp_actor", overrides=["strategy=fsdp2", "ppo_micro_batch_size_per_gpu=128"])
config = omega_conf_to_dataclass(cfg)
self.assertIsInstance(config, FSDPActorConfig)
self.assertEqual(config.strategy, "fsdp2")
def test_megatron_actor_config_from_yaml(self):
"""Test creating McoreActorConfig from YAML file."""
from hydra import compose, initialize_config_dir
with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config/actor")):
cfg = compose(config_name="megatron_actor", overrides=["ppo_micro_batch_size_per_gpu=128"])
config = omega_conf_to_dataclass(cfg)
self.assertIsInstance(config, McoreActorConfig)
self.assertEqual(config.strategy, "megatron")
def test_config_get_method(self):
"""Test the get method for backward compatibility."""
config_dict = {
"_target_": "verl.workers.config.ActorConfig",
"strategy": "fsdp",
"ppo_mini_batch_size": 256,
"ppo_micro_batch_size_per_gpu": 256,
"optim": {
"_target_": "verl.workers.config.OptimizerConfig",
"lr": 0.1,
},
"rollout_n": 1,
}
config = omega_conf_to_dataclass(config_dict)
self.assertEqual(config.get("strategy"), "fsdp")
self.assertEqual(config.get("ppo_mini_batch_size"), 256)
self.assertIsNone(config.get("non_existing"))
self.assertEqual(config.get("non_existing", "default"), "default")
def test_config_dict_like_access(self):
"""Test dictionary-like access to config fields."""
config_dict = {
"_target_": "verl.workers.config.ActorConfig",
"strategy": "fsdp",
"ppo_mini_batch_size": 256,
"ppo_micro_batch_size_per_gpu": 256,
"optim": {
"_target_": "verl.workers.config.OptimizerConfig",
"lr": 0.1,
},
"rollout_n": 1,
}
config = omega_conf_to_dataclass(config_dict)
self.assertEqual(config["strategy"], "fsdp")
self.assertEqual(config["ppo_mini_batch_size"], 256)
field_names = list(config)
self.assertIn("strategy", field_names)
self.assertIn("ppo_mini_batch_size", field_names)
self.assertGreater(len(config), 0)
def test_frozen_fields_modification_raises_exception(self):
"""Test that modifying frozen fields raises an exception."""
config_dict = {
"_target_": "verl.workers.config.ActorConfig",
"strategy": "fsdp",
"ppo_mini_batch_size": 256,
"ppo_micro_batch_size_per_gpu": 256,
"optim": {
"_target_": "verl.workers.config.OptimizerConfig",
"lr": 0.1,
},
"rollout_n": 1,
}
config = omega_conf_to_dataclass(config_dict)
with self.assertRaises(AttributeError):
config.strategy = "megatron"
with self.assertRaises(AttributeError):
config.clip_ratio = 0.5
config.ppo_mini_batch_size = 512 # This should work since it's not in frozen fields anymore
self.assertEqual(config.ppo_mini_batch_size, 512)
def test_actor_config_validation_exceptions(self):
"""Test that ActorConfig.__post_init__ raises appropriate validation exceptions."""
optim = OptimizerConfig(lr=0.1)
with self.assertRaises((ValueError, AssertionError)) as cm:
ActorConfig(
strategy="fsdp",
loss_agg_mode="invalid-mode",
use_dynamic_bsz=True,
optim=optim,
ppo_micro_batch_size_per_gpu=4,
rollout_n=1,
)
self.assertIn("Invalid loss_agg_mode", str(cm.exception))
with self.assertRaises((ValueError, AssertionError)) as cm:
ActorConfig(
strategy="fsdp",
use_dynamic_bsz=False,
ppo_micro_batch_size=4,
ppo_micro_batch_size_per_gpu=2,
optim=optim,
rollout_n=1,
)
self.assertIn("You have set both", str(cm.exception))
with self.assertRaises((ValueError, AssertionError)) as cm:
ActorConfig(
strategy="fsdp",
use_dynamic_bsz=False,
ppo_micro_batch_size=None,
ppo_micro_batch_size_per_gpu=None,
optim=optim,
rollout_n=1,
)
self.assertIn("Please set at least one", str(cm.exception))
config = ActorConfig(
strategy="fsdp",
use_dynamic_bsz=True,
ppo_micro_batch_size=None,
ppo_micro_batch_size_per_gpu=None,
optim=optim,
rollout_n=1,
)
self.assertIsNotNone(config) # Should not raise an exception
def test_fsdp_actor_config_validation_exceptions(self):
"""Test that FSDPActorConfig.validate() raises appropriate validation exceptions."""
optim = OptimizerConfig(lr=0.1)
config = FSDPActorConfig(
strategy="fsdp",
ulysses_sequence_parallel_size=2,
use_dynamic_bsz=True, # Skip batch size validation to focus on FSDP validation
optim=optim,
rollout_n=1,
)
model_config = {"use_remove_padding": False}
with self.assertRaises(ValueError) as cm:
config.validate(n_gpus=8, train_batch_size=256, model_config=model_config)
self.assertIn("you must enable `use_remove_padding`", str(cm.exception))
def test_actor_config_validate_method_exceptions(self):
"""Test that ActorConfig.validate() raises appropriate validation exceptions."""
optim = OptimizerConfig(lr=0.1)
config = ActorConfig(
strategy="fsdp",
use_dynamic_bsz=False,
ppo_mini_batch_size=256,
ppo_micro_batch_size=8,
ppo_micro_batch_size_per_gpu=None, # Ensure only one batch size setting is used
optim=optim,
rollout_n=1,
)
with self.assertRaises(ValueError) as cm:
config.validate(n_gpus=8, train_batch_size=128)
self.assertIn("train_batch_size", str(cm.exception))
with self.assertRaises(ValueError) as cm:
config.validate(n_gpus=16, train_batch_size=512)
self.assertIn("must be >= n_gpus", str(cm.exception))
if __name__ == "__main__":
unittest.main()
| {
"repo_id": "verl-project/verl",
"file_path": "tests/workers/config/test_actor_config_on_cpu.py",
"license": "Apache License 2.0",
"lines": 214,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:tests/workers/config/test_critic_config_on_cpu.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
import pytest
from hydra import compose, initialize_config_dir
from verl.utils.config import omega_conf_to_dataclass
from verl.utils.profiler import ProfilerConfig
from verl.workers.config import (
CriticConfig,
FSDPCriticConfig,
FSDPOptimizerConfig,
McoreCriticConfig,
McoreOptimizerConfig,
OptimizerConfig,
)
@pytest.mark.skip(reason="This test is flaky when we actively load model config")
class TestCriticConfig:
"""Test suite for critic configuration dataclasses."""
@pytest.fixture
def config_dir(self):
"""Get the path to the config directory."""
return Path(__file__).parent.parent.parent.parent / "verl" / "trainer" / "config" / "critic"
def test_megatron_critic_config_instantiation_from_yaml(self, config_dir):
"""Test that McoreCriticConfig can be instantiated from megatron_critic.yaml."""
yaml_path = config_dir / "megatron_critic.yaml"
assert yaml_path.exists(), f"Config file not found: {yaml_path}"
with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config/critic")):
test_config = compose(config_name="megatron_critic", overrides=["ppo_micro_batch_size_per_gpu=1"])
megatron_config_obj = omega_conf_to_dataclass(test_config)
assert isinstance(megatron_config_obj, McoreCriticConfig)
assert isinstance(megatron_config_obj, CriticConfig)
expected_attrs = [
"strategy",
"rollout_n",
"optim",
"model",
"ppo_mini_batch_size",
"ppo_max_token_len_per_gpu",
"cliprange_value",
"get",
"nccl_timeout",
"megatron",
"load_weight",
]
for attr in expected_attrs:
assert hasattr(megatron_config_obj, attr), f"Missing attribute: {attr}"
assert callable(megatron_config_obj.get)
assert megatron_config_obj.strategy == "megatron"
def test_fsdp_critic_config_instantiation_from_yaml(self, config_dir):
"""Test that FSDPCriticConfig can be instantiated from dp_critic.yaml."""
yaml_path = config_dir / "dp_critic.yaml"
assert yaml_path.exists(), f"Config file not found: {yaml_path}"
with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config/critic")):
test_config = compose(config_name="dp_critic", overrides=["ppo_micro_batch_size_per_gpu=1"])
fsdp_config_obj = omega_conf_to_dataclass(test_config)
assert isinstance(fsdp_config_obj, FSDPCriticConfig)
assert isinstance(fsdp_config_obj, CriticConfig)
expected_attrs = [
"strategy",
"rollout_n",
"optim",
"model",
"ppo_mini_batch_size",
"ppo_max_token_len_per_gpu",
"cliprange_value",
"get",
"forward_micro_batch_size",
"forward_micro_batch_size_per_gpu",
"ulysses_sequence_parallel_size",
"grad_clip",
]
for attr in expected_attrs:
assert hasattr(fsdp_config_obj, attr), f"Missing attribute: {attr}"
assert callable(fsdp_config_obj.get)
assert fsdp_config_obj.strategy == "fsdp"
def test_config_inheritance_hierarchy(self):
"""Test that the inheritance hierarchy is correct."""
megatron_config = McoreCriticConfig(ppo_micro_batch_size_per_gpu=1, optim=McoreOptimizerConfig(lr=0.1))
assert isinstance(megatron_config, CriticConfig)
assert isinstance(megatron_config, McoreCriticConfig)
fsdp_config = FSDPCriticConfig(ppo_micro_batch_size_per_gpu=1, optim=FSDPOptimizerConfig(lr=0.1))
assert isinstance(fsdp_config, CriticConfig)
assert isinstance(fsdp_config, FSDPCriticConfig)
critic_config = CriticConfig(ppo_micro_batch_size_per_gpu=1, strategy="fsdp2", optim=OptimizerConfig(lr=0.1))
assert isinstance(critic_config, CriticConfig)
assert not isinstance(critic_config, McoreCriticConfig)
assert not isinstance(critic_config, FSDPCriticConfig)
def test_config_dict_interface(self):
"""Test that configs provide dict-like interface from BaseConfig."""
optim = OptimizerConfig(lr=0.1)
config = CriticConfig(ppo_micro_batch_size_per_gpu=1, strategy="fsdp2", optim=optim)
assert "strategy" in config
assert config["strategy"] == "fsdp2"
assert config.get("strategy") == "fsdp2"
assert config.get("nonexistent_key", "default") == "default"
keys = list(config)
assert "strategy" in keys
assert "rollout_n" in keys
assert len(config) > 0
def test_frozen_fields_immutability(self):
"""Test that frozen fields raise exceptions when modified after creation."""
critic_config = CriticConfig(ppo_micro_batch_size_per_gpu=1, strategy="fsdp2", optim=OptimizerConfig(lr=0.1))
frozen_fields = ["rollout_n", "strategy", "cliprange_value"]
for field_name in frozen_fields:
with pytest.raises((AttributeError, TypeError, ValueError)):
setattr(critic_config, field_name, "modified_value")
megatron_config = McoreCriticConfig(ppo_micro_batch_size_per_gpu=1, optim=McoreOptimizerConfig(lr=0.1))
megatron_frozen_fields = ["nccl_timeout", "load_weight", "data_loader_seed"]
for field_name in megatron_frozen_fields:
with pytest.raises((AttributeError, TypeError, ValueError)):
setattr(megatron_config, field_name, "modified_value")
fsdp_config = FSDPCriticConfig(ppo_micro_batch_size_per_gpu=1, optim=FSDPOptimizerConfig(lr=0.1))
fsdp_frozen_fields = ["ulysses_sequence_parallel_size", "grad_clip"]
for field_name in fsdp_frozen_fields:
with pytest.raises((AttributeError, TypeError, ValueError)):
setattr(fsdp_config, field_name, "modified_value")
def test_batch_size_fields_modifiable(self):
"""Test that batch size fields can be modified after creation."""
optim = OptimizerConfig(lr=0.1)
critic_config = CriticConfig(ppo_micro_batch_size_per_gpu=1, strategy="fsdp2", optim=optim)
critic_config.ppo_mini_batch_size = 8
critic_config.ppo_micro_batch_size = 4
critic_config.ppo_micro_batch_size_per_gpu = 2
assert critic_config.ppo_mini_batch_size == 8
assert critic_config.ppo_micro_batch_size == 4
assert critic_config.ppo_micro_batch_size_per_gpu == 2
fsdp_config = FSDPCriticConfig(ppo_micro_batch_size_per_gpu=1, optim=FSDPOptimizerConfig(lr=0.1))
fsdp_config.forward_micro_batch_size = 16
fsdp_config.forward_micro_batch_size_per_gpu = 8
assert fsdp_config.forward_micro_batch_size == 16
assert fsdp_config.forward_micro_batch_size_per_gpu == 8
def test_profiler_config_type_validation(self):
"""Test that profiler field has correct type and validation."""
optim = OptimizerConfig(lr=0.1)
critic_config = CriticConfig(ppo_micro_batch_size_per_gpu=1, strategy="fsdp2", optim=optim)
assert isinstance(critic_config.profiler, ProfilerConfig)
assert critic_config.profiler.all_ranks is False
assert critic_config.profiler.ranks == []
custom_profiler = ProfilerConfig(all_ranks=True, ranks=[0, 1])
critic_config_custom = CriticConfig(
profiler=custom_profiler, ppo_micro_batch_size_per_gpu=1, strategy="fsdp2", optim=optim
)
assert isinstance(critic_config_custom.profiler, ProfilerConfig)
assert critic_config_custom.profiler.all_ranks is True
assert critic_config_custom.profiler.ranks == [0, 1]
profiler1 = ProfilerConfig(enable=True, ranks=[0, 1])
profiler2 = ProfilerConfig(all_ranks=True, ranks=[1, 2])
union_result = profiler1.union(profiler2)
assert union_result.enable is True
assert union_result.all_ranks is True
assert set(union_result.ranks) == {0, 1, 2}
intersect_result = profiler1.intersect(profiler2)
assert intersect_result.all_ranks is False
assert intersect_result.ranks == [1]
def test_critic_config_validation_logic(self):
"""Test the __post_init__ validation logic for CriticConfig."""
optim = OptimizerConfig(lr=0.1)
valid_config = CriticConfig(
strategy="fsdp2", ppo_micro_batch_size_per_gpu=2, use_dynamic_bsz=False, optim=optim
)
assert valid_config.ppo_micro_batch_size_per_gpu == 2
valid_config2 = CriticConfig(
strategy="fsdp2",
ppo_micro_batch_size_per_gpu=None,
ppo_micro_batch_size=4,
ppo_mini_batch_size=8,
use_dynamic_bsz=False,
optim=optim,
)
assert valid_config2.ppo_micro_batch_size == 4
dynamic_config = CriticConfig(
strategy="fsdp2", ppo_micro_batch_size_per_gpu=2, use_dynamic_bsz=True, optim=optim
)
assert dynamic_config.use_dynamic_bsz is True
with pytest.raises(ValueError, match="You have set both.*micro_batch_size.*AND.*micro_batch_size_per_gpu"):
CriticConfig(
strategy="fsdp2",
ppo_micro_batch_size=4,
ppo_micro_batch_size_per_gpu=2,
use_dynamic_bsz=False,
optim=optim,
)
with pytest.raises(
ValueError, match="Please set at least one of.*micro_batch_size.*or.*micro_batch_size_per_gpu"
):
CriticConfig(
strategy="fsdp2",
ppo_micro_batch_size=None,
ppo_micro_batch_size_per_gpu=None,
use_dynamic_bsz=False,
optim=optim,
)
def test_micro_batch_size_divisibility_validation(self):
"""Test micro batch size divisibility validation in __post_init__."""
optim = OptimizerConfig(lr=0.1)
valid_config = CriticConfig(
strategy="fsdp2", ppo_micro_batch_size_per_gpu=2, ppo_mini_batch_size=8, use_dynamic_bsz=False, optim=optim
)
assert valid_config.ppo_mini_batch_size == 8
assert valid_config.ppo_micro_batch_size_per_gpu == 2
valid_config_with_mbs = CriticConfig(
strategy="fsdp2", ppo_mini_batch_size=8, ppo_micro_batch_size=4, use_dynamic_bsz=False, optim=optim
)
assert valid_config_with_mbs.ppo_mini_batch_size == 8
assert valid_config_with_mbs.ppo_micro_batch_size == 4
with pytest.raises(ValueError, match="ppo_mini_batch_size.*must be divisible by.*ppo_micro_batch_size"):
CriticConfig(
strategy="fsdp2", ppo_mini_batch_size=7, ppo_micro_batch_size=4, use_dynamic_bsz=False, optim=optim
)
dynamic_config = CriticConfig(
strategy="fsdp2", ppo_mini_batch_size=7, ppo_micro_batch_size=4, use_dynamic_bsz=True, optim=optim
)
assert dynamic_config.use_dynamic_bsz is True
def test_fsdp_sequence_parallelism_validation(self):
"""Test FSDP sequence parallelism validation in FSDPCriticConfig.__post_init__."""
valid_config = FSDPCriticConfig(
ppo_micro_batch_size_per_gpu=2,
ulysses_sequence_parallel_size=2,
model={"use_remove_padding": True},
optim=FSDPOptimizerConfig(lr=0.1),
)
assert valid_config.ulysses_sequence_parallel_size == 2
with pytest.raises(
ValueError, match="When using sequence parallelism for critic, you must enable.*use_remove_padding"
):
FSDPCriticConfig(
ppo_micro_batch_size_per_gpu=2,
ulysses_sequence_parallel_size=2,
model={"use_remove_padding": False},
optim=FSDPOptimizerConfig(lr=0.1),
)
valid_config_no_sp = FSDPCriticConfig(
ppo_micro_batch_size_per_gpu=2,
ulysses_sequence_parallel_size=1,
model={"use_remove_padding": False},
optim=FSDPOptimizerConfig(lr=0.1),
)
assert valid_config_no_sp.ulysses_sequence_parallel_size == 1
| {
"repo_id": "verl-project/verl",
"file_path": "tests/workers/config/test_critic_config_on_cpu.py",
"license": "Apache License 2.0",
"lines": 250,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:tests/workers/config/test_engine_config_on_cpu.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from verl.workers.config.engine import FSDPEngineConfig, McoreEngineConfig
class TestMcoreEngineConfig:
def test_default_values(self):
config = McoreEngineConfig()
assert config.tensor_model_parallel_size == 1
assert config.sequence_parallel is False # Should be auto-corrected
assert config.seed == 42
def test_post_init_validation(self):
# Test TP size 1 forces sequence_parallel=False
config = McoreEngineConfig(tensor_model_parallel_size=1)
assert config.sequence_parallel is False
# Test TP >1 keeps sequence_parallel=True
config = McoreEngineConfig(tensor_model_parallel_size=2)
assert config.sequence_parallel is True
def test_mutable_fields(self):
config = McoreEngineConfig()
config.sequence_parallel = True # Should be mutable
with pytest.raises(AttributeError):
config.tensor_model_parallel_size = 2 # Frozen field
@pytest.mark.parametrize("offload_field", ["param_offload", "grad_offload", "optimizer_offload"])
def test_offload_flags(self, offload_field):
config = McoreEngineConfig(**{offload_field: True})
assert getattr(config, offload_field) is True
class TestFSDPEngineConfigCPU:
def test_default_values(self):
config = FSDPEngineConfig()
assert config.param_offload is False
assert config.optimizer_offload is False
assert config.fsdp_size == -1
@pytest.mark.parametrize(
"offload_params",
[{"param_offload": True}, {"optimizer_offload": True}, {"param_offload": True, "optimizer_offload": True}],
)
def test_offload_combinations(self, offload_params):
config = FSDPEngineConfig(**offload_params)
assert config.param_offload == offload_params.get("param_offload", False)
assert config.optimizer_offload == offload_params.get("optimizer_offload", False)
def test_wrap_policy_configuration(self):
test_policy = {"layer_class": "TransformerBlock"}
config = FSDPEngineConfig(wrap_policy=test_policy)
assert config.wrap_policy == test_policy
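# --- Illustrative sketch (editor addition, not part of the original test suite) ---
# A standalone example combining the FSDP engine options exercised above; the values
# are placeholders, and the function name is hypothetical (not collected by pytest).
def _example_fsdp_engine_offload_combo():
    cfg = FSDPEngineConfig(param_offload=True, optimizer_offload=True, fsdp_size=8)
    assert cfg.param_offload and cfg.optimizer_offload
    assert cfg.fsdp_size == 8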
| {
"repo_id": "verl-project/verl",
"file_path": "tests/workers/config/test_engine_config_on_cpu.py",
"license": "Apache License 2.0",
"lines": 55,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:tests/workers/config/test_optim_config_on_cpu.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from verl.workers.config.optimizer import FSDPOptimizerConfig
class TestFSDPOptimizerConfigCPU:
def test_default_configuration(self):
config = FSDPOptimizerConfig(lr=0.1)
assert config.min_lr_ratio is None
assert config.lr_scheduler_type == "constant"
assert config.num_cycles == 0.5
@pytest.mark.parametrize("lr_scheduler_type", ["constant", "cosine"])
def test_valid_lr_scheduler_types(self, lr_scheduler_type):
config = FSDPOptimizerConfig(lr_scheduler_type=lr_scheduler_type, lr=0.1)
assert config.lr_scheduler_type == lr_scheduler_type
@pytest.mark.parametrize("warmup_style", ["constant", "cosine"])
def test_valid_warmup_style_types(self, warmup_style):
config = FSDPOptimizerConfig(warmup_style=warmup_style, lr=0.1)
assert config.lr_scheduler_type == warmup_style
def test_invalid_lr_scheduler_type(self):
with pytest.raises((ValueError, AssertionError)):
FSDPOptimizerConfig(lr_scheduler_type="invalid_style", lr=0.1)
def test_invalid_warmup_style_type(self):
with pytest.raises((ValueError, AssertionError)):
FSDPOptimizerConfig(warmup_style="invalid_style", lr=0.1)
@pytest.mark.parametrize("num_cycles", [0.1, 1.0, 2.5])
def test_num_cycles_configuration(self, num_cycles):
config = FSDPOptimizerConfig(num_cycles=num_cycles, lr=0.1)
assert config.num_cycles == num_cycles
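# --- Illustrative sketch (editor addition, not part of the original test suite) ---
# A standalone restatement of the backward-compatibility behaviour exercised above:
# the deprecated `warmup_style` argument is mirrored into `lr_scheduler_type`.
# The function name is hypothetical and is not collected by pytest.
def _example_warmup_style_alias():
    legacy = FSDPOptimizerConfig(warmup_style="cosine", lr=0.1)
    assert legacy.lr_scheduler_type == "cosine"
    explicit = FSDPOptimizerConfig(lr_scheduler_type="constant", lr=0.1)
    assert explicit.lr_scheduler_type == "constant"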
| {
"repo_id": "verl-project/verl",
"file_path": "tests/workers/config/test_optim_config_on_cpu.py",
"license": "Apache License 2.0",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:verl/workers/config/actor.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from typing import Any, Optional
from omegaconf import MISSING
from verl.base_config import BaseConfig
from verl.trainer.config import CheckpointConfig, RolloutCorrectionConfig
from verl.utils.profiler.config import ProfilerConfig
from verl.utils.qat import QATConfig
from .engine import FSDPEngineConfig, McoreEngineConfig, TorchtitanEngineConfig, VeOmniEngineConfig
from .model import HFModelConfig
from .optimizer import OptimizerConfig
__all__ = [
"PolicyLossConfig",
"RouterReplayConfig",
"ActorConfig",
"FSDPActorConfig",
"McoreActorConfig",
"VeOmniActorConfig",
"QATConfig",
"TorchTitanActorConfig",
]
@dataclass
class RouterReplayConfig(BaseConfig):
"""Configuration for router replay in MoE models.
This configuration controls the routing behavior for Mixture of Experts (MoE) models,
allowing for deterministic training through route recording and replay.
Args:
mode (str): Router replay mode. Options: 'disabled', 'R2', 'R3'.
- 'disabled': No router replay functionality
- 'R2': Use Router Replay routing strategy
- 'R3': Use Rollout Router Replay routing strategy
        record_file (Optional[str]): File path to save recorded routing decisions.
            Required when mode is 'R2' or 'R3'.
        replay_file (Optional[str]): File path to load previously recorded routing decisions
            when replaying them.
"""
mode: str = "disabled"
record_file: Optional[str] = None
replay_file: Optional[str] = None
def __post_init__(self):
"""Validate router replay configuration."""
valid_modes = ["disabled", "R2", "R3"]
if self.mode not in valid_modes:
raise ValueError(f"Invalid router_replay mode: {self.mode}. Must be one of {valid_modes}")
@dataclass
class PolicyLossConfig(BaseConfig):
"""Configuration for policy loss computation.
The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config.
Args:
loss_mode (str): Loss function mode. Options: 'vanilla', 'clip-cov', 'kl-cov', 'gpg'.
clip_cov_ratio (float): Ratio of tokens to be clipped for clip-cov loss.
clip_cov_lb (float): Lower bound for clip-cov loss.
clip_cov_ub (float): Upper bound for clip-cov loss.
kl_cov_ratio (float): Ratio of tokens to be applied KL penalty for kl-cov loss.
ppo_kl_coef (float): KL divergence penalty coefficient.
rollout_correction (RolloutCorrectionConfig): Configuration for rollout correction.
"""
loss_mode: str = "vanilla"
clip_cov_ratio: float = 0.0002
clip_cov_lb: float = 1.0
clip_cov_ub: float = 5.0
kl_cov_ratio: float = 0.0002
ppo_kl_coef: float = 0.1
rollout_correction: RolloutCorrectionConfig = field(default_factory=RolloutCorrectionConfig)
@dataclass
class ActorConfig(BaseConfig):
"""Configuration for actor model training.
The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config.
Args:
strategy (str): Training strategy. Must be specified.
ppo_mini_batch_size (int): Mini-batch size for PPO training.
ppo_micro_batch_size (Optional[int]): Micro-batch size for PPO training.
If None, uses ppo_micro_batch_size_per_gpu.
ppo_micro_batch_size_per_gpu (Optional[int]): Micro-batch size per GPU for PPO training.
use_dynamic_bsz (bool): Whether to use dynamic batch sizing.
ppo_max_token_len_per_gpu (int): Maximum token length per GPU for PPO training.
clip_ratio (float): PPO clipping ratio for policy loss.
clip_ratio_low (float): Lower bound for PPO clipping ratio.
clip_ratio_high (float): Upper bound for PPO clipping ratio.
policy_loss (PolicyLossConfig): Configuration for policy loss computation.
        clip_ratio_c (float): Lower bound constant C for dual-clip PPO.
        loss_agg_mode (str): Loss aggregation mode. Options: 'token-mean', 'seq-mean-token-sum',
            'seq-mean-token-mean', 'seq-mean-token-sum-norm'.
loss_scale_factor (Optional[int]): Scale factor for 'seq-mean-token-sum-norm' loss aggregation mode.
If None, uses response_length. Set to a constant to ensure consistent normalization.
entropy_coeff (float): Entropy coefficient for regularization.
tau_pos (float): Positive tau for SAPO smoothing (>= 1.0 keeps rewards stable).
tau_neg (float): Negative tau for SAPO smoothing (> tau_pos for asymmetry).
use_kl_loss (bool): Whether to use KL divergence loss.
use_torch_compile (bool): Whether to use torch.compile for optimization.
kl_loss_coef (float): KL divergence loss coefficient.
kl_loss_type (str): Type of KL loss to use.
ppo_epochs (int): Number of PPO epochs per training step.
shuffle (bool): Whether to shuffle data during training.
checkpoint (CheckpointConfig): Configuration for checkpointing.
optim (OptimizerConfig): Configuration for optimizer.
use_fused_kernels (bool): Whether to use custom fused kernels (e.g., FlashAttention, fused MLP).
data_loader_seed (int): Seed for data loader. If None, uses global seed.
router_replay (RouterReplayConfig): Configuration for router replay in MoE models.
"""
_mutable_fields = BaseConfig._mutable_fields | {
"ppo_mini_batch_size",
"ppo_micro_batch_size",
"ppo_micro_batch_size_per_gpu",
"ppo_infer_micro_batch_size_per_gpu",
"engine",
"model_config",
}
strategy: str = MISSING
ppo_mini_batch_size: int = 256
ppo_micro_batch_size: Optional[int] = None # deprecate
ppo_micro_batch_size_per_gpu: Optional[int] = None
ppo_infer_micro_batch_size_per_gpu: Optional[int] = None
use_dynamic_bsz: bool = False
ppo_max_token_len_per_gpu: int = 16384
ppo_infer_max_token_len_per_gpu: int = 16384
clip_ratio: float = 0.2
clip_ratio_low: float = 0.2
clip_ratio_high: float = 0.2
freeze_vision_tower: bool = False
policy_loss: PolicyLossConfig = field(default_factory=PolicyLossConfig)
clip_ratio_c: float = 3.0
loss_agg_mode: str = "token-mean"
loss_scale_factor: Optional[int] = None
entropy_coeff: float = 0
tau_pos: float = 1.0
tau_neg: float = 1.05
calculate_entropy: bool = False
use_kl_loss: bool = False
# Whether to enable PrefixGrouper-based shared-prefix forward
use_prefix_grouper: bool = False
use_torch_compile: bool = True
kl_loss_coef: float = 0.001
kl_loss_type: str = "low_var_kl"
ppo_epochs: int = 1
shuffle: bool = False
data_loader_seed: int = 1
checkpoint: CheckpointConfig = field(default_factory=CheckpointConfig)
optim: OptimizerConfig = field(default_factory=OptimizerConfig)
use_fused_kernels: bool = False
profiler: ProfilerConfig = field(default_factory=ProfilerConfig)
engine: BaseConfig = field(default_factory=BaseConfig)
    rollout_n: int = MISSING  # must be overridden by the sampling config
model_config: HFModelConfig = field(default_factory=BaseConfig)
router_replay: RouterReplayConfig = field(default_factory=RouterReplayConfig)
# Store global batch info for loss aggregation:
# dp_size: data parallel size
# batch_num_tokens: number of valid tokens in global batch
# global_batch_size: global batch size
global_batch_info: dict = field(default_factory=dict)
def __post_init__(self):
"""Validate actor configuration parameters."""
assert self.strategy != MISSING
assert self.rollout_n != MISSING
if not self.use_dynamic_bsz:
if self.ppo_micro_batch_size is not None and self.ppo_micro_batch_size_per_gpu is not None:
raise ValueError(
"[actor] You have set both 'actor.ppo_micro_batch_size' AND 'actor.ppo_micro_batch_size_per_gpu'. "
"Please remove 'actor.ppo_micro_batch_size' because only '*_ppo_micro_batch_size_per_gpu' is "
"supported (the former is deprecated)."
)
else:
assert not (self.ppo_micro_batch_size is None and self.ppo_micro_batch_size_per_gpu is None), (
"[actor] Please set at least one of 'actor.ppo_micro_batch_size' or "
"'actor.ppo_micro_batch_size_per_gpu' if use_dynamic_bsz is not enabled."
)
valid_loss_agg_modes = [
"token-mean",
"seq-mean-token-sum",
"seq-mean-token-mean",
"seq-mean-token-sum-norm",
]
if self.loss_agg_mode not in valid_loss_agg_modes:
raise ValueError(f"Invalid loss_agg_mode: {self.loss_agg_mode}")
def validate(self, n_gpus: int, train_batch_size: int, model_config: dict = None):
"""Validate actor configuration with runtime parameters."""
if not self.use_dynamic_bsz:
if train_batch_size < self.ppo_mini_batch_size:
raise ValueError(
f"train_batch_size ({train_batch_size}) must be >= "
f"actor.ppo_mini_batch_size ({self.ppo_mini_batch_size})"
)
sp_size = getattr(self, "ulysses_sequence_parallel_size", 1)
if self.ppo_micro_batch_size is not None:
if self.ppo_mini_batch_size % self.ppo_micro_batch_size != 0:
raise ValueError(
f"ppo_mini_batch_size ({self.ppo_mini_batch_size}) must be divisible by "
f"ppo_micro_batch_size ({self.ppo_micro_batch_size})"
)
if self.ppo_micro_batch_size * sp_size < n_gpus:
raise ValueError(
f"ppo_micro_batch_size ({self.ppo_micro_batch_size}) * "
f"ulysses_sequence_parallel_size ({sp_size}) must be >= n_gpus ({n_gpus})"
)
@staticmethod
def _check_mutually_exclusive(mbs, mbs_per_gpu, name: str):
"""Validate mutually exclusive micro batch size configuration options."""
param = "ppo_micro_batch_size"
param_per_gpu = f"{param}_per_gpu"
if mbs is None and mbs_per_gpu is None:
raise ValueError(f"[{name}] Please set at least one of '{name}.{param}' or '{name}.{param_per_gpu}'.")
if mbs is not None and mbs_per_gpu is not None:
raise ValueError(
f"[{name}] You have set both '{name}.{param}' AND '{name}.{param_per_gpu}'. Please remove "
f"'{name}.{param}' because only '*_{param_per_gpu}' is supported (the former is deprecated)."
)
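# --- Illustrative sketch (editor addition, not part of the original module) ---
# Shows the micro-batch-size validation performed in ActorConfig.__post_init__ above.
# The function name is hypothetical and never invoked by library code;
# OptimizerConfig(lr=0.1) is only a placeholder optimizer setting.
def _example_actor_config_batch_size_validation():
    # With dynamic batch sizing, neither micro batch size field is required.
    ok = ActorConfig(strategy="fsdp", use_dynamic_bsz=True, optim=OptimizerConfig(lr=0.1), rollout_n=1)
    assert ok.use_dynamic_bsz
    # Setting both the deprecated global field and the per-GPU field is rejected.
    try:
        ActorConfig(
            strategy="fsdp",
            use_dynamic_bsz=False,
            ppo_micro_batch_size=4,
            ppo_micro_batch_size_per_gpu=2,
            optim=OptimizerConfig(lr=0.1),
            rollout_n=1,
        )
    except ValueError:
        pass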
@dataclass
class McoreActorConfig(ActorConfig):
"""Configuration for Megatron actor models.
The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config.
Args:
strategy (str): Training strategy set to 'megatron' for Megatron parallelism.
load_weight (bool): Whether to load model weights from checkpoint.
megatron (dict[str, Any]): Configuration for Megatron parallelism settings.
profile (dict[str, Any]): Configuration for profiling settings.
"""
strategy: str = "megatron"
load_weight: bool = True
megatron: McoreEngineConfig = field(default_factory=McoreEngineConfig)
profile: dict[str, Any] = field(default_factory=dict)
use_rollout_log_probs: bool = False
def __post_init__(self):
"""Validate FSDP actor configuration parameters."""
super().__post_init__()
self.engine = self.megatron
@dataclass
class FSDPActorConfig(ActorConfig):
"""Configuration for FSDP actor models.
The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config.
Args:
strategy (str): Training strategy set to 'fsdp' for Fully Sharded Data Parallel.
grad_clip (float): Gradient clipping threshold.
ulysses_sequence_parallel_size (int): [DEPRECATED] Ulysses sequence parallel size for long sequences.
entropy_from_logits_with_chunking (bool): Whether to compute entropy from logits
with chunking for memory efficiency.
entropy_checkpointing (bool): Whether to use gradient checkpointing for entropy computation.
fsdp_config (dict[str, Any]): Configuration for FSDP settings.
use_remove_padding (bool): Whether to remove padding tokens in inputs during training
"""
strategy: str = "fsdp"
grad_clip: float = 1.0
ulysses_sequence_parallel_size: int = 1
entropy_from_logits_with_chunking: bool = False
entropy_checkpointing: bool = False
fsdp_config: FSDPEngineConfig = field(default_factory=FSDPEngineConfig)
use_remove_padding: bool = False
use_rollout_log_probs: bool = False
calculate_sum_pi_squared: bool = False
sum_pi_squared_checkpointing: bool = False
qat: QATConfig = field(default_factory=QATConfig)
def __post_init__(self):
"""Validate FSDP actor configuration parameters."""
super().__post_init__()
self.engine = self.fsdp_config
# backward compatibility
if self.ulysses_sequence_parallel_size > 1:
self.fsdp_config.ulysses_sequence_parallel_size = self.ulysses_sequence_parallel_size
def validate(self, n_gpus: int, train_batch_size: int, model_config: dict = None):
"""Validate FSDP actor configuration with runtime parameters."""
super().validate(n_gpus, train_batch_size, model_config)
if self.strategy in {"fsdp", "fsdp2"} and self.ulysses_sequence_parallel_size > 1:
if model_config and not model_config.get("use_remove_padding", False):
raise ValueError(
"When using sequence parallelism for actor/ref policy, you must enable `use_remove_padding`."
)
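# --- Illustrative sketch (editor addition, not part of the original module) ---
# Demonstrates the sequence-parallelism check in FSDPActorConfig.validate above:
# with ulysses_sequence_parallel_size > 1, `use_remove_padding` must be enabled in the
# model config. The function name is hypothetical and never called by library code.
def _example_fsdp_actor_sp_validation():
    cfg = FSDPActorConfig(
        strategy="fsdp",
        ulysses_sequence_parallel_size=2,
        use_dynamic_bsz=True,
        optim=OptimizerConfig(lr=0.1),
        rollout_n=1,
    )
    try:
        cfg.validate(n_gpus=8, train_batch_size=256, model_config={"use_remove_padding": False})
    except ValueError:
        pass  # expected: remove-padding is required for sequence parallelism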
@dataclass
class VeOmniActorConfig(ActorConfig):
"""Configuration for VeOmni actor models.
The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config.
Args:
strategy (str): Training strategy set to 'veomni' for VeOmni parallelism.
veomni (dict[str, Any]): Configuration for VeOmni settings.
use_remove_padding (bool): Whether to remove padding tokens in inputs during training
"""
strategy: str = "veomni"
veomni: VeOmniEngineConfig = field(default_factory=VeOmniEngineConfig)
use_remove_padding: bool = False
use_rollout_log_probs: bool = False
def __post_init__(self):
"""Validate VeOmni actor configuration parameters."""
super().__post_init__()
self.engine = self.veomni
@dataclass
class TorchTitanActorConfig(ActorConfig):
"""Configuration for TorchTitan actor models.
The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config.
Args:
strategy (str): Training strategy set to 'torchtitan' for TorchTitan parallelism.
torchtitan (TorchtitanEngineConfig): Configuration for TorchTitan engine settings.
use_remove_padding (bool): Whether to remove padding tokens in inputs during training
use_rollout_log_probs (bool): Whether to use log probabilities from rollout engine
"""
strategy: str = "torchtitan"
torchtitan: TorchtitanEngineConfig = field(default_factory=TorchtitanEngineConfig)
use_remove_padding: bool = False
use_rollout_log_probs: bool = False
def __post_init__(self):
"""Validate TorchTitan actor configuration parameters."""
super().__post_init__()
self.engine = self.torchtitan
| {
"repo_id": "verl-project/verl",
"file_path": "verl/workers/config/actor.py",
"license": "Apache License 2.0",
"lines": 311,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/workers/config/critic.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from dataclasses import dataclass, field
from typing import Optional
from omegaconf import MISSING
from verl.base_config import BaseConfig
from verl.trainer.config import BaseModelConfig, CheckpointConfig
from verl.utils.profiler import ProfilerConfig
from .engine import FSDPEngineConfig, McoreEngineConfig, TorchtitanEngineConfig
from .model import HFModelConfig
from .optimizer import OptimizerConfig
__all__ = ["CriticConfig", "FSDPCriticConfig", "McoreCriticConfig", "TorchTitanCriticConfig", "FSDPCriticModelCfg"]
@dataclass
class CriticConfig(BaseConfig):
"""Configuration for critic model training.
The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config.
Args:
strategy (str): Strategy used for critic model training (fsdp, fsdp2, megatron).
ppo_micro_batch_size_per_gpu (int): Local per-GPU micro batch size.
rollout_n (int): Number of rollouts per update (mirrors actor rollout_n).
optim (Dict[str, Any]): Optimizer configuration including lr, weight_decay, etc.
model (Dict[str, Any]): Model configuration including path, tokenizer_path, etc.
ppo_mini_batch_size (int): PPO mini-batch size per update.
ppo_micro_batch_size (Optional[int]): Global micro batch size (deprecated).
use_dynamic_bsz (bool): Whether to automatically adjust batch size at runtime.
ppo_max_token_len_per_gpu (int): Max tokens per GPU in one PPO batch.
forward_max_token_len_per_gpu (int): Max token length per GPU in forward pass.
ppo_epochs (int): Number of PPO epochs per batch.
shuffle (bool): Shuffle training data across PPO epochs.
cliprange_value (float): PPO value function clipping range.
loss_agg_mode (str): Loss aggregation mode.
checkpoint (Dict[str, Any]): Checkpoint configuration.
profiler (Dict[str, Any]): Profiler configuration.
enable (Optional[bool]): Whether to enable the critic.
"""
_mutable_fields = BaseConfig._mutable_fields | {
"ppo_micro_batch_size_per_gpu",
"ppo_mini_batch_size",
"ppo_micro_batch_size",
"model_config",
}
strategy: str = MISSING
ppo_micro_batch_size_per_gpu: Optional[int] = None
enable: Optional[bool] = None
rollout_n: int = 1
ppo_mini_batch_size: int = 1
use_dynamic_bsz: bool = False
ppo_max_token_len_per_gpu: int = 32768
# deprecate this
forward_max_token_len_per_gpu: int = 32768
ppo_infer_micro_batch_size_per_gpu: Optional[int] = None
ppo_infer_max_token_len_per_gpu: int = 32768
ppo_epochs: int = 1
data_loader_seed: int = 1
shuffle: bool = True
cliprange_value: float = 0.5
loss_agg_mode: str = "token-mean"
ppo_micro_batch_size: Optional[int] = None
engine: BaseConfig = field(default_factory=BaseConfig)
optim: OptimizerConfig = field(default_factory=OptimizerConfig)
# deprecate model to favor model_config
model: BaseModelConfig = field(default_factory=BaseModelConfig)
    model_config: Optional[HFModelConfig] = None
checkpoint: CheckpointConfig = field(default_factory=CheckpointConfig)
profiler: ProfilerConfig = field(default_factory=ProfilerConfig)
def __post_init__(self):
"""Validate critic configuration parameters."""
assert self.strategy != MISSING
if self.model_config is None:
warnings.warn("using model in Critic Config is deprecated, please use model_config instead", stacklevel=2)
self.model_config = HFModelConfig(
path=self.model.path,
tokenizer_path=self.model.tokenizer_path,
override_config=self.model.override_config,
external_lib=self.model.external_lib,
trust_remote_code=self.model.trust_remote_code,
)
if not self.use_dynamic_bsz:
self._check_mutually_exclusive(self.ppo_micro_batch_size, self.ppo_micro_batch_size_per_gpu, "critic")
if self.ppo_micro_batch_size is not None:
if self.ppo_mini_batch_size % self.ppo_micro_batch_size != 0:
raise ValueError(
f"[critic] ppo_mini_batch_size ({self.ppo_mini_batch_size}) must be divisible by "
f"ppo_micro_batch_size ({self.ppo_micro_batch_size})"
)
def validate(self, n_gpus: int, train_batch_size: int):
"""Validate critic configuration with runtime parameters.
Args:
n_gpus: Total number of GPUs available
train_batch_size: Training batch size from data config
"""
if not self.use_dynamic_bsz:
if train_batch_size < self.ppo_mini_batch_size:
raise ValueError(
f"train_batch_size ({train_batch_size}) must be >= "
f"critic.ppo_mini_batch_size ({self.ppo_mini_batch_size})"
)
@staticmethod
def _check_mutually_exclusive(mbs, mbs_per_gpu, name: str):
"""Validate mutually exclusive micro batch size configuration options.
Ensures that users don't set both deprecated micro_batch_size and
the new micro_batch_size_per_gpu parameters simultaneously.
Args:
mbs: Deprecated micro batch size parameter value.
mbs_per_gpu: New micro batch size per GPU parameter value.
name (str): Configuration section name for error messages.
Raises:
ValueError: If both parameters are set or neither is set.
"""
param = "micro_batch_size"
param_per_gpu = f"{param}_per_gpu"
if mbs is None and mbs_per_gpu is None:
raise ValueError(f"[{name}] Please set at least one of '{name}.{param}' or '{name}.{param_per_gpu}'.")
if mbs is not None and mbs_per_gpu is not None:
raise ValueError(
f"[{name}] You have set both '{name}.{param}' AND '{name}.{param_per_gpu}'. Please remove "
f"'{name}.{param}' because only '*_{param_per_gpu}' is supported (the former is deprecated)."
)
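# --- Illustrative sketch (editor addition, not part of the original module) ---
# Exercises only the pure static helper defined above; it deliberately avoids
# constructing a CriticConfig (which may trigger model-config loading).
# The function name is hypothetical and never called by library code.
def _example_critic_micro_batch_size_check():
    # Exactly one of the two micro batch size options may be provided.
    CriticConfig._check_mutually_exclusive(None, 2, "critic")
    try:
        CriticConfig._check_mutually_exclusive(4, 2, "critic")  # both set -> rejected
    except ValueError:
        pass
    try:
        CriticConfig._check_mutually_exclusive(None, None, "critic")  # neither set -> rejected
    except ValueError:
        pass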
@dataclass
class McoreCriticConfig(CriticConfig):
"""Configuration for Megatron-based critic model training.
The inheritance from CriticConfig provides all base critic configuration plus Megatron-specific settings.
Args:
nccl_timeout (int): NCCL timeout in seconds for distributed operations.
megatron (Dict[str, Any]): Megatron-specific parallelism settings.
load_weight (bool): Whether to load initial weights.
"""
strategy: str = "megatron"
nccl_timeout: int = 600
megatron: McoreEngineConfig = field(default_factory=McoreEngineConfig)
load_weight: bool = True
def validate(self, n_gpus: int, train_batch_size: int):
"""Validate Megatron critic configuration with runtime parameters."""
super().validate(n_gpus, train_batch_size)
@dataclass
class FSDPCriticConfig(CriticConfig):
"""Configuration for FSDP-based critic model training.
The inheritance from CriticConfig provides all base critic configuration plus FSDP-specific settings.
Args:
forward_micro_batch_size (int): Forward-only batch size during inference (global).
forward_micro_batch_size_per_gpu (int): Forward-only batch size during inference (per GPU).
ulysses_sequence_parallel_size (int): [DEPRECATED] Ulysses sequence parallel size for long sequences.
grad_clip (float): Gradient clipping for critic updates.
"""
_mutable_fields = CriticConfig._mutable_fields | {
"forward_micro_batch_size",
"forward_micro_batch_size_per_gpu",
}
strategy: str = "fsdp"
forward_micro_batch_size: int = 1
forward_micro_batch_size_per_gpu: int = 1
ulysses_sequence_parallel_size: int = 1
grad_clip: float = 1.0
def __post_init__(self):
"""Validate FSDP critic configuration parameters."""
super().__post_init__()
if self.strategy in {"fsdp", "fsdp2"}:
if self.ulysses_sequence_parallel_size > 1:
if not self.model.get("use_remove_padding", False):
raise ValueError(
"When using sequence parallelism for critic, you must enable `use_remove_padding`."
)
def validate(self, n_gpus: int, train_batch_size: int):
"""Validate FSDP critic configuration with runtime parameters."""
super().validate(n_gpus, train_batch_size)
if not self.use_dynamic_bsz:
sp_size = self.ulysses_sequence_parallel_size
if self.ppo_micro_batch_size is not None:
if self.ppo_micro_batch_size * sp_size < n_gpus:
raise ValueError(
f"critic.ppo_micro_batch_size ({self.ppo_micro_batch_size}) * "
f"ulysses_sequence_parallel_size ({sp_size}) must be >= n_gpus ({n_gpus})"
)
@dataclass
class TorchTitanCriticConfig(CriticConfig):
"""Configuration for TorchTitan-based critic model training.
The inheritance from CriticConfig provides all base critic configuration plus TorchTitan-specific settings.
Args:
strategy (str): Training strategy set to 'torchtitan' for TorchTitan parallelism.
torchtitan (TorchtitanEngineConfig): Configuration for TorchTitan engine settings.
"""
strategy: str = "torchtitan"
torchtitan: TorchtitanEngineConfig = field(default_factory=TorchtitanEngineConfig)
def __post_init__(self):
"""Validate TorchTitan critic configuration parameters."""
super().__post_init__()
self.engine = self.torchtitan
@dataclass
class FSDPCriticModelCfg(BaseModelConfig):
"""FSDP-enabled critic model configuration.
Inherits base critic settings and adds distributed-memory and LoRA options.
Args:
use_shm (bool): Whether to use shared memory for loading the model.
enable_activation_offload (bool): Offload activations to CPU to reduce GPU memory usage.
use_remove_padding (bool): Use remove-padding optimization (saves compute).
enable_gradient_checkpointing (bool): Enable gradient checkpointing for memory efficiency.
fsdp_config (FSDPEngineConfig): FSDP-specific configuration block.
lora_rank (int): Set to positive value to enable LoRA (e.g., 32).
lora_alpha (int): LoRA scaling factor.
target_modules (Union[str, List[str]]): LoRA target modules: "all-linear" or list of layer names.
"""
use_shm: bool = False
enable_activation_offload: bool = False
use_remove_padding: bool = False
enable_gradient_checkpointing: bool = True
fsdp_config: FSDPEngineConfig = field(default_factory=FSDPEngineConfig)
lora_rank: int = 0
lora_alpha: int = 16
target_modules: str | list[str] = "all-linear"
# TiledMLP configuration for memory-efficient MLP computation
tiled_mlp: dict = field(default_factory=lambda: {"enabled": False, "num_shards": 4})
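# --- Illustrative sketch (editor addition, not part of the original module) ---
# A LoRA-enabled critic model configuration built only from the fields declared above,
# assuming BaseModelConfig's defaults are sufficient for construction; the values are
# placeholders and the function name is hypothetical.
def _example_lora_critic_model_cfg():
    cfg = FSDPCriticModelCfg(lora_rank=32, lora_alpha=16, target_modules="all-linear")
    assert cfg.lora_rank == 32 and cfg.target_modules == "all-linear"
    return cfg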
| {
"repo_id": "verl-project/verl",
"file_path": "verl/workers/config/critic.py",
"license": "Apache License 2.0",
"lines": 223,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/workers/config/engine.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from dataclasses import dataclass, field
from typing import Any, Callable, Literal, Optional
from verl.base_config import BaseConfig
from verl.trainer.config import CheckpointConfig
from ...utils.profiler import ProfilerConfig
from .model import HFModelConfig
from .optimizer import OptimizerConfig
__all__ = [
"FSDPEngineConfig",
"McoreEngineConfig",
"TrainingWorkerConfig",
"TorchtitanEngineConfig",
"VeOmniEngineConfig",
"EngineConfig",
"EngineRouterReplayConfig",
"QATEngineConfig",
]
# TODO: rename to RouterReplayConfig after removing the legacy implementation
@dataclass
class EngineRouterReplayConfig(BaseConfig):
"""Configuration for router replay in MoE models.
This configuration controls the routing behavior for Mixture of Experts (MoE) models,
allowing for deterministic training through route recording and replay.
Args:
mode (str): Router replay mode. Options: 'disabled', 'R2', 'R3'.
- 'disabled': No router replay functionality
- 'R2': Use Router Replay routing strategy
- 'R3': Use Rollout Router Replay routing strategy
        record_file (Optional[str]): File path to save recorded routing decisions.
            Required when mode is 'R2' or 'R3'.
        replay_file (Optional[str]): File path to load previously recorded routing decisions
            when replaying them.
"""
mode: str = "disabled"
record_file: Optional[str] = None
replay_file: Optional[str] = None
def __post_init__(self):
"""Validate router replay configuration."""
valid_modes = ["disabled", "R2", "R3"]
if self.mode not in valid_modes:
raise ValueError(f"Invalid router_replay mode: {self.mode}. Must be one of {valid_modes}")
@dataclass
class EngineConfig(BaseConfig):
_mutable_fields = BaseConfig._mutable_fields | {
"use_dynamic_bsz",
"max_token_len_per_gpu",
"micro_batch_size_per_gpu",
"infer_max_token_len_per_gpu",
"infer_micro_batch_size_per_gpu",
"use_fused_kernels",
"use_remove_padding",
"forward_only",
"param_offload",
}
# whether to offload param
param_offload: bool = False
# whether to offload optimizer
optimizer_offload: bool = False
# whether to offload grad
grad_offload: bool = False
# whether the engine is forward only (e.g., ref policy)
forward_only: bool = False
# the strategy (backend)
strategy: str = None
# model dtype
dtype: str = "bfloat16" # ["bfloat16", "float16"]
# whether to use dynamic bsz
use_dynamic_bsz: bool = True
# for training
max_token_len_per_gpu: int = None
micro_batch_size_per_gpu: int = None
# for inference
infer_max_token_len_per_gpu: int = None
infer_micro_batch_size_per_gpu: int = None
    # whether to use the fused lm head kernel
use_fused_kernels: bool = False
# TODO (this may conflict with the one in model config)
use_remove_padding: bool = True
seed: int = 42
full_determinism: bool = False
router_replay: EngineRouterReplayConfig = field(default_factory=EngineRouterReplayConfig)
def __post_init__(self):
pass
# TODO: turn on this check after we reorg config
# if self.use_dynamic_bsz:
# assert self.max_token_len_per_gpu is not None
# else:
# assert self.micro_batch_size_per_gpu is not None
@dataclass
class McoreEngineConfig(EngineConfig):
"""Configuration for Megatron parallelism.
The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config.
Args:
param_offload (bool): Whether to offload parameters to CPU.
grad_offload (bool): Whether to offload gradients to CPU.
optimizer_offload (bool): Whether to offload optimizer states to CPU.
tensor_model_parallel_size (int): Tensor model parallel size.
expert_model_parallel_size (int): Expert model parallel size for MoE models.
expert_tensor_parallel_size (Optional[int]): Expert tensor parallel size for MoE models.
pipeline_model_parallel_size (int): Pipeline model parallel size.
virtual_pipeline_model_parallel_size (Optional[int]): Virtual pipeline model parallel size
for interleaved scheduling.
context_parallel_size (int): Context parallel size for long sequences.
sequence_parallel (bool): Whether to enable sequence parallelism.
use_distributed_optimizer (bool): Whether to use distributed optimizer.
use_dist_checkpointing (bool): Whether to use distributed checkpointing.
dist_checkpointing_path (Optional[str]): Path for distributed checkpointing.
dist_ckpt_optim_fully_reshardable (bool): Use fully reshardable optimizer checkpoints.
distrib_optim_fully_reshardable_mem_efficient (bool): Use memory-efficient fully reshardable format.
seed (int): Random seed for reproducibility.
override_ddp_config (dict[str, Any]): Override configuration for DDP.
override_transformer_config (dict[str, Any]): Override configuration for transformer.
use_mbridge (bool): Whether to use MBridge for communication.
dtype (str): Mixed precision training param dtype, default "bfloat16"
"""
    # sequence_parallel is not listed as a frozen field for auto-correction purposes
_mutable_fields = EngineConfig._mutable_fields | {"sequence_parallel"}
# mcore parallelism
tensor_model_parallel_size: int = 1
expert_model_parallel_size: int = 1
expert_tensor_parallel_size: Optional[int] = None
pipeline_model_parallel_size: int = 1
virtual_pipeline_model_parallel_size: Optional[int] = None
context_parallel_size: int = 1
sequence_parallel: bool = True
use_distributed_optimizer: bool = True
use_dist_checkpointing: bool = False
dist_checkpointing_path: Optional[str] = None
dist_checkpointing_prefix: str = ""
dist_ckpt_optim_fully_reshardable: bool = False
distrib_optim_fully_reshardable_mem_efficient: bool = False
override_ddp_config: dict[str, Any] = field(default_factory=dict)
override_transformer_config: dict[str, Any] = field(default_factory=dict)
override_mcore_model_config: dict[str, Any] = field(default_factory=dict)
use_mbridge: bool = True
vanilla_mbridge: bool = True
strategy: str = "megatron"
    def __post_init__(self) -> None:
        """Config validation logic goes here."""
        super().__post_init__()
        assert self.strategy == "megatron"
assert self.dtype in ["bfloat16", "float16"], f"dtype {self.dtype} not supported"
if self.tensor_model_parallel_size == 1:
warnings.warn("set sequence parallel to false as TP size is 1", stacklevel=2)
self.sequence_parallel = False
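# --- Illustrative sketch (editor addition, not part of the original module) ---
# Shows the auto-correction performed in McoreEngineConfig.__post_init__ above:
# sequence parallelism is disabled (with a warning) whenever the TP size is 1.
# The function name is hypothetical and never called by library code.
def _example_sequence_parallel_autocorrection():
    tp1 = McoreEngineConfig(tensor_model_parallel_size=1, sequence_parallel=True)
    assert tp1.sequence_parallel is False  # flipped during __post_init__
    tp2 = McoreEngineConfig(tensor_model_parallel_size=2)
    assert tp2.sequence_parallel is True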
@dataclass
class QATEngineConfig(BaseConfig):
"""Configuration for QAT (Quantization-Aware Training) within an engine.
Args:
enable (bool): Whether to enable QAT, default False
mode (str): Quantization mode, "w4a16" or "w4a4", default "w4a16"
group_size (int): Group size for blockwise quantization, default 16
ignore_patterns (list[str]): Module name patterns to exclude from quantization
activation_observer (str): Observer strategy for activation global_scale (W4A4 only)
quantization_config_path (Optional[str]): Path to quantization config JSON for vLLM
"""
enable: bool = False
mode: str = "w4a16"
group_size: int = 16
ignore_patterns: list[str] = field(default_factory=lambda: ["lm_head", "embed_tokens", "re:.*mlp.gate$"])
activation_observer: str = "static_minmax"
quantization_config_path: Optional[str] = None
@dataclass
class FSDPEngineConfig(EngineConfig):
"""Configuration for FSDP (Fully Sharded Data Parallel).
The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config.
Args:
wrap_policy (Dict[str, Any]): Configuration for FSDP wrap policy.
param_offload (bool): Whether to offload parameters to CPU, default False
optimizer_offload (bool): Whether to offload optimizer states to CPU, default False
offload_policy (bool): Whether to offload policy model parameters, default False
reshard_after_forward (bool): Whether to reshard parameters after forward pass, default True
fsdp_size (int): FSDP group size. -1 means use all available GPUs.
forward_prefetch (bool): Whether to prefetch parameters for next forward pass, default False
model_dtype (str): Model data type used to initialize the transformers model. default "fp32"
        use_orig_params (bool): Whether to use original parameters when initializing FSDP1, default False
seed (int): Random seed for reproducibility.
full_determinism (bool): If true, enable_full_determinism is called to ensure reproducible results
in distributed training. Important: this will negatively impact performance, so only use it for
debugging.
mixed_precision (Optional[dict[str, Any]]): Mixed precision configuration for FSDP, default None
dtype (str): Mixed precision training param dtype, default "bfloat16"
qat (QATEngineConfig): QAT configuration, default disabled
"""
# ulysses_sequence_parallel_size is mutable for backward compatibility
_mutable_fields = EngineConfig._mutable_fields | {"ulysses_sequence_parallel_size"}
# fsdp specific flags
wrap_policy: dict[str, Any] = field(default_factory=dict)
offload_policy: bool = False
reshard_after_forward: bool = True
fsdp_size: int = -1
forward_prefetch: bool = False
model_dtype: str = "fp32"
use_orig_params: bool = False
mixed_precision: Optional[dict[str, Any]] = None
ulysses_sequence_parallel_size: int = 1
entropy_from_logits_with_chunking: bool = False
use_torch_compile: bool = True
entropy_checkpointing: bool = False
strategy: str = "fsdp"
qat: QATEngineConfig = field(default_factory=QATEngineConfig)
def __post_init__(self):
super().__post_init__()
assert self.strategy in ["fsdp", "fsdp2"], f"strategy {self.strategy} not supported"
@dataclass
class VeOmniEngineConfig(EngineConfig):
"""Configuration for VeOmni.
The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config.
Args:
wrap_policy (Dict[str, Any]): Configuration for FSDP wrap policy.
param_offload (bool): Whether to offload parameters to CPU, default False
optimizer_offload (bool): Whether to offload optimizer states to CPU, default False
offload_policy (bool): Whether to offload policy model parameters, default False
reshard_after_forward (bool): Whether to reshard parameters after forward pass, default True
fsdp_size (int): FSDP group size. -1 means use all available GPUs, default -1
ulysses_parallel_size (int): Ulysses sequence parallel size, default 1
expert_parallel_size (int): Expert parallel size, default 1
init_device (str): Device to initialize model weights.
1. `cpu`: Init parameters on CPU in rank0 only.
2. `cuda`: Init parameters on GPU.
3. `meta`: Init parameters on meta.
4. `npu`: Init parameters on Ascend NPU.
default "meta"
enable_full_shard (bool): Enable fully shard for FSDP training (ZeRO-3), default False
enable_fsdp_offload (bool): Enable CPU offload for FSDP1, default False
enable_reentrant (bool): Use reentrant gradient checkpointing, default False
attn_implementation (str): Attention implementation to use.
1. `eager`
2. `sdpa`
3. `flash_attention_2`
4. `flash_attention_3`
5. `veomni_flash_attention_2_with_sp`
6. `veomni_flash_attention_3_with_sp`
7. `native-sparse`
default "flash_attention_2"
            Note: In case VeOmni adds more attn_implementation options, please check https://github.com/ByteDance-Seed/VeOmni/
moe_implementation (str): MoE implementation to use.
1. `eager`
2. `fused`
default "fused"
            Note: In case VeOmni adds more moe_implementation options, please check https://github.com/ByteDance-Seed/VeOmni/
force_use_huggingface (bool): Force loading model from huggingface, default False
        activation_gpu_limit (float): When activation offload is enabled, up to `activation_gpu_limit` GB of
            activations may be kept resident on the GPU, default 0.0
basic_modules (list[str]): List of basic modules to use, default None
forward_prefetch (bool): Whether to prefetch parameters for next forward pass, default False
model_dtype (str): Model data type used to initialize the transformers model. default "fp32"
use_orig_params (bool): Whether to use original parameters when initialize FSDP1, default False
seed (int): Random seed for reproducibility.
full_determinism (bool): If true, enable_full_determinism is called to ensure reproducible results
in distributed training. Important: this will negatively impact performance, so only use it for
debugging.
mixed_precision (Optional[dict[str, Any]]): Mixed precision configuration for FSDP, default None
"""
wrap_policy: dict[str, Any] = field(default_factory=dict)
offload_policy: bool = False
reshard_after_forward: bool = True
forward_prefetch: bool = False
use_orig_params: bool = False
entropy_from_logits_with_chunking: bool = False
use_torch_compile: bool = True
entropy_checkpointing: bool = False
strategy: str = "veomni"
fsdp_size: int = -1
ulysses_parallel_size: int = 1
expert_parallel_size: int = 1
seed: int = 42
full_determinism: bool = False
mixed_precision: bool = False
init_device: str = "meta"
enable_full_shard: bool = False
ckpt_manager: Literal["dcp"] = "dcp"
load_checkpoint_path: Optional[str] = None
enable_fsdp_offload: bool = False
enable_reentrant: bool = False
attn_implementation: str = "flash_attention_2"
moe_implementation: str = "fused"
force_use_huggingface: bool = False
activation_gpu_limit: float = 0.0
basic_modules: Optional[list[str]] = field(default_factory=list)
def __post_init__(self):
super().__post_init__()
assert self.strategy in ["veomni"], f"strategy {self.strategy} not supported"
@dataclass
class TorchtitanEngineConfig(EngineConfig):
"""Configuration for Torchtitan.
    The inheritance from BaseConfig provides an omegaconf.DictConfig-like interface for a dataclass config.
Args:
wrap_policy (Dict[str, Any]): Configuration for FSDP wrap policy.
reshard_after_forward (Literal["default", "always", "never"]): The policy for applying
`reshard_after_forward` within an FSDP setup, default "default"
forward_prefetch (bool): Whether to prefetch parameters for next forward pass, default False
use_orig_params (bool): Whether to use original parameters when initialize FSDP, default False
mixed_precision (bool): Mixed precision configuration for FSDP, default False
offload_policy (bool): Whether to offload policy model parameters, default False
data_parallel_size (int): Data parallel group size, default 1
data_parallel_replicate_size (int): Data parallel replicate size, default 1
data_parallel_shard_size (int): Data parallel shard degree, default 1
tensor_parallel_size (int): Tensor parallel size, default 1
expert_parallel_size (int): Expert parallel size, default 1
expert_tensor_parallel_size (int): Expert tensor parallel size, default 1
pipeline_parallel_size (int): Pipeline parallel size, default 1
context_parallel_size (int): Context parallel size, default 1
attn_type (str): Attention type for torchtitan's model (e.g., "sdpa", "flex", "varlen"),
default "flex"
strategy (str): Strategy to use for distributed training, default "torchtitan"
seed (int): Random seed for reproducibility.
full_determinism (bool): If true, enable_full_determinism is called to ensure reproducible results
in distributed training. Important: this will negatively impact performance, so only use it for
debugging.
"""
wrap_policy: dict[str, Any] = field(default_factory=dict)
reshard_after_forward: Literal["default", "always", "never"] = "default"
forward_prefetch: bool = False
use_orig_params: bool = False
mixed_precision: bool = False
offload_policy: bool = False
use_torch_compile: bool = True
entropy_from_logits_with_chunking: bool = False
entropy_checkpointing: bool = False
data_parallel_size: int = 1
data_parallel_replicate_size: int = 1
data_parallel_shard_size: int = 1
tensor_parallel_size: int = 1
expert_parallel_size: int = 1
expert_tensor_parallel_size: int = 1
pipeline_parallel_size: int = 1
context_parallel_size: int = 1
attn_type: str = "flex"
max_seq_len: Optional[int] = None
strategy: str = "torchtitan"
seed: int = 42
full_determinism: bool = False
def __post_init__(self):
super().__post_init__()
assert self.strategy in ["torchtitan"], f"strategy {self.strategy} not supported"
@dataclass
class TrainingWorkerConfig(BaseConfig):
model_type: str = None # model type (language_model/value_model)
model_config: HFModelConfig = None
engine_config: EngineConfig = None
optimizer_config: OptimizerConfig = None
checkpoint_config: CheckpointConfig = None
profiler_config: ProfilerConfig = None
# automatically select engine and optimizer function.
# This function takes model config and the device name as parameter.
# Users can pass in a higher-order function to take more parameters
auto_select_engine_optim_fn: Callable[["HFModelConfig", str], tuple["EngineConfig", "OptimizerConfig"]] = None
| {
"repo_id": "verl-project/verl",
"file_path": "verl/workers/config/engine.py",
"license": "Apache License 2.0",
"lines": 362,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/workers/config/optimizer.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from dataclasses import dataclass
from typing import Optional
from omegaconf import MISSING
from verl.base_config import BaseConfig
__all__ = [
"OptimizerConfig",
"FSDPOptimizerConfig",
"McoreOptimizerConfig",
"build_optimizer",
"VeOmniOptimizerConfig",
"TorchtitanOptimizerConfig",
]
@dataclass
class OptimizerConfig(BaseConfig):
"""Base optimizer configuration.
Args:
lr (float): learning rate. Must be specified.
lr_warmup_steps_ratio (float): Warmup steps ratio; total steps will be injected at runtime.
total_training_steps (int): Total training steps (must be overridden at runtime).
weight_decay (float): Weight decay factor.
lr_warmup_steps (Optional[int]): Number of warmup steps; None delegates to lr_warmup_steps_ratio.
"""
_mutable_fields = {"clip_grad", "total_training_steps", "lr_warmup_steps"}
lr: float = 1e-3
lr_warmup_steps_ratio: float = 0.0
total_training_steps: int = -1
weight_decay: float = 0.01
lr_warmup_steps: Optional[int] = -1
betas: tuple[float, float] = (0.9, 0.999)
clip_grad: float = 1.0
# deprecate grad_clip
grad_clip: Optional[float] = None
def __post_init__(self):
assert self.lr != MISSING
if self.grad_clip is not None:
warnings.warn("`grad_clip` is deprecated, use `clip_grad` instead.", DeprecationWarning, stacklevel=2)
self.clip_grad = self.grad_clip
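# Illustrative sketch (assumption, not upstream code): the deprecated `grad_clip` field is mirrored
# into `clip_grad` by __post_init__ above, alongside a DeprecationWarning.
def _example_optimizer_config_grad_clip():
    cfg = OptimizerConfig(lr=1e-4, grad_clip=0.5)  # emits a DeprecationWarning
    assert cfg.clip_grad == 0.5
    return cfg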
@dataclass
class VeOmniOptimizerConfig(OptimizerConfig):
"""VeOmni optimizer configuration extending base OptimizerConfig.
Args:
optimizer (str): Optimizer name; default is "adamw".
lr (float): Learning rate.
lr_min (float): Minimum learning rate.
lr_start (float): Starting learning rate for warmup.
lr_decay_ratio (float): LR decay ratio.
lr_scheduler_type (str): LR scheduler type: "constant" or "cosine".
"""
_mutable_fields = OptimizerConfig._mutable_fields.copy()
optimizer: str = "adamw"
lr_min: float = 0.0
lr_start: float = 0.0
lr_decay_ratio: float = 1.0
lr_scheduler_type: str = "constant"
override_optimizer_config: Optional[dict] = None
@dataclass
class FSDPOptimizerConfig(OptimizerConfig):
"""FSDP optimizer configuration extending base OptimizerConfig.
Args:
optimizer (str): Optimizer class name (e.g., "AdamW", "AdamW8bit", "_AdamW").
optimizer_impl (str): Module path to import optimizer from (e.g., "torch.optim", "torchao.optim",
"bitsandbytes.optim").
lr (float): Learning rate.
min_lr_ratio (Optional[float]): Minimum LR ratio for cosine schedule.
lr_scheduler_type (str): LR scheduler type: "constant" or "cosine".
num_cycles (float): Number of cosine cycles in LR schedule.
zero_indexed_step (bool): Whether the LR schedule uses 0-indexed steps. If True (default),
step counting starts at 0. If False, step counting starts at 1.
"""
_mutable_fields = OptimizerConfig._mutable_fields.copy()
_mutable_fields.add("lr_scheduler_type")
optimizer: str = "AdamW"
optimizer_impl: str = "torch.optim"
min_lr_ratio: Optional[float] = None
# deprecate warmup_style
warmup_style: Optional[str] = None
lr_scheduler_type: str = "constant"
num_cycles: float = 0.5
override_optimizer_config: Optional[dict] = None
zero_indexed_step: bool = True
def __post_init__(self):
if self.warmup_style is not None:
assert self.warmup_style in ["constant", "cosine"]
warnings.warn(
"`warmup_style` is deprecated, use `lr_scheduler_type` instead.", DeprecationWarning, stacklevel=2
)
self.lr_scheduler_type = self.warmup_style
assert self.lr_scheduler_type in ["constant", "cosine"]
return super().__post_init__()
@dataclass
class McoreOptimizerConfig(OptimizerConfig):
"""Mcore optimizer configuration extending base OptimizerConfig.
Args:
optimizer (str): Optimizer name; default is "adam".
lr (float): Learning rate.
clip_grad (float): Gradient clipping norm.
lr_warmup_init (float): Initial learning rate for warmup; defaults to 0.0.
lr_decay_steps (Optional[int]): Number of decay steps.
lr_decay_style (str): LR decay style: "constant", "linear", "cosine", or "inverse_square_root".
min_lr (float): Minimum learning rate.
weight_decay_incr_style (str): Weight decay increment style: "constant" or "cosine".
        lr_wsd_decay_style (str): Decay style of the WSD (warmup-stable-decay) schedule: "constant", "exponential", or "cosine".
        lr_wsd_decay_steps (Optional[int]): Number of steps for the WSD decay phase.
use_checkpoint_opt_param_scheduler (bool): Whether to use checkpoint optimizer parameter scheduler.
"""
optimizer: str = "adam"
lr_warmup_init: float = 0.0
lr_decay_steps: Optional[int] = None
lr_decay_style: str = "linear"
min_lr: float = 0.0
weight_decay_incr_style: str = "constant"
lr_wsd_decay_style: str = "exponential"
lr_wsd_decay_steps: Optional[int] = None
use_checkpoint_opt_param_scheduler: bool = False
override_optimizer_config: Optional[dict] = None
@dataclass
class TorchtitanOptimizerConfig(OptimizerConfig):
"""Torchtitan optimizer configuration extending base OptimizerConfig.
Args:
name (str): Optimizer name; default is "AdamW".
eps (float): Epsilon value for AdamW optimizer, default 1e-8.
decay_type (str): Weight decay type: "linear", "sqrt", or "cosine".
min_lr_factor (float): Minimum learning rate factor.
"""
name: str = "AdamW"
eps: float = 1e-8
decay_type: str = "linear"
min_lr_factor: float = 0.0
def build_optimizer(parameters, config: FSDPOptimizerConfig):
"""Build an optimizer based on the configuration.
Dynamically imports and instantiates an optimizer class from the specified module.
Args:
parameters: Model parameters to optimize
config: FSDPOptimizerConfig with optimizer settings
Returns:
Optimizer instance
Examples:
# PyTorch AdamW
config.optimizer_impl = "torch.optim"
config.optimizer = "AdamW"
# TorchAO AdamW with bf16 stochastic rounding
config.optimizer_impl = "torchao.optim"
config.optimizer = "_AdamW"
config.override_optimizer_config = {"bf16_stochastic_round": True}
# BitsAndBytes AdamW 8bit
config.optimizer_impl = "bitsandbytes.optim"
config.optimizer = "AdamW8bit"
"""
import importlib
optimizer_args = {
"lr": config.lr,
"weight_decay": config.weight_decay,
}
optimizer_name_lower = config.optimizer.lower()
if "adam" in optimizer_name_lower or "ademamix" in optimizer_name_lower:
optimizer_args["betas"] = config.betas
if config.override_optimizer_config is not None:
optimizer_args.update(config.override_optimizer_config)
try:
module = importlib.import_module(config.optimizer_impl)
optimizer_cls = getattr(module, config.optimizer)
except ImportError as e:
raise ImportError(
f"Failed to import module '{config.optimizer_impl}'. Make sure the package is installed. Error: {e}"
) from e
except AttributeError as e:
raise AttributeError(
f"Optimizer '{config.optimizer}' not found in module '{config.optimizer_impl}'. "
f"Available optimizers: {dir(module)}"
) from e
return optimizer_cls(parameters, **optimizer_args)
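# Illustrative usage sketch (assumption, not upstream code): building a plain torch.optim.AdamW for a
# toy model via the config-driven factory above. `model` here is a throwaway nn.Linear; swapping
# `optimizer_impl`/`optimizer` selects other backends as shown in the docstring examples.
def _example_build_optimizer():
    import torch.nn as nn
    model = nn.Linear(8, 8)
    config = FSDPOptimizerConfig(
        lr=1e-4,
        weight_decay=0.01,
        optimizer="AdamW",
        optimizer_impl="torch.optim",
    )
    return build_optimizer(model.parameters(), config)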
| {
"repo_id": "verl-project/verl",
"file_path": "verl/workers/config/optimizer.py",
"license": "Apache License 2.0",
"lines": 185,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/third_party/torch/distributed/_state_dict_utils.py | # official torch 2.6.0 set_model_state_dict API leads to OOM
# this is a copy of torch/distributed/checkpoint from torch 2.7.0
# From PyTorch:
# Copyright (c) 2016- Facebook, Inc (Adam Paszke)
# Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
# Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
# Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
# Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
# Copyright (c) 2011-2013 NYU (Clement Farabet)
# Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
# Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
# Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)
# From Caffe2:
# Copyright (c) 2016-present, Facebook Inc. All rights reserved.
# All contributions by Facebook:
# Copyright (c) 2016 Facebook Inc.
# All contributions by Google:
# Copyright (c) 2015 Google Inc.
# All rights reserved.
# All contributions by Yangqing Jia:
# Copyright (c) 2015 Yangqing Jia
# All rights reserved.
# All contributions by Kakao Brain:
# Copyright 2019-2020 Kakao Brain
# All contributions by Cruise LLC:
# Copyright (c) 2022 Cruise LLC.
# All rights reserved.
# All contributions by Tri Dao:
# Copyright (c) 2024 Tri Dao.
# All rights reserved.
# All contributions by Arm:
# Copyright (c) 2021, 2023-2024 Arm Limited and/or its affiliates
# All contributions from Caffe:
# Copyright(c) 2013, 2014, 2015, the respective contributors
# All rights reserved.
# All other contributions:
# Copyright(c) 2015, 2016 the respective contributors
# All rights reserved.
# Caffe2 uses a copyright model similar to Caffe: each contributor holds
# copyright over their contributions to Caffe2. The project versioning records
# all such contribution and copyright details. If a contributor wants to further
# mark their specific copyright on a particular contribution, they should
# indicate their copyright solely in the commit message of the change when it is
# committed.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
# and IDIAP Research Institute nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ruff: noqa: B028, UP038, UP007, E721, E501
# mypy: allow-untyped-defs
import copy
import io
import math
import weakref
from collections.abc import Mapping, MutableMapping
from typing import TYPE_CHECKING, Any, Callable, NamedTuple, Optional, Union, cast
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch.distributed._functional_collectives import AsyncCollectiveTensor
if dist.is_available() or TYPE_CHECKING:
from torch.distributed import distributed_c10d
from torch.distributed._shard.sharded_tensor import ShardedTensor
from torch.distributed.tensor import DTensor, Replicate, distribute_tensor
from torch.distributed.tensor._utils import compute_local_shape_and_global_offset
def _identity_func(
obj: torch.Tensor,
pg: Optional[dist.ProcessGroup],
device: Optional[torch.device],
companion_obj: Any,
) -> torch.Tensor:
return obj
def _all_gather_sharded_tensor(
sharded_tensor: "ShardedTensor",
pg: Optional[dist.ProcessGroup] = None,
device: Optional[torch.device] = None,
) -> torch.Tensor:
if pg is None:
pg = distributed_c10d._get_default_group()
world_size = dist.get_world_size(pg)
shards = sharded_tensor.local_shards()
dim_0_size = sharded_tensor.size()[0] # type: ignore[index]
tensor_numel = sharded_tensor.size().numel() # type: ignore[union-attr]
chunk_size = math.ceil(dim_0_size / world_size) * tensor_numel // dim_0_size
pg_device = distributed_c10d._get_pg_default_device(pg) if device is None else device
if shards:
local_tensor = shards[0].tensor.flatten()
if local_tensor.device.type != pg_device.type:
local_tensor = local_tensor.to(pg_device)
num_padding = chunk_size - local_tensor.numel()
if num_padding > 0:
local_tensor = F.pad(local_tensor, [0, num_padding])
else:
local_tensor = torch.zeros(chunk_size, dtype=sharded_tensor.dtype, device=pg_device)
tensor = torch.empty(
chunk_size * world_size,
dtype=local_tensor.dtype,
device=pg_device,
)
dist.all_gather_into_tensor(tensor, local_tensor, group=pg)
tensor = tensor.narrow(0, 0, tensor_numel).reshape(sharded_tensor.size())
return tensor
class CompanionMismatch(Exception):
pass
def _iterate_state_dict(
iter_object: Any,
sharded_tensor_func: Callable,
dtensor_func: Callable,
tensor_func: Callable,
*,
pg: Optional[dist.ProcessGroup] = None,
device: Optional[torch.device] = None,
cpu_offload: bool = False,
companion_obj: Any = None,
ranks_only: tuple[int, ...] = (),
type_check: bool = True,
non_blocking: bool = True,
) -> dict[str, Any]:
"""Iterate through the state dict, applying the given functions to each tensor type.
Args:
iter_object (Any): the target state_dict.
sharded_tensor_func (Callable): the function to apply to ShardedTensor
dtensor_func (Callable): the function to apply to DTensor
tensor_func (Callable): the function to apply to Tensor
pg (Optional[dist.ProcessGroup]): process group passed to tensor functions
device (Optional[torch.device]): device passed to tensor functions
cpu_offload (bool): whether to offload the tensors to CPU memory. This option is ignored
if a companion_obj is supplied.
companion_obj (Any): A companion object to the state dict. If this object
is supplied, we attempt to copy the tensor to the companion object.
ranks_only (Tuple[int, ...]): if this tuple is empty, all ranks will
have the same state_dicts. Otherwise only ranks that in ``ranks_only``
have the same state_dicts. Other ranks will get empty state_dicts.
type_check (bool): check if the instance data type is a supported type
that can be saved by DCP. The current supported data types are
torch.Tensor, DTensor, int, float, str, list, dict, None.
non_blocking (bool): whether to use non-blocking copy when copying to the companion object.
"""
# TODO: should we use pytree?
cpu_device = torch.device("cpu")
if isinstance(iter_object, ShardedTensor):
ret = sharded_tensor_func(iter_object, pg, device, companion_obj)
elif isinstance(iter_object, DTensor):
ret = dtensor_func(iter_object, pg, device, companion_obj)
elif isinstance(iter_object, torch.Tensor):
ret = tensor_func(iter_object, pg, device, companion_obj)
elif isinstance(iter_object, (int, float, str, bytes, io.BytesIO)) or iter_object is None:
ret = iter_object
elif isinstance(iter_object, dict):
if companion_obj is not None and (
not isinstance(companion_obj, dict) or set(companion_obj.keys()) != set(iter_object.keys())
):
msg = "" if isinstance(companion_obj, dict) else f"{set(companion_obj.keys())=} {set(iter_object.keys())=}"
raise CompanionMismatch(msg)
ret = {
key: _iterate_state_dict(
value,
sharded_tensor_func,
dtensor_func,
tensor_func,
pg=pg,
device=device,
cpu_offload=cpu_offload,
companion_obj=companion_obj[key] if companion_obj is not None else None,
ranks_only=ranks_only,
type_check=type_check,
non_blocking=non_blocking,
)
for key, value in iter_object.items()
}
elif isinstance(iter_object, (list, tuple)):
if companion_obj is not None and (
not isinstance(companion_obj, (list, tuple)) or len(companion_obj) != len(iter_object)
):
raise CompanionMismatch
ret = [
_iterate_state_dict(
v,
sharded_tensor_func,
dtensor_func,
tensor_func,
pg=pg,
device=device,
cpu_offload=cpu_offload,
companion_obj=companion_obj[idx] if companion_obj is not None else None,
ranks_only=ranks_only,
type_check=type_check,
non_blocking=non_blocking,
)
for idx, v in enumerate(iter_object)
]
if isinstance(iter_object, tuple):
ret = tuple(ret)
elif not type_check:
ret = copy.deepcopy(iter_object)
else:
raise ValueError(f"Unexpected value type {type(iter_object)}")
if not ranks_only or dist.get_rank(pg) in ranks_only:
if isinstance(ret, torch.Tensor):
if cpu_offload and companion_obj is None:
ret = ret.to(cpu_device)
if companion_obj is not None:
if isinstance(companion_obj, DTensor):
assert isinstance(ret, DTensor)
companion_obj._local_tensor.copy_(ret._local_tensor, non_blocking=non_blocking)
else:
companion_obj.copy_(ret, non_blocking=non_blocking)
ret = companion_obj
else:
ret = {} if isinstance(ret, dict) else None
return ret
def _gather_state_dict(
state_dict: dict[str, Any],
*,
pg: Optional[dist.ProcessGroup] = None,
device: Optional[torch.device] = None,
cpu_offload: bool = False,
ranks_only: tuple[int, ...] = (),
type_check: bool = True,
) -> dict[str, Any]:
"""
Given a state_dict, this API gathers all the ShardedTensors or DTensors in
the state_dict.
Args:
state_dict (Dict[str, Any]): the target sharded state_dict.
pg (Optional[dist.ProcessGroup]): the process group that is used to
gather ShardedTensor. Note that gathering a DTensor will use
the DeviceMesh. So this argument will be ignored when gathering a
DTensor.
device: (Optional[torch.device]): the device that is used to
perform allgather for ShardedTensor. Note that gathering a DTensor
will use the DeviceMesh. So this argument will be ignored when
gathering a DTensor.
cpu_offload (bool): whether to offload the tensors to CPU memory. The
default value is False.
ranks_only: (Tuple[int, ...]): if this tuple is empty, all ranks will
have the same state_dicts. Otherwise only ranks that in ``ranks_only``
have the same state_dicts. Other ranks will get empty state_dicts.
type_check: (bool): check if the instance data type is a supported type
that can be saved by DCP. The current supported data types are
torch.Tensor, DTensor, int, float, str, list, dict, None.
Returns:
The gathered state dictionary.
"""
def sharded_tensor_func(value, pg, device, companion_obj):
# ShardedTensor does not seem to record the original device type.
# So if the tensor is moved to CPU, we won't know the original type.
# As a result, we have to rely on the user to tell us the correct one.
cpu_device = torch.device("cpu")
output_tensor = _all_gather_sharded_tensor(value, pg, device)
local_shard_device = value.local_shards()[0].tensor.device if value.local_shards() else cpu_device
if output_tensor.device != local_shard_device:
value = output_tensor.to(local_shard_device)
else:
value = output_tensor
return value
def dtensor_func(value, pg, device, companion_obj):
if value.device != value.device_mesh.device_type:
value = value.to(value.device_mesh.device_type)
# FSDP all_gather: [Shard(0)] -> [Replicate()]
# HSDP all_gather: [Replicate(), Shard(0)] -> [Replicate(), Replicate()]
# 2D FSDP + TP all_gather:
# - [Shard(0), Shard(n)] -> [Replicate(), Replicate()]
# - [Shard(0), Replicate()] -> [Replicate(), Replicate()]
placements = [Replicate() for _ in value.placements]
value = value.redistribute(
device_mesh=value.device_mesh,
placements=placements,
)
# Call `wait()` to force the tensor to be synchronous with respect
# to the main stream.
# See the discussion in https://github.com/pytorch/pytorch/pull/117799.
value = value.to_local()
if isinstance(value, AsyncCollectiveTensor):
value = value.wait()
return value
return _iterate_state_dict(
state_dict,
sharded_tensor_func,
dtensor_func,
_identity_func,
pg=pg,
device=device,
cpu_offload=cpu_offload,
ranks_only=ranks_only,
type_check=type_check,
)
def _offload_state_dict_to_cpu(
state_dict: dict[str, Any],
*,
ranks_only: tuple[int, ...] = (),
type_check: bool = True,
) -> dict[str, Any]:
"""
Given a state_dict, this API offload all the tensors to CPU memory.
Args:
state_dict (Dict[str, Any]): the target state_dict.
pg (Optional[dist.ProcessGroup]): the process group that is used to
gather ShardedTensor. Note that gathering a DTensor will use
the DeviceMesh. So this argument will be ignored when gathering a
DTensor.
ranks_only: (Tuple[int, ...]): if this tuple is empty, all ranks will
have the same state_dicts. Otherwise only ranks that in ``ranks_only``
have the same state_dicts. Other ranks will get empty state_dicts.
type_check: (bool): check if the instance data type is a supported type
that can be saved by DCP. The current supported data types are
torch.Tensor, DTensor, int, float, str, list, dict, None.
Returns:
The gathered state dictionary.
"""
ret = _iterate_state_dict(
state_dict,
_identity_func,
_identity_func,
_identity_func,
pg=None,
device=None,
cpu_offload=True,
ranks_only=ranks_only,
type_check=type_check,
)
return ret
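# Illustrative sketch (assumption, not upstream code): offloading a plain, non-sharded state_dict to
# CPU. No process group is needed because the dict only contains regular tensors and scalars.
def _example_offload_state_dict_to_cpu():
    state_dict = {"weight": torch.ones(4, 4), "step": 3}
    cpu_state_dict = _offload_state_dict_to_cpu(state_dict)
    assert cpu_state_dict["weight"].device.type == "cpu"
    return cpu_state_dict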
@torch.no_grad()
def _copy_state_dict(
state_dict: dict[str, Any],
copy_state_dict: dict[str, Any],
non_blocking: bool = False,
type_check: bool = True,
) -> dict[str, Any]:
"""
Copies all tensors in a given state dict into a different state_dict with the
same structure. Additionally, a copied state dict with the same value references
is returned. Editing the keys on this state dict will not affect the
passed in copy_state_dict (but the value references are the same).
.. warning::
It is expected by this function that state_dict and copy_state_dict share
the same structure and data types.
.. warning::
The current supported data types are
torch.Tensor, DTensor, int, float, str, list, dict, None.
Args:
state_dict (Dict[str, Any]): the target state_dict.
copy_state_dict (Dict[str, Any]):
The state dict we are copying into. This state_dict must have exactly
the same structure as the source `state_dict`.
non_blocking: (bool): Whether copy ops should be performed asynchronously
type_check (bool): check if the instance data type is a supported type
that can be saved by DCP. The current supported data types are
torch.Tensor, DTensor, int, float, str, list, dict, None.
Returns:
State Dict copy
"""
return _iterate_state_dict(
state_dict,
_identity_func,
_identity_func,
_identity_func,
pg=None,
device=None,
cpu_offload=False,
ranks_only=(),
companion_obj=copy_state_dict,
type_check=type_check,
non_blocking=non_blocking,
)
@torch.no_grad()
def _create_cpu_state_dict(
state_dict: dict[str, Any], pin_memory: bool = False, share_memory: bool = False
) -> dict[str, Any]:
"""
Given a state_dict, create another state_dict with the same structure and elements.
However, all tensors in the returned state_dict are new tensors on CPU. These
tensors can be placed on pin_memory or share_memory based on the provided arguments.
.. warning::
Setting both `pin_memory` and `share_memory` to True significantly increases the
latency of this method because of the nuances which require us to register memory
as pinned directly as opposed to relying on the pin_memory cache allocator. This
option should only be used for long lived tensors which are required to be shared.
This is not the case as long as at least one of `pin_memory` or `share_memory` is
set to False.
"""
def tensor_func(
obj: torch.Tensor,
pg: Optional[dist.ProcessGroup],
device: Optional[torch.device],
_: Any,
) -> torch.Tensor:
if len(obj.size()) == 0:
return torch.tensor(0, dtype=obj.dtype)
if share_memory:
t = torch.empty(*tuple(obj.size()), dtype=obj.dtype)
t = t.share_memory_()
if pin_memory:
def unpin_memory(t):
succ = int(torch.cuda.cudart().cudaHostUnregister(t.data_ptr()))
assert succ == 0, f"Unpinning shared memory failed with error-code: {succ}"
weakref.finalize(t, unpin_memory, t)
succ = int(
torch.cuda.cudart().cudaHostRegister(
t.data_ptr(),
t.numel() * t.element_size(),
1, # lines up with 'cudaHostRegisterPortable'
)
)
assert succ == 0, f"Pinning shared memory failed with error-code: {succ}"
return t
elif pin_memory:
return torch.empty(*tuple(obj.size()), dtype=obj.dtype).pin_memory()
else:
return torch.empty(*tuple(obj.size()), dtype=obj.dtype)
def dtensor_func(
obj: DTensor,
pg: Optional[dist.ProcessGroup],
device: Optional[torch.device],
_: Any,
) -> DTensor:
if len(obj.size()) == 0:
return obj
if obj.device != torch.device("cpu"):
ret = cast(DTensor, obj.to(device="cpu"))
else:
ret = copy.deepcopy(obj)
ret._local_tensor = tensor_func(ret._local_tensor, pg, device, None)
return ret
ret = _iterate_state_dict(
state_dict,
_identity_func,
dtensor_func,
tensor_func,
pg=None,
device=None,
cpu_offload=False,
ranks_only=(),
type_check=False,
)
return ret
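# Illustrative sketch (assumption, not upstream code): pre-allocating a CPU mirror of a state_dict
# once with _create_cpu_state_dict, then copying fresh values into it with _copy_state_dict so that
# host memory is reused across checkpoints.
def _example_cpu_mirror_copy():
    src = {"weight": torch.randn(4, 4), "bias": torch.randn(4)}
    cpu_mirror = _create_cpu_state_dict(src)
    _copy_state_dict(src, cpu_mirror, non_blocking=False)
    assert torch.equal(cpu_mirror["weight"], src["weight"].cpu())
    return cpu_mirror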
def _check_state_dict_similarity(
state_dict: dict[str, Any],
compared_state_dict: dict[str, Any],
) -> bool:
"""
Given two state_dicts, check if the structures are the same. And
    if a [key, tensor] pair exists in one state_dict there must be
    a corresponding pair, [key, other_tensor], in the other state_dict,
where tensor and other_tensor have the same size and dtype.
Return the check result.
"""
def tensor_func(
obj: torch.Tensor,
pg: Optional[dist.ProcessGroup],
device: Optional[torch.device],
companion_obj: Any,
) -> torch.Tensor:
if companion_obj.dtype != obj.dtype or companion_obj.size() != obj.size():
raise CompanionMismatch
return obj
try:
_iterate_state_dict(
state_dict,
_identity_func,
_identity_func,
tensor_func,
pg=None,
device=None,
cpu_offload=False,
ranks_only=(),
companion_obj=compared_state_dict,
type_check=False,
)
except CompanionMismatch:
return False
return True
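# Illustrative sketch (assumption, not upstream code): the structure check returns False as soon as a
# tensor's size or dtype differs between the two state_dicts.
def _example_check_state_dict_similarity():
    a = {"weight": torch.zeros(2, 2)}
    b = {"weight": torch.zeros(2, 2)}
    c = {"weight": torch.zeros(3, 3)}
    assert _check_state_dict_similarity(a, b) is True
    assert _check_state_dict_similarity(a, c) is False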
class _TensorInfo(NamedTuple):
size: torch.Size
dtype: torch.dtype
def _broadcast_tensors(
full_state_dict: dict[str, Any],
local_state_dict: dict[str, Any],
keys: list[str],
device: torch.device,
pg: Optional[dist.ProcessGroup] = None,
) -> None:
tensors = []
for key in keys:
if dist.get_rank() == 0:
full_state = full_state_dict[key]
assert isinstance(full_state, torch.Tensor)
full_tensor = full_state.detach().to(device)
else:
tensor_info = full_state_dict[key]
full_tensor = torch.empty(
size=tensor_info.size,
device=device,
dtype=tensor_info.dtype,
)
tensors.append(full_tensor)
local_state = local_state_dict.get(key, None)
if local_state is None:
continue
elif isinstance(local_state, DTensor):
local_state_dict[key] = (local_state, full_tensor)
else:
local_state_dict[key] = full_tensor
if pg is None:
pg = dist.distributed_c10d._get_default_group()
if len(tensors) > 1:
dist._broadcast_coalesced(pg, tensors, 500, 0)
else:
dist.broadcast(tensors[0], src=0, group=pg)
_distribute_tensors(local_state_dict, keys, device, pg)
def _distribute_tensors(
local_state_dict: dict[str, Any],
keys: list[str],
device: torch.device,
pg: Optional[dist.ProcessGroup] = None,
) -> None:
if pg is None:
pg = dist.distributed_c10d._get_default_group()
for key in keys:
_local_state = local_state_dict.get(key, None)
if _local_state is None or torch.is_tensor(_local_state):
continue
local_state = _local_state[0]
full_tensor = _local_state[1]
shape, offset = compute_local_shape_and_global_offset(
full_tensor.shape, local_state.device_mesh, local_state.placements
)
slices = [
slice(cur_offset, cur_offset + cur_shape) for cur_shape, cur_offset in zip(shape, offset, strict=False)
]
if local_state.is_meta:
# Use .clone() here rather than view to clone and return only the sliced portion, minimizing memory access and cost.
local_tensor = full_tensor[slices].detach().clone()
# TODO: currently, we cannot handle strided sharding if the dp dimension is not even. For example,
# one of the case that is not yet supported is when placements = (Shard(0), _StridedShard(0, sf=2)).
ret = DTensor.from_local(
local_tensor,
local_state.device_mesh,
local_state.placements,
shape=local_state.shape,
stride=local_state.stride(),
)
else:
ret = local_state
# Copy full_tensor[slices] into local_state.to_local() to reduce memory footprint.
ret.to_local().copy_(full_tensor[slices])
local_state_dict[key] = ret
def _broadcast_state_dict(
full_state_dict: dict[str, Any],
local_state_dict: dict[str, Any],
device: torch.device,
pg: Optional[dist.ProcessGroup] = None,
strict: bool = False,
cpu_offload: bool = False,
) -> None:
# Broadcast from rank0's `full_state_dict` to all ranks' `local_state_dict`.
# If strict is True, any keys in `local_state_dict` but not in `full_state_dict`
# will be removed from `local_state_dict`.
ret = {}
if dist.get_rank() == 0:
for key, value in full_state_dict.items():
if not torch.is_tensor(value):
ret[key] = value
elif value.dim() == 0:
ret[key] = value.cpu()
else:
ret[key] = _TensorInfo(value.size(), value.dtype)
broadcast_list = [ret]
dist.broadcast_object_list(broadcast_list, src=0, group=pg)
ret = broadcast_list[0]
# Gather values
keys = []
local_state_dict_keys = set(local_state_dict.keys())
global_keys = set()
for key, value in ret.items():
global_keys.add(key)
if not isinstance(value, _TensorInfo):
if key in local_state_dict:
local_state_dict[key] = value
continue
if dist.get_rank() == 0:
ret[key] = full_state_dict[key]
keys.append(key)
# Broadcast every tensor to avoid OOM for now.
if len(keys) >= 1:
_broadcast_tensors(ret, local_state_dict, keys, device, pg)
if cpu_offload:
for key in keys:
local_state_dict[key] = local_state_dict[key].cpu()
keys.clear()
if strict:
if missing_keys := (local_state_dict_keys - global_keys):
for key in missing_keys:
local_state_dict.pop(key)
if keys:
_broadcast_tensors(ret, local_state_dict, keys, device, pg)
if cpu_offload:
for key in keys:
local_state_dict[key] = local_state_dict[key].cpu()
def _distribute_state_dict(
full_state_dict: dict[str, Any],
local_state_dict: dict[str, Any],
device: torch.device,
pg: Optional[dist.ProcessGroup] = None,
) -> None:
# Full_state_dict = True, broadcast_from_rank0 = False here. Each rank has
# full_state_dict. Skip the broadcast in ``_broadcast_state_dict`` and
# distribute tensors in each rank
for key, value in full_state_dict.items():
if key not in full_state_dict:
continue
if not torch.is_tensor(value):
local_state_dict[key] = value
elif value.dim() == 0:
local_state_dict[key] = value.cpu()
else:
assert isinstance(value, torch.Tensor)
local_state = local_state_dict.get(key, None)
if local_state is None:
continue
elif isinstance(local_state, DTensor):
local_state_dict[key] = distribute_tensor(
value.detach().to(device),
local_state.device_mesh,
local_state.placements,
)
else:
local_state_dict[key] = value.detach().to(device)
# These APIs are from torch.distributed.checkpoint.
# TODO: We should consolidate the code here, as not all modules can depend on
# DCP.
PATH_ITEM = Union[str, int]
OBJ_PATH = tuple[PATH_ITEM, ...]
FLATTEN_MAPPING = dict[str, OBJ_PATH]
STATE_DICT_TYPE = dict[str, Any]
CONTAINER_TYPE = MutableMapping[PATH_ITEM, Any]
def _traverse_state_dict(
state_dict: STATE_DICT_TYPE,
visitor: Callable[[OBJ_PATH, Any], None],
) -> None:
"""
Invoke ``visitor`` for each value recursively in ``state_dict``.
Mapping, list, and tuple will be flattened and other value types are treated
as the terminal values and will invoke ``visitor``.
"""
def _traverse_obj(path: OBJ_PATH, value: Any) -> None:
if isinstance(value, Mapping):
for k, v in value.items():
_traverse_obj(path + (str(k),), v)
elif isinstance(value, (list, tuple)):
for i, v in enumerate(value):
_traverse_obj(path + (i,), v)
else:
visitor(path, value)
for key, value in state_dict.items():
_traverse_obj((str(key),), value)
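# Illustrative sketch (assumption, not upstream code): the visitor is invoked once per terminal value
# with its object path, e.g. ("optim", "param_groups", 0, "lr").
def _example_traverse_state_dict():
    visited = []
    state_dict = {"optim": {"param_groups": [{"lr": 0.1}]}}
    _traverse_state_dict(state_dict, lambda path, value: visited.append((path, value)))
    assert visited == [(("optim", "param_groups", 0, "lr"), 0.1)]
    return visited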
def _flatten_state_dict(
state_dict: STATE_DICT_TYPE,
) -> tuple[STATE_DICT_TYPE, FLATTEN_MAPPING]:
"""
Flatten ``state_dict`` made of nested dicts and lists into a top level dictionary.
Use ``unflatten_state_dict`` to revert this process.
Returns:
A tuple with the flatten state_dict and a mapping from original to new state_dict.
N.B. The new keys are derived from the object paths, joined by dot.
For example: ``{ 'a': {'b':...}}`` results in the key `a.b`.
"""
flattened: STATE_DICT_TYPE = {}
mappings: FLATTEN_MAPPING = {}
def flat_copy(path: OBJ_PATH, value: Any) -> None:
new_fqn = ".".join(map(str, path))
if new_fqn in flattened:
raise ValueError(f"duplicated flatten key {new_fqn}")
flattened[new_fqn] = value
mappings[new_fqn] = path
_traverse_state_dict(state_dict, flat_copy)
return flattened, mappings
def _set_element(root_dict: STATE_DICT_TYPE, path: OBJ_PATH, value: Any) -> None:
"""Set ``value`` in ``root_dict`` along the ``path`` object path."""
cur_container = cast(CONTAINER_TYPE, root_dict)
def extend_list(lst: list[Any], idx: int) -> None:
while len(lst) <= idx:
lst.append(None)
for i in range(1, len(path)):
prev_key = path[i - 1]
key = path[i]
def_val: CONTAINER_TYPE | list[Any] = {} if type(key) == str else []
if isinstance(cur_container, Mapping):
cur_container = cast(CONTAINER_TYPE, cur_container.setdefault(prev_key, def_val))
else:
extend_list(cur_container, prev_key)
if cur_container[prev_key] is None:
cur_container[prev_key] = def_val
cur_container = cur_container[prev_key]
key = path[-1]
if type(key) == int:
extend_list(cast(list[Any], cur_container), key)
cur_container[key] = value
def _unflatten_state_dict(state_dict: STATE_DICT_TYPE, mapping: FLATTEN_MAPPING) -> STATE_DICT_TYPE:
"""Restore the original nested state_dict according to ``mapping`` and the flattened ``state_dict``."""
nested: STATE_DICT_TYPE = {}
for key, value in state_dict.items():
_set_element(nested, mapping[key], value)
return nested
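# Illustrative sketch (assumption, not upstream code): flattening a nested state_dict into dotted keys
# and restoring the original nesting with the returned mapping.
def _example_flatten_roundtrip():
    nested = {"a": {"b": torch.zeros(2)}, "c": 1}
    flattened, mapping = _flatten_state_dict(nested)
    assert set(flattened.keys()) == {"a.b", "c"}
    restored = _unflatten_state_dict(flattened, mapping)
    assert torch.equal(restored["a"]["b"], nested["a"]["b"]) and restored["c"] == 1
    return restored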
| {
"repo_id": "verl-project/verl",
"file_path": "verl/third_party/torch/distributed/_state_dict_utils.py",
"license": "Apache License 2.0",
"lines": 723,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/third_party/torch/distributed/checkpoint/state_dict.py | # official torch 2.6.0 set_model_state_dict API leads to OOM
# this is a copy of torch/distributed/checkpoint from torch 2.7.0
# From PyTorch:
# Copyright (c) 2016- Facebook, Inc (Adam Paszke)
# Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
# Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
# Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
# Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
# Copyright (c) 2011-2013 NYU (Clement Farabet)
# Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
# Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
# Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)
# From Caffe2:
# Copyright (c) 2016-present, Facebook Inc. All rights reserved.
# All contributions by Facebook:
# Copyright (c) 2016 Facebook Inc.
# All contributions by Google:
# Copyright (c) 2015 Google Inc.
# All rights reserved.
# All contributions by Yangqing Jia:
# Copyright (c) 2015 Yangqing Jia
# All rights reserved.
# All contributions by Kakao Brain:
# Copyright 2019-2020 Kakao Brain
# All contributions by Cruise LLC:
# Copyright (c) 2022 Cruise LLC.
# All rights reserved.
# All contributions by Tri Dao:
# Copyright (c) 2024 Tri Dao.
# All rights reserved.
# All contributions by Arm:
# Copyright (c) 2021, 2023-2024 Arm Limited and/or its affiliates
# All contributions from Caffe:
# Copyright(c) 2013, 2014, 2015, the respective contributors
# All rights reserved.
# All other contributions:
# Copyright(c) 2015, 2016 the respective contributors
# All rights reserved.
# Caffe2 uses a copyright model similar to Caffe: each contributor holds
# copyright over their contributions to Caffe2. The project versioning records
# all such contribution and copyright details. If a contributor wants to further
# mark their specific copyright on a particular contribution, they should
# indicate their copyright solely in the commit message of the change when it is
# committed.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
# and IDIAP Research Institute nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ruff: noqa: B028, UP038, UP007, E721
# mypy: allow-untyped-defs
import contextlib
import functools
import gc
import warnings
from collections.abc import Generator, Iterable
from dataclasses import asdict, dataclass, field
from itertools import chain
from typing import Any, Callable, Optional, Union, cast, no_type_check
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.distributed._shard.sharded_tensor import ShardedTensor
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
_CHECKPOINT_PREFIX,
)
from torch.distributed.fsdp import (
FullOptimStateDictConfig,
FullStateDictConfig,
OptimStateDictConfig,
ShardedOptimStateDictConfig,
ShardedStateDictConfig,
StateDictConfig,
StateDictType,
)
from torch.distributed.fsdp import (
FullyShardedDataParallel as FSDP,
)
from torch.distributed.fsdp._common_utils import (
FSDP_WRAPPED_MODULE,
_get_module_fsdp_state_if_fully_sharded_module,
)
from torch.distributed.tensor import DTensor
from torch.nn.modules.module import _IncompatibleKeys
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils._pytree import tree_map_only
from verl.third_party.torch.distributed._state_dict_utils import (
_broadcast_state_dict,
_distribute_state_dict,
_flatten_state_dict,
_gather_state_dict,
_offload_state_dict_to_cpu,
_unflatten_state_dict,
)
__all__ = [
"FQNS_T",
"PrimitiveType",
"ValueType",
"DictValueType",
"ListDictValueType",
"OptimizerStateType",
"StateDictOptions",
"get_model_state_dict",
"get_optimizer_state_dict",
"get_state_dict",
"set_model_state_dict",
"set_optimizer_state_dict",
"set_state_dict",
]
_FLAT_PARAM = "_flat_param"
_PG = "param_groups"
_PARAMS = "params"
_STATE = "state"
FQNS_T = set[str]
PrimitiveType = Union[DTensor, ShardedTensor, torch.Tensor, int, float, str]
ValueType = Union[PrimitiveType, list[PrimitiveType], tuple[PrimitiveType], dict[str, "ValueType"]]
DictValueType = dict[str, ValueType]
ListDictValueType = list[DictValueType]
OptimizerStateType = dict[str, DictValueType | ListDictValueType]
_patched_state_dict: set[Callable] = set()
@contextlib.contextmanager
def _gc_context():
is_enabled = gc.isenabled()
gc.disable()
try:
yield
finally:
if is_enabled:
gc.enable()
@dataclass
class StateDictOptions:
"""
This dataclass specifies how get_state_dict/set_state_dict will work.
- ``full_state_dict``: if this is set to True, all the tensors in the
returned state_dict will be gathered. No ShardedTensor and DTensor
will be in the returned state_dict.
- ``cpu_offload``: offload all the tensors to cpu. To prevent CPU OOM, if
``full_state_dict`` is also true, then only the rank0 will get the
state_dict and all other ranks will get empty state_dict.
- ``ignore_frozen_params``: if the value is True, the returned state_dict
won't contain any frozen parameters -- the ``requires_grad`` is False.
The default value is False.
- ``keep_submodule_prefixes`` (deprecated): when ``submodules`` is not None, this option
indicates whether to keep the submodule prefixes from the state_dict keys.
      For example, suppose the submodule is ``module.pretrain`` and the full
      FQN of the parameter is ``pretrain.layer1.weight``. When this option
      is True, the parameter's key in the returned state_dict will be
      ``pretrain.layer1.weight``. If the option is False, the key will be
``layer1.weight``.
Note that if ``keep_submodule_prefixes`` is False, there may be conflicted
FQNs, hence there should be only one submodule in ``submodules``.
- ``strict``: the ``strict`` option when ``set_state_dict`` calls
model.load_state_dict().
- ``broadcast_from_rank0``: when the option is True, rank0 should receive a
full state_dict and will broadcast the tensors in the state_dict/
optim_state_dict one by one to other ranks. Other ranks will receive
the tensors and shard according to the local shards in the model and
optimizer. ``full_state_dict`` must be set to True when using this option.
This option currently only supports DTensor, not the legacy ShardedTensor.
"""
full_state_dict: bool = False
cpu_offload: bool = False
ignore_frozen_params: bool = False
keep_submodule_prefixes: bool = True
strict: bool = True
broadcast_from_rank0: bool = False
flatten_optimizer_state_dict: bool = False
dsd_fqn_modifiers: str = "_fqn_modifiers"
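# Illustrative sketch (assumption, not upstream code): typical option combinations implied by the
# docstring above -- gather a full, CPU-offloaded state_dict for saving, and broadcast a full
# state_dict from rank0 when loading.
def _example_state_dict_options():
    save_options = StateDictOptions(full_state_dict=True, cpu_offload=True)
    load_options = StateDictOptions(full_state_dict=True, broadcast_from_rank0=True)
    return save_options, load_options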
@dataclass
class _StateDictInfo(StateDictOptions):
fqn_param_mapping: dict[
str | torch.Tensor,
FQNS_T | torch.Tensor,
] = field(default_factory=dict)
shared_params_mapping: dict[
str | torch.Tensor,
FQNS_T | torch.Tensor,
] = field(default_factory=dict)
submodule_prefixes: set[str] = field(default_factory=set)
handle_model: bool = True
handle_optim: bool = True
fsdp_context: Callable = contextlib.nullcontext
fsdp_modules: list[nn.Module] = field(default_factory=list)
@functools.cache
def _get_fqns(
model: nn.Module,
name: str,
dsd_fqn_modifiers: str = "_fqn_modifiers",
skip_ddp_prefix: bool = True,
skip_compiler_prefix: bool = True,
) -> FQNS_T:
"""
This API is used to convert the name of a parameter to the FQNs. For FSDP
without `use_orig_params`, the name of FlatParameter can be mapped to
multiple original parameters. As a result, the return type of this function
is `set[str]`.
Args:
module (nn.Module): the root model.
name (str): the name
skip_ddp_prefix (bool): whether to skip DDP's `module` prefix
Returns:
The canonical FQNs based on the model traversal.
"""
# Remove the checkpoint prefix, if it exists.
name = name.replace(_CHECKPOINT_PREFIX, "")
if "." not in name:
return {name}
obj_names = name.split(".")
fqn_obj_names = []
curr_obj = model
for i, curr_obj_name in enumerate(obj_names):
if isinstance(curr_obj, DDP):
assert curr_obj_name == "module"
curr_obj = curr_obj.module
if not skip_ddp_prefix:
fqn_obj_names.append(curr_obj_name)
elif isinstance(curr_obj, FSDP):
if i < len(obj_names) - 1 and obj_names[i + 1] == _FLAT_PARAM:
prefix = ".".join(fqn_obj_names)
flat_param = getattr(curr_obj, _FLAT_PARAM)
if prefix:
prefix = f"{prefix}."
return {f"{prefix}{fqn}" for fqn in flat_param._fqns}
curr_obj = getattr(curr_obj, FSDP_WRAPPED_MODULE)
if curr_obj_name != FSDP_WRAPPED_MODULE:
fqn_obj_names.append(curr_obj_name)
curr_obj = getattr(curr_obj, curr_obj_name)
elif isinstance(curr_obj, torch._dynamo.eval_frame.OptimizedModule):
assert curr_obj_name == "_orig_mod"
curr_obj = curr_obj._orig_mod
if not skip_compiler_prefix:
fqn_obj_names.append(curr_obj_name)
else:
            # In some modules, _fqn_modifiers would not be shown in the state_dict keys;
            # skip them in the fqn so that load_state_dict succeeds for them.
if hasattr(curr_obj, dsd_fqn_modifiers):
if removed_fqn := getattr(curr_obj, dsd_fqn_modifiers)().get(curr_obj_name):
if hasattr(curr_obj, removed_fqn):
curr_obj = getattr(curr_obj, removed_fqn)
fqn_obj_names.append(curr_obj_name)
if curr_obj_name == nn.modules.module._EXTRA_STATE_KEY_SUFFIX:
if i != len(obj_names) - 1:
raise RuntimeError("Expect `_extra_state` to be the last obj name")
else:
curr_obj = getattr(curr_obj, curr_obj_name)
return {".".join(fqn_obj_names).replace(_CHECKPOINT_PREFIX, "")}
class _EXTRA_STATE:
pass
def _iterate_valid_model_state(model, dsd_fqn_modifiers="_fqn_modifiers"):
visited_modules: set[nn.Module] = set()
def recurse(module: nn.Module, curr_fqn: str) -> Generator:
visited_modules.add(module)
curr_fqn = f"{curr_fqn}." if curr_fqn else ""
for name, submodule in module.named_children():
if submodule in visited_modules:
continue
# if user have state_dict_hooks in their model, they can add the state_dict key changes
# at dsd_fqn_modifiers in input to align with the function of state_dict_hook
if hasattr(module, dsd_fqn_modifiers) and name in getattr(module, dsd_fqn_modifiers)().values():
# skip _fqn_modifiers here thus remove the last `.` added
new_fqn = curr_fqn[:-1]
else:
new_fqn = f"{curr_fqn}{name}"
yield from recurse(submodule, new_fqn)
for name, obj in chain(module.named_buffers(recurse=False), module.named_parameters(recurse=False)):
if name in module._non_persistent_buffers_set:
continue
new_fqn = f"{curr_fqn}{name}"
yield new_fqn, obj
if getattr(module.__class__, "get_extra_state", nn.Module.get_extra_state) != nn.Module.get_extra_state:
new_fqn = f"{curr_fqn}{nn.modules.module._EXTRA_STATE_KEY_SUFFIX}"
yield new_fqn, _EXTRA_STATE()
yield from recurse(model, "")
def _verify_options(
model: nn.Module,
optims: tuple[torch.optim.Optimizer, ...],
optim_only: bool,
*,
submodules: Optional[set[nn.Module]] = None,
options: Optional[StateDictOptions] = None,
) -> _StateDictInfo:
"""
Verify the model and options passed by the user and generates _StateDictInfo.
"""
if submodules:
warnings.warn(
"Getting submodules only model/optim state_dict is deprecated and "
"will be removed in 2.5. This feature can be achieved by manually "
"filtering out the state_dict returned from get_state_dict.",
FutureWarning,
)
if optim_only and not optims:
raise RuntimeError("Optimizers are not passed in but optim_only is set to True.")
options = options or StateDictOptions()
fqn_param_mapping: dict[str | torch.Tensor, set[str] | torch.Tensor] = {}
shared_params_mapping: dict[str | torch.Tensor, set[str] | torch.Tensor] = {}
for name, param in _iterate_valid_model_state(model):
if isinstance(param, _EXTRA_STATE):
continue
fqns = _get_fqns(model, name)
fqn = fqn_param_mapping.get(param, None)
if fqn is not None:
cast(set[str], fqn_param_mapping[param]).update(fqns)
shared_params_mapping[param] = fqn_param_mapping[param]
else:
# We need to do copy as _get_fqns is lru_cached
fqn_param_mapping[param] = fqns.copy()
for fqn in fqns:
if not isinstance(param, _EXTRA_STATE):
fqn_param_mapping[fqn] = param
for param_, fqns_ in list(shared_params_mapping.items()):
for fqn in fqns_:
shared_params_mapping[fqn] = cast(torch.Tensor, param_)
submodule_prefixes: set[str] = set()
if submodules:
submodules = set(submodules)
for name, module in model.named_modules():
if module not in submodules:
continue
fqns = _get_fqns(model, name)
assert len(fqns) == 1, "Submodule FQN should only have 1 instance"
submodule_prefixes.update(f"{fqn}." for fqn in fqns)
if options.broadcast_from_rank0 and not options.full_state_dict:
raise ValueError("full_state_dict must be True when broadcast_from_rank0 is True.")
fsdp_modules = FSDP.fsdp_modules(model)
state_dict_config: StateDictConfig
optim_state_dict_config: OptimStateDictConfig
fsdp_context: Callable
if fsdp_modules:
# FSDP API only work if at least one FSDP instance exists.
if options.full_state_dict:
state_dict_config = FullStateDictConfig(offload_to_cpu=options.cpu_offload, rank0_only=options.cpu_offload)
optim_state_dict_config = FullOptimStateDictConfig(
offload_to_cpu=options.cpu_offload,
rank0_only=(options.cpu_offload or options.broadcast_from_rank0),
)
state_dict_type = StateDictType.FULL_STATE_DICT
else:
state_dict_config = ShardedStateDictConfig(
offload_to_cpu=options.cpu_offload,
)
optim_state_dict_config = ShardedOptimStateDictConfig(
offload_to_cpu=options.cpu_offload,
)
state_dict_type = StateDictType.SHARDED_STATE_DICT
@contextlib.contextmanager
def fsdp_state_dict_type_without_warning(
module,
state_dict_type,
state_dict_config,
optim_state_dict_config,
):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message="FSDP.state_dict_type", category=FutureWarning)
with FSDP.state_dict_type(
module=module,
state_dict_type=state_dict_type,
state_dict_config=state_dict_config,
optim_state_dict_config=optim_state_dict_config,
):
yield
fsdp_context = functools.partial(
fsdp_state_dict_type_without_warning,
module=model,
state_dict_type=state_dict_type,
state_dict_config=state_dict_config,
optim_state_dict_config=optim_state_dict_config,
)
else:
fsdp_context = contextlib.nullcontext
return _StateDictInfo(
**asdict(options),
fqn_param_mapping=fqn_param_mapping,
shared_params_mapping=shared_params_mapping,
submodule_prefixes=submodule_prefixes,
fsdp_context=fsdp_context,
fsdp_modules=cast(list[nn.Module], fsdp_modules),
handle_model=not optim_only,
handle_optim=(len(optims) > 0),
)
def _verify_state_dict(
model_state_dict: dict[str, ValueType],
optim_state_dict: OptimizerStateType,
info: _StateDictInfo,
) -> None:
for module in info.fsdp_modules:
fsdp_state = _get_module_fsdp_state_if_fully_sharded_module(module)
assert fsdp_state is not None, "Expected a fsdp_state with a fsdp module."
# Verify if the model_state_dict and optim_state_dict are valid. This API
# should give the users an explicit error message to debug or report.
if (
info.handle_model
and not model_state_dict
and not info.submodule_prefixes
and not info.ignore_frozen_params
and not (info.cpu_offload and info.full_state_dict)
and info.strict
and not info.broadcast_from_rank0
):
raise RuntimeError(
"The option indicates that model state_dict is required to save "
"or load, but model state_dict is empty."
f"rank = {dist.get_rank()=}."
)
if info.handle_optim:
if not optim_state_dict and not (info.cpu_offload and info.full_state_dict) and (not info.broadcast_from_rank0):
raise RuntimeError(
"The option indicates that model state_dict is required to save, "
f"or load but optim state_dict is empty. {optim_state_dict}"
)
for key in model_state_dict.keys():
if _FLAT_PARAM in key:
raise RuntimeError(f"{key} contains {_FLAT_PARAM}. This can happen if the model is not the root module.")
def _state_dict_fn(obj: nn.Module | torch.optim.Optimizer, api: str) -> Callable:
call = getattr(obj, api)
if call in _patched_state_dict:
call = functools.partial(getattr(obj.__class__, api), self=obj)
return call
def _maybe_full_or_cpu_state_dict(state_dict: dict[str, Any], info: _StateDictInfo) -> dict[str, Any]:
if info.full_state_dict:
ranks_only = () if (not info.cpu_offload or not torch.distributed.is_initialized()) else (0,)
return _gather_state_dict(state_dict, cpu_offload=info.cpu_offload, ranks_only=ranks_only)
elif info.cpu_offload:
return _offload_state_dict_to_cpu(state_dict)
else:
return state_dict
@torch.no_grad()
def _get_model_state_dict(model: nn.Module, info: _StateDictInfo) -> dict[str, ValueType]:
if not info.handle_model:
return {}
with info.fsdp_context():
state_dict = _state_dict_fn(model, "state_dict")()
for key in list(state_dict.keys()):
fqns = _get_fqns(model, key)
assert len(fqns) == 1, (key, fqns)
fqn = next(iter(fqns))
if fqn != key:
# As we only support FSDP, DDP, and TP, the only cases where the key
# differs from the FQN are wrapper-based DDP and torch.compile prefixes.
# Verify that this assumption holds.
def verify(key, fqn) -> bool:
if len(fqn) >= len(key):
return False
fqn_split = fqn.split(".")
key_split = key.split(".")
fqn_idx = 0
for key_idx, key_name in enumerate(key_split):
if key_name == fqn_split[fqn_idx]:
fqn_idx += 1
if fqn_idx == len(fqn_split):
return key_idx == len(key_split) - 1
elif key_name in ("module", "_orig_mod"):
continue
else:
return False
return True
if not verify(key, fqn):
raise RuntimeError(f"An unexpected key, {key}, exists. FQN is {fqn}")
state_dict[fqn] = state_dict.pop(key)
if info.submodule_prefixes:
new_state_dict: dict[str, ValueType] = {}
# TODO: make this faster.
for fqn in state_dict.keys():
for prefix in info.submodule_prefixes:
if not fqn.startswith(prefix):
continue
if info.keep_submodule_prefixes:
new_state_dict[fqn] = state_dict[fqn]
else:
new_fqn = fqn[len(prefix) :]
new_state_dict[new_fqn] = state_dict[fqn]
state_dict = new_state_dict
if info.ignore_frozen_params:
for key, param in model.named_parameters():
if param.requires_grad:
continue
fqns = _get_fqns(model, key)
for fqn in fqns:
state_dict.pop(fqn)
for key, p in list(state_dict.items()):
if torch.is_tensor(p) and p.is_meta:
state_dict.pop(key)
return _maybe_full_or_cpu_state_dict(state_dict, info)
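# Illustrative sketch (not part of the original module): the `verify` closure above checks
# that a state_dict key only differs from its canonical FQN by wrapper prefixes. The helper
# below is a hypothetical, standalone illustration of that normalization, e.g. the DDP +
# torch.compile key "module._orig_mod.linear.weight" reduces to "linear.weight".
def _example_strip_wrapper_prefixes(key: str) -> str:
    return ".".join(part for part in key.split(".") if part not in ("module", "_orig_mod"))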
@torch.no_grad()
def _load_model_state_dict(
model: nn.Module,
state_dict: dict[str, ValueType],
info: _StateDictInfo,
) -> _IncompatibleKeys:
if not info.handle_model or (not state_dict and not info.broadcast_from_rank0):
return _IncompatibleKeys({}, {})
local_state_dict = {}
for key, value in _iterate_valid_model_state(model, info.dsd_fqn_modifiers):
fqns = _get_fqns(model, key, info.dsd_fqn_modifiers)
fqns_with_prefix = _get_fqns(
model,
key,
info.dsd_fqn_modifiers,
skip_ddp_prefix=False,
skip_compiler_prefix=False,
)
for fqn, fqn_with_prefix in zip(fqns, fqns_with_prefix, strict=False):
if (not info.broadcast_from_rank0 or dist.get_rank() == 0) and fqn != fqn_with_prefix:
load_value = state_dict.pop(fqn, None)
if load_value is None:
if info.strict:
raise RuntimeError(f"Missing key: {fqn}.")
else:
state_dict[fqn_with_prefix] = load_value
local_state_dict[fqn_with_prefix] = value
assign = False
if info.broadcast_from_rank0 or info.full_state_dict:
devices = set()
for key, value in local_state_dict.items():
if torch.is_tensor(value) and value.dim() > 0:
devices.add(value.device)
# In a LoRA state_dict there can be multiple devices, including the meta device.
# Use the non-meta device for the broadcast/distribute and set assign to True.
if torch.device("meta") in devices:
devices.remove(torch.device("meta"))
assign = True
if len(devices) == 0:
devices.add(dist.distributed_c10d._get_pg_default_device())
elif len(devices) > 1:
raise ValueError("Multiple devices found")
if info.broadcast_from_rank0:
_broadcast_state_dict(
state_dict,
local_state_dict,
device=devices.pop(),
strict=info.strict,
cpu_offload=info.cpu_offload,
)
elif info.full_state_dict:
_distribute_state_dict(state_dict, local_state_dict, device=devices.pop())
for fqn, local_state in local_state_dict.items():
state_dict[fqn] = local_state
with info.fsdp_context():
return cast(
_IncompatibleKeys,
_state_dict_fn(model, "load_state_dict")(state_dict=state_dict, strict=info.strict, assign=assign),
)
def _init_optim_state(optim: torch.optim.Optimizer) -> None:
"""
Initialize optim states by calling the step() with zero grads.
"""
if optim.state:
# The optimizer state is initialized.
return
# There are some stateless optimizers like SGD. These optimizers will
# not return in the above condition. So if gradients exist, we should also
# return. If gradients do not exist, the following initialization should
# not disturb SGD because the gradients and lr are both zero.
for param_group in optim.param_groups:
for param in param_group[_PARAMS]:
if param.grad is not None:
return
for param_group in optim.param_groups:
for param in param_group[_PARAMS]:
if param.requires_grad:
param.grad = torch.zeros_like(param)
# Some optimizers will update parameters regardless of grads due to lr, so
# set lr to zero when calling `step()`.
lrs = []
for param_group in optim.param_groups:
if "lr" in param_group:
lrs.append(param_group["lr"])
param_group["lr"] = torch.tensor(0.0) if isinstance(param_group["lr"], torch.Tensor) else 0.0
optim.step(closure=None)
# Whether to recover the "lr" should not matter too much as the
# checkpoint load will restore it later anyway.
for param_group in optim.param_groups:
if "lr" in param_group:
param_group["lr"] = lrs.pop(0)
optim.zero_grad(set_to_none=True)
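# Usage sketch (illustrative only, not part of the original module): `_init_optim_state` is
# typically called right before loading an optimizer checkpoint so that `optim.state` is
# materialized. The tiny model below is hypothetical; with zero grads and a temporarily
# zeroed lr, the parameters are left untouched while the state entries get created.
def _example_init_optim_state() -> None:
    model = nn.Linear(4, 2)
    optim = torch.optim.Adam(model.parameters(), lr=1e-3)
    assert len(optim.state) == 0  # fresh optimizer: no per-parameter state yet
    _init_optim_state(optim)
    assert len(optim.state) > 0  # step/exp_avg/exp_avg_sq now exist for each parameter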
def _flatten_optim_state_dict(state_dict: OptimizerStateType) -> dict[str, ValueType]:
"""
This API flattens the optimizer state_dict to support optimizer resharding for
MPMD, e.g., pipeline parallelism.
Without the API, the original optimizer state_dict looks like:
{
"state": {
"layer1.weight": {
"step": 10, "exp_avg": SomeTensor, "exp_avg_sq": SomeTensor
},
"layer2.weight": {
"step": 10, "exp_avg": SomeTensor, "exp_avg_sq": SomeTensor
},
},
"param_group": [
{
"lr": 0.0,
"betas": (0.9, 0.95), ...,
"params": ["layer1.weight", "layer2.weight"]
}
]
}
With this API, the optimizer state_dict looks like:
{
"state.layer1.weight.step": 10,
"state.layer2.weight.step": 10,
"state.layer1.weight.exp_avg": SomeTensor,
"state.layer2.weight.exp_avg": SomeTensor,
"state.layer1.weight.exp_avg_sq": SomeTensor,
"state.layer2.weight.exp_avg_sq": SomeTensor,
"param_group.layer1.weight.lr" : 0.1,
"param_group.layer2.weight.lr" : 0.1,
"param_group.layer1.weight.betas" : (0.9, 0.95),
"param_group.layer2.weight.betas" : (0.9, 0.95),
}
Note that if any of the values is a container, like the betas in the example,
this API won't flatten it.
"""
def _raise_if_type_not_supported(v):
if not isinstance(v, (torch.Tensor, int, float)):
raise NotImplementedError(
f"Flattening optimizer state_dict only supports tensor, int, float states now. Type is {type(v)}."
)
ret: dict[str, ValueType] = {}
for fqn, state in cast(DictValueType, state_dict[_STATE]).items():
for k, v in cast(DictValueType, state).items():
_raise_if_type_not_supported(v)
ret[f"{_STATE}.{fqn}.{k}"] = v
for param_group in cast(ListDictValueType, state_dict[_PG]):
fqns = param_group.pop(_PARAMS)
for fqn in cast(list[str], fqns):
for k, v in param_group.items():
ret[f"{_PG}.{fqn}.{k}"] = v
return ret
def _unflatten_optim_state_dict(
optim: torch.optim.Optimizer,
state_dict: dict[str, ValueType],
info: _StateDictInfo,
) -> OptimizerStateType:
"""
This API unflattens the state_dict generated by _flatten_optim_state_dict().
See the docstring of _flatten_optim_state_dict() for more detail.
"""
state: DictValueType = {}
pg_state: ListDictValueType = []
return_osd: OptimizerStateType = {_STATE: state, _PG: pg_state}
for param_group in optim.param_groups:
pg_state.append({_PARAMS: []})
for param in param_group[_PARAMS]:
for fqn in info.fqn_param_mapping[param]:
# If a parameter is shared, only one of its FQNs will be used,
# so we need to verify whether this FQN is actually used in
# the state_dict.
if fqn in info.shared_params_mapping:
in_params = False
for k in param_group.keys():
if k == _PARAMS:
continue
flatten_key = f"{_PG}.{fqn}.{k}"
if flatten_key in state_dict:
in_params = True
break
else:
in_params = True
if not in_params:
continue
params = pg_state[-1][_PARAMS]
assert isinstance(params, list) # typing
params.append(fqn)
if not param.requires_grad:
continue
state[fqn] = {}
for state_name in optim.state[param].keys():
cast(DictValueType, state[fqn])[state_name] = state_dict[f"{_STATE}.{fqn}.{state_name}"]
first_param_fqn = cast(list[str], pg_state[-1][_PARAMS])[0]
for k in param_group.keys():
if k == _PARAMS:
continue
value = state_dict[f"{_PG}.{first_param_fqn}.{k}"]
if k not in pg_state[-1]:
pg_state[-1][k] = value
elif pg_state[-1][k] != value:
raise RuntimeError(
"All the parameters in the same parameter group should have "
f"the same saved param_group value. But {first_param_fqn}.{k} "
f"is {value} while other(s) is {pg_state[-1][k]}."
)
return return_osd
@torch.no_grad()
def _get_optim_state_dict(
model: nn.Module,
optimizers: tuple[torch.optim.Optimizer, ...],
info: _StateDictInfo,
) -> OptimizerStateType:
if not info.handle_optim:
return {}
optim_state_dict: OptimizerStateType = {_STATE: {}, _PG: []}
for optim in optimizers:
_init_optim_state(optim)
osd = _state_dict_fn(optim, "state_dict")()
if info.fsdp_modules:
with info.fsdp_context():
osd = FSDP.optim_state_dict(model, optim, osd)
# We need to specially handle FlatParameter FSDP as
# FlatParameter FSDP converts the FQNs.
# There is no easy way to do this conversion systematically.
# We can only use a string replacement without a correctness check.
if not osd:
continue
for k in list(osd[_STATE].keys()):
if "_orig_mod" in k:
osd[_STATE][k.replace("_orig_mod.", "")] = osd[_STATE].pop(k)
for g in osd[_PG]:
params = [k.replace("_orig_mod.", "") for k in g[_PARAMS]]
g[_PARAMS] = params
else:
params = list(chain.from_iterable(g[_PARAMS] for g in optim.param_groups))
param_pid_mapping = dict(zip(params, range(len(params)), strict=False))
fqn_pid_mapping = {}
for key, param in model.named_parameters():
fqns = _get_fqns(model, key)
assert len(fqns) == 1
fqn = next(iter(fqns))
if param not in param_pid_mapping:
continue
pid = param_pid_mapping[param]
fqn_pid_mapping[fqn] = pid
fqn_pid_mapping[pid] = fqn
for key in list(osd[_STATE].keys()):
fqn = fqn_pid_mapping[key]
osd[_STATE][fqn] = osd[_STATE].pop(key)
for group in osd[_PG]:
group[_PARAMS] = [fqn_pid_mapping[pid] for pid in group[_PARAMS]]
if not osd:
continue
cast(DictValueType, optim_state_dict[_STATE]).update(osd[_STATE])
cast(ListDictValueType, optim_state_dict[_PG]).extend(osd[_PG])
if info.flatten_optimizer_state_dict:
optim_state_dict = cast(OptimizerStateType, _flatten_optim_state_dict(optim_state_dict))
return _maybe_full_or_cpu_state_dict(optim_state_dict, info)
def _split_optim_state_dict(
model: nn.Module,
optim: torch.optim.Optimizer,
optim_state_dict: OptimizerStateType,
info: _StateDictInfo,
) -> OptimizerStateType:
"""
Extract the corresponding optim state_dict from ``optim_state_dict`` for
``optim`` and return the result optim state_dict.
Args:
model (nn.Module): the root model.
optim (torch.optim.Optimizer): the optimizer.
optim_state_dict (Dict[str, ValueType]): the superset optim state_dict that
contains the optim state_dict of ``optim``.
info (_StateDictInfo): state dict information.
Returns:
The optim state_dict of ``optim``.
"""
state: DictValueType = {}
pg_state: ListDictValueType = []
return_osd: OptimizerStateType = {_STATE: state, _PG: pg_state}
pg_mapping: dict[int, int] = {}
if all(isinstance(k, int) for k in cast(DictValueType, optim_state_dict[_STATE]).keys()):
return optim_state_dict
for param_group in optim.param_groups:
pg_state.append({_PARAMS: []})
for param in param_group[_PARAMS]:
for fqn in info.fqn_param_mapping[param]:
if fqn in info.shared_params_mapping:
in_params = False
for loaded_param_group in cast(ListDictValueType, optim_state_dict[_PG]):
if fqn in cast(list[str], loaded_param_group[_PARAMS]):
in_params = True
break
else:
in_params = True
if not in_params:
continue
params = pg_state[-1][_PARAMS]
assert isinstance(params, list)
params.append(fqn)
if param.requires_grad:
state[fqn] = cast(DictValueType, optim_state_dict[_STATE])[fqn]
for loaded_param_group in cast(ListDictValueType, optim_state_dict[_PG]):
if fqn in cast(list[str], loaded_param_group[_PARAMS]):
pg_mapping[id(loaded_param_group)] = len(return_osd[_PG]) - 1
if len(param_group[_PARAMS]) == 0:
# Param_group with empty params.
ret = []
for loaded_param_group in cast(ListDictValueType, optim_state_dict[_PG]):
if len(cast(list[str], loaded_param_group[_PARAMS])) == 0:
ret.append(loaded_param_group)
if len(ret) != 1:
raise ValueError(
"There are param groups that have zero parameters. "
"In such a case, DSD only support exactly one param group "
"with zero parameters."
"But the loaded state_dict has zero or more than one param groups "
"that have zero parameters."
)
if len(optim_state_dict[_PG]) != len(optim.param_groups):
raise ValueError(
"When there is a parameter group that has zero parameters, multiple optimizers are not supported."
)
pg_mapping[id(loaded_param_group)] = len(return_osd[_PG]) - 1
for param_group in cast(ListDictValueType, optim_state_dict[_PG]):
pg_idx = pg_mapping.get(id(param_group), -1)
if pg_idx == -1:
continue
for key, value in param_group.items():
if key == _PARAMS:
continue
# TODO: check if value is the same if exists.
pg_state[pg_idx][key] = value
return return_osd
@torch.no_grad()
def _load_optim_state_dict(
model: nn.Module,
optimizers: tuple[torch.optim.Optimizer, ...],
state_dict: OptimizerStateType,
info: _StateDictInfo,
) -> None:
if not info.handle_optim:
return
for optim in optimizers:
_init_optim_state(optim)
if state_dict:
if _STATE in state_dict:
optim_state_dict = _split_optim_state_dict(model, optim, state_dict, info)
else:
optim_state_dict = _unflatten_optim_state_dict(optim, cast(dict[str, ValueType], state_dict), info)
else:
optim_state_dict = {}
if info.fsdp_modules:
# We need to specially handle FlatParameter FSDP as
# FlatParameter FSDP converts the FQNs.
for original_fqn, _ in model.named_parameters():
fqns = _get_fqns(model, original_fqn)
fqns_with_compiler = _get_fqns(model, original_fqn, skip_compiler_prefix=False)
if fqns == fqns_with_compiler:
continue
assert len(fqns) == 1
fqn = fqns.pop()
fqn_with_compiler = fqns_with_compiler.pop()
for g in optim_state_dict[_PG]:
val = cast(dict[str, Any], g)
params = [key.replace(fqn, fqn_with_compiler) for key in val[_PARAMS]]
val[_PARAMS] = params
osd_state = cast(DictValueType, optim_state_dict[_STATE])
for k in list(osd_state.keys()):
if fqn in k:
osd_state[k.replace(fqn, fqn_with_compiler)] = osd_state.pop(k)
with info.fsdp_context():
optim_state_dict = FSDP.optim_state_dict_to_load(model, optim, optim_state_dict)
elif info.full_state_dict:
info.full_state_dict = False
local_state_dict = _get_optim_state_dict(model, (optim,), info)
info.full_state_dict = True
device = None
def _device(t):
if t.dim() > 0:
nonlocal device
if device is None:
device = t.device
elif device != t.device:
raise ValueError("Device mismatch")
return t
_ = tree_map_only(torch.Tensor, _device, local_state_dict)
assert device is not None
flatten_osd, osd_mapping = _flatten_state_dict(optim_state_dict)
flatten_local_osd, local_osd_mapping = _flatten_state_dict(local_state_dict)
if info.broadcast_from_rank0:
_broadcast_state_dict(flatten_osd, flatten_local_osd, device=device)
else:
_distribute_state_dict(flatten_osd, flatten_local_osd, device=device)
# This handles the case where optim holds parameters that differ from those in
# optim_state_dict: any entry missing from the local state_dict is copied over
# from the loaded one, so optim may end up with additional parameters.
for optim_key in flatten_osd.keys():
if optim_key not in flatten_local_osd:
assert optim_key in osd_mapping
flatten_local_osd[optim_key] = flatten_osd[optim_key]
local_osd_mapping[optim_key] = osd_mapping[optim_key]
optim_state_dict = _unflatten_state_dict(flatten_local_osd, local_osd_mapping)
for pg in optim_state_dict[_PG]:
if _PARAMS not in pg:
cast(dict[str, ValueType], pg)[_PARAMS] = []
# Note that we do not have to convert the FQN back to param id here if
# order in optim.param_groups[idx][_PARAMS] is the same as the one in
# optim_state_dict[_PG][idx][_PARAMS].
_state_dict_fn(optim, "load_state_dict")(state_dict=optim_state_dict)
def get_model_state_dict(
model: nn.Module,
*,
submodules: Optional[set[nn.Module]] = None,
options: Optional[StateDictOptions] = None,
) -> dict[str, ValueType]:
"""
Return the model state_dict of ``model``.
See ``get_state_dict`` for the detail usage.
Args:
model (nn.Module): the nn.Module of the model.
submodules (deprecated): Optional[set[nn.Module]]: only return the model parameters
that belong to the submodules.
options (StateDictOptions): the options to control how
model state_dict and optimizer state_dict should be returned. See
`StateDictOptions` for the details.
Returns:
The state_dict for ``model``.
:rtype: typing.Dict[str, ValueType]
"""
with _gc_context():
info = _verify_options(
model,
(),
optim_only=False,
submodules=submodules,
options=options,
)
model_state_dict = _get_model_state_dict(model, info)
_verify_state_dict(model_state_dict, {}, info)
return model_state_dict
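# Usage sketch (illustrative only; `fsdp_model` is a hypothetical FSDP-wrapped module):
# gathering a full, CPU-offloaded model state_dict is the common pattern for writing a
# single-file checkpoint from rank 0.
def _example_get_full_model_state_dict(fsdp_model: nn.Module) -> dict[str, ValueType]:
    options = StateDictOptions(full_state_dict=True, cpu_offload=True)
    return get_model_state_dict(fsdp_model, options=options)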
def get_optimizer_state_dict(
model: nn.Module,
optimizers: torch.optim.Optimizer | Iterable[torch.optim.Optimizer],
*,
submodules: Optional[set[nn.Module]] = None,
options: Optional[StateDictOptions] = None,
) -> OptimizerStateType:
"""
Return the combined state_dict for optimizers.
See ``get_state_dict`` for the detail usage.
Args:
model (nn.Module): the nn.Module of the model.
optimizers (Union[None, Optimizer, Iterable[Optimizer]]):
The optimizers that are used to optimize ``model``.
submodules (deprecated): Optional[set[nn.Module]]: only return the model parameters
that belong to the submodules.
options (StateDictOptions): the options to control how
model state_dict and optimizer state_dict should be returned. See
`StateDictOptions` for the details.
Returns:
The state_dict for ``optimizers``.
:rtype: OptimizerStateType
"""
with _gc_context():
optimizers = (optimizers,) if isinstance(optimizers, torch.optim.Optimizer) else tuple(optimizers)
info = _verify_options(
model,
optimizers,
optim_only=True,
submodules=submodules,
options=options,
)
optim_state_dict = _get_optim_state_dict(model, optimizers, info)
_verify_state_dict({}, optim_state_dict, info)
return optim_state_dict
def get_state_dict(
model: nn.Module,
optimizers: torch.optim.Optimizer | Iterable[torch.optim.Optimizer],
*,
submodules: Optional[set[nn.Module]] = None,
options: Optional[StateDictOptions] = None,
) -> tuple[dict[str, ValueType], OptimizerStateType]:
"""
Return the model state_dict and optimizers state_dict.
``get_state_dict`` can process any module that is parallelized by PyTorch
FSDP/fully_shard, DDP/replicate, tensor_parallel/parallelize_module, and any
combination of these parallelisms. The main functions of ``get_state_dict``
are: 1.) returning a model and optimizer state_dict that can be resharded
with a different number of trainers and/or different parallelisms.
2.) hiding the parallelism-specific state_dict APIs. Users don't have to call
these APIs.
3.) sanity checking the result state_dict.
The keys of the result state dictionary are the canonical FQNs (Fully
Qualified Names). A canonical FQN refers to the FQN based on a parameter's
position in an nn.Module hierarchy. More specifically, a canonical FQN to a
parameter is the FQN returned by ``module.named_parameters()`` or
``module.named_buffers()`` when the module is not distributed by any
parallelisms. Since the optimizer internally uses parameter IDs to represent
a parameter, there will be a conversion from the parameter IDs to the
canonical FQNs when calling this API.
``get_state_dict`` can also process a module that is not parallelized. In
such a case, ``get_state_dict`` only performs one function -- converting the
optimizer parameter IDs to the canonical FQNs.
Example:
>>> # xdoctest: +SKIP
>>> import torch
>>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
>>> from torch.nn.parallel import DistributedDataParallel as DDP
>>> from torch.distributed.checkpoint.state_dict import get_state_dict
>>> fsdp_model = FSDP(copy.deepcopy(model))
>>> fsdp_optim = torch.optim.Adam(fsdp_model.parameters(), lr=1e-3)
>>> ddp_model = DDP(copy.deepcopy(model))
>>> ddp_optim = torch.optim.Adam(ddp_model.parameters(), lr=1e-3)
>>> ddp_state_dict, ddp_optim_state_dict = get_state_dict(ddp_model, ddp_optim)
>>> fsdp_state_dict, fsdp_optim_state_dict = get_state_dict(
... fsdp_model, fsdp_optim
... )
>>> # if we simply call ddp_model.state_dict() and fsdp_model.state_dict(),
>>> # the asserts will fail.
>>> assert ddp_state_dict == fsdp_state_dict
>>> assert ddp_optim_state_dict == fsdp_optim_state_dict
Args:
model (nn.Module): the nn.Module of the model.
optimizers (Union[None, Optimizer, Iterable[Optimizer]]):
The optimizers that are used to optimize ``model``.
submodules (deprecated): Optional[set[nn.Module]]: only return the model parameters
that belong to the submodules.
options (StateDictOptions): the options to control how
model state_dict and optimizer state_dict should be returned. See
`StateDictOptions` for the details.
Returns:
``Tuple`` that contain model state_dict and optimizer state_dict.
:rtype: typing.Tuple[typing.Dict[str, ValueType], OptimizerStateType]
"""
with _gc_context():
optimizers = (optimizers,) if isinstance(optimizers, torch.optim.Optimizer) else tuple(optimizers)
info = _verify_options(
model,
optimizers,
optim_only=False,
submodules=submodules,
options=options,
)
model_state_dict = _get_model_state_dict(model, info)
optim_state_dict = _get_optim_state_dict(model, optimizers, info)
_verify_state_dict(model_state_dict, optim_state_dict, info)
return model_state_dict, optim_state_dict
def _unflatten_model_state_dict(
model: nn.Module,
state_dict: dict[nn.Module, dict[str, ValueType]] | dict[str, ValueType],
) -> dict[str, ValueType]:
if not state_dict:
return {}
if isinstance(next(iter(state_dict.keys())), nn.Module):
warnings.warn(
"Passing model_state_dict as a ``Dict[nn.Module, Dict[str, Any]]``"
"is deprecated and will be removed in 2.5. If you need this "
"feature, please preprocessing the model_state_dict to achieve the "
"same functionality.",
FutureWarning,
)
cast_state_dict = cast(dict[nn.Module, dict[str, ValueType]], state_dict)
new_state_dict: dict[str, ValueType] = {}
for submodule, sub_state_dict in cast_state_dict.items():
for name, m in model.named_modules():
if m != submodule:
continue
fqns = _get_fqns(model, name)
assert len(fqns) == 1, "FQNs for a submodule should only have 1 element"
prefix = f"{next(iter(fqns))}."
new_state_dict.update({prefix + subfqn: value for subfqn, value in sub_state_dict.items()})
return new_state_dict
else:
return cast(dict[str, ValueType], state_dict)
def set_model_state_dict(
model: nn.Module,
model_state_dict: dict[str, ValueType],
*,
options: Optional[StateDictOptions] = None,
) -> _IncompatibleKeys:
"""Load the model state_dict.
The counterpart of ``get_model_state_dict`` to set the state_dict to the
model. See ``set_state_dict`` for the detail usage.
Args:
model (nn.Module): the nn.Module of the model.
model_state_dict: (Dict[str, ValueType]):
the model state_dict to load. If a key of the ``model_state_dict``
is an nn.Module, the key is a submodule of ``model`` and the value should
be the state_dict of the submodule. When loading the state_dict,
the prefix of the submodule will be appended to the state_dict keys.
options (StateDictOptions): the options to control how
model state_dict and optimizer state_dict should be loaded. See
`StateDictOptions` for the details.
Returns:
``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields:
* **missing_keys** is a list of str containing the missing keys
* **unexpected_keys** is a list of str containing the unexpected keys
:type model_state_dict: typing.Dict[str, ValueType]
"""
model_state_dict: dict[str, ValueType] = _unflatten_model_state_dict(model, model_state_dict)
with _gc_context():
info = _verify_options(model, (), optim_only=False, options=options)
_verify_state_dict(model_state_dict, {}, info)
return _load_model_state_dict(model, model_state_dict, info)
def set_optimizer_state_dict(
model: nn.Module,
optimizers: torch.optim.Optimizer | Iterable[torch.optim.Optimizer],
optim_state_dict: OptimizerStateType,
*,
options: Optional[StateDictOptions] = None,
) -> None:
"""Load the optimizers state_dict.
The counterpart of ``get_optimizer_state_dict`` to set the state_dict to the
optimizers. See ``set_state_dict`` for the detail usage.
WARN: ``set_optimizer_state_dict`` can only be called before ``backward()`` or after
``step()`` is called on the optimizers. Otherwise, the optimizer states won't be
initialized correctly.
Args:
model (nn.Module): the nn.Module of the model.
optimizers (Union[Optimizer, Iterable[Optimizer]]):
The optimizers that are used to optimize ``model``.
optim_state_dict: OptimizerStateType:
the optimizer state_dict to load.
options (StateDictOptions): the options to control how
model state_dict and optimizer state_dict should be loaded. See
`StateDictOptions` for the details.
Returns:
None
:type optim_state_dict: typing.OptimizerStateType
"""
with _gc_context():
optimizers = (optimizers,) if isinstance(optimizers, torch.optim.Optimizer) else tuple(optimizers)
info = _verify_options(model, optimizers, optim_only=True, options=options)
_verify_state_dict({}, optim_state_dict, info)
_load_optim_state_dict(model, optimizers, optim_state_dict, info)
def set_state_dict(
model: nn.Module,
optimizers: torch.optim.Optimizer | Iterable[torch.optim.Optimizer],
*,
model_state_dict: dict[str, ValueType],
optim_state_dict: OptimizerStateType,
options: Optional[StateDictOptions] = None,
) -> _IncompatibleKeys:
"""Load the model state_dict and optimizers state_dict.
The counterpart of ``get_state_dict`` to set the state_dict to the model and
optimizers. The given ``model_state_dict`` and ``optim_state_dict`` do not
have to be returned by ``get_state_dict`` but must meet the following
requirements: 1) all FQNs are canonical FQNs as defined in ``get_state_dict``,
2) if a tensor is sharded, it must be either a ShardedTensor or DTensor,
3) optimizer state_dict cannot contain the parameter IDs; the keys should be
the canonical FQNs.
WARN: ``set_state_dict`` can only be called before ``backward()`` or after ``step()``
is called on the optimizers. Otherwise, the optimizer states won't be initialized
correctly.
Args:
model (nn.Module): the nn.Module of the model.
optimizers (Union[Optimizer, Iterable[Optimizer]]):
The optimizers that are used to optimize ``model``.
model_state_dict: (Union[Dict[nn.Module, Dict[str, ValueType]], Dict[str, ValueType]]):
the model state_dict to load. If a key of the ``model_state_dict``
is an nn.Module, the key is a submodule of ``model`` and the value should
be the state_dict of the submodule. When loading the state_dict,
the prefix of the submodule will be appended to the state_dict keys.
optim_state_dict: OptimizerStateType:
the optimizer state_dict to load.
options (StateDictOptions): the options to control how
model state_dict and optimizer state_dict should be loaded. See
`StateDictOptions` for the details.
Returns:
``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields:
* **missing_keys** is a list of str containing the missing keys of the model state_dict.
* **unexpected_keys** is a list of str containing the unexpected keys of the model state_dict.
:type model_state_dict: typing.Dict[str, ValueType]
:type optim_state_dict: typing.OptimizerStateType
"""
model_state_dict: dict[str, ValueType] = _unflatten_model_state_dict(model, model_state_dict)
with _gc_context():
optimizers = (optimizers,) if isinstance(optimizers, torch.optim.Optimizer) else tuple(optimizers)
info = _verify_options(model, optimizers, optim_only=not model_state_dict, options=options)
_verify_state_dict(model_state_dict, optim_state_dict, info)
_load_optim_state_dict(model, optimizers, optim_state_dict, info)
return _load_model_state_dict(model, model_state_dict, info)
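# Round-trip sketch (illustrative only; `model` and `optim` are hypothetical): the typical
# checkpoint flow captures both dicts with ``get_state_dict`` and later restores them with
# ``set_state_dict``. Because the keys are canonical FQNs, the saved pair can be reloaded
# under a different number of trainers or a different parallelism.
def _example_state_dict_round_trip(model: nn.Module, optim: torch.optim.Optimizer) -> None:
    model_sd, optim_sd = get_state_dict(model, optim)
    # ... persist model_sd / optim_sd here, e.g. via torch.distributed.checkpoint ...
    set_state_dict(model, optim, model_state_dict=model_sd, optim_state_dict=optim_sd)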
# TODO: correct the state_dict function signature.
# TODO: this API is not yet fully tested. Make it private
@no_type_check
def _patch_model_state_dict(
model: nn.Module,
*,
options: Optional[StateDictOptions] = None,
) -> None:
"""Patch the ``state_dict`` and ``load_state_dict`` attributes of ``model``.
Patch the ``state_dict`` and ``load_state_dict`` attributes of ``model`` to
be partial functions that call ``get_model_state_dict`` and ``set_model_state_dict``.
Example:
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
model = FSDP(model)
_patch_model_state_dict(model)
Args:
model (nn.Module): the nn.Module of the model.
options (StateDictOptions): the options to control how
model state_dict and optimizer state_dict should be loaded. See
`StateDictOptions` for the details.
Returns:
None
"""
_state_dict_call = functools.partial(
get_model_state_dict,
model=model,
options=options,
)
def state_dict_call():
return _state_dict_call()
model.state_dict = state_dict_call
_load_state_dict_call = functools.partial(
set_model_state_dict,
model=model,
options=options,
)
def load_state_dict_call(state_dict: dict[str, Any]):
_load_state_dict_call(model_state_dict=state_dict)
model.load_state_dict = load_state_dict_call
_patched_state_dict.add(state_dict_call)
_patched_state_dict.add(load_state_dict_call)
# TODO: correct the load_state_dict function signature.
# TODO: this API is not yet fully tested. Make it private
@no_type_check
def _patch_optimizer_state_dict(
model: nn.Module,
*,
optimizers: tuple[torch.optim.Optimizer, ...],
options: Optional[StateDictOptions] = None,
) -> None:
"""Patch the ``state_dict`` and ``load_state_dict`` attributes of ``optimizers``.
Patch the ``state_dict`` and ``load_state_dict`` attributes of ``optimizers`` to
be partial functions that call ``get_optimizer_state_dict`` and ``set_optimizer_state_dict``.
Note that if there are multiple optimizers, all of the optimizers will be patched.
So users only need to call ``state_dict()`` on any one of them to get the full result.
Example:
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
model = FSDP(model)
optim = torch.optim.Adam(model.parameters())
_patch_optimizer_state_dict(model, optimizers=(optim,))
Args:
model (nn.Module): the nn.Module of the model.
optimizers (tuple[torch.optim.Optimizer, ...]): the optimizers that are used to optimize ``model``.
options (StateDictOptions): the options to control how
model state_dict and optimizer state_dict should be loaded. See
`StateDictOptions` for the details.
Returns:
None
"""
_state_dict_call = functools.partial(
get_optimizer_state_dict,
model=model,
optimizers=optimizers,
options=options,
)
def state_dict_call():
return _state_dict_call()
_load_state_dict_call = functools.partial(
set_optimizer_state_dict,
model=model,
optimizers=optimizers,
options=options,
)
def load_state_dict_call(state_dict: dict[str, Any]):
_load_state_dict_call(optim_state_dict=state_dict)
_patched_state_dict.add(state_dict_call)
_patched_state_dict.add(load_state_dict_call)
optimizers = (optimizers,) if isinstance(optimizers, torch.optim.Optimizer) else tuple(optimizers)
for optim in optimizers:
optim.state_dict = state_dict_call
optim.load_state_dict = load_state_dict_call
| {
"repo_id": "verl-project/verl",
"file_path": "verl/third_party/torch/distributed/checkpoint/state_dict.py",
"license": "Apache License 2.0",
"lines": 1269,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/workers/engine/base.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The abstract base class defining the interface for model training engines.
"""
from abc import abstractmethod
from contextlib import nullcontext
from typing import Any, Callable, ContextManager, Generator, Optional
import torch
from tensordict import TensorDict
from verl.utils.device import get_device_name
from verl.utils.tensordict_utils import maybe_fix_3d_position_ids
class BaseEngine:
"""
Abstract base class defining the interface for model training engines. Interface is subject to
change before release.
Engine implementations must subclass BaseEngine and provide concrete behavior for all methods.
"""
def initialize(self):
"""
Instantiate or load the model, optimizer, and learning rate scheduler.
Should prepare all components necessary for training or evaluation.
"""
raise NotImplementedError
@property
@abstractmethod
def is_param_offload_enabled(self) -> bool:
"""Whether parameter offloading is enabled."""
raise NotImplementedError
@property
@abstractmethod
def is_optimizer_offload_enabled(self) -> bool:
"""Whether optimizer offloading is enabled."""
raise NotImplementedError
def train_mode(self, **kwargs):
"""
Context manager entry for switching the engine and model into training mode.
Usage:
with engine.train_mode():
# runs in training mode
"""
raise NotImplementedError
def eval_mode(self, **kwargs):
"""
Context manager entry for switching the engine and model into evaluation mode.
Usage:
with engine.eval_mode():
# runs in evaluation mode
"""
raise NotImplementedError
def optimizer_zero_grad(self):
"""
Zero the gradients of the optimizer.
"""
raise NotImplementedError
def optimizer_step(self):
"""
Perform an optimization step using the optimizer.
"""
raise NotImplementedError
def lr_scheduler_step(self):
"""
Advance the learning rate scheduler by one step.
Returns:
current_lr (float or list[float]): Updated learning rate(s).
"""
raise NotImplementedError
def forward_backward_batch(self, data: TensorDict, loss_function: Callable, forward_only=False) -> Any:
"""
Perform a forward pass and optionally a backward pass on a batch of data.
Args:
data: The input data for the forward pass, typically containing tensors and metadata.
loss_function: The loss function to optimize. See `verl.workers.roles.utils.losses` for examples.
forward_only: If True, perform only the forward pass. If False, perform forward and backward pass.
Returns:
Any: The output of the forward pass, which can be used for loss computation or other purposes.
"""
raise NotImplementedError
def train_batch(self, data: TensorDict, loss_function: Callable) -> Any:
"""
Perform a training step on a batch of data.
Args:
data: The input data for training, typically containing tensors and metadata.
loss_function: A function that computes the loss and metrics given a batch and predictions.
Returns:
dict[str, torch.Tensor]: A dictionary containing the aggregated training metrics for the batch.
"""
maybe_fix_3d_position_ids(data)
self.optimizer_zero_grad()
outputs = self.forward_backward_batch(data, loss_function, forward_only=False)
grad_norm = self.optimizer_step()
if self.is_mp_src_rank_with_outputs():
assert "grad_norm" not in outputs["metrics"]
outputs["metrics"]["grad_norm"] = grad_norm
return outputs
def infer_batch(self, data: TensorDict, loss_function: Optional[Callable] = None) -> Any:
"""
Perform inference on a batch of data.
Args:
data: The input data for inference, typically containing tensors and metadata.
loss_function: Optional loss function; when provided, it is used to compute metrics during the forward-only pass.
Returns:
Any: The output of the inference, which can be used for predictions or other purposes.
"""
# see comments from train_batch
maybe_fix_3d_position_ids(data)
with torch.no_grad():
outputs = self.forward_backward_batch(data, loss_function, forward_only=True)
return outputs
def get_per_tensor_param(self) -> tuple[Generator[tuple[str, torch.Tensor], None, None], Optional[dict]]:
"""
Get a generator that yields per-tensor parameters and optional peft config.
Returns:
Generator[tuple[str, torch.Tensor]]: A generator that yields tuples of parameter names and tensors.
Optional[dict]: Optional peft config.
"""
raise NotImplementedError
def get_data_parallel_size(self):
raise NotImplementedError
def get_data_parallel_rank(self):
raise NotImplementedError
def get_data_parallel_group(self):
raise NotImplementedError
def to(self, device: str, model: bool = True, optimizer: bool = True, grad: bool = True):
"""
Move model parameters, optimizer states, or both to the specified device.
Args:
device: Target device identifier.
model: If True, move the model.
optimizer: If True, move the optimizer states.
grad: If True, move the gradient buffer.
"""
if not model:
assert not optimizer and not grad, "Optimizer states and gradients can only be moved together with the model"
def save_checkpoint(
self,
local_path: str,
hdfs_path: Optional[str] = None,
global_step: int = 0,
max_ckpt_to_keep: Optional[int] = None,
**kwargs,
) -> None:
"""
Save model, optimizer, and scheduler states to a checkpoint.
Args:
local_path: Local filesystem path to save checkpoint.
hdfs_path: Optional HDFS path to copy checkpoint.
global_step: Integer training step number for naming.
max_ckpt_to_keep: Maximum number of recent checkpoints to retain.
**kwargs: Arbitrary keyword arguments.
"""
raise NotImplementedError
def load_checkpoint(
self, local_path: str, hdfs_path: Optional[str] = None, del_local_after_load: bool = True, **kwargs
) -> None:
"""
Load model, optimizer, and scheduler states from a checkpoint.
Args:
local_path: Local filesystem path of the checkpoint.
hdfs_path: Optional HDFS path where checkpoint is stored.
del_local_after_load: Whether to delete local copy after loading.
**kwargs: Arbitrary keyword arguments.
"""
raise NotImplementedError
def is_mp_src_rank_with_outputs(self):
"""
Whether the current rank is the first rank in the model parallel group that contains model outputs.
"""
raise NotImplementedError
def disable_adapter(self) -> ContextManager:
"""
Temporarily disable all LoRA adapters in the model while inside the returned context.
"""
return nullcontext()
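# Usage sketch (illustrative only; `engine`, `batch`, and `loss_fn` are hypothetical, and the
# loss-function signature is defined by the concrete engine / verl.workers.roles.utils.losses,
# not by this base class): a single training step wraps the batch in train_mode() so that
# parameter/optimizer load-offload is handled automatically.
def _example_training_step(engine: BaseEngine, batch: TensorDict, loss_fn: Callable) -> Any:
    with engine.train_mode():
        outputs = engine.train_batch(batch, loss_fn)  # forward + backward + optimizer step
        engine.lr_scheduler_step()
    return outputs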
class BaseEngineCtx:
def __init__(self, engine: BaseEngine, mode, **kwargs):
"""Base Engine context that handles load and offload
Args:
engine:
**kwargs:
"""
self.engine = engine
self.mode = mode
assert self.mode in ("train", "eval")
self.disable_auto_offload = kwargs.pop("disable_auto_offload", False)
def _context_switch(self, device):
if self.disable_auto_offload:
return
should_move_model = self.engine.is_param_offload_enabled if device == "cpu" else True
should_move_optimizer = self.engine.is_optimizer_offload_enabled if device == "cpu" else True
if self.mode == "eval":
self.engine.to(device=device, model=should_move_model, optimizer=False, grad=False)
elif self.mode == "train":
self.engine.to(
device=device,
model=should_move_model,
optimizer=should_move_optimizer,
grad=should_move_model,
)
def __enter__(self):
self._context_switch(get_device_name())
self.engine.mode = self.mode
def __exit__(self, exc_type, exc_val, exc_tb):
self._context_switch("cpu")
self.engine.mode = None
class EngineRegistry:
"""
A registry for managing and instantiating different types of training engines.
This class uses a dictionary to store engine classes, mapping a string key to each class.
It provides a decorator `register` to add new engines to the registry and a `new` method
to create an instance of a registered engine.
"""
_engines = {}
@classmethod
def register(cls, model_type: str, backend: list[str] | str, device: list[str] | str = "cuda"):
"""
A class method decorator that registers an engine class with a given key.
This allows for dynamic instantiation of engine classes by their registered key.
Args:
model_type (str): The type of the model
backend (list[str] | str): The backend to use for the model type
device (list[str] | str): The device type (e.g., "cuda", "npu", "cpu") this engine supports,
default is "cuda"
Returns:
A decorator function that takes an engine class and registers it.
"""
def decorator(engine_class):
assert issubclass(engine_class, BaseEngine)
if model_type not in cls._engines:
cls._engines[model_type] = {}
backends = backend if isinstance(backend, list) else [backend]
devices = device if isinstance(device, list) else [device]
for current_backend in backends:
for current_device in devices:
if current_backend not in cls._engines[model_type]:
cls._engines[model_type][current_backend] = {}
if current_device not in cls._engines[model_type][current_backend]:
cls._engines[model_type][current_backend][current_device] = engine_class
return engine_class
return decorator
@classmethod
def get_engine_cls(cls, model_type: str, backend: str):
assert model_type in cls._engines, f"Unknown model_type: {model_type}"
assert backend in cls._engines[model_type], f"Unknown backend: {backend}"
device = get_device_name()
assert device in cls._engines[model_type][backend], (
f"Unknown device: {device} for model_type: {model_type} and backend: {backend}"
)
return cls._engines[model_type][backend][device]
@classmethod
def new(cls, model_type, backend, *args, **kwargs):
"""
Create a new training engine instance for the given model type and backend.
Args:
model_type: The registered model type of the engine.
backend: The registered backend for the model type.
*args: Variable length argument list forwarded to the engine constructor.
**kwargs: Arbitrary keyword arguments forwarded to the engine constructor.
Returns:
engine: An instance of the engine class registered for ``model_type`` and ``backend``.
Raises:
AssertionError: If no engine is registered for the given model type, backend, or current device.
"""
engine_cls = cls.get_engine_cls(model_type, backend)
return engine_cls(*args, **kwargs)
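# Registration sketch (illustrative only; the "demo_model"/"demo_backend" names and
# `_DemoEngine` are hypothetical): running this function registers a concrete engine class
# and resolves it back through the registry for the current device.
def _example_register_and_resolve_engine() -> None:
    @EngineRegistry.register(model_type="demo_model", backend="demo_backend", device=get_device_name())
    class _DemoEngine(BaseEngine):
        pass

    assert EngineRegistry.get_engine_cls("demo_model", "demo_backend") is _DemoEngine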
| {
"repo_id": "verl-project/verl",
"file_path": "verl/workers/engine/base.py",
"license": "Apache License 2.0",
"lines": 274,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/workers/engine/fsdp/utils.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import torch
from torch.distributed.device_mesh import init_device_mesh
from verl.utils.device import get_device_name, is_npu_available
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
def apply_npu_fsdp_patches():
"""Apply NPU patches for FSDP backend if NPU is available."""
if is_npu_available:
try:
import verl.models.transformers.npu_patch # noqa
if torch.distributed.is_initialized() and torch.distributed.get_rank() == 0:
logger.info("Applied NPU patches for FSDP backend")
except Exception as e:
logger.warning(f"Failed to apply NPU patches: {e}")
def create_device_mesh(world_size, fsdp_size):
"""
Create a device mesh for distributed training based on the world size and FSDP size.
Args:
world_size (int): Total number of processes in the distributed training setup.
fsdp_size (int): Size of the Fully Sharded Data Parallel (FSDP) group.
Returns:
torch.distributed.device_mesh.DeviceMesh: The initialized device mesh.
"""
device_name = get_device_name()
if fsdp_size < 0 or fsdp_size >= world_size:
device_mesh = init_device_mesh(device_name, mesh_shape=(world_size,), mesh_dim_names=["fsdp"])
else:
device_mesh = init_device_mesh(
device_name, mesh_shape=(world_size // fsdp_size, fsdp_size), mesh_dim_names=["ddp", "fsdp"]
)
return device_mesh
def get_sharding_strategy(device_mesh):
"""
Determine the appropriate sharding strategy based on the number of dimensions of the device mesh.
Args:
device_mesh (torch.distributed.device_mesh.DeviceMesh): The device mesh used for distributed training.
Returns:
torch.distributed.fsdp.ShardingStrategy: The sharding strategy to be used with FSDP.
Raises:
NotImplementedError: If the number of dimensions of the device mesh is neither 1 nor 2.
"""
from torch.distributed.fsdp import ShardingStrategy
if device_mesh.ndim == 1:
sharding_strategy = ShardingStrategy.FULL_SHARD
elif device_mesh.ndim == 2:
sharding_strategy = ShardingStrategy.HYBRID_SHARD
else:
raise NotImplementedError(f"Get device mesh ndim={device_mesh.ndim}, but only support 1 or 2")
return sharding_strategy
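# Usage sketch (illustrative only; the world_size/fsdp_size values are hypothetical and
# torch.distributed must already be initialized): world_size=8 with fsdp_size=4 produces a
# 2x4 ("ddp", "fsdp") mesh, which maps to ShardingStrategy.HYBRID_SHARD; a negative fsdp_size
# or fsdp_size >= world_size produces a 1-D ("fsdp",) mesh and FULL_SHARD.
def _example_build_fsdp_mesh(world_size: int = 8, fsdp_size: int = 4):
    device_mesh = create_device_mesh(world_size=world_size, fsdp_size=fsdp_size)
    sharding_strategy = get_sharding_strategy(device_mesh)
    return device_mesh, sharding_strategy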
| {
"repo_id": "verl-project/verl",
"file_path": "verl/workers/engine/fsdp/utils.py",
"license": "Apache License 2.0",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:scripts/rollout_viewer.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import re
import traceback
from pathlib import Path
from typing import Annotated, Optional
import aiofiles
try:
import ujson as json
except ImportError:
import json
import typer
from rich.highlighter import ReprHighlighter
from rich.markdown import Markdown
from rich.table import Table
from rich.text import Text
from textual import on
from textual.app import App, ComposeResult
from textual.containers import Horizontal, Vertical, VerticalScroll
from textual.widgets import Input, ProgressBar, Select, SelectionList, Static
INDEX_KEY = "__IDX"
FILE_SUFFIX = ".jsonl"
def check_textual_version():
# check if textual version is equal to 0.52.1
import textual
from packaging.version import Version
if Version(textual.__version__) != Version("0.52.1"):
raise ImportError(f"Textual version {textual.__version__} is not supported, please pip install textual==0.52.1")
check_textual_version()
async def load_path(p: Path, data: dict, mask_strs: str, idx: int, pbar):
samples = []
async with aiofiles.open(p, encoding="utf-8") as f:
async for line in f:
d = json.loads(line)
for k in d:
if isinstance(d[k], str):
if mask_strs:
d[k] = re.sub(rf"{mask_strs}", "*", d[k])
else:
d[k] = json.dumps(d[k], ensure_ascii=False, indent=4)
d[INDEX_KEY] = len(samples)
samples.append(d)
data[idx] = {"samples": samples}
print(f"path {p} loaded")
pbar.advance(1)
async def load_dir(path: Path, data: dict[int, dict], pbar, mask_strs: str = ""):
paths = list(path.glob(f"*{FILE_SUFFIX}"))
paths = sorted(paths, key=lambda x: int(x.stem))
tasks = [load_path(p, data, mask_strs, i, pbar) for i, p in enumerate(paths)]
await asyncio.gather(*tasks)
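# Layout sketch (illustrative only; the ``dump_dir`` path below is hypothetical): the viewer
# expects one JSONL file per rollout step, named after the step number so the numeric sort on
# ``x.stem`` works, e.g. dump_dir/1.jsonl, dump_dir/2.jsonl, ..., with one JSON object per
# line describing a single sample. A minimal standalone load could look like:
#
#   data: dict[int, dict] = {}
#   pbar = ProgressBar(total=2, name="data load progress")
#   asyncio.run(load_dir(Path("dump_dir"), data, pbar))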
class Highlighter(ReprHighlighter):
highlights = ReprHighlighter.highlights + [
r"(?P<tag_name>[][\<\>{}()\|()【】\[\]=`])",
r"\<\|(?P<tag_name>[\w\W]*?)\|\>",
]
def center_word_with_equals_exactly(word: str, total_length: int, char: str = "=") -> str:
if len(word) > total_length:
return word
padding = total_length - len(word)
left_pad = (padding) // 2
right_pad = (padding + 1) // 2
return char * left_pad + " " + word + " " + char * right_pad
def highlight_keyword(content: str, keyword: Optional[str]):
if not keyword:
return Text(content)
text = Text()
parts = content.split(keyword)
for i, part in enumerate(parts):
text.append(part, style=None)
if i < len(parts) - 1:
# text.append(keyword, style=Style(color="#d154d1", bgcolor="yellow", bold=True))
text.append(keyword, style="on #8f51b5")
return text
help_doc = """
⌨️ keybinds:
- `f/esc`: find/cancel
- `tab/←/→`: change focus
- `j/k`: page down/up
- `g/G`: scroll home/end
- `n/N`: next sample/step
- `p/P`: previous sample/step
- `s`: switch display mode
- plain text
- rich table
"""
class JsonLineViewer(App):
BINDINGS = [
("left", "focus_previous", "Focus Previous"),
("right", "focus_next", "Focus Next"),
("s", "swith_render", "switch render"),
# control
("n", "next_sample", "Next Sample"),
("N", "next_step", "Next Step"),
("p", "previous_sample", "Previous Sample"),
("P", "previous_step", "Previous Step"),
# search
("f", "toggle_search", "find"),
("enter", "next_search", "find next"),
("escape", "cancel_search", "cancel find"),
# scroll
("j", "page_down", "page down"),
("k", "page_up", "page up"),
("g", "page_home", "page home"),
("G", "page_end", "page end"),
]
CSS = """
Select:focus > SelectCurrent {
border: tall #8f51b5;
}
Select.-expanded > SelectCurrent {
border: tall #8f51b5;
}
#select-container {
width: 15%;
height: 100%;
align: center top;
}
#search-container {
height: 10%;
align: center top;
}
#search-box {
width: 50%;
}
#reqid-box {
width: 50%;
}
"""
def __init__(self, step_num: int, data: dict[int, dict], pbar):
super().__init__()
self.step_num = step_num
self.data = data
self.render_table = False
self.selected_step_index = 0
self.selected_sample_index = 0
self.pbar = pbar
self.matches = []
self.current_match_index = 0
self.highlighter = Highlighter()
first_samples = data[list(data.keys())[0]]["samples"]
# Prepare the initial field filter list (all keys from the first sample)
self.filter_fields = [(f, f, True) for f in first_samples[0].keys()]
# Internal set used for fast membership checks when we add new fields on the fly.
# We keep it here so that when new columns appear in later steps (e.g. `request_id`),
# they can be added to the UI automatically without restarting the viewer.
self._field_set: set[str] = set(first_samples[0].keys())
self.sample_num = len(first_samples)
def compose(self) -> ComposeResult:
with Horizontal(id="search-container"):
yield Input(placeholder="find something...", id="search-box")
yield Input(placeholder="request id...", id="reqid-box")
with Vertical(id="search-container2"):
yield self.pbar
yield Static("", id="search-status")
with Horizontal():
with Vertical(id="select-container"):
yield Static("\n")
yield Static(
renderable=Markdown(
help_doc,
),
markup=False,
)
yield Static("\n")
yield Select(
id="step-select",
value=0,
prompt="select step",
options=[("step: 1", 0)],
allow_blank=False,
)
yield Select(
id="sample-select",
value=0,
prompt="select sample",
options=[("sample: 1", 0)],
allow_blank=False,
)
yield Select(
id="sample-sort",
value=0,
prompt="排序",
options=[
("sort", 0),
("score asc", 1),
("score desc", 2),
],
allow_blank=False,
)
yield SelectionList[int](("Select ALL", 1, True), id="fields-select-all")
with VerticalScroll(id="scroll-view2"):
yield SelectionList[str](*self.filter_fields, id="fields-select")
with VerticalScroll(id="scroll-view"):
yield Static(id="content", markup=False)
async def on_mount(self) -> None:
self.step_select = self.query_one("#step-select", Select)
self.sample_select = self.query_one("#sample-select", Select)
self.sample_sort = self.query_one("#sample-sort", Select)
self.content_display = self.query_one("#content", Static)
self.search_box = self.query_one("#search-box", Input)
self.reqid_box = self.query_one("#reqid-box", Input)
self.scroll_view = self.query_one("#scroll-view", VerticalScroll)
self.search_status = self.query_one("#search-status", Static)
self.fields_select = self.query_one("#fields-select", SelectionList)
self.fields_select.border_title = "field filter"
if self.data:
self.step_select.set_options([(f"step: {i + 1}", i) for i in range(self.step_num)])
self.sample_select.set_options([(f"sample: {i + 1}", i) for i in range(self.sample_num)])
self.step_select.focus()
await self.update_content()
def update_result_options(self, offset: int = 0, sort_desc: Optional[bool] = None):
options = []
if isinstance(self.selected_step_index, int) and self.selected_step_index < len(self.data):
if self.sample_num is None or sort_desc is not None:
samples = self.data[self.selected_step_index].get("samples", [])
if not samples:
self.selected_sample_index = offset
return
if sort_desc is not None:
samples = sorted(
samples,
key=lambda x: x.get("score", x.get("score_1", 0)),
reverse=sort_desc,
)
options = [(f"sample: {r[INDEX_KEY] + 1}", r[INDEX_KEY]) for r in samples]
self.sample_select.set_options(options)
self.sample_num = len(samples)
if sort_desc is not None and options:
self.selected_sample_index = options[0][1]
else:
self.selected_sample_index = offset
async def update_content(self, search_keyword: Optional[str] = None):
content = ""
try:
samples = self.data[self.selected_step_index].get("samples", [])
content_dict_full = samples[self.selected_sample_index]
# Dynamically track any NEW keys that appear and add them to the field filter.
self._update_fields_select(content_dict_full.keys())
# Apply field selection filter (only show selected fields)
content_dict = {k: v for k, v in content_dict_full.items() if k in self.fields_select.selected}
if self.render_table:
content = Table("key", "value", show_lines=True)
for k in content_dict:
v = content_dict[k]
v = f"{v}"
content.add_row(
k,
self.highlighter(highlight_keyword(v, search_keyword)),
)
else:
text = Text()
for k in content_dict:
v = content_dict[k]
s = center_word_with_equals_exactly(k, 64) + f"\n{v}\n"
text.append(highlight_keyword(s, search_keyword))
content = self.highlighter(text)
except KeyError:
content = f"Loading data asynchronously, progress: {len(self.data)}/{self.step_num} step"
except Exception:
content = self.highlighter(traceback.format_exc())
self.content_display.update(content)
# ---------------------------------------------------------------------
# Request-ID jump logic
# ---------------------------------------------------------------------
@on(Input.Submitted, "#reqid-box")
async def on_reqid_submitted(self, event: Input.Submitted) -> None:
"""Jump to the sample that has a matching `request_id`."""
req_id_raw = event.value.strip()
# Remove hyphens so search is tolerant to different id formats
req_id = req_id_raw.replace("-", "")
if not req_id:
return
found = False
for step_idx, step_data in self.data.items():
for sample in step_data.get("samples", []):
sample_id = str(sample.get("request_id", ""))
if sample_id.replace("-", "") == req_id:
# Update selected indices
self.selected_step_index = step_idx
self.step_select.value = step_idx
# Ensure sample list is updated and select sample
self.update_result_options(offset=sample[INDEX_KEY])
self.selected_sample_index = sample[INDEX_KEY]
self.sample_select.value = sample[INDEX_KEY]
await self._clear_search()
await self.update_content()
found = True
break
if found:
break
if not found:
self.search_status.update(Text(f"request_id '{req_id_raw}' not found", style="bold red"))
else:
# Keep the typed id in the input box so users see what was searched.
pass
# ---------------------------------------------------------------------
# Helper: add new fields to SelectionList on-the-fly
# ---------------------------------------------------------------------
def _update_fields_select(self, keys):
"""Add any unseen *keys* to the field-selection widget so they can be toggled.
The viewer is often launched with only the first step loaded. Later steps may
introduce new columns (e.g. `request_id`). This helper ensures those fields
become visible without requiring a restart.
"""
# Ensure we have the widget (only after on_mount)
if not hasattr(self, "fields_select"):
return
for k in keys:
if k not in self._field_set:
self._field_set.add(k)
try:
# By default, new fields are selected so they appear immediately.
self.fields_select.add_option(k, k, selected=True)
except Exception:
# Fallback for older textual versions where signature is different.
self.fields_select.add_option((k, k, True))
@on(Select.Changed, "#step-select")
async def step_changed(self, event):
self.selected_step_index = event.value
self.update_result_options()
await self.update_content()
@on(Select.Changed, "#sample-select")
async def sample_changed(self, event):
self.selected_sample_index = event.value
await self._clear_search()
await self.update_content()
@on(Select.Changed, "#sample-sort")
async def sort_changed(self, event):
v = event.value
self.update_result_options(sort_desc=None if v == 0 else False if v == 1 else True)
await self.update_content()
@on(SelectionList.SelectedChanged, "#fields-select")
async def fields_changed(self, event):
await self.update_content()
@on(SelectionList.SelectedChanged, "#fields-select-all")
async def fields_all_changed(self, event):
s = self.query_one("#fields-select-all", SelectionList)
if s.selected:
self.fields_select.select_all()
else:
self.fields_select.deselect_all()
def action_focus_previous(self):
self.screen.focus_previous()
def action_focus_next(self):
self.screen.focus_next()
async def action_next_step(self) -> None:
self.selected_step_index += 1
if self.selected_step_index >= self.step_num:
self.selected_step_index = 0
self.step_select.value = self.selected_step_index
self.update_result_options()
await self.update_content()
async def action_next_sample(self) -> None:
self.selected_sample_index += 1
if not self.sample_num or self.selected_sample_index >= self.sample_num:
self.selected_sample_index = 0
self.sample_select.value = self.selected_sample_index
await self._clear_search()
await self.update_content()
async def action_previous_step(self) -> None:
self.selected_step_index -= 1
if self.selected_step_index < 0:
self.selected_step_index = self.step_num - 1
self.step_select.value = self.selected_step_index
self.update_result_options()
await self.update_content()
async def action_previous_sample(self) -> None:
self.selected_sample_index -= 1
if self.selected_sample_index < 0:
self.selected_sample_index = self.sample_num - 1
self.sample_select.value = self.selected_sample_index
await self._clear_search()
await self.update_content()
async def action_swith_render(self):
self.render_table = not self.render_table
await self.update_content()
def action_toggle_search(self) -> None:
self.search_box.focus()
async def action_cancel_search(self) -> None:
self.search_box.value = ""
await self._clear_search()
await self.update_content()
async def _clear_search(self):
self.matches = []
self.search_status.update("")
self.current_match_index = 0
@on(Input.Submitted, "#search-box")
async def on_search_submitted(self, event: Input.Submitted) -> None:
self.matches = []
self.current_match_index = 0
if event.value:
await self.update_content(event.value)
renderable = self.content_display.render()
if isinstance(renderable, Table):
return
assert isinstance(renderable, Text)
console = self.content_display._console
lines = renderable.wrap(console, self.scroll_view.container_size.width)
line_idx_recorded = set()
for line_idx, line in enumerate(lines):
if line_idx in line_idx_recorded:
continue
if event.value in line:
self.matches.append(
{
"line": line_idx,
"word": event.value,
}
)
line_idx_recorded.add(line_idx)
self.scroll_view.focus()
await self.action_next_search()
async def action_next_search(self) -> None:
if not self.matches or self.current_match_index >= len(self.matches):
return
target_line = self.matches[self.current_match_index]["line"]
self.scroll_view.scroll_to(x=0, y=target_line * 1, animate=False)
self.current_match_index = (self.current_match_index + 1) % len(self.matches)
self.search_status.update(
Text(
f"Find :{self.current_match_index + 1}/{len(self.matches)}",
style="bold on #8f51b5",
)
)
def action_page_up(self):
self.scroll_view.scroll_page_up(animate=False)
def action_page_down(self):
self.scroll_view.scroll_page_down(animate=False)
def action_page_home(self):
self.scroll_view.scroll_home(animate=False)
def action_page_end(self):
self.scroll_view.scroll_end(animate=False)
async def _run(path: Path, mask_str: str):
assert path.exists(), f"{path} not exist"
paths = list(path.glob(f"*{FILE_SUFFIX}"))
paths = sorted(paths, key=lambda x: int(x.stem))
if not paths:
raise ValueError(f"no available reward dump files under f{path}")
print(f"get jsonl file nums: {len(paths)}")
pbar = ProgressBar(total=len(paths), name="data load progress")
data = {}
await load_path(paths[0], data, mask_str, 0, pbar)
app = JsonLineViewer(step_num=len(paths), data=data, pbar=pbar)
await asyncio.gather(load_dir(path, data, pbar, mask_str), app.run_async())
app = typer.Typer()
@app.command(help="launch TUI APP")
def run(
rollout_data_dir: Path,
mask_str: Annotated[str, typer.Option(help="string that will be masked to *")] = r"<\|image_pad\|>|<\|imgpad\|>",
):
loop = asyncio.get_event_loop()
loop.run_until_complete(_run(rollout_data_dir, mask_str))
if __name__ == "__main__":
app()
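# Illustrative invocations (a sketch; the dump directory layout is an assumption based on the
# step-numbered dump files that `_run` globs for, and Typer exposes `run` as the default command):
#   python scripts/rollout_viewer.py /path/to/rollout_dump_dir
#   python scripts/rollout_viewer.py /path/to/rollout_dump_dir --mask-str "<\|image_pad\|>"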
| {
"repo_id": "verl-project/verl",
"file_path": "scripts/rollout_viewer.py",
"license": "Apache License 2.0",
"lines": 467,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/trainer/config/config.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from typing import Any, Optional
from verl.base_config import BaseConfig
__all__ = ["CheckpointConfig", "ProfileConfig", "BaseModelConfig"]
@dataclass
class CheckpointConfig(BaseConfig):
"""Configuration for model checkpointing.
The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config.
Args:
save_contents (list[str]): What to include in saved checkpoints.
Options: 'model', 'optimizer', 'extra', 'hf_model'.
load_contents (list[str]): Contents to load from checkpoint. Defaults to same as save_contents.
async_save (bool): Whether to save checkpoints asynchronously. Only implemented for Megatron as of now.
"""
save_contents: list[str] = field(default_factory=lambda: ["model", "optimizer", "extra"])
load_contents: list[str] = field(default_factory=lambda: ["model", "optimizer", "extra"])
async_save: bool = False
mbridge_config: dict[str, Any] = field(default_factory=dict)
@dataclass
class ProfileConfig(BaseConfig):
"""Configuration for profiling.
The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config.
Args:
profile_ranks (Optional[list[int]]): List of ranks to profile. None means all ranks.
step_start (int): Starting step for profiling.
step_end (int): Ending step for profiling.
save_path (Optional[str]): Path to save profiling results.
"""
profile_ranks: Optional[list[int]] = None
step_start: int = -1
step_end: int = -1
save_path: Optional[str] = None
@dataclass
class BaseModelConfig(BaseConfig):
"""Base configuration for a model.
Contains core settings for loading and initializing a pretrained model checkpoint.
Args:
path (str): Path to pretrained model weights.
tokenizer_path (Optional[str]): Tokenizer path (defaults to actor's model path if not set).
override_config (dict): Hugging Face config override.
external_lib (Optional[str]): External model implementation (optional).
trust_remote_code (bool): Whether to trust remote code from Hugging Face models.
lora (dict[str, Any]): LoRA configuration dictionary.
"""
path: str = "~/models/deepseek-llm-7b-chat"
tokenizer_path: Optional[str] = None
override_config: dict[str, Any] = field(default_factory=dict)
external_lib: Optional[str] = None
trust_remote_code: bool = False
lora: dict[str, Any] = field(default_factory=dict)
@dataclass
class ModuleConfig(BaseConfig):
"""Configuration for external Python module, which can be loaded, executed (and optionally, ``import``ed).
Args:
path (str, optional): Path to the module file to load and execute.
name (str, optional): Name of the module to ``import``. Format: ``"import.path.to.module"``.
            If ``None``, the module will be loaded under a hashed name and
            will not be added to ``sys.modules``, thus cannot be ``import``ed as ``name``.
"""
path: Optional[str] = None
name: Optional[str] = None
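# Illustrative construction sketch (not part of verl's API; the paths and field values below are
# assumptions chosen only to show how the dataclasses above are instantiated and overridden).
def _example_build_configs() -> tuple[CheckpointConfig, BaseModelConfig]:
    """Build sample config objects to show construction of the dataclasses defined above."""
    ckpt = CheckpointConfig(save_contents=["model", "hf_model"], async_save=False)
    model = BaseModelConfig(path="~/models/my-model", trust_remote_code=True)
    return ckpt, model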
| {
"repo_id": "verl-project/verl",
"file_path": "verl/trainer/config/config.py",
"license": "Apache License 2.0",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/experimental/agent_loop/tool_parser.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
from abc import ABC, abstractmethod
import regex
from pydantic import BaseModel
from verl.utils.ray_utils import get_event_loop
from verl.utils.rollout_trace import rollout_trace_op
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
class FunctionCall(BaseModel):
arguments: str
"""
The arguments to call the function with, as generated by the model in JSON
format. Note that the model does not always generate valid JSON, and may
hallucinate parameters not defined by your function schema. Validate the
arguments in your code before calling your function.
"""
name: str
"""The name of the function to call."""
class ToolParser(ABC):
_registry: dict[str, type["ToolParser"]] = {}
def __init__(self, tokenizer) -> None:
self.tokenizer = tokenizer
@abstractmethod
async def extract_tool_calls(self, responses_ids: list[int]) -> tuple[str, list[FunctionCall]]:
"""Extract tool calls from the responses.
Args:
responses_ids (List[int]): The ids of the responses.
Returns:
Tuple[str, List[FunctionCall]]: Content and extracted tool calls.
"""
raise NotImplementedError
@classmethod
def get_tool_parser(cls, name: str, tokenizer):
if name not in cls._registry:
raise ValueError(f"Unknown tool parser: {name}")
return cls._registry[name](tokenizer)
@classmethod
def register(cls, name: str):
def decorator(subclass: type[ToolParser]) -> type[ToolParser]:
cls._registry[name] = subclass
return subclass
return decorator
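# Illustrative sketch of extending the registry above. `_ExampleNoopToolParser` and the name
# "noop" are assumptions made up for documentation; they are not parsers shipped with verl.
class _ExampleNoopToolParser(ToolParser):
    """Example parser that decodes the response but never reports tool calls."""
    async def extract_tool_calls(self, responses_ids: list[int]) -> tuple[str, list[FunctionCall]]:
        text = self.tokenizer.decode(responses_ids)
        return text, []
# Registration and lookup would then look like:
#   ToolParser.register("noop")(_ExampleNoopToolParser)
#   parser = ToolParser.get_tool_parser("noop", tokenizer)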
@ToolParser.register("hermes")
class HermesToolParser(ToolParser):
"""Adapted from https://github.com/vllm-project/vllm/blob/v0.9.1/vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py"""
def __init__(self, tokenizer) -> None:
super().__init__(tokenizer)
self.tool_call_start_token: str = "<tool_call>"
self.tool_call_end_token: str = "</tool_call>"
self.tool_call_regex = regex.compile(r"<tool_call>(.*?)</tool_call>", regex.DOTALL)
@rollout_trace_op
async def extract_tool_calls(self, responses_ids: list[int]) -> tuple[str, list[FunctionCall]]:
loop = get_event_loop()
text = await loop.run_in_executor(None, self.tokenizer.decode, responses_ids)
if self.tool_call_start_token not in text or self.tool_call_end_token not in text:
return text, []
matches = self.tool_call_regex.findall(text)
function_calls = []
for match in matches:
try:
function_call = json.loads(match)
name, arguments = function_call["name"], function_call["arguments"]
function_calls.append(FunctionCall(name=name, arguments=json.dumps(arguments, ensure_ascii=False)))
except Exception as e:
logger.error(f"Failed to decode tool call: {e}")
        # remaining text, excluding the tool call tokens
content = self.tool_call_regex.sub("", text)
return content, function_calls
@ToolParser.register("gpt-oss")
class GptOssToolParser(ToolParser):
"""
Tool parser for gpt-oss model.
Adapted from https://github.com/sgl-project/sglang/blob/main/python/sglang/srt/function_call/gpt_oss_detector.py
Args:
tokenizer: The tokenizer to use.
"""
def __init__(self, tokenizer) -> None:
super().__init__(tokenizer)
# check https://cookbook.openai.com/articles/openai-harmony for more details.
self.cot_pattern = regex.compile(
r"<\|start\|>assistant<\|channel\|>analysis<\|message\|>.*?<\|end\|>", regex.DOTALL
)
# <|start|>assistant may be pre-appended in prompts, so we need to remove it.
self.partial_cot_pattern = regex.compile(r"<\|channel\|>analysis<\|message\|>(.*?)<\|end\|>", regex.DOTALL)
self.tool_call_pattern = regex.compile(
r"<\|start\|>assistant<\|channel\|>[^<]* to=functions\.([^<]+) "
r"<\|constrain\|>json<\|message\|>(.*?)<\|call\|>",
regex.DOTALL,
)
@rollout_trace_op
async def extract_tool_calls(self, responses_ids: list[int]) -> tuple[str, list[FunctionCall]]:
loop = get_event_loop()
# We need to keep special tokens for gpt-oss model for better tool call extraction.
text = await loop.run_in_executor(None, lambda: self.tokenizer.decode(responses_ids, skip_special_tokens=False))
# Need to remove padding tokens for better tool call extraction.
text = text.replace(self.tokenizer.pad_token, "")
        # Need to remove the CoT since it may contain tool call tokens, but those are not valid tool calls.
text = regex.sub(self.cot_pattern, "", text)
text = regex.sub(self.partial_cot_pattern, "", text)
# check if there are tool calls in the text by re.findall
matches = regex.findall(self.tool_call_pattern, text)
if not matches:
return text, []
function_calls = []
for match in matches:
try:
name, arguments = match[0], match[1]
# don't check if arguments is valid JSON and leave it to client
function_calls.append(FunctionCall(name=name, arguments=arguments))
except Exception as e:
logger.error(f"Failed to decode tool call: {e}")
        # remaining text, excluding the tool call tokens
content = regex.sub(self.tool_call_pattern, "", text)
return content, function_calls
| {
"repo_id": "verl-project/verl",
"file_path": "verl/experimental/agent_loop/tool_parser.py",
"license": "Apache License 2.0",
"lines": 129,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:examples/data_preprocess/gsm8k_tool_agent_loop.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Preprocess the GSM8k dataset to parquet format
"""
import argparse
import os
import re
import datasets
from verl.utils.hdfs_io import copy, makedirs
def extract_solution(solution_str):
solution = re.search("#### (\\-?[0-9\\.\\,]+)", solution_str)
assert solution is not None
final_solution = solution.group(0)
final_solution = final_solution.split("#### ")[1].replace(",", "")
return final_solution
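# Worked examples for the regex above (illustrative strings, not taken from the dataset):
#   extract_solution("She has 3 + 5 = 8 apples.\n#### 8") -> "8"
#   extract_solution("Total cost is #### 1,250 dollars")  -> "1250"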
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--local_dir", default=None, help="The save directory for the preprocessed dataset.")
parser.add_argument("--hdfs_dir", default=None)
parser.add_argument("--local_dataset_path", default=None, help="The local path to the raw dataset, if it exists.")
parser.add_argument(
"--local_save_dir", default="~/data/gsm8k", help="The save directory for the preprocessed dataset."
)
args = parser.parse_args()
local_dataset_path = args.local_dataset_path
data_source = "openai/gsm8k"
if local_dataset_path is not None:
dataset = datasets.load_dataset(local_dataset_path, "main")
else:
dataset = datasets.load_dataset(data_source, "main")
train_dataset = dataset["train"]
test_dataset = dataset["test"]
instruction_following = "Let's think step by step and output the final answer after `####`."
# add a row to each data item that represents a unique id
def make_map_fn(split):
def process_fn(example, idx):
question_raw = example.pop("question")
question = question_raw + " " + instruction_following
answer_raw = example.pop("answer")
solution = extract_solution(answer_raw)
data = {
"data_source": data_source,
"agent_name": "tool_agent",
"prompt": [
{
"role": "system",
"content": (
"You are a math expert. You are given a question and you need to solve it step by step. "
"Reasoning step by step before any tool call. "
"You should use the `calc_gsm8k_reward` tool after step by step solving the question, "
"before generate final answer at least once and refine your answer if necessary. "
"Put your final answer in the format of `#### <answer>`."
),
},
{
"role": "user",
"content": question,
},
],
"ability": "math",
"reward_model": {"style": "rule", "ground_truth": solution},
"extra_info": {
"split": split,
"index": idx,
"answer": answer_raw,
"question": question_raw,
"need_tools_kwargs": True,
"tools_kwargs": {
"calc_gsm8k_reward": {
"create_kwargs": {"ground_truth": solution},
# "execute_kwargs": {},
# "calc_reward_kwargs": {},
# "release_kwargs": {},
},
},
"interaction_kwargs": {
"query": question,
"ground_truth": solution,
},
},
}
return data
return process_fn
train_dataset = train_dataset.map(function=make_map_fn("train"), with_indices=True)
test_dataset = test_dataset.map(function=make_map_fn("test"), with_indices=True)
hdfs_dir = args.hdfs_dir
local_save_dir = args.local_dir
if local_save_dir is not None:
print("Warning: Argument 'local_dir' is deprecated. Please use 'local_save_dir' instead.")
else:
local_save_dir = args.local_save_dir
train_dataset.to_parquet(os.path.join(local_save_dir, "train.parquet"))
test_dataset.to_parquet(os.path.join(local_save_dir, "test.parquet"))
if hdfs_dir is not None:
makedirs(hdfs_dir)
copy(src=local_save_dir, dst=hdfs_dir)
| {
"repo_id": "verl-project/verl",
"file_path": "examples/data_preprocess/gsm8k_tool_agent_loop.py",
"license": "Apache License 2.0",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:tests/workers/rollout/test_sglang_rollout_sharding_manager.py | # Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from verl.workers.rollout.sglang_rollout.utils import get_named_tensor_buckets
_TENSOR_1MB = torch.zeros(512, 512)
_BYTES_1MB = 1 << 20
@pytest.mark.parametrize(
"named_tensors, bucket_size_mb, gt_groups",
[
(
[("a", _TENSOR_1MB), ("b", _TENSOR_1MB)],
0.5 * _BYTES_1MB,
[["a"], ["b"]],
),
(
[("a", _TENSOR_1MB), ("b", _TENSOR_1MB)],
1 * _BYTES_1MB,
[["a"], ["b"]],
),
(
[("a", _TENSOR_1MB), ("b", _TENSOR_1MB)],
1.5 * _BYTES_1MB,
[["a"], ["b"]],
),
(
[("a", _TENSOR_1MB), ("b", _TENSOR_1MB)],
2 * _BYTES_1MB,
[["a", "b"]],
),
],
)
def test_get_named_tensor_buckets(named_tensors, bucket_size_mb, gt_groups: list[list[str]]):
named_tensors_iter = iter(named_tensors)
groups = list(get_named_tensor_buckets(named_tensors_iter, bucket_size_mb))
assert len(groups) == len(gt_groups)
for group, gt_group in zip(groups, gt_groups, strict=True):
assert len(group) == len(gt_group)
for (name, _), (gt_name) in zip(group, gt_group, strict=True):
assert name == gt_name
| {
"repo_id": "verl-project/verl",
"file_path": "tests/workers/rollout/test_sglang_rollout_sharding_manager.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:scripts/print_cfg.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import hydra
except ImportError as e:
raise ImportError("Please install hydra-core via 'pip install hydra-core' and retry.") from e
@hydra.main(config_path="../verl/trainer/config", config_name="ppo_trainer", version_base=None)
def main(config):
"""Main entry point for PPO training with Hydra configuration management.
Args:
config_dict: Hydra configuration dictionary containing training parameters.
"""
print(config)
from verl.utils.config import omega_conf_to_dataclass
profiler_config = omega_conf_to_dataclass(config.critic.profiler)
print(profiler_config)
if __name__ == "__main__":
main()
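# Example invocations (a sketch; the override key shown is an assumption about ppo_trainer.yaml
# and is listed only to illustrate Hydra-style overrides):
#   python scripts/print_cfg.py
#   python scripts/print_cfg.py trainer.total_epochs=1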
| {
"repo_id": "verl-project/verl",
"file_path": "scripts/print_cfg.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:tests/utils/test_temp_env_on_cpu.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from verl.utils.py_functional import temp_env_var
@pytest.fixture(autouse=True)
def clean_env():
"""Fixture to clean up environment variables before and after each test."""
# Store original environment state
original_env = dict(os.environ)
# Clean up any test variables that might exist
test_vars = ["TEST_VAR", "TEST_VAR_2", "EXISTING_VAR"]
for var in test_vars:
if var in os.environ:
del os.environ[var]
# Yield control to the test function
yield
# Restore original environment state after test
os.environ.clear()
os.environ.update(original_env)
def test_set_new_env_var():
"""Test setting a new environment variable that didn't exist before."""
# Ensure variable doesn't exist
assert "TEST_VAR" not in os.environ
with temp_env_var("TEST_VAR", "test_value"):
# Variable should be set inside context
assert os.environ["TEST_VAR"] == "test_value"
assert "TEST_VAR" in os.environ
# Variable should be removed after context
assert "TEST_VAR" not in os.environ
def test_restore_existing_env_var():
"""Test restoring an environment variable that already existed."""
# Set up existing variable
os.environ["EXISTING_VAR"] = "original_value"
with temp_env_var("EXISTING_VAR", "temporary_value"):
# Variable should be temporarily changed
assert os.environ["EXISTING_VAR"] == "temporary_value"
# Variable should be restored to original value
assert os.environ["EXISTING_VAR"] == "original_value"
def test_env_var_restored_on_exception():
"""Test that environment variables are restored even when exceptions occur."""
# Set up existing variable
os.environ["EXISTING_VAR"] = "original_value"
with pytest.raises(ValueError):
with temp_env_var("EXISTING_VAR", "temporary_value"):
# Verify variable is set
assert os.environ["EXISTING_VAR"] == "temporary_value"
# Raise exception
raise ValueError("Test exception")
# Variable should still be restored despite exception
assert os.environ["EXISTING_VAR"] == "original_value"
def test_nested_context_managers():
"""Test nested temp_env_var context managers."""
# Set up original variable
os.environ["TEST_VAR"] = "original"
with temp_env_var("TEST_VAR", "level1"):
assert os.environ["TEST_VAR"] == "level1"
with temp_env_var("TEST_VAR", "level2"):
assert os.environ["TEST_VAR"] == "level2"
# Should restore to level1
assert os.environ["TEST_VAR"] == "level1"
# Should restore to original
assert os.environ["TEST_VAR"] == "original"
def test_multiple_different_vars():
"""Test setting multiple different environment variables."""
# Set up one existing variable
os.environ["EXISTING_VAR"] = "existing_value"
with temp_env_var("EXISTING_VAR", "modified"):
with temp_env_var("TEST_VAR", "new_value"):
assert os.environ["EXISTING_VAR"] == "modified"
assert os.environ["TEST_VAR"] == "new_value"
# Check restoration
assert os.environ["EXISTING_VAR"] == "existing_value"
assert "TEST_VAR" not in os.environ
def test_empty_string_value():
"""Test setting environment variable to empty string."""
with temp_env_var("TEST_VAR", ""):
assert os.environ["TEST_VAR"] == ""
assert "TEST_VAR" in os.environ
# Should be removed after context
assert "TEST_VAR" not in os.environ
def test_overwrite_with_empty_string():
"""Test overwriting existing variable with empty string."""
os.environ["EXISTING_VAR"] = "original"
with temp_env_var("EXISTING_VAR", ""):
assert os.environ["EXISTING_VAR"] == ""
# Should restore original value
assert os.environ["EXISTING_VAR"] == "original"
def test_context_manager_returns_none():
"""Test that context manager yields None."""
with temp_env_var("TEST_VAR", "value") as result:
assert result is None
assert os.environ["TEST_VAR"] == "value"
| {
"repo_id": "verl-project/verl",
"file_path": "tests/utils/test_temp_env_on_cpu.py",
"license": "Apache License 2.0",
"lines": 104,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:verl/models/mcore/model_forward_fused.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from typing import Optional
import megatron.core as mcore
import torch
from megatron.core import parallel_state
from megatron.core.config_logger import has_config_logger_enabled, log_config_to_disk
from megatron.core.inference.contexts import BaseInferenceContext
from megatron.core.models.gpt.gpt_model import GPTModel
from megatron.core.packed_seq_params import PackedSeqParams
from megatron.core.tensor_parallel.mappings import gather_from_sequence_parallel_region
from megatron.core.utils import deprecate_inference_params
from packaging import version
from torch import Tensor
from verl.models.mcore.util import preprocess_packed_seqs, preprocess_thd_no_padding
from verl.utils.kernel.linear_cross_entropy import linear_cross_entropy
from verl.utils.megatron_utils import unwrap_model
from verl.utils.model import CausalLMOutputForPPO
from .util import postprocess_packed_seqs_for_dict_output, postprocess_thd_no_padding
def _get_patching_model(model: torch.nn.Module):
model = unwrap_model(model)
if isinstance(model, GPTModel):
return model
if not (hasattr(model, "language_model") and isinstance(model.language_model, GPTModel)):
print(f"Model {model.__class__.__name__} is not a supported for fused forward")
return None
return model.language_model
def patch_fused_forward(model: torch.nn.Module):
assert version.parse(mcore.__version__) >= version.parse("0.13.0"), (
"Fused forward patching requires mecore >= 0.13.0"
)
model = _get_patching_model(model)
if model is not None:
model.forward_backup = model.forward
model.forward = _fused_GPTModel_forward.__get__(model, model.__class__)
def unpatch_fused_forward(model: torch.nn.Module):
model = _get_patching_model(model)
if model is not None:
model.forward = model.forward_backup
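# Illustrative helper (a sketch, not part of verl's API): pair patch_fused_forward with
# unpatch_fused_forward so the original GPTModel.forward is always restored afterwards.
import contextlib
@contextlib.contextmanager
def _example_fused_forward_patched(model: torch.nn.Module):
    """Patch the fused forward on enter and restore the original forward on exit."""
    patch_fused_forward(model)
    try:
        yield model
    finally:
        unpatch_fused_forward(model)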
def fused_forward_model_gen(vision_model: bool = False):
def fused_forward_model(
model,
input_ids: Tensor,
position_ids: Tensor,
attention_mask: Tensor,
labels: Tensor,
labels_mask: Tensor,
temperature: float,
multi_modal_inputs: dict,
):
pre_process: bool = (
unwrap_model(model).pre_process if not vision_model else False
) # vision model does not need pre_process, because we pack the input_ids to thd in the forward function
post_process: bool = unwrap_model(model).post_process
model_kwargs = {}
if "pixel_values" in multi_modal_inputs:
model_kwargs["pixel_values"] = multi_modal_inputs["pixel_values"].to(input_ids.device)
if "image_grid_thw" in multi_modal_inputs:
model_kwargs["image_grid_thw"] = multi_modal_inputs["image_grid_thw"].to(input_ids.device)
if "pixel_values_videos" in multi_modal_inputs:
model_kwargs["pixel_values_videos"] = multi_modal_inputs["pixel_values_videos"].to(input_ids.device)
if "video_grid_thw" in multi_modal_inputs:
model_kwargs["video_grid_thw"] = multi_modal_inputs["video_grid_thw"].to(input_ids.device)
batch_size, seq_len = attention_mask.shape[:2]
input_ids_rmpad, packed_seq_params = preprocess_packed_seqs(input_ids, attention_mask, pre_process=pre_process)
input_ids_rmpad = input_ids_rmpad.contiguous()
labels_rmpad, _ = preprocess_packed_seqs(labels, attention_mask, pre_process=True)
labels_mask_rmpad, _ = preprocess_packed_seqs(labels_mask, attention_mask, pre_process=True)
labels_rmpad = labels_rmpad.contiguous()
labels_mask_rmpad = labels_mask_rmpad.contiguous()
input_args = dict(
input_ids=input_ids_rmpad,
attention_mask=None,
position_ids=position_ids if not vision_model else None, # vision models will calculate position_ids
packed_seq_params=packed_seq_params,
labels=labels_rmpad,
temperature=temperature,
**model_kwargs,
)
if vision_model:
            # Workaround for supporting sequence packing with context parallelism:
            # a CP split combined with sequence packing would make the model lose vision token
            # information, so we keep the original input_ids and pack them only after the vision
            # embeddings are calculated, in cooperation with mbridge.
input_args["input_ids"] = input_ids
input_args["attention_mask"] = attention_mask
output_orig: CausalLMOutputForPPO = model(**input_args)
if post_process:
# output_orig is in type of CausalLMOutputForPPO
output = postprocess_packed_seqs_for_dict_output(
labels_mask_rmpad,
output_orig,
packed_seq_params,
attention_mask,
batch_size,
seq_len,
post_process=post_process,
)
else:
output = output_orig
return output
return fused_forward_model
def fused_forward_no_padding_gen(vision_model: bool = False):
def fused_forward_no_padding(
model,
input_ids: Tensor,
labels: Tensor,
multi_modal_inputs: dict,
temperature: float,
calculate_entropy: bool,
pad_token_id: int,
):
pre_process = unwrap_model(model).pre_process
post_process = unwrap_model(model).post_process
fp8 = unwrap_model(model).config.fp8
use_fp8_padding = fp8 in ["e4m3", "hybrid"]
input_ids_rmpad, packed_seq_params = preprocess_thd_no_padding(
input_ids, pre_process=pre_process, use_fp8_padding=use_fp8_padding
)
input_ids_rmpad = input_ids_rmpad.contiguous()
model_kwargs = {}
if "pixel_values" in multi_modal_inputs:
model_kwargs["pixel_values"] = multi_modal_inputs["pixel_values"].to(input_ids.device)
if "image_grid_thw" in multi_modal_inputs:
model_kwargs["image_grid_thw"] = multi_modal_inputs["image_grid_thw"].to(input_ids.device)
if "pixel_values_videos" in multi_modal_inputs:
model_kwargs["pixel_values_videos"] = multi_modal_inputs["pixel_values_videos"].to(input_ids.device)
if "video_grid_thw" in multi_modal_inputs:
model_kwargs["video_grid_thw"] = multi_modal_inputs["video_grid_thw"].to(input_ids.device)
attention_mask = None
if vision_model:
input_ids_rmpad = input_ids.to_padded_tensor(pad_token_id)
seqlens_in_batch = input_ids.offsets().diff().to(input_ids.device)
max_seq_len = input_ids_rmpad.shape[1]
attention_mask = torch.arange(max_seq_len, device=input_ids.device).unsqueeze(
0
) < seqlens_in_batch.unsqueeze(1)
labels_rmpad, _ = preprocess_thd_no_padding(
labels, pre_process=True, need_roll=True, use_fp8_padding=use_fp8_padding
)
labels_rmpad = labels_rmpad.contiguous()
output_orig: CausalLMOutputForPPO = model(
input_ids=input_ids_rmpad,
attention_mask=attention_mask,
position_ids=None,
packed_seq_params=packed_seq_params,
labels=labels_rmpad,
temperature=temperature,
**model_kwargs,
)
if not post_process:
return output_orig
log_probs = output_orig.log_probs
if log_probs.dim() == 1:
log_probs = log_probs.unsqueeze(0)
log_probs = postprocess_thd_no_padding(
log_probs, packed_seq_params, input_ids, input_ids.shape[0], post_process=post_process
)
output = {"log_probs": log_probs}
if calculate_entropy:
entropy = output_orig.entropy
if entropy.dim() == 1:
entropy = entropy.unsqueeze(0)
entropy = postprocess_thd_no_padding(
entropy, packed_seq_params, input_ids, input_ids.shape[0], post_process=post_process
)
output["entropy"] = entropy
return output
return fused_forward_no_padding
def _fused_GPTModel_forward(
model,
input_ids: Tensor,
position_ids: Tensor,
attention_mask: Tensor,
decoder_input: Tensor = None,
labels: Tensor = None,
inference_context: BaseInferenceContext = None,
packed_seq_params: PackedSeqParams = None,
extra_block_kwargs: dict = None,
runtime_gather_output: Optional[bool] = None,
*,
inference_params: Optional[BaseInferenceContext] = None,
loss_mask: Optional[Tensor] = None,
temperature: float = 1.0,
**kwargs,
) -> CausalLMOutputForPPO:
"""
Patch self._postprocess in forward for GPT models to enable fused kernel support.
https://github.com/NVIDIA/Megatron-LM/blob/core_v0.13.0/megatron/core/models/gpt/gpt_model.py
    TODO: Currently we still need to patch `forward` because we must pass `temperature`
    explicitly to `self._postprocess`; there may be a better way to handle this.
"""
inference_context = deprecate_inference_params(inference_context, inference_params)
preproc_output = model._preprocess(
input_ids=input_ids,
position_ids=position_ids,
decoder_input=decoder_input,
inference_context=inference_context,
packed_seq_params=packed_seq_params,
)
(decoder_input, rotary_pos_emb, rotary_pos_cos, rotary_pos_sin, sequence_len_offset) = preproc_output[:5]
# Run decoder.
hidden_states = model.decoder(
hidden_states=decoder_input,
attention_mask=attention_mask,
inference_context=inference_context,
rotary_pos_emb=rotary_pos_emb,
rotary_pos_cos=rotary_pos_cos,
rotary_pos_sin=rotary_pos_sin,
packed_seq_params=packed_seq_params,
sequence_len_offset=sequence_len_offset,
**(extra_block_kwargs or {}),
**kwargs,
)
if not model.post_process:
return hidden_states
output = CausalLMOutputForPPO(
loss=None,
logits=None,
past_key_values=None,
hidden_states=hidden_states,
attentions=None,
)
if model.config.sequence_parallel:
hidden_states = gather_from_sequence_parallel_region(hidden_states)
# Get the output weight - use embedding weight if output_layer is None or weight is shared
if hasattr(model, "output_layer") and model.output_layer is not None and model.output_layer.weight is not None:
output_weight = model.output_layer.weight
else:
# When embeddings are tied, use the embedding weight
output_weight = model.embedding.word_embeddings.weight
logprobs, entropy = linear_cross_entropy(
hidden_states,
output_weight,
labels,
temperature,
"none",
parallel_state.get_tensor_model_parallel_group(),
)
if has_config_logger_enabled(model.config):
payload = OrderedDict(
{
"input_ids": input_ids,
"position_ids": position_ids,
"attention_mask": attention_mask,
"decoder_input": decoder_input,
"logprobs": logprobs,
"entropy": entropy,
}
)
log_config_to_disk(model.config, payload, prefix="input_and_logits")
output.entropy = entropy
output.log_probs = logprobs
return output
| {
"repo_id": "verl-project/verl",
"file_path": "verl/models/mcore/model_forward_fused.py",
"license": "Apache License 2.0",
"lines": 268,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/experimental/dynamic_dataset/dynamicgen_dataset.py | # Copyright 2025 Amazon.com Inc and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Dataset class that enables dynamic data generation strategies between iterations of training.
The DynamicGenDataset class extends RLHFDataset and uses an AbstractDataGenerator instance to generate data.
This is especially useful in settings where a proposer model generates new tasks based
on rollout data.
"""
import logging
from abc import ABC, abstractmethod
from typing import Optional
import datasets
from omegaconf import DictConfig
from torch.utils.data import Dataset
from transformers import PreTrainedTokenizer, ProcessorMixin
from verl import DataProto
from verl.utils.dataset import RLHFDataset
from verl.utils.import_utils import load_extern_object
logger = logging.getLogger(__name__)
class AbstractDataGenerator(ABC):
def __init__(self, config: DictConfig):
self.config = config
@abstractmethod
def generate(self, dataset: Dataset) -> datasets.Dataset:
"""
Generate method must be implemented by subclasses.
Args:
dataset: The dataset to generate from.
Returns:
Processed data or result as implemented by the subclass.
"""
pass
class MockDataGenerator(AbstractDataGenerator):
"""
A noop data gen class that only reappends the first datapoint.
    This class is useful as a placeholder and for testing.
"""
def __init__(self, config: DictConfig = None):
super().__init__(config)
def generate(self, dataset: Dataset) -> datasets.Dataset:
print("MockDataGenerator: No operation performed on the dataset.")
return dataset.dataframe.select([0])
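# Illustrative sketch of a user-defined generator (the class below is an assumption for
# documentation only; real generators would typically call a proposer model on rollout data):
class _ExampleRepeatLastGenerator(AbstractDataGenerator):
    """Example generator that re-appends the most recent datapoint of the dataset."""
    def generate(self, dataset: Dataset) -> datasets.Dataset:
        last_index = len(dataset.dataframe) - 1
        return dataset.dataframe.select([last_index])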
class DynamicGenDataset(RLHFDataset):
"""
A dataset class that uses a data generation strategy to process data.
    This class extends RLHFDataset and uses an AbstractDataGenerator instance to generate data.
"""
def __init__(
self,
data_files: str | list[str],
tokenizer: PreTrainedTokenizer,
config: DictConfig,
processor: Optional[ProcessorMixin] = None,
):
super().__init__(data_files, tokenizer, config, processor)
        assert "datagen" in config and config.datagen.get("path", None) is not None, (
            f"datagen path is not set in config: {config}"
        )
        self.datagen = config.datagen
# Dynamically load the custom datagen class
datagen_cls = load_extern_object(config.datagen.path, config.datagen.name)
# Verify that the custom datagen class inherits from AbstractDataGenerator
abs_cls = AbstractDataGenerator
if not issubclass(datagen_cls, abs_cls):
            raise TypeError(
                f"The custom datagen class '{config.datagen.name}' from '{config.datagen.path}' "
                f"must inherit from {abs_cls}"
            )
self.data_generator = datagen_cls(config.datagen)
self.on_batch_end()
def append_dataframe(self, new_dataframe: datasets.Dataset):
new_dataframe = self.maybe_filter_out_long_prompts(new_dataframe)
self.dataframe = datasets.concatenate_datasets([self.dataframe, new_dataframe])
logger.info(f"new dataset len: {len(self.dataframe)}")
    def on_batch_end(self, batch: Optional[DataProto] = None) -> None:
"""
Generate data using the provided data generation strategy.
Note: This method is intended to change the dataset after each training batch.
"""
new_data = self.data_generator.generate(self)
self.append_dataframe(new_data)
| {
"repo_id": "verl-project/verl",
"file_path": "verl/experimental/dynamic_dataset/dynamicgen_dataset.py",
"license": "Apache License 2.0",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/utils/profiler/mstx_profile.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Inspired from https://gitee.com/ascend/MindSpeed-RL/blob/master/mindspeed_rl/utils/utils.py
import functools
import logging
import os
from contextlib import contextmanager
from typing import Any, Callable, Optional
import torch_npu
from packaging import version
from torch_npu.npu import mstx
from .config import NPUToolConfig
from .profile import DistProfiler, ProfilerConfig
def mark_start_range(message: Optional[str] = None) -> None:
"""Start a mark range in the profiler.
Args:
message (str, optional):
The message to be displayed in the profiler. Defaults to None.
"""
return mstx.range_start(message=message)
def mark_end_range(range_id: str) -> None:
"""End a mark range in the profiler.
Args:
range_id (str):
The id of the mark range to end.
"""
return mstx.range_end(range_id)
def mark_annotate(message: Optional[str] = None) -> Callable:
"""Decorate a function to annotate a mark range along with the function life cycle.
Args:
message (str, optional):
The message to be displayed in the profiler. Defaults to None.
"""
def decorator(func):
profile_message = message or func.__name__
return mstx.mstx_range(profile_message)(func)
return decorator
@contextmanager
def marked_timer(name: str, timing_raw: dict[str, float], *args: Any, **kwargs: Any) -> None:
"""Context manager for timing with MSTX markers.
This utility function measures the execution time of code within its context,
accumulates the timing information, and adds MSTX markers for profiling.
Args:
name (str): The name/identifier for this timing measurement.
timing_raw (Dict[str, float]): Dictionary to store timing information.
Yields:
None: This is a context manager that yields control back to the code block.
"""
if args:
logging.warning(f"Args are not supported in mstx_profile, but received: {args}")
if kwargs:
logging.warning(f"Kwargs are not supported in mstx_profile, but received: {kwargs}")
mark_range = mark_start_range(message=name)
from .performance import _timer
yield from _timer(name, timing_raw)
mark_end_range(mark_range)
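# Illustrative usage of the helpers above (a sketch; the timed work is a placeholder and the
# key written into `timing_raw` is an assumption about how `_timer` records its results):
def _example_marked_timer_usage() -> dict[str, float]:
    """Time a placeholder block with marked_timer and return the collected timings."""
    timing_raw: dict[str, float] = {}
    with marked_timer("example_stage", timing_raw):
        sum(range(1_000))  # placeholder work standing in for a real rollout or update step
    return timing_raw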
def get_npu_profiler(
contents: list[str],
profile_level: str,
profile_save_path: str,
analysis: bool,
role: Optional[str] = None,
profile_step: Optional[str] = None,
):
"""Generate and return an NPU profiler object.
Args:
contents (list[str]):
A list of options to control the collection content,
such as npu, cpu, memory, shapes, module, stack.
profile_level (str):
The collection level, which can be set to level_none,
level0, level1 and level2.
profile_save_path (str):
The path to save the collected data.
analysis (bool):
            Whether to enable automatic data parsing.
role (str, optional):
The role of the current data collection. Defaults to None.
profile_step(str, optional):
The current training step. Defaults to None.
"""
if profile_level == "level_none":
level = torch_npu.profiler.ProfilerLevel.Level_none
elif profile_level == "level0":
level = torch_npu.profiler.ProfilerLevel.Level0
elif profile_level == "level1":
level = torch_npu.profiler.ProfilerLevel.Level1
elif profile_level == "level2":
level = torch_npu.profiler.ProfilerLevel.Level2
else:
raise ValueError(f"level only supports level0, 1, 2, and level_none, but gets {profile_level}")
if profile_step:
profile_save_path = os.path.join(profile_save_path, profile_step)
if role:
profile_save_path = os.path.join(profile_save_path, role)
# The ability to filter communication via mstx_domain_exclude requires torch_npu==2.1 or higher.
if version.parse(torch_npu.__version__) < version.parse("2.1"):
raise RuntimeError("torch_npu==2.1 or higher is required to use mstx_domain_exclude")
experimental_config = torch_npu.profiler._ExperimentalConfig(
profiler_level=level,
export_type=torch_npu.profiler.ExportType.Db,
data_simplification=True,
msprof_tx=True,
mstx_domain_exclude=["communication"],
)
    activities = []
    if contents is None or "npu" in contents:
        activities.append(torch_npu.profiler.ProfilerActivity.NPU)
    if contents is None or "cpu" in contents:
        activities.append(torch_npu.profiler.ProfilerActivity.CPU)
    prof = torch_npu.profiler.profile(
        with_modules=contents is None or "module" in contents,
        with_stack=contents is None or "stack" in contents,
        record_shapes=contents is None or "shapes" in contents,
        profile_memory=contents is None or "memory" in contents,
        activities=activities,
on_trace_ready=torch_npu.profiler.tensorboard_trace_handler(profile_save_path, analyse_flag=analysis),
experimental_config=experimental_config,
)
return prof
class NPUProfiler(DistProfiler):
"""
NPU profiler. Initialized in a worker to control the NPU profiler.
"""
_define_count = 0
def __init__(self, rank: int, config: ProfilerConfig, tool_config: NPUToolConfig, **kwargs):
"""Initialize the NsightSystemsProfiler.
Args:
rank (int): The rank of the current process.
config (Optional[ProfilerConfig]): Configuration for the profiler. If None, a default configuration is used.
tool_config (NPUToolConfig): The config to control npu profiler behavior.
"""
if not config:
config = ProfilerConfig(ranks=[], enable=False)
if not tool_config:
assert not config.enable, "tool_config must be set when profiler is enabled"
self.discrete: bool = tool_config.discrete
self.profile_npu = None
self.profile_contents = tool_config.contents
self.profile_level = tool_config.level
self.profile_save_path = config.save_path
self.analysis = tool_config.analysis
def start(self, **kwargs):
role = kwargs.get("role", None)
if not self.discrete and NPUProfiler._define_count == 0:
self.profile_npu = get_npu_profiler(
contents=self.profile_contents,
profile_level=self.profile_level,
profile_save_path=self.profile_save_path,
analysis=self.analysis,
role=role,
)
self.profile_npu.start()
NPUProfiler._define_count += 1
def stop(self):
if not self.discrete and NPUProfiler._define_count == 1:
self.profile_npu.step()
self.profile_npu.stop()
NPUProfiler._define_count -= 1
def annotate(self, message: Optional[str] = None, role: Optional[str] = None, **kwargs_outer) -> Callable:
"""Decorate a Worker member function to profile the current rank in the current training step.
Requires the target function to be a member function of a Worker,
which has a member field `profiler` with NPUProfiler type.
Args:
message (str, optional):
The message to be displayed in the profiler. Defaults to None.
role (str, optional):
The role of the current data collection. Defaults to None.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs_inner):
profile_name = message or func.__name__
discrete_mode = self.discrete
if not discrete_mode:
mark_range = mark_start_range(message=profile_name)
else:
profile_npu = get_npu_profiler(
contents=self.profile_contents,
profile_level=self.profile_level,
profile_save_path=self.profile_save_path,
analysis=self.analysis,
role=role,
)
profile_npu.start()
mark_range = mark_start_range(message=profile_name)
result = func(*args, **kwargs_inner)
if not discrete_mode:
mark_end_range(mark_range)
else:
mark_end_range(mark_range)
profile_npu.step()
profile_npu.stop()
return result
return wrapper
return decorator
| {
"repo_id": "verl-project/verl",
"file_path": "verl/utils/profiler/mstx_profile.py",
"license": "Apache License 2.0",
"lines": 207,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:tests/special_sanity/check_docstrings.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Python script to check docstrings for functions and classes in specified files.
Checks that every public function and class has proper docstring documentation.
"""
import ast
import os
import sys
class DocstringChecker(ast.NodeVisitor):
"""AST visitor to check for missing docstrings in functions and classes."""
def __init__(self, filename: str):
self.filename = filename
self.missing_docstrings: list[tuple[str, str, int]] = []
self.current_class = None
self.function_nesting_level = 0
def visit_FunctionDef(self, node: ast.FunctionDef):
"""Visit function definitions and check for docstrings."""
if not node.name.startswith("_") and self.function_nesting_level == 0:
if not self._has_docstring(node):
func_name = f"{self.current_class}.{node.name}" if self.current_class else node.name
self.missing_docstrings.append((func_name, self.filename, node.lineno))
self.function_nesting_level += 1
self.generic_visit(node)
self.function_nesting_level -= 1
def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef):
"""Visit async function definitions and check for docstrings."""
if not node.name.startswith("_") and self.function_nesting_level == 0:
if not self._has_docstring(node):
func_name = f"{self.current_class}.{node.name}" if self.current_class else node.name
self.missing_docstrings.append((func_name, self.filename, node.lineno))
self.function_nesting_level += 1
self.generic_visit(node)
self.function_nesting_level -= 1
def visit_ClassDef(self, node: ast.ClassDef):
"""Visit class definitions and check for docstrings."""
if not node.name.startswith("_"):
if not self._has_docstring(node):
self.missing_docstrings.append((node.name, self.filename, node.lineno))
old_class = self.current_class
self.current_class = node.name
self.generic_visit(node)
self.current_class = old_class
def _has_docstring(self, node) -> bool:
"""Check if a node has a docstring."""
return ast.get_docstring(node) is not None
def check_file_docstrings(filepath: str) -> list[tuple[str, str, int]]:
"""Check docstrings in a single file."""
try:
with open(filepath, encoding="utf-8") as f:
content = f.read()
tree = ast.parse(content, filename=filepath)
checker = DocstringChecker(filepath)
checker.visit(tree)
return checker.missing_docstrings
except Exception as e:
print(f"Error processing {filepath}: {e}")
return []
def main():
"""Main function to check docstrings in specified files."""
files_to_check = [
"verl/trainer/ppo/ray_trainer.py",
"verl/trainer/main_ppo.py",
"verl/trainer/ppo/reward.py",
"verl/utils/reward_score/__init__.py",
"verl/trainer/ppo/core_algos.py",
"verl/experimental/agent_loop/agent_loop.py",
"verl/workers/sharding_manager/fsdp_vllm.py",
"verl/workers/sharding_manager/fsdp_ulysses.py",
]
script_dir = os.path.dirname(os.path.abspath(__file__))
repo_path = os.path.dirname(os.path.dirname(script_dir))
if not os.path.exists(repo_path):
print(f"Repository path {repo_path} does not exist!")
sys.exit(1)
os.chdir(repo_path)
all_missing_docstrings = []
print("Checking docstrings in specified files...")
print("=" * 60)
for file_path in files_to_check:
if not os.path.exists(file_path):
print(f"Warning: File {file_path} does not exist!")
continue
print(f"Checking {file_path}...")
missing = check_file_docstrings(file_path)
all_missing_docstrings.extend(missing)
if missing:
print(f" Found {len(missing)} missing docstrings")
else:
print(" All functions and classes have docstrings [OK]")
print("=" * 60)
if all_missing_docstrings:
print(f"\nSUMMARY: Found {len(all_missing_docstrings)} functions/classes missing docstrings:")
print("-" * 60)
by_file = {}
for name, filepath, lineno in all_missing_docstrings:
if filepath not in by_file:
by_file[filepath] = []
by_file[filepath].append((name, lineno))
for filepath in sorted(by_file.keys()):
print(f"\n{filepath}:")
for name, lineno in sorted(by_file[filepath], key=lambda x: x[1]):
print(f" - {name} (line {lineno})")
print(f"\nTotal missing docstrings: {len(all_missing_docstrings)}")
raise Exception(f"Found {len(all_missing_docstrings)} functions/classes without proper docstrings!")
else:
print("\n[OK] All functions and classes have proper docstrings!")
if __name__ == "__main__":
main()
| {
"repo_id": "verl-project/verl",
"file_path": "tests/special_sanity/check_docstrings.py",
"license": "Apache License 2.0",
"lines": 120,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:tests/utils/test_rollout_trace_on_cpu.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from unittest.mock import MagicMock, patch
import pytest
from verl.utils.rollout_trace import RolloutTraceConfig, rollout_trace_attr, rollout_trace_op
@pytest.fixture(autouse=True)
def reset_rollout_trace_config_singleton():
"""Fixture to reset the RolloutTraceConfig singleton before each test."""
RolloutTraceConfig.reset()
@pytest.fixture
def mock_weave_client():
"""Mocks the weave module and its client, yielding the mock client."""
mock_weave = MagicMock()
mock_client = MagicMock()
mock_call = MagicMock()
mock_client.create_call.return_value = mock_call
mock_weave.init.return_value = mock_client
# Also mock the call_context if it's used internally by the decorator
mock_weave.trace.context.call_context.return_value = MagicMock()
with patch.dict(sys.modules, {"weave": mock_weave, "weave.trace.context": mock_weave.trace.context}):
yield mock_client
class TracedClass:
@rollout_trace_op
# @weave.op
# @mlflow.trace
async def my_method(self, a, b="default"):
return f"result: {a}, {b}"
@rollout_trace_op
# @weave.op
# @mlflow.trace
async def middle_method(self, a, b="default"):
await self.my_method("test_a1", b="test_b1")
return f"result: {a}, {b}"
@rollout_trace_op
# @mlflow.trace
async def my_method_with_exception(self):
raise ValueError("Test Exception")
async def upper_method(self):
await self.my_method("test_a0", b="test_b0")
await self.middle_method("test_a2", b="test_b2")
return True
class UntracedClass:
@rollout_trace_op
async def my_method(self, x):
return x * 2
async def test_rollout_trace_on_untraced_class():
"""Tests that the decorator works correctly when no backend is configured."""
instance = UntracedClass()
assert await instance.my_method(10) == 20
async def test_rollout_trace_with_tracer(mock_weave_client):
"""Tests that the decorator calls the tracer's methods correctly."""
RolloutTraceConfig.init(project_name="my-project", experiment_name="my-experiment", backend="weave")
instance = TracedClass()
assert RolloutTraceConfig.get_client() is mock_weave_client
result = await instance.my_method("test_a", b="test_b")
assert result == "result: test_a, test_b"
mock_weave_client.create_call.assert_called_once()
call_kwargs = mock_weave_client.create_call.call_args.kwargs
assert call_kwargs["op"] == "TracedClass.my_method"
expected_inputs = {"a": "test_a", "b": "test_b"}
assert call_kwargs["inputs"] == expected_inputs
mock_call = mock_weave_client.create_call.return_value
mock_weave_client.finish_call.assert_called_once_with(mock_call, output=result)
async def test_rollout_trace_with_exception(mock_weave_client):
"""Tests that `finish` is called with the exception when one is raised."""
RolloutTraceConfig.init(project_name="my-project", experiment_name="my-experiment", backend="weave")
instance = TracedClass()
with pytest.raises(ValueError, match="Test Exception"):
await instance.my_method_with_exception()
mock_weave_client.create_call.assert_called_once()
mock_call = mock_weave_client.create_call.return_value
mock_weave_client.finish_call.assert_called_once()
# Check that finish_call was called with the exception
args, kwargs = mock_weave_client.finish_call.call_args
assert args[0] == mock_call
assert "exception" in kwargs
assert isinstance(kwargs["exception"], ValueError)
async def test_rollout_trace_with_dummy_backend(mock_weave_client):
"""Tests that the tracer is not called when the backend is 'dummy'."""
RolloutTraceConfig.init(project_name="my-project", experiment_name="my-experiment", backend="dummy")
instance = TracedClass()
await instance.my_method("test_a")
mock_weave_client.create_call.assert_not_called()
async def test_trace_disabled_with_trace_false(mock_weave_client):
"""Tests that tracing is disabled when trace=False."""
RolloutTraceConfig.init(
project_name="my-project",
experiment_name="my-experiment",
backend="weave",
)
instance = TracedClass()
assert RolloutTraceConfig.get_backend() == "weave"
with rollout_trace_attr(step=1, sample_index=0, rollout_n=0, trace=False):
result = await instance.my_method("test_a", b="test_b")
assert result == "result: test_a, test_b"
# No tracing should have occurred
mock_weave_client.create_call.assert_not_called()
# Verify that tracing works again with trace=True (default)
with rollout_trace_attr(step=1, sample_index=0, rollout_n=0):
result = await instance.my_method("test_a", b="test_b")
assert result == "result: test_a, test_b"
assert mock_weave_client.create_call.call_count == 1
async def test_trace_false_disables_nested_trace_ops(mock_weave_client):
"""Tests that trace=False disables all nested @rollout_trace_op calls."""
RolloutTraceConfig.init(
project_name="my-project",
experiment_name="my-experiment",
backend="weave",
)
instance = TracedClass()
with rollout_trace_attr(step=1, sample_index=0, rollout_n=0, trace=False):
# Call upper_method which internally calls my_method and middle_method
# All of these are decorated with @rollout_trace_op
result = await instance.upper_method()
assert result is True
# No tracing should have occurred for any of the nested calls
mock_weave_client.create_call.assert_not_called()
with rollout_trace_attr(step=1, sample_index=0, rollout_n=0):
result = await instance.my_method("test_a", b="test_b")
assert result == "result: test_a, test_b"
assert mock_weave_client.create_call.call_count == 1
async def test_trace_enabled_restored_after_exception(mock_weave_client):
"""Tests that trace state is restored even if an exception occurs when trace=False."""
RolloutTraceConfig.init(
project_name="my-project",
experiment_name="my-experiment",
backend="weave",
)
instance = TracedClass()
assert RolloutTraceConfig.get_backend() == "weave"
# Use trace=False and raise an exception
try:
with rollout_trace_attr(step=1, sample_index=0, rollout_n=0, trace=False):
raise RuntimeError("Test exception with trace disabled")
except RuntimeError:
pass
with rollout_trace_attr(step=1, sample_index=0, rollout_n=0):
result = await instance.my_method("test_a", b="test_b")
assert result == "result: test_a, test_b"
assert mock_weave_client.create_call.call_count == 1
@pytest.mark.skipif(
os.environ.get("RUN_WEAVE_INTEGRATION_TESTS", "false").lower() != "true",
reason="Skipping weave integration test. Set RUN_WEAVE_INTEGRATION_TESTS=true to run.",
)
async def test_rollout_trace_with_real_weave_backend():
"""Integration test with a real weave backend."""
# This assumes that the weave environment (e.g., project) is configured
RolloutTraceConfig.init(project_name="my-project", experiment_name="my-experiment", backend="weave")
instance = TracedClass()
with rollout_trace_attr(step=1, sample_index=2, rollout_n=3):
await instance.upper_method()
with pytest.raises(ValueError, match="Test Exception"):
await instance.my_method_with_exception()
print("\nWeave integration test ran successfully. Check your weave project for the trace.")
@pytest.mark.skipif(
os.environ.get("RUN_MLFLOW_INTEGRATION_TESTS", "false").lower() != "true",
reason="Skipping mlflow integration test. Set RUN_MLFLOW_INTEGRATION_TESTS=true to run.",
)
async def test_rollout_trace_with_real_mlflow_backend():
"""Integration test with a real mlflow backend."""
# This assumes that the mlflow environment (e.g., project) is configured
RolloutTraceConfig.init(project_name="my-project", experiment_name="my-experiment", backend="mlflow")
instance = TracedClass()
with rollout_trace_attr(step=1, sample_index=2, rollout_n=3, name="agent_run"):
assert await instance.upper_method()
# with pytest.raises(ValueError, match="Test Exception"):
# await instance.my_method_with_exception()
print("\nWeave integration test ran successfully. Check your weave project for the trace.")
| {
"repo_id": "verl-project/verl",
"file_path": "tests/utils/test_rollout_trace_on_cpu.py",
"license": "Apache License 2.0",
"lines": 181,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:verl/utils/rollout_trace.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import functools
import inspect
import os
from contextvars import ContextVar
from typing import Optional
from pydantic import BaseModel
from verl.utils.ray_utils import get_event_loop
_trace_enabled: ContextVar[bool] = ContextVar("_trace_enabled", default=True)
class RolloutTraceConfig:
"""Configuration for rollout tracing with various backends.
Singleton configuration class for managing rollout trace settings across different
tracing backends like Weave and MLflow.
Args:
backend (Optional[str]): Tracing backend to use ('weave', 'mlflow', or None).
client (Optional[object]): Client instance for the selected backend.
token2text (bool): Whether to convert tokens to text in traces. Defaults to False.
project_name (str): Name of the project for tracing.
experiment_name (str): Name of the experiment for tracing.
max_samples_per_step_per_worker (Optional[int]): Maximum number of unique samples to trace
per worker per step. If None, all samples are traced. If set, each worker will randomly
select up to this many unique samples to trace (including all their rollouts for GRPO).
Total traces = max_samples_per_step_per_worker * num_workers * n_rollouts_per_sample.
"""
_instance: Optional["RolloutTraceConfig"] = None
backend: Optional[str] = None
client: Optional[object] = None
token2text: bool = False
_initialized: bool = False
project_name: str = None
experiment_name: str = None
max_samples_per_step_per_worker: Optional[int] = None
def __new__(cls, *args, **kwargs):
if cls._instance is None:
cls._instance = super().__new__(cls)
cls._instance._initialized = False
return cls._instance
@classmethod
def get_instance(cls) -> "RolloutTraceConfig":
if cls._instance is None:
cls._instance = cls()
return cls._instance
@classmethod
def init(
cls,
project_name: str,
experiment_name: str,
backend: str,
token2text: bool = False,
max_samples_per_step_per_worker: Optional[int] = None,
):
config = cls.get_instance()
if config._initialized:
return
config.backend = backend
config.token2text = token2text
config.project_name = project_name
config.experiment_name = experiment_name
config.max_samples_per_step_per_worker = max_samples_per_step_per_worker
if backend == "weave":
import weave
config.client = weave.init(project_name)
elif backend == "mlflow":
import mlflow
mlflow.config.enable_async_logging()
config.client = mlflow
MLFLOW_TRACKING_URI = os.environ.get("MLFLOW_TRACKING_URI", "sqlite:////tmp/mlruns.db")
mlflow.set_tracking_uri(MLFLOW_TRACKING_URI)
mlflow.set_experiment(project_name)
else:
config.client = None
config._initialized = True
@classmethod
def get_backend(cls) -> Optional[str]:
return cls.get_instance().backend
@classmethod
def get_client(cls) -> Optional[object]:
return cls.get_instance().client
@classmethod
def enable_token2text(cls) -> Optional[bool]:
return cls.get_instance().token2text
@classmethod
def reset(cls):
cls._instance = None
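# --- Illustrative sketch (not part of the original module) ---
# A minimal sketch of initializing the singleton above before any traced rollout runs.
# The project/experiment names and the numeric value are placeholders, and the chosen
# backend requires the corresponding package ("weave" or "mlflow") to be installed.
def _example_init_rollout_tracing() -> None:
    RolloutTraceConfig.init(
        project_name="my-project",  # placeholder project name
        experiment_name="my-experiment",  # placeholder experiment name
        backend="mlflow",  # or "weave"; any other value leaves tracing disabled
        token2text=True,  # also record decoded prompt/response text in traces
        max_samples_per_step_per_worker=8,  # trace at most 8 unique samples per worker per step
    )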
@contextlib.contextmanager
def rollout_trace_attr(
sample_index=None, step=None, rollout_n=None, name="rollout_trace", validate=False, trace: bool = True
):
"""A context manager to add attributes to a trace for the configured backend.
Args:
sample_index: Sample index for the trace.
step: Training step number.
rollout_n: Rollout number (for GRPO with multiple rollouts per sample).
name: Name for the trace span (used by mlflow backend).
validate: Whether this is a validation run.
trace: If False, disables tracing for the duration of the context.
"""
backend = RolloutTraceConfig.get_backend()
should_skip = backend is not None and not trace
if should_skip:
token = _trace_enabled.set(False)
try:
yield
finally:
_trace_enabled.reset(token)
return
# Build attributes for the trace
attributes = {}
if backend:
if sample_index is not None:
attributes["sample_index"] = sample_index
if step is not None:
attributes["step"] = step
if rollout_n is not None:
attributes["rollout_n"] = rollout_n
attributes["validate"] = validate
attributes["experiment_name"] = RolloutTraceConfig.get_instance().experiment_name
if not attributes or backend is None:
yield
return
if backend == "weave":
import weave
with weave.attributes(attributes):
yield
elif backend == "mlflow":
import mlflow
with mlflow.start_span(name=name) as span:
trace_id = span.trace_id
for key, value in attributes.items():
mlflow.set_trace_tag(trace_id, str(key), str(value))
yield
else:
yield
def rollout_trace_op(func):
@functools.wraps(func)
async def async_wrapper(self, *args, **kwargs):
if not _trace_enabled.get():
return await func(self, *args, **kwargs)
backend = RolloutTraceConfig.get_backend()
enable_token2text = RolloutTraceConfig.enable_token2text()
if backend is None:
return await func(self, *args, **kwargs)
sig = inspect.signature(func)
bound_args = sig.bind(self, *args, **kwargs)
bound_args.apply_defaults()
inputs = dict(bound_args.arguments)
del inputs["self"]
async def add_token2text(self, result):
if hasattr(result, "prompt_ids") and hasattr(self, "tokenizer") and hasattr(self.tokenizer, "decode"):
# Use model_dump() for Pydantic models to get a proper copy,
# otherwise vars() returns a reference to internal __dict__ which
# can cause serialization issues with MLflow
if isinstance(result, BaseModel):
_result = result.model_dump()
else:
_result = dict(vars(result))
loop = get_event_loop()
if hasattr(result, "prompt_ids"):
prompt_text = await loop.run_in_executor(None, self.tokenizer.decode, result.prompt_ids)
_result["prompt_text"] = prompt_text
if hasattr(result, "response_ids"):
response_text = await loop.run_in_executor(None, self.tokenizer.decode, result.response_ids)
_result["response_text"] = response_text
return _result
return result
if backend == "weave":
tracer = RolloutTraceConfig.get_client()
from weave.trace.context import call_context
cur_attributes = {**call_context.call_attributes.get()}
call = tracer.create_call(op=func.__qualname__, inputs=inputs, attributes=cur_attributes)
try:
result = await func(self, *args, **kwargs)
if enable_token2text:
_result = await add_token2text(self, result)
tracer.finish_call(call, output=_result)
else:
tracer.finish_call(call, output=result)
return result
except Exception as e:
tracer.finish_call(call, exception=e)
raise e
elif backend == "mlflow":
import mlflow
with mlflow.start_span(name=func.__qualname__) as span:
span.set_inputs(inputs)
result = await func(self, *args, **kwargs)
if enable_token2text:
_result = await add_token2text(self, result)
span.set_outputs(_result)
else:
span.set_outputs(result)
return result
else:
return await func(self, *args, **kwargs)
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if not _trace_enabled.get():
return func(self, *args, **kwargs)
backend = RolloutTraceConfig.get_backend()
if backend is None:
return func(self, *args, **kwargs)
sig = inspect.signature(func)
bound_args = sig.bind(self, *args, **kwargs)
bound_args.apply_defaults()
inputs = dict(bound_args.arguments)
del inputs["self"]
if backend == "weave":
tracer = RolloutTraceConfig.get_client()
from weave.trace.context import call_context
cur_attributes = {**call_context.call_attributes.get()}
call = tracer.create_call(op=func.__qualname__, inputs=inputs, attributes=cur_attributes)
try:
result = func(self, *args, **kwargs)
tracer.finish_call(call, output=result)
return result
except Exception as e:
tracer.finish_call(call, exception=e)
raise e
elif backend == "mlflow":
import mlflow
return mlflow.trace(func)(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return async_wrapper if inspect.iscoroutinefunction(func) else wrapper
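# --- Illustrative sketch (not part of the original module) ---
# A minimal sketch of how rollout_trace_attr and rollout_trace_op are meant to be combined:
# attributes set by the context manager are attached to every decorated call made inside it.
# _ExampleAgent and _example_traced_rollout are hypothetical names, not verl APIs.
class _ExampleAgent:
    @rollout_trace_op
    async def generate(self, prompt: str) -> str:
        # A real agent would call an inference engine here.
        return f"echo: {prompt}"
async def _example_traced_rollout(step: int, sample_index: int, rollout_n: int) -> str:
    agent = _ExampleAgent()
    with rollout_trace_attr(step=step, sample_index=sample_index, rollout_n=rollout_n):
        return await agent.generate("hello")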
| {
"repo_id": "verl-project/verl",
"file_path": "verl/utils/rollout_trace.py",
"license": "Apache License 2.0",
"lines": 237,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:tests/trainer/config/test_legacy_config_on_cpu.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import warnings
from hydra import compose, initialize_config_dir
from hydra.core.global_hydra import GlobalHydra
from omegaconf import OmegaConf
_BREAKING_CHANGES = [
"critic.optim.lr", # mcore critic lr init value 1e-6 -> 1e-5
"actor_rollout_ref.actor.optim.lr_warmup_steps", # None -> -1
"critic.optim.lr_warmup_steps", # None -> -1
"actor_rollout_ref.rollout.name", # vllm -> ???
"actor_rollout_ref.actor.megatron.expert_tensor_parallel_size",
"actor_rollout_ref.ref.megatron.expert_tensor_parallel_size",
"critic.megatron.expert_tensor_parallel_size",
"reward_model.megatron.expert_tensor_parallel_size",
]
class TestConfigComparison(unittest.TestCase):
"""Test that current configs match their legacy counterparts exactly."""
ignored_keys = [
"enable_gradient_checkpointing",
"gradient_checkpointing_kwargs",
"activations_checkpoint_method",
"activations_checkpoint_granularity",
"activations_checkpoint_num_layers",
"discrete",
"profiler",
"profile",
"use_profile",
"npu_profile",
"profile_steps",
"worker_nsight_options",
"controller_nsight_options",
]
ignored_paths = ["reward_model", "custom_reward_function"]
def _compare_configs_recursively(
self, current_config, legacy_config, path="", legacy_allow_missing=True, current_allow_missing=False
):
"""Recursively compare two OmegaConf configs and assert they are identical.
Args:
legacy_allow_missing (bool): sometimes the legacy megatron config contains fewer keys and
we allow that to happen
"""
if path in self.ignored_paths:
return
if isinstance(current_config, dict) and isinstance(legacy_config, dict):
current_keys = set(current_config.keys())
legacy_keys = set(legacy_config.keys())
missing_in_current = legacy_keys - current_keys
missing_in_legacy = current_keys - legacy_keys
# Ignore specific keys that are allowed to be missing
for key in self.ignored_keys:
if key in missing_in_current:
missing_in_current.remove(key)
if key in missing_in_legacy:
missing_in_legacy.remove(key)
if missing_in_current:
msg = f"Keys missing in current config at {path}: {missing_in_current}"
if current_allow_missing:
warnings.warn(msg, stacklevel=1)
else:
self.fail(f"Keys missing in current config at {path}: {missing_in_current}")
if missing_in_legacy:
                # if the legacy config is missing keys, only warn instead of failing
msg = f"Keys missing in legacy config at {path}: {missing_in_legacy}"
if legacy_allow_missing:
warnings.warn(msg, stacklevel=1)
else:
self.fail(msg)
for key in current_keys:
current_path = f"{path}.{key}" if path else key
if key in legacy_config:
self._compare_configs_recursively(current_config[key], legacy_config[key], current_path)
elif isinstance(current_config, list) and isinstance(legacy_config, list):
self.assertEqual(
len(current_config),
len(legacy_config),
f"List lengths differ at {path}: current={len(current_config)}, legacy={len(legacy_config)}",
)
for i, (current_item, legacy_item) in enumerate(zip(current_config, legacy_config, strict=True)):
self._compare_configs_recursively(current_item, legacy_item, f"{path}[{i}]")
elif path not in _BREAKING_CHANGES:
self.assertEqual(
current_config,
legacy_config,
f"Values differ at {path}: current={current_config}, legacy={legacy_config}",
)
def test_ppo_trainer_config_matches_legacy(self):
"""Test that ppo_trainer.yaml matches legacy_ppo_trainer.yaml exactly."""
import os
from hydra import compose, initialize_config_dir
from hydra.core.global_hydra import GlobalHydra
GlobalHydra.instance().clear()
try:
with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config")):
current_config = compose(config_name="ppo_trainer")
legacy_config = OmegaConf.load("tests/trainer/config/legacy_ppo_trainer.yaml")
current_dict = OmegaConf.to_container(current_config, resolve=True)
legacy_dict = OmegaConf.to_container(legacy_config, resolve=True)
if "defaults" in current_dict:
del current_dict["defaults"]
self._compare_configs_recursively(current_dict, legacy_dict)
finally:
GlobalHydra.instance().clear()
def test_ppo_megatron_trainer_config_matches_legacy(self):
"""Test that ppo_megatron_trainer.yaml matches legacy_ppo_megatron_trainer.yaml exactly."""
GlobalHydra.instance().clear()
try:
with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config")):
current_config = compose(config_name="ppo_megatron_trainer")
legacy_config = OmegaConf.load("tests/trainer/config/legacy_ppo_megatron_trainer.yaml")
current_dict = OmegaConf.to_container(current_config, resolve=True)
legacy_dict = OmegaConf.to_container(legacy_config, resolve=True)
if "defaults" in current_dict:
del current_dict["defaults"]
self._compare_configs_recursively(
current_dict, legacy_dict, legacy_allow_missing=True, current_allow_missing=False
)
finally:
GlobalHydra.instance().clear()
def test_load_component(self):
"""Test that ppo_megatron_trainer.yaml matches legacy_ppo_megatron_trainer.yaml exactly."""
GlobalHydra.instance().clear()
configs_to_load = [
("verl/trainer/config/actor", "dp_actor"),
("verl/trainer/config/actor", "megatron_actor"),
("verl/trainer/config/ref", "dp_ref"),
("verl/trainer/config/ref", "megatron_ref"),
("verl/trainer/config/rollout", "rollout"),
]
for config_dir, config_file in configs_to_load:
try:
with initialize_config_dir(config_dir=os.path.abspath(config_dir)):
compose(config_name=config_file)
finally:
GlobalHydra.instance().clear()
if __name__ == "__main__":
unittest.main()
| {
"repo_id": "verl-project/verl",
"file_path": "tests/trainer/config/test_legacy_config_on_cpu.py",
"license": "Apache License 2.0",
"lines": 150,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:verl/trainer/constants_ppo.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ray._private.runtime_env.constants import RAY_JOB_CONFIG_JSON_ENV_VAR
PPO_RAY_RUNTIME_ENV = {
"env_vars": {
"TOKENIZERS_PARALLELISM": "true",
"NCCL_DEBUG": "WARN",
"VLLM_LOGGING_LEVEL": "WARN",
"VLLM_ALLOW_RUNTIME_LORA_UPDATING": "true",
"CUDA_DEVICE_MAX_CONNECTIONS": "1",
# To prevent hanging or crash during synchronization of weights between actor and rollout
# in disaggregated mode. See:
# https://docs.vllm.ai/en/latest/usage/troubleshooting.html?h=nccl_cumem_enable#known-issues
# https://github.com/vllm-project/vllm/blob/c6b0a7d3ba03ca414be1174e9bd86a97191b7090/vllm/worker/worker_base.py#L445
"NCCL_CUMEM_ENABLE": "0",
# TODO: disable compile cache due to cache corruption issue
# https://github.com/vllm-project/vllm/issues/31199
"VLLM_DISABLE_COMPILE_CACHE": "1",
# Needed for multi-processes colocated on same NPU device
# https://www.hiascend.com/document/detail/zh/canncommercial/83RC1/maintenref/envvar/envref_07_0143.html
"HCCL_HOST_SOCKET_PORT_RANGE": "auto",
"HCCL_NPU_SOCKET_PORT_RANGE": "auto",
},
}
def get_ppo_ray_runtime_env():
"""
    Return the PPO Ray runtime environment, dropping any environment variables that are
    already set in the current process so their existing values are not overridden.
"""
working_dir = (
json.loads(os.environ.get(RAY_JOB_CONFIG_JSON_ENV_VAR, "{}")).get("runtime_env", {}).get("working_dir", None)
)
runtime_env = {
"env_vars": PPO_RAY_RUNTIME_ENV["env_vars"].copy(),
**({"working_dir": None} if working_dir is None else {}),
}
for key in list(runtime_env["env_vars"].keys()):
if os.environ.get(key) is not None:
runtime_env["env_vars"].pop(key, None)
return runtime_env
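# --- Illustrative sketch (not part of the original module) ---
# A minimal sketch of the filtering behaviour above: any variable already exported in the
# current process is dropped from the returned env_vars so Ray does not override it.
def _example_runtime_env_filtering() -> dict:
    os.environ.setdefault("NCCL_DEBUG", "INFO")  # pretend the user already exported this
    runtime_env = get_ppo_ray_runtime_env()
    assert "NCCL_DEBUG" not in runtime_env["env_vars"]  # the preset "WARN" is not re-applied
    return runtime_env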
| {
"repo_id": "verl-project/verl",
"file_path": "verl/trainer/constants_ppo.py",
"license": "Apache License 2.0",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:tests/workers/rollout/test_sglang_async_rollout_multimodal_delta.py | # Copyright 2025 Amazon.com, Inc. or its affiliates
# Copyright 2023-2024 SGLang Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from verl.tools.schemas import ToolResponse
from verl.utils.dataset.vision_utils import process_image
from verl.utils.tokenizer import hf_processor
from verl.workers.rollout.schemas import (
AsyncRolloutRequest,
AsyncRolloutRequestStateEnum,
TokenizationSanityCheckModeEnum,
)
def _test_add_tool_response_messages_image_delta(processor, image_list, description_list, resize_image=False):
assert len(image_list) == len(description_list)
# Get the smallest dimensions across all images
processed_images = []
for img_url in image_list:
img = process_image(img_url)
processed_images.append(img)
min_width = min(img.size[0] for img in processed_images)
min_height = min(img.size[1] for img in processed_images)
min_size = (min_width, min_height)
if resize_image:
processed_images_resized = []
for img in processed_images:
img = img.resize(min_size)
processed_images_resized.append(img)
processed_images = processed_images_resized
# Initial message history
system_prompt = (
"You will be provided with an image. Describe this image and then generate a new image for the next round"
)
messages = [
{
"role": "system",
"content": system_prompt,
},
{
"role": "user",
"content": [
{"type": "text", "text": "Here is the first image provided: "},
{"type": "image", "image": [processed_images[0]]},
],
},
]
# Initial multi_modal_data with one image
multi_modal_data = {"image": [processed_images[0]], "video": []}
# Minimal required fields for AsyncRolloutRequest
req = AsyncRolloutRequest(
batch_data_id=0,
request_id="test-req-1",
state=AsyncRolloutRequestStateEnum.PENDING,
messages=messages,
multi_modal_keys=["image", "video"],
multi_modal_data=multi_modal_data.copy(),
tool_schemas=[],
tools_kwargs={},
interaction_kwargs={},
input_ids=None,
prompt_ids=None,
response_ids=None,
attention_mask=None,
prompt_attention_mask=None,
response_attention_mask=None,
position_ids=None,
prompt_position_ids=None,
response_position_ids=None,
loss_mask=None,
prompt_loss_mask=None,
response_loss_mask=None,
reward_scores={},
max_prompt_len=8192,
max_response_len=8192,
max_model_len=16384,
metrics={},
use_inference_chat_template=True,
tokenization_sanity_check_mode=TokenizationSanityCheckModeEnum.STRICT,
generation_prompt_ids=None,
base_conv_wo_gen_prompt_end_pos=0,
base_conv_with_gen_prompt_end_pos=0,
processing_class=processor,
)
prev_generated_len = 0
# Add First Assistant Message and first tool response message(image)
for idx, img in enumerate(processed_images):
if idx == 0:
continue
_ = req.get_generation_prompt_ids(processor)
req.add_assistant_message(processor, content=description_list[idx - 1])
before_tool_call_len = req.input_ids.shape[-1]
req.add_tool_response_messages(
processor, [ToolResponse(image=[img], text="Here is the new image you requested: ")]
)
after_tool_call_len = req.input_ids.shape[-1]
if prev_generated_len == 0:
prev_generated_len = after_tool_call_len - before_tool_call_len
else:
if resize_image:
assert after_tool_call_len - before_tool_call_len == prev_generated_len
assert req.multi_modal_data["image"] == processed_images[: idx + 1]
_ = req.get_generation_prompt_ids(processor)
req.add_assistant_message(processor, content=description_list[-1])
messages = [msg.model_dump() for msg in req.messages]
tools = [tool.model_dump() for tool in req.tool_schemas] if req.tool_schemas else None
full_prompt_info = req._handle_apply_chat_template(
processor,
messages,
multi_modal_data=req.multi_modal_data,
tools=tools,
add_generation_prompt=False,
tokenize=True,
return_dict=True,
)
full_prompt_ids = full_prompt_info["input_ids"]
assert full_prompt_ids.eq(req.input_ids).all()
    # Copy full_prompt_info into a new mapping before dropping input_ids/attention_mask,
    # because np.array() only keeps the keys for a BatchFeature.
full_prompt_multi_modal_inputs = full_prompt_info.copy()
full_prompt_multi_modal_inputs.pop("input_ids", None)
full_prompt_multi_modal_inputs.pop("attention_mask", None)
for key in full_prompt_multi_modal_inputs:
assert full_prompt_multi_modal_inputs[key].eq(req.multi_modal_inputs[key]).all()
@pytest.mark.skipif(
hf_processor(os.path.expanduser("~/models/Qwen/Qwen2.5-VL-3B-Instruct")) is None,
reason="Processor not available for Qwen/Qwen2.5-VL-B-Instruct",
)
def test_add_tool_response_messages_image_delta():
processor = hf_processor(os.path.expanduser("~/models/Qwen/Qwen2.5-VL-3B-Instruct"))
# From Qwen2.5-VL-3B-Instruct HF example
img_1_url = {"image": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg"}
img_1_description = "A woman sits on the beach at sunset, smiling as she shares a high five with her large dog."
# GitHub Logo
img_2_url = {"image": "https://github.githubassets.com/assets/GitHub-Mark-ea2971cee799.png"}
img_2_description = "A GitHub Logo image"
# Octocat
img_3_url = {"image": "https://octodex.github.com/images/orderedlistocat.png"}
img_3_description = "An Octocat image"
image_list = [img_1_url, img_2_url, img_3_url]
description_list = [img_1_description, img_2_description, img_3_description]
_test_add_tool_response_messages_image_delta(processor, image_list, description_list, resize_image=False)
@pytest.mark.skipif(
hf_processor(os.path.expanduser("~/models/Qwen/Qwen2.5-VL-3B-Instruct")) is None,
reason="Processor not available for Qwen/Qwen2.5-VL-B-Instruct",
)
def test_add_tool_response_messages_image_delta_resize_image():
processor = hf_processor(os.path.expanduser("~/models/Qwen/Qwen2.5-VL-3B-Instruct"))
# From Qwen2.5-VL-3B-Instruct HF example
img_1_url = {"image": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg"}
img_1_description = "A woman sits on the beach at sunset, smiling as she shares a high five with her large dog."
# GitHub Logo
img_2_url = {"image": "https://github.githubassets.com/assets/GitHub-Mark-ea2971cee799.png"}
img_2_description = "A GitHub Logo image"
# Octocat
img_3_url = {"image": "https://octodex.github.com/images/orderedlistocat.png"}
img_3_description = "An Octocat image"
image_list = [img_1_url, img_2_url, img_3_url]
description_list = [img_1_description, img_2_description, img_3_description]
_test_add_tool_response_messages_image_delta(processor, image_list, description_list, resize_image=True)
| {
"repo_id": "verl-project/verl",
"file_path": "tests/workers/rollout/test_sglang_async_rollout_multimodal_delta.py",
"license": "Apache License 2.0",
"lines": 170,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:tests/utils/dataset/test_create_rl_sampler_on_cpu.py | # Copyright 2025 Amazon.com Inc and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
test create_rl_sampler
"""
from collections.abc import Sized
import pytest
import torch
from omegaconf import DictConfig, OmegaConf
from torch.utils.data import Dataset, RandomSampler
from verl.experimental.dataset.sampler import AbstractCurriculumSampler
from verl.trainer.main_ppo import create_rl_sampler
class RandomCurriculumSampler(AbstractCurriculumSampler):
def __init__(
self,
data_source: Sized,
data_config: DictConfig,
):
train_dataloader_generator = torch.Generator()
train_dataloader_generator.manual_seed(1)
sampler = RandomSampler(data_source=data_source)
self.sampler = sampler
def __iter__(self):
return self.sampler.__iter__()
def __len__(self) -> int:
return len(self.sampler)
def update(self, batch) -> None:
return
class MockIncorrectSampler:
"""A fake sampler class that does not adhere to the AbstractCurriculumSampler interface."""
def __init__(self, data_source, data_config):
pass
class MockChatDataset(Dataset):
def __init__(self):
self.data = [
{"prompt": "What's your name?", "response": "My name is Assistant."},
{"prompt": "How are you?", "response": "I'm doing well, thank you."},
{"prompt": "What is the capital of France?", "response": "Paris."},
{
"prompt": "Tell me a joke.",
"response": "Why did the chicken cross the road? To get to the other side!",
},
{"prompt": "What is 2+2?", "response": "4"},
]
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return len(self.data)
def test_create_custom_curriculum_sampler():
data_config = OmegaConf.create(
{
"dataloader_num_workers": 0,
"sampler": {
"class_path": "pkg://tests.utils.dataset.test_create_rl_sampler_on_cpu",
"class_name": "RandomCurriculumSampler",
},
}
)
dataset = MockChatDataset()
# doesn't raise
create_rl_sampler(data_config, dataset)
def test_create_custom_curriculum_sampler_wrong_class():
data_config = OmegaConf.create(
{
"sampler": {
"class_path": "pkg://tests.utils.dataset.test_create_rl_sampler_on_cpu",
"class_name": "MockIncorrectSampler",
}
}
)
dataset = MockChatDataset()
# MockIncorrectSampler is not an instance of AbstractCurriculumSampler, so raises
with pytest.raises(AssertionError):
create_rl_sampler(data_config, dataset)
| {
"repo_id": "verl-project/verl",
"file_path": "tests/utils/dataset/test_create_rl_sampler_on_cpu.py",
"license": "Apache License 2.0",
"lines": 85,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:tests/test_base_config_on_cpu.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from verl.base_config import BaseConfig
@pytest.fixture
def base_config_mock():
"""Fixture to create a mock BaseConfig instance with test attributes."""
mock_config = BaseConfig()
mock_config.test_attr = "test_value"
return mock_config
def test_getitem_success(base_config_mock):
"""Test __getitem__ with existing attribute (happy path)."""
assert base_config_mock["test_attr"] == "test_value"
def test_getitem_nonexistent_attribute(base_config_mock):
"""Test __getitem__ with non-existent attribute (exception path 1)."""
with pytest.raises(AttributeError):
_ = base_config_mock["nonexistent_attr"]
def test_getitem_invalid_key_type(base_config_mock):
"""Test __getitem__ with invalid key type (exception path 2)."""
with pytest.raises(TypeError):
_ = base_config_mock[123] # type: ignore
| {
"repo_id": "verl-project/verl",
"file_path": "tests/test_base_config_on_cpu.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:tests/trainer/config/test_algo_config_on_cpu.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from omegaconf import OmegaConf
from verl.trainer.config import AlgoConfig, KLControlConfig
from verl.trainer.ppo.core_algos import (
compute_gae_advantage_return,
compute_grpo_outcome_advantage,
get_adv_estimator_fn,
)
from verl.utils.config import omega_conf_to_dataclass
class TestAlgoConfig(unittest.TestCase):
"""Test the AlgoConfig dataclass and its integration with core algorithms."""
def setUp(self):
"""Set up test fixtures."""
# Create a sample algorithm config as DictConfig (similar to what comes from YAML)
self.config_dict = {
"_target_": "verl.trainer.config.AlgoConfig",
"gamma": 0.99,
"lam": 0.95,
"adv_estimator": "gae",
"norm_adv_by_std_in_grpo": True,
"use_kl_in_reward": True,
"kl_penalty": "kl",
"kl_ctrl": {
"_target_": "verl.trainer.config.KLControlConfig",
"type": "adaptive",
"kl_coef": 0.002,
"horizon": 5000,
"target_kl": 0.05,
},
"use_pf_ppo": True,
"pf_ppo": {"reweight_method": "max_min", "weight_pow": 3.0},
}
self.omega_config = OmegaConf.create(self.config_dict)
def test_dataclass_creation_from_dict(self):
"""Test creating AlgoConfig from dictionary."""
config = omega_conf_to_dataclass(self.config_dict)
self.assertIsInstance(config, AlgoConfig)
self.assertEqual(config.gamma, 0.99)
self.assertEqual(config.lam, 0.95)
self.assertEqual(config.adv_estimator, "gae")
self.assertTrue(config.norm_adv_by_std_in_grpo)
self.assertTrue(config.use_kl_in_reward)
self.assertEqual(config.kl_penalty, "kl")
self.assertTrue(config.use_pf_ppo)
def test_dataclass_creation_from_omega_config(self):
"""Test creating AlgoConfig from OmegaConf DictConfig."""
config = omega_conf_to_dataclass(self.omega_config)
self.assertIsInstance(config, AlgoConfig)
self.assertEqual(config.gamma, 0.99)
self.assertEqual(config.lam, 0.95)
def test_nested_configs(self):
"""Test that nested configurations are properly converted."""
config = omega_conf_to_dataclass(self.omega_config)
# Test KL control config
self.assertIsInstance(config.kl_ctrl, KLControlConfig)
self.assertEqual(config.kl_ctrl.type, "adaptive")
self.assertEqual(config.kl_ctrl.kl_coef, 0.002)
self.assertEqual(config.kl_ctrl.horizon, 5000)
self.assertEqual(config.kl_ctrl.target_kl, 0.05)
# Test PF PPO config
self.assertEqual(config.pf_ppo.get("reweight_method"), "max_min")
self.assertEqual(config.pf_ppo.get("weight_pow"), 3.0)
def test_default_values(self):
"""Test that default values are properly set."""
minimal_config = {"gamma": 0.8}
config = omega_conf_to_dataclass(minimal_config, AlgoConfig)
self.assertEqual(config.gamma, 0.8)
self.assertEqual(config.lam, 1.0) # default value
self.assertEqual(config.adv_estimator, "gae") # default value
self.assertTrue(config.norm_adv_by_std_in_grpo) # default value
self.assertFalse(config.use_kl_in_reward) # default value
self.assertEqual(config.kl_penalty, "kl") # default value
self.assertFalse(config.use_pf_ppo) # default value
def test_get_method_backward_compatibility(self):
"""Test the get method for backward compatibility."""
config = omega_conf_to_dataclass(self.omega_config)
# Test existing attribute
self.assertEqual(config.get("gamma"), 0.99)
self.assertEqual(config.get("gamma", 1.0), 0.99)
# Test non-existing attribute
self.assertIsNone(config.get("non_existing"))
self.assertEqual(config.get("non_existing", "default"), "default")
def test_post_init_nested_configs(self):
"""Test that __post_init__ properly initializes nested configs when None."""
# Create config without nested configs
minimal_config = AlgoConfig(gamma=0.9)
# Check that nested configs are initialized
self.assertIsNotNone(minimal_config.kl_ctrl)
self.assertIsInstance(minimal_config.kl_ctrl, KLControlConfig)
assert not minimal_config.pf_ppo
def test_config_init_from_yaml(self):
import os
from hydra import compose, initialize_config_dir
with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config")):
cfg = compose(config_name="ppo_trainer")
algo_config = omega_conf_to_dataclass(cfg.algorithm)
from verl.trainer.config import AlgoConfig
assert isinstance(algo_config, AlgoConfig)
class TestAlgoCompute(unittest.TestCase):
"""Test the AlgoConfig dataclass and its integration with core algorithms."""
def setUp(self):
"""Set up test fixtures."""
self.algo_config = AlgoConfig(
gamma=0.99,
lam=0.95,
adv_estimator="gae",
norm_adv_by_std_in_grpo=True,
use_kl_in_reward=True,
kl_penalty="kl",
kl_ctrl=KLControlConfig(type="adaptive", kl_coef=0.002, horizon=5000, target_kl=0.05),
use_pf_ppo=True,
pf_ppo={"reweight_method": "max_min", "weight_pow": 3.0},
)
def test_advantage_estimator_with_cfg(self):
"""Test integration with advantage estimators from core_algos."""
config = self.algo_config
# Test GAE advantage estimator
adv_fn = get_adv_estimator_fn(config.adv_estimator)
self.assertIsNotNone(adv_fn)
# Test with actual GAE computation
batch_size, seq_len = 2, 5
token_level_rewards = torch.randn(batch_size, seq_len)
values = torch.randn(batch_size, seq_len)
response_mask = torch.ones(batch_size, seq_len)
advantages, returns = compute_gae_advantage_return(
token_level_rewards=token_level_rewards,
values=values,
response_mask=response_mask,
gamma=config.gamma,
lam=config.lam,
)
self.assertEqual(advantages.shape, (batch_size, seq_len))
self.assertEqual(returns.shape, (batch_size, seq_len))
def test_grpo_advantage_estimator_with_cfg(self):
"""Test integration with GRPO advantage estimator."""
grpo_config = AlgoConfig(adv_estimator="grpo", norm_adv_by_std_in_grpo=True)
# Test GRPO advantage computation
batch_size, seq_len = 4, 3
token_level_rewards = torch.tensor([[1.0, 0.5, 0.0], [2.0, 1.0, 0.0], [0.5, 0.2, 0.0], [1.5, 0.8, 0.0]])
response_mask = torch.ones(batch_size, seq_len)
index = np.array([0, 0, 1, 1]) # Two groups
advantages, returns = compute_grpo_outcome_advantage(
token_level_rewards=token_level_rewards,
response_mask=response_mask,
index=index,
norm_adv_by_std_in_grpo=grpo_config.norm_adv_by_std_in_grpo,
)
self.assertEqual(advantages.shape, (batch_size, seq_len))
self.assertEqual(returns.shape, (batch_size, seq_len))
if __name__ == "__main__":
unittest.main()
| {
"repo_id": "verl-project/verl",
"file_path": "tests/trainer/config/test_algo_config_on_cpu.py",
"license": "Apache License 2.0",
"lines": 166,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:verl/base_config.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from dataclasses import FrozenInstanceError, dataclass, fields
from typing import Any
# BaseConfig class inherits from collections.abc.Mapping, which means it can act like a dictionary
@dataclass
class BaseConfig(collections.abc.Mapping):
"""The BaseConfig provides dict-like interface for a dataclass config.
By default all fields in the config is not mutable, unless specified in
"_mutable_fields". The BaseConfig class implements the Mapping Abstract Base Class.
This allows instances of this class to be used like dictionaries.
"""
_mutable_fields = set()
_target_: str = ""
def __setattr__(self, name: str, value):
"""Set the value of an attribute. Check if the attr is mutable before setting the value."""
# If the field already exists, it's considered frozen unless it's in _mutable_fields
if name in self.__dict__ and name not in getattr(self, "_mutable_fields", set()):
raise FrozenInstanceError(f"Field '{name}' is frozen and cannot be modified")
super().__setattr__(name, value)
def get(self, key: str, default: Any = None) -> Any:
"""Get the value associated with the given key. If the key does not exist, return the default value.
Args:
key (str): The attribute name to retrieve.
default (Any, optional): The value to return if the attribute does not exist. Defaults to None.
Returns:
Any: The value of the attribute or the default value.
"""
try:
return getattr(self, key)
except AttributeError:
return default
def __getitem__(self, key: str):
"""Implement the [] operator for the class. Allows accessing attributes like dictionary items.
Args:
key (str): The attribute name to retrieve.
Returns:
Any: The value of the attribute.
Raises:
AttributeError: If the attribute does not exist.
TypeError: If the key type is not string
"""
return getattr(self, key)
def __iter__(self):
"""Implement the iterator protocol. Allows iterating over the attribute names of the instance.
Yields:
str: The name of each field in the dataclass.
"""
for f in fields(self):
yield f.name
def __len__(self):
"""
Return the number of fields in the dataclass.
Returns:
int: The number of fields in the dataclass.
"""
return len(fields(self))
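# --- Illustrative sketch (not part of the original module) ---
# A minimal sketch of how a config dataclass builds on BaseConfig: fields are frozen after
# construction unless listed in _mutable_fields, and instances support dict-style reads.
# _ExampleOptimConfig is a hypothetical config used only for illustration.
@dataclass
class _ExampleOptimConfig(BaseConfig):
    _mutable_fields = {"lr"}
    lr: float = 1e-5
    warmup_steps: int = 0
def _example_base_config_usage() -> None:
    cfg = _ExampleOptimConfig()
    assert cfg["lr"] == 1e-5 and cfg.get("missing", 0.0) == 0.0
    cfg.lr = 5e-6  # allowed: "lr" is declared mutable
    try:
        cfg.warmup_steps = 10  # frozen field, raises FrozenInstanceError
    except FrozenInstanceError:
        pass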
| {
"repo_id": "verl-project/verl",
"file_path": "verl/base_config.py",
"license": "Apache License 2.0",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/trainer/config/algorithm.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from typing import Any, Optional
from verl.base_config import BaseConfig
__all__ = ["AlgoConfig", "FilterGroupsConfig", "KLControlConfig", "RolloutCorrectionConfig"]
@dataclass
class KLControlConfig(BaseConfig):
"""Configuration for KL control.
The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config.
Args:
type (str): Type of KL control. Can be "fixed" or "adaptive".
kl_coef (float): Initial coefficient for KL penalty.
horizon (int): Horizon value for adaptive controller.
target_kl (float): Target KL divergence for adaptive controller.
"""
type: str = "fixed"
kl_coef: float = 0.001
horizon: int = 10000
target_kl: float = 0.1
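# --- Illustrative sketch (not part of the original module) ---
# A minimal sketch of the two controller flavours described in the docstring above;
# the numeric values are placeholders, not recommended settings.
def _example_kl_control_configs() -> tuple:
    fixed = KLControlConfig(type="fixed", kl_coef=0.001)
    adaptive = KLControlConfig(type="adaptive", kl_coef=0.002, horizon=10000, target_kl=0.1)
    return fixed, adaptive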
@dataclass
class FilterGroupsConfig(BaseConfig):
"""Configuration for filter groups (used in DAPO and Entropy).
The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config.
Args:
enable (bool): Whether to enable filter groups.
metric (Optional[str]): Metric to use for filtering: "acc", "score", "seq_reward", "seq_final_reward", etc.
max_num_gen_batches (int): Non-positive values mean no upper limit.
"""
enable: bool = False
metric: Optional[str] = None
max_num_gen_batches: int = 0
@dataclass
class RolloutCorrectionConfig(BaseConfig):
"""Configuration for Rollout Correction (addresses off-policy issues in RL training).
The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config.
Rollout Correction handles off-policiness from multiple sources:
1. Policy mismatch: Rollout policy (e.g., vLLM BF16) vs Training policy (e.g., FSDP FP32)
2. Model update staleness: Rollout data collected from older policy checkpoints
3. General off-policy scenarios: Any distribution shift between data collection and training
For more details, see:
"When Speed Kills Stability: Demystifying RL Collapse from the Training-Inference Mismatch"
https://richardli.xyz/rl-collapse
This typed config replaces the old dict-based approach and provides:
- Type safety and validation
- Clear documentation of all parameters
- Named factory methods for common presets (TIS, MIS, etc.)
- Sensible defaults
Args:
rollout_is (Optional[str]): IS weight aggregation level.
- None: No IS weights (metrics only)
- "token": Per-token IS weights (low variance, biased)
- "sequence": Per-sequence IS weights (unbiased, high variance)
Default: "sequence"
rollout_is_threshold (float): Upper threshold for IS weight truncation/rejection.
Typical range: 1.5-5.0 for token level, 2.0-10.0 for sequence level.
Default: 2.0
rollout_is_batch_normalize (bool): Apply batch normalization to IS weights.
- True: Normalize IS weights to have mean=1.0 within each batch
- False: Use raw (truncated) IS weights (standard)
- Reduces variance by ensuring average weight is 1.0 per batch
- Only affects IS weight values, not rejection sampling
Default: False (no batch normalization)
rollout_rs (Optional[str]): Rejection sampling aggregation modes.
Accepts a comma-delimited list (duplicates removed) of canonical options implemented in
``rollout_corr_helper``:
- "token_k1": Token-level rejection with ``-log r`` (ratio thresholds supplied via
``rollout_rs_threshold`` as ``lower_upper``)
- "token_k2": Token-level rejection with ``0.5 * (log r)^2`` (upper bound only)
- "token_k3": Token-level rejection with ``exp(log r) - 1 - log r`` (upper bound only)
- "seq_sum_k1": Sequence sum of ``-log r`` (ratio bounds)
- "seq_sum_k2": Sequence sum of rejection with ``0.5 * (log r)^2`` (upper bound only)
- "seq_sum_k3": Sequence sum of rejection with ``exp(log r) - 1 - log r`` (upper bound only)
- "seq_mean_k1": Sequence mean of ``-log r`` (ratio bounds)
- "seq_mean_k2": Sequence mean of rejection with ``0.5 * (log r)^2`` (upper bound only)
- "seq_mean_k3": Sequence mean of rejection with ``exp(log r) - 1 - log r`` (upper bound only)
- "seq_max_k2": Sequence max of rejection with ``0.5 * (log r)^2`` (upper bound only)
- "seq_max_k3": Sequence max of rejection with ``exp(log r) - 1 - log r`` (upper bound only)
            Aliases are normalized to these canonical option names automatically. Default: None
rollout_rs_threshold (Optional[Union[str, float]]): Threshold specification for rejection sampling.
Provide one value per option (single entry is broadcast when multiple options are supplied).
Ratio-based modes (``*k1``) expect ``lower_upper`` strings; supplying a single float implies
only the upper ratio bound, with the lower bound inferred as its reciprocal. Divergence modes
(k2/k3) expect positive upper bounds (float or string). Default: None
bypass_mode (bool): Operating mode - bypass or decoupled.
- True: Bypass mode - reuse rollout_log_prob as old_log_prob (2 policies)
Uses compute_policy_loss_bypass_mode() with loss_type selection
- False: Decoupled mode - compute old_log_prob separately (3 policies)
Uses standard PPO loss with IS weight correction
Default: False (decoupled mode)
loss_type (str): Loss function type in bypass mode (bypass_mode=True).
- "reinforce": REINFORCE-style policy gradient with explicit IS weights
L = -E[w * log π(a|s) * A] where w = π_current / π_rollout
- "ppo_clip": PPO clipped objective (IS handled by ratio, no explicit weights)
L = -E[min(r*A, clip(r)*A)] where r = π_current / π_rollout
Default: "ppo_clip"
Example:
# Create with defaults
config = RolloutCorrectionConfig()
# Decoupled PPO mode presets (3 policies: π_rollout, π_old, π_θ)
# IS weights correct for gap between π_old and π_rollout
config = RolloutCorrectionConfig.decoupled_token_is() # Token-TIS
config = RolloutCorrectionConfig.decoupled_seq_is() # Seq-TIS
config = RolloutCorrectionConfig.decoupled_seq_is_rs() # Seq-MIS
config = RolloutCorrectionConfig.decoupled_geo_rs() # Geo-RS (ratio mode)
# Bypass mode presets (2 policies: π_rollout = π_old, π_θ)
# loss_type controls the loss function
# PPO-clip presets (ratio handles IS, so no separate IS weights needed):
config = RolloutCorrectionConfig.bypass_ppo_clip() # PPO-clip only
config = RolloutCorrectionConfig.bypass_ppo_clip_geo_rs() # PPO-clip + Geo-RS
config = RolloutCorrectionConfig.bypass_ppo_clip_k3_rs() # PPO-clip + K3-RS
# REINFORCE presets (explicit IS weights):
config = RolloutCorrectionConfig.bypass_pg_is() # REINFORCE + Seq-TIS
config = RolloutCorrectionConfig.bypass_pg_geo_rs() # REINFORCE + Geo-RS
config = RolloutCorrectionConfig.bypass_pg_geo_rs_seq_tis() # REINFORCE + Geo-RS + Seq-TIS
config = RolloutCorrectionConfig.bypass_pg_geo_rs_token_tis() # REINFORCE + Geo-RS + Token-TIS
# Decoupled Geometric ratio presets (length-normalized IS ratio)
config = RolloutCorrectionConfig.decoupled_geo_rs_seq_tis() # Decoupled Geo-RS + Seq-TIS
config = RolloutCorrectionConfig.decoupled_geo_rs_token_tis() # Decoupled Geo-RS + Token-TIS
# Decoupled K3 KL Estimator presets (more stable for small KL values)
config = RolloutCorrectionConfig.decoupled_k3_rs() # Decoupled K3-RS
config = RolloutCorrectionConfig.decoupled_k3_rs_seq_tis() # Decoupled K3-RS + Seq-TIS
config = RolloutCorrectionConfig.decoupled_k3_rs_token_tis() # Decoupled K3-RS + Token-TIS
Reference:
Liu, Li, Fu, Wang, Liu, Shen (2025)
"When Speed Kills Stability: Demystifying RL Collapse from the Training-Inference Mismatch"
https://richardli.xyz/rl-collapse
"""
rollout_is: Optional[str] = "sequence"
rollout_is_threshold: float = 2.0
rollout_is_batch_normalize: bool = False
rollout_rs: Optional[str] = None
rollout_rs_threshold: Optional[str | float] = None
bypass_mode: bool = False
loss_type: str = "ppo_clip"
@classmethod
def decoupled_token_is(cls, threshold: float = 2.0) -> "RolloutCorrectionConfig":
"""Decoupled Mode with Token-level Importance Sampling.
IS weight correction at token level in decoupled mode (three policies).
Args:
threshold (float): Upper threshold for IS weights. Default: 2.0
Returns:
RolloutCorrectionConfig configured for decoupled mode with token-level IS
"""
return cls(rollout_is="token", rollout_is_threshold=threshold, rollout_rs=None)
@classmethod
def decoupled_seq_is(cls, threshold: float = 2.0) -> "RolloutCorrectionConfig":
"""Decoupled Mode with Sequence-level Importance Sampling.
IS weight correction at sequence level in decoupled mode (three policies).
Args:
threshold (float): Upper threshold for IS weights. Default: 2.0
Returns:
RolloutCorrectionConfig configured for decoupled mode with sequence-level IS
"""
return cls(rollout_is="sequence", rollout_is_threshold=threshold, rollout_rs=None)
@classmethod
def decoupled_seq_is_rs(
cls,
is_threshold: float = 2.0,
rs_threshold: Optional[str | float] = "0.5_2.0",
) -> "RolloutCorrectionConfig":
"""Decoupled Mode with Sequence-level IS + Rejection Sampling.
Sequence-level IS with sequence-level rejection sampling in decoupled mode.
Rejects entire sequences based on sequence-level IS weight.
Args:
is_threshold (float): Upper threshold for IS weights. Default: 2.0
rs_threshold (Optional[Union[str, float]]): Upper threshold for rejection sampling. Default: 0.5_2.0
Returns:
RolloutCorrectionConfig configured for decoupled mode with sequence IS + RS
"""
return cls(
rollout_is="sequence",
rollout_is_threshold=is_threshold,
rollout_rs="seq_sum_k1",
rollout_rs_threshold=rs_threshold,
)
@classmethod
def decoupled_geo_rs(
cls,
rs_threshold: Optional[str | float] = "0.999_1.001",
) -> "RolloutCorrectionConfig":
"""Decoupled Mode with Geometric Mean Rejection Sampling (ratio-based).
Uses geometric mean IS ratio E[log(r)] for rejection sampling at sequence level.
This is a ratio-based mode (ideal = 0.0) with [lower, upper] threshold bounds.
Length-normalized but still uses IS ratio semantics.
Args:
rs_threshold (Optional[Union[str, float]]): Geometric RS threshold (upper). Default: 0.999_1.001 (±0.1%)
Returns:
RolloutCorrectionConfig configured for decoupled mode with Geo-RS
"""
return cls(
rollout_is=None,
rollout_rs="seq_mean_k1",
rollout_rs_threshold=rs_threshold,
)
@classmethod
def bypass_ppo_clip(cls) -> "RolloutCorrectionConfig":
"""Bypass mode with PPO-clip loss.
PPO clipped objective in bypass mode. The PPO ratio = π_θ/π_rollout
already handles IS correction, so no explicit IS weights are applied.
Skips old_log_prob computation for faster execution (2 policies instead of 3).
Returns:
RolloutCorrectionConfig configured for bypass mode with PPO-clip
"""
return cls(
rollout_is=None,
rollout_rs=None,
bypass_mode=True,
loss_type="ppo_clip",
)
@classmethod
def bypass_ppo_clip_geo_rs(
cls,
rs_threshold: Optional[str | float] = "0.999_1.001",
) -> "RolloutCorrectionConfig":
"""Bypass mode with PPO-clip loss and Geometric Mean RS (ratio-based).
PPO clipped objective in bypass mode with geometric mean IS ratio RS.
Uses E[log(r)] (ideal = 0.0) with [lower, upper] threshold bounds.
Args:
rs_threshold (Optional[Union[str, float]]): Geometric RS threshold (upper). Default: 0.999_1.001 (±0.1%)
Returns:
RolloutCorrectionConfig configured for bypass mode with PPO-clip + Geo-RS
"""
return cls(
rollout_is=None,
rollout_rs="seq_mean_k1",
rollout_rs_threshold=rs_threshold,
bypass_mode=True,
loss_type="ppo_clip",
)
@classmethod
def bypass_ppo_clip_k3_rs(
cls,
rs_threshold: float = 0.01,
) -> "RolloutCorrectionConfig":
"""Bypass mode with PPO-clip loss and K3 Rejection Sampling.
PPO clipped objective in bypass mode with K3 KL estimator RS to mask outliers.
K3 is more stable than K1 for small KL values.
The PPO ratio = π_θ/π_rollout already handles IS correction.
Args:
rs_threshold (float): Max allowed K3 divergence. Default: 0.01
Returns:
RolloutCorrectionConfig configured for bypass mode with PPO-clip + K3-RS
"""
return cls(
rollout_is=None,
rollout_rs="seq_mean_k3",
rollout_rs_threshold=rs_threshold,
bypass_mode=True,
loss_type="ppo_clip",
)
@classmethod
def bypass_pg_is(cls, threshold: float = 2.0) -> "RolloutCorrectionConfig":
"""Bypass mode with REINFORCE loss and IS Correction.
Uses REINFORCE loss with explicit IS correction in bypass mode.
No PPO clipping.
Args:
threshold (float): Upper threshold for IS weights. Default: 2.0
Returns:
RolloutCorrectionConfig configured for bypass mode with REINFORCE + IS
"""
return cls(
rollout_is="sequence",
rollout_is_threshold=threshold,
rollout_rs=None,
bypass_mode=True,
loss_type="reinforce",
)
@classmethod
def bypass_pg_geo_rs(
cls,
rs_threshold: Optional[str | float] = "0.999_1.001",
) -> "RolloutCorrectionConfig":
"""Bypass mode with REINFORCE loss and Geometric Mean RS (ratio-based).
REINFORCE with geometric mean IS ratio rejection sampling in bypass mode.
Uses E[log(r)] (ideal = 0.0) with [lower, upper] threshold bounds.
Args:
rs_threshold (Optional[Union[str, float]]): Geometric RS threshold (upper). Default: 0.999_1.001 (±0.1%)
Returns:
RolloutCorrectionConfig configured for bypass mode with REINFORCE + Geo-RS
"""
return cls(
rollout_is=None,
rollout_rs="seq_mean_k1",
rollout_rs_threshold=rs_threshold,
bypass_mode=True,
loss_type="reinforce",
)
@classmethod
def decoupled_geo_rs_seq_tis(
cls,
is_threshold: float = 2.0,
rs_threshold: Optional[str | float] = "0.999_1.001",
) -> "RolloutCorrectionConfig":
"""Decoupled mode with Geometric Mean RS and Sequence-level Truncated IS (ratio-based).
Combines the Geometric Mean Filter (ratio-based validity check) with
Clipped Sequence Weight (debiasing). Uses E[log(r)] (ideal = 0.0).
Args:
is_threshold (float): Upper threshold for sequence IS weights. Default: 2.0
rs_threshold (Optional[Union[str, float]]): Geometric RS threshold (upper). Default: 0.999_1.001 (±0.1%)
Returns:
RolloutCorrectionConfig configured for Geo-RS-Seq-TIS
"""
return cls(
rollout_is="sequence",
rollout_is_threshold=is_threshold,
rollout_rs="seq_mean_k1",
rollout_rs_threshold=rs_threshold,
)
@classmethod
def decoupled_geo_rs_token_tis(
cls,
is_threshold: float = 2.0,
rs_threshold: Optional[str | float] = "0.999_1.001",
) -> "RolloutCorrectionConfig":
"""Decoupled mode with Geometric Mean RS and Token-level Truncated IS (ratio-based).
Combines the Geometric Mean Filter (ratio-based validity check) with
Token-level IS weights. Uses E[log(r)] (ideal = 0.0).
Args:
is_threshold (float): Upper threshold for token IS weights. Default: 2.0
rs_threshold (Optional[Union[str, float]]): Geometric RS threshold (upper). Default: 0.999_1.001 (±0.1%)
Returns:
RolloutCorrectionConfig configured for Geo-RS-Token-TIS
"""
return cls(
rollout_is="token",
rollout_is_threshold=is_threshold,
rollout_rs="seq_mean_k1",
rollout_rs_threshold=rs_threshold,
)
@classmethod
def bypass_pg_geo_rs_seq_tis(
cls,
is_threshold: float = 2.0,
rs_threshold: Optional[str | float] = "0.999_1.001",
) -> "RolloutCorrectionConfig":
"""Bypass mode with REINFORCE loss, Geo-RS, and Sequence-level IS.
Combines geometric mean IS ratio rejection with sequence-level IS
in bypass mode with REINFORCE loss (no PPO clipping).
Uses E[log(r)] (ideal = 0.0) with [lower, upper] threshold bounds.
Args:
is_threshold (float): Upper threshold for sequence IS weights. Default: 2.0
rs_threshold (Optional[Union[str, float]]): Geometric RS threshold (upper). Default: 0.999_1.001 (±0.1%)
Returns:
RolloutCorrectionConfig configured for bypass mode with REINFORCE + Geo-RS + Seq-TIS
"""
return cls(
rollout_is="sequence",
rollout_is_threshold=is_threshold,
rollout_rs="seq_mean_k1",
rollout_rs_threshold=rs_threshold,
bypass_mode=True,
loss_type="reinforce",
)
@classmethod
def bypass_pg_geo_rs_token_tis(
cls,
is_threshold: float = 2.0,
rs_threshold: Optional[str | float] = "0.999_1.001",
) -> "RolloutCorrectionConfig":
"""Bypass mode with REINFORCE loss, Geo-RS, and Token-level IS.
Combines geometric mean IS ratio rejection with token-level IS weights
in bypass mode with REINFORCE loss (no PPO clipping).
Uses E[log(r)] (ideal = 0.0) with [lower, upper] threshold bounds.
Token-level IS has lower variance but introduces bias.
Args:
is_threshold (float): Upper threshold for token IS weights. Default: 2.0
rs_threshold (Optional[Union[str, float]]): Geometric RS threshold (upper). Default: 0.999_1.001 (±0.1%)
Returns:
RolloutCorrectionConfig configured for bypass mode with REINFORCE + Geo-RS + Token-TIS
"""
return cls(
rollout_is="token",
rollout_is_threshold=is_threshold,
rollout_rs="seq_mean_k1",
rollout_rs_threshold=rs_threshold,
bypass_mode=True,
loss_type="reinforce",
)
@classmethod
def decoupled_k3_rs(
cls,
rs_threshold: float = 0.01,
) -> "RolloutCorrectionConfig":
"""Decoupled mode with K3 KL Estimator Rejection Sampling.
Uses K3 KL estimator at sequence level for rejection sampling.
K3 = E[r - log(r) - 1] where r = π_train/π_rollout.
More stable than geometric mean for small KL values.
K3 >= 0 always (equals 0 when policies match exactly).
Args:
rs_threshold (float): Max allowed K3 divergence. Default: 0.01
Typical range: 0.001-0.1
Returns:
RolloutCorrectionConfig configured for K3 RS
"""
return cls(
rollout_is=None,
rollout_rs="seq_mean_k3",
rollout_rs_threshold=rs_threshold,
)
@classmethod
def decoupled_k3_rs_seq_tis(
cls,
is_threshold: float = 2.0,
rs_threshold: float = 0.01,
) -> "RolloutCorrectionConfig":
"""Decoupled mode with K3 RS and Sequence-level Truncated IS.
Combines K3 KL estimator rejection with sequence-level IS weights.
K3 provides more stable outlier detection than geometric mean.
Args:
is_threshold (float): Upper threshold for sequence IS weights. Default: 2.0
rs_threshold (float): Max allowed K3 divergence. Default: 0.01
Returns:
RolloutCorrectionConfig configured for K3-RS-Seq-TIS
"""
return cls(
rollout_is="sequence",
rollout_is_threshold=is_threshold,
rollout_rs="seq_mean_k3",
rollout_rs_threshold=rs_threshold,
)
@classmethod
def decoupled_k3_rs_token_tis(
cls,
is_threshold: float = 2.0,
rs_threshold: float = 0.01,
) -> "RolloutCorrectionConfig":
"""Decoupled mode with K3 RS and Token-level Truncated IS.
Combines K3 KL estimator rejection with token-level IS weights.
K3 provides more stable outlier detection than geometric mean.
Token-level IS has lower variance but introduces bias.
Args:
is_threshold (float): Upper threshold for token IS weights. Default: 2.0
rs_threshold (float): Max allowed K3 divergence. Default: 0.01
Returns:
RolloutCorrectionConfig configured for K3-RS-Token-TIS
"""
return cls(
rollout_is="token",
rollout_is_threshold=is_threshold,
rollout_rs="seq_mean_k3",
rollout_rs_threshold=rs_threshold,
)
@classmethod
def disabled(cls) -> "RolloutCorrectionConfig":
"""Disabled - Metrics Only Mode.
Computes and logs off-policy metrics without applying correction.
Returns:
RolloutCorrectionConfig with all correction disabled
"""
return cls(rollout_is=None, rollout_rs=None)
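# --- Illustrative usage (editor-added sketch, not part of the original source) ---
# A quick comparison of a few of the factory presets defined above, assuming the
# RolloutCorrectionConfig fields (rollout_is, rollout_rs, bypass_mode, ...) behave as
# described in the preset docstrings.
def _demo_rollout_correction_presets() -> None:
    """Hypothetical helper: print the field combinations produced by a few presets."""
    presets = {
        "decoupled_seq_is_rs": RolloutCorrectionConfig.decoupled_seq_is_rs(),
        "decoupled_k3_rs": RolloutCorrectionConfig.decoupled_k3_rs(),
        "bypass_ppo_clip": RolloutCorrectionConfig.bypass_ppo_clip(),
        "disabled": RolloutCorrectionConfig.disabled(),
    }
    for name, cfg in presets.items():
        print(name, cfg.rollout_is, cfg.rollout_rs, getattr(cfg, "bypass_mode", None))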
@dataclass
class AlgoConfig(BaseConfig):
"""Configuration for the algorithm.
The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config.
Args:
gamma (float): Discount factor for future rewards.
lam (float): Trade-off between bias and variance in the GAE estimator.
adv_estimator (str): Advantage estimator type: "gae", "grpo", "reinforce_plus_plus", etc.
norm_adv_by_std_in_grpo (bool): Whether to normalize advantages by std (specific to GRPO).
use_kl_in_reward (bool): Whether to enable in-reward KL penalty.
kl_penalty (str): How to estimate KL divergence: "kl", "abs", "mse", "low_var_kl", or "full".
kl_ctrl (KLControlConfig): KL control configuration.
use_pf_ppo (bool): Whether to enable preference feedback PPO.
pf_ppo (dict[str, Any]): Preference feedback PPO settings.
filter_groups (Optional[FilterGroupsConfig]): Filter groups configuration, used in DAPO and Entropy
rollout_correction (Optional[RolloutCorrectionConfig]): Rollout Correction configuration.
Addresses off-policy issues from policy mismatch, model staleness, and general distribution shifts.
Set to None to disable entirely. Use factory methods for common presets:
- RolloutCorrectionConfig.decoupled_token_is() - Decoupled mode with token-level IS
- RolloutCorrectionConfig.decoupled_seq_is() - Decoupled mode with sequence-level IS
- RolloutCorrectionConfig.decoupled_seq_is_rs() - Decoupled mode with sequence IS + RS
- RolloutCorrectionConfig.decoupled_k1_rs() - Decoupled mode with K1-RS (divergence)
- RolloutCorrectionConfig.decoupled_geo_rs() - Decoupled mode with Geo-RS (ratio)
- RolloutCorrectionConfig.bypass_ppo_clip() - Bypass mode with PPO-clip
- RolloutCorrectionConfig.bypass_ppo_clip_k1_rs() - Bypass mode with PPO-clip + K1-RS
- RolloutCorrectionConfig.bypass_pg_is() - Bypass mode with REINFORCE + IS
- RolloutCorrectionConfig.bypass_pg_k1_rs() - Bypass mode with REINFORCE + K1-RS
For backward compatibility, you can still pass a dict, which will be converted to
RolloutCorrectionConfig automatically.
"""
gamma: float = 1.0
lam: float = 1.0
adv_estimator: str = "gae"
norm_adv_by_std_in_grpo: bool = True
use_kl_in_reward: bool = False
kl_penalty: str = "kl"
kl_ctrl: KLControlConfig = field(default_factory=KLControlConfig)
use_pf_ppo: bool = False
pf_ppo: dict[str, Any] = field(default_factory=dict)
filter_groups: Optional[FilterGroupsConfig] = None
# Rollout Correction: corrects off-policy issues (policy mismatch, model staleness, distribution shifts)
# Set to None to disable, use RolloutCorrectionConfig presets (e.g., .tis(), .mis()), or pass dict
rollout_correction: Optional[RolloutCorrectionConfig] = None
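# --- Illustrative usage (editor-added sketch, not part of the original source) ---
# How a rollout-correction preset might be attached to an AlgoConfig; the GRPO advantage
# estimator and the decoupled_seq_is() preset (listed in the docstring above) are only
# example choices, not recommendations from the source.
def _demo_algo_config_with_rollout_correction() -> AlgoConfig:
    """Hypothetical helper: build an AlgoConfig that enables sequence-level IS correction."""
    return AlgoConfig(
        adv_estimator="grpo",
        rollout_correction=RolloutCorrectionConfig.decoupled_seq_is(),
    )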
| {
"repo_id": "verl-project/verl",
"file_path": "verl/trainer/config/algorithm.py",
"license": "Apache License 2.0",
"lines": 505,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/utils/profiler/config.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import warnings
from dataclasses import dataclass, field
from typing import Any, Optional
from omegaconf import MISSING
from verl.base_config import BaseConfig
@dataclass
class NsightToolConfig(BaseConfig):
"""Nsight tool config."""
"True for each task has its own database, False for all tasks in one training step share one database."
discrete: bool = False
name: str = "nsight"
def __post_init__(self) -> None:
pass
@dataclass
class TorchProfilerToolConfig(BaseConfig):
"""Torch profiler tool config."""
# options: cuda, cpu, memory, shapes, stack
contents: list[str] = field(default_factory=list)
discrete: bool = False
name: str = "torch"
def __post_init__(self) -> None:
"""config validation logics go here"""
__support_contents = ["cuda", "cpu", "memory", "shapes", "stack"]
for content in self.contents:
assert content in __support_contents, (
f"Profiler contents only supports {__support_contents}, but gets {content}"
)
assert isinstance(self.contents, list), f"Profiler contents must be of type list, got {type(self.contents)}"
@dataclass
class TorchMemoryToolConfig(BaseConfig):
"""Torch memory profiler tool config.
Args:
trace_alloc_max_entries (int): Maximum number of memory allocation entries to track.
stack_depth (int): Stack trace depth for memory allocations.
"""
trace_alloc_max_entries: int = 100_000
stack_depth: int = 32
name: str = "torch_memory"
def __post_init__(self) -> None:
"""config validation logics go here"""
assert isinstance(self.trace_alloc_max_entries, int), (
f"trace_alloc_max_entries must be int, got {type(self.trace_alloc_max_entries)}"
)
assert isinstance(self.stack_depth, int), f"stack_depth must be int, got {type(self.stack_depth)}"
assert self.trace_alloc_max_entries > 0, (
f"trace_alloc_max_entries must be positive, got {self.trace_alloc_max_entries}"
)
assert self.stack_depth > 0, f"stack_depth must be positive, got {self.stack_depth}"
@dataclass
class NPUToolConfig(NsightToolConfig):
"""NPU profiler too; config."""
# options: npu, cpu, memory, shapes, module, stack
contents: list[str] = field(default_factory=list)
# Collection level, optional values: level_none, level0, level1, level2.
level: str = "level0"
# Whether to automatically parse the data.
analysis: bool = False
name: str = "npu"
def __post_init__(self) -> None:
"""config validation logics go here"""
assert isinstance(self.contents, list), f"Profiler contents must be of type list, got {type(self.contents)}"
assert isinstance(self.level, str), f"Profiler level must be of type str, got {type(self.level)}"
assert isinstance(self.analysis, bool), f"Profiler analysis must be of type bool, got {type(self.analysis)}"
for content in self.contents:
assert content in ["npu", "cpu", "memory", "shapes", "module", "stack"], (
f"Profiler contents only supports npu, cpu, memory, shapes, module, stack, but gets {content}"
)
assert self.level in ["level_none", "level0", "level1", "level2"], (
f"Profiler level only supports level0, 1, 2, and level_none, but gets {self.level}"
)
@dataclass
class ProfilerConfig(BaseConfig):
"""Worker profiler config.
The inheritance from BaseConfig provides omegaconf.DictConfig-like interface for a dataclass config.
Args:
tool (Optional[str]): Name of the profiling tool to use.
enable (bool): Whether profiling is enabled for this worker.
all_ranks (bool): Whether to profile all ranks.
ranks (list[int]): The ranks that will be profiled. Defaults to [].
save_path (Optional[str]): Directory where profiling results are saved.
tool_config (Any): Tool-specific configuration; the tool configs above are used directly.
global_tool_config (Optional[Any]): Global tool configuration for all profiling tools.
"""
tool: Optional[str] = MISSING
enable: bool = False
all_ranks: bool = False
ranks: list[int] = field(default_factory=list)
save_path: Optional[str] = MISSING
tool_config: Any = MISSING # Just a placeholder, will use configs above directly
global_tool_config: Optional[Any] = None # Global tool configuration for all profiling tools
def union(self, other: "ProfilerConfig") -> "ProfilerConfig":
assert self.tool == other.tool, f"Cannot union ProfilerConfig with different tools: {self.tool} vs {other.tool}"
return ProfilerConfig(
tool=self.tool,
enable=self.enable or other.enable,
all_ranks=self.all_ranks or other.all_ranks,
ranks=list(set(self.ranks or []) | set(other.ranks or [])),
save_path=self.save_path,
tool_config=self.tool_config,
global_tool_config=self.global_tool_config or other.global_tool_config,
)
def intersect(self, other: "ProfilerConfig") -> "ProfilerConfig":
assert self.tool == other.tool, (
f"Cannot intersect ProfilerConfig with different tools: {self.tool} vs {other.tool}"
)
return ProfilerConfig(
tool=self.tool,
enable=self.enable and other.enable,
all_ranks=self.all_ranks and other.all_ranks,
ranks=list(set(self.ranks or []) & set(other.ranks or [])),
save_path=self.save_path,
tool_config=self.tool_config,
global_tool_config=self.global_tool_config if self.global_tool_config else other.global_tool_config,
)
def __post_init__(self) -> None:
"""config validation logics go here"""
assert isinstance(self.ranks, set | list | tuple), (
f"Profiler ranks must be of type list, got {type(self.ranks)}"
)
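# --- Illustrative usage (editor-added sketch, not part of the original source) ---
# A minimal example of combining two worker-level ProfilerConfig objects with union()
# and intersect(); the tool name "torch", the rank lists, and the save path are
# placeholder values chosen only for illustration.
def _demo_profiler_config_combination() -> None:
    actor_cfg = ProfilerConfig(tool="torch", enable=True, ranks=[0], save_path="outputs/profile", tool_config=None)
    rollout_cfg = ProfilerConfig(tool="torch", enable=False, ranks=[0, 1], save_path="outputs/profile", tool_config=None)
    merged = actor_cfg.union(rollout_cfg)      # enable=True, ranks cover {0, 1}
    common = actor_cfg.intersect(rollout_cfg)  # enable=False, ranks reduce to {0}
    print(merged.enable, sorted(merged.ranks), common.enable, sorted(common.ranks))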
def build_vllm_profiler_args(profiler_config: ProfilerConfig, tool_config: BaseConfig, rank: int) -> dict:
"""
Build arguments and environment variables for vLLM profiler.
Acts as an adapter to bridge verl's unified profiler config and vLLM's specific requirements.
It sets environment variables for compatibility and constructs arguments for vLLM >= 0.13.0.
Args:
profiler_config (ProfilerConfig): The unified profiler configuration.
tool_config (BaseConfig): The tool configuration.
rank (int): The rank of the replica.
Returns:
dict: A dictionary of arguments to be passed to vLLM's start_profile method.
"""
if not profiler_config or not tool_config or not hasattr(tool_config, "contents"):
return {}
contents = tool_config.contents
with_stack = "stack" in contents or "module" in contents
record_shapes = "shapes" in contents
with_memory = "memory" in contents
save_path = os.path.join(profiler_config.save_path, f"agent_loop_rollout_replica_{rank}")
# vLLM < 0.13.0 supports controlling profiler via environment variables
os.environ["VLLM_TORCH_PROFILER_DIR"] = save_path
os.environ["VLLM_TORCH_PROFILER_WITH_STACK"] = "1" if with_stack else "0"
os.environ["VLLM_TORCH_PROFILER_RECORD_SHAPES"] = "1" if record_shapes else "0"
os.environ["VLLM_TORCH_PROFILER_WITH_PROFILE_MEMORY"] = "1" if with_memory else "0"
# vLLM >= 0.13.0 supports controlling profiler via arguments.
# While it maintains backward compatibility with environment variables,
# we provide arguments explicitly to align with the new API style.
return {
"profiler_config": json.dumps(
{
"profiler": "torch",
"torch_profiler_dir": save_path,
"torch_profiler_with_memory": with_memory,
"torch_profiler_with_stack": with_stack,
"torch_profiler_record_shapes": record_shapes,
}
)
}
def build_sglang_profiler_args(profiler_config: ProfilerConfig, tool_config: BaseConfig, rank: int) -> dict:
"""
Build arguments for SGLang profiler.
Args:
profiler_config (ProfilerConfig): The unified profiler configuration.
tool_config (BaseConfig): The tool configuration.
rank (int): The rank of the replica.
Returns:
dict: A dictionary of arguments suitable for starting the SGLang profiler.
"""
if not profiler_config or not tool_config or not hasattr(tool_config, "contents"):
return {}
contents = tool_config.contents
if "memory" in contents:
warnings.warn("SGLang profiler does not support memory profiling. Ignoring memory content.", stacklevel=2)
return {
"output_dir": os.path.join(profiler_config.save_path, f"agent_loop_rollout_replica_{rank}"),
"with_stack": "stack" in contents or "module" in contents,
"record_shapes": "shapes" in contents,
}
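# --- Illustrative usage (editor-added sketch, not part of the original source) ---
# How the two adapter helpers above might be driven from a single verl profiler config.
# The save path and replica rank are placeholders, and TorchProfilerToolConfig is used
# as the tool config for both backends purely for illustration.
def _demo_build_profiler_args() -> None:
    prof_cfg = ProfilerConfig(tool="torch", enable=True, ranks=[0], save_path="outputs/profile", tool_config=None)
    tool_cfg = TorchProfilerToolConfig(contents=["cuda", "stack", "shapes"])
    vllm_args = build_vllm_profiler_args(prof_cfg, tool_cfg, rank=0)  # also sets VLLM_TORCH_PROFILER_* env vars
    sglang_args = build_sglang_profiler_args(prof_cfg, tool_cfg, rank=0)
    print(vllm_args.get("profiler_config"))
    print(sglang_args)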
| {
"repo_id": "verl-project/verl",
"file_path": "verl/utils/profiler/config.py",
"license": "Apache License 2.0",
"lines": 190,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/utils/profiler/performance.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import inspect
import logging
from contextlib import contextmanager
from typing import Any, Optional
import torch
import torch.distributed as dist
from codetiming import Timer
from verl.utils.device import get_device_id, get_torch_device
from verl.utils.logger import DecoratorLoggerBase
def _get_current_mem_info(unit: str = "GB", precision: int = 2) -> tuple[str, str, str, str]:
"""Get current memory usage.
Note that CPU device memory info is always 0.
Args:
unit (str, optional): The unit of memory measurement. Defaults to "GB".
precision (int, optional): The number of decimal places to round memory values. Defaults to 2.
Returns:
tuple[str]: A tuple containing memory allocated, memory reserved, memory used, and memory total
in the specified unit.
"""
assert unit in ["GB", "MB", "KB"]
device = get_torch_device()
# torch.cpu.memory_allocated() does not exist
if device == torch.cpu:
return "0.00", "0.00", "0.00", "0.00"
divisor = 1024**3 if unit == "GB" else 1024**2 if unit == "MB" else 1024
mem_allocated = get_torch_device().memory_allocated()
mem_reserved = get_torch_device().memory_reserved()
# use get_torch_device().mem_get_info to profile device memory
# since vllm's sleep mode works below pytorch
# see https://github.com/vllm-project/vllm/pull/11743#issuecomment-2754338119
mem_free, mem_total = get_torch_device().mem_get_info()
mem_used = mem_total - mem_free
mem_allocated = f"{mem_allocated / divisor:.{precision}f}"
mem_reserved = f"{mem_reserved / divisor:.{precision}f}"
mem_used = f"{mem_used / divisor:.{precision}f}"
mem_total = f"{mem_total / divisor:.{precision}f}"
return mem_allocated, mem_reserved, mem_used, mem_total
def log_gpu_memory_usage(head: str, logger: logging.Logger = None, level=logging.DEBUG, rank: int = 0):
"""Log GPU memory usage information.
Args:
head (str): A descriptive header for the memory usage log message.
logger (logging.Logger, optional): Logger instance to use for logging. If None, prints to stdout.
level: Logging level to use. Defaults to logging.DEBUG.
rank (int): The rank of the process to log memory for. Defaults to 0.
"""
if (not dist.is_initialized()) or (rank is None) or (dist.get_rank() == rank):
mem_allocated, mem_reserved, mem_used, mem_total = _get_current_mem_info()
message = (
f"{head}, memory allocated (GB): {mem_allocated}, memory reserved (GB): {mem_reserved}, "
f"device memory used/total (GB): {mem_used}/{mem_total}"
)
if logger is None:
print(message)
else:
logger.log(msg=message, level=level)
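# --- Illustrative usage (editor-added sketch, not part of the original source) ---
# log_gpu_memory_usage can bracket a memory-heavy phase; with logger=None it prints to
# stdout, and on CPU-only machines every reading is reported as 0.00.
def _demo_log_gpu_memory_usage() -> None:
    log_gpu_memory_usage("before demo allocation", logger=None)
    buffer = torch.empty(1024, 1024, device="cuda" if torch.cuda.is_available() else "cpu")
    log_gpu_memory_usage("after demo allocation", logger=None)
    del buffer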
class GPUMemoryLogger(DecoratorLoggerBase):
"""A decorator class to log GPU memory usage.
Example:
>>> from verl.utils.profiler.performance import GPUMemoryLogger
>>> @GPUMemoryLogger(role="actor")
>>> def update_actor(self, batch):
... # real actor update logics
... return
"""
def __init__(self, role: str, logger: logging.Logger = None, level=logging.DEBUG, log_only_rank_0: bool = True):
if dist.is_initialized() and dist.get_world_size() > 1:
rank = dist.get_rank()
else:
rank = 0
super().__init__(role, logger, level, rank, log_only_rank_0)
def __call__(self, decorated_function: callable):
def f(*args, **kwargs):
return self.log(decorated_function, *args, **kwargs)
return f
def log(self, func, *args, **kwargs):
name = func.__name__
mem_allocated, mem_reserved, mem_used, mem_total = _get_current_mem_info()
message = (
f"Before {name}, memory allocated (GB): {mem_allocated}, memory reserved (GB): {mem_reserved}, "
f"device memory used/total (GB): {mem_used}/{mem_total}"
)
self.logging_function(message)
output = func(*args, **kwargs)
mem_allocated, mem_reserved, mem_used, mem_total = _get_current_mem_info()
message = (
f"After {name}, memory allocated (GB): {mem_allocated}, memory reserved (GB): {mem_reserved}, "
f"device memory used/total (GB): {mem_used}/{mem_total}"
)
self.logging_function(message)
return output
def log_print(ctn: Any):
current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
frame = inspect.currentframe().f_back
function_name = frame.f_code.co_name
line_number = frame.f_lineno
file_name = frame.f_code.co_filename.split("/")[-1]
print(f"[{current_time}-{file_name}:{line_number}:{function_name}]: {ctn}")
def _timer(name: str, timing_raw: dict[str, float]):
"""Inner function that handles the core timing logic.
Args:
name (str): The name/identifier for this timing measurement.
timing_raw (Dict[str, float]): Dictionary to store timing information.
"""
with Timer(name=name, logger=None) as timer:
yield
if name not in timing_raw:
timing_raw[name] = 0
timing_raw[name] += timer.last
@contextmanager
def simple_timer(name: str, timing_raw: dict[str, float]):
"""Context manager for basic timing without NVTX markers.
This utility function measures the execution time of code within its context
and accumulates the timing information in the provided dictionary.
Args:
name (str): The name/identifier for this timing measurement.
timing_raw (Dict[str, float]): Dictionary to store timing information.
Yields:
None: This is a context manager that yields control back to the code block.
"""
yield from _timer(name, timing_raw)
@contextmanager
def marked_timer(
name: str,
timing_raw: dict[str, float],
color: str = None,
domain: Optional[str] = None,
category: Optional[str] = None,
):
"""Context manager for timing with platform markers.
This utility function measures the execution time of code within its context,
accumulates the timing information, and adds platform markers for profiling.
This function is a default implementation when hardware profiler is not available.
Args:
name (str): The name/identifier for this timing measurement.
timing_raw (Dict[str, float]): Dictionary to store timing information.
color (Optional[str]): Color for the marker. Defaults to None.
domain (Optional[str]): Domain for the marker. Defaults to None.
category (Optional[str]): Category for the marker. Defaults to None.
Yields:
None: This is a context manager that yields control back to the code block.
"""
yield from _timer(name, timing_raw)
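# --- Illustrative usage (editor-added sketch, not part of the original source) ---
# Accumulating wall-clock time across repeated calls with simple_timer; the section name
# and the sleep-based workload are placeholders for real work.
def _demo_simple_timer() -> None:
    import time

    timing_raw: dict[str, float] = {}
    for _ in range(3):
        with simple_timer("dummy_step", timing_raw):
            time.sleep(0.01)  # stand-in for real work
    print(f"accumulated dummy_step time: {timing_raw['dummy_step']:.3f}s")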
def reduce_timing(
timing_raw: dict[str, float], reduce_op: torch.distributed.ReduceOp = torch.distributed.ReduceOp.AVG
) -> dict[str, float]:
"""Reduce timing information across all processes.
This function uses distributed communication to gather and sum the timing
information from all processes in a distributed environment.
Args:
timing_raw (Dict[str, float]): Dictionary containing timing information.
Returns:
Dict[str, float]: Reduced timing information.
"""
if not dist.is_initialized():
return timing_raw
key_list, timing_list = [], []
for key in sorted(timing_raw.keys()):
key_list.append(key)
timing_list.append(timing_raw[key])
timing_list = torch.tensor(timing_list, dtype=torch.float32, device=get_device_id())
torch.distributed.all_reduce(timing_list, op=reduce_op)
timing_list = [tensor.item() for tensor in timing_list.to("cpu")]
timing_generate = {key_list[i]: timing_list[i] for i in range(len(key_list))}
return timing_generate
def topk_reduce_ratio_min_max(timing: float, k: int = 10) -> tuple[float, float, float]:
"""Calculate topk items take-up ratio, and min/max timing across all ranks."""
if not dist.is_initialized():
return -1.0, -1.0, -1.0
world_size = dist.get_world_size()
timing_tensor = torch.tensor(timing, dtype=torch.float32, device=get_device_id())
tensor_list = [torch.zeros(1, dtype=torch.float32, device=get_device_id()) for _ in range(world_size)]
torch.distributed.all_gather(tensor_list, timing_tensor)
tensor_stack = torch.stack(tensor_list)
timing_min = tensor_stack.min().cpu().item()
timing_max = tensor_stack.max().cpu().item()
top_k_percentile = torch.quantile(tensor_stack, 1 - k / 100)
tail_ratio = torch.mean((tensor_stack > top_k_percentile).float()).cpu().item()
return tail_ratio, timing_min, timing_max
def gather_timing(timing_raw: dict[str, float]) -> dict[str, list[float]]:
if not dist.is_initialized():
return {k: [v] for k, v in timing_raw.items()}
key_list, timing_list = [], []
for key in sorted(timing_raw.keys()):
key_list.append(key)
timing_list.append(timing_raw[key])
world_size = torch.distributed.get_world_size()
object_gather_list = [None] * world_size
torch.distributed.all_gather_object(object_gather_list, timing_list)
timing_generate = {
key_list[i]: [timing_list[i] for timing_list in object_gather_list] for i in range(len(key_list))
}
return timing_generate
| {
"repo_id": "verl-project/verl",
"file_path": "verl/utils/profiler/performance.py",
"license": "Apache License 2.0",
"lines": 206,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/models/mcore/mbridge.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# VANILLA_MBRIDGE
try:
from verl.models.mcore.patch import apply_patch_mbridge
apply_patch_mbridge()
from mbridge import AutoBridge
from mbridge.utils.post_creation_callbacks import freeze_moe_router, make_value_model
except ImportError:
print("mbridge package not found. Please install mbridge with `pip install verl[mcore]` or `pip install mbridge`")
raise
__all__ = ["AutoBridge", "make_value_model", "freeze_moe_router"]
| {
"repo_id": "verl-project/verl",
"file_path": "verl/models/mcore/mbridge.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:scripts/legacy_model_merger.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script merges verl checkpoints from FSDP and Megatron backends into a Hugging Face model, and can test the merged model against a reference.
To merge FSDP checkpoints:
```sh
python scripts/legacy_model_merger.py merge \
--backend fsdp \
--local_dir checkpoints/verl_fsdp_gsm8k_examples/qwen2_5_0b5_fsdp_saveload/global_step_1/actor \
--target_dir /path/to/merged_hf_model
```
To merge Megatron checkpoints:
```sh
python scripts/legacy_model_merger.py merge \
--backend megatron \
--tie-word-embedding \
--local_dir checkpoints/verl_megatron_gsm8k_examples/qwen2_5_0b5_megatron_saveload/global_step_1/actor \
--target_dir /path/to/merged_hf_model
```
For more details, please refer to documentation:
https://verl.readthedocs.io/en/latest/advance/checkpoint.html#convert-fsdp-and-megatron-checkpoints-to-huggingface-format-model
"""
import argparse
import os
import re
import warnings
from abc import ABC, abstractmethod
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
import numpy as np
import torch
from accelerate import init_empty_weights
from safetensors.torch import load_file
from torch.distributed._tensor import Placement, Shard
from transformers import (
AutoConfig,
AutoModelForCausalLM,
AutoModelForTokenClassification,
GenerationConfig,
PretrainedConfig,
)
try:
# for torch 2.5+
from torch.distributed.tensor import DTensor
except ImportError:
from torch.distributed._tensor import DTensor
from tqdm import tqdm
from verl.utils import hf_processor, hf_tokenizer
from verl.utils.transformers_compat import get_auto_model_for_vision2seq
AutoModelForVision2Seq = get_auto_model_for_vision2seq()
@dataclass
class ModelMergerConfig:
operation: str # 'merge' or 'test'
backend: str
local_dir: str
hf_model_config_path: str
target_dir: Optional[str] = "tmp"
hf_upload_path: Optional[str] = None
private: bool = False
test_hf_dir: Optional[str] = None
tie_word_embedding: bool = False
is_value_model: bool = False
hf_model_path: Optional[str] = None
hf_upload: bool = field(init=False)
def __post_init__(self):
self.hf_upload = self.operation == "merge" and bool(self.hf_upload_path)
if self.operation == "test":
self.target_dir = None
self.hf_upload_path = None
self.private = False
class BaseModelMerger(ABC):
def __init__(self, config: ModelMergerConfig):
self.config = config
self.hf_model_config_path = config.hf_model_config_path
if config.hf_model_path:
print(
"Warning: --hf_model_path is deprecated and will be removed in a future version. Currently verl will save huggingface model configuration files into checkpoint directories. Therefore, there is no need to provide --hf_model_path. "
)
self.hf_model_config_path = config.hf_model_path
# Auto-detect huggingface subdirectory if it exists
huggingface_subdir = os.path.join(self.hf_model_config_path, "huggingface")
if os.path.isdir(huggingface_subdir):
self.hf_model_config_path = huggingface_subdir
self.model_config = AutoConfig.from_pretrained(self.hf_model_config_path)
def get_transformers_auto_model_class(self):
# Handle case where architectures might be None or empty
if self.model_config.architectures is None or len(self.model_config.architectures) == 0:
# Try to infer from model_type if architectures is missing
model_type = getattr(self.model_config, "model_type", "").lower()
if "vision" in model_type or "vl" in model_type:
return AutoModelForVision2Seq
elif "causal" in model_type or "gpt" in model_type or "llama" in model_type or "qwen" in model_type:
return AutoModelForCausalLM
else:
raise NotImplementedError(
f"Cannot determine model class: architectures is None and model_type '{model_type}' is not recognized"
)
architecture = self.model_config.architectures[0]
if "ForTokenClassification" in architecture:
return AutoModelForTokenClassification
elif "ForCausalLM" in architecture:
return AutoModelForCausalLM
elif "ForConditionalGeneration" in architecture:
return AutoModelForVision2Seq
raise NotImplementedError(f"Unknown architecture {self.model_config.architectures}")
def patch_model_generation_config(self, model):
"""
The generation_config created from the model config may differ from the one shipped with the
pretrained model, which can lead to errors during generation: https://github.com/volcengine/verl/issues/1246
This function replaces the generation_config created from the model config with the one loaded from the pretrained model.
"""
if model.can_generate():
try:
model.generation_config = GenerationConfig.from_pretrained(self.hf_model_config_path)
except OSError:
print(
f"Warning: Generation config file not found in {self.hf_model_config_path}, using a generation config created from the model config."
)
return model
def save_lora_adapter(self, state_dict: dict[str, torch.Tensor]):
"""
Save lora adapter to safetensors.
Returns:
lora_path: str, the path to the lora adapter. None if no lora adapter found.
Note:
This function modifies `state_dict` in place.
"""
lora_params_names = [name for name in state_dict.keys() if "lora_" in name]
if len(lora_params_names) == 0:
return None
import json
from collections import OrderedDict
import peft
from safetensors.torch import save_file
lora_params = OrderedDict()
target_modules = set()
lora_key = None
for name in lora_params_names:
lora_key = name.replace(".default.weight", ".weight")
target_modules.add(lora_key.split(".")[-3])
lora_params[lora_key] = state_dict.pop(name)
lora_rank = min(lora_params[lora_key].shape[0], lora_params[lora_key].shape[1])
peft_dict = {
"r": lora_rank,
"lora_alpha": 0, # lora_alpha is not set. An error should be raised to inform the user to set it manually.
"target_modules": list(target_modules),
}
peft_config = peft.LoraConfig(**peft_dict).to_dict()
peft_config["task_type"] = peft_config["task_type"].value if peft_config["task_type"] else None
peft_config["peft_type"] = peft_config["peft_type"].value if peft_config["peft_type"] else None
peft_config["target_modules"] = list(peft_config["target_modules"])
lora_path = os.path.join(self.config.target_dir, "lora_adapter")
os.makedirs(lora_path, exist_ok=True)
with open(os.path.join(lora_path, "adapter_config.json"), "w", encoding="utf-8") as f:
json.dump(peft_config, f, ensure_ascii=False, indent=4)
save_file(lora_params, os.path.join(lora_path, "adapter_model.safetensors"))
for name in list(state_dict.keys()):
key = (
name.replace("base_model.model.", "")
.replace(".base_layer.weight", ".weight")
.replace(".base_layer.bias", ".bias")
)
state_dict[key] = state_dict.pop(name)
return lora_path
def save_hf_model_and_tokenizer(self, state_dict: dict[str, torch.Tensor]):
auto_model_class = self.get_transformers_auto_model_class()
with init_empty_weights():
model = auto_model_class.from_config(self.model_config, torch_dtype=torch.bfloat16)
model.to_empty(device="cpu")
model = self.patch_model_generation_config(model)
lora_path = self.save_lora_adapter(state_dict)
if lora_path:
print(f"Saving lora adapter to {lora_path}")
print(f"Saving model to {self.config.target_dir}")
model.save_pretrained(self.config.target_dir, state_dict=state_dict)
del state_dict
del model
processor = hf_processor(self.hf_model_config_path)
try:
tokenizer = hf_tokenizer(self.hf_model_config_path)
except Exception as e:
warnings.warn(f"Failed to create tokenizer: {e}. This may affect tokenizer saving", stacklevel=1)
tokenizer = None
if processor is not None:
print(f"Saving processor to {self.config.target_dir}")
processor.save_pretrained(self.config.target_dir)
if tokenizer is not None:
print(f"Saving tokenizer to {self.config.target_dir}")
tokenizer.save_pretrained(self.config.target_dir)
def upload_to_huggingface(self):
from huggingface_hub import HfApi
api = HfApi()
api.create_repo(repo_id=self.config.hf_upload_path, private=self.config.private, exist_ok=True)
api.upload_folder(folder_path=self.config.target_dir, repo_id=self.config.hf_upload_path, repo_type="model")
@abstractmethod
def merge_and_save(self):
raise NotImplementedError("Subclasses should implement this method")
class FSDPModelMerger(BaseModelMerger):
def _get_world_size(self) -> int:
"""Extracts the FSDP world_size from checkpoint filenames (e.g., 'model_world_size_8_rank_0.pt')."""
for filename in os.listdir(self.config.local_dir):
match = re.match(r"model_world_size_(\d+)_rank_0\.pt", filename)
if match:
return int(match.group(1))
raise FileNotFoundError(
f"Could not determine world size. No file matching 'model_world_size_(\\d+)_rank_0.pt' found in {self.config.local_dir}"
)
def _load_rank_zero_state_dict(self, world_size: int) -> dict:
return torch.load(
Path(self.config.local_dir) / f"model_world_size_{world_size}_rank_0.pt",
map_location="cpu",
weights_only=False,
)
def _extract_device_mesh_info(self, state_dict: dict, world_size: int) -> tuple[np.ndarray, tuple[str, ...]]:
"""
Retrieves sharding information (device_mesh, mesh_dim_names) from a DTensor in the state_dict.
If no DTensor is found, infers a simple FSDP mesh based on world_size.
"""
pivot_key = sorted(list(state_dict.keys()))[0]
weight = state_dict[pivot_key]
if isinstance(weight, DTensor):
# get sharding info
device_mesh = weight.device_mesh
mesh = device_mesh.mesh
mesh_dim_names = device_mesh.mesh_dim_names
else:
# for non-DTensor
mesh = np.array([world_size], dtype=np.int64)
mesh_dim_names = ("fsdp",)
return mesh, mesh_dim_names
def _calculate_shard_configuration(
self, mesh: np.ndarray, mesh_dim_names: tuple[str, ...]
) -> tuple[int, tuple[int, ...]]:
"""Calculates the total number of shards and the shape of the device mesh."""
assert mesh_dim_names in (("fsdp",), ("ddp", "fsdp")), f"Unsupported mesh_dim_names {mesh_dim_names}"
if "tp" in mesh_dim_names:
# TODO: "tp" is not supported yet due to the above assert
total_shards = mesh.shape[-1] * mesh.shape[-2]
mesh_shape = (mesh.shape[-2], mesh.shape[-1])
else:
total_shards = mesh.shape[-1]
mesh_shape = (mesh.shape[-1],)
return total_shards, mesh_shape
def _merge_by_placement(self, tensors: list[torch.Tensor], placement: Placement) -> torch.Tensor:
"""Merges a list of tensors based on their DTensor placement"""
if placement.is_replicate():
return tensors[0]
elif placement.is_partial():
raise NotImplementedError("Partial placement is not supported yet")
elif placement.is_shard():
return torch.cat(tensors, dim=placement.dim).contiguous()
raise NotImplementedError(f"Unsupported placement: {placement}")
def _load_and_merge_state_dicts(
self, world_size: int, total_shards: int, mesh_shape: tuple[int, ...], mesh_dim_names: tuple[str, ...]
) -> dict[str, torch.Tensor]:
model_state_dict_lst = [None] * total_shards
def process_one_shard(rank: int, model_state_dict_lst: list):
model_path = Path(self.config.local_dir) / f"model_world_size_{world_size}_rank_{rank}.pt"
state_dict = torch.load(model_path, map_location="cpu", weights_only=False)
model_state_dict_lst[rank] = state_dict
return state_dict
with ThreadPoolExecutor(max_workers=min(32, os.cpu_count())) as executor:
futures = [executor.submit(process_one_shard, rank, model_state_dict_lst) for rank in range(total_shards)]
for future in tqdm(futures, desc=f"Loading {total_shards} FSDP shards", total=total_shards):
future.result()
# Merge state dicts from all shards
state_dict = {}
param_placements: dict[str, list] = {}
for key in set(model_state_dict_lst[0].keys()):
state_dict[key] = []
for model_state_shard in model_state_dict_lst:
# add tensor shard in order of rank to state_dict[key]
tensor = model_state_shard.pop(key)
if isinstance(tensor, DTensor):
state_dict[key].append(tensor._local_tensor.bfloat16())
placements = tuple(tensor.placements)
# replicated placement at dp dimension can be discarded
if mesh_dim_names[0] in ("dp", "ddp"):
placements = placements[1:]
if key not in param_placements:
param_placements[key] = placements
else:
assert param_placements[key] == placements
else:
state_dict[key].append(tensor.bfloat16())
del model_state_dict_lst
# Merge tensors
for key in sorted(state_dict):
if not isinstance(state_dict[key], list):
print(f"No need to merge key {key}")
continue
if key in param_placements:
# merge shards
placements: tuple[Shard] = param_placements[key]
if len(mesh_shape) == 1:
# 1-D list, FSDP without TP
assert len(placements) == 1
shards = state_dict[key]
state_dict[key] = self._merge_by_placement(shards, placements[0])
else:
# 2-D list, FSDP + TP
raise NotImplementedError("FSDP + TP is not supported yet")
else:
state_dict[key] = torch.cat(state_dict[key], dim=0)
return state_dict
def merge_and_save(self):
world_size = self._get_world_size()
rank_zero_state_dict = self._load_rank_zero_state_dict(world_size)
mesh, mesh_dim_names = self._extract_device_mesh_info(rank_zero_state_dict, world_size)
print(f"Got device mesh {mesh}, mesh_dim_names {mesh_dim_names}")
total_shards, mesh_shape = self._calculate_shard_configuration(mesh, mesh_dim_names)
print(f"Processing model shards with {total_shards} {mesh_shape} in total")
merged_state_dict = self._load_and_merge_state_dicts(world_size, total_shards, mesh_shape, mesh_dim_names)
if self.config.operation == "test":
if not self.config.test_hf_dir:
raise ValueError("test_hf_dir must be provided for test operation")
self._test_state_dict(merged_state_dict)
elif self.config.operation == "merge":
self.save_hf_model_and_tokenizer(merged_state_dict)
if self.config.hf_upload:
self.upload_to_huggingface()
else:
raise ValueError(f"Unknown operation: {self.config.operation}")
def _test_state_dict(self, state_dict: dict[str, torch.Tensor]):
auto_model_class = self.get_transformers_auto_model_class()
hf_model = auto_model_class.from_pretrained(self.config.test_hf_dir, torch_dtype=torch.bfloat16)
hf_state_dict = hf_model.state_dict()
del hf_model
hf_model_keys = set(hf_state_dict.keys())
collected_keys = set(state_dict.keys())
missing_keys = hf_model_keys - collected_keys
assert len(missing_keys) == 0, f"Missing keys in collected state dict: {list(sorted(missing_keys))}"
extra_keys = collected_keys - hf_model_keys
assert len(extra_keys) == 0, f"Extra keys in collected state dict: {list(sorted(extra_keys))}"
for key in hf_model_keys:
hf_shape = hf_state_dict[key].shape
collected_shape = state_dict[key].shape
assert hf_shape == collected_shape, (
f"Shape mismatch for key '{key}': original {hf_shape} vs collected {collected_shape}"
)
hf_dtype = hf_state_dict[key].dtype
collected_dtype = state_dict[key].dtype
assert hf_dtype == collected_dtype, (
f"Dtype mismatch for key '{key}': original {hf_dtype} vs collected {collected_dtype}"
)
torch.testing.assert_close(hf_state_dict[key], state_dict[key], atol=1e-6, rtol=1e-6)
print("FSDP checks passed: The merged state_dict matches the hf model saved by FSDPCheckpointManager.")
class MegatronModelMerger(BaseModelMerger):
def __init__(self, config: ModelMergerConfig):
from verl.utils.megatron_utils import get_hf_config_and_tokenizer_checkpoint_path
config.hf_model_config_path = get_hf_config_and_tokenizer_checkpoint_path(config.local_dir)
super().__init__(config)
self.params_mapping = {
# megatron core gpt model name, huggingface model name
# NOTE: ordering matters here. When two keys share a prefix, the longer (more specific) key must come first so it is processed before the shorter one.
"embedding.word_embeddings": "model.embed_tokens",
# attn
"self_attention.linear_qkv.layer_norm_weight": "input_layernorm.weight",
"self_attention.linear_qkv.layer_norm_bias": "input_layernorm.bias",
"self_attention.linear_qkv": "self_attn.qkv_proj",
"self_attention.q_layernorm": "self_attn.q_norm",
"self_attention.k_layernorm": "self_attn.k_norm",
"self_attention.linear_proj": "self_attn.o_proj",
# mla
"self_attention.linear_q_proj": "self_attn.q_proj",
"self_attention.linear_q_down_proj": "self_attn.q_a_proj",
"self_attention.linear_q_up_proj.layer_norm_weight": "self_attn.q_a_layernorm.weight",
"self_attention.linear_q_up_proj": "self_attn.q_b_proj",
"self_attention.linear_kv_down_proj": "self_attn.kv_a_proj_with_mqa",
"self_attention.linear_kv_up_proj.layer_norm_weight": "self_attn.kv_a_layernorm.weight",
"self_attention.linear_kv_up_proj": "self_attn.kv_b_proj",
# mlp
"pre_mlp_layernorm": "post_attention_layernorm",
"mlp.linear_fc1.layer_norm_weight": "post_attention_layernorm.weight",
"mlp.linear_fc1.layer_norm_bias": "post_attention_layernorm.bias",
"mlp.linear_fc1": "mlp.gate_up_proj",
"mlp.linear_fc2": "mlp.down_proj",
# moe
"mlp.router.expert_bias": "mlp.gate.e_score_correction_bias",
"mlp.router": "mlp.gate",
"mlp.shared_experts.linear_fc1": "mlp.shared_experts.gate_up_proj",
"mlp.shared_experts.linear_fc2": "mlp.shared_experts.down_proj",
"linear_fc1": "gate_up_proj",
"linear_fc2": "down_proj",
# output
"final_layernorm": "norm",
"output_layer": "lm_head",
}
def _get_tp_pp_rank_from_sharded_dir(self, sharded_dir: str) -> tuple[int, int]:
tp_rank = pp_rank = None
rank_list = sharded_dir.split("_")[2:]
if re.match(r"mp_rank_(\d\d)_(\d\d\d)", sharded_dir):
tp_rank = int(rank_list[0])
pp_rank = int(rank_list[1])
elif re.match(r"mp_rank_(\d\d)", sharded_dir):
tp_rank = int(rank_list[0])
pp_rank = 0
assert tp_rank is not None and pp_rank is not None, f"Invalid sharded dir {sharded_dir}"
return tp_rank, pp_rank
def _check_megatron_checkpoint_path(self, model_path: str) -> tuple[list[str], int, int]:
"""
Validates the Megatron checkpoint structure (presence of 'model.pt' in sharded directories).
Determines TP and PP sizes from directory names.
"""
tp_size = 0
pp_size = 0
sharded_dirs = sorted(os.listdir(model_path))
for sharded_dir in sharded_dirs:
assert "model.pt" in os.listdir(Path(model_path) / sharded_dir), f"model.pt not found in {sharded_dir}"
tp_rank, pp_rank = self._get_tp_pp_rank_from_sharded_dir(sharded_dir)
tp_size = max(tp_size, tp_rank + 1)
pp_size = max(pp_size, pp_rank + 1)
return sharded_dirs, tp_size, pp_size
def _merge_across_tp(
self,
key: str,
tp_data: list[torch.Tensor],
config: PretrainedConfig,
tp_size: int,
is_value_model: bool = False,
) -> Union[torch.Tensor, list[torch.Tensor]]:
if "linear_fc1.weight" in key:
# if the tensor is gate and proj
gate_lst = []
up_lst = []
for infer_param in tp_data:
gate, up = infer_param.chunk(2)
gate_lst.append(gate)
up_lst.append(up)
gate = torch.cat(gate_lst, dim=0)
up = torch.cat(up_lst, dim=0)
return [gate, up]
elif "self_attention.linear_qkv." in key and "layer_norm" not in key:
# if the tensor is qkv, for each param on tp, split into q, k, v
# concat q, k, v separately.
q_lst = []
k_lst = []
v_lst = []
assert config.num_attention_heads % config.num_key_value_heads == 0
num_q_per_kv = config.num_attention_heads // config.num_key_value_heads
assert tp_data[0].shape[0] % (num_q_per_kv + 2) == 0
kv_size_per_tp = tp_data[0].shape[0] // (num_q_per_kv + 2)
split_size = [kv_size_per_tp * num_q_per_kv, kv_size_per_tp, kv_size_per_tp]
for infer_param in tp_data:
num_query_groups_per_partition = config.num_key_value_heads // tp_size
for chunk in infer_param.chunk(num_query_groups_per_partition):
split_size = [
kv_size_per_tp * num_q_per_kv // num_query_groups_per_partition,
kv_size_per_tp // num_query_groups_per_partition,
kv_size_per_tp // num_query_groups_per_partition,
]
q, k, v = chunk.split(split_size)
q_lst.append(q)
k_lst.append(k)
v_lst.append(v)
q = torch.cat(q_lst, dim=0)
k = torch.cat(k_lst, dim=0)
v = torch.cat(v_lst, dim=0)
return [q, k, v]
elif "layer_norm" in key or "layernorm" in key or "router" in key or ("output_layer" in key and is_value_model):
return tp_data[0]
else:
dim = 0
if "linear_fc2.weight" in key or "self_attention.linear_proj" in key:
dim = 1
return torch.cat(tp_data, dim=dim)
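    # --- Worked example (editor-added, illustrative only) ---
    # Assume a GQA layout with num_attention_heads=8, num_key_value_heads=4, head_dim=128
    # and tp_size=2. Then num_q_per_kv = 2 and each TP rank holds 4 / 2 = 2 query groups,
    # each stored as an interleaved [q (2*128 rows), k (128 rows), v (128 rows)] block,
    # i.e. 1024 rows per rank. The qkv branch above splits every per-rank group chunk back
    # into its q/k/v parts and concatenates them across ranks, giving q with 8*128 rows and
    # k, v with 4*128 rows each, matching the Hugging Face q_proj / k_proj / v_proj shapes.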
def _load_state_dicts(
self, model_ckpt_path: str, sharded_dirs: list[str], tp_size: int, pp_size: int
) -> list[list[dict]]:
model_state_dict_lst = [[None for _ in range(tp_size)] for _ in range(pp_size)]
def _process_one_megatron_shard(sharded_dir: str):
model_file_path = Path(model_ckpt_path) / sharded_dir / "model.pt"
state_dict = torch.load(model_file_path, map_location="cpu", weights_only=False)
tp_rank, pp_rank = self._get_tp_pp_rank_from_sharded_dir(sharded_dir)
model_state_dict_lst[pp_rank][tp_rank] = state_dict
with ThreadPoolExecutor(max_workers=min(32, os.cpu_count())) as executor:
futures = [executor.submit(_process_one_megatron_shard, sharded_dir) for sharded_dir in sharded_dirs]
for future in tqdm(futures, desc=f"Loading {len(sharded_dirs)} Megatron shards", total=len(sharded_dirs)):
future.result()
return model_state_dict_lst
def _check_megatron_state_key(self, key: str) -> None:
"""
Checks that the key is a valid Megatron state key.
Currently the model merger only supports keys that start with "decoder", "embedding", or
"output_layer" in TransformerLayer; keys starting with "model." are rejected.
"""
if key.startswith("model."):
raise ValueError(
f"Invalid key {key} in Megatron state_dict. Expected keys to start with 'decoder/embedding/output_layer' in TransformerLayer."
)
skip_checking_keys = ["embedding.word_embeddings", "output_layer"]
for skip_key in skip_checking_keys:
if skip_key in key:
print(f"skip checking key {key}")
return
# Exclude extra state keys
if not key.startswith("decoder"):
raise ValueError(
f"Invalid key {key} in Megatron state_dict. Expected keys to start with 'decoder' in TransformerLayer."
)
def _merge_state_dicts(
self, model_state_dict_lst: list[list[dict]], tp_size: int, pp_size: int
) -> dict[str, torch.Tensor]:
state_dict = {}
vpp_size = len(model_state_dict_lst[0][0])
layers_cum = 0
for vpp_rank in range(vpp_size):
for pp_rank in range(pp_size):
layers_handled = 0
keys = model_state_dict_lst[pp_rank][0][vpp_rank].keys()
for key in keys:
if "extra_state" in key:
continue
if self.config.tie_word_embedding and ("output_layer" in key):
print("skip lm_head and reward_head loading because of tie_word_embeddings")
continue
self._check_megatron_state_key(key)
hf_name = self._replace_name(key, self.params_mapping)
assert hf_name is not None, f"Failed to convert layer name [{key}] from megatron to huggingface."
if "model.layers." in hf_name:
local_layer_no = int(hf_name.split(".")[2])
layers_handled = max(local_layer_no, layers_handled)
global_layer_no = local_layer_no + layers_cum
new_key_list = hf_name.split(".")
new_key_list[2] = str(global_layer_no)
hf_name = ".".join(new_key_list)
else:
warnings.warn(f"hf_name {hf_name} will not be fixed with layer number", stacklevel=2)
tp_data = [model_state_dict_lst[pp_rank][tp_rank][vpp_rank][key] for tp_rank in range(tp_size)]
merged = self._merge_across_tp(key, tp_data, self.model_config, tp_size, self.config.is_value_model)
if not isinstance(merged, list):
state_dict[hf_name] = merged
elif len(merged) == 3:
# split qkv
for n, d in zip(["q", "k", "v"], merged):
state_dict[hf_name.replace("qkv", n)] = d
elif len(merged) == 2:
# split gate up
state_dict[hf_name.replace("gate_up", "gate")] = merged[0]
state_dict[hf_name.replace("gate_up", "up")] = merged[1]
print(
f"converted {key} to {hf_name} with shape {merged.shape if isinstance(merged, torch.Tensor) else [t.shape for t in merged]}"
)
layers_cum += layers_handled + 1 # zero based
return state_dict
def merge_and_save(self):
from verl.utils.megatron_utils import get_model_checkpoint_path
model_ckpt_path = get_model_checkpoint_path(self.config.local_dir)
sharded_dirs, tp_size, pp_size = self._check_megatron_checkpoint_path(model_ckpt_path)
print(f"sharded_dirs: {sharded_dirs}, tp_size: {tp_size}, pp_size: {pp_size}, mp_size: {len(sharded_dirs)}")
model_state_dict_lst = self._load_state_dicts(model_ckpt_path, sharded_dirs, tp_size, pp_size)
merged_state_dict = self._merge_state_dicts(model_state_dict_lst, tp_size, pp_size)
del model_state_dict_lst
if self.config.operation == "test":
if not self.config.test_hf_dir:
raise ValueError("test_hf_dir must be provided for test operation")
self._test_state_dict(merged_state_dict)
elif self.config.operation == "merge":
self.save_hf_model_and_tokenizer(merged_state_dict)
if self.config.hf_upload:
self.upload_to_huggingface()
else:
raise ValueError(f"Unknown operation: {self.config.operation}")
def _test_state_dict(self, state_dict: dict[str, torch.Tensor]):
"""
Compares the merged Megatron state_dict against a reference safetensors model.
Applies necessary name mappings from Megatron to Hugging Face conventions using _replace_name.
"""
ref_state_dict = load_file(Path(self.config.test_hf_dir) / "model.safetensors")
for name, loaded_weight in state_dict.items():
# name = self._replace_name(original_name, self.params_mapping)
if not name or (name.endswith(".bias") and name not in ref_state_dict):
continue
if "rotary_emb.inv_freq" in name:
continue
if self.config.tie_word_embedding and "lm_head.weight" in name:
continue
if name not in ref_state_dict:
raise RuntimeError(f"key: {name} not exist in state_dict")
param = ref_state_dict[name]
assert loaded_weight.dtype == param.dtype
torch.testing.assert_close(loaded_weight, param, atol=1e-2, rtol=5e-2)
def _replace_name(self, megatron_name: str, name_mapping: dict[str, str]) -> str:
for m_name, v_name in name_mapping.items():
if m_name not in megatron_name:
continue
megatron_name = megatron_name.replace("decoder", "model")
param_name = megatron_name.replace(m_name, v_name)
return param_name
return None # Return None if no mapping found
def main():
parser = argparse.ArgumentParser(description="verl model merger")
subparsers = parser.add_subparsers(dest="operation", required=True, help="Specify 'merge' or 'test' operation.")
base_op_parser = argparse.ArgumentParser(add_help=False)
base_op_parser.add_argument(
"--backend", type=str, required=True, choices=["fsdp", "megatron"], help="The backend of the model"
)
base_op_parser.add_argument("--local_dir", type=str, required=True, help="Path to the saved model checkpoints")
base_op_parser.add_argument(
"--hf_model_path",
type=str,
default=None,
help="(Deprecated) Path to the original Hugging Face model for config.",
)
base_op_parser.add_argument(
"--tie-word-embedding",
action="store_true",
help="Whether to tie word embedding weights (currently only Megatron supported)",
)
base_op_parser.add_argument(
"--is-value-model",
action="store_true",
help="Whether the model is a value model (currently only Megatron supported)",
)
merge_parser = subparsers.add_parser("merge", parents=[base_op_parser], help="Merge model checkpoints and save.")
merge_parser.add_argument(
"--target_dir", default="tmp", type=str, help="Directory to save the merged huggingface model"
)
merge_parser.add_argument(
"--hf_upload_path", default=None, type=str, help="Hugging Face repository ID to upload the model"
)
merge_parser.add_argument(
"--private", action="store_true", help="Whether to upload the model to a private Hugging Face repository"
)
test_parser = subparsers.add_parser(
"test", parents=[base_op_parser], help="Test merged model against a reference Hugging Face model"
)
test_parser.add_argument(
"--test_hf_dir", type=str, required=True, help="Path to the reference Hugging Face model directory for testing"
)
args = parser.parse_args()
common_config_args = {
"operation": args.operation,
"backend": args.backend,
"tie_word_embedding": args.tie_word_embedding,
"is_value_model": args.is_value_model,
"local_dir": args.local_dir,
"hf_model_path": args.hf_model_path,
"hf_model_config_path": args.local_dir,
}
if args.operation == "merge":
config = ModelMergerConfig(
**common_config_args,
target_dir=args.target_dir,
hf_upload_path=args.hf_upload_path,
private=args.private,
test_hf_dir=None,
)
os.makedirs(config.target_dir, exist_ok=True)
elif args.operation == "test":
config = ModelMergerConfig(
**common_config_args,
test_hf_dir=args.test_hf_dir,
# the following args are not used by test operation
target_dir=None,
hf_upload_path=None,
private=False,
)
else:
raise NotImplementedError(f"Unknown operation: {args.operation}")
if config.backend == "fsdp":
merger = FSDPModelMerger(config)
elif config.backend == "megatron":
merger = MegatronModelMerger(config)
else:
raise NotImplementedError(f"Unknown backend: {config.backend}")
merger.merge_and_save()
if __name__ == "__main__":
main()
| {
"repo_id": "verl-project/verl",
"file_path": "scripts/legacy_model_merger.py",
"license": "Apache License 2.0",
"lines": 682,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:tests/special_sanity/check_pr_description.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python3
import json
import os
# Number of lines to check
NUM_LINES = 5
# Custom exception types for clear error handling
class TemplateFileError(Exception):
pass
class PRBodyLoadError(Exception):
pass
class PRDescriptionError(Exception):
pass
# Path to the PR template file
template_file = os.path.join(os.getenv("GITHUB_WORKSPACE", "."), ".github", "PULL_REQUEST_TEMPLATE.md")
def load_template(path):
"""
    Load only the first NUM_LINES of the PR template file as a list of lines,
    with leading/trailing whitespace stripped from each line.
"""
lines = []
try:
with open(path, encoding="utf-8") as f:
for _ in range(NUM_LINES):
line = f.readline()
if not line:
break
lines.append(line.strip())
return lines
except Exception as e:
raise TemplateFileError(f"Failed to read PR template (first {NUM_LINES} lines) at {path}: {e}") from e
def load_pr_body(event_path):
try:
with open(event_path, encoding="utf-8") as f:
payload = json.load(f)
return payload.get("pull_request", {}).get("body", "") or ""
except Exception as e:
raise PRBodyLoadError(f"Failed to read PR body from {event_path}: {e}") from e
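# For reference, the portion of the GitHub event payload that load_pr_body reads looks roughly like
# (illustrative, trimmed to the fields used here):
#   {"pull_request": {"body": "### What does this PR do?\n..."}}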
def check_pr_description(body, template_lines):
"""
Compare the first NUM_LINES lines of the PR body to the template lines.
If they match exactly, the placeholder was not modified.
"""
pr_lines = body.splitlines(keepends=True)
pr_first = [x.strip() for x in pr_lines[:NUM_LINES]]
if pr_first == template_lines:
raise PRDescriptionError(
"It looks like you haven't updated the '### What does this PR do?' section. Please replace "
"the placeholder text with a concise description of what your PR does."
)
else:
print(pr_first)
print(template_lines)
def main():
event_path = os.getenv("GITHUB_EVENT_PATH")
if not event_path:
raise OSError("GITHUB_EVENT_PATH is not set.")
template_lines = load_template(template_file)
pr_body = load_pr_body(event_path)
check_pr_description(pr_body, template_lines)
print("✅ '### What does this PR do?' section has been filled out.")
if __name__ == "__main__":
main()
| {
"repo_id": "verl-project/verl",
"file_path": "tests/special_sanity/check_pr_description.py",
"license": "Apache License 2.0",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:tests/special_sanity/check_docs_time_info.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Check that every .md and .rst file under docs/ contains the substring "Last updated",
with an allow-list for exceptions.
"""
import sys
from pathlib import Path
# === CONFIGURATION ===
# Relative paths (to docs/) or glob patterns to skip checking
ALLOW_LIST = {
"docs/README.md", # you can list individual files
"docs/legacy/*.rst", # or glob patterns
"docs/index.rst",
"docs/start/install.rst",
"docs/start/quickstart.rst",
"docs/README_vllm0.7.md",
}
# The folder to scan
DOCS_DIR = Path("docs")
# === SCRIPT ===
def is_allowed(path: Path) -> bool:
"""
Return True if `path` matches any entry in ALLOW_LIST.
"""
rel = str(path)
for pattern in ALLOW_LIST:
if Path(rel).match(pattern):
return True
return False
def main():
if not DOCS_DIR.exists():
print(f"Error: Documentation directory '{DOCS_DIR}' does not exist.", file=sys.stderr)
sys.exit(1)
missing = []
# Gather all .md and .rst files under docs/
for ext in ("*.md", "*.rst"):
for path in DOCS_DIR.rglob(ext):
if is_allowed(path):
continue
text = path.read_text(encoding="utf-8", errors="ignore")
if "Last updated" not in text:
missing.append(path)
# Report
if missing:
print("\nThe following files are missing the 'Last updated' string:\n")
for p in missing:
print(f" - {p}")
print(f"\nTotal missing: {len(missing)}\n", file=sys.stderr)
raise AssertionError(
"Some documentation files lack a 'Last updated' line. Please include info such as "
"'Last updated: mm/dd/yyyy' to indicate the last update time of the document."
)
else:
print("✅ All checked files contain 'Last updated'.")
if __name__ == "__main__":
main()
| {
"repo_id": "verl-project/verl",
"file_path": "tests/special_sanity/check_docs_time_info.py",
"license": "Apache License 2.0",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:tests/interactions/test_interaction_registry.py | # Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import pytest
from omegaconf import OmegaConf
from verl.interactions.base import BaseInteraction
from verl.interactions.gsm8k_interaction import Gsm8kInteraction
from verl.interactions.utils.interaction_registry import (
get_interaction_class,
initialize_interactions_from_config,
)
class TestInteractionRegistry:
def test_get_interaction_class(self):
"""Test getting interaction class by name."""
# Test getting base interaction class
base_cls = get_interaction_class("verl.interactions.base.BaseInteraction")
assert base_cls == BaseInteraction
# Test getting gsm8k interaction class
gsm8k_cls = get_interaction_class("verl.interactions.gsm8k_interaction.Gsm8kInteraction")
assert gsm8k_cls == Gsm8kInteraction
def test_initialize_single_interaction_from_config(self):
"""Test initializing single interaction from config."""
# Create temporary config file
config_content = {
"interaction": [
{
"name": "test_gsm8k",
"class_name": "verl.interactions.gsm8k_interaction.Gsm8kInteraction",
"config": {},
}
]
}
with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f:
OmegaConf.save(config_content, f.name)
temp_config_path = f.name
try:
interaction_map = initialize_interactions_from_config(temp_config_path)
# Check that interaction was created
assert len(interaction_map) == 1
assert "test_gsm8k" in interaction_map
assert isinstance(interaction_map["test_gsm8k"], Gsm8kInteraction)
assert interaction_map["test_gsm8k"].name == "test_gsm8k"
finally:
os.unlink(temp_config_path)
def test_initialize_multiple_interactions_from_config(self):
"""Test initializing multiple interactions from config."""
config_content = {
"interaction": [
{
"name": "gsm8k_solver",
"class_name": "verl.interactions.gsm8k_interaction.Gsm8kInteraction",
"config": {},
},
{
"name": "base_agent",
"class_name": "verl.interactions.base.BaseInteraction",
"config": {"custom_param": "test_value"},
},
]
}
with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f:
OmegaConf.save(config_content, f.name)
temp_config_path = f.name
try:
interaction_map = initialize_interactions_from_config(temp_config_path)
# Check that both interactions were created
assert len(interaction_map) == 2
assert "gsm8k_solver" in interaction_map
assert "base_agent" in interaction_map
# Check types
assert isinstance(interaction_map["gsm8k_solver"], Gsm8kInteraction)
assert isinstance(interaction_map["base_agent"], BaseInteraction)
# Check names were injected
assert interaction_map["gsm8k_solver"].name == "gsm8k_solver"
assert interaction_map["base_agent"].name == "base_agent"
# Check custom config was passed
assert interaction_map["base_agent"].config.get("custom_param") == "test_value"
finally:
os.unlink(temp_config_path)
def test_initialize_interaction_without_explicit_name(self):
"""Test that interaction name is derived from class name when not specified."""
config_content = {
"interaction": [{"class_name": "verl.interactions.gsm8k_interaction.Gsm8kInteraction", "config": {}}]
}
with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f:
OmegaConf.save(config_content, f.name)
temp_config_path = f.name
try:
interaction_map = initialize_interactions_from_config(temp_config_path)
# Check that interaction name was derived from class name
assert len(interaction_map) == 1
assert "gsm8k" in interaction_map # Should be "gsm8k" after removing "interaction" suffix
assert isinstance(interaction_map["gsm8k"], Gsm8kInteraction)
assert interaction_map["gsm8k"].name == "gsm8k"
finally:
os.unlink(temp_config_path)
def test_initialize_empty_config(self):
"""Test initializing from empty config."""
config_content = {"interaction": []}
with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f:
OmegaConf.save(config_content, f.name)
temp_config_path = f.name
try:
interaction_map = initialize_interactions_from_config(temp_config_path)
assert len(interaction_map) == 0
finally:
os.unlink(temp_config_path)
def test_invalid_class_name(self):
"""Test handling of invalid class name."""
config_content = {
"interaction": [{"name": "invalid", "class_name": "invalid.module.InvalidClass", "config": {}}]
}
with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f:
OmegaConf.save(config_content, f.name)
temp_config_path = f.name
try:
with pytest.raises(ModuleNotFoundError):
initialize_interactions_from_config(temp_config_path)
finally:
os.unlink(temp_config_path)
def test_duplicate_interaction_names(self):
"""Test handling of duplicate interaction names."""
config_content = {
"interaction": [
{"name": "duplicate", "class_name": "verl.interactions.base.BaseInteraction", "config": {}},
{
"name": "duplicate",
"class_name": "verl.interactions.gsm8k_interaction.Gsm8kInteraction",
"config": {},
},
]
}
with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f:
OmegaConf.save(config_content, f.name)
temp_config_path = f.name
try:
with pytest.raises(ValueError, match="Duplicate interaction name 'duplicate' found"):
initialize_interactions_from_config(temp_config_path)
finally:
os.unlink(temp_config_path)
def test_auto_name_generation_edge_cases(self):
"""Test automatic name generation for various class name patterns."""
config_content = {
"interaction": [
{"class_name": "verl.interactions.base.BaseInteraction", "config": {}},
{"class_name": "verl.interactions.gsm8k_interaction.Gsm8kInteraction", "config": {}},
]
}
with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f:
OmegaConf.save(config_content, f.name)
temp_config_path = f.name
try:
interaction_map = initialize_interactions_from_config(temp_config_path)
# Check that names were generated correctly
assert len(interaction_map) == 2
assert "base" in interaction_map # BaseInteraction -> base
assert "gsm8k" in interaction_map # Gsm8kInteraction -> gsm8k
finally:
os.unlink(temp_config_path)
| {
"repo_id": "verl-project/verl",
"file_path": "tests/interactions/test_interaction_registry.py",
"license": "Apache License 2.0",
"lines": 172,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:verl/interactions/utils/interaction_registry.py | # Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib.util
import logging
import os
import sys
from omegaconf import OmegaConf
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
def get_interaction_class(cls_name):
"""Dynamically import and return the interaction class."""
module_name, class_name = cls_name.rsplit(".", 1)
if module_name not in sys.modules:
spec = importlib.util.find_spec(module_name)
module = importlib.util.module_from_spec(spec)
sys.modules[module_name] = module
spec.loader.exec_module(module)
else:
module = sys.modules[module_name]
interaction_cls = getattr(module, class_name)
return interaction_cls
def initialize_interactions_from_config(interaction_config_file):
"""Initialize interactions from configuration file.
Args:
interaction_config_file: Path to the interaction configuration file.
Returns:
dict: A dictionary mapping interaction names to BaseInteraction instances.
"""
interaction_config = OmegaConf.load(interaction_config_file)
interaction_map = {}
for interaction_item in interaction_config.interaction:
cls_name = interaction_item.class_name
interaction_cls = get_interaction_class(cls_name)
# Extract config and name
config = OmegaConf.to_container(interaction_item.config, resolve=True)
# Get the interaction name - either from config or derive from class name
name = interaction_item.get("name", None)
if name is None:
# If no name is specified, use the class name as default
class_simple_name = cls_name.split(".")[-1]
# Remove "Interaction" suffix if present, otherwise use full class name
if class_simple_name.endswith("Interaction"):
name = class_simple_name[:-11].lower() # Remove "Interaction" (11 chars)
else:
name = class_simple_name.lower()
# Check for duplicate names
if name in interaction_map:
raise ValueError(f"Duplicate interaction name '{name}' found. Each interaction must have a unique name.")
# Inject the name into the config
config["name"] = name
# Create the interaction instance
interaction = interaction_cls(config=config)
interaction_map[name] = interaction
logger.info(f"Initialized interaction '{name}' with class '{cls_name}'")
return interaction_map
| {
"repo_id": "verl-project/verl",
"file_path": "verl/interactions/utils/interaction_registry.py",
"license": "Apache License 2.0",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:tests/utils/ckpt/test_esi_save_ckpt_on_cpu.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from datetime import datetime, timedelta
from unittest import TestCase
from verl.utils.checkpoint.checkpoint_manager import should_save_ckpt_esi
class TestShouldSaveCkptEsi(TestCase):
def test_no_expiration_timestamp(self):
"""Test case when no expiration timestamp is set"""
os.environ.pop("MLP_CURRENT_CAPACITY_BLOCK_EXPIRATION_TIMESTAMP", None)
os.environ.pop("SAGEMAKER_CURRENT_CAPACITY_BLOCK_EXPIRATION_TIMESTAMP", None)
self.assertFalse(should_save_ckpt_esi(100))
def test_mlp_expiration_valid(self):
"""Test valid MLP expiration timestamp requiring save"""
current_time = time.time()
os.environ["MLP_CURRENT_CAPACITY_BLOCK_EXPIRATION_TIMESTAMP"] = str(current_time + 90)
self.assertTrue(should_save_ckpt_esi(30)) # max_steps_duration=30 seconds
def test_mlp_expiration_passed(self):
"""Test expired MLP timestamp"""
current_time = time.time()
os.environ["MLP_CURRENT_CAPACITY_BLOCK_EXPIRATION_TIMESTAMP"] = str(current_time - 10)
self.assertFalse(should_save_ckpt_esi(30))
def test_mlp_invalid_timestamp(self):
"""Test invalid MLP timestamp format"""
os.environ["MLP_CURRENT_CAPACITY_BLOCK_EXPIRATION_TIMESTAMP"] = "invalid"
self.assertFalse(should_save_ckpt_esi(30))
def test_mlp_expiration_not_reached(self):
"""Test MLP expiration timestamp with insufficient remaining time"""
current_time = time.time()
os.environ["MLP_CURRENT_CAPACITY_BLOCK_EXPIRATION_TIMESTAMP"] = str(current_time + 200)
self.assertFalse(should_save_ckpt_esi(30)) # max_steps_duration=30
def test_aws_expiration_not_reached(self):
"""Test AWS expiration timestamp with sufficient remaining time"""
now = datetime.now()
expiration = now + timedelta(minutes=100) # Exceeds 90-minute threshold
os.environ["SAGEMAKER_CURRENT_CAPACITY_BLOCK_EXPIRATION_TIMESTAMP"] = str(int(expiration.timestamp()))
self.assertFalse(should_save_ckpt_esi(30 * 60))
def test_redundant_time(self):
"""Test redundant_time parameter effect"""
current_time = time.time()
# Total required: 60+30+30=120 seconds
os.environ["MLP_CURRENT_CAPACITY_BLOCK_EXPIRATION_TIMESTAMP"] = str(current_time + 120)
self.assertTrue(should_save_ckpt_esi(30, redundant_time=30))
def test_zero_max_steps_duration(self):
"""Test zero max_steps_duration"""
current_time = time.time()
os.environ["MLP_CURRENT_CAPACITY_BLOCK_EXPIRATION_TIMESTAMP"] = str(current_time + 60)
self.assertFalse(should_save_ckpt_esi(0))
| {
"repo_id": "verl-project/verl",
"file_path": "tests/utils/ckpt/test_esi_save_ckpt_on_cpu.py",
"license": "Apache License 2.0",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:tests/experimental/agent_loop/agent_utils.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ray
from omegaconf import DictConfig
from verl.checkpoint_engine import CheckpointEngineManager
from verl.experimental.agent_loop import AgentLoopManager
from verl.experimental.reward_loop import RewardLoopManager
from verl.single_controller.ray import RayClassWithInitArgs, RayWorkerGroup
from verl.single_controller.ray.base import create_colocated_worker_cls
from verl.trainer.ppo.ray_trainer import ResourcePoolManager, Role
from verl.utils import omega_conf_to_dataclass
from verl.workers.fsdp_workers import AsyncActorRolloutRefWorker
def init_agent_loop_manager(config: DictConfig) -> AgentLoopManager | RayWorkerGroup:
# =========================== 1. Create hybrid ActorRollout workers ===========================
actor_rollout_cls = AsyncActorRolloutRefWorker
role_worker_mapping = {
Role.ActorRollout: ray.remote(actor_rollout_cls),
}
global_pool_id = "global_pool"
resource_pool_spec = {
global_pool_id: [config.trainer.n_gpus_per_node] * config.trainer.nnodes,
}
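    # e.g. with trainer.nnodes=2 and trainer.n_gpus_per_node=8 (illustrative values),
    # resource_pool_spec == {"global_pool": [8, 8]}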
mapping = {
Role.ActorRollout: global_pool_id,
}
if config.reward.reward_model.enable_resource_pool:
mapping[Role.RewardModel] = "reward_pool"
if config.reward.reward_model.n_gpus_per_node <= 0:
raise ValueError("config.reward.reward_model.n_gpus_per_node must be greater than 0")
if config.reward.reward_model.nnodes <= 0:
raise ValueError("config.reward.reward_model.nnodes must be greater than 0")
reward_pool = [config.reward.reward_model.n_gpus_per_node] * config.reward.reward_model.nnodes
resource_pool_spec["reward_pool"] = reward_pool
resource_pool_manager = ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=mapping)
resource_pool_manager.create_resource_pool()
resource_pool_to_cls = {pool: {} for pool in resource_pool_manager.resource_pool_dict.values()}
# create actor and rollout
resource_pool = resource_pool_manager.get_resource_pool(Role.ActorRollout)
actor_rollout_cls = RayClassWithInitArgs(
cls=role_worker_mapping[Role.ActorRollout], config=config.actor_rollout_ref, role="actor_rollout"
)
resource_pool_to_cls[resource_pool]["actor_rollout"] = actor_rollout_cls
all_wg = {}
for resource_pool, class_dict in resource_pool_to_cls.items():
worker_dict_cls = create_colocated_worker_cls(class_dict=class_dict)
wg_dict = RayWorkerGroup(resource_pool=resource_pool, ray_cls_with_init=worker_dict_cls)
spawn_wg = wg_dict.spawn(prefix_set=class_dict.keys())
all_wg.update(spawn_wg)
actor_rollout_wg = all_wg["actor_rollout"]
actor_rollout_wg.init_model()
if config.actor_rollout_ref.rollout.mode == "sync":
raise ValueError("Agent loop tests require async rollout mode. Please set rollout.mode=async.")
# =========================== 2. Create AgentLoopManager ===========================
rm_resource_pool = (
resource_pool_manager.get_resource_pool(Role.RewardModel) if config.reward.reward_model.enable else None
)
reward_loop_manager = RewardLoopManager(
config=config,
rm_resource_pool=rm_resource_pool,
)
agent_loop_manager = AgentLoopManager.create(
config=config,
worker_group=actor_rollout_wg,
reward_loop_worker_handles=reward_loop_manager.reward_loop_workers,
)
checkpoint_manager = CheckpointEngineManager(
config=omega_conf_to_dataclass(config.actor_rollout_ref.rollout.checkpoint_engine),
trainer=actor_rollout_wg,
replicas=agent_loop_manager.rollout_replicas,
)
checkpoint_manager.sleep_replicas()
checkpoint_manager.update_weights()
return agent_loop_manager
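# A minimal usage sketch (illustrative; config names/values are assumptions based on the tests that use
# this helper):
#
#   import os
#   from hydra import compose, initialize_config_dir
#   with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config")):
#       config = compose(config_name="ppo_trainer")
#   config.actor_rollout_ref.model.path = "~/models/Qwen/Qwen2.5-1.5B-Instruct"
#   config.actor_rollout_ref.rollout.mode = "async"
#   agent_loop_manager = init_agent_loop_manager(config)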
| {
"repo_id": "verl-project/verl",
"file_path": "tests/experimental/agent_loop/agent_utils.py",
"license": "Apache License 2.0",
"lines": 84,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:tests/experimental/agent_loop/test_basic_agent_loop.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from typing import Any
import numpy as np
import pytest
import ray
from omegaconf import DictConfig
from transformers.utils import get_json_schema
from tests.experimental.agent_loop.agent_utils import init_agent_loop_manager
from verl.checkpoint_engine import CheckpointEngineManager
from verl.experimental.agent_loop.agent_loop import get_trajectory_info
from verl.protocol import DataProto
from verl.tools.base_tool import BaseTool, OpenAIFunctionToolSchema
from verl.tools.schemas import ToolResponse
from verl.utils import hf_tokenizer
from verl.utils.config import omega_conf_to_dataclass
from verl.workers.config import CheckpointEngineConfig
@pytest.fixture
def init_config() -> DictConfig:
from hydra import compose, initialize_config_dir
with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config")):
config = compose(
config_name="ppo_trainer",
overrides=[
"actor_rollout_ref.actor.use_dynamic_bsz=true",
# test sleep/wake_up with fsdp offload
"actor_rollout_ref.actor.fsdp_config.param_offload=True",
"actor_rollout_ref.actor.fsdp_config.optimizer_offload=True",
"reward.reward_manager.name=dapo",
"+reward.reward_kwargs.overlong_buffer_cfg.enable=False",
"+reward.reward_kwargs.overlong_buffer_cfg.len=3072",
"+reward.reward_kwargs.max_resp_len=4096",
],
)
model_path = os.path.expanduser("~/models/Qwen/Qwen2.5-1.5B-Instruct")
config.actor_rollout_ref.model.path = model_path
config.actor_rollout_ref.rollout.name = os.environ["ROLLOUT_NAME"]
config.actor_rollout_ref.rollout.mode = "async"
config.actor_rollout_ref.rollout.enforce_eager = True
config.actor_rollout_ref.rollout.prompt_length = 4096
config.actor_rollout_ref.rollout.response_length = 4096
config.actor_rollout_ref.rollout.n = 4
config.actor_rollout_ref.rollout.agent.num_workers = 2
config.actor_rollout_ref.rollout.skip_tokenizer_init = True
return config
def test_single_turn(init_config):
ray.init(
runtime_env={
"env_vars": {
"TOKENIZERS_PARALLELISM": "true",
"NCCL_DEBUG": "WARN",
"VLLM_LOGGING_LEVEL": "INFO",
"VLLM_USE_V1": "1",
}
}
)
agent_loop_manager = init_agent_loop_manager(init_config)
raw_prompts = [
[
{
"role": "user",
"content": "Let's play a role playing game. Your name is Alice, your favorite color is blue.",
}
],
[{"role": "user", "content": "Let's play a role playing game. Your name is Bob, your favorite color is red."}],
]
batch = DataProto(
non_tensor_batch={
"raw_prompt": np.array(raw_prompts),
"agent_name": np.array(["single_turn_agent"] * len(raw_prompts)),
"data_source": np.array(["openai/gsm8k"] * len(raw_prompts)),
"reward_model": np.array([{"style": "rule", "ground_truth": "1.0"}] * len(raw_prompts)),
},
)
n = init_config.actor_rollout_ref.rollout.n
batch = batch.repeat(n)
result = agent_loop_manager.generate_sequences(prompts=batch)
assert len(result) == len(raw_prompts) * n
# check result
seq_len = result.batch["prompts"].size(1) + result.batch["responses"].size(1)
assert result.batch["input_ids"].size(1) == seq_len
assert result.batch["attention_mask"].size(1) == seq_len
assert result.batch["position_ids"].size(1) == seq_len
if init_config.actor_rollout_ref.rollout.calculate_log_probs:
assert result.batch["rollout_log_probs"].size(1) == result.batch["responses"].size(1)
# check compute score
assert result.batch["rm_scores"].shape == result.batch["responses"].shape
reward_tensor = result.batch["rm_scores"]
reward_extra_keys = result.meta_info.get("reward_extra_keys", [])
reward_extra_info = {key: result.non_tensor_batch[key] for key in reward_extra_keys}
assert reward_tensor.shape == result.batch["responses"].shape
assert "acc" in reward_extra_info, f"reward_extra_info {reward_extra_info} should contain 'acc'"
assert reward_extra_info["acc"].shape == (len(result),), f"invalid acc: {reward_extra_info['acc']}"
# check turns
num_turns = result.non_tensor_batch["__num_turns__"]
assert np.all(num_turns == 2)
print("Test passed!")
ray.shutdown()
class WeatherTool(BaseTool):
def get_current_temperature(self, location: str, unit: str = "celsius"):
"""Get current temperature at a location.
Args:
location: The location to get the temperature for, in the format "City, State, Country".
unit: The unit to return the temperature in. Defaults to "celsius". (choices: ["celsius", "fahrenheit"])
Returns:
the temperature, the location, and the unit in a dict
"""
print(f"[DEBUG] get_current_temperature: {location}, {unit}")
return {
"temperature": 26.1,
"location": location,
"unit": unit,
}
def get_openai_tool_schema(self) -> OpenAIFunctionToolSchema:
schema = get_json_schema(self.get_current_temperature)
return OpenAIFunctionToolSchema(**schema)
async def execute(self, instance_id: str, parameters: dict[str, Any], **kwargs) -> tuple[ToolResponse, float, dict]:
try:
result = self.get_current_temperature(**parameters)
return ToolResponse(text=json.dumps(result)), 0, {}
except Exception as e:
return ToolResponse(text=str(e)), 0, {}
class WeatherToolWithData(BaseTool):
def get_openai_tool_schema(self) -> OpenAIFunctionToolSchema:
schema = get_json_schema(self.get_temperature_date)
return OpenAIFunctionToolSchema(**schema)
def get_temperature_date(self, location: str, date: str, unit: str = "celsius"):
"""Get temperature at a location and date.
Args:
location: The location to get the temperature for, in the format "City, State, Country".
date: The date to get the temperature for, in the format "Year-Month-Day".
unit: The unit to return the temperature in. Defaults to "celsius". (choices: ["celsius", "fahrenheit"])
Returns:
the temperature, the location, the date and the unit in a dict
"""
print(f"[DEBUG] get_temperature_date: {location}, {date}, {unit}")
return {
"temperature": 25.9,
"location": location,
"date": date,
"unit": unit,
}
async def execute(self, instance_id: str, parameters: dict[str, Any], **kwargs) -> tuple[ToolResponse, float, dict]:
try:
result = self.get_temperature_date(**parameters)
return ToolResponse(text=json.dumps(result)), 0, {}
except Exception as e:
return ToolResponse(text=str(e)), 0, {}
def test_tool_agent(init_config):
ray.init(
runtime_env={
"env_vars": {
"TOKENIZERS_PARALLELISM": "true",
"NCCL_DEBUG": "WARN",
"VLLM_LOGGING_LEVEL": "INFO",
"VLLM_USE_V1": "1",
}
},
ignore_reinit_error=True,
)
# =========================== 1. Init rollout manager ===========================
tool_config = {
"tools": [
{
"class_name": "tests.experimental.agent_loop.test_basic_agent_loop.WeatherTool",
"config": {"type": "native"},
},
{
"class_name": "tests.experimental.agent_loop.test_basic_agent_loop.WeatherToolWithData",
"config": {"type": "native"},
},
]
}
tool_config_path = "/tmp/tool_config.json"
with open(tool_config_path, "w") as f:
json.dump(tool_config, f)
n = 2
init_config.actor_rollout_ref.rollout.n = n
init_config.actor_rollout_ref.rollout.multi_turn.tool_config_path = tool_config_path
init_config.actor_rollout_ref.rollout.multi_turn.max_parallel_calls = 2
init_config.actor_rollout_ref.rollout.calculate_log_probs = True
agent_loop_manager = init_agent_loop_manager(init_config)
# =========================== 2. Generate sequences ===========================
raw_prompts = [
[
{"role": "user", "content": "How are you?"},
],
[
{"role": "user", "content": "What's the temperature in Los Angeles now?"},
],
[
{"role": "user", "content": "What's the temperature in New York now?"},
],
[
{
"role": "system",
"content": "You are Qwen, created by Alibaba Cloud. You are a helpful assistant.\n\n"
"Current Date: 2024-09-30",
},
{"role": "user", "content": "What's the temperature in San Francisco now? How about tomorrow?"},
],
]
batch = DataProto(
non_tensor_batch={
"raw_prompt": np.array([np.array(prompt) for prompt in raw_prompts], dtype=object),
"agent_name": np.array(["tool_agent"] * len(raw_prompts)),
"data_source": np.array(["openai/gsm8k"] * len(raw_prompts)),
"reward_model": np.array([{"style": "rule", "ground_truth": "1.0"}] * len(raw_prompts)),
},
)
batch = batch.repeat(n)
result = agent_loop_manager.generate_sequences(prompts=batch)
assert len(result) == len(raw_prompts) * n
# Check turns
num_turns = result.non_tensor_batch["__num_turns__"]
print(f"num_turns: {num_turns}")
for i in range(len(num_turns)):
if i // n == 0:
# [user, assistant]
assert num_turns[i] == 2
else:
# [user, assistant, tool, assistant]
assert num_turns[i] == 4
# Check response_mask
tokenizer = hf_tokenizer(init_config.actor_rollout_ref.model.path)
responses = result.batch["responses"]
response_mask = result.batch["response_mask"]
attention_mask = result.batch["attention_mask"]
assert result.batch["rm_scores"].size(1) == responses.size(1)
assert responses.size() == response_mask.size(), f"{responses.size()} != {response_mask.size()}"
assert result.batch["rollout_log_probs"].size(1) == result.batch["responses"].size(1)
response_length = response_mask.size(1)
for i in range(len(responses)):
# response with tool response
valid_tokens = responses[i][attention_mask[i][-response_length:].bool()]
response_with_obs = tokenizer.decode(valid_tokens)
# response without tool response
valid_tokens = responses[i][response_mask[i].bool()]
response_without_obs = tokenizer.decode(valid_tokens)
assert "<tool_response>" not in response_without_obs, (
f"found <tool_response> in response: {response_without_obs}"
)
assert "</tool_response>" not in response_without_obs, (
f"found </tool_response> in response: {response_without_obs}"
)
print("=========================")
print(response_with_obs)
print("---")
print(response_without_obs)
print("Test passed!")
ray.shutdown()
def test_tool_agent_with_interaction(init_config):
ray.init(
runtime_env={
"env_vars": {
"TOKENIZERS_PARALLELISM": "true",
"NCCL_DEBUG": "WARN",
"VLLM_LOGGING_LEVEL": "INFO",
"VLLM_USE_V1": "1",
}
}
)
# =========================== 1. Init rollout manager ===========================
tool_config = {
"tools": [
{
"class_name": "tests.experimental.agent_loop.test_basic_agent_loop.WeatherTool",
"config": {"type": "native"},
},
{
"class_name": "tests.experimental.agent_loop.test_basic_agent_loop.WeatherToolWithData",
"config": {"type": "native"},
},
]
}
tool_config_path = "/tmp/tool_config.json"
with open(tool_config_path, "w") as f:
json.dump(tool_config, f)
interaction_config = {
"interaction": [
{"name": "weather", "class_name": "verl.interactions.weather_interaction.WeatherInteraction", "config": {}}
]
}
interaction_config_path = "/tmp/interaction_config.json"
with open(interaction_config_path, "w") as f:
json.dump(interaction_config, f)
n = 2
init_config.actor_rollout_ref.rollout.n = n
init_config.actor_rollout_ref.rollout.multi_turn.tool_config_path = tool_config_path
init_config.actor_rollout_ref.rollout.multi_turn.interaction_config_path = interaction_config_path
init_config.actor_rollout_ref.rollout.multi_turn.max_parallel_calls = 2
agent_loop_manager = init_agent_loop_manager(init_config)
checkpoint_engine_config = omega_conf_to_dataclass(
init_config.actor_rollout_ref.rollout.checkpoint_engine, CheckpointEngineConfig
)
checkpoint_manager = CheckpointEngineManager(
config=checkpoint_engine_config,
trainer=agent_loop_manager.worker_group,
replicas=agent_loop_manager.rollout_replicas,
)
checkpoint_manager.sleep_replicas()
checkpoint_manager.update_weights()
# =========================== 2. Generate sequences ===========================
raw_prompts = [
[
{"role": "user", "content": "How are you?"},
],
[
{"role": "user", "content": "What's the temperature in Los Angeles now?"},
],
[
{"role": "user", "content": "What's the temperature in New York now?"},
],
[
{
"role": "system",
"content": "You are Qwen, created by Alibaba Cloud. You are a helpful assistant.\n\n"
"Current Date: 2024-09-30",
},
{"role": "user", "content": "What's the temperature in San Francisco now? How about tomorrow?"},
],
]
batch = DataProto(
non_tensor_batch={
"raw_prompt": np.array([np.array(prompt) for prompt in raw_prompts], dtype=object),
"agent_name": np.array(["tool_agent"] * len(raw_prompts)),
"data_source": np.array(["openai/gsm8k"] * len(raw_prompts)),
"reward_model": np.array([{"style": "rule", "ground_truth": "1.0"}] * len(raw_prompts)),
"extra_info": np.array(
[
{"interaction_kwargs": {"name": "weather"}},
{"interaction_kwargs": {"name": "weather"}},
{"interaction_kwargs": {"name": "weather"}},
{"interaction_kwargs": {"name": "weather"}},
]
),
},
)
batch = batch.repeat(n)
result = agent_loop_manager.generate_sequences(prompts=batch)
assert len(result) == len(raw_prompts) * n
# Check turns
num_turns = result.non_tensor_batch["__num_turns__"]
print(f"num_turns: {num_turns}")
for i in range(len(num_turns)):
if i // n == 0:
# [user, assistant, user]
assert num_turns[i] == 3
else:
# [user, assistant, tool, assistant, user]
assert num_turns[i] == 5
# Check response_mask
tokenizer = hf_tokenizer(init_config.actor_rollout_ref.model.path)
responses = result.batch["responses"]
response_mask = result.batch["response_mask"]
attention_mask = result.batch["attention_mask"]
assert responses.size() == response_mask.size(), f"{responses.size()} != {response_mask.size()}"
response_length = response_mask.size(1)
for i in range(len(responses)):
# response with tool response
valid_tokens = responses[i][attention_mask[i][-response_length:].bool()]
response_with_obs = tokenizer.decode(valid_tokens)
# response without tool response
valid_tokens = responses[i][response_mask[i].bool()]
response_without_obs = tokenizer.decode(valid_tokens)
assert "\udb82\udc89" not in response_without_obs, f"found \udb82\udc89 in response: {response_without_obs}"
assert "\udb82\udc8a" not in response_without_obs, f"found \udb82\udc8a in response: {response_without_obs}"
print("=========================")
print(response_with_obs)
print("---")
print(response_without_obs)
print("Test passed!")
ray.shutdown()
@pytest.mark.asyncio
async def test_get_trajectory_info():
"""Tests the get_trajectory_info method."""
# Initialize the class to set up class-level attributes
step = 10
index = [1, 1, 3, 3]
expected_info = [
{"step": step, "sample_index": 1, "rollout_n": 0, "validate": False},
{"step": step, "sample_index": 1, "rollout_n": 1, "validate": False},
{"step": step, "sample_index": 3, "rollout_n": 0, "validate": False},
{"step": step, "sample_index": 3, "rollout_n": 1, "validate": False},
]
trajectory_info = await get_trajectory_info(step, index, validate=False)
assert trajectory_info == expected_info
| {
"repo_id": "verl-project/verl",
"file_path": "tests/experimental/agent_loop/test_basic_agent_loop.py",
"license": "Apache License 2.0",
"lines": 400,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:verl/experimental/agent_loop/agent_loop.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import heapq
import logging
import os
import random
from abc import ABC, abstractmethod
from typing import Any, Optional
from uuid import uuid4
import hydra
import numpy as np
import ray
import torch
from cachetools import LRUCache
from omegaconf import DictConfig, OmegaConf
from PIL import Image
from pydantic import BaseModel, ConfigDict
from tensordict import TensorDict
from transformers import AutoProcessor, AutoTokenizer
from verl.experimental.agent_loop.prometheus_utils import update_prometheus_config
from verl.experimental.agent_loop.utils import resolve_config_path
from verl.protocol import DataProto
from verl.single_controller.ray.base import RayResourcePool, RayWorkerGroup
from verl.utils.chat_template import initialize_system_prompt
from verl.utils.config import omega_conf_to_dataclass
from verl.utils.dataset.rl_dataset import RLHFDataset, get_dataset_class
from verl.utils.model import compute_position_id_with_mask
from verl.utils.ray_utils import auto_await, get_event_loop
from verl.utils.rollout_trace import (
RolloutTraceConfig,
rollout_trace_attr,
rollout_trace_op,
)
from verl.utils.tokenizer import normalize_token_ids
from verl.workers.config import HFModelConfig, RolloutConfig
from verl.workers.rollout.replica import TokenOutput, get_rollout_replica_class
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
def _get_rollout_and_model_config(config: DictConfig) -> tuple[DictConfig, DictConfig]:
# TODO: backward compatibility, remove this once we switch to new trainer.
if config.get("actor_rollout_ref"):
return config.actor_rollout_ref.rollout, config.actor_rollout_ref.model
else:
return config.rollout, config.model
class AsyncLLMServerManager:
"""
    A class to manage multiple OpenAI-compatible LLM servers. This class provides:
    - Load balancing: least-requests load balancing
    - Sticky sessions: send multi-turn chat completions to the same server for automatic prefix caching
"""
def __init__(self, config: DictConfig, server_handles: list[ray.actor.ActorHandle], max_cache_size: int = 10000):
"""Initialize the AsyncLLMServerManager.
Args:
config (DictConfig): whole config for main entrypoint.
server_handles (List[ray.actor.ActorHandle]): OpenAI compatible LLM server actor handles.
max_cache_size (int, optional): max cache size for request_id to server mapping. Defaults to 10000.
"""
self.config = config
self.server_handles = server_handles
random.shuffle(self.server_handles)
# Least requests load balancing
self.weighted_serveres = [[0, idx, server] for idx, server in enumerate(self.server_handles)]
heapq.heapify(self.weighted_serveres)
# LRU cache to map request_id to server
self.request_id_to_server = LRUCache(maxsize=max_cache_size)
def _choose_server(self, request_id: str) -> ray.actor.ActorHandle:
# TODO: implement server pressure awareness load balancing
if request_id in self.request_id_to_server:
return self.request_id_to_server[request_id]
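        # The heap root holds the server with the fewest requests routed so far; bump its count and
        # heapreplace it to restore the heap invariant before returning it.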
_, _, server = self.weighted_serveres[0]
self.weighted_serveres[0][0] += 1
heapq.heapreplace(self.weighted_serveres, self.weighted_serveres[0])
self.request_id_to_server[request_id] = server
return server
@rollout_trace_op
async def generate(
self,
request_id,
*,
prompt_ids: list[int],
sampling_params: dict[str, Any],
image_data: Optional[list[Any]] = None,
video_data: Optional[list[Any]] = None,
) -> TokenOutput:
"""Generate tokens from prompt ids.
Args:
request_id (str): request id for sticky session.
prompt_ids (List[int]): List of prompt token ids.
sampling_params (Dict[str, Any]): Sampling parameters for the chat completion.
Returns:
TokenOutput: token output
"""
server = self._choose_server(request_id)
output = await server.generate.remote(
request_id=uuid4().hex, # use new request_id for each turn
prompt_ids=prompt_ids,
sampling_params=sampling_params,
image_data=image_data,
video_data=video_data,
)
return output
class AgentLoopMetrics(BaseModel):
"""Agent loop performance metrics."""
generate_sequences: float = 0.0
tool_calls: float = 0.0
num_preempted: int = -1 # -1 means not available
class AgentLoopOutput(BaseModel):
"""Agent loop output."""
prompt_ids: list[int]
"""Prompt token ids."""
response_ids: list[int]
"""Response token ids including LLM generated token, tool response token."""
response_mask: list[int]
"""Response mask, 1 for LLM generated token, 0 for tool response token."""
response_logprobs: Optional[list[float]] = None
"""Log probabilities for the response tokens."""
routed_experts: Optional[Any] = None
"""Routed experts for the total tokens."""
multi_modal_data: Optional[dict[str, Any]] = None
"""Multi-modal data for multi-modal tools."""
reward_score: Optional[float] = None
"""Reward score for the trajectory."""
num_turns: int = 0
"""Number of chat turns, including user, assistant, tool."""
metrics: AgentLoopMetrics
"""Auxiliary performance metrics"""
extra_fields: dict[str, Any] = {}
"""Extra fields for dynamic addition."""
class _InternalAgentLoopOutput(AgentLoopOutput):
"""Internal agent loop output with padded sequences."""
model_config = ConfigDict(arbitrary_types_allowed=True)
prompt_ids: torch.Tensor
"""Padded prompt token ids."""
response_ids: torch.Tensor
"""Padded response token ids."""
input_ids: torch.Tensor
"""Padded input ids(prompt_ids + response_ids)."""
position_ids: torch.Tensor
"""Padded position ids."""
response_mask: torch.Tensor
"""Padded response mask."""
attention_mask: torch.Tensor
"""Padded attention mask."""
response_logprobs: Optional[torch.Tensor] = None
"""Padded log probabilities for the response tokens."""
routed_experts: Optional[torch.Tensor] = None
"""Padded routed experts for the total tokens."""
multi_modal_inputs: Optional[dict[str, torch.Tensor]] = None
"""Multi-modal inputs for processors (e.g., pixel_values, image_grid_thw)."""
extra_fields: dict[str, Any] = {}
"""Extra fields for dynamic addition."""
class DictConfigWrap:
"""Wrapper for DictConfig to avoid hydra.utils.instantiate recursive resolve."""
def __init__(self, config: DictConfig):
self.config = config
class AgentLoopBase(ABC):
"""An agent loop takes an input message, chat with OpenAI compatible LLM server and interact with various
environments.
Args:
trainer_config (DictConfig): whole config for main entrypoint.
server_manager (AsyncLLMServerManager): OpenAI compatible LLM server manager.
tokenizer (AutoTokenizer): Tokenizer for tokenize messages.
processor (AutoProcessor): Processor for process messages.
dataset_cls (type[Dataset]): Dataset class for creating dataset, Defaults to RLHFDataset.
data_config (DictConfigWrap): Dataset config.
"""
def __init__(
self,
trainer_config: DictConfigWrap,
server_manager: AsyncLLMServerManager,
tokenizer: AutoTokenizer,
processor: AutoProcessor,
dataset_cls: type[RLHFDataset],
data_config: DictConfigWrap,
**kwargs,
):
self.config = trainer_config.config
self.rollout_config, _ = _get_rollout_and_model_config(self.config)
self.server_manager = server_manager
self.tokenizer = tokenizer
self.processor = processor
self.dataset_cls = dataset_cls
self.data_config = data_config.config
self.apply_chat_template_kwargs = self.data_config.get("apply_chat_template_kwargs", {})
self.system_prompt = initialize_system_prompt(self.tokenizer, **self.apply_chat_template_kwargs)
self.loop = get_event_loop()
async def process_vision_info(self, messages: list[dict]) -> dict:
"""Extract images and videos from messages.
Args:
messages (list[dict]): Input messages.
Returns:
dict: Multi-modal data with keys "images" and "videos".
"""
multi_modal_data = {}
if self.processor is not None:
images, videos = await self.dataset_cls.process_vision_info(
messages, image_patch_size=self.processor.image_processor.patch_size, config=self.data_config
)
if images is not None:
multi_modal_data["images"] = images
if videos is not None:
multi_modal_data["videos"] = videos
return multi_modal_data
async def apply_chat_template(
self,
messages: list[dict],
tools: list[dict] = None,
images: list[Image.Image] = None,
videos: list[tuple[torch.Tensor, dict]] = None,
remove_system_prompt: bool = False,
):
"""Apply chat template to messages with optional tools, images, and videos.
Args:
messages (list[dict]): Input messages.
tools (list[dict], optional): Tools schemas. Defaults to None.
images (list[Image.Image], optional): Input images. Defaults to None.
videos (list[tuple[torch.Tensor, dict]], optional): Input videos. Defaults to None.
remove_system_prompt (bool, optional): Whether to remove system prompt. Defaults to False.
Returns:
list[int]: Prompt token ids.
"""
if self.processor is not None:
raw_prompt = await self.loop.run_in_executor(
None,
lambda: self.processor.apply_chat_template(
messages,
tools=tools,
add_generation_prompt=True,
tokenize=False,
**self.apply_chat_template_kwargs,
),
)
# split the videos and according metadatas
if videos is not None:
videos, video_metadatas = zip(*videos, strict=False)
videos, video_metadatas = list(videos), list(video_metadatas)
else:
video_metadatas = None
model_inputs = self.processor(
text=[raw_prompt],
images=images,
videos=videos,
video_metadata=video_metadatas,
return_tensors="pt",
do_sample_frames=False,
)
prompt_ids = normalize_token_ids(model_inputs.pop("input_ids"))
else:
tokenized_prompt = await self.loop.run_in_executor(
None,
lambda: self.tokenizer.apply_chat_template(
messages,
tools=tools,
add_generation_prompt=True,
tokenize=True,
**self.apply_chat_template_kwargs,
),
)
prompt_ids = normalize_token_ids(tokenized_prompt)
if remove_system_prompt:
prompt_ids = prompt_ids[len(self.system_prompt) :]
return prompt_ids
@abstractmethod
async def run(self, sampling_params: dict[str, Any], **kwargs) -> AgentLoopOutput:
"""Run agent loop to interact with LLM server and environment.
Args:
sampling_params (Dict[str, Any]): LLM sampling params.
**kwargs: dataset fields from `verl.utils.dataset.RLHFDataset`.
Returns:
AgentLoopOutput: Agent loop output.
"""
raise NotImplementedError
"""Agent loop registry: key is agent_name, value is a dict of agent loop config
used by hydra.utils.instantiate to initialize agent loop instance.
https://hydra.cc/docs/advanced/instantiate_objects/overview/
"""
_agent_loop_registry: dict[str, dict] = {}
def register(agent_name: str):
"""Register agent loop class."""
def decorator(subclass: type[AgentLoopBase]) -> type[AgentLoopBase]:
fqdn = f"{subclass.__module__}.{subclass.__qualname__}"
_agent_loop_registry[agent_name] = {"_target_": fqdn}
return subclass
return decorator
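# A minimal registration sketch (the class below is hypothetical):
#
#   @register("my_custom_agent")
#   class MyCustomAgentLoop(AgentLoopBase):
#       async def run(self, sampling_params, **kwargs) -> AgentLoopOutput:
#           ...
#
# The registered name is then selectable per sample via the "agent_name" field in the batch.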
class AgentLoopWorker:
"""Agent loop worker takes a batch of messages and run each message in an agent loop.
Args:
config (DictConfig): whole config for main entrypoint.
server_handles (List[ray.actor.ActorHandle]): OpenAI compatible LLM server actor handles.
reward_loop_worker_handles (List[ray.actor.ActorHandle]): Actor handles for streaming reward computation.
"""
def __init__(
self,
config: DictConfig,
server_handles: list[ray.actor.ActorHandle],
reward_loop_worker_handles: list[ray.actor.ActorHandle] = None,
):
self.config = config
rollout_config, model_config = _get_rollout_and_model_config(config)
self.rollout_config: RolloutConfig = omega_conf_to_dataclass(rollout_config)
self.model_config: HFModelConfig = omega_conf_to_dataclass(model_config)
# for recipe to change
if not hasattr(self, "server_manager"):
self.server_manager = AsyncLLMServerManager(config, server_handles)
self.dataset_cls = get_dataset_class(config.data)
self.reward_loop_worker_handles = reward_loop_worker_handles
self.tokenizer = self.model_config.tokenizer
self.processor = self.model_config.processor
agent_loop_config_path = self.rollout_config.agent.agent_loop_config_path
if agent_loop_config_path:
resolved_path = resolve_config_path(agent_loop_config_path)
agent_loop_configs = OmegaConf.load(resolved_path)
for agent_loop_config in agent_loop_configs:
_agent_loop_registry[agent_loop_config.name] = agent_loop_config
if self.model_config.get("custom_chat_template", None) is not None:
if self.model_config.processor is not None:
self.model_config.processor.chat_template = self.model_config.custom_chat_template
self.model_config.tokenizer.chat_template = self.model_config.custom_chat_template
trace_config = self.rollout_config.trace
RolloutTraceConfig.init(
self.rollout_config.trace.project_name,
self.rollout_config.trace.experiment_name,
trace_config.get("backend"),
trace_config.get("token2text", False),
trace_config.get("max_samples_per_step_per_worker", None),
)
async def generate_sequences(self, batch: DataProto) -> DataProto:
"""Generate sequences from agent loop.
Args:
batch (DataProto): Input batch.
Returns:
DataProto: Output batch.
- prompts: [bsz, prompt_length], prompt token ids from dataset.
- responses: [bsz, response_length], output token ids include response tokens
from LLM generation and observation tokens from tool_calls.
- response_mask: [bsz, response_length], 1 for LLM generated tokens, 0 for observation/padding tokens.
- input_ids: [bsz, prompt_length + response_length], whole sequence token ids, including prompt tokens
and response tokens.
- attention_mask: [bsz, prompt_length + response_length], 0 for padding tokens, 1 for other tokens.
- position_ids: [bsz, prompt_length + response_length], incremental position ids.
For multi-turn conversations:
responses: |<- LLM generation ->|<- tool_calls ->|<- LLM generation ->|<- padding ->|
response_mask: | 1, 1, 1, ..., 1, 1 | 0, 0, .., 0, 0 | 1, 1, 1, ..., 1, 1 | 0, 0, ..., 0|
"""
config = self.rollout_config
sampling_params = dict(
temperature=config.temperature,
top_p=config.top_p,
top_k=config.top_k,
repetition_penalty=1.0,
logprobs=config.calculate_log_probs,
)
# override sampling params for validation
if batch.meta_info.get("validate", False):
sampling_params["top_p"] = config.val_kwargs.top_p
sampling_params["top_k"] = config.val_kwargs.top_k
sampling_params["temperature"] = config.val_kwargs.temperature
# by default, we assume it's a single turn agent
if "agent_name" not in batch.non_tensor_batch:
default_agent_loop = config.agent.default_agent_loop
batch.non_tensor_batch["agent_name"] = np.array([default_agent_loop] * len(batch), dtype=object)
if "index" in batch.non_tensor_batch:
index = batch.non_tensor_batch["index"]
else:
index = np.arange(len(batch))
max_samples_per_worker = RolloutTraceConfig.get_instance().max_samples_per_step_per_worker
# For n rollouts per sample, we trace all n rollouts for selected samples
# Note: This sampling happens per-worker, so total traces = max_samples_per_worker * num_workers * n
if max_samples_per_worker is not None:
unique_sample_indices = np.unique(index)
if max_samples_per_worker < len(unique_sample_indices):
selected_samples = set(
np.random.choice(unique_sample_indices, max_samples_per_worker, replace=False).tolist()
)
traced_indices = set(i for i in range(len(batch)) if index[i] in selected_samples)
else:
traced_indices = set(range(len(batch)))
else:
traced_indices = set(range(len(batch)))
trajectory_info = await get_trajectory_info(
batch.meta_info.get("global_steps", -1), index.tolist(), batch.meta_info.get("validate", False)
)
tasks = []
for i in range(len(batch)):
trace_this_sample = i in traced_indices
kwargs = {k: v[i] for k, v in batch.non_tensor_batch.items()}
tasks.append(
asyncio.create_task(
self._run_agent_loop(sampling_params, trajectory_info[i], trace=trace_this_sample, **kwargs)
)
)
outputs = await asyncio.gather(*tasks)
output = self._postprocess(outputs, input_non_tensor_batch=batch.non_tensor_batch)
return output
async def _run_agent_loop(
self,
sampling_params: dict[str, Any],
trajectory: dict[str, Any],
*,
agent_name: str,
trace: bool = True,
**kwargs,
) -> _InternalAgentLoopOutput:
with rollout_trace_attr(
step=trajectory["step"],
sample_index=trajectory["sample_index"],
rollout_n=trajectory["rollout_n"],
validate=trajectory["validate"],
name="agent_loop",
trace=trace,
):
assert agent_name in _agent_loop_registry, (
f"Agent loop {agent_name} not registered, registered agent loops: {_agent_loop_registry.keys()}"
)
agent_loop_config = _agent_loop_registry[agent_name]
agent_loop = hydra.utils.instantiate(
config=agent_loop_config,
trainer_config=DictConfigWrap(config=self.config),
server_manager=self.server_manager,
tokenizer=self.tokenizer,
processor=self.processor,
dataset_cls=self.dataset_cls,
data_config=DictConfigWrap(self.config.data),
)
output: AgentLoopOutput = await agent_loop.run(sampling_params, **kwargs)
return await self._agent_loop_postprocess(output, **kwargs)
async def _agent_loop_postprocess(self, output, **kwargs) -> _InternalAgentLoopOutput:
"""Perform post-processing operations on the output of each individual agent loop."""
output.extra_fields["raw_prompt"] = kwargs["raw_prompt"]
# Some AgentLoop may have already computed the reward score, e.g SWE-agent.
# NOTE: consistent with the legacy batch version of generate_sequences that existed in the
# deprecated vLLM SPMD rollout implementation.
# prompt_ids: left padded with zeros (e.g., [0,0,0,0,1,2,3,4])
# response_ids: right padded with zeros (e.g., [5,6,7,8,0,0,0,0])
# input_ids: concatenation of prompt + response
# Mask:
        # For example, if the prompt is [1,2,3,4] and the response is [5,6,7,8,9,10,11,(tool start)12,13(tool end),14,15]
# - prompt_attention_mask: 0s for padding, 1s for tokens
# e.g., [0,0,0,0,1,1,1,1]
# - response_attention_mask: 0s for padding, 1s for tokens
# e.g., [1,1,1,1,1,1,1,1,1,1,1,0,0,0,0]
# attention_mask: concatenation of prompt_attention_mask and response_attention_mask
# e.g., [0,0,0,0,1,1,1,1(prompt),1,1,1,1,1,1,1,1,1,1,1,0,0,0,0(response)]
# - response_mask: 1s for LLM generated tokens, 0 for tool response/padding tokens
# e.g., [1,1,1,1,1,1,1,(tool start),0,0(tool end),1,1,0,0,0,0]
# - position_ids: sequential positions for tokens, starting at 0
# e.g., [0,0,0,0,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,0,0,0,0]
# TODO(wuxibin): remove padding and use tensordict.
self.tokenizer.padding_side = "left"
prompt_output = self.tokenizer.pad(
{"input_ids": output.prompt_ids},
padding="max_length",
max_length=self.rollout_config.prompt_length,
return_tensors="pt",
return_attention_mask=True,
)
if prompt_output["input_ids"].dim() == 1:
prompt_output["input_ids"] = prompt_output["input_ids"].unsqueeze(0)
prompt_output["attention_mask"] = prompt_output["attention_mask"].unsqueeze(0)
self.tokenizer.padding_side = "right"
response_output = self.tokenizer.pad(
{"input_ids": output.response_ids},
padding="max_length",
max_length=self.rollout_config.response_length,
return_tensors="pt",
return_attention_mask=True,
)
if response_output["input_ids"].dim() == 1:
response_output["input_ids"] = response_output["input_ids"].unsqueeze(0)
response_output["attention_mask"] = response_output["attention_mask"].unsqueeze(0)
response_mask_output = self.tokenizer.pad(
{"input_ids": output.response_mask},
padding="max_length",
max_length=self.rollout_config.response_length,
return_tensors="pt",
return_attention_mask=False,
)
if response_mask_output["input_ids"].dim() == 1:
response_mask_output["input_ids"] = response_mask_output["input_ids"].unsqueeze(0)
response_logprobs = None
if output.response_logprobs is not None:
pad_size = self.rollout_config.response_length - len(output.response_logprobs)
response_logprobs = torch.tensor(output.response_logprobs + [0.0] * pad_size).unsqueeze(0)
response_mask = response_mask_output["input_ids"] * response_output["attention_mask"]
attention_mask = torch.cat([prompt_output["attention_mask"], response_output["attention_mask"]], dim=1)
input_ids = torch.cat([prompt_output["input_ids"], response_output["input_ids"]], dim=1)
routed_experts = None
if output.routed_experts is not None:
total_length = input_ids.shape[1]
length, layer_num, topk_num = output.routed_experts.shape
if isinstance(output.routed_experts, np.ndarray):
experts_tensor = torch.from_numpy(output.routed_experts)
elif isinstance(output.routed_experts, torch.Tensor):
experts_tensor = output.routed_experts
else:
raise TypeError(f"Unsupported type for routed_experts: {type(output.routed_experts)}")
routed_experts = torch.zeros(1, total_length, layer_num, topk_num, dtype=experts_tensor.dtype)
# Calculate start position: left padding means original prompt starts at the end
start_pos = prompt_output["input_ids"].shape[1] - len(output.prompt_ids)
end_pos = min(start_pos + length, total_length)
# Add boundary checks for robustness
if start_pos < 0 or end_pos > total_length:
raise ValueError(
f"Invalid position range: start_pos={start_pos}, end_pos={end_pos}, total_length={total_length}"
)
routed_experts[:, start_pos:end_pos] = experts_tensor.unsqueeze(0)
multi_modal_inputs = self._compute_multi_modal_inputs(output, input_ids)
position_ids = self._compute_position_ids(input_ids, attention_mask, multi_modal_inputs)
await self._compute_score(
output,
prompts=prompt_output["input_ids"],
responses=response_output["input_ids"],
attention_mask=attention_mask,
input_ids=input_ids,
position_ids=position_ids,
kwargs=kwargs,
)
return _InternalAgentLoopOutput(
prompt_ids=prompt_output["input_ids"],
response_ids=response_output["input_ids"],
input_ids=input_ids,
position_ids=position_ids,
response_mask=response_mask,
attention_mask=attention_mask,
response_logprobs=response_logprobs,
routed_experts=routed_experts,
multi_modal_inputs=multi_modal_inputs,
multi_modal_data=output.multi_modal_data,
reward_score=output.reward_score,
num_turns=output.num_turns,
metrics=output.metrics,
extra_fields=output.extra_fields,
)
def _compute_multi_modal_inputs(self, output, input_ids) -> dict[str, torch.Tensor]:
"""Compute multi-modal inputs with image and video."""
multi_modal_inputs = {}
if self.processor is None:
return multi_modal_inputs
images = output.multi_modal_data.get("images")
videos = output.multi_modal_data.get("videos")
        # split the videos and their corresponding metadata
if videos is not None:
videos, video_metadatas = zip(*videos, strict=False)
videos, video_metadatas = list(videos), list(video_metadatas)
else:
video_metadatas = None
current_text = self.tokenizer.decode(input_ids.squeeze(0), skip_special_tokens=True)
multi_modal_inputs = self.processor(
text=[current_text],
images=images,
videos=videos,
video_metadata=video_metadatas,
return_tensors="pt",
do_sample_frames=False,
)
multi_modal_inputs.pop("input_ids", None)
multi_modal_inputs.pop("attention_mask", None)
# We must use dict(multi_modal_inputs) to convert BatchFeature values to a new dict
# because np.array() only keeps the keys for BatchFeature.
multi_modal_inputs = dict(multi_modal_inputs.convert_to_tensors("pt"))
image_grid_thw = multi_modal_inputs.get("image_grid_thw")
if image_grid_thw is not None:
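            # Each image_grid_thw row is a (t, h, w) patch grid; every temporal frame contributes
            # h * w patch tokens, so images_seqlens holds one h*w entry per frame of each image.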
images_seqlens = torch.repeat_interleave(image_grid_thw[:, 1] * image_grid_thw[:, 2], image_grid_thw[:, 0])
multi_modal_inputs["images_seqlens"] = images_seqlens
return multi_modal_inputs
def _compute_position_ids(self, input_ids, attention_mask, multi_modal_inputs) -> torch.Tensor:
"""Compute position ids for multi-modal inputs."""
if self.processor is None:
return compute_position_id_with_mask(attention_mask) # (1, seq_len)
image_grid_thw = multi_modal_inputs.get("image_grid_thw")
video_grid_thw = multi_modal_inputs.get("video_grid_thw")
        # The model's get_rope_index has been dynamically bound to the processor.
vision_position_ids, _ = self.processor.get_rope_index(
input_ids=input_ids,
image_grid_thw=image_grid_thw,
video_grid_thw=video_grid_thw,
attention_mask=attention_mask,
)
vision_position_ids = vision_position_ids.transpose(0, 1) # (3, 1, seq_len) => (1, 3, seq_len)
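        # Text position ids: valid tokens get 0..N-1 while padded slots keep the placeholder value 1;
        # stacking them with the 3 mrope vision dims below yields 4 position channels per token.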
valid_mask = attention_mask[0].bool()
text_position_ids = torch.ones((1, len(input_ids[0])), dtype=torch.long)
text_position_ids[0, valid_mask] = torch.arange(valid_mask.sum().item())
text_position_ids = text_position_ids.unsqueeze(0)
position_ids = torch.cat((text_position_ids, vision_position_ids), dim=1) # (1, 4, seq_length)
return position_ids
async def _compute_score(self, output, prompts, responses, attention_mask, input_ids, position_ids, kwargs):
"""Compute reward score for single sample."""
enable_async_reward = self.reward_loop_worker_handles is not None
if output.reward_score is None and enable_async_reward:
batch = TensorDict(
{
"prompts": prompts, # [1, prompt_length]
"responses": responses, # [1, response_length]
"attention_mask": attention_mask, # [1, prompt_length + response_length]
"input_ids": input_ids, # [1, prompt_length + response_length]
"position_ids": position_ids,
},
batch_size=1,
)
non_tensor_batch = {
**{k: np.array([v]) for k, v in kwargs.items()},
"__num_turns__": np.array([output.num_turns]),
"tool_extra_fields": np.array([output.extra_fields], dtype=object),
}
data = DataProto(
batch=batch,
non_tensor_batch=non_tensor_batch,
)
selected_reward_loop_worker_handle = random.choice(self.reward_loop_worker_handles)
result = await selected_reward_loop_worker_handle.compute_score.remote(data)
output.reward_score = result["reward_score"]
output.extra_fields["reward_extra_info"] = result["reward_extra_info"]
def _postprocess(
self,
inputs: list[_InternalAgentLoopOutput],
input_non_tensor_batch: dict | None = None,
) -> DataProto:
"""Process the padded outputs from _run_agent_loop and combine them into a batch."""
# Convert lists back to tensors and stack them to create a batch.
prompt_ids = torch.cat([input.prompt_ids for input in inputs], dim=0)
response_ids = torch.cat([input.response_ids for input in inputs], dim=0)
response_mask = torch.cat([input.response_mask for input in inputs], dim=0)
attention_mask = torch.cat([input.attention_mask for input in inputs], dim=0)
input_ids = torch.cat([input.input_ids for input in inputs], dim=0)
position_ids = torch.cat([input.position_ids for input in inputs], dim=0)
optional_outputs = {}
if inputs[0].response_logprobs is not None:
optional_outputs["rollout_log_probs"] = torch.cat([input.response_logprobs for input in inputs], dim=0)
if inputs[0].routed_experts is not None:
optional_outputs["routed_experts"] = torch.cat([input.routed_experts for input in inputs], dim=0)
batch = TensorDict(
{
"prompts": prompt_ids, # [bsz, prompt_length]
"responses": response_ids, # [bsz, response_length]
"response_mask": response_mask, # [bsz, response_length]
"input_ids": input_ids, # [bsz, prompt_length + response_length]
"attention_mask": attention_mask, # [bsz, prompt_length + response_length]
# position_ids: [bsz, 3, prompt_length + response_length] or [bsz, prompt_length + response_length]
"position_ids": position_ids,
**optional_outputs,
},
batch_size=len(inputs),
)
scores = [input.reward_score for input in inputs]
if all(score is not None for score in scores):
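            # Place each scalar score at the last valid (non-padded) response token of its sample;
            # every other position of rm_scores stays zero (sparse, outcome-level reward).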
prompt_length = prompt_ids.size(1)
response_length = attention_mask[:, prompt_length:].sum(dim=1) - 1
rm_scores = torch.zeros_like(response_mask, dtype=torch.float32)
rm_scores[torch.arange(response_mask.size(0)), response_length] = torch.tensor(scores, dtype=torch.float32)
batch["rm_scores"] = rm_scores
non_tensor_batch = {
"__num_turns__": np.array([input.num_turns for input in inputs], dtype=np.int32),
}
if self.reward_loop_worker_handles is None and input_non_tensor_batch:
non_tensor_batch.update(input_non_tensor_batch)
# add reward_extra_info to non_tensor_batch
reward_extra_infos = [input.extra_fields.get("reward_extra_info", {}) for input in inputs]
reward_extra_keys = list(reward_extra_infos[0].keys())
for key in reward_extra_keys:
non_tensor_batch[key] = np.array([info[key] for info in reward_extra_infos])
# Add multi_modal_inputs to non_tensor_batch if any samples have them
multi_modal_inputs_list = [input.multi_modal_inputs for input in inputs]
if any(mmi is not None for mmi in multi_modal_inputs_list):
non_tensor_batch["multi_modal_inputs"] = np.array(multi_modal_inputs_list, dtype=object)
metrics = [input.metrics.model_dump() for input in inputs]
# Collect extra fields from all inputs and convert them to np.ndarray
# Keep a stable set of keys so downstream batch concat stays consistent across agent loops.
extra_fields = {}
default_extra_keys = {
"turn_scores",
"tool_rewards",
"is_cancel",
"param_version_start",
"param_version_end",
"extras",
}
all_keys = set(key for input_item in inputs for key in input_item.extra_fields) | default_extra_keys
for key in all_keys:
temp_arr = np.empty(len(inputs), dtype=object)
temp_arr[:] = [input.extra_fields.get(key) for input in inputs]
extra_fields[key] = temp_arr
non_tensor_batch.update(extra_fields)
# Only include reward_extra_keys in meta_info if rm_scores is in batch
# This avoids conflicts when reward_tensor is merged later in ray_trainer.py
if "rm_scores" in batch.keys():
meta_info = {"metrics": metrics, "reward_extra_keys": reward_extra_keys}
else:
meta_info = {"metrics": metrics}
return DataProto(
batch=batch,
non_tensor_batch=non_tensor_batch,
meta_info=meta_info,
)
async def get_trajectory_info(step, index, validate):
"""Get trajectory info.
Args:
step (int): global steps in the trainer.
        index (list): from the datastore extra_info.index column.
        validate (bool): whether this is a validation step.
Returns:
list: trajectory.
"""
trajectory_info = []
rollout_n = 0
for i in range(len(index)):
if i > 0 and index[i - 1] == index[i]:
rollout_n += 1
else:
rollout_n = 0
trajectory_info.append({"step": step, "sample_index": index[i], "rollout_n": rollout_n, "validate": validate})
return trajectory_info
class AgentLoopManager:
"""Agent loop manager that manages a group of agent loop workers.
    - if worker_group is not None, the rollout server runs in hybrid mode and shares GPUs with the training engine.
    - otherwise, the rollout server runs in standalone mode on separate GPUs, e.g., one-step-off/fully async training.
Args:
config (DictConfig): whole config for main entrypoint.
worker_group (RayWorkerGroup): ActorRolloutRef worker group for hybrid mode; None for standalone mode.
rollout_resource_pool (RayResourcePool): Resource pool for hybrid mode, only used by TensorRT-LLM.
reward_loop_worker_handles (List[ray.actor.ActorHandle]): Actor handles for streaming reward computation.
"""
def __init__(
self,
config: DictConfig,
worker_group: RayWorkerGroup = None,
rollout_resource_pool: RayResourcePool = None,
reward_loop_worker_handles: list[ray.actor.ActorHandle] = None,
):
self.config = config
self.rollout_config, self.model_config = _get_rollout_and_model_config(config)
self.worker_group = worker_group
self.rollout_resource_pool = rollout_resource_pool
self.reward_loop_worker_handles = reward_loop_worker_handles
assert worker_group is not None or self.rollout_config.nnodes > 0, "nnodes must be > 0 in standalone mode"
        # allow recipes to override these classes
if not hasattr(self, "rollout_replica_class"):
self.rollout_replica_class = get_rollout_replica_class(self.rollout_config.name)
if not hasattr(self, "agent_loop_workers_class"):
self.agent_loop_workers_class = ray.remote(AgentLoopWorker)
@classmethod
@auto_await
async def create(
cls,
config: DictConfig,
worker_group: RayWorkerGroup = None,
rollout_resource_pool: RayResourcePool = None,
reward_loop_worker_handles: list[ray.actor.ActorHandle] = None,
):
"""Create agent loop manager."""
instance = cls(config, worker_group, rollout_resource_pool, reward_loop_worker_handles)
await instance._initialize_llm_servers()
await instance._init_agent_loop_workers()
return instance
async def _initialize_llm_servers(self):
rollout_world_size = (
self.rollout_config.tensor_model_parallel_size
* self.rollout_config.data_parallel_size
* self.rollout_config.pipeline_model_parallel_size
)
world_size = (
self.worker_group.world_size
if self.worker_group
else self.rollout_config.n_gpus_per_node * self.rollout_config.nnodes
)
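        # Illustrative example: 16 GPUs with TP=2, DP=1, PP=1 gives rollout_world_size=2,
        # so num_replicas=8 independent rollout replicas are created below.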
num_replicas = world_size // rollout_world_size
self.rollout_replicas = [
self.rollout_replica_class(
replica_rank=replica_rank,
config=self.rollout_config,
model_config=self.model_config,
gpus_per_node=self.rollout_config.n_gpus_per_node,
)
for replica_rank in range(num_replicas)
]
if self.worker_group and self.rollout_config.name != "trtllm":
await asyncio.gather(*[server.init_hybrid(self.worker_group) for server in self.rollout_replicas])
# TODO: unify trtllm to init_hybrid
elif self.worker_group and self.rollout_config.name == "trtllm":
await asyncio.gather(
*[
server.init_hybrid_colocated(self.worker_group, self.rollout_resource_pool)
for server in self.rollout_replicas
]
)
else:
await asyncio.gather(*[server.init_standalone() for server in self.rollout_replicas])
self.server_handles = [server._server_handle for server in self.rollout_replicas]
self.server_addresses = [server._server_address for server in self.rollout_replicas]
print(f"AgentLoopManager: {self.server_addresses}")
# Update Prometheus configuration with server addresses
if self.rollout_config.prometheus.enable:
if self.rollout_config.disable_log_stats:
raise ValueError("PROMETHEUS needs disable_log_stats==False, but it is currently True.")
update_prometheus_config(self.rollout_config.prometheus, self.server_addresses, self.rollout_config.name)
async def _init_agent_loop_workers(self):
self.agent_loop_workers = []
num_workers = self.rollout_config.agent.num_workers
node_ids = [node["NodeID"] for node in ray.nodes() if node["Alive"] and node["Resources"].get("CPU", 0) > 0]
for i in range(num_workers):
            # Round-robin scheduling over all alive nodes
node_id = node_ids[i % len(node_ids)]
self.agent_loop_workers.append(
self.agent_loop_workers_class.options(
name=f"agent_loop_worker_{i}" + f"_{uuid4().hex[:8]}",
scheduling_strategy=ray.util.scheduling_strategies.NodeAffinitySchedulingStrategy(
node_id=node_id, soft=True
),
).remote(self.config, self.server_handles, self.reward_loop_worker_handles)
)
@auto_await
async def generate_sequences(self, prompts: DataProto) -> DataProto:
"""Split input batch and dispatch to agent loop workers.
Args:
prompts (DataProto): Input batch.
Returns:
DataProto: Output batch.
"""
        chunks = prompts.chunk(len(self.agent_loop_workers))
outputs = await asyncio.gather(
*[
worker.generate_sequences.remote(chunk)
                for worker, chunk in zip(self.agent_loop_workers, chunks, strict=True)
]
)
output = DataProto.concat(outputs)
# calculate performance metrics
metrics = [output.meta_info.pop("metrics") for output in outputs] # List[List[Dict[str, str]]]
timing = self._performance_metrics(metrics, output)
output.meta_info = {"timing": timing, **outputs[0].meta_info}
return output
def _performance_metrics(self, metrics: list[list[dict[str, str]]], output: DataProto) -> dict[str, float]:
timing = {}
t_generate_sequences = np.array([metric["generate_sequences"] for chunk in metrics for metric in chunk])
t_tool_calls = np.array([metric["tool_calls"] for chunk in metrics for metric in chunk])
num_preempted = np.array([metric["num_preempted"] for chunk in metrics for metric in chunk])
timing["agent_loop/num_preempted/min"] = num_preempted.min()
timing["agent_loop/num_preempted/max"] = num_preempted.max()
timing["agent_loop/num_preempted/mean"] = num_preempted.mean()
timing["agent_loop/generate_sequences/min"] = t_generate_sequences.min()
timing["agent_loop/generate_sequences/max"] = t_generate_sequences.max()
timing["agent_loop/generate_sequences/mean"] = t_generate_sequences.mean()
timing["agent_loop/tool_calls/min"] = t_tool_calls.min()
timing["agent_loop/tool_calls/max"] = t_tool_calls.max()
timing["agent_loop/tool_calls/mean"] = t_tool_calls.mean()
# batch sequence generation is bounded by the slowest sample
slowest = np.argmax(t_generate_sequences + t_tool_calls)
attention_mask = output.batch["attention_mask"][slowest]
prompt_length = output.batch["prompts"].shape[1]
timing["agent_loop/slowest/generate_sequences"] = t_generate_sequences[slowest]
timing["agent_loop/slowest/tool_calls"] = t_tool_calls[slowest]
timing["agent_loop/slowest/prompt_length"] = attention_mask[:prompt_length].sum().item()
timing["agent_loop/slowest/response_length"] = attention_mask[prompt_length:].sum().item()
timing["agent_loop/slowest/num_preempted"] = num_preempted[slowest]
return timing
@auto_await
async def clear_kv_cache(self):
"""Clear all rollout kv cache, but don`t sleep."""
await asyncio.gather(*[replica.clear_kv_cache() for replica in self.rollout_replicas])
@auto_await
async def start_profile(self, **kwargs):
"""Start profiling on all rollout replicas."""
await asyncio.gather(*[replica.start_profile(**kwargs) for replica in self.rollout_replicas])
@auto_await
async def stop_profile(self):
"""Stop profiling on all rollout replicas."""
await asyncio.gather(*[replica.stop_profile() for replica in self.rollout_replicas])
| {
"repo_id": "verl-project/verl",
"file_path": "verl/experimental/agent_loop/agent_loop.py",
"license": "Apache License 2.0",
"lines": 878,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/experimental/agent_loop/tool_agent_loop.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import json
import logging
import os
from enum import Enum
from typing import Any, Optional
from uuid import uuid4
import torch
from PIL import Image
from verl.experimental.agent_loop.agent_loop import (
AgentLoopBase,
AgentLoopOutput,
register,
)
from verl.experimental.agent_loop.tool_parser import FunctionCall, ToolParser
from verl.experimental.agent_loop.utils import build_gpt_oss_tool_response_text
from verl.interactions.base import BaseInteraction
from verl.interactions.utils.interaction_registry import initialize_interactions_from_config
from verl.tools.schemas import ToolResponse
from verl.tools.utils.tool_registry import initialize_tools_from_config
from verl.utils.profiler import simple_timer
from verl.utils.rollout_trace import rollout_trace_op
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
class AgentState(Enum):
PENDING = "pending"
GENERATING = "generating"
PROCESSING_TOOLS = "processing_tools"
TERMINATED = "terminated"
INTERACTING = "interacting"
class AgentData:
"""Encapsulates all state variables for the agent loop. AgentData is passed to tool calling in case that
tool may need to access full history state. User can store any tool session data in `extra_fields`."""
def __init__(
self,
messages: list[dict[str, Any]],
image_data: list[Image.Image],
video_data: list[tuple[torch.Tensor, dict[str, Any]]],
metrics: dict[str, Any],
request_id: str,
tools_kwargs: dict[str, Any],
interaction: Optional[BaseInteraction] = None,
interaction_kwargs: Optional[dict[str, Any]] = None,
):
self.messages = messages
self.image_data = image_data
self.video_data = video_data
self.metrics = metrics
self.request_id = request_id
self.tools_kwargs = tools_kwargs
self.interaction = interaction
self.interaction_kwargs = interaction_kwargs or {}
# State variables
self.prompt_ids: list[int] = []
self.response_ids: list[int] = []
self.response_mask: list[int] = []
self.response_logprobs: list[float] = []
self.turn_scores: list[float] = []
self.tool_rewards: list[float] = []
self.user_turns = 0
self.assistant_turns = 0
# Temporary state for tool calls
self.tool_calls: list[FunctionCall] = []
self.routed_experts = None
# Extra fields for dynamic addition, e.g., tool session data
self.extra_fields: dict[str, Any] = {}
@register("tool_agent")
class ToolAgentLoop(AgentLoopBase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Initialize tools from config file
self.max_user_turns = self.rollout_config.multi_turn.max_user_turns
self.max_assistant_turns = self.rollout_config.multi_turn.max_assistant_turns
self.max_parallel_calls = self.rollout_config.multi_turn.max_parallel_calls
self.max_tool_response_length = self.rollout_config.multi_turn.max_tool_response_length
self.tool_response_truncate_side = self.rollout_config.multi_turn.tool_response_truncate_side
tool_config_path = self.rollout_config.multi_turn.tool_config_path
tool_list = initialize_tools_from_config(tool_config_path) if tool_config_path else []
self.tools = {tool.name: tool for tool in tool_list}
self.tool_schemas = [tool.tool_schema.model_dump(exclude_unset=True, exclude_none=True) for tool in tool_list]
self.tool_parser = ToolParser.get_tool_parser(self.rollout_config.multi_turn.format, self.tokenizer)
self.tool_parser_name = self.rollout_config.multi_turn.format
self.prompt_length = self.rollout_config.prompt_length
self.response_length = self.rollout_config.response_length
# Initialize interactions from config file
self.interaction_config_file = self.rollout_config.multi_turn.interaction_config_path
if self.interaction_config_file:
self.interaction_map: dict[str, BaseInteraction] = self._initialize_interactions(
self.interaction_config_file
)
@rollout_trace_op
async def run(self, sampling_params: dict[str, Any], **kwargs) -> AgentLoopOutput:
messages = list(kwargs["raw_prompt"])
# extract images and videos from messages
multi_modal_data = await self.process_vision_info(messages)
images = multi_modal_data.get("images")
videos = multi_modal_data.get("videos")
metrics = {}
request_id = uuid4().hex
tools_kwargs = kwargs.get("tools_kwargs", {})
# Initialize interaction if needed
interaction = None
interaction_kwargs = {}
if self.interaction_config_file:
interaction_kwargs = kwargs["extra_info"]["interaction_kwargs"]
if "name" not in interaction_kwargs:
raise ValueError("'name' key is required in interaction_kwargs")
interaction_name = interaction_kwargs["name"]
if interaction_name not in self.interaction_map:
raise ValueError(
f"Interaction '{interaction_name}' not found in interaction_map. Available interactions: "
f"{list(self.interaction_map.keys())}"
)
interaction = self.interaction_map[interaction_name]
await interaction.start_interaction(request_id, **interaction_kwargs)
# Create AgentData instance to encapsulate all state
agent_data = AgentData(
messages=messages,
image_data=images,
video_data=videos,
metrics=metrics,
request_id=request_id,
tools_kwargs=tools_kwargs,
interaction=interaction,
interaction_kwargs=interaction_kwargs,
)
# State machine loop
state = AgentState.PENDING
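        # Transition sketch: PENDING -> GENERATING -> (PROCESSING_TOOLS | INTERACTING | TERMINATED);
        # PROCESSING_TOOLS and INTERACTING loop back to GENERATING until the response-length budget,
        # a max-turn limit, or the interaction's termination signal ends the episode.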
while state != AgentState.TERMINATED:
if state == AgentState.PENDING:
state = await self._handle_pending_state(agent_data, sampling_params)
elif state == AgentState.GENERATING:
state = await self._handle_generating_state(agent_data, sampling_params)
elif state == AgentState.PROCESSING_TOOLS:
state = await self._handle_processing_tools_state(agent_data)
elif state == AgentState.INTERACTING:
state = await self._handle_interacting_state(agent_data)
else:
logger.error(f"Invalid state: {state}")
state = AgentState.TERMINATED
# Finalize output
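        # agent_data.prompt_ids now holds the full token stream (initial prompt + every generated/tool turn);
        # the tail covered by response_mask is split back out as the response below.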
response_ids = agent_data.prompt_ids[-len(agent_data.response_mask) :]
prompt_ids = agent_data.prompt_ids[: len(agent_data.prompt_ids) - len(agent_data.response_mask)]
multi_modal_data = {}
if agent_data.image_data is not None:
multi_modal_data["images"] = agent_data.image_data
if agent_data.video_data is not None:
multi_modal_data["videos"] = agent_data.video_data
output = AgentLoopOutput(
prompt_ids=prompt_ids,
response_ids=response_ids[: self.response_length],
response_mask=agent_data.response_mask[: self.response_length],
multi_modal_data=multi_modal_data,
response_logprobs=agent_data.response_logprobs[: self.response_length]
if agent_data.response_logprobs
else None,
num_turns=agent_data.user_turns + agent_data.assistant_turns + 1,
metrics=agent_data.metrics,
routed_experts=agent_data.routed_experts,
extra_fields={},
)
output.extra_fields.update({"turn_scores": agent_data.turn_scores, "tool_rewards": agent_data.tool_rewards})
return output
async def _handle_pending_state(self, agent_data: AgentData, sampling_params: dict[str, Any]) -> AgentState:
"""Handle the pending state: prepare the prompt and start generation."""
prompt_ids = await self.apply_chat_template(
agent_data.messages,
tools=self.tool_schemas,
images=agent_data.image_data,
videos=agent_data.video_data,
)
agent_data.prompt_ids = prompt_ids
return AgentState.GENERATING
async def _handle_generating_state(
self, agent_data: AgentData, sampling_params: dict[str, Any], ignore_termination: bool = False
) -> AgentState:
"""Handle the generating state: generate model response and check for tool calls."""
add_messages: list[dict[str, Any]] = []
with simple_timer("generate_sequences", agent_data.metrics):
output = await self.server_manager.generate(
request_id=agent_data.request_id,
prompt_ids=agent_data.prompt_ids,
sampling_params=sampling_params,
image_data=agent_data.image_data,
video_data=agent_data.video_data,
)
# first time to set num_preempted
if agent_data.metrics.get("num_preempted") is None:
agent_data.metrics["num_preempted"] = output.num_preempted if output.num_preempted is not None else -1
# then add num_preempted to the metrics
else:
agent_data.metrics["num_preempted"] += output.num_preempted if output.num_preempted is not None else 0
agent_data.assistant_turns += 1
agent_data.response_ids = output.token_ids
agent_data.prompt_ids += agent_data.response_ids
agent_data.response_mask += [1] * len(agent_data.response_ids)
if output.log_probs:
agent_data.response_logprobs += output.log_probs
if output.routed_experts is not None:
agent_data.routed_experts = output.routed_experts
# Check termination conditions
if not ignore_termination and len(agent_data.response_mask) >= self.response_length:
return AgentState.TERMINATED
if self.max_assistant_turns and agent_data.assistant_turns >= self.max_assistant_turns:
return AgentState.TERMINATED
if self.max_user_turns and agent_data.user_turns >= self.max_user_turns:
return AgentState.TERMINATED
# Extract tool calls
_, agent_data.tool_calls = await self.tool_parser.extract_tool_calls(agent_data.response_ids)
# Handle interaction if needed
if self.interaction_config_file:
assistant_message = await self.loop.run_in_executor(
None, lambda: self.tokenizer.decode(agent_data.response_ids, skip_special_tokens=True)
)
add_messages.append({"role": "assistant", "content": assistant_message})
agent_data.messages.extend(add_messages)
# Determine next state
if agent_data.tool_calls:
return AgentState.PROCESSING_TOOLS
elif self.interaction_config_file:
return AgentState.INTERACTING
else:
return AgentState.TERMINATED
async def _handle_processing_tools_state(self, agent_data: AgentData) -> AgentState:
"""Handle the processing tools state: execute tool calls and prepare tool responses."""
add_messages: list[dict[str, Any]] = []
new_images_this_turn: list[Any] = [] # Local variable instead of agent_data attribute
tasks = []
tool_call_names = []
for tool_call in agent_data.tool_calls[: self.max_parallel_calls]:
tasks.append(self._call_tool(tool_call, agent_data.tools_kwargs, agent_data))
tool_call_names.append(tool_call.name)
with simple_timer("tool_calls", agent_data.metrics):
responses = await asyncio.gather(*tasks)
# Process tool responses and update multi_modal_data
for tool_response, tool_reward, _ in responses:
# Create message from tool response
if tool_response.image or tool_response.video:
# Multi-modal content with structured format
if not getattr(self.processor, "image_processor", None):
raise ValueError(
"Multimedia data can only be processed by `processor`, but the processor is None. "
"This error is often caused if you are using a LLM model but your tool returns multimodal "
"data. Plase use a vlm as the base model."
)
content = []
if tool_response.image:
content.append({"type": "image"})
if tool_response.video:
content.append({"type": "video"})
if tool_response.text:
content.append({"type": "text", "text": tool_response.text})
message = {"role": "tool", "content": content}
else:
# Text-only content
message = {"role": "tool", "content": tool_response.text or ""}
add_messages.append(message)
# Handle image data
if tool_response.image:
# Add new image data
if isinstance(tool_response.image, list):
# Ensure all elements in the list are valid image objects
for img in tool_response.image:
if img is not None: # Add a check to ensure the image is not None
new_images_this_turn.append(img) # Using local variable
else:
# Ensure the image is not None
if tool_response.image is not None:
new_images_this_turn.append(tool_response.image) # Using local variable
# Handle video data
if tool_response.video:
# Currently not supported, raise informative error
logger.warning("Multimedia type 'video' is not currently supported. Only 'image' is supported.")
raise NotImplementedError(
"Multimedia type 'video' is not currently supported. Only 'image' is supported."
)
if tool_reward is not None:
agent_data.tool_rewards.append(tool_reward)
agent_data.messages.extend(add_messages)
if self.tool_parser_name == "gpt-oss":
logger.info("manually format tool responses for gpt-oss")
tool_response_text = build_gpt_oss_tool_response_text(add_messages, tool_call_names)
response_ids = await self.loop.run_in_executor(
None, lambda: self.tokenizer.encode(tool_response_text, add_special_tokens=False)
)
else:
# Note that we have to pass None to the images and videos if there are no new images / videos
# to stay compatible with downstream image processing logic!
images = new_images_this_turn if new_images_this_turn else None
videos = None
response_ids = await self.apply_chat_template(
add_messages,
images=images,
videos=videos,
remove_system_prompt=True,
)
if len(agent_data.response_mask) + len(response_ids) >= self.response_length:
return AgentState.TERMINATED
# Update prompt_ids and response_mask
if new_images_this_turn:
if agent_data.image_data is None:
agent_data.image_data = []
elif not isinstance(agent_data.image_data, list):
agent_data.image_data = [agent_data.image_data]
for img in new_images_this_turn:
agent_data.image_data.append(img)
agent_data.prompt_ids += response_ids
agent_data.response_mask += [0] * len(response_ids)
if agent_data.response_logprobs:
agent_data.response_logprobs += [0.0] * len(response_ids)
agent_data.user_turns += 1
return AgentState.GENERATING
async def _handle_interacting_state(self, agent_data: AgentData) -> AgentState:
"""Handle the interacting state: get user input from interaction."""
(
should_terminate_sequence,
interaction_responses,
reward,
metrics,
) = await agent_data.interaction.generate_response(
agent_data.request_id, agent_data.messages, **agent_data.interaction_kwargs
)
agent_data.user_turns += 1
add_messages: list[dict[str, Any]] = [{"role": "user", "content": interaction_responses}]
agent_data.messages.extend(add_messages)
if reward is not None:
agent_data.turn_scores.append(reward)
# Update prompt with user responses (similar to _handle_processing_tools_state)
response_ids = await self.apply_chat_template(
add_messages,
remove_system_prompt=True,
)
# Update prompt_ids and response_mask
agent_data.prompt_ids += response_ids
agent_data.response_mask += [0] * len(response_ids)
if agent_data.response_logprobs:
agent_data.response_logprobs += [0.0] * len(response_ids)
# double check prompt
# Check termination condition
if should_terminate_sequence:
return AgentState.TERMINATED
else:
return AgentState.GENERATING
async def _call_tool(
self, tool_call: FunctionCall, tools_kwargs: dict[str, Any], agent_data: AgentData
) -> tuple[ToolResponse, float, dict]:
"""Call tool and return tool response."""
tool, instance_id = None, None
try:
# TODO: append malformed tool_call to the prompt: invalid function name or arguments
tool_name = tool_call.name
tool_args = json.loads(tool_call.arguments)
tool = self.tools[tool_name]
kwargs = tools_kwargs.get(tool_name, {})
instance_id, _ = await tool.create(create_kwargs=kwargs.get("create_kwargs", {}))
tool_execution_response, tool_reward, res = await tool.execute(
instance_id, tool_args, agent_data=agent_data
)
except Exception as e:
logger.warning(f"Error when executing tool: {e}")
return (
ToolResponse(
text=f"Error when executing tool: {e}",
),
0.0,
{},
)
finally:
if tool and instance_id:
await tool.release(instance_id)
tool_response_text = tool_execution_response.text
if tool_response_text and len(tool_response_text) > self.max_tool_response_length:
if self.tool_response_truncate_side == "left":
tool_response_text = tool_response_text[: self.max_tool_response_length] + "...(truncated)"
elif self.tool_response_truncate_side == "right":
tool_response_text = "(truncated)..." + tool_response_text[-self.max_tool_response_length :]
else:
length = self.max_tool_response_length // 2
tool_response_text = tool_response_text[:length] + "...(truncated)..." + tool_response_text[-length:]
# Create ToolResponse from tool execution result
tool_response_kwargs = {"text": tool_response_text}
# Add multimedia data if present
for attr_name in ["image", "video"]:
if hasattr(tool_execution_response, attr_name):
attr_value = getattr(tool_execution_response, attr_name)
if attr_value is not None:
tool_response_kwargs[attr_name] = attr_value
return ToolResponse(**tool_response_kwargs), tool_reward, res
def _initialize_interactions(self, interaction_config_file):
"""Initialize interactions from configuration.
Returns:
dict[str, BaseInteraction]: A dictionary mapping interaction names to interaction instances.
"""
if interaction_config_file is None:
return {}
interaction_map = initialize_interactions_from_config(interaction_config_file)
return interaction_map
| {
"repo_id": "verl-project/verl",
"file_path": "verl/experimental/agent_loop/tool_agent_loop.py",
"license": "Apache License 2.0",
"lines": 411,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/model_merger/__main__.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module is used to merge huggingface model and test verl checkpoints from FSDP and Megatron backends.
To merge FSDP checkpoints:
```sh
python -m verl.model_merger merge \
--backend fsdp \
--local_dir checkpoints/verl_fsdp_gsm8k_examples/qwen2_5_0b5_fsdp_saveload/global_step_1/actor \
--target_dir /path/to/merged_hf_model
```
To merge Megatron checkpoints:
```sh
python -m verl.model_merger merge \
--backend megatron \
--tie-word-embedding \
--local_dir checkpoints/verl_megatron_gsm8k_examples/qwen2_5_0b5_megatron_saveload/global_step_1/actor \
--target_dir /path/to/merged_hf_model
```
or use distributed merge for large models like DeepSeek-V3 671B
```sh
torchrun --nproc_per_node 1 --nnodes 8 --node_rank ${RANK} -m verl.model_merger merge \
--backend megatron \
--local_dir ./checkpoints/global_step_1/actor \
--target_dir /path/to/merged_hf_model
```
For more details, please refer to documentation:
https://verl.readthedocs.io/en/latest/advance/checkpoint.html#convert-fsdp-and-megatron-checkpoints-to-huggingface-format-model
"""
from .base_model_merger import generate_config_from_args, parse_args
def main():
args = parse_args()
config = generate_config_from_args(args)
print(f"config: {config}")
if config.backend == "fsdp":
from .fsdp_model_merger import FSDPModelMerger
merger = FSDPModelMerger(config)
elif config.backend == "megatron":
from .megatron_model_merger import MegatronModelMerger
merger = MegatronModelMerger(config)
else:
raise NotImplementedError(f"Unknown backend: {config.backend}")
merger.merge_and_save()
merger.cleanup()
if __name__ == "__main__":
main()
| {
"repo_id": "verl-project/verl",
"file_path": "verl/model_merger/__main__.py",
"license": "Apache License 2.0",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/model_merger/base_model_merger.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import warnings
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Optional
import torch
from accelerate import init_empty_weights
from transformers import (
AutoConfig,
AutoModelForCausalLM,
AutoModelForTokenClassification,
GenerationConfig,
)
from verl.utils import hf_processor, hf_tokenizer
from verl.utils.transformers_compat import get_auto_model_for_vision2seq
AutoModelForVision2Seq = get_auto_model_for_vision2seq()
def parse_args():
parser = argparse.ArgumentParser(description="verl model merger")
subparsers = parser.add_subparsers(dest="operation", required=True, help="Specify 'merge' or 'test' operation.")
base_op_parser = argparse.ArgumentParser(add_help=False)
base_op_parser.add_argument(
"--backend", type=str, required=True, choices=["fsdp", "megatron"], help="The backend of the model"
)
base_op_parser.add_argument("--local_dir", type=str, default=None, help="Path to the saved model checkpoints.")
base_op_parser.add_argument(
"--tie-word-embedding",
action="store_true",
help="Whether to tie word embedding weights (currently only Megatron supported)",
)
base_op_parser.add_argument("--trust-remote-code", action="store_true", help="Whether to trust remote code")
base_op_parser.add_argument(
"--is-value-model",
action="store_true",
help="Whether the model is a value model (currently only Megatron supported)",
)
base_op_parser.add_argument(
"--use_cpu_initialization",
action="store_true",
help="Whether to use CPU initialization for the model. This is useful for large models that cannot "
"fit into GPU memory during initialization.",
)
merge_parser = subparsers.add_parser("merge", parents=[base_op_parser], help="Merge model checkpoints and save.")
merge_parser.add_argument(
"--target_dir", default="tmp", type=str, help="Directory to save the merged huggingface model"
)
merge_parser.add_argument(
"--hf_upload_path", default=None, type=str, help="Hugging Face repository ID to upload the model"
)
merge_parser.add_argument(
"--private", action="store_true", help="Whether to upload the model to a private Hugging Face repository"
)
test_parser = subparsers.add_parser(
"test", parents=[base_op_parser], help="Test merged model against a reference Hugging Face model"
)
test_parser.add_argument(
"--test_hf_dir", type=str, required=True, help="Path to the reference Hugging Face model directory for testing"
)
args = parser.parse_args()
return args
@dataclass
class ModelMergerConfig:
"""Configuration for model merger operations.
Args:
operation (str): Operation type - 'merge' or 'test'.
backend (str): Backend type for the model ('fsdp' or 'megatron').
target_dir (Optional[str]): Directory to save the merged huggingface model. Defaults to "tmp".
hf_upload_path (Optional[str]): Hugging Face repository ID to upload the model. Defaults to None.
private (bool): Whether to upload the model to a private Hugging Face repository. Defaults to False.
test_hf_dir (Optional[str]): Path to the reference Hugging Face model directory for testing. Defaults to None.
tie_word_embedding (bool): Whether to tie word embedding weights (currently only Megatron
supported). Defaults to False.
trust_remote_code (bool): Whether to trust remote code. Defaults to False.
is_value_model (bool): Whether the model is a value model (currently only Megatron
supported). Defaults to False.
local_dir (Optional[str]): Path to the saved model checkpoints. Defaults to None.
hf_model_config_path (Optional[str]): Path to HuggingFace model configuration files. Defaults to None.
hf_upload (bool): Whether to upload to HuggingFace (computed automatically). Not for initialization.
use_cpu_initialization (bool): Whether to use CPU initialization for large models. Defaults to False.
"""
operation: str # 'merge' or 'test'
backend: str
target_dir: Optional[str] = "tmp"
hf_upload_path: Optional[str] = None
private: bool = False
test_hf_dir: Optional[str] = None
tie_word_embedding: bool = False
trust_remote_code: bool = False
is_value_model: bool = False
local_dir: Optional[str] = None
hf_model_config_path: Optional[str] = None
hf_upload: bool = field(init=False)
use_cpu_initialization: bool = False
def __post_init__(self):
self.hf_upload = self.operation == "merge" and bool(self.hf_upload_path)
if self.operation == "test":
self.target_dir = None
self.hf_upload_path = None
self.private = False
def generate_config_from_args(args: argparse.Namespace) -> ModelMergerConfig:
common_config_args = {
"operation": args.operation,
"backend": args.backend,
"tie_word_embedding": args.tie_word_embedding,
"trust_remote_code": args.trust_remote_code,
"is_value_model": args.is_value_model,
"local_dir": args.local_dir,
"hf_model_config_path": os.path.join(args.local_dir, "huggingface"),
"use_cpu_initialization": args.use_cpu_initialization,
}
if args.operation == "merge":
config = ModelMergerConfig(
**common_config_args,
target_dir=args.target_dir,
hf_upload_path=args.hf_upload_path,
private=args.private,
test_hf_dir=None,
)
os.makedirs(config.target_dir, exist_ok=True)
elif args.operation == "test":
config = ModelMergerConfig(
**common_config_args,
test_hf_dir=args.test_hf_dir,
# the following args are not used by test operation
target_dir=None,
hf_upload_path=None,
private=False,
)
else:
raise NotImplementedError(f"Unknown operation: {args.operation}")
return config
class BaseModelMerger(ABC):
"""
Abstract base class for merging distributed model checkpoints into HuggingFace format.
This class provides common functionality for converting model checkpoints from different
distributed training backends (FSDP, Megatron) into standard HuggingFace format that
can be easily loaded and used for inference or further training.
The merger supports two main operations:
- merge: Convert and save checkpoints to HuggingFace format
- test: Validate merged checkpoints against a reference model
Args:
config (ModelMergerConfig): Configuration object containing paths, backend type,
and operation parameters.
Attributes:
config (ModelMergerConfig): The configuration object passed during initialization.
hf_model_config_path (str): Path to the HuggingFace model configuration files.
model_config (PretrainedConfig): Loaded HuggingFace model configuration.
"""
def __init__(self, config: ModelMergerConfig):
self.config = config
self.hf_model_config_path = config.hf_model_config_path
self.model_config = AutoConfig.from_pretrained(
self.hf_model_config_path, trust_remote_code=self.config.trust_remote_code
)
def get_transformers_auto_model_class(self):
has_remote_code = hasattr(self.model_config, "auto_map") and any(
self.model_config.architectures[0] in val for val in self.model_config.auto_map.values()
)
if has_remote_code:
auto_class = next(
k for k, v in self.model_config.auto_map.items() if self.model_config.architectures[0] in v
)
match auto_class:
case "AutoModelForCausalLM":
return AutoModelForCausalLM
case "AutoModelForTokenClassification":
return AutoModelForTokenClassification
case "AutoModelForVision2Seq":
return AutoModelForVision2Seq
case "AutoModelForImageTextToText":
return AutoModelForVision2Seq
case _:
raise NotImplementedError(f"Unknown auto class {auto_class}")
else:
if "ForTokenClassification" in self.model_config.architectures[0]:
return AutoModelForTokenClassification
elif "ForCausalLM" in self.model_config.architectures[0]:
return AutoModelForCausalLM
elif "ForConditionalGeneration" in self.model_config.architectures[0]:
return AutoModelForVision2Seq
raise NotImplementedError(f"Unknown architecture {self.model_config.architectures}")
def patch_model_generation_config(self, model):
"""
        The generation_config created from the model config may differ from the one shipped with the
        pretrained model, which may lead to errors when generating: https://github.com/volcengine/verl/issues/1246
        This function patches the model with the generation_config loaded from the pretrained model directory.
"""
if model.can_generate():
try:
model.generation_config = GenerationConfig.from_pretrained(self.hf_model_config_path)
except OSError:
print(
f"Warning: Generation config file not found in {self.hf_model_config_path}, using a "
f"generation config created from the model config."
)
return model
def _load_lora_train_meta(self) -> Optional[dict[str, object]]:
if not self.config.local_dir:
return None
meta_path = os.path.join(self.config.local_dir, "lora_train_meta.json")
if not os.path.exists(meta_path):
return None
import json
try:
with open(meta_path, encoding="utf-8") as f:
lora_meta = json.load(f)
except Exception as e:
warnings.warn(f"Failed to read LoRA metadata from {meta_path}: {e}", stacklevel=2)
return None
result = {}
if "r" in lora_meta:
try:
result["r"] = int(lora_meta["r"])
except (TypeError, ValueError):
warnings.warn(f"Invalid LoRA rank in {meta_path}: {lora_meta['r']}", stacklevel=2)
if "lora_alpha" in lora_meta:
try:
result["lora_alpha"] = int(lora_meta["lora_alpha"])
except (TypeError, ValueError):
warnings.warn(f"Invalid lora_alpha in {meta_path}: {lora_meta['lora_alpha']}", stacklevel=2)
if "task_type" in lora_meta:
task_type = lora_meta["task_type"]
if task_type is None:
pass
elif isinstance(task_type, str):
result["task_type"] = task_type
else:
warnings.warn(f"Invalid task_type in {meta_path}: {task_type}", stacklevel=2)
return result if len(result) > 0 else None
def save_lora_adapter(self, state_dict: dict[str, torch.Tensor]):
"""
Save lora adapter to safetensors.
Returns:
lora_path: str, the path to the lora adapter. None if no lora adapter found.
Note:
            This function modifies the 'state_dict' in place.
"""
lora_params_names = [name for name in state_dict.keys() if "lora_" in name]
if len(lora_params_names) == 0:
return None
import json
from typing import OrderedDict
import peft
from safetensors.torch import save_file
lora_params = OrderedDict()
target_modules = set()
lora_key = None
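        # Illustrative (assumed) key layout following the peft naming scheme, e.g.
        #   "base_model.model.model.layers.0.self_attn.q_proj.lora_A.default.weight":
        # the ".default" suffix is dropped and the module name ("q_proj", third-from-last
        # component) is collected into target_modules.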
for name in lora_params_names:
lora_key = name.replace(".default.weight", ".weight")
target_modules.add(lora_key.split(".")[-3])
lora_params[lora_key] = state_dict.pop(name)
inferred_lora_rank = min(lora_params[lora_key].shape[0], lora_params[lora_key].shape[1])
lora_meta = self._load_lora_train_meta()
lora_rank = inferred_lora_rank
lora_alpha = 0
task_type = None
if lora_meta is not None:
meta_rank = lora_meta.get("r")
if meta_rank is not None and meta_rank > 0:
if meta_rank != inferred_lora_rank:
warnings.warn(
f"LoRA rank mismatch between metadata ({meta_rank}) and adapter weights "
f"({inferred_lora_rank}); using metadata rank.",
stacklevel=2,
)
lora_rank = meta_rank
meta_alpha = lora_meta.get("lora_alpha")
if meta_alpha is not None:
lora_alpha = meta_alpha
meta_task_type = lora_meta.get("task_type")
if meta_task_type is not None:
task_type = meta_task_type
if lora_alpha == 0:
warnings.warn(
"LoRA alpha metadata is missing or equals 0; falling back to lora_alpha=0. "
"Please verify checkpoint LoRA metadata (lora_train_meta.json).",
stacklevel=2,
)
peft_dict = {
"r": lora_rank,
"lora_alpha": lora_alpha,
"target_modules": list(target_modules),
}
if task_type is not None:
peft_dict["task_type"] = task_type
peft_config = peft.LoraConfig(**peft_dict).to_dict()
peft_config["task_type"] = peft_config["task_type"].value if peft_config["task_type"] else None
peft_config["peft_type"] = peft_config["peft_type"].value if peft_config["peft_type"] else None
peft_config["target_modules"] = list(peft_config["target_modules"])
lora_path = os.path.join(self.config.target_dir, "lora_adapter")
os.makedirs(lora_path, exist_ok=True)
with open(os.path.join(lora_path, "adapter_config.json"), "w", encoding="utf-8") as f:
json.dump(peft_config, f, ensure_ascii=False, indent=4)
save_file(lora_params, os.path.join(lora_path, "adapter_model.safetensors"))
for name in list(state_dict.keys()):
key = (
name.replace("base_model.model.", "")
.replace(".base_layer.weight", ".weight")
.replace(".base_layer.bias", ".bias")
)
state_dict[key] = state_dict.pop(name)
return lora_path
def save_hf_model_and_tokenizer(self, state_dict: dict[str, torch.Tensor]):
auto_model_class = self.get_transformers_auto_model_class()
with init_empty_weights():
model = auto_model_class.from_config(
self.model_config, torch_dtype=torch.bfloat16, trust_remote_code=self.config.trust_remote_code
)
model.to_empty(device="cpu")
model = self.patch_model_generation_config(model)
lora_path = self.save_lora_adapter(state_dict)
if lora_path:
print(f"Saving lora adapter to {lora_path}")
print(f"Saving model to {self.config.target_dir}")
model.save_pretrained(self.config.target_dir, state_dict=state_dict)
del state_dict
del model
processor = hf_processor(self.hf_model_config_path, trust_remote_code=self.config.trust_remote_code)
tokenizer = hf_tokenizer(self.hf_model_config_path, trust_remote_code=self.config.trust_remote_code)
if processor is not None:
print(f"Saving processor to {self.config.target_dir}")
processor.save_pretrained(self.config.target_dir)
if tokenizer is not None:
print(f"Saving tokenizer to {self.config.target_dir}")
tokenizer.save_pretrained(self.config.target_dir)
def upload_to_huggingface(self):
import requests
from huggingface_hub import HfApi
from huggingface_hub.utils import HfHubHTTPError, RepositoryNotFoundError
api = HfApi()
try:
# Attempt to create repository
api.create_repo(repo_id=self.config.hf_upload_path, private=self.config.private, exist_ok=True)
except HfHubHTTPError as e:
# Handle authentication/API errors
if e.response.status_code == 401:
raise PermissionError(
"Hugging Face authentication failed. Verify your token is valid and has write permissions."
) from e
elif e.response.status_code == 404:
raise RepositoryNotFoundError(f"Repository path not found: {self.config.hf_upload_path}") from e
else:
raise ConnectionError(f"Failed to create repository ({e.response.status_code}): {e}") from e
except requests.exceptions.ConnectionError as e:
raise ConnectionError("Network connection failed. Check your internet connection.") from e
try:
# Attempt folder upload
api.upload_folder(folder_path=self.config.target_dir, repo_id=self.config.hf_upload_path, repo_type="model")
except HfHubHTTPError as e:
if e.response.status_code == 401:
raise PermissionError("Authentication failed during upload. Token may have expired.") from e
else:
raise RuntimeError(f"Upload failed ({e.response.status_code}): {e}") from e
except requests.exceptions.ConnectionError as e:
raise ConnectionError("Network interruption during upload. Try again with stable connection.") from e
except OSError as e:
raise FileNotFoundError(f"Local folder error: {self.config.target_dir} - {str(e)}") from e
except Exception as e:
raise RuntimeError(f"Unexpected error during upload: {str(e)}") from e
@abstractmethod
def merge_and_save(self):
raise NotImplementedError("Subclasses should implement this method")
@abstractmethod
def cleanup(self):
raise NotImplementedError("Subclasses should implement this method to clean up resources if needed")
| {
"repo_id": "verl-project/verl",
"file_path": "verl/model_merger/base_model_merger.py",
"license": "Apache License 2.0",
"lines": 378,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/model_merger/fsdp_model_merger.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
import numpy as np
import torch
from torch.distributed._tensor import Placement, Shard
try:
# for torch 2.5+
from torch.distributed.tensor import DTensor
except ImportError:
from torch.distributed._tensor import DTensor
from tqdm import tqdm
from .base_model_merger import BaseModelMerger
class FSDPModelMerger(BaseModelMerger):
"""
Model merger for FSDP (Fully Sharded Data Parallel) checkpoints.
This class handles the conversion of FSDP distributed checkpoints into HuggingFace format.
FSDP shards model parameters across multiple processes, and this merger reconstructs
the full model by loading and concatenating the sharded parameters from all ranks.
The merger supports various FSDP configurations including:
- Pure FSDP (single dimension sharding)
- FSDP + DDP (data parallel + fully sharded data parallel)
- DTensor-based sharding with custom device meshes
Key features:
- Automatic detection of world size from checkpoint filenames
- Support for DTensor and non-DTensor checkpoints
- Parallel loading of checkpoint shards for efficiency
- Validation against reference HuggingFace models
Example:
To merge FSDP checkpoints:
```python
config = ModelMergerConfig(
operation="merge",
backend="fsdp",
local_dir="path/to/fsdp/checkpoints",
target_dir="path/to/output"
)
merger = FSDPModelMerger(config)
merger.merge_and_save()
```
"""
def _get_world_size(self) -> int:
"""_summary_
From FSDP json config file, extract the world size.
Returns:
int: world size
"""
config_path = Path(self.config.local_dir) / "fsdp_config.json"
if not config_path.exists():
raise FileNotFoundError(f"Config file {config_path} does not exist.")
with open(config_path) as f:
config = json.load(f)
# Extract world size from the config
world_size = config.get("world_size", None)
if world_size is None:
raise ValueError("World size not found in the config file.")
return world_size
def _load_rank_zero_state_dict(self, world_size: int) -> dict:
return torch.load(
Path(self.config.local_dir) / f"model_world_size_{world_size}_rank_0.pt",
map_location="cpu",
weights_only=False,
)
def _extract_device_mesh_info(self, state_dict: dict, world_size: int) -> tuple[np.ndarray, tuple[str, ...]]:
"""
Retrieves sharding information (device_mesh, mesh_dim_names) from a DTensor in the state_dict.
If no DTensor is found, infers a simple FSDP mesh based on world_size.
"""
pivot_key = sorted(list(state_dict.keys()))[0]
weight = state_dict[pivot_key]
if isinstance(weight, DTensor):
# get sharding info
device_mesh = weight.device_mesh
mesh = device_mesh.mesh
mesh_dim_names = device_mesh.mesh_dim_names
else:
# for non-DTensor
mesh = np.array([world_size], dtype=np.int64)
mesh_dim_names = ("fsdp",)
return mesh, mesh_dim_names
def _calculate_shard_configuration(
self, mesh: np.ndarray, mesh_dim_names: tuple[str, ...]
) -> tuple[int, tuple[int, ...]]:
"""Calculates the total number of shards and the shape of the device mesh."""
assert mesh_dim_names in (("fsdp",), ("ddp", "fsdp")), f"Unsupported mesh_dim_names {mesh_dim_names}"
if "tp" in mesh_dim_names:
# TODO: "tp" is not supported yet due to the above assert
total_shards = mesh.shape[-1] * mesh.shape[-2]
mesh_shape = (mesh.shape[-2], mesh.shape[-1])
else:
total_shards = mesh.shape[-1]
mesh_shape = (mesh.shape[-1],)
return total_shards, mesh_shape
def _merge_by_placement(self, tensors: list[torch.Tensor], placement: Placement) -> torch.Tensor:
"""Merges a list of tensors based on their DTensor placement"""
if placement.is_replicate():
return tensors[0]
elif placement.is_partial():
raise NotImplementedError("Partial placement is not supported yet")
elif placement.is_shard():
return torch.cat(tensors, dim=placement.dim).contiguous()
raise NotImplementedError(f"Unsupported placement: {placement}")
def _load_and_merge_state_dicts(
self, world_size: int, total_shards: int, mesh_shape: tuple[int, ...], mesh_dim_names: tuple[str, ...]
) -> dict[str, torch.Tensor]:
model_state_dict_lst = [None] * total_shards
def process_one_shard(rank: int, model_state_dict_lst: list):
model_path = Path(self.config.local_dir) / f"model_world_size_{world_size}_rank_{rank}.pt"
state_dict = torch.load(model_path, map_location="cpu", weights_only=False)
model_state_dict_lst[rank] = state_dict
return state_dict
with ThreadPoolExecutor(max_workers=min(32, os.cpu_count())) as executor:
futures = [executor.submit(process_one_shard, rank, model_state_dict_lst) for rank in range(total_shards)]
for future in tqdm(futures, desc=f"Loading {total_shards} FSDP shards", total=total_shards):
future.result()
# Merge state dicts from all shards
state_dict = {}
param_placements: dict[str, list] = {}
for key in set(model_state_dict_lst[0].keys()):
state_dict[key] = []
for model_state_shard in model_state_dict_lst:
# add tensor shard in order of rank to state_dict[key]
tensor = model_state_shard.pop(key)
if isinstance(tensor, DTensor):
state_dict[key].append(tensor._local_tensor.bfloat16())
placements = tuple(tensor.placements)
# replicated placement at dp dimension can be discarded
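                    # (Illustrative: for a ("ddp", "fsdp") mesh the placements are typically
                    # (Replicate(), Shard(0)); dropping the ddp entry leaves the single fsdp Shard(0).)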
if mesh_dim_names[0] in ("dp", "ddp"):
placements = placements[1:]
if key not in param_placements:
param_placements[key] = placements
else:
assert param_placements[key] == placements
else:
state_dict[key].append(tensor.bfloat16())
del model_state_dict_lst
# Merge tensors
for key in sorted(state_dict):
if not isinstance(state_dict[key], list):
print(f"No need to merge key {key}")
continue
if key in param_placements:
# merge shards
placements: tuple[Shard] = param_placements[key]
if len(mesh_shape) == 1:
# 1-D list, FSDP without TP
assert len(placements) == 1
shards = state_dict[key]
state_dict[key] = self._merge_by_placement(shards, placements[0])
else:
# 2-D list, FSDP + TP
raise NotImplementedError("FSDP + TP is not supported yet")
else:
state_dict[key] = torch.cat(state_dict[key], dim=0)
return state_dict
def merge_and_save(self):
world_size = self._get_world_size()
rank_zero_state_dict = self._load_rank_zero_state_dict(world_size)
mesh, mesh_dim_names = self._extract_device_mesh_info(rank_zero_state_dict, world_size)
print(f"Got device mesh {mesh}, mesh_dim_names {mesh_dim_names}")
total_shards, mesh_shape = self._calculate_shard_configuration(mesh, mesh_dim_names)
print(f"Processing model shards with {total_shards} {mesh_shape} in total")
merged_state_dict = self._load_and_merge_state_dicts(world_size, total_shards, mesh_shape, mesh_dim_names)
if self.config.operation == "test":
if not self.config.test_hf_dir:
raise ValueError("test_hf_dir must be provided for test operation")
self._validate_state_dict(merged_state_dict)
elif self.config.operation == "merge":
self.save_hf_model_and_tokenizer(merged_state_dict)
if self.config.hf_upload:
self.upload_to_huggingface()
else:
raise ValueError(f"Unknown operation: {self.config.operation}")
def _validate_state_dict(self, state_dict: dict[str, torch.Tensor]):
auto_model_class = self.get_transformers_auto_model_class()
hf_model = auto_model_class.from_pretrained(self.config.test_hf_dir, torch_dtype=torch.bfloat16)
hf_state_dict = hf_model.state_dict()
del hf_model
hf_model_keys = set(hf_state_dict.keys())
collected_keys = set(state_dict.keys())
missing_keys = hf_model_keys - collected_keys
        assert len(missing_keys) == 0, f"Missing keys in collected state dict: {sorted(missing_keys)}"
        extra_keys = collected_keys - hf_model_keys
        assert len(extra_keys) == 0, f"Extra keys in collected state dict: {sorted(extra_keys)}"
for key in hf_model_keys:
hf_shape = hf_state_dict[key].shape
collected_shape = state_dict[key].shape
assert hf_shape == collected_shape, (
f"Shape mismatch for key '{key}': original {hf_shape} vs collected {collected_shape}"
)
hf_dtype = hf_state_dict[key].dtype
collected_dtype = state_dict[key].dtype
assert hf_dtype == collected_dtype, (
f"Dtype mismatch for key '{key}': original {hf_dtype} vs collected {collected_dtype}"
)
torch.testing.assert_close(hf_state_dict[key], state_dict[key], atol=1e-6, rtol=1e-6)
print("FSDP checks passed: The merged state_dict matches the hf model saved by FSDPCheckpointManager.")
def cleanup(self):
"""Cleanup temporary files if needed."""
# FSDP merger does not create temporary files, so no cleanup is needed.
pass
| {
"repo_id": "verl-project/verl",
"file_path": "verl/model_merger/fsdp_model_merger.py",
"license": "Apache License 2.0",
"lines": 215,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/model_merger/megatron_model_merger.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import warnings
from contextlib import contextmanager
from pathlib import Path
from typing import Any, Callable, ContextManager
import numpy as np
import torch
import torch.distributed as dist
try:
# NPU patch
import mindspeed.megatron_adaptor # noqa: F401
except ImportError:
pass
from accelerate import init_empty_weights
from megatron.core import mpu
from megatron.core.models.gpt.gpt_model import ModelType
from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed
from safetensors.torch import load_file
from transformers import (
AutoConfig,
PretrainedConfig,
)
from verl.models.mcore import hf_to_mcore_config
from verl.utils.device import get_device_name, get_nccl_backend, get_torch_device
from verl.utils.distributed import set_numa_affinity
from verl.utils.megatron.dist_checkpointing import load_dist_checkpointing
from verl.utils.megatron_utils import get_model
from verl.utils.tokenizer import hf_processor, hf_tokenizer
from .base_model_merger import BaseModelMerger, ModelMergerConfig
@contextmanager
def noop_context() -> Any:
yield
def get_dynamic_pipeline_shards(layer_num: int, pp_size: int) -> list[int]:
"""Calculate the pipeline sharding configuration for Megatron-LM.
Args:
layer_num: Total number of layers in the model.
pp_size: Number of pipeline parallel ranks.
Returns:
        The number of layers assigned to each pp rank; the sharding is made as uniform as possible.
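    Example (illustrative values, computed from this function's logic):
        get_dynamic_pipeline_shards(8, 4)  -> [2, 2, 2, 2]
        get_dynamic_pipeline_shards(30, 4) -> [7, 8, 8, 7]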
"""
if layer_num < pp_size:
raise ValueError(f"layer_num {layer_num} must be greater than pp_size {pp_size}.")
if pp_size < 1:
raise ValueError(f"pp_size must be at least 1, got {pp_size}.")
if pp_size == 1:
return [layer_num]
if pp_size == 2:
return [
layer_num // 2,
layer_num - layer_num // 2,
]
middle_size = pp_size - 2
shards_strategy = []
for middle_layer_num in range(layer_num):
first_last_layer_num = layer_num - middle_layer_num * middle_size
first_layer_num = first_last_layer_num // 2
last_layer_num = first_last_layer_num - first_last_layer_num // 2
if 0 < first_layer_num <= middle_layer_num and 0 < last_layer_num <= middle_layer_num:
shards_strategy.append(
(
[first_layer_num] + [middle_layer_num] * middle_size + [last_layer_num],
abs(first_layer_num - middle_layer_num),
)
)
# sort by diff of layer_num, to make it as uniform as possible
res = sorted(shards_strategy, key=lambda x: x[1])[0][0]
assert sum(res) == layer_num, f"sum(res)={sum(res)} != layer_num={layer_num}, pp_size={pp_size}"
return res
class MegatronModelMerger(BaseModelMerger):
"""
Model merger for Megatron-LM distributed checkpoints.
This class handles the conversion of Megatron-LM distributed checkpoints into HuggingFace format.
Megatron-LM uses tensor parallelism, pipeline parallelism, and data parallelism to distribute
large language models across multiple GPUs. This merger reconstructs the full model by
loading distributed checkpoints and applying the necessary transformations.
Key features:
- Support for tensor parallel, pipeline parallel, and data parallel configurations
- Automatic parameter name mapping from Megatron to HuggingFace conventions
- Handling of QKV and gate-up tensor splitting/merging
- Support for tied word embeddings and value models
- Integration with Megatron's distributed checkpointing system
The merger handles various model architectures and configurations:
- Standard transformer models (GPT-style)
- Models with tied word embeddings
- Value models for reinforcement learning
- Multi-layer attention (MLA) architectures
- Mixture of Experts (MoE) models
Args:
config (ModelMergerConfig): Configuration object with Megatron-specific settings
including tie_word_embedding and is_value_model flags.
Example:
To merge Megatron checkpoints:
```python
config = ModelMergerConfig(
operation="merge",
backend="megatron",
local_dir="path/to/megatron/checkpoints",
target_dir="path/to/output",
tie_word_embedding=True
)
merger = MegatronModelMerger(config)
merger.merge_and_save()
```
"""
def __init__(self, config: ModelMergerConfig):
super().__init__(config)
        # Currently we use only 1 rank to merge the dist_ckpt; we will move to multi-process saving later.
if "WORLD_SIZE" not in os.environ:
os.environ["RANK"] = "0"
os.environ["LOCAL_RANK"] = "0"
os.environ["WORLD_SIZE"] = "1"
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12355"
set_numa_affinity()
torch.distributed.init_process_group(get_nccl_backend())
self.rank = torch.distributed.get_rank()
self.world_size = torch.distributed.get_world_size()
local_rank = os.environ.get("LOCAL_RANK", 0)
get_torch_device().set_device(f"{get_device_name()}:{local_rank}")
mpu.initialize_model_parallel(
tensor_model_parallel_size=1,
pipeline_model_parallel_size=self.world_size,
virtual_pipeline_model_parallel_size=None,
context_parallel_size=1,
expert_model_parallel_size=1,
)
model_parallel_cuda_manual_seed(0)
self.hf_config = AutoConfig.from_pretrained(
self.config.hf_model_config_path, trust_remote_code=self.config.trust_remote_code
)
print(self.hf_config, flush=True)
self.params_mapping = {
# megatron core gpt model name, huggingface model name
            # NOTICE: This is a bit tricky: when two keys share a prefix, the longer (more specific)
            # key must come first so that it is matched before the shorter containing key.
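            # For example, "self_attention.linear_qkv.layer_norm_weight" is listed before
            # "self_attention.linear_qkv" so that _replace_name matches the more specific key first.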
"embedding.word_embeddings": "model.embed_tokens",
# input layer norm for dpskv3
"input_layernorm.weight": "input_layernorm.weight",
"input_layernorm.bias": "input_layernorm.bias",
# attn
"self_attention.linear_qkv.layer_norm_weight": "input_layernorm.weight",
"self_attention.linear_qkv.layer_norm_bias": "input_layernorm.bias",
"self_attention.linear_qkv": "self_attn.qkv_proj",
"self_attention.q_layernorm": "self_attn.q_norm",
"self_attention.k_layernorm": "self_attn.k_norm",
"self_attention.linear_proj": "self_attn.o_proj",
# mla
"self_attention.linear_q_proj": "self_attn.q_proj",
"self_attention.linear_q_down_proj": "self_attn.q_a_proj",
"self_attention.linear_q_up_proj.layer_norm_weight": "self_attn.q_a_layernorm.weight",
"self_attention.linear_q_up_proj": "self_attn.q_b_proj",
"self_attention.linear_kv_down_proj": "self_attn.kv_a_proj_with_mqa",
"self_attention.linear_kv_up_proj.layer_norm_weight": "self_attn.kv_a_layernorm.weight",
"self_attention.linear_kv_up_proj": "self_attn.kv_b_proj",
# mlp
"pre_mlp_layernorm": "post_attention_layernorm",
"mlp.linear_fc1.layer_norm_weight": "post_attention_layernorm.weight",
"mlp.linear_fc1.layer_norm_bias": "post_attention_layernorm.bias",
"mlp.linear_fc1": "mlp.gate_up_proj",
"mlp.linear_fc2": "mlp.down_proj",
# moe
"mlp.router.expert_bias": "mlp.gate.e_score_correction_bias",
"mlp.router": "mlp.gate",
"mlp.shared_experts.linear_fc1": "mlp.shared_experts.gate_up_proj",
"mlp.shared_experts.linear_fc2": "mlp.shared_experts.down_proj",
"linear_fc1": "gate_up_proj",
"linear_fc2": "down_proj",
# output
"final_layernorm": "norm",
"output_layer": "lm_head",
}
if "Qwen2MoeForCausalLM" in self.hf_config.architectures:
self.params_mapping["mlp.shared_experts.linear_fc1"] = "mlp.shared_expert.gate_up_proj"
self.params_mapping["mlp.shared_experts.linear_fc2"] = "mlp.shared_expert.down_proj"
self.params_mapping["mlp.shared_experts.gate_weight"] = "mlp.shared_expert_gate.weight"
def _load_state_dicts(self, model_ckpt_path: str) -> dict[str, Any]:
"""_summary_
Use Megatron dist_checkpointing to load the model state dicts from the checkpoint directory.
Args:
model_ckpt_path (str): Path to the model checkpoint directory.
Returns:
State dict containing the model parameters.
"""
# init hf config
self.pipeline_shards = get_dynamic_pipeline_shards(self.hf_config.num_hidden_layers, self.world_size)
print(f"Pipeline shards: {self.pipeline_shards}, total layers: {sum(self.pipeline_shards)}")
tf_config = hf_to_mcore_config(
self.hf_config,
torch.bfloat16,
num_layers_in_first_pipeline_stage=self.pipeline_shards[0] if len(self.pipeline_shards) > 1 else None,
num_layers_in_last_pipeline_stage=self.pipeline_shards[-1] if len(self.pipeline_shards) > 2 else None,
)
tf_config.use_cpu_initialization = self.config.use_cpu_initialization
tie_word_embeddings = getattr(self.hf_config, "tie_word_embeddings", False)
# init megatron model
def megatron_model_provider(pre_process, post_process):
from verl.models.mcore import init_mcore_model
parallel_model = init_mcore_model(
tf_config,
self.hf_config,
pre_process,
post_process,
share_embeddings_and_output_weights=tie_word_embeddings,
value=False,
)
return parallel_model
context: Callable[..., ContextManager] = (
init_empty_weights if self.config.use_cpu_initialization else noop_context
)
with context():
whole_model = get_model(
model_provider_func=megatron_model_provider,
model_type=ModelType.encoder_or_decoder,
wrap_with_ddp=False,
transformer_config=tf_config,
)
if self.config.use_cpu_initialization:
# convert meta device to empty tensor so it can use `copy_` function
whole_model[0].module = whole_model[0].module.to_empty(device="cpu")
# load state dicts
sharded_state_dict = {}
for vpp_rank, model in enumerate(whole_model):
key = f"model{vpp_rank}" if len(whole_model) > 1 else "model"
mpu.set_virtual_pipeline_model_parallel_rank(vpp_rank)
sharded_state_dict[key] = model.sharded_state_dict()
model_state_dict = load_dist_checkpointing(sharded_state_dict, model_ckpt_path)
model_state_dict_list = []
for vpp_rank, model in enumerate(whole_model):
key = f"model{vpp_rank}" if len(whole_model) > 1 else "model"
mpu.set_virtual_pipeline_model_parallel_rank(vpp_rank)
model_state_dict_list.append(model_state_dict[key])
return model_state_dict_list
    def _check_megatron_state_key(self, key: str) -> None:
        """
        Checks that the key is a valid Megatron state key, raising ValueError otherwise.
        The model merger currently only supports keys that start with "decoder"/"embedding"/"output_layer"
        in TransformerLayer; keys must not start with "model."
        """
if key.startswith("model."):
raise ValueError(
f"Invalid key {key} in Megatron state_dict. Expected keys to start with "
f"'decoder/embedding/output_layer' in TransformerLayer."
)
skip_checking_keys = ["embedding.word_embeddings", "output_layer"]
for skip_key in skip_checking_keys:
if skip_key in key:
print(f"skip checking key {key}")
return
# Exclude extra state keys
if not key.startswith("decoder"):
raise ValueError(
f"Invalid key {key} in Megatron state_dict. Expected keys to start with 'decoder' in TransformerLayer."
)
def _split_tensors(
self, key: str, tensor: torch.Tensor, config: PretrainedConfig, is_value_model: bool = False
) -> list[torch.Tensor]:
"""
Splits a tensor into multiple tensors based on the name.
This is used to handle qkv and gate_up tensors.
"""
if "linear_fc1.weight" in key:
# if the tensor is gate and proj
gate_lst = []
up_lst = []
gate, up = tensor.chunk(2)
gate_lst.append(gate)
up_lst.append(up)
gate = torch.cat(gate_lst, dim=0)
up = torch.cat(up_lst, dim=0)
return [gate, up]
elif "self_attention.linear_qkv." in key and "layer_norm" not in key:
# if the tensor is qkv, for each param on tp, split into q, k, v
# concat q, k, v separately.
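            # Illustrative layout (per query group / kv head): the fused qkv rows are ordered as
            # [num_q_per_kv query heads, 1 key head, 1 value head]; chunking by num_key_value_heads
            # below therefore yields one such group per chunk before the per-group split.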
q_lst, k_lst, v_lst = [], [], []
assert config.num_attention_heads % config.num_key_value_heads == 0
num_q_per_kv = config.num_attention_heads // config.num_key_value_heads
assert tensor.shape[0] % (num_q_per_kv + 2) == 0, (
f"Tensor shape {tensor.shape} is not divisible by {num_q_per_kv + 2}"
)
kv_size = tensor.shape[0] // (num_q_per_kv + 2)
split_size = [kv_size * num_q_per_kv, kv_size, kv_size]
num_query_groups_per_partition = config.num_key_value_heads
for chunk in tensor.chunk(num_query_groups_per_partition):
split_size = [
kv_size * num_q_per_kv // num_query_groups_per_partition,
kv_size // num_query_groups_per_partition,
kv_size // num_query_groups_per_partition,
]
q, k, v = chunk.split(split_size)
q_lst.append(q)
k_lst.append(k)
v_lst.append(v)
return [torch.cat(q_lst, dim=0), torch.cat(k_lst, dim=0), torch.cat(v_lst, dim=0)]
else:
return [tensor]
def _merge_state_dicts(self, model_state_dict_list: list[dict[str, Any]]) -> dict[str, torch.Tensor]:
state_dict = {}
layers_cum = 0
if self.world_size > 1:
pipeline_cumsum = np.cumsum(self.pipeline_shards)
layers_cum = 0 if self.rank == 0 else pipeline_cumsum[self.rank - 1]
print(f"{layers_cum=}")
for model_state_dict in model_state_dict_list:
layers_handled = 0
keys = model_state_dict.keys()
for key in keys:
if "extra_state" in key:
continue
if self.config.tie_word_embedding and ("output_layer" in key):
print("skip lm_head and reward_head loading because of tie_word_embeddings")
continue
self._check_megatron_state_key(key)
hf_name = self._replace_name(key, self.params_mapping)
assert hf_name is not None, f"Failed to convert layer name [{key}] from megatron to huggingface."
if "model.layers." in hf_name:
local_layer_no = int(hf_name.split(".")[2])
layers_handled = max(local_layer_no, layers_handled)
global_layer_no = local_layer_no + layers_cum
new_key_list = hf_name.split(".")
new_key_list[2] = str(global_layer_no)
hf_name = ".".join(new_key_list)
else:
warnings.warn(f"hf_name {hf_name} will not be fixed with layer number", stacklevel=2)
if "mlp.experts." in hf_name and ".weight" in hf_name:
name_prefix, expert_id = hf_name.split(".weight")
for proj in ["gate_up", "down"]:
if f"{proj}_proj" in hf_name:
hf_name = hf_name.replace(
f"mlp.experts.{proj}_proj.weight{expert_id}",
f"mlp.experts.{expert_id}.{proj}_proj.weight",
)
tensor = model_state_dict[key]
split_tensor = self._split_tensors(
key, tensor, self.hf_config, is_value_model=self.config.is_value_model
)
if len(split_tensor) == 1:
state_dict[hf_name] = split_tensor[0]
elif len(split_tensor) == 3:
# split qkv
for n, d in zip(["q", "k", "v"], split_tensor, strict=True):
state_dict[hf_name.replace("qkv", n)] = d
elif len(split_tensor) == 2:
# split gate up
state_dict[hf_name.replace("gate_up", "gate")] = split_tensor[0]
state_dict[hf_name.replace("gate_up", "up")] = split_tensor[1]
shape_info = (
split_tensor.shape if isinstance(split_tensor, torch.Tensor) else [t.shape for t in split_tensor]
)
print(f"converted {key} to {hf_name} with shape {shape_info}")
            layers_cum += layers_handled + 1  # layers_handled is a zero-based layer index, hence the +1
return state_dict
def save_hf_model_and_tokenizer(self, merged_state_dict):
if self.world_size == 1:
return super().save_hf_model_and_tokenizer(merged_state_dict)
from safetensors.torch import save_file
layer_num = self.hf_config.num_hidden_layers
# FIXME: make configurable
saves_per_layer = 1 if layer_num < 30 else 2
saves_total = saves_per_layer * layer_num
saves_indexes = {}
# calculate the layer start index and key chunks
layer_this_rank = self.pipeline_shards[self.rank]
pipeline_cumsum = np.cumsum(self.pipeline_shards)
layer_start = 0 if self.rank == 0 else pipeline_cumsum[self.rank - 1]
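        # Illustrative: with 36 layers, saves_per_layer=2 and saves_total=72; a rank whose layer_start
        # is 18 writes its first shard as model-00037-of-00072.safetensors (save_idx = 18 * 2 + 0 = 36).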
keys = list(merged_state_dict.keys())
keys_chunk = np.array_split(np.array(keys), layer_this_rank * saves_per_layer)
numel = 0
assert len(keys_chunk) == layer_this_rank * saves_per_layer, (
f"Expected {len(keys_chunk)} chunks, but got {layer_this_rank * saves_per_layer} for rank {self.rank}."
)
# save to model shards manually
target_dir = Path(self.config.target_dir)
for i, keys in enumerate(keys_chunk):
sd_to_save = {k: merged_state_dict[k] for k in keys}
numel += sum([sd_to_save[i].numel() for i in sd_to_save])
save_idx = layer_start * saves_per_layer + i
save_path = target_dir / f"model-{save_idx + 1:05d}-of-{saves_total:05d}.safetensors"
save_file(sd_to_save, save_path)
for k in keys:
saves_indexes[k] = str(save_path.name)
tensor = torch.tensor([numel]).to(get_device_name())
dist.all_reduce(tensor, op=dist.ReduceOp.SUM)
numel = tensor.cpu().item()
all_save_indexes = [{} for _ in range(self.world_size)]
dist.all_gather_object(all_save_indexes, saves_indexes)
saves_indexes = {k: v for i in all_save_indexes for k, v in i.items()}
if self.rank == 0:
with open(target_dir / "model.safetensors.index.json", "w") as f:
json.dump(
{
"metadata": {
"total_size": numel,
},
"weight_map": saves_indexes,
},
f,
indent=4,
)
print(f"model saved to {target_dir} with {numel=}")
self.model_config.save_pretrained(self.config.target_dir)
processor = hf_processor(self.hf_model_config_path, trust_remote_code=self.config.trust_remote_code)
tokenizer = hf_tokenizer(self.hf_model_config_path, trust_remote_code=self.config.trust_remote_code)
if processor is not None:
print(f"Saving processor to {self.config.target_dir}")
processor.save_pretrained(self.config.target_dir)
if tokenizer is not None:
print(f"Saving tokenizer to {self.config.target_dir}")
tokenizer.save_pretrained(self.config.target_dir)
def merge_and_save(self):
from verl.utils.megatron_utils import get_dist_checkpoint_path
model_ckpt_path = get_dist_checkpoint_path(self.config.local_dir)
model_state_dict = self._load_state_dicts(model_ckpt_path)
merged_state_dict = self._merge_state_dicts(model_state_dict)
del model_state_dict
if self.config.operation == "test":
if not self.config.test_hf_dir:
raise ValueError("test_hf_dir must be provided for test operation")
self._validate_state_dict(merged_state_dict)
elif self.config.operation == "merge":
self.save_hf_model_and_tokenizer(merged_state_dict)
if self.config.hf_upload:
self.upload_to_huggingface()
else:
raise ValueError(f"Unknown operation: {self.config.operation}")
def _validate_state_dict(self, state_dict: dict[str, torch.Tensor]):
"""
Compares the merged Megatron state_dict against a reference safetensors model.
Applies necessary name mappings from Megatron to Hugging Face conventions using _replace_name.
"""
ref_state_dict = load_file(Path(self.config.test_hf_dir) / "model.safetensors")
for name, loaded_weight in state_dict.items():
# name = self._replace_name(original_name, self.params_mapping)
if not name or name.endswith(".bias") and name not in ref_state_dict:
continue
if "rotary_emb.inv_freq" in name:
continue
if "lm_head.weight" in name:
if self.config.is_value_model or self.config.tie_word_embedding:
continue
if name not in ref_state_dict:
raise RuntimeError(f"key: {name} not exist in state_dict")
param = ref_state_dict[name]
assert loaded_weight.dtype == param.dtype
torch.testing.assert_close(loaded_weight.to("cpu"), param, atol=1e-2, rtol=5e-2)
def _replace_name(self, megatron_name: str, name_mapping: dict[str, str]) -> str:
for m_name, v_name in name_mapping.items():
if m_name not in megatron_name:
continue
megatron_name = megatron_name.replace("decoder", "model")
param_name = megatron_name.replace(m_name, v_name)
return param_name
return None # Return None if no mapping found
def cleanup(self):
torch.distributed.destroy_process_group()
| {
"repo_id": "verl-project/verl",
"file_path": "verl/model_merger/megatron_model_merger.py",
"license": "Apache License 2.0",
"lines": 468,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/utils/megatron/dist_checkpointing.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import megatron.core
import torch
from megatron.core import dist_checkpointing, mpu
from megatron.core.dist_checkpointing.serialization import (
get_default_load_sharded_strategy,
get_default_save_sharded_strategy,
)
from megatron.core.dist_checkpointing.strategies.fully_parallel import (
FullyParallelLoadStrategyWrapper,
FullyParallelSaveStrategyWrapper,
)
from packaging import version
def save_dist_checkpointing(
sharded_state_dict,
ckpt_path,
async_save=False,
content_metadata=None,
):
validate_sharding_integrity = True
# Get checkpointing strategies
save_strategy = get_default_save_sharded_strategy("torch_dist")
save_strategy = FullyParallelSaveStrategyWrapper(
save_strategy, mpu.get_data_parallel_group(with_context_parallel=True)
)
# https://github.com/NVIDIA/Megatron-LM/blob/core_v0.14.0/megatron/core/optimizer/distrib_optimizer.py#L1109-L1123
mcore_ge_014 = version.parse(megatron.core.__version__) >= version.parse("0.14.0")
# Save model sharded state dicts
save_kwargs = dict(
sharded_strategy=save_strategy,
async_sharded_save=async_save,
validate_access_integrity=validate_sharding_integrity,
)
if content_metadata is not None:
if mcore_ge_014:
save_kwargs["content_metadata"] = content_metadata
return dist_checkpointing.save(sharded_state_dict, ckpt_path, **save_kwargs)
def load_dist_checkpointing(sharded_state_dict, ckpt_dir):
# Get checkpointing strategies
load_strategy = get_default_load_sharded_strategy(ckpt_dir)
load_strategy = FullyParallelLoadStrategyWrapper(
load_strategy, mpu.get_data_parallel_group(with_context_parallel=True)
)
# Fix torch.load weights only error
try:
import transformer_engine as te
torch.serialization.add_safe_globals([torch.optim.AdamW])
torch.serialization.add_safe_globals([te.pytorch.optimizers.fused_adam.FusedAdam])
except Exception:
pass
# Load model sharded state dicts
state_dict = dist_checkpointing.load(sharded_state_dict, ckpt_dir, sharded_strategy=load_strategy)
return state_dict
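# Minimal usage sketch (illustrative; mirrors how the model merger calls this helper):
#   sharded_state_dict = {"model": megatron_module.sharded_state_dict()}
#   state_dict = load_dist_checkpointing(sharded_state_dict, "/path/to/dist_ckpt")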
| {
"repo_id": "verl-project/verl",
"file_path": "verl/utils/megatron/dist_checkpointing.py",
"license": "Apache License 2.0",
"lines": 65,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:examples/data_preprocess/geo3k_multiturn_w_tool.py | # Copyright 2023-2025 SGLang Team
# Copyright Amazon.com, Inc. or its affiliates.
# Copyright 2025 Reallm Labs Ltd. or its affiliates
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Preprocess the Geometry3k dataset to parquet format
"""
import argparse
import os
import datasets
from verl.utils.hdfs_io import copy, makedirs
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--local_dir", default=None, help="The save directory for the preprocessed dataset.")
parser.add_argument("--hdfs_dir", default=None)
parser.add_argument("--local_dataset_path", default=None, help="The local path to the raw dataset, if it exists.")
parser.add_argument(
"--local_save_dir",
default="~/data/geo3k_multiturn_w_tool",
help="The save directory for the preprocessed dataset.",
)
args = parser.parse_args()
local_dataset_path = args.local_dataset_path
data_source = "hiyouga/geometry3k"
if local_dataset_path is not None:
dataset = datasets.load_dataset(local_dataset_path)
else:
dataset = datasets.load_dataset(data_source)
train_dataset = dataset["train"]
test_dataset = dataset["test"]
instruction_following = (
r"You FIRST think about the reasoning process as an internal monologue and then provide the final answer. "
r"The reasoning process MUST BE enclosed within <think> </think> tags. "
r"The final answer MUST BE put in \boxed{}."
)
# add a row to each data item that represents a unique id
def make_map_fn(split):
def process_fn(example, idx):
problem = example.pop("problem")
prompt = problem + " " + instruction_following
answer = example.pop("answer")
images = example.pop("images")
data = {
"data_source": data_source,
"prompt": [
{
"role": "system",
"content": (
"You are a math expert. You are given a question and you need to solve it step by step. "
"Reasoning step by step before any tool call. "
"You should use the `calc_geo3k_reward` tool after step by step solving the question, "
"before generate final answer at least once and refine your answer if necessary. "
),
},
{
"role": "user",
"content": prompt,
},
],
"images": images,
"ability": "math",
"reward_model": {"style": "rule", "ground_truth": answer},
"extra_info": {
"split": split,
"index": idx,
"answer": answer,
"question": problem,
"need_tools_kwargs": True,
"tools_kwargs": {
"calc_geo3k_reward": {
"create_kwargs": {"ground_truth": answer},
# "execute_kwargs": {},
# "calc_reward_kwargs": {},
# "release_kwargs": {},
},
},
},
}
return data
return process_fn
train_dataset = train_dataset.map(function=make_map_fn("train"), with_indices=True, num_proc=8)
test_dataset = test_dataset.map(function=make_map_fn("test"), with_indices=True, num_proc=8)
hdfs_dir = args.hdfs_dir
local_save_dir = args.local_dir
if local_save_dir is not None:
print("Warning: Argument 'local_dir' is deprecated. Please use 'local_save_dir' instead.")
else:
local_save_dir = args.local_save_dir
train_dataset.to_parquet(os.path.join(local_save_dir, "train.parquet"))
test_dataset.to_parquet(os.path.join(local_save_dir, "test.parquet"))
if hdfs_dir is not None:
makedirs(hdfs_dir)
copy(src=local_save_dir, dst=hdfs_dir)
| {
"repo_id": "verl-project/verl",
"file_path": "examples/data_preprocess/geo3k_multiturn_w_tool.py",
"license": "Apache License 2.0",
"lines": 105,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/tools/geo3k_tool.py | # Copyright 2023-2025 SGLang Team
# Copyright Amazon.com, Inc. or its affiliates.
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from typing import Any, Optional
from uuid import uuid4
from verl.utils.reward_score import geo3k
from verl.utils.rollout_trace import rollout_trace_op
from .base_tool import BaseTool
from .schemas import OpenAIFunctionToolSchema, ToolResponse
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
class Geo3kTool(BaseTool):
"""A demo tool for calculating the reward of geo3k.
- `get_openai_tool_schema`: return the tool schema in OpenAI format.
- `create`: create a tool instance for a trajectory.
- `execute`: execute the tool.
- `calc_reward`: calculate the reward respect to tool state.
- `release`: release the tool instance.
"""
def __init__(self, config: dict, tool_schema: OpenAIFunctionToolSchema):
"""
_tool_schema = OpenAIFunctionToolSchema.model_validate({
"type": "function",
"function": {
"name": "calc_geo3k_reward",
"description": "A tool for calculating the reward of geo3k",
"parameters": {
"type": "object",
"properties": {
"answer": {
"type": "string",
"description": "The answer to the question, enclosed in \\boxed{}",
},
},
"required": ["answer"],
},
}
})
"""
super().__init__(config, tool_schema)
self._instance_dict = {}
def get_openai_tool_schema(self) -> OpenAIFunctionToolSchema:
return self.tool_schema
async def create(
self, instance_id: Optional[str] = None, ground_truth: Optional[str] = None, **kwargs
) -> tuple[str, ToolResponse]:
if instance_id is None:
instance_id = str(uuid4())
self._instance_dict[instance_id] = {
"response": "",
"ground_truth": ground_truth,
"reward": 0.0,
}
return instance_id, ToolResponse()
@rollout_trace_op
async def execute(self, instance_id: str, parameters: dict[str, Any], **kwargs) -> tuple[ToolResponse, float, dict]:
answer = parameters.get("answer", "")
if not isinstance(answer, str):
answer = str(answer)
self._instance_dict[instance_id]["response"] = answer
reward = await self.calc_reward(instance_id)
        # penalty for submitting an answer that does not improve on the previous one
tool_reward = 0.0 if reward > self._instance_dict[instance_id]["reward"] else -0.05
# update the reward
self._instance_dict[instance_id]["reward"] = reward
return ToolResponse(text=f"Current parsed {answer=} {reward=}"), tool_reward, {}
async def calc_reward(self, instance_id: str, **kwargs) -> float:
return geo3k.compute_score(
self._instance_dict[instance_id]["response"],
self._instance_dict[instance_id]["ground_truth"],
use_boxed=False,
format_score=0.0,
)
async def release(self, instance_id: str, **kwargs) -> None:
del self._instance_dict[instance_id]
| {
"repo_id": "verl-project/verl",
"file_path": "verl/tools/geo3k_tool.py",
"license": "Apache License 2.0",
"lines": 89,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:examples/data_preprocess/gsm8k_multiturn_w_interaction.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Preprocess the GSM8k dataset to parquet format
"""
import argparse
import os
import re
import datasets
from verl.utils.hdfs_io import copy, makedirs
def extract_solution(solution_str):
solution = re.search("#### (\\-?[0-9\\.\\,]+)", solution_str)
assert solution is not None
final_solution = solution.group(0)
final_solution = final_solution.split("#### ")[1].replace(",", "")
return final_solution
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--local_dir", default=None, help="The save directory for the preprocessed dataset.")
parser.add_argument("--hdfs_dir", default=None)
parser.add_argument("--local_dataset_path", default=None, help="The local path to the raw dataset, if it exists.")
parser.add_argument(
"--local_save_dir", default="~/data/gsm8k", help="The save directory for the preprocessed dataset."
)
args = parser.parse_args()
local_dataset_path = args.local_dataset_path
data_source = "openai/gsm8k"
if local_dataset_path is not None:
dataset = datasets.load_dataset(local_dataset_path, "main")
else:
dataset = datasets.load_dataset(data_source, "main")
train_dataset = dataset["train"]
test_dataset = dataset["test"]
instruction_following = "Let's think step by step and output the final answer after `####`."
# add a row to each data item that represents a unique id
def make_map_fn(split):
def process_fn(example, idx):
question_raw = example.pop("question")
question = question_raw + " " + instruction_following
answer_raw = example.pop("answer")
solution = extract_solution(answer_raw)
data = {
"data_source": data_source,
"prompt": [
{
"role": "system",
"content": (
"You are a math expert. You are given a question and you need to solve it step by step. "
"You should rethinking carefully if user point out your answer is wrong. "
"Put your final answer in the format of `#### <answer>`."
),
},
{
"role": "user",
"content": question,
},
],
"ability": "math",
"reward_model": {"style": "rule", "ground_truth": solution},
"extra_info": {
"split": split,
"index": idx,
"answer": answer_raw,
"question": question_raw,
"interaction_kwargs": {
"name": "gsm8k",
"query": question,
"ground_truth": solution,
},
},
}
return data
return process_fn
train_dataset = train_dataset.map(function=make_map_fn("train"), with_indices=True)
test_dataset = test_dataset.map(function=make_map_fn("test"), with_indices=True)
hdfs_dir = args.hdfs_dir
local_save_dir = args.local_dir
if local_save_dir is not None:
print("Warning: Argument 'local_dir' is deprecated. Please use 'local_save_dir' instead.")
else:
local_save_dir = args.local_save_dir
train_dataset.to_parquet(os.path.join(local_save_dir, "train.parquet"))
test_dataset.to_parquet(os.path.join(local_save_dir, "test.parquet"))
if hdfs_dir is not None:
makedirs(hdfs_dir)
copy(src=local_save_dir, dst=hdfs_dir)
| {
"repo_id": "verl-project/verl",
"file_path": "examples/data_preprocess/gsm8k_multiturn_w_interaction.py",
"license": "Apache License 2.0",
"lines": 99,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:tests/interactions/test_gsm8k_interaction.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import patch
import pytest
from verl.interactions.gsm8k_interaction import Gsm8kInteraction
class TestGsm8kInteraction:
"""Test cases for Gsm8kInteraction class."""
def setup_method(self):
"""Set up test environment before each test method."""
self.config = {"name": "gsm8k"}
self.interaction = Gsm8kInteraction(self.config)
def test_init(self):
"""Test Gsm8kInteraction initialization."""
assert self.interaction._instance_dict == {}
assert self.interaction.config == self.config
assert self.interaction.name == "gsm8k"
@pytest.mark.asyncio
async def test_start_interaction_with_instance_id(self):
"""Test start_interaction with provided instance_id."""
instance_id = "test_instance"
ground_truth = "42"
result_id = await self.interaction.start_interaction(instance_id=instance_id, ground_truth=ground_truth)
assert result_id == instance_id
assert instance_id in self.interaction._instance_dict
assert self.interaction._instance_dict[instance_id]["response"] == ""
assert self.interaction._instance_dict[instance_id]["ground_truth"] == ground_truth
assert self.interaction._instance_dict[instance_id]["reward"] == 0.0
@pytest.mark.asyncio
async def test_start_interaction_without_instance_id(self):
"""Test start_interaction without provided instance_id (auto-generated)."""
ground_truth = "42"
result_id = await self.interaction.start_interaction(ground_truth=ground_truth)
assert result_id is not None
assert len(result_id) == 36 # UUID4 length
assert result_id in self.interaction._instance_dict
assert self.interaction._instance_dict[result_id]["ground_truth"] == ground_truth
@pytest.mark.asyncio
async def test_start_interaction_without_ground_truth(self):
"""Test start_interaction without ground_truth parameter."""
instance_id = "test_instance"
result_id = await self.interaction.start_interaction(instance_id=instance_id)
assert result_id == instance_id
assert self.interaction._instance_dict[instance_id]["ground_truth"] is None
@pytest.mark.asyncio
async def test_generate_response_correct_answer_with_prefix(self):
"""Test generate_response with correct answer already having #### prefix."""
instance_id = "test_instance"
ground_truth = "42"
# Setup instance
await self.interaction.start_interaction(instance_id=instance_id, ground_truth=ground_truth)
messages = [{"role": "assistant", "content": "#### 42"}]
with patch("verl.utils.reward_score.gsm8k.compute_score", return_value=1.0):
should_terminate, response, reward, metadata = await self.interaction.generate_response(
instance_id, messages
)
assert should_terminate is True
assert response == "Your response is correct!"
assert reward == 1.0
assert metadata == {}
assert self.interaction._instance_dict[instance_id]["response"] == "#### 42"
@pytest.mark.asyncio
async def test_generate_response_correct_answer_without_prefix(self):
"""Test generate_response with correct answer missing #### prefix."""
instance_id = "test_instance"
ground_truth = "42"
# Setup instance
await self.interaction.start_interaction(instance_id=instance_id, ground_truth=ground_truth)
messages = [{"role": "assistant", "content": "42"}]
with patch("verl.utils.reward_score.gsm8k.compute_score", return_value=1.0):
should_terminate, response, reward, metadata = await self.interaction.generate_response(
instance_id, messages
)
assert should_terminate is True
assert response == "Your response is correct!"
assert reward == 1.0
assert self.interaction._instance_dict[instance_id]["response"] == "42"
@pytest.mark.asyncio
async def test_generate_response_incorrect_answer(self):
"""Test generate_response with incorrect answer."""
instance_id = "test_instance"
ground_truth = "42"
# Setup instance
await self.interaction.start_interaction(instance_id=instance_id, ground_truth=ground_truth)
messages = [{"role": "assistant", "content": "24"}]
with patch("verl.utils.reward_score.gsm8k.compute_score", return_value=0.0):
should_terminate, response, reward, metadata = await self.interaction.generate_response(
instance_id, messages
)
assert should_terminate is False
assert response == "Your response is incorrect! You need to reflect on your answer and try again."
assert reward == 0.0
assert self.interaction._instance_dict[instance_id]["response"] == "24"
@pytest.mark.asyncio
async def test_generate_response_multiple_messages(self):
"""Test generate_response with multiple messages (should use last assistant message)."""
instance_id = "test_instance"
ground_truth = "42"
# Setup instance
await self.interaction.start_interaction(instance_id=instance_id, ground_truth=ground_truth)
messages = [
{"role": "user", "content": "What is 2+2?"},
{"role": "assistant", "content": "### 4"},
{"role": "user", "content": "What is 40+2?"},
{"role": "assistant", "content": "#### 42"},
]
with patch("verl.utils.reward_score.gsm8k.compute_score", return_value=1.0):
should_terminate, response, reward, metadata = await self.interaction.generate_response(
instance_id, messages
)
assert should_terminate is True
assert response == "Your response is correct!"
assert self.interaction._instance_dict[instance_id]["response"] == "#### 42"
@pytest.mark.asyncio
async def test_generate_response_no_assistant_message(self):
"""Test generate_response with no assistant messages."""
instance_id = "test_instance"
ground_truth = "42"
# Setup instance
await self.interaction.start_interaction(instance_id=instance_id, ground_truth=ground_truth)
messages = [{"role": "user", "content": "Hello!"}]
with patch("verl.utils.reward_score.gsm8k.compute_score", return_value=0.0):
should_terminate, response, reward, metadata = await self.interaction.generate_response(
instance_id, messages
)
assert should_terminate is False
assert self.interaction._instance_dict[instance_id]["response"] == ""
@pytest.mark.asyncio
async def test_calculate_score_direct_call(self):
"""Test calculate_score method directly."""
instance_id = "test_instance"
ground_truth = "42"
# Setup instance
await self.interaction.start_interaction(instance_id=instance_id, ground_truth=ground_truth)
# Set a response
self.interaction._instance_dict[instance_id]["response"] = "#### 42"
with patch("verl.utils.reward_score.gsm8k.compute_score", return_value=1.0) as mock_compute:
score = await self.interaction.calculate_score(instance_id)
assert score == 1.0
mock_compute.assert_called_once_with("#### 42", "42", method="strict", format_score=0.0, score=1.0)
@pytest.mark.asyncio
async def test_calculate_score_with_kwargs(self):
"""Test calculate_score method with additional kwargs."""
instance_id = "test_instance"
ground_truth = "42"
# Setup instance
await self.interaction.start_interaction(instance_id=instance_id, ground_truth=ground_truth)
# Set a response
self.interaction._instance_dict[instance_id]["response"] = "#### 24"
with patch("verl.utils.reward_score.gsm8k.compute_score", return_value=0.0) as mock_compute:
score = await self.interaction.calculate_score(instance_id, extra_param="test")
assert score == 0.0
mock_compute.assert_called_once_with("#### 24", "42", method="strict", format_score=0.0, score=1.0)
@pytest.mark.asyncio
async def test_finalize_interaction(self):
"""Test finalize_interaction method."""
instance_id = "test_instance"
ground_truth = "42"
# Setup instance
await self.interaction.start_interaction(instance_id=instance_id, ground_truth=ground_truth)
assert instance_id in self.interaction._instance_dict
await self.interaction.finalize_interaction(instance_id)
assert instance_id not in self.interaction._instance_dict
@pytest.mark.asyncio
async def test_finalize_interaction_with_kwargs(self):
"""Test finalize_interaction method with additional kwargs."""
instance_id = "test_instance"
ground_truth = "42"
# Setup instance
await self.interaction.start_interaction(instance_id=instance_id, ground_truth=ground_truth)
assert instance_id in self.interaction._instance_dict
await self.interaction.finalize_interaction(instance_id, extra_param="test")
assert instance_id not in self.interaction._instance_dict
@pytest.mark.asyncio
async def test_finalize_nonexistent_interaction(self):
"""Test finalize_interaction with non-existent instance_id."""
instance_id = "nonexistent_instance"
# This should raise KeyError
with pytest.raises(KeyError):
await self.interaction.finalize_interaction(instance_id)
@pytest.mark.asyncio
async def test_full_interaction_workflow_correct(self):
"""Test complete interaction workflow with correct answer."""
ground_truth = "42"
# Start interaction
instance_id = await self.interaction.start_interaction(ground_truth=ground_truth)
# Generate response with correct answer
messages = [{"role": "assistant", "content": "42"}]
with patch("verl.utils.reward_score.gsm8k.compute_score", return_value=1.0):
should_terminate, response, reward, metadata = await self.interaction.generate_response(
instance_id, messages
)
assert should_terminate is True
assert reward == 1.0
# Finalize interaction
await self.interaction.finalize_interaction(instance_id)
assert instance_id not in self.interaction._instance_dict
@pytest.mark.asyncio
async def test_full_interaction_workflow_incorrect(self):
"""Test complete interaction workflow with incorrect answer."""
ground_truth = "42"
# Start interaction
instance_id = await self.interaction.start_interaction(ground_truth=ground_truth)
# Generate response with incorrect answer
messages = [{"role": "assistant", "content": "24"}]
with patch("verl.utils.reward_score.gsm8k.compute_score", return_value=0.0):
should_terminate, response, reward, metadata = await self.interaction.generate_response(
instance_id, messages
)
assert should_terminate is False
assert reward == 0.0
# Continue with another attempt
messages.append({"role": "user", "content": response})
messages.append({"role": "assistant", "content": "42"})
with patch("verl.utils.reward_score.gsm8k.compute_score", return_value=1.0):
should_terminate, response, reward, metadata = await self.interaction.generate_response(
instance_id, messages
)
assert should_terminate is True
assert reward == 1.0
# Finalize interaction
await self.interaction.finalize_interaction(instance_id)
assert instance_id not in self.interaction._instance_dict
@pytest.mark.asyncio
async def test_multiple_concurrent_interactions(self):
"""Test multiple concurrent interaction instances."""
ground_truth_1 = "42"
ground_truth_2 = "24"
# Start multiple interactions
instance_id_1 = await self.interaction.start_interaction(ground_truth=ground_truth_1)
instance_id_2 = await self.interaction.start_interaction(ground_truth=ground_truth_2)
assert len(self.interaction._instance_dict) == 2
assert instance_id_1 in self.interaction._instance_dict
assert instance_id_2 in self.interaction._instance_dict
# Test responses for both instances
messages_1 = [{"role": "assistant", "content": "42"}]
messages_2 = [{"role": "assistant", "content": "24"}]
with patch("verl.utils.reward_score.gsm8k.compute_score", side_effect=[1.0, 1.0]):
should_terminate_1, _, reward_1, _ = await self.interaction.generate_response(instance_id_1, messages_1)
should_terminate_2, _, reward_2, _ = await self.interaction.generate_response(instance_id_2, messages_2)
assert should_terminate_1 is True
assert should_terminate_2 is True
assert reward_1 == 1.0
assert reward_2 == 1.0
# Finalize both interactions
await self.interaction.finalize_interaction(instance_id_1)
await self.interaction.finalize_interaction(instance_id_2)
assert len(self.interaction._instance_dict) == 0
@pytest.mark.asyncio
async def test_edge_case_empty_messages(self):
"""Test edge case with empty messages list."""
instance_id = "test_instance"
ground_truth = "42"
# Setup instance
await self.interaction.start_interaction(instance_id=instance_id, ground_truth=ground_truth)
messages = []
with patch("verl.utils.reward_score.gsm8k.compute_score", return_value=0.0):
should_terminate, response, reward, metadata = await self.interaction.generate_response(
instance_id, messages
)
assert should_terminate is False
assert reward == 0.0
assert self.interaction._instance_dict[instance_id]["response"] == ""
@pytest.mark.asyncio
async def test_edge_case_message_without_content(self):
"""Test edge case with message without content field."""
instance_id = "test_instance"
ground_truth = "42"
# Setup instance
await self.interaction.start_interaction(instance_id=instance_id, ground_truth=ground_truth)
messages = [
{"role": "assistant"} # Missing content field
]
with patch("verl.utils.reward_score.gsm8k.compute_score", return_value=0.0):
should_terminate, response, reward, metadata = await self.interaction.generate_response(
instance_id, messages
)
assert should_terminate is False
assert reward == 0.0
assert self.interaction._instance_dict[instance_id]["response"] is None
def test_inheritance_from_base_interaction(self):
"""Test that Gsm8kInteraction properly inherits from BaseInteraction."""
from verl.interactions.base import BaseInteraction
assert isinstance(self.interaction, BaseInteraction)
# Test that all required methods are implemented
assert hasattr(self.interaction, "start_interaction")
assert hasattr(self.interaction, "generate_response")
assert hasattr(self.interaction, "calculate_score")
assert hasattr(self.interaction, "finalize_interaction")
# Test that methods are callable
assert callable(self.interaction.start_interaction)
assert callable(self.interaction.generate_response)
assert callable(self.interaction.calculate_score)
assert callable(self.interaction.finalize_interaction)
def test_name_attribute_initialization(self):
"""Test name attribute initialization with different configs."""
# Test with explicit name in config
config_with_name = {"name": "custom_gsm8k"}
interaction_with_name = Gsm8kInteraction(config_with_name)
assert interaction_with_name.name == "custom_gsm8k"
# Test with default name when not provided in config
config_without_name = {}
interaction_without_name = Gsm8kInteraction(config_without_name)
assert interaction_without_name.name == "interaction_agent" # Default from BaseInteraction
# Test that name is accessible as attribute
assert hasattr(self.interaction, "name")
assert self.interaction.name == "gsm8k"
| {
"repo_id": "verl-project/verl",
"file_path": "tests/interactions/test_gsm8k_interaction.py",
"license": "Apache License 2.0",
"lines": 319,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:verl/interactions/base.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional
from uuid import uuid4
class BaseInteraction:
def __init__(self, config: dict[str, Any]):
self.config = config
        self.name: str = config.get("name", "interaction_agent")  # generic default role name for the agent
async def start_interaction(self, instance_id: Optional[str] = None, **kwargs) -> str:
"""Create a tool instance.
Args:
instance_id: The instance id of the tool.
Returns:
The instance id of the tool.
"""
if instance_id is None:
return str(uuid4())
else:
return instance_id
async def generate_response(
self, instance_id: str, messages: list[dict[str, Any]], **kwargs
    ) -> tuple[bool, str, float, dict[str, Any]]:  # clearer name for the response generation method
"""
Generates a response for the current turn of interaction.
Returns a tuple containing:
- should_terminate_sequence (bool): True if the interaction sequence should end.
- response_content (str): The textual content of the response.
- current_turn_score (float): The score for this specific turn/response.
- additional_data (dict): Any extra information or metadata.
"""
should_terminate_sequence: bool = False # if True, end rollout
response_content: str = "Your current result seems acceptable."
current_turn_score: float = 0.8
additional_data: dict[str, Any] = {}
return should_terminate_sequence, response_content, current_turn_score, additional_data
    async def calculate_score(self) -> float:  # clearer name for the score calculation method
"""
Calculates a score for the interaction,
potentially considering aspects like partial exposure & in-context task switching.
        Should be invoked at the turn level.
"""
# ...implement the logic to calculate turn-level score...
score = 0.0
return score
    async def finalize_interaction(self) -> None:  # clearer name for ending the interaction and releasing resources
"""
Finalizes the interaction session and releases any associated state or resources.
Simulates: release state
"""
# ...implement the logic to release state...
pass
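# Illustrative sketch (not part of the original module): a minimal subclass showing how the
# hooks above are typically wired together. The class name `EchoInteraction` is hypothetical.
#
# class EchoInteraction(BaseInteraction):
#     async def generate_response(self, instance_id, messages, **kwargs):
#         last = messages[-1].get("content", "") if messages else ""
#         # terminate immediately, echo the last message, and assign a neutral score
#         return True, f"echo: {last}", 0.0, {}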
| {
"repo_id": "verl-project/verl",
"file_path": "verl/interactions/base.py",
"license": "Apache License 2.0",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/interactions/gsm8k_interaction.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023-2024 SGLang Team
# Copyright 2025 ModelBest Inc. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from typing import Any, Optional
from uuid import uuid4
from verl.utils.reward_score import gsm8k
from .base import BaseInteraction
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
class Gsm8kInteraction(BaseInteraction):
"""A demo interaction for calculating the reward of gsm8k.
    - `start_interaction`: start an interaction instance for a trajectory.
- `generate_response`: generate the response of the assistant.
- `calculate_score`: calculate the score of the interaction.
- `finalize_interaction`: finalize the interaction instance.
"""
def __init__(self, config: dict):
super().__init__(config)
self._instance_dict = {}
async def start_interaction(
self, instance_id: Optional[str] = None, ground_truth: Optional[str] = None, **kwargs
) -> str:
if instance_id is None:
instance_id = str(uuid4())
self._instance_dict[instance_id] = {
"response": "",
"ground_truth": ground_truth,
"reward": 0.0,
}
return instance_id
async def generate_response(
self, instance_id: str, messages: list[dict[str, Any]], **kwargs
) -> tuple[bool, str, float, dict]:
content = ""
for i in range(len(messages) - 1, -1, -1):
item = messages[i]
if item.get("role") == "assistant":
content = item.get("content")
break
self._instance_dict[instance_id]["response"] = content
reward = await self.calculate_score(instance_id)
if reward == 1.0:
response = "Your response is correct!"
should_terminate_sequence = True
else:
response = "Your response is incorrect! You need to reflect on your answer and try again."
should_terminate_sequence = False
return should_terminate_sequence, response, reward, {}
async def calculate_score(self, instance_id: str, **kwargs) -> float:
return gsm8k.compute_score(
self._instance_dict[instance_id]["response"],
self._instance_dict[instance_id]["ground_truth"],
method="strict",
format_score=0.0,
score=1.0,
)
async def finalize_interaction(self, instance_id: str, **kwargs) -> None:
del self._instance_dict[instance_id]
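# Illustrative usage sketch (not part of the original module), assuming the strict scorer in
# verl.utils.reward_score.gsm8k extracts answers from the "#### <number>" format:
#
# async def _demo():
#     interaction = Gsm8kInteraction({"name": "gsm8k"})
#     instance_id = await interaction.start_interaction(ground_truth="42")
#     messages = [{"role": "assistant", "content": "#### 42"}]
#     should_terminate, response, reward, _ = await interaction.generate_response(instance_id, messages)
#     # reward == 1.0 and should_terminate is True when the answer matches the ground truth
#     await interaction.finalize_interaction(instance_id)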
| {
"repo_id": "verl-project/verl",
"file_path": "verl/interactions/gsm8k_interaction.py",
"license": "Apache License 2.0",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:tests/single_controller/test_ray_collectives.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test for using ray collective group.
Suppose we have an Actor and a Rollout. The Actor contains 4 workers and the Rollout contains 2 workers. We establish
an Actor-to-Rollout relationship by using collective groups:
Actor rank 0, 1 - Rollout rank 0
Actor rank 2, 3 - Rollout rank 1
Then, we initiate 4 p2p comms from actor to rollout
"""
import ray
import ray.util.collective as collective
import torch
from verl.single_controller.base import Worker
from verl.single_controller.base.decorator import Dispatch, register
from verl.single_controller.ray import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup
@ray.remote
class Actor(Worker):
@register(Dispatch.ONE_TO_ALL)
def init(self):
remote_rank = self.rank // 2
self.group_name = f"A{self.rank}_R{remote_rank}"
collective.init_collective_group(world_size=2, rank=0, backend="nccl", group_name=self.group_name)
@register(Dispatch.ONE_TO_ALL, blocking=False)
def send_tensors(self):
tensor = torch.ones(size=(4,), dtype=torch.float32, device="cuda") * self.rank
collective.send(tensor=tensor, dst_rank=1, group_name=self.group_name)
@ray.remote
class Rollout(Worker):
@register(Dispatch.ONE_TO_ALL)
def init(self):
self.remote_first_rank = self.rank * 2
self.remote_second_rank = self.remote_first_rank + 1
self.first_group_name = f"A{self.remote_first_rank}_R{self.rank}"
self.second_group_name = f"A{self.remote_second_rank}_R{self.rank}"
collective.init_collective_group(world_size=2, rank=1, backend="nccl", group_name=self.first_group_name)
collective.init_collective_group(world_size=2, rank=1, backend="nccl", group_name=self.second_group_name)
@register(Dispatch.ONE_TO_ALL, blocking=False)
def receive_tensors(self):
self.tensor1 = torch.randn(size=(4,), dtype=torch.float32, device="cuda")
self.tensor2 = torch.randn(size=(4,), dtype=torch.float32, device="cuda")
collective.recv(self.tensor1, src_rank=0, group_name=self.first_group_name)
collective.recv(self.tensor2, src_rank=0, group_name=self.second_group_name)
@register(Dispatch.ONE_TO_ALL)
def get_tensors(self):
return {f"src_{self.remote_first_rank}": self.tensor1, f"src_{self.remote_second_rank}": self.tensor2}
def test_ray_collective_group():
ray.init()
actor_resource_pool = RayResourcePool([4])
rollout_resource_pool = RayResourcePool([2])
actor_cls = RayClassWithInitArgs(cls=Actor)
rollout_cls = RayClassWithInitArgs(cls=Rollout)
actor_wg = RayWorkerGroup(
resource_pool=actor_resource_pool, ray_cls_with_init=actor_cls, name_prefix="collective_group_actor"
)
rollout_wg = RayWorkerGroup(
resource_pool=rollout_resource_pool, ray_cls_with_init=rollout_cls, name_prefix="collective_group_rollout"
)
actor_wg.init()
rollout_wg.init()
out1 = actor_wg.send_tensors()
out2 = rollout_wg.receive_tensors()
# block to wait
ray.get(out1)
ray.get(out2)
output = rollout_wg.get_tensors()
rollout_0_output = output[0]
rollout_1_output = output[1]
output = rollout_0_output | rollout_1_output
print(output)
for i in range(4):
assert torch.sum(output[f"src_{i}"]).item() == 4 * i
ray.shutdown()
if __name__ == "__main__":
test_ray_collective_group()
| {
"repo_id": "verl-project/verl",
"file_path": "tests/single_controller/test_ray_collectives.py",
"license": "Apache License 2.0",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:tests/utils/test_config_on_cpu.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from dataclasses import dataclass, field
from omegaconf import OmegaConf
from verl.base_config import BaseConfig
from verl.utils import omega_conf_to_dataclass
@dataclass
class TestDataclass(BaseConfig):
hidden_size: int = 0
activation: str = "relu"
@dataclass
class TestTrainConfig(BaseConfig):
batch_size: int = 0
model: TestDataclass = field(default_factory=TestDataclass)
override_config: dict = field(default_factory=dict)
_cfg_str = """train_config:
_target_: tests.utils.test_config_on_cpu.TestTrainConfig
batch_size: 32
model:
hidden_size: 768
activation: relu
override_config: {}"""
class TestConfigOnCPU(unittest.TestCase):
"""Test cases for configuration utilities on CPU.
Test Plan:
1. Test basic OmegaConf to dataclass conversion for simple nested structures
2. Test nested OmegaConf to dataclass conversion for complex hierarchical configurations
3. Verify all configuration values are correctly converted and accessible
"""
def setUp(self):
self.config = OmegaConf.create(_cfg_str)
def test_omega_conf_to_dataclass(self):
sub_cfg = self.config.train_config.model
cfg = omega_conf_to_dataclass(sub_cfg, TestDataclass)
self.assertEqual(cfg.hidden_size, 768)
self.assertEqual(cfg.activation, "relu")
assert isinstance(cfg, TestDataclass)
def test_nested_omega_conf_to_dataclass(self):
cfg = omega_conf_to_dataclass(self.config.train_config, TestTrainConfig)
self.assertEqual(cfg.batch_size, 32)
self.assertEqual(cfg.model.hidden_size, 768)
self.assertEqual(cfg.model.activation, "relu")
assert isinstance(cfg, TestTrainConfig)
assert isinstance(cfg.model, TestDataclass)
class TestPrintCfgCommand(unittest.TestCase):
"""Test suite for the print_cfg.py command-line tool."""
def test_command_with_override(self):
"""Test that the command runs without error when overriding config values."""
import subprocess
# Run the command
result = subprocess.run(
["python3", "scripts/print_cfg.py"],
capture_output=True,
text=True,
)
# Verify the command exited successfully
self.assertEqual(result.returncode, 0, f"Command failed with stderr: {result.stderr}")
# Verify the output contains expected config information
self.assertIn("critic", result.stdout)
self.assertIn("profiler", result.stdout)
if __name__ == "__main__":
unittest.main()
| {
"repo_id": "verl-project/verl",
"file_path": "tests/utils/test_config_on_cpu.py",
"license": "Apache License 2.0",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:tests/utils/test_nvtx_profile.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest.mock import MagicMock, patch
from verl.utils import omega_conf_to_dataclass
from verl.utils.profiler.config import NsightToolConfig, ProfilerConfig
from verl.utils.profiler.profile import DistProfiler
class TestProfilerConfig(unittest.TestCase):
def test_config_init(self):
import os
from hydra import compose, initialize_config_dir
with initialize_config_dir(config_dir=os.path.abspath("verl/trainer/config")):
cfg = compose(config_name="ppo_trainer")
for config in [
cfg.actor_rollout_ref.actor.profiler,
cfg.actor_rollout_ref.rollout.profiler,
cfg.actor_rollout_ref.ref.profiler,
cfg.critic.profiler,
]:
profiler_config = omega_conf_to_dataclass(config)
self.assertEqual(profiler_config.tool, config.tool)
self.assertEqual(profiler_config.enable, config.enable)
self.assertEqual(profiler_config.all_ranks, config.all_ranks)
self.assertEqual(profiler_config.ranks, config.ranks)
self.assertEqual(profiler_config.save_path, config.save_path)
self.assertEqual(profiler_config.ranks, config.ranks)
assert isinstance(profiler_config, ProfilerConfig)
with self.assertRaises(AttributeError):
_ = profiler_config.non_existing_key
assert config.get("non_existing_key") == profiler_config.get("non_existing_key")
assert config.get("non_existing_key", 1) == profiler_config.get("non_existing_key", 1)
def test_frozen_config(self):
"""Test that modifying frozen keys in ProfilerConfig raises exceptions."""
from dataclasses import FrozenInstanceError
from verl.utils.profiler.config import ProfilerConfig
# Create a new ProfilerConfig instance
config = ProfilerConfig(all_ranks=False, ranks=[0])
with self.assertRaises(FrozenInstanceError):
config.all_ranks = True
with self.assertRaises(FrozenInstanceError):
config.ranks = [1, 2, 3]
with self.assertRaises(TypeError):
config["all_ranks"] = True
with self.assertRaises(TypeError):
config["ranks"] = [1, 2, 3]
class TestNsightSystemsProfiler(unittest.TestCase):
"""Test suite for NsightSystemsProfiler functionality.
Test Plan:
1. Initialization: Verify profiler state after creation
2. Basic Profiling: Test start/stop functionality
3. Discrete Mode: TODO: Test discrete profiling behavior
4. Annotation: Test the annotate decorator in both normal and discrete modes
5. Config Validation: Verify proper config initialization from OmegaConf
"""
def setUp(self):
self.config = ProfilerConfig(tool="nsys", enable=True, all_ranks=True)
self.rank = 0
self.profiler = DistProfiler(self.rank, self.config, tool_config=NsightToolConfig(discrete=False))
def test_initialization(self):
self.assertEqual(self.profiler.check_this_rank(), True)
self.assertEqual(self.profiler.check_this_step(), False)
def test_start_stop_profiling(self):
with patch("torch.cuda.profiler.start") as mock_start, patch("torch.cuda.profiler.stop") as mock_stop:
# Test start
self.profiler.start()
self.assertTrue(self.profiler.check_this_step())
mock_start.assert_called_once()
# Test stop
self.profiler.stop()
self.assertFalse(self.profiler.check_this_step())
mock_stop.assert_called_once()
# def test_discrete_profiling(self):
# discrete_config = ProfilerConfig(discrete=True, all_ranks=True)
# profiler = NsightSystemsProfiler(self.rank, discrete_config)
# with patch("torch.cuda.profiler.start") as mock_start, patch("torch.cuda.profiler.stop") as mock_stop:
# profiler.start()
# self.assertTrue(profiler.this_step)
# mock_start.assert_not_called() # Shouldn't start immediately in discrete mode
# profiler.stop()
# self.assertFalse(profiler.this_step)
# mock_stop.assert_not_called() # Shouldn't stop immediately in discrete mode
def test_annotate_decorator(self):
mock_self = MagicMock()
mock_self.profiler = self.profiler
mock_self.profiler.start()
decorator = mock_self.profiler.annotate(message="test")
@decorator
def test_func(self, *args, **kwargs):
return "result"
with (
patch("torch.cuda.profiler.start") as mock_start,
patch("torch.cuda.profiler.stop") as mock_stop,
patch("verl.utils.profiler.nvtx_profile.mark_start_range") as mock_start_range,
patch("verl.utils.profiler.nvtx_profile.mark_end_range") as mock_end_range,
):
result = test_func(mock_self)
self.assertEqual(result, "result")
mock_start_range.assert_called_once()
mock_end_range.assert_called_once()
mock_start.assert_not_called() # Not discrete mode
mock_stop.assert_not_called() # Not discrete mode
# def test_annotate_discrete_mode(self):
# discrete_config = ProfilerConfig(discrete=True, all_ranks=True)
# profiler = NsightSystemsProfiler(self.rank, discrete_config)
# mock_self = MagicMock()
# mock_self.profiler = profiler
# mock_self.profiler.this_step = True
# @NsightSystemsProfiler.annotate(message="test")
# def test_func(self, *args, **kwargs):
# return "result"
# with (
# patch("torch.cuda.profiler.start") as mock_start,
# patch("torch.cuda.profiler.stop") as mock_stop,
# patch("verl.utils.profiler.nvtx_profile.mark_start_range") as mock_start_range,
# patch("verl.utils.profiler.nvtx_profile.mark_end_range") as mock_end_range,
# ):
# result = test_func(mock_self)
# self.assertEqual(result, "result")
# mock_start_range.assert_called_once()
# mock_end_range.assert_called_once()
# mock_start.assert_called_once() # Should start in discrete mode
# mock_stop.assert_called_once() # Should stop in discrete mode
if __name__ == "__main__":
unittest.main()
| {
"repo_id": "verl-project/verl",
"file_path": "tests/utils/test_nvtx_profile.py",
"license": "Apache License 2.0",
"lines": 136,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:tests/special_sanity/check_api_docs.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fail CI if any function or class that is publicly exported via
``__all__`` lacks a docstring.
Usage
-----
# Check specific modules or packages
python check_api_docs.py mypkg.core mypkg.utils
# Check an entire source tree (all top-level packages under cwd)
python check_api_docs.py
"""
from __future__ import annotations
import argparse
import importlib
import inspect
import pkgutil
import sys
from pathlib import Path
from types import ModuleType
from typing import Iterable
_ALLOW_LIST = [
"verl.third_party.vllm.LLM",
"verl.third_party.vllm.parallel_state",
"verl.utils.profiler.WorkerProfiler",
"verl.utils.profiler.WorkerProfilerExtension",
"verl.utils.profiler.log_gpu_memory_usage",
"verl.utils.profiler.log_print",
"verl.utils.profiler.mark_annotate",
"verl.utils.profiler.mark_end_range",
"verl.utils.profiler.mark_start_range",
"verl.models.mcore.qwen2_5_vl.get_vision_model_config",
"verl.models.mcore.qwen2_5_vl.get_vision_projection_config",
"verl.models.mcore.mbridge.freeze_moe_router",
"verl.models.mcore.mbridge.make_value_model",
"verl.utils.transformers_compat.flash_attn_supports_top_left_mask",
]
def iter_submodules(root: ModuleType) -> Iterable[ModuleType]:
"""Yield *root* and every sub-module inside it."""
yield root
def print_pkg_error(pkg_name):
print(f"[warn] Skipping {pkg_name!r}", file=sys.stderr)
if getattr(root, "__path__", None): # only packages have __path__
for mod_info in pkgutil.walk_packages(root.__path__, prefix=f"{root.__name__}.", onerror=print_pkg_error):
try:
yield importlib.import_module(mod_info.name)
except Exception as exc:
print(f"[warn] Skipping {mod_info.name!r}: {exc}", file=sys.stderr)
def names_missing_doc(mod: ModuleType) -> list[str]:
"""Return fully-qualified names that need docstrings."""
missing: list[str] = []
public = getattr(mod, "__all__", [])
for name in public:
obj = getattr(mod, name, None)
if f"{mod.__name__}.{name}" in _ALLOW_LIST:
continue
if obj is None:
# Exported but not found in the module: flag it anyway.
missing.append(f"{mod.__name__}.{name} (not found)")
continue
if inspect.isfunction(obj) or inspect.isclass(obj):
doc = inspect.getdoc(obj)
if not doc or not doc.strip():
missing.append(f"{mod.__name__}.{name}")
return missing
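# Illustrative note (not part of the original script): a module exporting __all__ = ["foo"]
# where `def foo(): ...` has no docstring is reported as "<module>.foo"; names present in
# _ALLOW_LIST are skipped, and names exported but not actually defined are flagged as "(not found)".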
def check_module(qualname: str) -> list[str]:
"""Import *qualname* and check it (and sub-modules)."""
try:
module = importlib.import_module(qualname)
except ModuleNotFoundError as exc:
print(f"[error] Cannot import '{qualname}': {exc}", file=sys.stderr)
return [qualname]
missing: list[str] = []
for submod in iter_submodules(module):
missing.extend(names_missing_doc(submod))
return missing
def autodiscover_packages() -> list[str]:
"""Detect top-level packages under CWD when no argument is given."""
pkgs: list[str] = []
for p in Path.cwd().iterdir():
if p.is_dir() and (p / "__init__.py").exists():
pkgs.append(p.name)
return pkgs
def main() -> None:
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"modules",
nargs="*",
help="Fully-qualified module or package names (defaults to every top-level package found in CWD).",
)
args = parser.parse_args()
targets = args.modules or autodiscover_packages()
if not targets:
raise ValueError("[error] No modules specified and none detected automatically.")
all_missing: list[str] = []
for modname in targets:
all_missing.extend(check_module(modname))
if all_missing:
print("\nMissing docstrings:")
for name in sorted(all_missing):
print(f" - {name}")
raise ValueError("Missing docstrings detected. Please enhance them with docs accordingly.")
print("✅ All exported functions/classes have docstrings.")
if __name__ == "__main__":
main()
| {
"repo_id": "verl-project/verl",
"file_path": "tests/special_sanity/check_api_docs.py",
"license": "Apache License 2.0",
"lines": 116,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:verl/tools/mcp_base_tool.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
from typing import Any, Optional
from uuid import uuid4
from fastmcp.exceptions import ClientError
from verl.tools.utils.mcp_clients.McpClientManager import ClientManager
from verl.utils.rollout_trace import rollout_trace_op
from .base_tool import BaseTool
from .schemas import OpenAIFunctionToolSchema, ToolResponse
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
class MCPBaseTool(BaseTool):
def __init__(self, config: dict, tool_schema: OpenAIFunctionToolSchema):
super().__init__(config, tool_schema)
self._instance_dict = {}
self.timeout = config.get("timeout", 30)
# TODO(hechanghao): create a global client manager to manage the rate limit, client and pool
logger.info(f"Initialized MCPBaseTool with config: {config}")
def get_openai_tool_schema(self) -> OpenAIFunctionToolSchema:
"""Return the OpenAI tool schema."""
return self.tool_schema
async def create(self, instance_id: Optional[str] = None, **kwargs) -> tuple[str, ToolResponse]:
"""Create a tool instance.
Args:
instance_id: The instance id of the tool.
Returns:
The instance id of the tool.
            tool_creation_response: The response of the tool when creating the instance.
"""
if instance_id is None:
instance_id = str(uuid4())
self._instance_dict[instance_id] = {
"response": "",
"reward": [],
}
return instance_id, ToolResponse()
async def _call_tool(self, instance_id, parameters) -> tuple[str, dict]:
err_msg = ""
metadata = {}
try:
call_tool_result = await ClientManager.call_tool(self.name, parameters, self.timeout)
logger.debug(f"Tool result for instance {instance_id} with tool {self.name}: {call_tool_result.content}")
result, metadata = self._parse_tool_result(call_tool_result.content)
except ClientError as e:
err_msg = f"\n Tool call failed: {e}"
except ConnectionError as e:
err_msg = f"\n Connection failed: {e}"
except Exception as e:
err_msg = f"\n An unexpected error occurred: {e}"
finally:
if err_msg:
result = err_msg
metadata["api_request_error"] = err_msg
else:
metadata["api_request_error"] = None
return result, metadata
@rollout_trace_op
async def execute(self, instance_id: str, parameters: dict[str, Any], **kwargs) -> tuple[ToolResponse, float, dict]:
if self.name == "" or self.name is None or parameters is None:
error_msg = "Error: 'parameters' is missing or empty."
logger.error(f"[MCPTool] {error_msg} Received tool name: {self.name}, parameters: {parameters}")
return ToolResponse(text=json.dumps({"result": error_msg})), 0.0, {}
try:
result_text, metadata = await self._call_tool(instance_id, parameters)
# Store results in instance dictionary
self._instance_dict[instance_id]["reward"].append(result_text.strip())
# Convert metadata to metrics
metrics = {
"query_count": metadata.get("query_count", 0),
"status": metadata.get("status", "unknown"),
"total_results": metadata.get("total_results", 0),
"api_request_error": metadata.get("api_request_error"),
}
return ToolResponse(text=result_text), 0.0, metrics
except Exception as e:
error_result = json.dumps({"result": f"Tool execution failed: {e}"})
logger.error(f"[MCPBaseTool] Execution failed: {e}")
return ToolResponse(text=error_result), 0.0, {"error": str(e)}
async def calc_reward(self, instance_id: str, **kwargs) -> str:
return self._instance_dict[instance_id]["reward"]
async def release(self, instance_id: str, **kwargs) -> None:
if instance_id in self._instance_dict:
del self._instance_dict[instance_id]
def _parse_tool_result(self, content: list) -> tuple[str, dict]:
tools_content = [part.text for part in filter(lambda x: x.type == "text", content)]
return " ".join(tools_content), {}
| {
"repo_id": "verl-project/verl",
"file_path": "verl/tools/mcp_base_tool.py",
"license": "Apache License 2.0",
"lines": 100,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/tools/mcp_search_tool.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import re
from verl.tools.mcp_base_tool import MCPBaseTool
from .schemas import OpenAIFunctionToolSchema
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
class MCPSearchTool(MCPBaseTool):
def __init__(self, config: dict, tool_schema: OpenAIFunctionToolSchema):
super().__init__(config, tool_schema)
def _parse_tool_result(self, content: list) -> tuple[str, dict]:
res = ""
res_cnt = 0
query_list = []
metadata = {
"api_request_error": "",
"status": "unknown",
"total_results": 0,
}
try:
for part in content:
if part.type != "text":
continue
text = part.text.replace("'", '"')
query_match = re.search(r'query"\s*:\s*"([^"]+)"', text)
query = query_match.group(1) if query_match else ""
query_list.append(query)
title_matches = re.findall(r'"title"\s*:', text)
title_count = len(title_matches)
results_match = re.search(r'"results"\s*:\s*(\[.*?\])', text, re.DOTALL)
results_content = results_match.group(1) if results_match else ""
res += results_content
res_cnt += title_count
        except json.JSONDecodeError:
            err_msg = "json parse error."
            logger.error(err_msg)
            metadata["api_request_error"] = err_msg
            metadata["status"] = "error"
        # update metadata (keep the error status if parsing failed)
        if metadata["status"] != "error":
            metadata["status"] = "success"
metadata["queries"] = query_list
metadata["query_count"] = len(query_list)
metadata["total_results"] = res_cnt
return res, metadata
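# Illustrative sketch (not part of the original module): for a hypothetical text part such as
#   '{"query": "capital of France", "results": [{"title": "Paris"}]}'
# the parser above would return res == '[{"title": "Paris"}]' and metadata containing
# queries == ["capital of France"], query_count == 1 and total_results == 1.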
| {
"repo_id": "verl-project/verl",
"file_path": "verl/tools/mcp_search_tool.py",
"license": "Apache License 2.0",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/tools/utils/mcp_clients/McpClientManager.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import json
import logging
from typing import Any
from fastmcp import Client
from fastmcp.client.transports import SSETransport
from verl.tools.utils.mcp_clients.utils import TokenBucket, mcp2openai
logger = logging.getLogger(__name__)
class MCPClientManager:
rootServerName = "mcpServers"
initialized = False
clients = []
tool_client_mapping = {}
rate_limiter = None
    async def initialize(self, config_path, rate_limit: float = 10.0):
        """Initialize the MCP Client Manager and start all clients."""
        if self.initialized:
            return
result = self._load_config(config_path)
servers = result[self.rootServerName]
exclude_sse_servers = {self.rootServerName: {}}
for server_name in servers.keys():
server = servers[server_name]
if "auth_token" in server:
transport = SSETransport(url=server["url"], headers={"Authorization": f"Bearer {server['auth_token']}"})
client = Client(transport)
self.clients.append(client)
else:
exclude_sse_servers[self.rootServerName][server_name] = server
if exclude_sse_servers[self.rootServerName]:
self.clients.append(Client(exclude_sse_servers))
# Initialize rate limiter
self.rate_limiter = TokenBucket(rate_limit)
self.initialized = True
async def call_tool(self, tool_name, parameters, timeout):
# Apply rate limiting
while not self.rate_limiter.acquire():
await asyncio.sleep(0.1)
client = self.get_client_with_tool_name(tool_name)
async with client:
return await client.call_tool_mcp(tool_name, parameters)
async def fetch_tool_schemas(self, tool_selected_list: list[str]) -> list[dict]:
tool_schemas = []
for client in self.clients:
async with client:
tools = await client.list_tools_mcp()
for tool in tools.tools:
if not tool_selected_list:
self.tool_client_mapping[tool.name] = client
tool_schemas.append(mcp2openai(tool))
elif tool.name in tool_selected_list:
self.tool_client_mapping[tool.name] = client
tool_schemas.append(mcp2openai(tool))
return tool_schemas
def get_client_with_tool_name(self, tool_name: str):
return self.tool_client_mapping[tool_name]
def _load_config(self, file: str) -> dict[str, Any]:
try:
with open(file) as f:
return json.load(f)
except FileNotFoundError:
logger.warning(f'the "{file}" file was not found')
except Exception:
logger.error(f'there was an error reading the "{file}" file')
return {}
ClientManager = MCPClientManager()
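# Illustrative config sketch (not part of the original module). `_load_config` expects a JSON
# file keyed by "mcpServers"; entries carrying an "auth_token" are connected through an
# SSETransport with a Bearer header, while the remaining entries are handed to fastmcp.Client
# as a single config dict. The server names, URL, and stdio fields below are hypothetical:
#
# {
#     "mcpServers": {
#         "search": {"url": "https://example.com/mcp/sse", "auth_token": "<token>"},
#         "local": {"command": "python", "args": ["my_mcp_server.py"]}
#     }
# }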
| {
"repo_id": "verl-project/verl",
"file_path": "verl/tools/utils/mcp_clients/McpClientManager.py",
"license": "Apache License 2.0",
"lines": 79,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/tools/utils/mcp_clients/utils.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
import time
from mcp import Tool
logger = logging.getLogger(__file__)
class TokenBucket:
def __init__(self, rate_limit: float):
self.rate_limit = rate_limit # tokens per second
self.tokens = rate_limit
self.last_update = time.time()
self.lock = threading.Lock()
def acquire(self) -> bool:
with self.lock:
now = time.time()
# Add new tokens based on time elapsed
new_tokens = (now - self.last_update) * self.rate_limit
self.tokens = min(self.rate_limit, self.tokens + new_tokens)
self.last_update = now
if self.tokens >= 1:
self.tokens -= 1
return True
return False
def mcp2openai(mcp_tool: Tool) -> dict:
"""Convert a MCP Tool to an OpenAI ChatCompletionTool."""
openai_format = {
"type": "function",
"function": {
"name": mcp_tool.name,
"description": mcp_tool.description,
"parameters": mcp_tool.inputSchema,
"strict": False,
},
}
if not openai_format["function"]["parameters"].get("required", None):
openai_format["function"]["parameters"]["required"] = []
return openai_format
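# Illustrative sketch (not part of the original module): how the helpers above are typically
# used. The 2-requests-per-second rate below is arbitrary.
#
# bucket = TokenBucket(rate_limit=2.0)
# if bucket.acquire():
#     ...  # perform one rate-limited call; otherwise back off and retry
#
# mcp2openai(tool) returns a dict shaped like:
# {"type": "function", "function": {"name": ..., "description": ..., "parameters": {...}, "strict": False}}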
| {
"repo_id": "verl-project/verl",
"file_path": "verl/tools/utils/mcp_clients/utils.py",
"license": "Apache License 2.0",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:verl/tools/utils/tool_registry.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import importlib
import logging
import os
import sys
import threading
from enum import Enum
from omegaconf import OmegaConf
from verl.tools.schemas import OpenAIFunctionToolSchema
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))
class ToolType(Enum):
NATIVE = "native"
MCP = "mcp"
async def initialize_mcp_tool(tool_cls, tool_config) -> list:
from verl.tools.utils.mcp_clients.McpClientManager import ClientManager
tool_list = []
mcp_servers_config_path = tool_config.mcp.mcp_servers_config_path
tool_selected_list = tool_config.mcp.tool_selected_list if "tool_selected_list" in tool_config.mcp else None
await ClientManager.initialize(mcp_servers_config_path, tool_config.config.rate_limit)
# Wait for MCP client to be ready
max_retries = 10
retry_interval = 2 # seconds
for i in range(max_retries):
tool_schemas = await ClientManager.fetch_tool_schemas(tool_selected_list)
if tool_schemas:
break
if i < max_retries - 1:
logger.debug(f"Waiting for MCP client to be ready, attempt {i + 1}/{max_retries}")
await asyncio.sleep(retry_interval)
else:
raise RuntimeError("Failed to initialize MCP tools after maximum retries")
# mcp registry
assert len(tool_schemas), "mcp tool is empty"
for tool_schema_dict in tool_schemas:
logger.debug(f"tool_schema_dict: {tool_schema_dict}")
tool_schema = OpenAIFunctionToolSchema.model_validate(tool_schema_dict)
tool = tool_cls(
config=OmegaConf.to_container(tool_config.config, resolve=True),
tool_schema=tool_schema,
)
tool_list.append(tool)
return tool_list
def get_tool_class(cls_name):
module_name, class_name = cls_name.rsplit(".", 1)
if module_name not in sys.modules:
spec = importlib.util.find_spec(module_name)
module = importlib.util.module_from_spec(spec)
sys.modules[module_name] = module
spec.loader.exec_module(module)
else:
module = sys.modules[module_name]
tool_cls = getattr(module, class_name)
return tool_cls
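# Illustrative sketch (not part of the original module): `get_tool_class` resolves a dotted
# "module.ClassName" path, for example the MCP search tool shipped in this repo:
#
# tool_cls = get_tool_class("verl.tools.mcp_search_tool.MCPSearchTool")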
def initialize_tools_from_config(tools_config_file):
"""Initialize tools from config file.
Supports both NATIVE and MCP tool types. For MCP tools, a temporary event loop
is created only when needed and properly closed after use to prevent memory leaks.
"""
tools_config = OmegaConf.load(tools_config_file)
tool_list = []
# Lazy initialization for MCP support - only create event loop when needed
tmp_event_loop = None
thread = None
def get_mcp_event_loop():
"""Lazily create event loop and thread for MCP tools."""
nonlocal tmp_event_loop, thread
if tmp_event_loop is None:
tmp_event_loop = asyncio.new_event_loop()
thread = threading.Thread(target=tmp_event_loop.run_forever, name="mcp tool list fetcher", daemon=True)
thread.start()
return tmp_event_loop
def run_coroutine(coroutine):
"""Run coroutine in the MCP event loop."""
loop = get_mcp_event_loop()
future = asyncio.run_coroutine_threadsafe(coroutine, loop)
return future.result()
try:
for tool_config in tools_config.tools:
cls_name = tool_config.class_name
tool_type = ToolType(tool_config.config.type)
tool_cls = get_tool_class(cls_name)
match tool_type:
case ToolType.NATIVE:
if tool_config.get("tool_schema", None) is None:
tool_schema = None
else:
tool_schema_dict = OmegaConf.to_container(tool_config.tool_schema, resolve=True)
tool_schema = OpenAIFunctionToolSchema.model_validate(tool_schema_dict)
tool = tool_cls(
config=OmegaConf.to_container(tool_config.config, resolve=True),
tool_schema=tool_schema,
)
tool_list.append(tool)
case ToolType.MCP:
mcp_tools = run_coroutine(initialize_mcp_tool(tool_cls, tool_config))
tool_list.extend(mcp_tools)
case _:
raise NotImplementedError
finally:
# Properly cleanup event loop if it was created
if tmp_event_loop is not None:
# stop first and then close
tmp_event_loop.call_soon_threadsafe(tmp_event_loop.stop)
if thread is not None and thread.is_alive():
thread.join(timeout=5.0)
tmp_event_loop.close()
return tool_list
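# Illustrative config sketch (not part of the original module): a minimal tools config file for
# an MCP tool, based on the fields read above. Paths and values are hypothetical:
#
# tools:
#   - class_name: "verl.tools.mcp_search_tool.MCPSearchTool"
#     config:
#       type: mcp
#       rate_limit: 10.0
#       timeout: 30
#     mcp:
#       mcp_servers_config_path: "/path/to/mcp_servers.json"
#       tool_selected_list: ["search"]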
| {
"repo_id": "verl-project/verl",
"file_path": "verl/tools/utils/tool_registry.py",
"license": "Apache License 2.0",
"lines": 121,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:tests/special_sanity/check_device_api_usage.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This CI test checks for irregular device API usage; prefer the APIs in `verl/utils/device.py`.
Search targets include .py files in verl/recipe and verl/verl.
Files that legitimately must contain the ".cuda", "cuda" or "nccl" keywords are pre-defined in the whitelists below.
"""
import os
from argparse import ArgumentParser
from pathlib import Path
# directory or file path must contain keyword ".cuda" or "cuda"
CUDA_KEYWORD_CHECK_WHITELIST = [
"verl/utils/device.py",
"verl/utils/torch_functional.py", # import flash_attn only on cuda
"verl/utils/profiler/nvtx_profile.py", # appear in NsightSystemsProfiler
"verl/utils/profiler/torch_profile.py", # appear in TorchProfiler
"verl/utils/profiler/config.py", # appear in TorchProfilerToolConfig
"verl/utils/kernel/linear_cross_entropy.py", # appear in nvidia nvtx
"verl/utils/rendezvous/ray_backend.py", # appear in cupy importance
"verl/single_controller/ray/base.py", # appear in default device_name
"verl/trainer/ppo/ray_trainer.py", # appear in default device_name
"verl/experimental/transfer_queue/ray_trainer.py", # appear in docstring as default device_name
"verl/experimental/one_step_off_policy/ray_trainer.py", # appear in docstring as default device_name
"verl/utils/reward_score/sandbox_fusion/utils.py", # appear in sandbox language type
"verl/third_party/torch/distributed/_state_dict_utils.py", # torch monkey patch fixes
"verl/third_party/torch/distributed/checkpoint/state_dict.py", # torch monkey patch fixes
"verl/workers/engine/base.py", # appear in default device_name
"verl/workers/engine/utils.py", # appear in enable_full_determinism
"verl/workers/engine/fsdp/transformer_impl.py", # appear in default device_name
"verl/workers/engine/veomni/transformer_impl.py", # appear in default device_name
"verl/workers/engine/torchtitan/transformer_impl.py", # appear in default device_name
"verl/workers/engine/torchtitan/utils.py", # appear in torch.cuda.empty_cache()
"verl/workers/rollout/vllm_rollout/vllm_async_server.py", # appear in config.cudagraph_capture_sizes
"verl/workers/rollout/sglang_rollout/async_sglang_server.py", # manually set CUDA_VISIBLE_DEVICES
"verl/workers/rollout/trtllm_rollout/trtllm_async_server.py", # appear in config.cudagraph_capture_sizes
"verl/workers/rollout/replica.py", # appear in default device_name
"verl/checkpoint_engine", # checkpoint engine backend are device specific
]
# directory or file path must contain keyword "nccl"
NCCL_KEYWORD_CHECK_WHITELIST = [
"verl/utils/device.py",
"verl/third_party/sglang/parallel_state.py", # appear in default backend
"verl/recipe/fully_async_policy/param_sync.py", # fully_async_policy in default backend
]
SEARCH_WHITELIST = CUDA_KEYWORD_CHECK_WHITELIST + NCCL_KEYWORD_CHECK_WHITELIST
SEARCH_KEYWORDS = [".cuda", '"cuda"', '"nccl"']
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--directory", "-d", required=True, type=str)
args = parser.parse_args()
directory_in_str = args.directory
pathlist = Path(directory_in_str).glob("**/*.py")
for path in pathlist:
path_in_str = str(path.absolute())
# judge whether current path is in pre-defined search whitelist or not.
path_in_whitelist = False
for sw in SEARCH_WHITELIST:
# for easy debugging in non-linux system
sw = sw.replace("/", os.sep)
if sw in path_in_str:
print(f"[SKIP] File {path_in_str} is in device api usage check whitelist, checking is skipped.")
path_in_whitelist = True
break
if path_in_whitelist:
continue
with open(path_in_str, encoding="utf-8") as f:
file_content = f.read()
find_invalid_device_management = False
for sk in SEARCH_KEYWORDS:
if sk in file_content:
find_invalid_device_management = True
break
        print(
            f"[CHECK] File {path_in_str} is detected for device api usage check, check result: "
            f"{'success' if not find_invalid_device_management else f'failed because {sk} was detected'}."
        )
assert not find_invalid_device_management, (
f'file {path_in_str} contains .cuda/"cuda"/"nccl" usage, please use api in '
f"verl/utils/device.py directly."
)
| {
"repo_id": "verl-project/verl",
"file_path": "tests/special_sanity/check_device_api_usage.py",
"license": "Apache License 2.0",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:verl/models/transformers/npu_patch.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Copyright 2025 The Qwen Team and The HuggingFace Inc. team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F
import torch_npu
from torch import nn
from transformers.activations import ACT2FN
from transformers.models.qwen2 import modeling_qwen2
from transformers.models.qwen2_5_vl import modeling_qwen2_5_vl
from transformers.models.qwen3 import modeling_qwen3
from transformers.models.qwen3_moe import modeling_qwen3_moe
from transformers.models.qwen3_next import modeling_qwen3_next
from transformers.models.qwen3_vl import modeling_qwen3_vl
from transformers.models.qwen3_vl_moe import modeling_qwen3_vl_moe
from transformers.utils import logging
logger = logging.get_logger(__name__)
def rms_norm_forward_npu(self, x):
"""NPU optimized implementation for RMSNorm."""
if x.dtype != self.weight.dtype:
x = x.to(self.weight.dtype)
return torch_npu.npu_rms_norm(x, self.weight, epsilon=self.variance_epsilon)[0]
def silu_forward_npu(self, hidden_state):
"""NPU optimized implementation for SiLU in `forward` func in MLP layer."""
gate_up = torch.cat((self.gate_proj(hidden_state), self.up_proj(hidden_state)), dim=-1)
return self.down_proj(torch_npu.npu_swiglu(gate_up, dim=-1))
def apply_rotary_pos_emb_npu(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""NPU optimized implementation for RoPE."""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = torch_npu.npu_rotary_mul(q, cos, sin)
k_embed = torch_npu.npu_rotary_mul(k, cos, sin)
return q_embed.to(q.dtype), k_embed.to(k.dtype)
def qwen3_next_rms_norm_forward_npu(self, x):
return torch_npu.npu_rms_norm(x.float(), 1.0 + self.weight.float(), epsilon=self.eps)[0].type_as(x)
def qwen3_next_rms_norm_forward_gated_npu(self, hidden_states, gate=None):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
hidden_states = torch_npu.npu_rms_norm(hidden_states, self.weight.float(), epsilon=self.variance_epsilon)[0]
hidden_states = hidden_states * F.silu(gate.to(torch.float32))
return hidden_states.to(input_dtype)
def qwen3_next_apply_rotary_pos_emb_npu(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
    # Split q/k into the rotary part and the pass-through part; they are concatenated back after rotation
rotary_dim = cos.shape[-1]
q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]
q_embed = torch_npu.npu_rotary_mul(q_rot, cos, sin).to(q.dtype)
k_embed = torch_npu.npu_rotary_mul(k_rot, cos, sin).to(k.dtype)
q_embed = torch.cat([q_embed, q_pass], dim=-1)
k_embed = torch.cat([k_embed, k_pass], dim=-1)
return q_embed, k_embed
class NPUGmmFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, x, weight, group_list, group_list_type=1):
"""
Grouped Matmul(GMM) for Ascend NPU.
Args:
x (torch.Tensor): Input tensor, shape (tokens_num * top_k, hidden_size)
weight (torch.Tensor): Expert weights, shape (n_experts, hidden_size, intermediate_size)
group_list (torch.Tensor): Expert token counts, shape (n_experts,)
- type 0: cumsum of tokens per expert
- type 1: direct tokens per expert (default)
"""
ctx.save_for_backward(x, weight)
ctx.group_list = group_list
ctx.group_list_type = group_list_type
output = torch_npu.npu_grouped_matmul(
[x], [weight], bias=None, group_list=group_list, split_item=2, group_type=0, group_list_type=group_list_type
)[0]
return output
@staticmethod
def backward(ctx, grad_output):
x, weight = ctx.saved_tensors
group_list = ctx.group_list
group_list_type = ctx.group_list_type
dx = torch_npu.npu_grouped_matmul(
[grad_output],
[weight.transpose(1, 2)],
bias=None,
group_list=group_list,
split_item=2,
group_type=0,
group_list_type=group_list_type,
)[0]
dw = torch_npu.npu_grouped_matmul(
[x.transpose(0, 1)],
[grad_output],
bias=None,
group_list=group_list,
split_item=3,
group_type=2,
group_list_type=group_list_type,
)[0]
return dx, dw, None, None
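# Illustrative note (not part of the original module): for three experts receiving 3, 1 and 4
# tokens respectively, group_list is [3, 1, 4] with group_list_type=1 (the default used above)
# and the cumulative form [3, 4, 8] with group_list_type=0.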
def _qwen3_sparse_moe_routed_forward_npu(self, hidden_states: torch.Tensor):
"""
Shared NPU routed-expert path for Qwen3Moe/Qwen3Next sparse MoE blocks.
Returns:
tuple: (flattened_input, routed_hidden_states, router_logits)
"""
hidden_dim = hidden_states.shape[-1]
hidden_states = hidden_states.view(-1, hidden_dim)
# router_logits: (batch * sequence_length, n_experts)
router_logits = self.gate(hidden_states)
routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
if self.norm_topk_prob: # only diff with mixtral sparse moe block!
routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
# we cast back to the input dtype
routing_weights = routing_weights.to(hidden_states.dtype)
    # Instead of looping over the experts one by one, stack all expert weights and dispatch the
    # tokens through the NPU grouped matmul below.
input_dtype = hidden_states.dtype
up_weight_list = [e.up_proj.weight for e in self.experts]
gate_weight_list = [e.gate_proj.weight for e in self.experts]
down_weight_list = [e.down_proj.weight for e in self.experts]
w1 = torch.stack(up_weight_list).transpose(1, 2).to(input_dtype)
w2 = torch.stack(gate_weight_list).transpose(1, 2).to(input_dtype)
w3 = torch.stack(down_weight_list).transpose(1, 2).to(input_dtype)
permuted_tokens, row_ids_map = torch_npu.npu_moe_token_permute(hidden_states, selected_experts.to(torch.int32))
tokens_per_expert = torch.histc(selected_experts, bins=self.num_experts, min=0, max=self.num_experts)
up_res = NPUGmmFunction.apply(permuted_tokens, w1, tokens_per_expert)
gate_res = NPUGmmFunction.apply(permuted_tokens, w2, tokens_per_expert)
act_res = torch_npu.npu_swiglu(torch.cat([gate_res, up_res], dim=-1))
down_res = NPUGmmFunction.apply(act_res, w3, tokens_per_expert)
routed_hidden_states = torch_npu.npu_moe_token_unpermute(down_res, row_ids_map, probs=routing_weights)
return hidden_states, routed_hidden_states, router_logits
def qwen3_moe_sparse_moe_block_forward_npu(self, hidden_states: torch.Tensor) -> torch.Tensor:
"""NPU optimized implementation for `forward` in Qwen3MoeSparseMoeBlock."""
output_shape = hidden_states.shape
_, routed_hidden_states, router_logits = _qwen3_sparse_moe_routed_forward_npu(self, hidden_states)
final_hidden_states = routed_hidden_states.reshape(output_shape)
return final_hidden_states, router_logits
def qwen3_next_sparse_moe_block_forward_npu(self, hidden_states: torch.Tensor) -> torch.Tensor:
"""NPU optimized implementation for `forward` in Qwen3NextSparseMoeBlock."""
output_shape = hidden_states.shape
hidden_states, routed_hidden_states, router_logits = _qwen3_sparse_moe_routed_forward_npu(self, hidden_states)
shared_expert_output = self.shared_expert(hidden_states)
shared_expert_output = torch.sigmoid(self.shared_expert_gate(hidden_states)) * shared_expert_output
final_hidden_states = (routed_hidden_states + shared_expert_output).reshape(output_shape)
return final_hidden_states, router_logits
class NPUQwen3VLMoeTextExperts(nn.Module):
"""NPU optimized implementation for Qwen3VLMoeTextExperts."""
def __init__(self, config):
super().__init__()
self.num_experts = config.num_experts
self.intermediate_size = config.moe_intermediate_size
self.hidden_size = config.hidden_size
self.expert_dim = self.intermediate_size
self.gate_up_proj = nn.Parameter(torch.empty(self.num_experts, self.hidden_size, 2 * self.expert_dim))
self.down_proj = nn.Parameter(torch.empty((self.num_experts, self.expert_dim, self.hidden_size)))
self.act_fn = ACT2FN[config.hidden_act]
def forward(
self, hidden_states: torch.Tensor, routing_weights: torch.Tensor, router_indices: torch.Tensor
) -> torch.Tensor:
"""
        When training, it is more efficient to just loop over the experts and compute the output for each expert,
        as otherwise the memory would explode.
        For inference, we can sacrifice some memory and compute the output for all experts at once
        by repeating the inputs.
Args:
hidden_states (torch.Tensor): (batch_size * token_num, hidden_size)
routing_weights (torch.Tensor): (batch_size * token_num, num_experts)
router_indices (torch.Tensor): (batch_size * token_num, top_k)
Returns:
torch.Tensor
"""
batch_size = hidden_states.shape[0]
hidden_states = hidden_states.reshape(-1, self.hidden_size) # (num_tokens, hidden_size)
if self.training:
permuted_hidden_states, row_ids_map = torch_npu.npu_moe_token_permute(
hidden_states, router_indices.to(torch.int32)
)
tokens_per_expert = torch.histc(router_indices, bins=self.num_experts, min=0, max=self.num_experts)
intermediate_hidden_states = NPUGmmFunction.apply(
permuted_hidden_states, self.gate_up_proj, tokens_per_expert
)
intermediate_activations = torch_npu.npu_swiglu(intermediate_hidden_states, dim=-1)
output = NPUGmmFunction.apply(intermediate_activations, self.down_proj, tokens_per_expert)
num_tokens = hidden_states.shape[0]
top_k = router_indices.shape[1]
batch_idx = torch.arange(num_tokens, device=routing_weights.device)
batch_idx = batch_idx.unsqueeze(1).expand(-1, top_k)
selected_probs = routing_weights[batch_idx, router_indices]
next_states = torch_npu.npu_moe_token_unpermute(output, row_ids_map, probs=selected_probs)
next_states = next_states.view(batch_size, -1, self.hidden_size)
else:
hidden_states = hidden_states.repeat(self.num_experts, 1)
hidden_states = hidden_states.view(self.num_experts, -1, self.hidden_size)
gate_up = torch.bmm(hidden_states, self.gate_up_proj)
gate, up = gate_up.chunk(2, dim=-1) # not supported for DTensors
next_states = torch.bmm((up * self.act_fn(gate)), self.down_proj)
next_states = next_states.reshape(self.num_experts, batch_size, -1, self.hidden_size)
next_states = (
next_states * routing_weights.transpose(0, 1).view(self.num_experts, batch_size, -1)[..., None]
)
next_states = next_states.sum(dim=0)
return next_states
class NPUQwen3VLMoeTextSparseMoeBlock(nn.Module):
"""NPU optimized implementation for Qwen3VLMoeTextSparseMoeBlock."""
def __init__(self, config):
super().__init__()
self.hidden_size = config.hidden_size
self.num_experts = config.num_experts
self.top_k = config.num_experts_per_tok
self.gate = nn.Linear(config.hidden_size, config.num_experts, bias=False)
self.experts = NPUQwen3VLMoeTextExperts(config)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
batch_size = hidden_states.shape[0]
hidden_states = hidden_states.reshape(-1, self.hidden_size)
router_logits = self.gate(hidden_states)
routing_weights = torch.nn.functional.softmax(router_logits, dim=-1, dtype=torch.float)
routing_weights, router_indices = torch.topk(routing_weights, self.top_k, dim=-1)
routing_weights = routing_weights / routing_weights.sum(dim=-1, keepdim=True)
routing_weights = routing_weights.to(router_logits.dtype)
hidden_states = hidden_states.reshape(batch_size, -1, self.hidden_size)
if not self.training:
routing_weights = torch.zeros_like(router_logits).scatter_(1, router_indices, routing_weights)
routed_out = self.experts(hidden_states, routing_weights, router_indices)
return routed_out
# Patches for Qwen2 Model
modeling_qwen2.Qwen2RMSNorm.forward = rms_norm_forward_npu
modeling_qwen2.Qwen2MLP.forward = silu_forward_npu
modeling_qwen2.apply_rotary_pos_emb = apply_rotary_pos_emb_npu
# Patches for Qwen2.5-VL Model
modeling_qwen2_5_vl.Qwen2RMSNorm.forward = rms_norm_forward_npu
modeling_qwen2_5_vl.Qwen2_5_VLMLP.forward = silu_forward_npu
# Patches for Qwen3 Model
modeling_qwen3.Qwen3RMSNorm.forward = rms_norm_forward_npu
modeling_qwen3.Qwen3MLP.forward = silu_forward_npu
modeling_qwen3.apply_rotary_pos_emb = apply_rotary_pos_emb_npu
# Patches for Qwen3 MoE Model
modeling_qwen3_moe.Qwen3MoeRMSNorm.forward = rms_norm_forward_npu
modeling_qwen3_moe.Qwen3MoeSparseMoeBlock.forward = qwen3_moe_sparse_moe_block_forward_npu
modeling_qwen3_moe.apply_rotary_pos_emb = apply_rotary_pos_emb_npu
# Patches for Qwen3 VL Model
modeling_qwen3_vl.Qwen3VLTextRMSNorm.forward = rms_norm_forward_npu
modeling_qwen3_vl.Qwen3VLTextMLP.forward = silu_forward_npu
# Patches for Qwen3-VL MoE Model
modeling_qwen3_vl_moe.Qwen3VLMoeTextSparseMoeBlock = NPUQwen3VLMoeTextSparseMoeBlock
modeling_qwen3_vl_moe.Qwen3VLMoeTextRMSNorm.forward = rms_norm_forward_npu
modeling_qwen3_vl_moe.apply_rotary_pos_emb = apply_rotary_pos_emb_npu
# Patches for Qwen3 Next Model
modeling_qwen3_next.Qwen3NextSparseMoeBlock.forward = qwen3_next_sparse_moe_block_forward_npu
modeling_qwen3_next.Qwen3NextRMSNormGated.forward = qwen3_next_rms_norm_forward_gated_npu
modeling_qwen3_next.Qwen3NextRMSNorm.forward = qwen3_next_rms_norm_forward_npu
modeling_qwen3_next.apply_rotary_pos_emb = qwen3_next_apply_rotary_pos_emb_npu
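# Note (illustrative, hypothetical usage): the assignments above monkey-patch the upstream transformers
# modules in place, so any model built after this module has been imported picks up the NPU kernels:
#
#   import verl.models.transformers.npu_patch  # noqa: F401  (apply the patches first)
#   model = transformers.AutoModelForCausalLM.from_pretrained("Qwen/Qwen3-0.6B")  # placeholder model id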
| {
"repo_id": "verl-project/verl",
"file_path": "verl/models/transformers/npu_patch.py",
"license": "Apache License 2.0",
"lines": 261,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
verl-project/verl:tests/special_sanity/validate_structure.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python3
"""
Validate that test file subfolders mirror the top-level package layout.
Usage examples
--------------
# Typical run (defaults: impl_root=verl, tests_root=tests)
python tests/special_sanity/validate_structure.py
# Custom layout and extra allowed folders
python tests/special_sanity/validate_structure.py \
--impl-root verl \
--tests-root tests \
--allow-dirs special_e2e special_sanity special_standalone special_distributed
"""
from __future__ import annotations
import argparse
import sys
from pathlib import Path
def discover_allowed_modules(impl_root: Path, extra: list[str]) -> set[str]:
"""Return the set of first-level directories that tests may live under."""
allowed = {p.name for p in impl_root.iterdir() if p.is_dir()}
allowed.update(extra)
return allowed
def find_violations(tests_root: Path, allowed: set[str], allowed_files: list[str]) -> list[str]:
"""Return a list of error strings for test files in the wrong place."""
errors: list[str] = []
for test_file in tests_root.rglob("test*.py"):
if str(test_file) in allowed_files:
continue
rel_parts = test_file.relative_to(tests_root).parts
if len(rel_parts) < 2:
errors.append(f"{test_file}: must be inside one of {sorted(allowed)} (not at tests root)")
continue
first_folder = rel_parts[0]
if first_folder not in allowed:
errors.append(
f"{test_file}: subfolder '{first_folder}' under tests/ is not an allowed module. "
f"The valid ones are: {sorted(allowed)}"
)
return errors
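# Example (hypothetical paths, not part of the original script): with allowed={"trainer", "utils"},
# find_violations would flag
#   tests/test_foo.py        -> "must be inside one of ['trainer', 'utils'] (not at tests root)"
#   tests/misc/test_bar.py   -> "subfolder 'misc' under tests/ is not an allowed module. ..."
# while tests/trainer/test_ppo.py passes with no error.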
def main() -> None:
parser = argparse.ArgumentParser(description="Check that test files follow tests/<module>/… layout.")
parser.add_argument(
"--impl-root",
type=Path,
default="verl",
help="Implementation root (default: my_project)",
)
parser.add_argument(
"--tests-root",
type=Path,
default="tests",
help="Root of test tree (default: tests)",
)
parser.add_argument(
"--allow-dirs",
nargs="*",
default=["special_e2e", "special_sanity", "special_standalone", "special_distributed"],
help="Extra top-level test folders that are exempt from the rule",
)
parser.add_argument(
"--allow-files",
nargs="*",
default=[
"tests/test_protocol_on_cpu.py",
"tests/test_base_config_on_cpu.py",
"tests/test_protocol_v2_on_cpu.py",
],
help="Extra top-level test folders that are exempt from the rule",
)
args = parser.parse_args()
if not args.impl_root.is_dir():
raise Exception(f"Implementation root '{args.impl_root}' does not exist.")
if not args.tests_root.is_dir():
raise Exception(f"Tests root '{args.tests_root}' does not exist.")
allowed = discover_allowed_modules(args.impl_root, args.allow_dirs)
violations = find_violations(args.tests_root, allowed, args.allow_files)
if violations:
print("❌ Test layout violations found:\n", file=sys.stderr)
for err in violations:
print(" -", err, file=sys.stderr)
print(
f"\nGuideline:\n Place each test file under tests/<module_name>/…\n where <module_name> is "
f"one of the top-level packages inside '{args.impl_root}', or is explicitly listed via --allow-dirs.\n",
file=sys.stderr,
)
raise Exception("❌ Test layout violations found.")
print("✅ Tests folder structure looks good.")
if __name__ == "__main__":
main()
| {
"repo_id": "verl-project/verl",
"file_path": "tests/special_sanity/validate_structure.py",
"license": "Apache License 2.0",
"lines": 102,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:tests/special_sanity/type_coverage_check.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom type annotation check tool.
To inspect the type annotations of functions across the entire codebase, run:
find verl -type f -name "*.py" | xargs -n 1 python3 tests/special_sanity/type_coverage_check.py \
    --all-lines --debug --target-file
"""
import argparse
import ast
import linecache
import subprocess
from pathlib import Path
def get_changed_files() -> list[Path]:
result = subprocess.run(
["git", "diff", "--name-only", "--diff-filter=AM", "origin/main...HEAD"], stdout=subprocess.PIPE, text=True
)
return [Path(f) for f in result.stdout.splitlines() if f.endswith(".py")]
def get_changed_lines(file_path: Path) -> set[int]:
result = subprocess.run(
["git", "diff", "-U0", "origin/main...HEAD", "--", str(file_path)],
stdout=subprocess.PIPE,
text=True,
)
lines: set[int] = set()
for line in result.stdout.splitlines():
if line.startswith("@@"):
for part in line.split():
try:
if part.startswith("+") and "," in part:
start, count = map(int, part[1:].split(","))
lines.update(range(start, start + count))
elif part.startswith("+") and "," not in part:
lines.add(int(part[1:]))
except Exception:
                        # (vermouth1992) There are many edge cases here because '+' can also appear in the changed code itself
pass
return lines
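# Worked example (hypothetical diff output, for illustration): given a hunk header
#   @@ -10,2 +12,3 @@
# the loop above finds the token "+12,3", parses start=12, count=3, and records lines {12, 13, 14}.
# A header such as "@@ -5,0 +6 @@" (a single added line, no comma) takes the second branch and
# records only {6}.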
CHECK_SUCCESS = 0
CHECK_WARNING = 1
CHECK_FAILURE = -1
def should_check_type(arg_name: str) -> bool:
if arg_name in ("self", "cls"):
return False
if arg_name.startswith("*"):
return False
return True
def has_type_annotations(node: ast.AST, debug: bool = False) -> int:
if isinstance(node, ast.FunctionDef):
is_private = node.name.startswith("_")
if node.args.vararg is not None or node.args.kwarg is not None:
return CHECK_SUCCESS
has_ann = (
all(arg.annotation is not None for arg in node.args.args if should_check_type(arg.arg))
and node.returns is not None
)
if has_ann or is_private:
return CHECK_SUCCESS
else:
if debug:
print(node, [(arg.annotation, arg.arg) for arg in node.args.args if should_check_type(arg.arg)])
return CHECK_FAILURE
return CHECK_SUCCESS
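# Examples (hypothetical functions, for illustration only):
#   def _helper(x): ...                 -> CHECK_SUCCESS  (private names are exempt)
#   def scale(x: float) -> float: ...   -> CHECK_SUCCESS  (arguments and return fully annotated)
#   def scale(x): ...                   -> CHECK_FAILURE  (missing annotations)
#   def scale(*args, **kwargs): ...     -> CHECK_SUCCESS  (vararg/kwarg functions are skipped)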
def check_file(
file_path: Path, changed_lines: set[int], debug: bool = False
) -> tuple[int, int, list[tuple[Path, int, str]], list[tuple[Path, int, str]]]:
with open(file_path) as f:
source: str = f.read()
tree = ast.parse(source, filename=str(file_path))
annotated = 0
total = 0
warning_lines: list[tuple[Path, int, str]] = []
failure_lines: list[tuple[Path, int, str]] = []
for node in ast.walk(tree):
if hasattr(node, "lineno") and node.lineno in changed_lines:
if isinstance(node, ast.FunctionDef | ast.Assign | ast.AnnAssign):
total += 1
result = has_type_annotations(node, debug)
if result == CHECK_SUCCESS or result == CHECK_WARNING:
annotated += 1
if result == CHECK_WARNING:
warning_lines.append(
(file_path, node.lineno, linecache.getline(str(file_path), node.lineno).strip())
)
else:
source_line = linecache.getline(str(file_path), node.lineno).strip()
failure_lines.append((file_path, node.lineno, source_line))
return annotated, total, warning_lines, failure_lines
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument(
"--threshold", type=float, default=0.3, help="Minimum ratio of annotated lines required (0.0 - 1.0)"
)
parser.add_argument("--target-file", type=str, default=None, help="Path to the Python source file to analyse")
parser.add_argument(
"--all-lines",
action="store_true",
help="Check all lines in the file instead of only changed lines based on git",
)
parser.add_argument("--debug", action="store_true", help="Add debugging logs")
args = parser.parse_args()
total_changed = 0
total_annotated = 0
all_warnings: list[tuple[Path, int, str]] = []
all_failures: list[tuple[Path, int, str]] = []
target_files = [args.target_file] if args.target_file is not None else get_changed_files()
for fpath in target_files:
if "tests/" in str(fpath):
continue
        if args.all_lines:
            with open(fpath) as f:
                changed_lines = set(range(1, len(f.readlines()) + 1))
else:
changed_lines = get_changed_lines(fpath)
annotated, total, warning_lines, failure_lines = check_file(fpath, changed_lines, args.debug)
total_annotated += annotated
total_changed += total
all_warnings.extend(warning_lines)
all_failures.extend(failure_lines)
ratio = (total_annotated / total_changed) if total_changed else 1.0
print(
f"🔍 Type coverage on {'all' if args.all_lines else 'changed'} lines: "
f"{total_annotated}/{total_changed} = {ratio:.2%}. Files inspected: {target_files}"
)
if all_warnings:
print("\n⚠️ Suggest Improve: Lines missing type annotations for inputs and outputs:\n")
for fname, lineno, line in all_warnings:
print(f"{fname}:{lineno}: {line}")
if all_failures:
print("⚠️ [ERROR] Lines missing type annotations for inputs and outputs:\n")
for fname, lineno, line in all_failures:
print(f"{fname}:{lineno}: {line}")
if ratio < args.threshold:
print(
f"Please add type annotations for inputs and outputs to meet threshold {args.threshold}. "
f"Cases exempt from checking:"
)
print("1. Private methods.")
print("2. Args with name in ('self', 'cls'), or *args / **kwargs")
print("3. Files under tests/")
raise Exception(f"\n❌ Type coverage below threshold ({args.threshold:.0%}).")
else:
if all_warnings or all_failures:
print("")
print("✅ Type annotation coverage acceptable.\n")
if __name__ == "__main__":
main()
| {
"repo_id": "verl-project/verl",
"file_path": "tests/special_sanity/type_coverage_check.py",
"license": "Apache License 2.0",
"lines": 156,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:tests/special_sanity/validate_imported_docs.py | # Copyright 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
validate_imported_docs.py
Assert that every function or class *explicitly imported* (via
`from <module> import <name>`) in a given Python file has a docstring.
"""
from __future__ import annotations
import argparse
import ast
import importlib
import inspect
import pathlib
import sys
def _parse_args() -> argparse.Namespace:
p = argparse.ArgumentParser(description="Verify that imported functions/classes have docstrings.")
p.add_argument(
"--target-file",
default="verl/trainer/ppo/ray_trainer.py",
help="Path to the Python source file to analyse (e.g. verl/trainer/ppo/ray_trainer.py)",
)
p.add_argument(
"--allow-list",
default=["omegaconf.open_dict"],
help="a list of third_party dependencies that do not have proper docs :(",
)
p.add_argument(
"--project-root",
default=".",
help="Directory to prepend to PYTHONPATH so local packages resolve (default: .)",
)
p.add_argument(
"--quiet",
action="store_true",
help="Suppress success message (still prints errors).",
)
return p.parse_args()
def _import_attr(module_name: str, attr_name: str):
"""Import `module_name` then return `getattr(module, attr_name)`."""
module = importlib.import_module(module_name)
return getattr(module, attr_name)
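# For illustration (standard-library example, not from this repo):
#   _import_attr("os.path", "join") imports os.path and returns os.path.join.
# _check_file below applies the same lookup to every `from X import Y` it finds in the target file.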
def _check_file(py_file: pathlib.Path, project_root: pathlib.Path, allow_list: list[str]) -> list[str]:
"""Return a list of error strings (empty == success)."""
# Ensure local packages resolve
sys.path.insert(0, str(project_root.resolve()))
tree = ast.parse(py_file.read_text(), filename=str(py_file))
problems: list[str] = []
for node in ast.walk(tree):
if not isinstance(node, ast.ImportFrom):
continue
        # Relative imports (level > 0) are reconstructed with their leading dots
module_name = "." * node.level + (node.module or "")
for alias in node.names:
if alias.name == "*":
problems.append(
f"{py_file}:{node.lineno} - wildcard import `from {module_name} import *` cannot be verified."
)
continue
imported_name = alias.name
try:
obj = _import_attr(module_name, imported_name)
except Exception: # pragma: no cover – wide net for import quirks
                # The module cannot be imported for some reason; skip it for now.
                # problems.append(
                #     f"{py_file}:{node.lineno} - could not resolve "
                #     f"`{imported_name}` from `{module_name}` ({exc})"
                # )
                continue
if f"{module_name}.{imported_name}" in allow_list:
continue
if inspect.isfunction(obj) or inspect.isclass(obj):
doc = inspect.getdoc(obj)
if not (doc and doc.strip()):
kind = "class" if inspect.isclass(obj) else "function"
problems.append(
f"{py_file}:{node.lineno} - {kind} `{module_name}.{imported_name}` is missing a docstring."
)
return problems
def main() -> None:
args = _parse_args()
target_path = pathlib.Path(args.target_file).resolve()
project_root = pathlib.Path(args.project_root).resolve()
if not target_path.is_file():
raise Exception(f"❌ Target file not found: {target_path}")
errors = _check_file(target_path, project_root, args.allow_list)
if errors:
print("Docstring verification failed:\n")
print("\n".join(f" • {e}" for e in errors))
raise Exception("❌ Docstring verification failed.")
if not args.quiet:
print(f"✅ All explicitly imported functions/classes in {target_path} have docstrings.")
if __name__ == "__main__":
main()
| {
"repo_id": "verl-project/verl",
"file_path": "tests/special_sanity/validate_imported_docs.py",
"license": "Apache License 2.0",
"lines": 105,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
verl-project/verl:verl/models/transformers/dense_common.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Optional, Union
import torch
from transformers.cache_utils import Cache
from transformers.modeling_outputs import CausalLMOutputWithPast
@dataclass
class CausalLMOutputForPPO(CausalLMOutputWithPast):
log_probs: Optional[torch.FloatTensor] = None
entropy: Optional[torch.FloatTensor] = None
def forward_base_model(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
) -> CausalLMOutputWithPast:
r"""
Copy paste LLaMa's forward
https://github.com/linkedin/Liger-Kernel/blob/main/src/liger_kernel/transformers/model/llama.py
This function should be generic enough for all pure text models.
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
return outputs
def forward_with_torch_backend(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Union["Cache", list[torch.FloatTensor]]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: int | torch.Tensor = 0,
temperature: float = 1.0,
**loss_kwargs,
) -> tuple | CausalLMOutputForPPO:
from verl.utils.experimental.torch_functional import FusedLinearForPPO
outputs = forward_base_model(
self,
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
cache_position=cache_position,
)
hidden_states = outputs[0]
if not return_dict:
raise NotImplementedError("forward_with_torch_backend has to return_dict")
# Loss calculations
if labels is not None:
rolled_labels = torch.roll(labels, shifts=-1, dims=-1)
elif input_ids is not None:
rolled_labels = torch.roll(input_ids, shifts=-1, dims=-1)
else:
raise RuntimeError("To use forward_with_torch_backend, either labels or input_ids must be provided.")
fused_linear_for_ppo = FusedLinearForPPO()
log_probs, entropy = fused_linear_for_ppo.forward(
hidden_states=hidden_states,
vocab_weights=self.lm_head.weight,
input_ids=rolled_labels,
temperature=temperature,
)
return CausalLMOutputForPPO(
log_probs=log_probs,
entropy=entropy,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def forward_with_triton_backend(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Union["Cache", list[torch.FloatTensor]]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: int | torch.Tensor = 0,
temperature: float = 1.0,
**loss_kwargs,
) -> tuple | CausalLMOutputForPPO:
from verl.utils.kernel.linear_cross_entropy import linear_cross_entropy
outputs = forward_base_model(
self,
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
hidden_states = outputs[0]
if not return_dict:
raise NotImplementedError("forward_with_triton_backend has to return_dict")
# Loss calculations
if labels is not None:
rolled_labels = torch.roll(labels, shifts=-1, dims=-1)
elif input_ids is not None:
rolled_labels = torch.roll(input_ids, shifts=-1, dims=-1)
else:
raise RuntimeError("To use forward_with_triton_backend, either labels or input_ids must be provided.")
log_probs, entropy = linear_cross_entropy(
hidden_states,
self.lm_head.weight,
rolled_labels,
temperature,
"none",
)
return CausalLMOutputForPPO(
log_probs=log_probs,
entropy=entropy,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
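# Minimal usage sketch (hedged: the model id and the patching step below are hypothetical; in verl
# these forwards are installed onto the HF causal-LM classes elsewhere in the codebase):
#
#   from transformers import AutoModelForCausalLM
#   model = AutoModelForCausalLM.from_pretrained("some-org/some-causal-lm")  # placeholder model id
#   model.forward = forward_with_torch_backend.__get__(model)                # bind as an instance method
#   out = model(input_ids=input_ids, attention_mask=attention_mask, return_dict=True)
#   out.log_probs, out.entropy  # per-token quantities produced by FusedLinearForPPO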
| {
"repo_id": "verl-project/verl",
"file_path": "verl/models/transformers/dense_common.py",
"license": "Apache License 2.0",
"lines": 167,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |