"""
|
|
|
Rollout with huggingface models.
|
|
|
TODO: refactor this class. Currently, it will hang when using FSDP HybridShard. We should actually create a single
|
|
|
GPU model. Then, get full state_dict and bind the state_dict to the single GPU model. Then, use the single GPU model
|
|
|
to perform generation.
|
|
|
"""

import contextlib

import torch
import torch.distributed
from tensordict import TensorDict
from torch import nn
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from transformers import GenerationConfig

from verl import DataProto
from verl.utils.torch_functional import get_response_mask

from .base import BaseRollout

__all__ = ["HFRollout"]


class HFRollout(BaseRollout):
    def __init__(self, module: nn.Module, config):
        super().__init__()
        self.config = config
        self.module = module

    def generate_sequences(self, prompts: DataProto) -> DataProto:
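        # Split the batch into micro-batches so generation fits in memory; fall back to a
        # single chunk when micro_batch_size is not configured.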
        batch_size = prompts.batch.batch_size[0]
        num_chunks = max(batch_size // self.config.get("micro_batch_size", batch_size), 1)
        batch_prompts = prompts.chunk(chunks=num_chunks)
        output = [self._generate_minibatch(p) for p in batch_prompts]
        output = DataProto.concat(output)
        return output

    @torch.no_grad()
    def _generate_minibatch(self, prompts: DataProto) -> DataProto:
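        # Per-batch meta_info overrides take precedence over the static rollout config.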
        do_sample = prompts.meta_info.get("do_sample", self.config.do_sample)
        is_validate = prompts.meta_info.get("validate", False)

        temperature = prompts.meta_info.get("temperature", self.config.temperature)
        response_length = prompts.meta_info.get("response_length", self.config.response_length)
        top_p = prompts.meta_info.get("top_p", self.config.get("top_p", 1.0))
        top_k = max(0, prompts.meta_info.get("top_k", self.config.get("top_k", 0)))

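        # Three sampling regimes: greedy decoding, validation sampling with its own
        # hyperparameters, and regular rollout sampling with n return sequences per prompt.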
        if not do_sample:
            kwargs = {
                "do_sample": False,
                "num_beams": 1,
            }
        elif is_validate:
            kwargs = {
                "do_sample": True,
                "num_beams": 1,
                "top_k": max(0, self.config.val_kwargs.top_k),
                "top_p": self.config.val_kwargs.top_p,
                "temperature": self.config.val_kwargs.temperature,
                "num_return_sequences": 1,
            }
        else:
            kwargs = {
                "do_sample": True,
                "num_beams": 1,
                "top_p": top_p,
                "top_k": top_k,
                "temperature": temperature,
                "num_return_sequences": self.config.n,
            }

        generation_config = GenerationConfig(**kwargs)

        idx = prompts.batch["input_ids"]
        prompt_length = idx.size(1)
        attention_mask = prompts.batch["attention_mask"]
        position_ids = prompts.batch["position_ids"]

        eos_token_id = prompts.meta_info["eos_token_id"]
        pad_token_id = prompts.meta_info["pad_token_id"]

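        # Switch to eval mode for generation. Under FSDP, summon the full (unsharded)
        # parameters onto each rank so HuggingFace generate() can run; writeback=False
        # discards the gathered copy afterwards, and recurse=False gathers only at the root.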
        self.module.eval()
        param_ctx = contextlib.nullcontext()

        if isinstance(self.module, FSDP):
            param_ctx = FSDP.summon_full_params(self.module, writeback=False, recurse=False)
        with param_ctx, torch.autocast(device_type="cuda", dtype=torch.bfloat16):
            output = self.module.generate(
                input_ids=idx,
                attention_mask=attention_mask,
                do_sample=do_sample,
                max_new_tokens=response_length,
                eos_token_id=eos_token_id,
                pad_token_id=pad_token_id,
                generation_config=generation_config,
                output_scores=False,
                return_dict_in_generate=True,
                use_cache=True,
            )

        seq = output.sequences
        generated_batch_size = seq.size(0)

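        # generate() stops early once every sequence has emitted EOS, so the output can be
        # shorter than prompt_length + response_length; right-pad with pad_token_id to
        # restore a fixed sequence length.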
        sequence_length = prompt_length + self.config.response_length
        delta_length = sequence_length - seq.shape[1]

        if delta_length > 0:
            delta_tokens = torch.ones(size=(generated_batch_size, delta_length), device=seq.device, dtype=seq.dtype)
            delta_tokens = pad_token_id * delta_tokens
            seq = torch.cat((seq, delta_tokens), dim=1)

        assert seq.shape[1] == sequence_length

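        # When more than one response is sampled per prompt, generate() tiles the output
        # batch, so the prompt-side tensors must be tiled to match.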
        num_return_sequences = kwargs.get("num_return_sequences", 1)
        if num_return_sequences > 1:
            position_ids = position_ids.repeat_interleave(num_return_sequences, dim=0)
            attention_mask = attention_mask.repeat_interleave(num_return_sequences, dim=0)

        prompt = seq[:, :prompt_length]
        response = seq[:, prompt_length:]

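        # Extend position_ids past the prompt: response positions continue from the last
        # prompt position on each row.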
        response_length = response.size(1)
        delta_position_id = torch.arange(1, response_length + 1, device=position_ids.device)
        delta_position_id = delta_position_id.unsqueeze(0).repeat(generated_batch_size, 1)

        response_position_ids = position_ids[:, -1:] + delta_position_id
        position_ids = torch.cat([position_ids, response_position_ids], dim=-1)

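        # Mask out everything after the first EOS in each response so that padding does not
        # contribute to downstream loss or log-prob computations.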
        response_attention_mask = get_response_mask(response_id=response, eos_token=eos_token_id, dtype=attention_mask.dtype)
        attention_mask = torch.cat((attention_mask, response_attention_mask), dim=-1)

        batch = TensorDict(
            {
                "prompts": prompt,
                "responses": response,
                "input_ids": seq,
                "attention_mask": attention_mask,
                "position_ids": position_ids,
            },
            batch_size=generated_batch_size,
        )

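        # Release cached CUDA blocks left over from generation before training resumes.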
        torch.cuda.empty_cache()

        self.module.train()
        return DataProto(batch=batch)