# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Rollout with huggingface models.
TODO: refactor this class. Currently, it will hang when using FSDP HybridShard. We should actually create a single
GPU model. Then, get full state_dict and bind the state_dict to the single GPU model. Then, use the single GPU model
to perform generation.
"""

import contextlib

import torch
import torch.distributed
from tensordict import TensorDict
from torch import nn
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from transformers import GenerationConfig

from verl import DataProto
from verl.utils.torch_functional import get_response_mask

from .base import BaseRollout

__all__ = ["HFRollout"]


class HFRollout(BaseRollout):
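    """Rollout that generates sequences by calling HuggingFace ``generate`` directly on the given module."""
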
    def __init__(self, module: nn.Module, config):
        super().__init__()
        self.config = config
        self.module = module

    def generate_sequences(self, prompts: DataProto) -> DataProto:
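        # split the batch into micro-batches so that each HF generate call stays within memory limits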
        batch_size = prompts.batch.batch_size[0]
        num_chunks = max(batch_size // self.config.get("micro_batch_size", batch_size), 1)
        batch_prompts = prompts.chunk(chunks=num_chunks)
        output = [self._generate_minibatch(p) for p in batch_prompts]
        output = DataProto.concat(output)
        return output

    @torch.no_grad()
    def _generate_minibatch(self, prompts: DataProto) -> DataProto:
        # allow sampling args to be overridden by the inputs' meta_info
        do_sample = prompts.meta_info.get("do_sample", self.config.do_sample)
        is_validate = prompts.meta_info.get("validate", False)

        temperature = prompts.meta_info.get("temperature", self.config.temperature)
        response_length = prompts.meta_info.get("response_length", self.config.response_length)
        top_p = prompts.meta_info.get("top_p", self.config.get("top_p", 1.0))
        top_k = max(0, prompts.meta_info.get("top_k", self.config.get("top_k", 0)))  # to be compatible with vllm
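
        # three generation modes: greedy decoding, sampling with validation kwargs, and sampling with rollout kwargs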
        if not do_sample:
            # do_sample==False -> greedy decoding
            kwargs = {
                "do_sample": False,
                "num_beams": 1,
            }
        elif is_validate:
            # do validate and do sample -> use val_kwargs
            kwargs = {
                "do_sample": True,
                "num_beams": 1,
                "top_k": max(0, self.config.val_kwargs.top_k),  # to be compatible with vllm
                "top_p": self.config.val_kwargs.top_p,
                "temperature": self.config.val_kwargs.temperature,
                "num_return_sequences": 1,  # if validate, already repeated in ray_trainer
            }
        else:
            # do_sample -> use rollout config
            kwargs = {
                "do_sample": True,
                "num_beams": 1,
                "top_p": top_p,
                "top_k": top_k,
                "temperature": temperature,
                "num_return_sequences": self.config.n,
            }

        # build the generation config according to the generate mode
        generation_config = GenerationConfig(**kwargs)

        idx = prompts.batch["input_ids"]  # (bs, prompt_length)
        prompt_length = idx.size(1)
        attention_mask = prompts.batch["attention_mask"]  # left-padded attention_mask
        position_ids = prompts.batch["position_ids"]

        # used to construct attention_mask
        eos_token_id = prompts.meta_info["eos_token_id"]
        pad_token_id = prompts.meta_info["pad_token_id"]

        self.module.eval()
        param_ctx = contextlib.nullcontext()
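
        # FSDP-wrapped modules need their full (unsharded) parameters gathered before calling generate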
        if isinstance(self.module, FSDP):
            # recurse needs to be set to False according to https://github.com/pytorch/pytorch/issues/100069
            param_ctx = FSDP.summon_full_params(self.module, writeback=False, recurse=False)
        with param_ctx, torch.autocast(device_type="cuda", dtype=torch.bfloat16):
            output = self.module.generate(
                input_ids=idx,
                attention_mask=attention_mask,
                do_sample=do_sample,
                max_new_tokens=response_length,
                eos_token_id=eos_token_id,
                pad_token_id=pad_token_id,
                generation_config=generation_config,
                output_scores=False,  # this is potentially very large
                return_dict_in_generate=True,
                use_cache=True,
            )
        # TODO: filter out the seq with no answers like ds-chat
        seq = output.sequences
        generated_batch_size = seq.size(0)  # bs * num_return_sequences

        # huggingface generate stops once every sequence in the batch has reached [EOS],
        # so we have to pad the output up to response_length
        sequence_length = prompt_length + self.config.response_length
        delta_length = sequence_length - seq.shape[1]

        if delta_length > 0:
            delta_tokens = torch.ones(size=(generated_batch_size, delta_length), device=seq.device, dtype=seq.dtype)
            delta_tokens = pad_token_id * delta_tokens
            seq = torch.cat((seq, delta_tokens), dim=1)

        assert seq.shape[1] == sequence_length

        # make necessary repetitions if num_return_sequences > 1
        num_return_sequences = kwargs.get("num_return_sequences", 1)
        if num_return_sequences > 1:
            position_ids = position_ids.repeat_interleave(num_return_sequences, dim=0)
            attention_mask = attention_mask.repeat_interleave(num_return_sequences, dim=0)

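        # split the generated sequence back into prompt and response, then extend position_ids past the prompt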
        prompt = seq[:, :prompt_length]  # (generated_batch_size, prompt_length)
        response = seq[:, prompt_length:]  # (generated_batch_size, response_length)

        response_length = response.size(1)
        delta_position_id = torch.arange(1, response_length + 1, device=position_ids.device)
        delta_position_id = delta_position_id.unsqueeze(0).repeat(generated_batch_size, 1)

        response_position_ids = position_ids[:, -1:] + delta_position_id
        position_ids = torch.cat([position_ids, response_position_ids], dim=-1)

        response_attention_mask = get_response_mask(
            response_id=response, eos_token=eos_token_id, dtype=attention_mask.dtype
        )
        attention_mask = torch.cat((attention_mask, response_attention_mask), dim=-1)

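        # pack prompts, responses, and the full sequences (with masks and position ids) for downstream training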
        batch = TensorDict(
            {
                "prompts": prompt,
                "responses": response,
                "input_ids": seq,
                "attention_mask": attention_mask,
                "position_ids": position_ids,
            },
            batch_size=generated_batch_size,
        )

        # empty cache before computing old_log_prob
        torch.cuda.empty_cache()

        self.module.train()
        return DataProto(batch=batch)