from collections import defaultdict

import torch

from verl import DataProto
from verl.utils.reward_score import _default_compute_score


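# NaiveRewardManager walks a rollout batch sample by sample, decodes each
# (prompt, response) pair, and scores the response against its ground truth
# with a rule-based reward function.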
class NaiveRewardManager:
    """Reward manager that scores decoded responses with a rule-based reward function."""

    def __init__(self, tokenizer, num_examine, compute_score=None, reward_fn_key="data_source") -> None:
        self.tokenizer = tokenizer  # used to decode prompt/response token ids into strings
        self.num_examine = num_examine  # number of decoded samples to print per data source
        self.compute_score = compute_score or _default_compute_score
        self.reward_fn_key = reward_fn_key  # non_tensor_batch key that identifies the data source

    def __call__(self, data: DataProto, return_dict=False):
        """We will expand this function gradually based on the available datasets"""

        if "rm_scores" in data.batch.keys():
            if return_dict:
                return {"reward_tensor": data.batch["rm_scores"]}
            else:
                return data.batch["rm_scores"]
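
        # One reward slot per response token, all initialized to zero; only the
        # final valid token of each response receives a nonzero value below.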
        reward_tensor = torch.zeros_like(data.batch["responses"], dtype=torch.float32)
        reward_extra_info = defaultdict(list)

        already_print_data_sources = {}
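
        # Score each sample independently: slice out the valid (unpadded) prompt
        # and response tokens, decode them, and run the scoring function.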
        for i in range(len(data)):
            data_item = data[i]  # DataProtoItem

            prompt_ids = data_item.batch["prompts"]

            prompt_length = prompt_ids.shape[-1]

            # Prompts are left-padded, so the valid prompt tokens sit at the right end.
            valid_prompt_length = data_item.batch["attention_mask"][:prompt_length].sum()
            valid_prompt_ids = prompt_ids[-valid_prompt_length:]

            # Responses are right-padded, so the valid response tokens sit at the left end.
            response_ids = data_item.batch["responses"]
            valid_response_length = data_item.batch["attention_mask"][prompt_length:].sum()
            valid_response_ids = response_ids[:valid_response_length]
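
            # Decode the valid token ids back into strings for scoring and logging.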
            prompt_str = self.tokenizer.decode(valid_prompt_ids, skip_special_tokens=True)
            response_str = self.tokenizer.decode(valid_response_ids, skip_special_tokens=True)

            ground_truth = data_item.non_tensor_batch["reward_model"]["ground_truth"]

            data_source = data_item.non_tensor_batch[self.reward_fn_key]

            extra_info = data_item.non_tensor_batch.get("extra_info", None)

            score = self.compute_score(
                data_source=data_source,
                solution_str=response_str,
                ground_truth=ground_truth,
                extra_info=extra_info,
            )
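
            # compute_score may return a bare float or a dict carrying a "score"
            # field plus arbitrary extra diagnostics to surface in metrics.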
            if isinstance(score, dict):
                reward = score["score"]

                # Store all returned fields, including the score itself, for logging.
                for key, value in score.items():
                    reward_extra_info[key].append(value)
            else:
                reward = score
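
            # Sparse outcome-level reward: place the scalar on the last valid
            # response token; every other position in the row stays zero.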
            reward_tensor[i, valid_response_length - 1] = reward

            if data_source not in already_print_data_sources:
                already_print_data_sources[data_source] = 0

            # Print at most `num_examine` decoded samples per data source.
            if already_print_data_sources[data_source] < self.num_examine:
                already_print_data_sources[data_source] += 1
                print("[prompt]", prompt_str)
                print("[response]", response_str)
                print("[ground_truth]", ground_truth)
                if isinstance(score, dict):
                    for key, value in score.items():
                        print(f"[{key}]", value)
                else:
                    print("[score]", score)

        if return_dict:
            return {
                "reward_tensor": reward_tensor,
                "reward_extra_info": reward_extra_info,
            }
        else:
            return reward_tensor
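

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the class): a minimal sketch of how
# this manager might be wired up, assuming a HuggingFace tokenizer and a
# rollout batch already packed into a DataProto with "prompts", "responses",
# and "attention_mask" tensors plus "reward_model" / "data_source" non-tensor
# fields. The model name and the `rollout_batch` variable are hypothetical.
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-7B-Instruct")
#     reward_fn = NaiveRewardManager(tokenizer=tokenizer, num_examine=1)
#
#     result = reward_fn(rollout_batch, return_dict=True)
#     reward_tensor = result["reward_tensor"]    # (batch_size, response_length)
#     extra = result["reward_extra_info"]        # dict[str, list] of diagnostics
# ---------------------------------------------------------------------------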