"""
|
|
|
SFT dataset
|
|
|
- We assume user pass a single parquet file.
|
|
|
- We load all the data into the memory.
|
|
|
Each parquet file contains
|
|
|
"""

from typing import List, Union

import pandas as pd
import torch
from torch.utils.data import Dataset
from transformers import PreTrainedTokenizer

from verl.utils import hf_tokenizer
from verl.utils.fs import copy_to_local
from verl.utils.model import compute_position_id_with_mask


class SFTDataset(Dataset):
    """
    An in-memory SFT dataset backed by one or more parquet files.

    Arguments:
        parquet_files (str or List[str]): path(s) to the parquet file(s)
        tokenizer (str or PreTrainedTokenizer): the tokenizer, or a model
            name/path from which to build one via hf_tokenizer
        config (OmegaConf): the data config
    """

    def __init__(self, parquet_files: Union[str, List[str]], tokenizer, config):
        prompt_key = config.get("prompt_key", "prompt")
        response_key = config.get("response_key", "response")
        prompt_dict_keys = config.get("prompt_dict_keys", None)
        response_dict_keys = config.get("response_dict_keys", None)
        max_length = config.get("max_length", 1024)
        truncation = config.get("truncation", "error")

        assert truncation in ["error", "left", "right"]
        self.truncation = truncation

        if not isinstance(parquet_files, list):
            parquet_files = [parquet_files]

        self.parquet_files = parquet_files
        if isinstance(tokenizer, str):
            tokenizer = hf_tokenizer(tokenizer)
        self.tokenizer: PreTrainedTokenizer = tokenizer

        self.prompt_key = prompt_key if isinstance(prompt_key, (tuple, list)) else [prompt_key]
        self.response_key = response_key if isinstance(response_key, (tuple, list)) else [response_key]
        self.prompt_dict_keys = [] if not prompt_dict_keys else prompt_dict_keys
        self.response_dict_keys = [] if not response_dict_keys else response_dict_keys
        self.max_length = max_length

        self._download()
        self._read_files_and_tokenize()

    def _download(self):
        # Copy each (possibly remote) parquet file to the local filesystem.
        for i, parquet_file in enumerate(self.parquet_files):
            self.parquet_files[i] = copy_to_local(parquet_file, verbose=True)

    def _read_files_and_tokenize(self):
        # Load every parquet file into memory and concatenate them into a
        # single dataframe; tokenization itself happens lazily in __getitem__.
        dataframes = []
        for parquet_file in self.parquet_files:
            dataframe = pd.read_parquet(parquet_file)
            dataframes.append(dataframe)
        self.dataframe = pd.concat(dataframes)

        self.prompts = self.dataframe[self.prompt_key]
        self.responses = self.dataframe[self.response_key]

    def __len__(self):
        return len(self.prompts)

    def __getitem__(self, item):
        tokenizer = self.tokenizer

        prompt_chat = self.prompts.iloc[item].item()
        response = self.responses.iloc[item].item()

        # Render the chat-formatted prompt to a string and terminate the
        # response with the EOS token.
        prompt_chat_str = tokenizer.apply_chat_template(prompt_chat, add_generation_prompt=True, tokenize=False)
        response_chat_str = response + tokenizer.eos_token

        # Tokenize the prompt and response separately so that the prompt
        # boundary is known when building the loss mask below.
        prompt_ids_output = tokenizer(prompt_chat_str, return_tensors="pt", add_special_tokens=False)
        prompt_ids = prompt_ids_output["input_ids"][0]
        prompt_attention_mask = prompt_ids_output["attention_mask"][0]

        response_ids_output = tokenizer(response_chat_str, return_tensors="pt", add_special_tokens=False)
        response_ids = response_ids_output["input_ids"][0]
        response_attention_mask = response_ids_output["attention_mask"][0]

        prompt_length = prompt_ids.shape[0]
        response_length = response_ids.shape[0]

        input_ids = torch.cat((prompt_ids, response_ids), dim=-1)
        attention_mask = torch.cat((prompt_attention_mask, response_attention_mask), dim=-1)

        # Pad to max_length, or truncate according to the configured policy.
        sequence_length = input_ids.shape[0]
        if sequence_length < self.max_length:
            padded_input_ids = torch.ones(size=(self.max_length - sequence_length,), dtype=input_ids.dtype) * self.tokenizer.pad_token_id
            padded_attention_mask = torch.zeros(size=(self.max_length - sequence_length,), dtype=attention_mask.dtype)

            input_ids = torch.cat((input_ids, padded_input_ids))
            attention_mask = torch.cat((attention_mask, padded_attention_mask))
        elif sequence_length > self.max_length:
            if self.truncation == "left":
                input_ids = input_ids[-self.max_length :]
                attention_mask = attention_mask[-self.max_length :]
            elif self.truncation == "right":
                input_ids = input_ids[: self.max_length]
                attention_mask = attention_mask[: self.max_length]
            elif self.truncation == "error":
                raise NotImplementedError(f"{sequence_length=} is larger than {self.max_length=}")
            else:
                raise NotImplementedError(f"Unknown truncation method {self.truncation}")

        position_ids = compute_position_id_with_mask(attention_mask)

        # Compute loss only on response tokens: mask out the prompt (except
        # its last token, whose logits predict the first response token) and
        # the final response token, whose target would fall into the padding.
        loss_mask = attention_mask.clone()
        if prompt_length > 1:
            loss_mask[: min(prompt_length, loss_mask.size(0)) - 1] = 0
        loss_mask[min(prompt_length + response_length, loss_mask.size(0)) - 1] = 0

        return {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "position_ids": position_ids,
            "loss_mask": loss_mask,
        }
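

# A minimal usage sketch, assuming a local "sft_example.parquet" shaped like
# the schema comment near the top of this file; the file path and tokenizer
# name are hypothetical placeholders.
if __name__ == "__main__":
    from omegaconf import OmegaConf

    config = OmegaConf.create(
        {
            "prompt_key": "prompt",
            "response_key": "response",
            "max_length": 512,
            "truncation": "right",
        }
    )
    dataset = SFTDataset(
        parquet_files="sft_example.parquet",
        tokenizer="Qwen/Qwen2.5-0.5B-Instruct",  # any HF chat tokenizer name
        config=config,
    )

    sample = dataset[0]
    print({key: tensor.shape for key, tensor in sample.items()})
    # Only response tokens carry loss: loss_mask is 0 over prompt and padding.
    print("trainable tokens:", int(sample["loss_mask"].sum()))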