|
|
from torch.utils.data import Dataset |
|
|
from transformers import DataCollatorForSeq2Seq,DataCollatorForLanguageModeling,LogitsProcessor,LogitsProcessorList |
|
|
import torch |
|
|
from torch.nn.utils.rnn import pad_sequence |
|
|
|
|
|
import re |
|
|
|
|
|
from utils.warp import Warp |
|
|
from configs.hyperparametric import Reward_config |
|
|
# Hyperparameter dictionary consumed by the collator below; the code in this
# file reads at least the 'truncation' and 'max_length' keys.
config = Reward_config().to_dict()


# Alias to the Qwen chat-template helper. Not referenced in this chunk —
# presumably re-exported for importers of this module (TODO confirm).
template_to_qwen = Warp.template_to_qwen
|
|
|
|
|
|
|
|
|
|
|
def sample_for_feature(feature):
    """Map a scored example to its accept/reject target string.

    The example is labelled accept (接受) when its reward is exactly 1, or
    when the reward exceeds the ratio I-sample / I-all; otherwise it is
    labelled reject (拒绝). The returned string ends with the Qwen
    end-of-turn marker.
    """
    accept = '<v>接受</v><|im_end|>'
    reject = '<v>拒绝</v><|im_end|>'

    # A perfect reward is always accepted, regardless of the ratio test.
    if feature['reward'] == 1:
        return accept

    margin = feature['reward'] - feature['I-sample'] / feature['I-all']
    return accept if margin > 0 else reject
|
|
|
|
|
class DataCollatorForReward(DataCollatorForLanguageModeling):
    """Collate scored examples into accept/reject language-model batches.

    Each example's prompt is rewritten from a 0-100 scoring instruction to a
    binary accept/reject instruction, the target string produced by
    ``sample_for_feature`` is appended, and the prompt tokens are masked with
    -100 so that only the target tokens contribute to the LM loss.

    Fixes over the previous revision:
      * the target was tokenized twice — once inside the joint
        ``text + label`` encoding and once via ``tokenizer.encode`` — so it
        appeared twice in ``input_ids`` with only the second copy supervised;
      * pad tokens were inserted *between* the prompt and the appended
        target (manual padding happened before the append);
      * that manual padding was redundant with the later ``pad_sequence``
        calls, and the trailing ``squeeze(0)`` branches were dead code
        (``pad_sequence(batch_first=True)`` always returns 2-D tensors).
    """

    def __init__(self, tokenizer):
        # NOTE: the parent initializer is intentionally skipped — __call__
        # below replaces the parent's collation logic entirely and only the
        # tokenizer is needed.
        self.tokenizer = tokenizer

    def __call__(self, examples):
        """Build a padded training batch from a list of example dicts.

        Args:
            examples: dicts with at least 'text', 'reward', 'I-sample' and
                'I-all'. Every other key except 'text' and 'y' is assumed
                numeric and forwarded as a float32 tensor — TODO confirm
                'y' is consumed elsewhere.

        Returns:
            dict with 'input_ids', 'attention_mask', 'labels' (-100 on
            prompt and padding positions), 'label_mask' (1 on target
            positions), plus one float tensor per extra feature key.
        """
        # Rewrite the scoring instruction into the binary-choice instruction.
        texts = [
            re.sub(
                '已有推理步骤在0至100内打分并在<v></v>中给出分数,例如<v>1</v>',
                '已有推理步骤选择接受或拒绝,并在<v></v>中给出选择,例如<v>接受</v>',
                example['text'],
            )
            for example in examples
        ]
        targets = [sample_for_feature(example) for example in examples]

        # Tokenize the prompts only; the target is appended afterwards so it
        # is never duplicated. Truncation therefore applies to the prompt —
        # the short accept/reject target may extend a few tokens past
        # config['max_length'].
        batch_input = self.tokenizer(
            texts,
            truncation=config['truncation'],
            max_length=config['max_length'],
            return_tensors=None,
            add_special_tokens=True,
        )
        input_ids = batch_input['input_ids']
        attention_mask = batch_input['attention_mask']

        label_ids, label_mask = [], []
        for i, target in enumerate(targets):
            target_ids = self.tokenizer.encode(target, add_special_tokens=False)
            # Prompt positions get -100 so the loss ignores them; only the
            # appended accept/reject tokens are supervised.
            label_ids.append([-100] * len(input_ids[i]) + target_ids)
            label_mask.append([0] * len(input_ids[i]) + [1] * len(target_ids))
            input_ids[i] = input_ids[i] + target_ids
            attention_mask[i] = attention_mask[i] + [1] * len(target_ids)

        # Single padding pass: pad everything to the longest sequence.
        input_ids = pad_sequence(
            [torch.tensor(ids) for ids in input_ids],
            batch_first=True,
            padding_value=self.tokenizer.pad_token_id,
        )
        attention_mask = pad_sequence(
            [torch.tensor(mask) for mask in attention_mask],
            batch_first=True,
            padding_value=0,
        )
        label_ids = pad_sequence(
            [torch.tensor(l) for l in label_ids],
            batch_first=True,
            padding_value=-100,
        )
        label_mask = pad_sequence(
            [torch.tensor(mask) for mask in label_mask],
            batch_first=True,
            padding_value=0,
        )

        batch = {
            'input_ids': input_ids,
            'attention_mask': attention_mask,
            'labels': label_ids,
            'label_mask': label_mask,
        }

        # Forward every remaining per-example feature (reward, I-sample,
        # I-all, ...) as a float tensor for the reward head.
        extra_features = [
            {k: v for k, v in example.items() if k not in ['text', 'y']}
            for example in examples
        ]
        if extra_features and len(extra_features[0]) > 0:
            for key in extra_features[0].keys():
                batch[key] = torch.tensor(
                    [f[key] for f in extra_features], dtype=torch.float32
                )

        return batch
|
|
|
|
|
|
|
|
|