from torch.utils.data import Dataset
from transformers import DataCollatorForSeq2Seq,DataCollatorForLanguageModeling,LogitsProcessor,LogitsProcessorList
import torch
from torch.nn.utils.rnn import pad_sequence
import re
from utils.warp import Warp
from configs.hyperparametric import Reward_config

# Collator hyperparameters (e.g. `truncation`, `max_length`) as a plain dict.
config = Reward_config().to_dict()

# Chat template that wraps a raw sample into the Qwen conversation format.
# Example of a fully rendered prompt (original Chinese comment, translated):
#   system:    "You are a helpful assistant."
#   user:      "Given the case description, score the provided reasoning
#               steps on a 0-100 scale ..." followed by a long case
#               description (a forged bank acceptance bill fraud case) and
#               the candidate reasoning steps to be judged.
#   assistant: <model completion starts here>
# NOTE(review): the example template uses DeepSeek-style delimiters
# (<|end▁of▁sentence|> / <|begin▁of▁sentence|>) while the labels produced
# below end with Qwen's <|im_end|> — confirm the tokenizer's special
# tokens actually match the template in use.
template_to_qwen = Warp.template_to_qwen
def sample_for_feature(feature):
    """Map a feature dict to its preference label string.

    A sample is labelled '接受' (accept) when ``reward == 1`` or when the
    reward exceeds the empirical acceptance rate ``I-sample / I-all``;
    otherwise '拒绝' (reject). The Qwen end-of-turn token ``<|im_end|>``
    terminates the completion.

    Raises:
        KeyError: if 'reward', 'I-sample' or 'I-all' is missing.
        ZeroDivisionError: if ``I-all`` is 0 (only reached when reward != 1).
    """
    if feature['reward'] == 1:
        return '接受<|im_end|>'
    if feature['reward'] - feature['I-sample'] / feature['I-all'] > 0:
        return '接受<|im_end|>'
    return '拒绝<|im_end|>'


class DataCollatorForReward(DataCollatorForLanguageModeling):
    """Collator that builds binary accept/reject supervision for a reward model.

    Each example's prompt is rewritten from "score 0-100" into a binary
    accept/reject instruction; only the appended label tokens (接受/拒绝)
    are supervised — every prompt position gets label -100.
    """

    def __init__(self, tokenizer):
        # Deliberately does NOT call super().__init__: __call__ is fully
        # overridden below and only the tokenizer is needed.
        self.tokenizer = tokenizer

    def __call__(self, examples):
        text = [example['text'] for example in examples]
        # Rewrite the original scoring instruction into a binary
        # accept/reject instruction (runtime strings kept verbatim).
        text = [re.sub('已有推理步骤在0至100内打分并在中给出分数,例如1',
                       '已有推理步骤选择接受或拒绝,并在中给出选择,例如接受', t)
                for t in text]
        labels = [sample_for_feature(example) for example in examples]
        # BUG FIX: tokenize the prompt text only. The original encoded
        # `text + label` here and then appended the encoded label AGAIN
        # after padding, so the answer tokens appeared twice in input_ids
        # and leaked into the (unsupervised) context.
        batch_input = self.tokenizer(
            text,
            truncation=config['truncation'],
            max_length=config['max_length'],
            return_tensors=None,
            add_special_tokens=True,
        )
        input_ids = batch_input['input_ids']
        attention_mask = batch_input['attention_mask']
        # Right-pad all prompts to the batch max first so every label
        # starts at the same position across the batch.
        longest = max(len(ids) for ids in input_ids)
        padding = [longest - len(ids) for ids in input_ids]
        input_ids = [ids + [self.tokenizer.pad_token_id] * padding[i]
                     for i, ids in enumerate(input_ids)]
        attention_mask = [mask + [0] * padding[i]
                          for i, mask in enumerate(attention_mask)]
        label_ids, label_mask = [], []
        for i, label in enumerate(labels):
            label = self.tokenizer.encode(label, add_special_tokens=False)
            # Supervise only the appended label tokens; the whole prompt
            # (including its padding) is masked out with -100.
            label_ids.append([-100] * len(input_ids[i]) + label)
            label_mask.append([0] * len(input_ids[i]) + [1] * len(label))
            input_ids[i] += label
            attention_mask[i] += [1] * len(label)
        # Label lengths may differ (接受 vs 拒绝), so pad once more.
        input_ids = pad_sequence(
            [torch.tensor(ids) for ids in input_ids],
            batch_first=True,
            padding_value=self.tokenizer.pad_token_id,
        )
        attention_mask = pad_sequence(
            [torch.tensor(mask) for mask in attention_mask],
            batch_first=True,
            padding_value=0,
        )
        label_ids = pad_sequence(
            [torch.tensor(l) for l in label_ids],
            batch_first=True,
            padding_value=-100,
        )
        label_mask = pad_sequence(
            [torch.tensor(mask) for mask in label_mask],
            batch_first=True,
            padding_value=0,
        )
        batch = {
            'input_ids': input_ids,
            'attention_mask': attention_mask,
            'labels': label_ids,
            'label_mask': label_mask,
        }
        # Forward every remaining per-example scalar feature (reward,
        # I-sample, I-all, ...) as a float32 tensor alongside the batch.
        extra_features = [{k: v for k, v in example.items()
                           if k not in ['text', 'y']} for example in examples]
        if extra_features and len(extra_features[0]) > 0:
            for key in extra_features[0].keys():
                batch[key] = torch.tensor([f[key] for f in extra_features],
                                          dtype=torch.float32)
        # Defensive squeeze in case an upstream step added a spurious
        # leading batch dimension.
        if len(batch['input_ids'].shape) > 2:
            batch['input_ids'] = batch['input_ids'].squeeze(0)
        if len(batch['attention_mask'].shape) > 2:
            batch['attention_mask'] = batch['attention_mask'].squeeze(0)
        return batch