# NOTE(review): this file was mangled before review — all statements were
# collapsed onto a few physical lines, and every angle-bracketed special token
# (step/none/value delimiters) inside string literals and regex patterns
# appears to have been removed by a markup stripper, leaving empty strings.
# The layout below is a faithful reconstruction: every surviving token,
# including all runtime strings, is preserved byte-for-byte, and each spot
# where a stripped token breaks the logic is flagged inline. Restore the real
# tokens from version control before running. Indentation of the two
# chat_template branches is reconstructed by best judgment — verify vs. VCS.
import torch
import re
from configs.hyperparametric import Reward_config
from transformers import AutoTokenizer, AutoModelForCausalLM, Trainer, TrainingArguments
from model.logitsprocessor import OutputControlLogitsProcessor, RewardControlLogitsProcessor
from tree.asts import AST

#reward_config = Reward_config()


def find_subarray_positions(large_array, small_array):
    """Return the start indices of every occurrence of small_array in large_array."""
    n = len(small_array)
    result = []
    for i in range(len(large_array) - n + 1):
        if large_array[i:i+n] == small_array:
            result.append(i)
    return result


class Warp():
    """Holds a generation model and a reward model plus their tokenizers and
    optional logits processors, loaded lazily via the load_* methods."""

    def __init__(self, args,):
        self.args = args
        self.generate_tokenizer = None
        self.generate_model = None
        self.logits_processor = None
        self.reward_tokenizer = None
        self.reward_model = None
        self.reward_processor = None

    def load_generate_model(self):
        """Load the generation tokenizer/model (fp16 on GPU) and, when
        args.logits_control is set, an AST-driven output logits processor."""
        args = self.args
        self.generate_tokenizer = AutoTokenizer.from_pretrained(args.generate_model_path, trust_remote_code=True)
        self.generate_model = AutoModelForCausalLM.from_pretrained(args.generate_model_path, trust_remote_code=True).half().cuda()
        if args.logits_control:
            syntax_tree = AST(path=args.control_file, tokenizer=self.generate_tokenizer)
            self.logits_processor = OutputControlLogitsProcessor(ast=syntax_tree, tokenizer=self.generate_tokenizer)
            # NOTE(review): the pattern below is an empty string — re.search('', s)
            # always matches and re.sub('', '', s) leaves s unchanged. The original
            # special token was evidently stripped here; restore it.
            if re.search('', self.generate_tokenizer.chat_template):
                self.generate_tokenizer.chat_template = re.sub('', '', self.generate_tokenizer.chat_template)
        # NOTE(review): presumably wrapped a '<none>'-style marker token originally;
        # only the surrounding whitespace and the word survived the stripping.
        self.none_token = '\n\nnone\n\n'
        self.generate_bos = self.generate_tokenizer.bos_token
        self.generate_eos = self.generate_tokenizer.eos_token

    def load_reward_model(self, **kwargs):
        """Load the reward tokenizer/model: 4/8-bit quantized when a
        'bnb_config' kwarg is supplied, otherwise fp16 on GPU."""
        args = self.args
        if 'bnb_config' in kwargs.keys():
            self.reward_model = AutoModelForCausalLM.from_pretrained(args.reward_model_path,
                                                                     trust_remote_code=True,
                                                                     device_map='auto',
                                                                     quantization_config=kwargs['bnb_config'],
                                                                     )
        else:
            self.reward_model = AutoModelForCausalLM.from_pretrained(args.reward_model_path,
                                                                     trust_remote_code=True
                                                                     ).half().cuda()
        self.reward_tokenizer = AutoTokenizer.from_pretrained(args.reward_model_path, trust_remote_code=True)
        if args.logits_control:
            # Note: built on the *generate* tokenizer, mirroring the original source.
            self.reward_processor = RewardControlLogitsProcessor(tokenizer=self.generate_tokenizer)
            # NOTE(review): empty pattern again — always matches / no-op sub;
            # the stripped token must be restored.
            if re.search('', self.reward_tokenizer.chat_template):
                self.reward_tokenizer.chat_template = re.sub('', '', self.reward_tokenizer.chat_template)

    @staticmethod
    def template_to_qwen(x, bos='<|im_start|>', eos='<|im_end|>'):
        """Wrap a user query in the Qwen chat template with a fixed system prompt,
        ending at the open assistant turn for generation."""
        # NOTE(review): the template begins with bare 'system' — a leading '{bos}'
        # may have been stripped; confirm against the original.
        return """system\n{system}{eos}\n{bos}user\n{query}{eos}\n{bos}assistant\n""".format(
            bos=bos,
            eos=eos,
            system="You are a helpful assistant.",
            query=x
        )

    @staticmethod
    def assistant_from_template_in_response(x, bos='<|im_start|>', eos='<|im_end|>'):
        """Extract the assistant turn's text from a fully templated response."""
        # split(bos) yields ['', system turn, user turn, assistant turn]; take index 3.
        processed_string = x.split(bos)[3].strip()
        processed_string = processed_string.split(eos)[0].strip()
        processed_string = re.sub('^assistant', '', processed_string).strip()
        return processed_string

    @staticmethod
    def step_from_response(x):
        """Pull the (last) delimited reasoning step out of a model response."""
        step = ''
        # NOTE(review): '' in x is always True, and x.split('') raises ValueError
        # (empty separator) — the delimiter token was stripped from this source;
        # this function cannot run until it is restored.
        if '' in x:
            x = x.split('')[-1].strip()
        if '\n\n' in x:
            x_ = re.search('\n\n.+\n\n', x)
            step = x_.group() if x_ else x
        if '' in x:
            x_ = re.search('.+', x)
            step = x_.group() if x_ else x
        return step

    @staticmethod
    def value_from_response(x):
        """Extract the value judgement word from a model response, or ''."""
        # NOTE(review): '' in x is always True; the '\w+' pattern plus the [3:-4]
        # slice strongly suggest the original pattern wrapped the word in a
        # 3-char opening and 4-char closing tag (e.g. '<v>…</v>') that were
        # stripped here — restore before use.
        if '' in x:
            x_ = re.search('\w+', x)
            return x_.group()[3:-4] if x_ else ''
            #return float(x_.group()[3:-4])/100 if x_ else 0
        return ''


class WarpLJP(Warp):
    """Warp specialisation for Legal Judgment Prediction (LJP) data."""

    def __init__(self, args, mode='p'):
        super().__init__(args)
        self.mode = mode

    def processing_single(self, x, mode=''):
        """Split one LJP sample into (procuratorate_text, step_strings, labels).

        mode: include Defence sentences when it contains 'd', Fact sentences
        when it contains 'f' (sentences split on the Chinese full stop '。').
        Labels join each annotation's charge/penalty/imprisonment with ';'.
        """
        # NOTE(review): presumably a '<none>'-style marker originally; stripped
        # down to bare whitespace in this source.
        self.none_token = '\n\n'
        p = x['Procuratorate']
        a = []
        if 'd' in mode:
            for d in x['Defence'].split('。'):
                if len(d.strip()) > 0:
                    a.append(d)
        if 'f' in mode:
            for f in x['Fact'].split('。'):
                if len(f.strip()) > 0:
                    a.append(f)
        crime = [c['charge'] for c in x['Annotations'][0]['annotation']]
        penalty = [c['penalty'] for c in x['Annotations'][0]['annotation']]
        imprisonment = [c['imprisonment'] for c in x['Annotations'][0]['annotation']]
        label = {'crime': ';'.join(['{c}罪'.format(c=c) for c in crime]),
                 'penalty': ';'.join(['{c}'.format(c=c) for c in penalty]),
                 'imprisonment': ';'.join(['{c}'.format(c=c) for c in imprisonment])}
        if a != []:
            # Wrap each sentence in the (stripped) step delimiters unless it is
            # already wrapped. NOTE(review): delimiters reduced to '\n\n' here.
            a = [_ for _ in map(lambda x_: '\n\n{i}\n\n'.format(i=x_) if not x_.startswith('\n\n') else x_, a)]
            return p, a, label
        else:
            return p, ['\n\n'], label

    @staticmethod
    def prompt_to_value(x, a, bos='<|im_start|>', eos='<|im_end|>'):
        """Build the accept/reject value-judgement prompt for a case and its steps."""
        # NOTE(review): the Chinese prompt reads '并在中给出选择' / '例如接受' —
        # tag tokens around the choice were clearly stripped from this literal.
        pmt = '根据案情描述对给出的已有推理步骤选择接受或拒绝,并在中给出选择,例如接受\n案情描述:{x}\n已有推理步骤:\n{a}\n:'.format(x=x, a=a)
        return Warp.template_to_qwen(pmt, bos=bos, eos=eos)

    @staticmethod
    def prompt_to_crime(x, a, bos='<|im_start|>', eos='<|im_end|>'):
        """Build the next-step / conclusion generation prompt for a case and its steps."""
        # NOTE(review): step-delimiter tokens in this literal were stripped to
        # bare '\n\n' runs — restore from version control.
        pmt = '根据案情描述和已有步骤仅给出一个推理。如果是结论则直接输出,例如盗窃罪。如果是步骤则直接输出\n\n,例如\n\n步骤1:…\n\n\n案情描述:{x}\n已有推理步骤:\n{a}\n:'.format(x=x, a=a)
        return Warp.template_to_qwen(pmt, bos=bos, eos=eos)