File size: 6,342 Bytes
5e56f2f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
import torch
import re
from configs.hyperparametric import Reward_config

from transformers import AutoTokenizer, AutoModelForCausalLM, Trainer, TrainingArguments
from model.logitsprocessor import OutputControlLogitsProcessor,RewardControlLogitsProcessor
from tree.asts import AST

#reward_config = Reward_config()

def find_subarray_positions(large_array, small_array):
    """Return every start index at which small_array occurs as a
    contiguous slice of large_array (naive O(n*m) scan)."""
    window = len(small_array)
    return [
        start
        for start in range(len(large_array) - window + 1)
        if large_array[start:start + window] == small_array
    ]

class Warp():
    """Wrapper owning a generator LLM and a reward LLM plus their tokenizers
    and optional logits processors.

    Models are loaded lazily via :meth:`load_generate_model` /
    :meth:`load_reward_model`, so constructing a Warp is cheap.
    """

    def __init__(self, args):
        self.args = args
        self.generate_tokenizer = None
        self.generate_model = None
        self.logits_processor = None
        self.reward_tokenizer = None
        self.reward_model = None
        self.reward_processor = None

    @staticmethod
    def _strip_think_tag(tokenizer):
        # Some chat templates hard-code a '<think>' marker; remove it so the
        # template does not force think-mode generations.  chat_template can
        # be None for tokenizers without one — guard instead of crashing.
        template = getattr(tokenizer, 'chat_template', None)
        if template and '<think>' in template:
            tokenizer.chat_template = template.replace('<think>', '')

    def load_generate_model(self):
        """Load the generation model/tokenizer (fp16, on GPU) and, when
        args.logits_control is set, the AST-based output constraint."""
        args = self.args
        self.generate_tokenizer = AutoTokenizer.from_pretrained(
            args.generate_model_path, trust_remote_code=True)
        self.generate_model = AutoModelForCausalLM.from_pretrained(
            args.generate_model_path, trust_remote_code=True).half().cuda()

        if args.logits_control:
            syntax_tree = AST(path=args.control_file, tokenizer=self.generate_tokenizer)
            self.logits_processor = OutputControlLogitsProcessor(
                ast=syntax_tree, tokenizer=self.generate_tokenizer)

        self._strip_think_tag(self.generate_tokenizer)

        # Placeholder step emitted when a response yields no usable step.
        self.none_token = '<p>none</p>'
        self.generate_bos = self.generate_tokenizer.bos_token
        self.generate_eos = self.generate_tokenizer.eos_token

    def load_reward_model(self, **kwargs):
        """Load the reward model/tokenizer.

        Pass ``bnb_config=<BitsAndBytesConfig>`` to load quantized with
        ``device_map='auto'``; otherwise the model is loaded fp16 on GPU.
        """
        args = self.args
        if 'bnb_config' in kwargs:
            self.reward_model = AutoModelForCausalLM.from_pretrained(
                args.reward_model_path,
                trust_remote_code=True,
                device_map='auto',
                quantization_config=kwargs['bnb_config'],
            )
        else:
            self.reward_model = AutoModelForCausalLM.from_pretrained(
                args.reward_model_path,
                trust_remote_code=True,
            ).half().cuda()
        self.reward_tokenizer = AutoTokenizer.from_pretrained(
            args.reward_model_path, trust_remote_code=True)

        if args.logits_control:
            # NOTE(review): this uses the *generate* tokenizer, not the reward
            # tokenizer — presumably the two vocabularies match; confirm.
            self.reward_processor = RewardControlLogitsProcessor(tokenizer=self.generate_tokenizer)

        self._strip_think_tag(self.reward_tokenizer)

    @staticmethod
    def template_to_qwen(x, bos='<|im_start|>', eos='<|im_end|>'):
        """Wrap query *x* in the Qwen chat template and open an assistant turn.

        Note the deliberate missing leading ``bos``: the tokenizer is assumed
        to prepend it (``assistant_from_template_in_response`` relies on this).
        """
        return """system\n{system}{eos}\n{bos}user\n{query}{eos}\n{bos}assistant\n""".format(
            bos=bos,
            eos=eos,
            system="You are a helpful assistant.",
            query=x,
        )

    @staticmethod
    def assistant_from_template_in_response(x, bos='<|im_start|>', eos='<|im_end|>'):
        """Extract the assistant turn's text from a full chat transcript.

        Assumes *x* starts with ``bos`` so the assistant turn is the 4th
        bos-delimited chunk (``''``, system, user, assistant).
        """
        processed_string = x.split(bos)[3].strip()
        processed_string = processed_string.split(eos)[0].strip()
        # Drop the role label that opens the turn.
        processed_string = re.sub(r'^assistant', '', processed_string).strip()
        return processed_string

    @staticmethod
    def step_from_response(x):
        """Pull a single reasoning step ``<p>…</p>`` or conclusion ``<e>…</e>``
        from a response; ``'</none_response>'`` when neither tag is present.
        An ``<e>`` match takes precedence over a ``<p>`` match."""
        step = '</none_response>'
        if '</think>' in x:
            # Discard any chain-of-thought preceding the final answer.
            x = x.split('</think>')[-1].strip()
        if '<p>' in x:
            x_ = re.search(r'<p>.+</p>', x)
            step = x_.group() if x_ else x
        if '<e>' in x:
            x_ = re.search(r'<e>.+</e>', x)
            step = x_.group() if x_ else x
        return step

    @staticmethod
    def value_from_response(x):
        """Return the text inside the first ``<v>…</v>`` tag ('' when absent).

        ``\\w`` also matches CJK word characters, so labels such as
        ``<v>接受</v>`` are extracted correctly.
        """
        if '<v>' in x:
            # Raw string: '\w' in a plain literal is a SyntaxWarning on 3.12+.
            x_ = re.search(r'<v>\w+</v>', x)
            return x_.group()[3:-4] if x_ else ''
        return ''

class WarpLJP(Warp):
    """Legal-judgment-prediction specialisation of Warp (Chinese prompts)."""

    def __init__(self, args, mode='p'):
        super().__init__(args)
        self.mode = mode

    def processing_single(self, x, mode=''):
        """Split one LJP sample into (procuratorate text, step list, labels).

        'd' in *mode* pulls sentences from the Defence field, 'f' from Fact;
        sentences are split on '。' and wrapped in <p></p> tags.
        """
        # NOTE(review): the `mode` parameter shadows self.mode from __init__ —
        # presumably intentional; confirm callers pass 'd'/'f' explicitly.
        self.none_token = '<p>无</p>'

        procuratorate = x['Procuratorate']

        segments = []
        if 'd' in mode:
            segments.extend(s for s in x['Defence'].split('。') if s.strip())
        if 'f' in mode:
            segments.extend(s for s in x['Fact'].split('。') if s.strip())

        annotation = x['Annotations'][0]['annotation']
        label = {
            'crime': ';'.join('<e>{c}罪</e>'.format(c=c['charge']) for c in annotation),
            'penalty': ';'.join('<e>{c}</e>'.format(c=c['penalty']) for c in annotation),
            'imprisonment': ';'.join('<e>{c}</e>'.format(c=c['imprisonment']) for c in annotation),
        }

        if not segments:
            return procuratorate, ['<p>无</p>'], label
        wrapped = [s if s.startswith('<p>') else '<p>{i}</p>'.format(i=s) for s in segments]
        return procuratorate, wrapped, label

    @staticmethod
    def prompt_to_value(x, a, bos='<|im_start|>', eos='<|im_end|>'):
        """Build the accept/reject value-judgement prompt for the reward model."""
        prompt = f'根据案情描述对给出的已有推理步骤选择接受或拒绝,并在<v></v>中给出选择,例如<v>接受</v>\n案情描述:{x}\n已有推理步骤:\n{a}\n:'
        return Warp.template_to_qwen(prompt, bos=bos, eos=eos)

    @staticmethod
    def prompt_to_crime(x, a, bos='<|im_start|>', eos='<|im_end|>'):
        """Build the next-step generation prompt for the generator model."""
        prompt = f'根据案情描述和已有步骤仅给出一个推理。如果是结论则直接输出<e></e>,例如<e>盗窃罪</e>。如果是步骤则直接输出<p></p>,例如<p>步骤1:…</p>\n案情描述:{x}\n已有推理步骤:\n{a}\n:'
        return Warp.template_to_qwen(prompt, bos=bos, eos=eos)