import torch
from torch import nn
import re

#from utils.warp import get_tag_tokens
from configs.hyperparametric import Reward_config

# NOTE: Reward_config is assumed to expose 'lower', 'upper', 'open_tag', and
# 'close_tag'; these keys are read in _get_format_tokens below.
config = Reward_config().to_dict()

class FormatGradientMasker:
    """Builds a boolean token mask marking the spans that match a format
    pattern such as <a>3</a>, so gradients can be confined to those tokens."""

    # \d+ (rather than \d) so multi-digit tags like <a>12</a> also match.
    def __init__(self, tokenizer, pattern=r"<a>\d+</a>"):
        self.tokenizer = tokenizer
        self.pattern = re.compile(pattern)
        self.format_token_ids = self._get_format_tokens()
        
    def _get_format_tokens(self):
        # Collect the ids of every token appearing in a well-formed tag over
        # the configured numeric range. (Currently unused by create_mask.)
        tokens = set()
        lower, upper = config['lower'], config['upper']
        for i in range(lower, upper):
            text = f"{config['open_tag']}{i}{config['close_tag']}"
            token_ids = self.tokenizer.encode(text, add_special_tokens=False)
            tokens.update(token_ids)
        return tokens
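    # e.g. with open_tag='<a>', close_tag='</a>', lower=0, upper=10
    # (illustrative values, not from this repo's config), this collects the
    # token ids seen in "<a>0</a>" ... "<a>9</a>".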
    
    def create_mask(self, input_ids):
        mask = torch.zeros_like(input_ids, dtype=torch.bool)
        for b in range(input_ids.size(0)):
            text = self.tokenizer.decode(input_ids[b], skip_special_tokens=True)
            for match in self.pattern.finditer(text):
                match_tokens = self.tokenizer.encode(match.group(0), add_special_tokens=False)
                match_tensor = torch.tensor(match_tokens, device=input_ids.device)
                # Find the token span of this regex match within input_ids.
                for i in range(input_ids.size(1) - len(match_tokens) + 1):
                    if torch.equal(input_ids[b, i:i + len(match_tokens)], match_tensor):
                        mask[b, i:i + len(match_tokens)] = True
        return mask
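    # Illustrative sketch (the tokenizer name is an assumption; exact token
    # boundaries depend on the tokenizer in use):
    #   tok = AutoTokenizer.from_pretrained("gpt2")
    #   ids = tok("score: <a>3</a>", return_tensors="pt").input_ids
    #   FormatGradientMasker(tok).create_mask(ids)
    #   -> True exactly at the positions whose tokens spell "<a>3</a>"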

class FormatAwareLoss(nn.Module):
    """Token-level cross entropy restricted to format spans: only positions
    whose target token belongs to a matched <a>...</a> tag contribute."""

    def __init__(self, tokenizer):
        super().__init__()
        self.tokenizer = tokenizer
        self.ce_loss = nn.CrossEntropyLoss(reduction='none')
        self.masker = FormatGradientMasker(tokenizer)
        
    def forward(self, logits, labels):
        # Standard causal-LM shift: the logits at position i predict labels[i + 1].
        shift_logits = logits[..., :-1, :].contiguous()
        shift_labels = labels[..., 1:].contiguous()

        losses = self.ce_loss(
            shift_logits.view(-1, shift_logits.size(-1)),
            shift_labels.view(-1)
        ).view(shift_labels.shape)

        # Build the mask over the shifted labels so it aligns with the
        # per-position losses. NOTE: assumes labels hold real token ids
        # (no -100 ignore index), since create_mask decodes them.
        mask = self.masker.create_mask(shift_labels)

        # Keep only the loss inside the format regions.
        masked_losses = losses * mask.float()

        return masked_losses.sum() / (mask.sum() + 1e-8)
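
# Minimal end-to-end sketch (the model and tokenizer names are illustrative
# assumptions, not from this repo; labels must contain no -100 positions,
# per the NOTE in forward above):
#
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("gpt2")
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   loss_fn = FormatAwareLoss(tokenizer)
#
#   batch = tokenizer("answer: <a>3</a>", return_tensors="pt")
#   logits = model(**batch).logits              # (batch, seq_len, vocab_size)
#   loss = loss_fn(logits, batch["input_ids"])  # loss only on the tag tokens
#   loss.backward()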