|
|
import torch |
|
|
from torch import nn |
|
|
import re |
|
|
|
|
|
|
|
|
from configs.hyperparametric import Reward_config |
|
|
|
|
|
# Module-level reward configuration loaded once at import time; supplies the
# format-tag strings ('open_tag'/'close_tag') and the digit range
# ('lower'/'upper') consumed by FormatGradientMasker below.
config = Reward_config().to_dict()
|
|
|
|
|
class FormatGradientMasker:
    """Build a token-level boolean mask over positions belonging to
    ``<a>d</a>``-style format spans, so loss/gradients can be restricted
    to those tokens.

    NOTE(review): ``create_mask`` only decodes and scans batch row 0 — it
    effectively assumes batch size 1; confirm against callers before using
    with larger batches.
    """

    def __init__(self, tokenizer, pattern=r"<a>\d</a>"):
        """
        Args:
            tokenizer: HF-style tokenizer exposing ``encode``/``decode``.
            pattern: regex (string) describing one format span.
        """
        self.tokenizer = tokenizer
        self.pattern = re.compile(pattern)
        # Precomputed set of token ids that can appear inside a format span.
        # NOTE(review): not consulted by create_mask; kept for external
        # consumers / backward compatibility.
        self.format_token_ids = self._get_format_tokens()

    def _get_format_tokens(self):
        """Return the set of token ids obtained by encoding every
        ``open_tag + i + close_tag`` string for i in [lower, upper)."""
        tokens = set()
        lower, upper = config['lower'], config['upper']
        for i in range(lower, upper):
            text = '{s}{_}{e}'.format(s=config['open_tag'], _=i, e=config['close_tag'])
            token_ids = self.tokenizer.encode(text, add_special_tokens=False)
            tokens.update(token_ids)
        return tokens

    def create_mask(self, input_ids):
        """Return a boolean mask with the shape of ``input_ids`` that is True
        on token positions covered by a regex format match.

        Args:
            input_ids: 2-D LongTensor of token ids; only row 0 is examined.

        Returns:
            torch.BoolTensor, True at format-span token positions.
        """
        # Allocate as bool directly instead of float32 + .bool() at return.
        mask = torch.zeros_like(input_ids, dtype=torch.bool)
        text = self.tokenizer.decode(input_ids[0], skip_special_tokens=True)

        seq_len = input_ids.size(1)
        for match in self.pattern.finditer(text):
            # Re-encode just the matched substring.
            # NOTE(review): a substring encoded in isolation may tokenize
            # differently than it did in context; in that case the scan
            # below finds no alignment and the span is silently unmasked.
            match_tokens = self.tokenizer.encode(match.group(0), add_special_tokens=False)
            if not match_tokens:
                continue
            # Hoisted out of the position scan: previously a new tensor was
            # built and moved to device at every candidate position (O(n)
            # allocations per match). Build it once per match instead.
            match_tensor = torch.tensor(
                match_tokens, dtype=input_ids.dtype, device=input_ids.device
            )
            window = len(match_tokens)
            for start in range(seq_len - window + 1):
                if torch.equal(input_ids[0, start:start + window], match_tensor):
                    mask[0, start:start + window] = True

        return mask
|
|
|
|
|
class FormatAwareLoss(nn.Module):
    """Next-token cross-entropy restricted to format-token positions.

    Computes a standard causal-LM CE loss per token, then keeps only the
    loss terms whose *target* token lies inside a ``<a>d</a>`` format span,
    averaging over those positions.
    """

    def __init__(self, tokenizer):
        """
        Args:
            tokenizer: tokenizer forwarded to FormatGradientMasker.
        """
        super().__init__()
        self.tokenizer = tokenizer
        # reduction='none' keeps per-token losses so we can mask before reducing.
        self.ce_loss = nn.CrossEntropyLoss(reduction='none')
        self.masker = FormatGradientMasker(tokenizer)

    def forward(self, logits, labels):
        """
        Args:
            logits: (batch, seq, vocab) model outputs.
            labels: (batch, seq) target token ids.
                NOTE(review): ignore-index values such as -100 would break
                ``tokenizer.decode`` inside the masker — confirm labels
                contain only real token ids.

        Returns:
            Scalar tensor: mean CE over masked (format-token) positions.
        """
        # Standard causal shift: logits at position i predict labels[i+1].
        shift_logits = logits[..., :-1, :].contiguous()
        shift_labels = labels[..., 1:].contiguous()

        losses = self.ce_loss(
            shift_logits.view(-1, shift_logits.size(-1)),
            shift_labels.view(-1)
        ).view(shift_labels.shape)

        # BUGFIX: ``losses`` is aligned with the *shifted* labels (the target
        # of each loss term), but the mask was previously built from
        # labels[:, :-1] — the input positions — leaving it off by one
        # relative to the losses. Build it from the shifted labels instead.
        mask = self.masker.create_mask(shift_labels)

        masked_losses = losses * mask.float()

        # Epsilon guards against division by zero when no format span matched.
        return masked_losses.sum() / (mask.sum() + 1e-8)