import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist

from transformers.models.roberta.modeling_roberta import RobertaPreTrainedModel, RobertaModel, RobertaLMHead
from transformers.models.bert.modeling_bert import BertPreTrainedModel, BertModel, BertLMPredictionHead
from transformers.modeling_outputs import SequenceClassifierOutput, BaseModelOutputWithPoolingAndCrossAttentions
from argparse import Namespace
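
# SimCSE-style contrastive sentence-embedding model with an added distillation
# objective: the pooled [CLS] representation is projected to 1536 dimensions and
# trained to match precomputed teacher embeddings (`left_emb`/`right_emb`) via an
# MSE term plus a symmetric KL term over cosine-similarity matrices. The
# `*_openai` variable names below suggest the teacher embeddings come from an
# OpenAI embedding model, but the exact teacher source is an assumption here.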


class MLPLayer(nn.Module):
    """
    Head for getting sentence representations over RoBERTa/BERT's [CLS]
    representation, projected to the teacher embedding dimension.
    """

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # 1536 matches the dimension of the teacher embeddings (left_emb/right_emb).
        self.fc = nn.Linear(config.hidden_size, 1536)
        self.activation = nn.Tanh()

    def forward(self, features, **kwargs):
        x = self.dense(features)
        x = self.fc(x)
        x = self.activation(x)
        return x


class Similarity(nn.Module):
    """
    Cosine similarity scaled by a temperature.
    """

    def __init__(self, temp):
        super().__init__()
        self.temp = temp
        self.cos = nn.CosineSimilarity(dim=-1)

    def forward(self, x, y):
        return self.cos(x, y) / self.temp


class Pooler(nn.Module):
    """
    Parameter-free poolers to get the sentence embedding.
    'cls': [CLS] representation with BERT/RoBERTa's MLP pooler.
    'cls_before_pooler': [CLS] representation without the original MLP pooler.
    'avg': average of the last layer's hidden states at each token.
    'avg_top2': average of the last two layers.
    'avg_first_last': average of the first and the last layers.
    """

    def __init__(self, pooler_type):
        super().__init__()
        self.pooler_type = pooler_type
        assert self.pooler_type in ["cls", "cls_before_pooler", "avg", "avg_top2",
                                    "avg_first_last"], "unrecognized pooling type %s" % self.pooler_type

    def forward(self, attention_mask, outputs):
        last_hidden = outputs.last_hidden_state
        hidden_states = outputs.hidden_states

        if self.pooler_type in ['cls_before_pooler', 'cls']:
            return last_hidden[:, 0]
        elif self.pooler_type == "avg":
            return ((last_hidden * attention_mask.unsqueeze(-1)).sum(1) / attention_mask.sum(-1).unsqueeze(-1))
        elif self.pooler_type == "avg_first_last":
            first_hidden = hidden_states[1]
            last_hidden = hidden_states[-1]
            pooled_result = ((first_hidden + last_hidden) / 2.0 * attention_mask.unsqueeze(-1)).sum(
                1) / attention_mask.sum(-1).unsqueeze(-1)
            return pooled_result
        elif self.pooler_type == "avg_top2":
            second_last_hidden = hidden_states[-2]
            last_hidden = hidden_states[-1]
            pooled_result = ((last_hidden + second_last_hidden) / 2.0 * attention_mask.unsqueeze(-1)).sum(
                1) / attention_mask.sum(-1).unsqueeze(-1)
            return pooled_result
        else:
            raise NotImplementedError


def mse_loss_mat(tensor_left, tensor_right):
    """Pairwise cosine-similarity matrix between the rows of the two inputs.

    (Despite the name, this returns cosine similarities, not an MSE value.)
    """
    cos_sim_matrix = torch.matmul(tensor_left, tensor_right.t())
    cos_sim_matrix /= torch.matmul(torch.norm(tensor_left, dim=1, keepdim=True),
                                   torch.norm(tensor_right, dim=1, keepdim=True).t())
    return cos_sim_matrix


def cl_init(cls, config):
    """
    Contrastive learning class init function.
    """
    cls.pooler_type = cls.model_args.pooler_type
    cls.pooler = Pooler(cls.model_args.pooler_type)
    if cls.model_args.pooler_type == "cls":
        cls.mlp = MLPLayer(config)
    cls.sim = Similarity(temp=cls.model_args.temp)
    cls.init_weights()


def cl_forward(cls,
               encoder,
               input_ids=None,
               attention_mask=None,
               token_type_ids=None,
               position_ids=None,
               head_mask=None,
               inputs_embeds=None,
               labels=None,
               output_attentions=None,
               output_hidden_states=None,
               return_dict=None,
               mlm_input_ids=None,
               mlm_labels=None,
               left_emb=None,
               right_emb=None
               ):
    return_dict = return_dict if return_dict is not None else cls.config.use_return_dict
    batch_size = input_ids.size(0)
    # Number of sentences per example: 2 (pair) or 3 (pair + hard negative).
    num_sent = input_ids.size(1)

    mlm_outputs = None
    # Flatten (bs, num_sent, len) -> (bs * num_sent, len) so all views are encoded in one pass.
    input_ids = input_ids.view((-1, input_ids.size(-1)))
    attention_mask = attention_mask.view((-1, attention_mask.size(-1)))
    if token_type_ids is not None:
        token_type_ids = token_type_ids.view((-1, token_type_ids.size(-1)))

    if inputs_embeds is not None:
        input_ids = None

    # Get raw embeddings from the encoder.
    outputs = encoder(
        input_ids,
        attention_mask=attention_mask,
        token_type_ids=token_type_ids,
        position_ids=position_ids,
        head_mask=head_mask,
        inputs_embeds=inputs_embeds,
        output_attentions=output_attentions,
        output_hidden_states=cls.model_args.pooler_type in ['avg_top2', 'avg_first_last'],
        return_dict=True,
    )

    # Second forward pass on the MLM-masked inputs (auxiliary objective).
    if mlm_input_ids is not None:
        mlm_input_ids = mlm_input_ids.view((-1, mlm_input_ids.size(-1)))
        mlm_outputs = encoder(
            mlm_input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=cls.model_args.pooler_type in ['avg_top2', 'avg_first_last'],
            return_dict=True,
        )

    # Pooling: (bs * num_sent, hidden) -> (bs, num_sent, hidden)
    pooler_output = cls.pooler(attention_mask, outputs)
    pooler_output = pooler_output.view(
        (batch_size, num_sent, pooler_output.size(-1)))

    # If using the "cls" pooler, apply the extra MLP, which also projects to the
    # teacher embedding dimension (1536).
    if cls.pooler_type == "cls":
        pooler_output = cls.mlp(pooler_output)

    # z1 and z2 are the representations of the two views of each example.
    z1, z2 = pooler_output[:, 0], pooler_output[:, 1]

    # Precomputed teacher embeddings for the two views.
    tensor_left, tensor_right = left_emb, right_emb

    # Hard negative.
    if num_sent == 3:
        z3 = pooler_output[:, 2]

    # Gather all embeddings if using distributed training.
    if dist.is_initialized() and cls.training:
        # Gather hard negatives.
        if num_sent >= 3:
            z3_list = [torch.zeros_like(z3)
                       for _ in range(dist.get_world_size())]
            dist.all_gather(tensor_list=z3_list, tensor=z3.contiguous())
            z3_list[dist.get_rank()] = z3
            z3 = torch.cat(z3_list, 0)

        # Dummy tensors for all_gather.
        z1_list = [torch.zeros_like(z1) for _ in range(dist.get_world_size())]
        z2_list = [torch.zeros_like(z2) for _ in range(dist.get_world_size())]
        dist.all_gather(tensor_list=z1_list, tensor=z1.contiguous())
        dist.all_gather(tensor_list=z2_list, tensor=z2.contiguous())

        # all_gather results carry no gradients, so put the local tensors back
        # in place for this rank before concatenating the full batch.
        z1_list[dist.get_rank()] = z1
        z2_list[dist.get_rank()] = z2
        z1 = torch.cat(z1_list, 0)
        z2 = torch.cat(z2_list, 0)

    # Embedding-matching (MSE) loss against the teacher embeddings. Note that in
    # distributed training z1/z2 are gathered across ranks, so left_emb/right_emb
    # must match the gathered batch size.
    mse_loss = F.mse_loss(z1, tensor_left) + F.mse_loss(z2, tensor_right)

    # KL-divergence loss: match the student's pairwise cosine-similarity
    # distribution (between z1 and z2) to the teacher's (between left_emb and
    # right_emb), in both the row-wise and column-wise softmax directions.
    kl_loss_fct = nn.KLDivLoss(reduction="batchmean")
    beta = 5  # sharpening factor applied before the softmax over similarities

    cos_sim_matrix_openai = mse_loss_mat(tensor_left, tensor_right)
    beta_scaled_cos_sim_matrix_openai = beta * cos_sim_matrix_openai

    cos_sim_matrix_data = mse_loss_mat(z1, z2)
    beta_scaled_cos_sim_matrix_data = beta * cos_sim_matrix_data

    beta_scaled_cos_sim_matrix_openai_vertical = beta_scaled_cos_sim_matrix_openai.softmax(dim=1)
    beta_scaled_cos_sim_matrix_openai_horizontal = beta_scaled_cos_sim_matrix_openai.softmax(dim=0)

    beta_scaled_cos_sim_matrix_data_vertical = beta_scaled_cos_sim_matrix_data.softmax(dim=1)
    beta_scaled_cos_sim_matrix_data_horizontal = beta_scaled_cos_sim_matrix_data.softmax(dim=0)

    KL_vertical_loss = kl_loss_fct(beta_scaled_cos_sim_matrix_data_vertical.log(),
                                   beta_scaled_cos_sim_matrix_openai_vertical)
    KL_horizontal_loss = kl_loss_fct(beta_scaled_cos_sim_matrix_data_horizontal.log(),
                                     beta_scaled_cos_sim_matrix_openai_horizontal)

    KL_loss = (KL_vertical_loss + KL_horizontal_loss) / 2

    # Total distillation loss (returned when return_dict is used).
    ziang_loss = KL_loss + mse_loss

    # Standard SimCSE contrastive (InfoNCE) loss over in-batch negatives.
    cos_sim = cls.sim(z1.unsqueeze(1), z2.unsqueeze(0))

    # Append hard negatives as extra columns.
    if num_sent >= 3:
        z1_z3_cos = cls.sim(z1.unsqueeze(1), z3.unsqueeze(0))
        cos_sim = torch.cat([cos_sim, z1_z3_cos], 1)

    labels = torch.arange(cos_sim.size(0)).long().to(cls.device)
    loss_fct = nn.CrossEntropyLoss()

    # Weight the hard-negative columns (the weights are added to the logits).
    if num_sent == 3:
        z3_weight = cls.model_args.hard_negative_weight
        weights = torch.tensor(
            [[0.0] * (cos_sim.size(-1) - z1_z3_cos.size(-1)) + [0.0] * i + [z3_weight] + [0.0] * (
                z1_z3_cos.size(-1) - i - 1) for i in range(z1_z3_cos.size(-1))]
        ).to(cls.device)
        cos_sim = cos_sim + weights

    loss = loss_fct(cos_sim, labels)

    # Auxiliary MLM loss.
    if mlm_outputs is not None and mlm_labels is not None:
        mlm_labels = mlm_labels.view(-1, mlm_labels.size(-1))
        prediction_scores = cls.lm_head(mlm_outputs.last_hidden_state)
        masked_lm_loss = loss_fct(
            prediction_scores.view(-1, cls.config.vocab_size), mlm_labels.view(-1))
        loss = loss + cls.model_args.mlm_weight * masked_lm_loss

    if not return_dict:
        output = (cos_sim,) + outputs[2:]
        return ((loss,) + output) if loss is not None else output

    # Note: the contrastive/MLM `loss` above is only returned on the non-return_dict
    # path; with return_dict, the reported loss is the distillation loss (KL + MSE).
    return SequenceClassifierOutput(
        loss=ziang_loss,
        logits=cos_sim,
        hidden_states=outputs.hidden_states,
    )


def sentemb_forward(
    cls,
    encoder,
    input_ids=None,
    attention_mask=None,
    token_type_ids=None,
    position_ids=None,
    head_mask=None,
    inputs_embeds=None,
    labels=None,
    output_attentions=None,
    output_hidden_states=None,
    return_dict=None,
):
    return_dict = return_dict if return_dict is not None else cls.config.use_return_dict

    if inputs_embeds is not None:
        input_ids = None

    outputs = encoder(
        input_ids,
        attention_mask=attention_mask,
        token_type_ids=token_type_ids,
        position_ids=position_ids,
        head_mask=head_mask,
        inputs_embeds=inputs_embeds,
        output_attentions=output_attentions,
        output_hidden_states=cls.pooler_type in ['avg_top2', 'avg_first_last'],
        return_dict=True,
    )

    pooler_output = cls.pooler(attention_mask, outputs)
    if cls.pooler_type == "cls" and not cls.model_args.mlp_only_train:
        pooler_output = cls.mlp(pooler_output)

    if not return_dict:
        return (outputs[0], pooler_output) + outputs[2:]

    return BaseModelOutputWithPoolingAndCrossAttentions(
        pooler_output=pooler_output,
        last_hidden_state=outputs.last_hidden_state,
        hidden_states=outputs.hidden_states,
    )


# Fallback arguments used when no `model_args` is passed to the model constructors.
default_model_args = Namespace(
    do_mlm=None,
    pooler_type="cls",
    temp=0.05,
    mlp_only_train=False
)
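# Note: `hard_negative_weight` and `mlm_weight` are not part of these defaults; a
# custom `model_args` must provide them when hard negatives or the MLM objective
# are used.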


class BertForCL(BertPreTrainedModel):
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def __init__(self, config, *model_args, **model_kwargs):
        super().__init__(config)
        self.model_args = model_kwargs.get('model_args') or default_model_args
        self.bert = BertModel(config, add_pooling_layer=False)
        if self.model_args.do_mlm:
            self.lm_head = BertLMPredictionHead(config)
        cl_init(self, config)

    def forward(self,
                input_ids=None,
                attention_mask=None,
                token_type_ids=None,
                position_ids=None,
                head_mask=None,
                inputs_embeds=None,
                labels=None,
                output_attentions=None,
                output_hidden_states=None,
                return_dict=None,
                sent_emb=False,
                mlm_input_ids=None,
                mlm_labels=None,
                left_emb=None,
                right_emb=None,
                ):
        if sent_emb:
            return sentemb_forward(self, self.bert,
                                   input_ids=input_ids,
                                   attention_mask=attention_mask,
                                   token_type_ids=token_type_ids,
                                   position_ids=position_ids,
                                   head_mask=head_mask,
                                   inputs_embeds=inputs_embeds,
                                   labels=labels,
                                   output_attentions=output_attentions,
                                   output_hidden_states=output_hidden_states,
                                   return_dict=return_dict,
                                   )
        else:
            return cl_forward(self, self.bert,
                              input_ids=input_ids,
                              attention_mask=attention_mask,
                              token_type_ids=token_type_ids,
                              position_ids=position_ids,
                              head_mask=head_mask,
                              inputs_embeds=inputs_embeds,
                              labels=labels,
                              output_attentions=output_attentions,
                              output_hidden_states=output_hidden_states,
                              return_dict=return_dict,
                              mlm_input_ids=mlm_input_ids,
                              mlm_labels=mlm_labels,
                              left_emb=left_emb,
                              right_emb=right_emb,
                              )


class RobertaForCL(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def __init__(self, config, *model_args, **model_kwargs):
        super().__init__(config)
        self.roberta = RobertaModel(config, add_pooling_layer=False)
        self.model_args = model_kwargs.get('model_args') or default_model_args
        if self.model_args.do_mlm:
            self.lm_head = RobertaLMHead(config)
        cl_init(self, config)

    def forward(self,
                input_ids=None,
                attention_mask=None,
                token_type_ids=None,
                position_ids=None,
                head_mask=None,
                inputs_embeds=None,
                labels=None,
                output_attentions=None,
                output_hidden_states=None,
                return_dict=None,
                sent_emb=False,
                mlm_input_ids=None,
                mlm_labels=None,
                left_emb=None,
                right_emb=None,
                ):
        if sent_emb:
            return sentemb_forward(self, self.roberta,
                                   input_ids=input_ids,
                                   attention_mask=attention_mask,
                                   token_type_ids=token_type_ids,
                                   position_ids=position_ids,
                                   head_mask=head_mask,
                                   inputs_embeds=inputs_embeds,
                                   labels=labels,
                                   output_attentions=output_attentions,
                                   output_hidden_states=output_hidden_states,
                                   return_dict=return_dict,
                                   )
        else:
            return cl_forward(self, self.roberta,
                              input_ids=input_ids,
                              attention_mask=attention_mask,
                              token_type_ids=token_type_ids,
                              position_ids=position_ids,
                              head_mask=head_mask,
                              inputs_embeds=inputs_embeds,
                              labels=labels,
                              output_attentions=output_attentions,
                              output_hidden_states=output_hidden_states,
                              return_dict=return_dict,
                              mlm_input_ids=mlm_input_ids,
                              mlm_labels=mlm_labels,
                              left_emb=left_emb,
                              right_emb=right_emb,
                              )
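

# A minimal, hypothetical smoke test (not part of the original training setup):
# it loads a plain "bert-base-uncased" checkpoint with the fallback
# `default_model_args`, builds a (batch_size=1, num_sent=2) batch from two
# paraphrase-like sentences, and uses random 1536-dim tensors as stand-ins for
# the real teacher embeddings, just to exercise `cl_forward` end to end.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    model = BertForCL.from_pretrained("bert-base-uncased")

    sentences = ["A man is playing a guitar.", "Someone plays an instrument."]
    batch = tokenizer(sentences, padding=True, return_tensors="pt")
    # Reshape each field to (batch_size=1, num_sent=2, seq_len) as cl_forward expects.
    batch = {k: v.unsqueeze(0) for k, v in batch.items()}

    left_emb = torch.randn(1, 1536)   # placeholder teacher embedding for view 1
    right_emb = torch.randn(1, 1536)  # placeholder teacher embedding for view 2

    out = model(**batch, left_emb=left_emb, right_emb=right_emb)
    print("loss:", out.loss.item(), "logits shape:", tuple(out.logits.shape))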