from dataclasses import dataclass
from typing import Optional

import torch
from torch import nn
from transformers import (
    Wav2Vec2Model,
    Wav2Vec2PreTrainedModel,
)
from transformers.utils import ModelOutput

from .configuration_wav2vec2multihead import Wav2Vec2MultiHeadConfig


@dataclass
class Wav2Vec2MultiHeadMultiLabelOutput(ModelOutput):
    loss: Optional[torch.FloatTensor] = None
    logits1: torch.FloatTensor = None
    logits2: torch.FloatTensor = None
    logits3: torch.FloatTensor = None
    hidden_states: Optional[tuple[torch.FloatTensor]] = None
    attentions: Optional[tuple[torch.FloatTensor]] = None


class Wav2Vec2ForMultiHeadMultiLabelClassification(Wav2Vec2PreTrainedModel):
    """Wav2Vec2 model with multiple classifier heads for multi-label classification.

    Three classifier heads are hard-coded, one per task, e.g. action, object, and
    location classification on the FSC-IC dataset. A minimal usage sketch is
    included at the bottom of this module.

    Returns:
        Wav2Vec2MultiHeadMultiLabelOutput: Contains the loss and the logits of each
        of the three tasks, as well as hidden states and attentions if requested.
    """

    config_class = Wav2Vec2MultiHeadConfig

    def __init__(self, config):
        super().__init__(config)
        self.wav2vec2 = Wav2Vec2Model(config)
        self.dropout = nn.Dropout(config.final_dropout)
        self.classifier1 = nn.Linear(config.hidden_size, config.num_labels_1)
        self.classifier2 = nn.Linear(config.hidden_size, config.num_labels_2)
        self.classifier3 = nn.Linear(config.hidden_size, config.num_labels_3)
        self.init_weights()

    def freeze_feature_extractor(self):
        """Disable gradient updates for the convolutional feature extractor."""
        self.wav2vec2.feature_extractor._freeze_parameters()

    def freeze_cnn_projection(self):
        """Disable gradient updates for the feature projection layer."""
        for param in self.wav2vec2.feature_projection.parameters():
            param.requires_grad = False

    def forward(
        self,
        input_values,
        attention_mask=None,
        labels1=None,
        labels2=None,
        labels3=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        return_dict = (
            return_dict if return_dict is not None else self.config.use_return_dict
        )
        outputs = self.wav2vec2(
            input_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Pool the frame-level encoder outputs into a single utterance-level
        # representation by averaging over the time dimension.
        hidden_states = outputs[0]
        hidden_states = self.dropout(hidden_states)
        hidden_states = torch.mean(hidden_states, dim=1)

        # One set of logits per classification head.
        logits1 = self.classifier1(hidden_states)
        logits2 = self.classifier2(hidden_states)
        logits3 = self.classifier3(hidden_states)

        loss = None
        if labels1 is not None and labels2 is not None and labels3 is not None:
            # The total loss is the sum of the per-head cross-entropy losses and is
            # only computed when labels for all three heads are provided.
            loss_fct = nn.CrossEntropyLoss()
            loss1 = loss_fct(
                logits1.view(-1, self.config.num_labels_1), labels1.view(-1)
            )
            loss2 = loss_fct(
                logits2.view(-1, self.config.num_labels_2), labels2.view(-1)
            )
            loss3 = loss_fct(
                logits3.view(-1, self.config.num_labels_3), labels3.view(-1)
            )
            loss = loss1 + loss2 + loss3

        return Wav2Vec2MultiHeadMultiLabelOutput(
            loss=loss,
            logits1=logits1,
            logits2=logits2,
            logits3=logits3,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
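

if __name__ == "__main__":
    # Minimal usage sketch (not part of the model definition), referenced from the
    # class docstring above. It assumes that Wav2Vec2MultiHeadConfig accepts
    # `final_dropout`, `num_labels_1`, `num_labels_2`, and `num_labels_3` as
    # constructor keyword arguments, since those attributes are read in `__init__`
    # and `forward`; the label counts below are illustrative only.
    config = Wav2Vec2MultiHeadConfig(
        final_dropout=0.1,
        num_labels_1=6,   # e.g. actions
        num_labels_2=14,  # e.g. objects
        num_labels_3=4,   # e.g. locations
    )
    model = Wav2Vec2ForMultiHeadMultiLabelClassification(config)
    model.freeze_feature_extractor()

    # Two one-second dummy waveforms sampled at 16 kHz, with one label per head.
    input_values = torch.randn(2, 16000)
    labels1 = torch.tensor([0, 1])
    labels2 = torch.tensor([0, 1])
    labels3 = torch.tensor([0, 1])

    outputs = model(
        input_values,
        labels1=labels1,
        labels2=labels2,
        labels3=labels3,
    )
    print(outputs.loss, outputs.logits1.shape, outputs.logits2.shape, outputs.logits3.shape)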