import torch
import torch.nn as nn
from typing import Optional
from transformers.modeling_utils import PreTrainedModel
from transformers.configuration_utils import PretrainedConfig


class TextEmbedding3SmallSentimentHeadConfig(PretrainedConfig):
    """Configuration for a lightweight sentiment classification head that
    runs on precomputed OpenAI text-embedding-3-small vectors (1536-dim)."""

    model_type = "sentiment-head"

    def __init__(
        self,
        input_dim: int = 1536,
        hidden_dim: int = 512,
        dropout: float = 0.2,
        num_labels: int = 3,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.input_dim = int(input_dim)
        self.hidden_dim = int(hidden_dim)
        self.dropout = float(dropout)
        self.num_labels = int(num_labels)


class TextEmbedding3SmallSentimentHead(PreTrainedModel):
    config_class = TextEmbedding3SmallSentimentHeadConfig

    def __init__(self, config: TextEmbedding3SmallSentimentHeadConfig) -> None:
        super().__init__(config)
        if config.hidden_dim and config.hidden_dim > 0:
            # Two-layer MLP head: Linear -> ReLU -> Dropout -> Linear.
            self.net = nn.Sequential(
                nn.Linear(config.input_dim, config.hidden_dim),
                nn.ReLU(),
                nn.Dropout(p=config.dropout),
                nn.Linear(config.hidden_dim, config.num_labels),
            )
        else:
            # hidden_dim <= 0 degrades to plain logistic regression.
            self.net = nn.Linear(config.input_dim, config.num_labels)
        self.post_init()

    def forward(
        self,
        inputs_embeds: torch.FloatTensor,
        labels: Optional[torch.LongTensor] = None,
        **kwargs,
    ):
        # inputs_embeds: (batch_size, input_dim) precomputed embeddings.
        logits = self.net(inputs_embeds)
        loss = None
        if labels is not None:
            # Standard multi-class cross-entropy over (batch_size,) class ids.
            loss = nn.CrossEntropyLoss()(logits, labels)
        return {"logits": logits, "loss": loss}