import torch
import torch.nn as nn
import numpy as np


# Fixed sinusoidal positional encoding (as in "Attention Is All You Need")
class PositionalEncoding(nn.Module):
    def __init__(self, d_model, max_len=32):
        super().__init__()
        pe = torch.zeros(max_len, d_model)
        pos = torch.arange(0, max_len).unsqueeze(1)
        # Geometric frequency schedule across the embedding dimensions
        div = torch.exp(torch.arange(0, d_model, 2) * (-np.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(pos * div)  # even dimensions
        pe[:, 1::2] = torch.cos(pos * div)  # odd dimensions
        self.pe = pe.unsqueeze(0)  # shape: (1, max_len, d_model)

    def forward(self, x):
        # x: (batch, seq_len, d_model); add the encodings for the first seq_len positions
        return x + self.pe[:, :x.size(1)].to(x.device)


# Transformer emotion classifier: token-ID sequences -> class logits
class EmotionTransformer(nn.Module):
    def __init__(self, vocab_size, embed_dim=64, num_heads=4, num_classes=5, dropout=0.3):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim, padding_idx=0)
        self.pos_encoder = PositionalEncoding(embed_dim)
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=embed_dim, nhead=num_heads, batch_first=True
        )
        self.transformer = nn.TransformerEncoder(encoder_layer, num_layers=2)
        self.dropout = nn.Dropout(dropout)
        self.fc = nn.Linear(embed_dim, num_classes)

    def forward(self, x):
        # Token ID 0 is padding; exclude those positions from attention
        mask = (x == 0)
        x = self.embedding(x)
        x = self.pos_encoder(x)
        x = self.transformer(x, src_key_padding_mask=mask)
        # Mean-pool over the sequence dimension (padding positions are included), then classify
        x = self.dropout(x.mean(dim=1))
        return self.fc(x)
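

# Minimal usage sketch, for illustration only: the vocabulary size, batch shape,
# and padding pattern below are assumptions, not values from the original setup.
if __name__ == "__main__":
    model = EmotionTransformer(vocab_size=10_000)
    model.eval()
    # Batch of 2 sequences of 16 token IDs, with the first sequence right-padded with 0s
    tokens = torch.randint(1, 10_000, (2, 16))
    tokens[0, 10:] = 0
    with torch.no_grad():
        logits = model(tokens)                 # shape: (2, num_classes)
        probs = torch.softmax(logits, dim=-1)  # per-class probabilities
    print(logits.shape, probs.argmax(dim=-1))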