---
license: apache-2.0
---

```python
import torch
import torch.nn as nn


class TransformerBlock(nn.Module):
    def __init__(self, sizeVector=256, numHeads=8, dropout=0.1):
        super().__init__()
        self.ln1 = nn.LayerNorm(sizeVector)
        self.attn = nn.MultiheadAttention(sizeVector, numHeads, batch_first=True)
        self.dropout_attn = nn.Dropout(dropout)
        self.ln2 = nn.LayerNorm(sizeVector)
        self.ff = nn.Sequential(
            nn.Linear(sizeVector, sizeVector * 4),
            nn.GELU(),
            nn.Linear(sizeVector * 4, sizeVector),
            nn.Dropout(dropout),
        )

    def forward(self, x, attention_mask=None):
        # attention_mask uses 1 for real tokens and 0 for padding;
        # MultiheadAttention expects True where positions should be ignored.
        key_padding_mask = ~attention_mask.bool() if attention_mask is not None else None
        # Pre-norm self-attention with a residual connection.
        h = self.ln1(x)
        attn_out, _ = self.attn(h, h, h, key_padding_mask=key_padding_mask)
        x = x + self.dropout_attn(attn_out)
        # Pre-norm feed-forward block with a residual connection.
        x = x + self.ff(self.ln2(x))
        return x
```
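
A quick sanity check of a single block, as a minimal sketch: the batch size, sequence length, and padding pattern below are made up for illustration.

```python
# Illustrative only: shapes and the padding pattern are arbitrary.
block = TransformerBlock(sizeVector=256, numHeads=8)
x = torch.randn(2, 10, 256)            # (batch, seq_len, sizeVector)
mask = torch.ones(2, 10, dtype=torch.long)
mask[:, 7:] = 0                        # pretend the last three positions are padding
out = block(x, attention_mask=mask)
print(out.shape)                       # torch.Size([2, 10, 256])
```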

```python
class TransformerRun(nn.Module):
    def __init__(self, vocabSize=120000, maxLen=100, sizeVector=256,
                 numBlocks=4, numHeads=8, numClasses=3, dropout=0.1):
        super().__init__()
        self.token_emb = nn.Embedding(vocabSize, sizeVector)
        self.pos_emb = nn.Embedding(maxLen, sizeVector)
        self.layers = nn.ModuleList([
            TransformerBlock(sizeVector=sizeVector, numHeads=numHeads, dropout=dropout)
            for _ in range(numBlocks)
        ])
        self.dropout = nn.Dropout(dropout)
        # The head sees the first-token embedding concatenated with the
        # mean-pooled sequence, hence the doubled feature size.
        self.ln = nn.LayerNorm(sizeVector * 2)
        self.classifier = nn.Linear(sizeVector * 2, numClasses)

    def forward(self, x, attention_mask=None):
        B, T = x.shape
        # Learned token and positional embeddings.
        tok = self.token_emb(x)
        pos = self.pos_emb(torch.arange(T, device=x.device).unsqueeze(0).expand(B, T))
        h = tok + pos

        for layer in self.layers:
            h = layer(h, attention_mask)

        # Pool: first-token representation concatenated with the sequence mean.
        cls_token = h[:, 0, :]
        mean_pool = h.mean(dim=1)
        combined = torch.cat([cls_token, mean_pool], dim=1)
        combined = self.ln(self.dropout(combined))
        logits = self.classifier(combined)
        return logits
```
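
A minimal usage sketch for the full classifier; the random token IDs and mask below are placeholders, and in practice a tokenizer with a matching vocabulary would produce them.

```python
# Illustrative only: random token IDs stand in for real tokenizer output.
model = TransformerRun(vocabSize=120000, maxLen=100, sizeVector=256,
                       numBlocks=4, numHeads=8, numClasses=3)
model.eval()

input_ids = torch.randint(0, 120000, (2, 100))       # (batch, seq_len)
attention_mask = torch.ones(2, 100, dtype=torch.long)
attention_mask[:, 60:] = 0                           # pretend sequences are 60 tokens long

with torch.no_grad():
    logits = model(input_ids, attention_mask=attention_mask)

print(logits.shape)                                  # torch.Size([2, 3])
```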