# (file-listing extraction artifacts removed — script begins below)
# How to load this model:
from transformers import AutoModel, AutoTokenizer
import torch
import torch.nn as nn
import json

MODEL_DIR = "./outputs/final_baseline_best"

# Load the fine-tuned encoder.
encoder = AutoModel.from_pretrained(MODEL_DIR)

# Load the classifier head's config; fall back to the training defaults
# (binary head on a 768-dim encoder) when keys are missing.
with open(MODEL_DIR + "/classifier_config.json", 'r') as f:
    c_config = json.load(f)
num_labels = c_config.get('num_labels', 1)
hidden_size = c_config.get('hidden_size', 768)

# Reconstruct the classifier head with the same architecture it was trained with.
classifier = nn.Sequential(
    nn.Linear(hidden_size, 256),
    nn.ReLU(),
    nn.Dropout(0.1),
    nn.Linear(256, num_labels),
)
# map_location="cpu" lets a GPU-saved checkpoint load on CPU-only machines.
# NOTE(review): consider weights_only=True (PyTorch >= 2.0) to avoid
# arbitrary-code pickle deserialization — confirm the installed torch version.
classifier.load_state_dict(
    torch.load(MODEL_DIR + "/classifier.pt", map_location="cpu")
)

# BUG FIX: modules default to train mode, so Dropout(0.1) would be active at
# inference time, making predictions stochastic. Force eval mode for both.
encoder.eval()
classifier.eval()

# Load tokenizer
tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR)
# Inference function
def predict(text):
    """Return the hate-speech probability (a float in [0, 1]) for one string.

    Relies on the module-level ``tokenizer``, ``encoder`` and ``classifier``
    objects loaded above.
    """
    # BUG FIX (defensive, idempotent): ensure dropout/batchnorm layers are in
    # eval mode — in train mode Dropout(0.1) would make results stochastic.
    encoder.eval()
    classifier.eval()
    inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=128)
    with torch.no_grad():
        outputs = encoder(**inputs)
        # [CLS] token embedding: shape (batch=1, hidden_size).
        cls_embedding = outputs.last_hidden_state[:, 0, :]
        logits = classifier(cls_embedding)
        # Single-logit binary head -> sigmoid gives P(hate speech).
        probs = torch.sigmoid(logits)
    return probs.item()
# Example usage — guarded so that importing this module (e.g. just for
# ``predict``) does not trigger the demo run as a side effect.
if __name__ == "__main__":
    text = "আপনার বাংলা টেক্সট এখানে"
    prob = predict(text)
    print(f"Hate Speech Probability: {prob:.4f}")
    print(f"Prediction: {'Hate Speech' if prob > 0.5 else 'Non-Hate Speech'}")