| |
| """Inference script for HR conversation multi-label classifier.""" |
| from transformers import AutoTokenizer, AutoModelForSequenceClassification |
| import torch |
|
|
# Hugging Face Hub model id for the fine-tuned multi-label HR classifier.
MODEL_ID = "AurelPx/hr-conversations-classifier"


# The 20 HR topic labels, indexed by classifier output position.
# NOTE(review): this order is assumed to match the model head's index order
# (model.config.id2label) — confirm against the checkpoint's config.
LABELS = [
    "Benefits", "Career Development", "Compliance & Legal", "Contracts",
    "Diversity, Equity & Inclusion", "Expense Management", "Harassment", "Health",
    "IT & Equipment", "Leave & Absence", "Mobility", "Offboarding",
    "Onboarding", "Payroll", "Performance Management", "Recruitment",
    "Safety", "Timetracking", "Training", "Work Arrangements",
]


# Load tokenizer + model once at import time (downloads from the Hub on
# first run) and switch to eval mode to disable dropout for inference.
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_ID)
model.eval()
|
|
def classify(text: str, threshold: float = 0.5) -> tuple[list[str], dict[str, float]]:
    """Classify an HR conversation into zero or more topic labels.

    Multi-label inference: each label gets an independent sigmoid
    probability (not a softmax over labels), so any number of labels —
    including none — can pass the threshold.

    Args:
        text: Conversation text; truncated to 512 tokens.
        threshold: Minimum probability for a label to be predicted.

    Returns:
        (predicted, probs_dict) where ``predicted`` is the list of labels
        with probability >= threshold, and ``probs_dict`` maps every label
        to its probability rounded to 3 decimals.
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
    # Keep inputs on the model's device so inference works even when the
    # model has been moved to GPU by the caller.
    device = next(model.parameters()).device
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        logits = model(**inputs).logits
    # .cpu() before .numpy(): Tensor.numpy() raises on CUDA tensors.
    probs = torch.sigmoid(logits)[0].cpu().numpy()
    predicted = [LABELS[i] for i, p in enumerate(probs) if p >= threshold]
    probs_dict = {LABELS[i]: round(float(p), 3) for i, p in enumerate(probs)}
    return predicted, probs_dict
|
|
| if __name__ == "__main__": |
| sample = ( |
| "USER: I haven't received my payslip for March yet. Could you please check what's going on?\n" |
| "AGENT: Good morning. I've checked the payroll system and it appears your March payslip " |
| "was generated on the 28th but there was a distribution delay. I've resent it to your " |
| "registered email. You should receive it within the next hour." |
| ) |
| preds, probs = classify(sample, threshold=0.5) |
| print(f"Predicted: {preds}") |
| print(f"Top probs: {sorted(probs.items(), key=lambda x: x[1], reverse=True)[:5]}") |
|
|