# Hugging Face Space: suicidality detection + sentiment analysis demo.
from textblob import TextBlob
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load the Hugging Face sequence-classification model once at import time.
# NOTE(review): this downloads weights on first run — requires network access.
model_name = "sentinet/suicidality"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# Index order must match the model's output logits: class 0 = non-suicidal,
# class 1 = suicidal (per the sentinet/suicidality model card).
labels = ["non-suicidal", "suicidal"]
def sentiment_score(text: str) -> float:
    """Return TextBlob sentiment polarity for *text*, rounded to 3 decimals.

    Args:
        text: Input text (TextBlob's default analyzer assumes English).

    Returns:
        Polarity in [-1.0, 1.0]: -1 = most negative, +1 = most positive.
    """
    blob = TextBlob(text)
    return round(blob.sentiment.polarity, 3)
def predict_suicidality(text: str) -> dict:
    """Classify *text* as suicidal/non-suicidal and attach a sentiment score.

    Args:
        text: Input text (English) to classify.

    Returns:
        dict with keys:
            "label":      "non-suicidal" or "suicidal" (argmax class),
            "confidence": softmax probability of the predicted class,
                          rounded to 3 decimals,
            "sentiment":  TextBlob polarity in [-1, 1] (see sentiment_score).
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    # Inference only — disable autograd to save memory and compute.
    with torch.no_grad():
        logits = model(**inputs).logits
    probs = torch.softmax(logits, dim=1)
    pred_class = torch.argmax(probs, dim=1).item()
    confidence = probs[0][pred_class].item()
    return {
        "label": labels[pred_class],
        "confidence": round(confidence, 3),
        "sentiment": sentiment_score(text),
    }