File size: 1,500 Bytes
49d4f01
 
 
 
 
 
caec164
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
# predictor.py
"""Load three Hugging Face sentiment models and build one pipeline per model.

Importing this module has side effects: it downloads/loads all three models
and populates the module-level ``pipelines`` dict used by predict_sentiment.
"""
import os
import re  # noqa: F401  -- kept: may be used by other parts of the project

# Force Hugging Face to use /tmp as cache.
# IMPORTANT: this must be set BEFORE importing transformers/huggingface_hub,
# because the cache directory is resolved at import time. (In the previous
# version transformers was imported first, so this setting had no effect.)
os.environ["HF_HOME"] = "/tmp/huggingface"

import torch
from transformers import (  # noqa: E402  -- deliberately after HF_HOME is set
    AutoModelForSequenceClassification,
    AutoTokenizer,
    BertForSequenceClassification,  # noqa: F401  -- kept for compatibility
    BertTokenizer,  # noqa: F401  -- kept for compatibility
    pipeline,
)

# ✅ Hub repo ids for the three models under comparison.
model_names = {
    "label0": "SreyaDvn/savedModelLebel0",
    "label1": "SreyaDvn/savedModelLebel1",
    "balanced": "SreyaDvn/sentiment-model"
}

# One text-classification pipeline per model; GPU (device 0) when available,
# otherwise CPU (-1).
pipelines = {}
for name, path in model_names.items():
    tokenizer = AutoTokenizer.from_pretrained(path)
    model = AutoModelForSequenceClassification.from_pretrained(path)
    pipelines[name] = pipeline(
        "text-classification",
        model=model,
        tokenizer=tokenizer,
        device=0 if torch.cuda.is_available() else -1
    )

print("✅ All models loaded successfully!")


def predict_sentiment(text: str):
    """Classify *text* with every loaded model and keep the most confident one.

    Each pipeline in the module-level ``pipelines`` dict scores the input;
    the model reporting the highest confidence score is selected (on ties,
    the first model in insertion order wins).

    Returns a dict with the chosen model's name, its raw prediction
    (e.g. ``{'label': 'LABEL_1', 'score': 0.92}``), and all per-model results.
    """
    all_outputs = {
        model_name: clf(text, truncation=True)[0]
        for model_name, clf in pipelines.items()
    }

    # ---- IF-ELSE LOGIC ----
    # Selection rule: highest confidence score wins.
    winner = None
    top_score = float("-inf")
    for model_name, outcome in all_outputs.items():
        if outcome['score'] > top_score:
            winner = model_name
            top_score = outcome['score']

    return {
        "chosen_model": winner,
        "prediction": all_outputs[winner],
        "all_results": all_outputs
    }