# NOTE(review): the three lines that preceded this file ("Spaces: Sleeping /
# Sleeping") are Hugging Face Space status-page text captured during
# extraction, not source code — flagged for removal.
# predictor.py
#
# Loads three Hugging Face sentiment models at import time and exposes
# predict_sentiment() for inference.

import os

# Force Hugging Face to use /tmp as cache.
# BUG FIX: this must be set BEFORE transformers / huggingface_hub are
# imported — huggingface_hub resolves its cache directory at import time,
# and the original code imported transformers first, so HF_HOME was ignored.
os.environ["HF_HOME"] = "/tmp/huggingface"

import re  # noqa: F401  (kept: may be used elsewhere in the file)

import torch
from transformers import (  # noqa: E402  (env var above must precede this)
    AutoModelForSequenceClassification,
    AutoTokenizer,
    BertForSequenceClassification,  # kept: may be used elsewhere in the file
    BertTokenizer,                  # kept: may be used elsewhere in the file
    pipeline,
)
# ✅ Hugging Face repositories for the three sentiment models served here.
model_names = {
    "label0": "SreyaDvn/savedModelLebel0",
    "label1": "SreyaDvn/savedModelLebel1",
    "balanced": "SreyaDvn/sentiment-model"
}

# transformers device convention: 0 = first GPU, -1 = CPU.
_device = 0 if torch.cuda.is_available() else -1


def _build_pipeline(repo):
    """Fetch tokenizer + classifier weights from *repo* and wrap them in a
    text-classification pipeline."""
    return pipeline(
        "text-classification",
        model=AutoModelForSequenceClassification.from_pretrained(repo),
        tokenizer=AutoTokenizer.from_pretrained(repo),
        device=_device,
    )


# Build all three pipelines up front, at import time.
pipelines = {name: _build_pipeline(repo) for name, repo in model_names.items()}

print("✅ All models loaded successfully!")
def predict_sentiment(text: str):
    """
    Classify *text* with every loaded pipeline, then pick a winner.

    Current selection rule (the "IF-ELSE logic"): the model whose top
    prediction carries the highest confidence score wins.

    Returns a dict with:
        chosen_model: key of the winning pipeline in ``pipelines``
        prediction:   its output, e.g. {'label': 'LABEL_1', 'score': 0.92}
        all_results:  raw outputs from every model, keyed by model name
    """
    # Each pipeline returns a one-element list; unwrap to the single dict.
    results = {
        name: pipe(text, truncation=True)[0]
        for name, pipe in pipelines.items()
    }

    # ---- IF-ELSE LOGIC ----
    # Highest confidence score wins (ties resolve to insertion order,
    # same as the original max-over-keys form).
    best_model, best_out = max(results.items(), key=lambda item: item[1]["score"])

    return {
        "chosen_model": best_model,
        "prediction": best_out,
        "all_results": results,
    }