# Hugging Face Spaces app (page-status header removed from scrape).
import os

import matplotlib.pyplot as plt
import numpy as np
import requests
import streamlit as st
import torch
from joblib import load
from sklearn.linear_model import LogisticRegression
from transformers import AutoModelForSequenceClassification, AutoTokenizer
# Load Models
@st.cache_resource  # cache across reruns: without this, all three models reload on every interaction
def load_models():
    """Load the two fine-tuned base models and the stacking meta-classifier.

    Returns:
        tuple: (bert_model, bert_tokenizer, roberta_model, roberta_tokenizer,
        meta_clf), where ``meta_clf`` is a scikit-learn classifier restored
        with joblib from a file downloaded from the Hugging Face Hub.
    """
    bert_model = AutoModelForSequenceClassification.from_pretrained("shivam-1706/bert-model-stack")
    bert_tokenizer = AutoTokenizer.from_pretrained("shivam-1706/bert-model-stack")
    roberta_model = AutoModelForSequenceClassification.from_pretrained("shivam-1706/roberta-model-stack")
    roberta_tokenizer = AutoTokenizer.from_pretrained("shivam-1706/roberta-model-stack")

    # Download logistic regression meta-model once; reuse the on-disk copy afterwards.
    meta_url = "https://huggingface.co/shivam-1706/meta-logistic-model/resolve/main/meta_clf.joblib"
    if not os.path.exists("meta_clf.joblib"):
        # Fail loudly on network/HTTP errors instead of silently writing an
        # error page (or nothing) into the .joblib file.
        resp = requests.get(meta_url, timeout=60)
        resp.raise_for_status()
        with open("meta_clf.joblib", "wb") as f:
            f.write(resp.content)
    # NOTE(review): joblib.load unpickles arbitrary code — acceptable only
    # because the artifact comes from the author's own Hub repo.
    meta_clf = load("meta_clf.joblib")
    return bert_model, bert_tokenizer, roberta_model, roberta_tokenizer, meta_clf
def get_probs(texts, model, tokenizer):
    """Run a sequence-classification model on *texts* and return softmax
    class probabilities as a NumPy array of shape (len(texts), n_classes).
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.eval()
    model.to(device)

    encoded = tokenizer(
        texts,
        return_tensors='pt',
        truncation=True,
        padding=True,
        max_length=128,
    )
    with torch.no_grad():
        logits = model(
            input_ids=encoded['input_ids'].to(device),
            attention_mask=encoded['attention_mask'].to(device),
        ).logits
    return torch.softmax(logits, dim=1).cpu().numpy()
# Load all models (cached by Streamlit across reruns).
bert_model, bert_tokenizer, roberta_model, roberta_tokenizer, meta_clf = load_models()

# Streamlit UI
# NOTE(review): the original emoji were mojibake ("π§", "π") from a bad
# encoding round-trip; the emoji below are best-guess reconstructions.
st.title("🧠 Sentiment Classifier (Stacked Ensemble)")
text_input = st.text_area("Enter a comment here:")

if st.button("Analyze"):
    if text_input.strip():
        # Base-model probabilities become the feature vector for the
        # logistic-regression meta-classifier (stacked ensemble).
        bert_probs = get_probs([text_input], bert_model, bert_tokenizer)
        roberta_probs = get_probs([text_input], roberta_model, roberta_tokenizer)
        meta_input = np.concatenate([bert_probs, roberta_probs], axis=1)

        final_pred = meta_clf.predict(meta_input)[0]
        final_probs = meta_clf.predict_proba(meta_input)[0]

        label = "Positive 😊" if final_pred == 1 else "Negative 😞"
        st.success(f"**Prediction:** {label}")

        # Show probability chart
        st.subheader("📊 Class Probabilities")
        labels = ["Negative", "Positive"]
        colors = ['#FF9999', '#99FF99']
        fig, ax = plt.subplots()
        ax.bar(labels, final_probs, color=colors)
        ax.set_ylim(0, 1)
        ax.set_ylabel("Probability")
        st.pyplot(fig)
        # Close the figure: Streamlit reruns this script on every interaction,
        # and unclosed pyplot figures accumulate in matplotlib's global state.
        plt.close(fig)
    else:
        st.warning("Please enter some text.")