File size: 2,594 Bytes
cdef703
 
 
 
 
 
 
8a06a41
cdef703
 
8a06a41
 
 
 
 
 
 
 
 
 
 
 
 
 
cdef703
 
 
 
 
 
 
 
 
 
 
 
8a06a41
cdef703
 
8a06a41
 
 
 
 
 
 
 
cdef703
8a06a41
cdef703
8a06a41
cdef703
8a06a41
cdef703
8a06a41
cdef703
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
import streamlit as st
import pickle
import string
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
import nltk
from huggingface_hub import hf_hub_download

# Ensure the NLTK resources needed by preprocess_text() are present,
# downloading each one only on first run.
_NLTK_RESOURCES = [
    ("tokenizers/punkt_tab", "punkt_tab"),
    ("corpora/stopwords", "stopwords"),
    ("corpora/wordnet", "wordnet"),
]
for _resource_path, _package in _NLTK_RESOURCES:
    try:
        nltk.data.find(_resource_path)
    except LookupError:
        nltk.download(_package)

# Shared preprocessing state: English stopword set and WordNet lemmatizer.
stop_words = set(stopwords.words('english'))
lemmatizer = WordNetLemmatizer()

# Function to preprocess text
def preprocess_text(text):
    """Normalize a raw review for TF-IDF vectorization.

    Lowercases the text, strips all punctuation, tokenizes it, drops
    English stopwords, and lemmatizes the remaining tokens.

    Args:
        text: Raw review string.

    Returns:
        The cleaned review as a single space-joined string of tokens.
    """
    lowered = text.lower()
    no_punct = lowered.translate(str.maketrans('', '', string.punctuation))
    kept = []
    for token in word_tokenize(no_punct):
        if token in stop_words:
            continue  # skip stopwords entirely
        kept.append(lemmatizer.lemmatize(token))
    return " ".join(kept)

# --- Load models from Hugging Face Model Repo ---
@st.cache_resource
def load_models():
    """Download and unpickle the classifiers and TF-IDF vectorizers.

    Fetches four pickled artifacts from the hard-coded Hugging Face model
    repo. Cached via st.cache_resource so the download/deserialization
    happens only once per server process.

    Returns:
        tuple: (goboult_model, goboult_tfidf, flipflop_model, flipflop_tfidf)
    """
    HF_MODEL_REPO = "Redfire-1234/Sentiment-analysis"

    def _load_pickle(filename):
        # Download one artifact and unpickle it.
        # SECURITY NOTE: pickle.load executes arbitrary code from the file.
        # Only acceptable here because the repo ID is hard-coded and
        # presumed trusted — do not point this at user-supplied repos.
        path = hf_hub_download(HF_MODEL_REPO, filename)
        with open(path, 'rb') as f:
            return pickle.load(f)

    return (
        _load_pickle("rf_goboult_model.pkl"),
        _load_pickle("tfidf_goboult.pkl"),
        _load_pickle("rf_flipflop_model.pkl"),
        _load_pickle("tfidf_flipflop.pkl"),
    )

goboult_model, goboult_tfidf, flipflop_model, flipflop_tfidf = load_models()

# --- Streamlit UI ---
st.title("Sentiment Analysis for Goboult & Flipflop")

dataset = st.selectbox("Select Dataset", ["Goboult", "Flipflop"])
review = st.text_area("Enter your review here:")

# Dispatch table: lowercased dataset choice -> (vectorizer, classifier).
# Anything other than "goboult" falls through to the Flipflop pipeline,
# matching the original if/else behavior.
_PIPELINES = {
    "goboult": (goboult_tfidf, goboult_model),
    "flipflop": (flipflop_tfidf, flipflop_model),
}

if st.button("Predict Sentiment"):
    if not review.strip():
        st.warning("Please enter a review!")
    else:
        cleaned = preprocess_text(review)
        tfidf, model = _PIPELINES.get(dataset.lower(), _PIPELINES["flipflop"])
        pred = model.predict(tfidf.transform([cleaned]))[0]
        st.success(f"Predicted Sentiment: {pred}")