import os
# Hide all GPUs from CUDA-aware libraries BEFORE TensorFlow/torch are imported,
# forcing all model inference in this app onto the CPU.
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
|
|
| from huggingface_hub import hf_hub_download |
| import gradio as gr |
| from transformers import AutoTokenizer, AutoModelForSequenceClassification |
| import torch |
|
|
| import pickle |
| import numpy as np |
| from tensorflow.keras.models import load_model |
| from tensorflow.keras.preprocessing.sequence import pad_sequences |
| import re |
|
|
# --- Classic ML pipeline: fitted text vectorizer + SVM classifier from the Hub ---
svm_repo_id = "HighFive-OPJ/Deep_Learning"
# hf_hub_download caches the artifact locally and returns its filesystem path.
svm_model_path = hf_hub_download(repo_id=svm_repo_id, filename="svm_model.pkl")
with open(svm_model_path, "rb") as f:
    # NOTE(review): pickle.load executes arbitrary code from the file — acceptable
    # only because the artifacts come from this project's own trusted repo.
    svm_model = pickle.load(f)
vectorizer_path = hf_hub_download(repo_id=svm_repo_id, filename="vectorizer.pkl")
with open(vectorizer_path, "rb") as f:
    # Presumably the vectorizer fitted alongside the SVM (TF-IDF or similar) — TODO confirm.
    vectorizer = pickle.load(f)
|
|
# --- Deep-learning pipeline: Keras LSTM model + its fitted tokenizer ---
lstm_repo_id = "HighFive-OPJ/Deep_Learning"
lstm_model_path = hf_hub_download(repo_id=lstm_repo_id, filename="LSTM_model.h5")
lstm_model = load_model(lstm_model_path)
lstm_tokenizer_path = hf_hub_download(repo_id=lstm_repo_id, filename="my_tokenizer.pkl")
with open(lstm_tokenizer_path, "rb") as f:
    # Presumably a Keras Tokenizer fitted on the LSTM's training corpus — TODO confirm.
    lstm_tokenizer = pickle.load(f)
|
|
# --- Transformer pipeline: pretrained multilingual sentiment BERT (5-class head) ---
bert_tokenizer = AutoTokenizer.from_pretrained("nlptown/bert-base-multilingual-uncased-sentiment")
bert_model = AutoModelForSequenceClassification.from_pretrained("nlptown/bert-base-multilingual-uncased-sentiment")
# CUDA_VISIBLE_DEVICES="-1" is set at the top of the file, so torch.cuda.is_available()
# is expected to be False here and the model runs on CPU in practice.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
bert_model.to(device)
|
|
def preprocess_text(text):
    """Lowercase *text*, drop every character that is not a letter or whitespace,
    and trim surrounding whitespace."""
    lowered = text.lower()
    letters_only = re.sub(r"[^a-zA-Z\s]", "", lowered)
    return letters_only.strip()
|
|
def predict_with_svm(text):
    """Predict a 3-class sentiment label (0/1/2) for *text* with the SVM pipeline."""
    features = vectorizer.transform([text])
    label = svm_model.predict(features)[0]
    return int(label)
|
|
def predict_with_lstm(text):
    """Predict a sentiment class for *text* with the Keras LSTM model.

    The text is cleaned, tokenized to an integer sequence, padded/truncated to
    length 200, and the argmax over the model's class probabilities is returned.
    """
    sequences = lstm_tokenizer.texts_to_sequences([preprocess_text(text)])
    padded = pad_sequences(sequences, maxlen=200)
    probabilities = lstm_model.predict(padded)
    return int(probabilities.argmax(axis=1)[0])
|
|
def predict_with_bert(text):
    """Classify *text* with the nlptown sentiment BERT and map it to 3 classes.

    The model's head has 5 classes; the argmax index runs 0-4 and corresponds
    to a 1-5 "star" rating (per the model card). Bucket those into the app's
    scheme:
        index 0-1 (1-2 stars) -> 0 (negative)
        index 2   (3 stars)   -> 1 (neutral)
        index 3-4 (4-5 stars) -> 2 (positive)

    Bug fix: the original bucketing used ``<= 2`` / ``== 3``, which sent
    3-star (neutral) reviews to negative and 4-star (positive) reviews to
    neutral — an off-by-one in the index-to-class mapping.
    """
    inputs = bert_tokenizer(
        [text], padding=True, truncation=True, max_length=512, return_tensors="pt"
    ).to(device)
    with torch.no_grad():
        outputs = bert_model(**inputs)
    star_index = int(outputs.logits.argmax(dim=-1).cpu().numpy()[0])
    if star_index <= 1:
        return 0  # 1-2 stars: negative
    elif star_index == 2:
        return 1  # 3 stars: neutral
    else:
        return 2  # 4-5 stars: positive
|
|
def analyze_sentiment(text):
    """Run all three models on *text*.

    Returns a 4-tuple: star-string renderings of the SVM, LSTM and BERT
    predictions, followed by a one-line stats summary with the mean class
    label (0=Neg, 1=Neu, 2=Pos).
    """
    svm_label = predict_with_svm(text)
    lstm_label = predict_with_lstm(text)
    bert_label = predict_with_bert(text)
    average = (svm_label + lstm_label + bert_label) / 3
    stats = f"Average Score (0=Neg,1=Neu,2=Pos): {average:.2f}\n"
    star_strings = tuple(
        convert_to_stars(label) for label in (svm_label, lstm_label, bert_label)
    )
    return star_strings + (stats,)
|
|
def convert_to_stars(score):
    """Render a 3-class sentiment label (0/1/2) as a five-character star string.

    0 -> one filled star, 1 -> three, 2 -> five; any other value falls back
    to the neutral three-star rendering.
    """
    if score == 0:
        filled = 1
    elif score == 1:
        filled = 3
    elif score == 2:
        filled = 5
    else:
        filled = 3  # unknown label: show as neutral
    return "★" * filled + "☆" * (5 - filled)
|
|
def process_input(text):
    """Gradio callback: validate the input, then run the three-model analysis.

    Blank or whitespace-only input short-circuits to empty model outputs plus
    an error message in the stats slot.
    """
    if text.strip():
        return analyze_sentiment(text)
    return ("", "", "", "Please enter valid text.")
|
|
# --- Gradio UI: components declared inside the Blocks context render in declaration order ---
with gr.Blocks() as demo:
    gr.Markdown("# Sentiment Analysis Demo")
    gr.Markdown("""
    Enter a review and see how different models evaluate its sentiment! This app uses:
    - SVM for classic machine learning
    - LSTM for deep learning
    - BERT for transformer-based analysis
    """)

    with gr.Row():
        # Left column: free-text input and the trigger button.
        with gr.Column():
            input_text = gr.Textbox(label="Enter your review:", lines=3)
            analyze_button = gr.Button("Analyze Sentiment")

        # Right column: one read-only result box per model, plus the aggregate line.
        with gr.Column():
            svm_output = gr.Textbox(label="SVM", interactive=False)
            lstm_output = gr.Textbox(label="LSTM", interactive=False)
            bert_output = gr.Textbox(label="BERT", interactive=False)
            stats_output = gr.Textbox(label="Statistics", interactive=False)

    # Wire the button: process_input returns a 4-tuple whose order must match `outputs`.
    analyze_button.click(
        process_input,
        inputs=[input_text],
        outputs=[svm_output, lstm_output, bert_output, stats_output]
    )


demo.launch()