# Hugging Face Space — Streamlit app: Message Tone & Rewrite Checker (Phase 2)
| import streamlit as st | |
| from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification | |
| from scipy.special import softmax | |
| import torch | |
| import re | |
# Load sentiment model
def load_sentiment_model():
    """Return (model, tokenizer) for CardiffNLP's Twitter-RoBERTa sentiment classifier.

    NOTE(review): this re-downloads/re-loads on every call; in Streamlit the
    script reruns per interaction, so wrapping this in st.cache_resource
    would avoid repeated model loads — confirm before changing.
    """
    checkpoint = "cardiffnlp/twitter-roberta-base-sentiment"
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = AutoModelForSequenceClassification.from_pretrained(checkpoint)
    return model, tokenizer
# Load emotion model
def load_emotion_model():
    """Return (model, tokenizer) for the j-hartmann English emotion classifier.

    NOTE(review): loaded fresh on each call; st.cache_resource would avoid
    reloading on every Streamlit rerun — confirm before changing.
    """
    checkpoint = "j-hartmann/emotion-english-distilroberta-base"
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = AutoModelForSequenceClassification.from_pretrained(checkpoint)
    return model, tokenizer
# Load paraphrasing model
def load_paraphrase_model():
    """Return (model, tokenizer) for the Vamsi T5 PAWS paraphraser.

    NOTE(review): loaded fresh on each call; st.cache_resource would avoid
    reloading on every Streamlit rerun — confirm before changing.
    """
    checkpoint = "Vamsi/T5_Paraphrase_Paws"
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)
    return model, tokenizer
# Sentiment analysis
def get_sentiment(text, model, tokenizer):
    """Classify *text* and return (label, confidence_percent).

    label is one of "Negative"/"Neutral"/"Positive" (the CardiffNLP
    three-way ordering); confidence is the softmax probability of the
    winning class, scaled to 0-100.
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True)
    with torch.no_grad():
        logits = model(**inputs).logits
    scores = softmax(logits.numpy()[0])
    top = int(scores.argmax())
    return ("Negative", "Neutral", "Positive")[top], float(scores[top]) * 100
# Emotion detection
def get_emotion(text, model, tokenizer):
    """Detect the dominant emotion in *text*; return (label, confidence_percent).

    Labels follow the j-hartmann model's alphabetical seven-class set;
    confidence is the winning softmax probability scaled to 0-100.
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True)
    with torch.no_grad():
        logits = model(**inputs).logits
    scores = softmax(logits.numpy()[0])
    top = int(scores.argmax())
    emotions = ('anger', 'disgust', 'fear', 'joy', 'neutral', 'sadness', 'surprise')
    return emotions[top], float(scores[top]) * 100
# Feedback generation
def generate_feedback(sentiment, emotion):
    """Map a (sentiment, emotion) pair to a human-readable coaching message.

    sentiment: "Negative"/"Neutral"/"Positive" (as produced by get_sentiment).
    emotion: lower-case label (as produced by get_emotion).
    Always returns a string. The original implicitly returned None for an
    unrecognised sentiment, which would surface as "None" in st.info().
    """
    if sentiment == "Negative":
        if emotion in ["anger", "disgust", "sadness"]:
            return "β οΈ Your message might sound hurtful or emotionally charged. Consider softening it."
        else:
            return "β οΈ Your message may feel negative. Reflect on your tone before sending."
    elif sentiment == "Neutral":
        return "π Your message seems neutral. That's okay, but clarity and warmth often help."
    elif sentiment == "Positive":
        if emotion == "joy":
            return "π Great! Your message feels joyful and likely to be well received."
        elif emotion == "love":
            # NOTE(review): get_emotion never emits "love" (its label set is
            # anger/disgust/fear/joy/neutral/sadness/surprise), so this branch
            # is dead in the current pipeline; kept for external callers.
            return "π Your message feels loving. Expressing emotions like this builds trust."
        else:
            return "π Your message is positive, but think about whether itβs being fully understood."
    # Fallback so callers never receive None for an unexpected sentiment label.
    return "Your message tone could not be classified. Please re-read it before sending."
# Profanity detection
# Compiled once at import: the original rebuilt 7 regexes on every call.
_PROFANITY_RE = re.compile(r"\b(?:fuck|shit|bitch|stupid|idiot|dumb|asshole)\b")

def contains_profanity(text):
    """Return True if *text* contains any blacklisted word as a whole word.

    Matching is case-insensitive (input is lower-cased) and word-bounded,
    so e.g. "idiotic" does not match "idiot".
    """
    return _PROFANITY_RE.search(text.lower()) is not None
# Smart rewrite logic
def smart_rewrite_message(text, model, tokenizer):
    """Return a list of paraphrased versions of *text*.

    If the message contains profanity, returns a single warning string
    instead of paraphrasing. Otherwise runs T5 beam search and returns the
    decoded candidates, deduplicated while preserving beam order (the
    original used set(), which made the displayed order nondeterministic).
    """
    if contains_profanity(text):
        return ["β οΈ Your message may contain harmful language. Please rephrase it with respect and calm."]
    # Prompt format expected by the Vamsi/T5_Paraphrase_Paws checkpoint.
    text = "paraphrase: " + text + " </s>"
    encoding = tokenizer.encode_plus(text, return_tensors="pt", max_length=128, truncation=True)
    with torch.no_grad():
        output = model.generate(
            input_ids=encoding['input_ids'],
            attention_mask=encoding['attention_mask'],
            max_length=128,
            num_return_sequences=2,
            num_beams=5,
            # temperature=1.5 removed: it only applies when do_sample=True,
            # so it had no effect under pure beam search.
        )
    rewrites = [tokenizer.decode(o, skip_special_tokens=True) for o in output]
    # dict.fromkeys dedupes while keeping the beams' ranked order.
    return list(dict.fromkeys(rewrites))
# Streamlit App UI
st.title("π£οΈ Message Tone & Rewrite Checker (Phase 2)")
st.write("Before you send that message, check how it might be received β and improve it if needed.")

text = st.text_area("βοΈ Enter your message here:")

if st.button("Analyze"):
    if not text.strip():
        st.warning("Please type a message first.")
    else:
        # Tone analysis: sentiment + emotion on the raw (unstripped) input.
        sent_model, sent_token = load_sentiment_model()
        emo_model, emo_token = load_emotion_model()
        sentiment, s_conf = get_sentiment(text, sent_model, sent_token)
        emotion, e_conf = get_emotion(text, emo_model, emo_token)

        st.markdown("### π§ Analysis Result")
        st.write(f"**Sentiment:** {sentiment} ({s_conf:.2f}%)")
        st.write(f"**Emotion:** {emotion} ({e_conf:.2f}%)")

        st.markdown("### π‘ Feedback")
        st.info(generate_feedback(sentiment, emotion))

        st.markdown("---")
        st.markdown("### β¨ Try Rewriting Your Message")
        para_model, para_token = load_paraphrase_model()
        for idx, version in enumerate(smart_rewrite_message(text, para_model, para_token), 1):
            st.write(f"**Version {idx}:** {version}")