|
|
import streamlit as st
from textblob import TextBlob
import spacy
from collections import Counter


@st.cache_resource
def _load_nlp_model():
    """Load the small English spaCy pipeline once and cache it.

    Streamlit re-executes the whole script on every widget interaction;
    without caching, the model would be reloaded from disk on each rerun.
    """
    return spacy.load("en_core_web_sm")


nlp = _load_nlp_model()
|
|
|
|
|
|
|
|
# Page header.
st.title("NLP Blog with Sidebar and Buttons")

# Sidebar: let the user pick which NLP task to run.
st.sidebar.title("Select NLP Task")
_TASK_OPTIONS = [
    "Sentiment Analysis",
    "Keyword Extraction",
    "Named Entity Recognition (NER)",
]
task = st.sidebar.selectbox("Choose a task:", _TASK_OPTIONS)

# Main area: free-text input to analyze.
st.write("Enter text for analysis below:")
user_text = st.text_area("Input your text here:", height=200)
|
|
|
|
|
|
|
|
if st.button("Analyze"):
    if user_text.strip():
        if task == "Sentiment Analysis":
            # TextBlob sentiment: polarity in [-1, 1], subjectivity in [0, 1].
            sentiment = TextBlob(user_text).sentiment
            st.subheader("Sentiment Analysis Result")
            st.write(f"Polarity: {sentiment.polarity:.2f}")
            st.write(f"Subjectivity: {sentiment.subjectivity:.2f}")

        elif task == "Keyword Extraction":
            doc = nlp(user_text)
            # Lowercase so "Apple" and "apple" count as one keyword;
            # keep only alphabetic, non-stopword tokens.
            keywords = [
                token.text.lower()
                for token in doc
                if token.is_alpha and not token.is_stop
            ]
            most_common_keywords = Counter(keywords).most_common(10)
            st.subheader("Keyword Extraction Result")
            if most_common_keywords:
                st.write("Most Common Keywords:")
                # Render one "word: count" line per keyword instead of
                # dumping the raw list of tuples.
                for word, count in most_common_keywords:
                    st.write(f"{word}: {count}")
            else:
                # e.g. input was only stopwords/punctuation.
                st.write("No keywords found in the text.")

        elif task == "Named Entity Recognition (NER)":
            doc = nlp(user_text)
            st.subheader("Named Entity Recognition Result")
            if doc.ents:
                for ent in doc.ents:
                    st.write(f"Entity: {ent.text}, Label: {ent.label_}")
            else:
                # Previously this branch rendered nothing, which looked
                # like a failure; tell the user explicitly.
                st.write("No named entities found in the text.")
    else:
        st.error("Please enter some text for analysis.")
|
|
|
|
|
|
|
|
# Sidebar footer: divider followed by a credit line.
for footer_line in ("---", "Developed with ❤️ using Streamlit."):
    st.sidebar.write(footer_line)
|
|
|