Update app.py
Browse files
app.py
CHANGED
|
@@ -1,3 +1,4 @@
|
|
|
|
|
| 1 |
import streamlit as st
|
| 2 |
import numpy as np
|
| 3 |
import re
|
|
@@ -8,7 +9,7 @@ from nltk.tokenize import word_tokenize
|
|
| 8 |
from nltk.corpus import stopwords
|
| 9 |
from nltk.stem import WordNetLemmatizer
|
| 10 |
# Download necessary resources
|
| 11 |
-
nltk.download('
|
| 12 |
nltk.download('stopwords')
|
| 13 |
nltk.download('wordnet')
|
| 14 |
|
|
@@ -22,18 +23,21 @@ import pickle
|
|
| 22 |
st.set_page_config(page_title="News Category Classifier", page_icon="📰", layout="centered")
|
| 23 |
|
| 24 |
def set_background(image_path):
|
|
|
|
|
|
|
|
|
|
| 25 |
bg_image_style = f"""
|
| 26 |
<style>
|
| 27 |
.stApp {{
|
| 28 |
-
background: url(
|
| 29 |
background-size: cover;
|
| 30 |
}}
|
| 31 |
</style>
|
| 32 |
"""
|
| 33 |
st.markdown(bg_image_style, unsafe_allow_html=True)
|
| 34 |
|
| 35 |
-
#
|
| 36 |
-
set_background("News image 2.png") # Ensure the image is in the
|
| 37 |
|
| 38 |
# Initialize stopwords and lemmatizer
|
| 39 |
stop_words = set(stopwords.words('english')).union({"pm"})
|
|
@@ -152,4 +156,4 @@ if st.button("Analyze 🏷️"):
|
|
| 152 |
category = predict_category(user_input)
|
| 153 |
st.markdown(f"<div class='result-box'><span class='result-text'>🗂️ Predicted Category: <strong>{category}</strong></span></div>", unsafe_allow_html=True)
|
| 154 |
else:
|
| 155 |
-
st.warning("⚠️ Please enter some text to analyze.")
|
|
|
|
| 1 |
+
import base64
|
| 2 |
import streamlit as st
|
| 3 |
import numpy as np
|
| 4 |
import re
|
|
|
|
| 9 |
from nltk.corpus import stopwords
|
| 10 |
from nltk.stem import WordNetLemmatizer
|
| 11 |
# Fetch the NLTK data packages required below; nltk.download() is a
# no-op when the resource is already present on disk.
for _resource in ("punkt_tab", "stopwords", "wordnet"):
    nltk.download(_resource)
|
| 15 |
|
|
|
|
| 23 |
st.set_page_config(page_title="News Category Classifier", page_icon="📰", layout="centered")
|
| 24 |
|
| 25 |
def set_background(image_path):
    """Render the file at *image_path* as the app's full-page background.

    The image is read once and embedded as a base64 data URI inside a
    <style> block, so Streamlit does not need a static route to serve it.
    """
    with open(image_path, "rb") as fh:
        encoded_img = base64.b64encode(fh.read()).decode()

    css = f"""
    <style>
    .stApp {{
        background: url("data:image/png;base64,{encoded_img}") no-repeat center center fixed;
        background-size: cover;
    }}
    </style>
    """
    st.markdown(css, unsafe_allow_html=True)
|
| 38 |
|
| 39 |
+
# Apply the page background; the path is relative to the app's working
# directory, so the image must live in the "page" folder.
set_background("page/News image 2.png")
|
| 41 |
|
| 42 |
# English stopword set, extended with the domain-specific token "pm"
# that should also be filtered out before classification.
stop_words = set(stopwords.words('english')) | {"pm"}
|
|
|
|
| 156 |
category = predict_category(user_input)
|
| 157 |
st.markdown(f"<div class='result-box'><span class='result-text'>🗂️ Predicted Category: <strong>{category}</strong></span></div>", unsafe_allow_html=True)
|
| 158 |
else:
|
| 159 |
+
st.warning("⚠️ Please enter some text to analyze.")
|