Spaces:
Sleeping
Sleeping
Delete app.py
Browse files
app.py
DELETED
|
@@ -1,49 +0,0 @@
|
|
| 1 |
-
import os

import streamlit as st
import pandas as pd
import joblib
import re
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import nltk

# Ensure required NLTK data is available. quiet=True suppresses the
# download chatter on every Streamlit rerun; once the corpora are
# cached locally these calls are effectively no-ops.
nltk.download('stopwords', quiet=True)
nltk.download('punkt', quiet=True)

# Load the dataset and pre-trained artifacts.
# The CSV path was hard-coded to one developer's machine; keep that
# value as the default (backward compatible) but allow overriding it
# through the BBC_DATA_PATH environment variable so the app runs
# elsewhere without a code change.
DATA_PATH = os.environ.get(
    "BBC_DATA_PATH",
    "C:\\Users\\saipr\\Downloads\\bbc_data.csv\\bbc_data.csv",
)
df = pd.read_csv(DATA_PATH)
model = joblib.load('model.pkl')            # pre-trained classifier
vectorizer = joblib.load('vectorizer.pkl')  # fitted text vectorizer

# NOTE(review): X and y are never used below — the model is already
# trained. Kept only so any external code importing them still works;
# candidates for removal. Assumes the CSV has 'data'/'labels' columns.
X = df['data']
y = df['labels']
|
| 20 |
-
|
| 21 |
-
# Preprocessing function
|
| 22 |
-
def preprocess_text(text):
    """Normalize raw text for classification.

    Lower-cases the input, strips punctuation, tokenizes it, and
    removes English stopwords.

    Args:
        text: Raw input string (e.g. a news headline).

    Returns:
        A single space-joined string of the surviving tokens.
    """
    # Drop anything that is not a word character or whitespace.
    text = re.sub(r'[^\w\s]', '', text.lower())
    tokens = word_tokenize(text)
    # Build the stopword set once and memoize it on the function:
    # stopwords.words() re-reads the corpus file on every call, which
    # is wasteful for a function invoked on every button click.
    stop_words = getattr(preprocess_text, '_stop_words', None)
    if stop_words is None:
        stop_words = frozenset(stopwords.words('english'))
        preprocess_text._stop_words = stop_words
    return ' '.join(word for word in tokens if word not in stop_words)
|
| 28 |
-
|
| 29 |
-
# --- Streamlit UI ----------------------------------------------------
st.title('News Classification App')

# Free-text box for the headline the user wants classified.
user_input = st.text_area('Enter a headline')

if st.button('Classify'):
    if not user_input:
        # Nothing typed — prompt the user instead of classifying.
        st.write('Please enter a headline')
    else:
        # Clean the text, vectorize it with the fitted vectorizer,
        # then run the pre-trained classifier on the result.
        cleaned = preprocess_text(user_input)
        features = vectorizer.transform([cleaned])
        predicted = model.predict(features)
        st.write(f'Predicted Category: {predicted[0]}')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|