File size: 1,534 Bytes
f249bf2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
import streamlit as st
import pandas as pd
import joblib
import re
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import nltk

# Ensure required NLTK data is available. quiet=True keeps Streamlit
# reruns (which re-execute this whole script) from spamming the console
# with download-progress output; the call is a no-op once data exists.
nltk.download('stopwords', quiet=True)
nltk.download('punkt', quiet=True)

# Load the dataset the model was trained on.
# NOTE(review): assumes bbc_data.csv has a 'data' column (article text)
# and a 'labels' column (category) — confirm against the training script.
df = pd.read_csv("./bbc_data.csv")

model = joblib.load('model.pkl')  # Load your pre-trained model
vectorizer = joblib.load('vectorizer.pkl')  # Load pre-trained vectorizer

# Kept for parity with the training pipeline; the app below only uses
# `model` and `vectorizer`, so these are effectively reference data.
X = df['data']
y = df['labels']

# Built once at import time instead of on every call — the original
# re-read the stopword corpus per invocation. The corpus is guaranteed
# to be present by the nltk.download() call earlier in this script.
_STOP_WORDS = frozenset(stopwords.words('english'))

# Preprocessing function
def preprocess_text(text):
    """Lowercase, strip punctuation, tokenize, and drop English stopwords.

    Parameters
    ----------
    text : str
        Raw headline/article text.

    Returns
    -------
    str
        Space-joined surviving tokens, ready for the vectorizer.
    """
    text = re.sub(r'[^\w\s]', '', text.lower())  # Remove punctuation
    tokens = word_tokenize(text)  # Tokenize the text
    return ' '.join(word for word in tokens if word not in _STOP_WORDS)

# Title of the app
st.title('News Classification App')

# User input
user_input = st.text_area('Enter a headline')

if st.button('Classify'):
    # Strip so whitespace-only input counts as empty — otherwise it
    # passes the truthiness check and is vectorized into an all-zero
    # vector, producing a meaningless "prediction".
    headline = user_input.strip()
    if headline:
        # Preprocess the input text
        preprocessed_input = preprocess_text(headline)

        # Convert preprocessed text to numerical data using the loaded vectorizer
        input_vector = vectorizer.transform([preprocessed_input])

        # Make prediction
        prediction = model.predict(input_vector)

        # Display the result
        st.write(f'Predicted Category: {prediction[0]}')
    else:
        st.write('Please enter a headline')