import pandas as pd
import re
import ast
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
# Download the necessary NLTK resources (uncomment on first run)
# nltk.download('stopwords')
# nltk.download('punkt')
# nltk.download('wordnet')
# Read the CSV file
file_path = '/home/darth/#/SEQuestionClassifier/data/all_combined_data.csv'
df = pd.read_csv(file_path)
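# "Tags" is stored as a stringified Python list (assumed format, e.g.
# "['python', 'pandas']"); parse it back into a real list: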
df["Tags"] = df["Tags"].apply(ast.literal_eval)
lemmatizer = WordNetLemmatizer()
stop_words = set(stopwords.words('english'))
def preprocess_text(text):
    """Clean text: lowercase, strip punctuation, drop stop words, lemmatise."""
    text = text.lower()
    text = re.sub(r'[^\w\s]', '', text)  # keep only word characters and whitespace
    words = word_tokenize(text)
    words = [lemmatizer.lemmatize(word) for word in words if word not in stop_words]
    return " ".join(words)
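# Hypothetical sanity check (example input; exact output depends on the NLTK
# stopword list and the WordNet lemmatizer):
#   preprocess_text("The cats are running!")  ->  "cat running"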
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import MultiLabelBinarizer
def vectorise_text(text):
    """Receives a Series of raw text and returns TF-IDF feature vectors."""
    text = text.apply(preprocess_text)
    tfidf = TfidfVectorizer(max_features=500000)
    X = tfidf.fit_transform(text)
    return X
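# Note: the fitted TfidfVectorizer above is discarded, so unseen text cannot
# later be transformed with the same vocabulary. A minimal sketch of one way
# to keep it (an assumed refactor, not part of the original pipeline):
#
#   def vectorise_text_keep_model(text):
#       text = text.apply(preprocess_text)
#       tfidf = TfidfVectorizer(max_features=500000)
#       return tfidf.fit_transform(text), tfidf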
def label_encoding(tags):
    """Binarise lists of tags into a multi-label indicator matrix."""
    mlb = MultiLabelBinarizer()
    return mlb.fit_transform(tags)
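# Likewise, the fitted MultiLabelBinarizer is discarded here, so its classes_
# attribute (needed to map predictions back to tag names) is lost. A sketch of
# one way to keep it (assumed refactor):
#
#   def label_encoding_keep_model(tags):
#       mlb = MultiLabelBinarizer()
#       return mlb.fit_transform(tags), mlb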
X = vectorise_text(df['Input'])
y = label_encoding(df['Tags'])
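# X is a sparse TF-IDF matrix and y a binary indicator matrix; the two should
# have the same number of rows (hypothetical sanity check):
#   assert X.shape[0] == y.shape[0]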