import re
import ast

import pandas as pd
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
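
# The tokenizer model, stopword list and WordNet data used below are separate
# NLTK downloads. Fetching them here is a convenience and an assumption that
# they may not yet be present; the calls are no-ops if the data already exists.
nltk.download('punkt', quiet=True)
nltk.download('stopwords', quiet=True)
nltk.download('wordnet', quiet=True)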

file_path = '/home/darth/#/SEQuestionClassifier/data/all_combined_data.csv'
df = pd.read_csv(file_path)

# "Tags" is stored in the CSV as the string form of a Python list, so parse it
# back into real lists before binarising the labels.
df["Tags"] = df["Tags"].apply(ast.literal_eval)

# Lemmatizer and English stopword list used by the cleaning step.
lemmatizer = WordNetLemmatizer()
stop_words = set(stopwords.words('english'))


def preprocess_text(text):
    """Function to clean text and perform lemmatisation."""
    text = text.lower()
    text = re.sub(r'[^\w\s]', '', text)  # strip punctuation, keep word characters and whitespace
    words = word_tokenize(text)
    words = [lemmatizer.lemmatize(word) for word in words if word not in stop_words]
    return " ".join(words)
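
# Quick illustration of the cleaning step on a made-up question title; the
# sample sentence is an assumption for demonstration only, not a row from the CSV.
sample = "How do I merge two DataFrames in pandas?"
print(preprocess_text(sample))  # e.g. "merge two dataframes panda", depending on the NLTK data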


from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import MultiLabelBinarizer


def vectorise_text(text):
    """Receives a Series of text and returns TF-IDF feature vectors."""
    text = text.apply(preprocess_text)
    # Note: the fitted TfidfVectorizer is discarded when this function returns,
    # so the same vocabulary cannot be reapplied to unseen questions later;
    # return the vectoriser as well if new data needs to be transformed.
    tfidf = TfidfVectorizer(max_features=500000)
    X = tfidf.fit_transform(text)
    return X


def label_encoding(tags):
    """Binarise each question's list of tags into a multi-label indicator row."""
    # Note: the fitted binariser (and hence mlb.classes_) is discarded here;
    # return it too if the tag names are needed downstream.
    mlb = MultiLabelBinarizer()
    return mlb.fit_transform(tags)
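
# Tiny illustration of what MultiLabelBinarizer produces; the tag values here
# are made up purely for the example.
demo = MultiLabelBinarizer()
print(demo.fit_transform([["python", "pandas"], ["python"]]))
# [[1 1]
#  [0 1]]   with demo.classes_ == ['pandas', 'python'] (classes are sorted)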


# Build the TF-IDF feature matrix from the question text and the multi-label
# target matrix from the tags.
X = vectorise_text(df['Input'])
y = label_encoding(df['Tags'])
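
# Rough sanity check of the prepared data: X (sparse TF-IDF matrix) and y
# (dense 0/1 tag matrix) should have one row per question in the dataframe.
print(X.shape, y.shape)
assert X.shape[0] == y.shape[0] == len(df)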