import re

import gradio as gr
import nltk
import torch
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from transformers import DistilBertTokenizer, DistilBertForSequenceClassification

# Download the NLTK resources used by preprocess().
nltk.download('punkt_tab')
nltk.download('stopwords')
nltk.download('wordnet')


def preprocess(text):
    """Lowercase the text, strip non-letters, drop English stopwords, and lemmatize."""
    text = re.sub(r'[^a-zA-Z\s]', '', text).lower()
    tokens = word_tokenize(text)
    stop_words = set(stopwords.words('english'))
    tokens = [word for word in tokens if word not in stop_words]
    lemmatizer = WordNetLemmatizer()
    tokens = [lemmatizer.lemmatize(word) for word in tokens]
    return ' '.join(tokens)
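
# Illustrative example (WordNetLemmatizer defaults to noun lemmas, so verbs such
# as "running" pass through unchanged):
# preprocess("The cats are running quickly!")  ->  "cat running quickly"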


# Load the base tokenizer and a two-label DistilBERT classification head, then
# restore the fine-tuned weights from the saved checkpoint.
tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
model = DistilBertForSequenceClassification.from_pretrained('distilbert-base-uncased', num_labels=2)
model.load_state_dict(torch.load('best_model (3).pth', map_location=torch.device('cpu')))
model.eval()
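
# Optional GPU inference, sketched under the assumption that a CUDA device is
# available (the tokenized inputs would need to be moved with .to(device) too):
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# model.to(device)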


def classify_essay(text):
    """Classify one essay and return a label-to-probability mapping for gr.Label."""
    cleaned_text = preprocess(text)
    inputs = tokenizer(cleaned_text, return_tensors='pt', truncation=True, padding=True, max_length=100)
    with torch.no_grad():
        outputs = model(**inputs)
    probs = torch.nn.functional.softmax(outputs.logits, dim=1)
    labels = ["Human-Written", "AI-Generated"]
    return {labels[0]: float(probs[0][0]), labels[1]: float(probs[0][1])}
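
# Quick sanity check (illustrative call; the scores depend on the loaded checkpoint):
# classify_essay("This is a short test essay.")
# -> {"Human-Written": <prob>, "AI-Generated": <prob>}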


iface = gr.Interface(
    fn=classify_essay,
    inputs=gr.Textbox(lines=10, placeholder="Paste your essay here..."),
    outputs=gr.Label(num_top_classes=2),
    title="Essay Authorship Classifier",
    description="Detect whether an essay is AI-generated or human-written using a fine-tuned DistilBERT model.",
)

if __name__ == "__main__":
    iface.launch()
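    # To expose a temporary public URL (e.g., when running in a notebook),
    # launch with share=True instead: iface.launch(share=True)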