|
|
import polars as pl
|
|
|
import numpy as np
|
|
|
import re
|
|
|
from datasets import load_dataset, Dataset, concatenate_datasets
|
|
|
|
|
|
|
|
|
def remove_newlines(text):
    """Delete every newline character from *text* (no replacement space)."""
    return text.translate({ord('\n'): None})
|
|
|
|
|
|
def remove_urls(text):
    """Strip http:// and https:// URLs out of *text*."""
    # Classic permissive URL pattern; matches scheme plus any run of
    # URL-legal characters (letters, digits, punctuation, %-escapes).
    pattern = re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
    return pattern.sub('', text)
|
|
|
|
|
|
def remove_html_tags(text):
    """Drop anything shaped like an HTML/XML tag, e.g. ``<br/>`` or ``</p>``."""
    return re.sub(r'<[^>]+>', '', text)
|
|
|
|
|
|
def remove_special_characters(text):
    """Keep only ASCII letters, digits and whitespace; drop everything else."""
    cleaned = re.sub(r'[^a-zA-Z0-9\s]', '', text)
    return cleaned
|
|
|
|
|
|
def remove_numbers(text):
    """Erase every run of digit characters from *text*."""
    digit_run = re.compile(r'\d+')
    return digit_run.sub('', text)
|
|
|
|
|
|
def remove_extra_spaces(text):
    """Collapse each run of whitespace (spaces, tabs, etc.) to one space."""
    whitespace_run = re.compile(r'\s+')
    return whitespace_run.sub(' ', text)
|
|
|
|
|
|
def remove_twitter_mentions(text):
    """Delete Twitter-style @handle mentions (letters, digits, underscore)."""
    mention = re.compile(r'@([A-Za-z0-9_]+)')
    return mention.sub('', text)
|
|
|
|
|
|
def remove_emoticons(text):
    """Strip emoji/pictograph characters from *text*.

    The character class covers the main emoji planes (several ranges are
    repeated/overlapping, which is harmless for a character class).
    """
    emoji_class = re.compile(r'[\U0001F600-\U0001F64F\U0001F300-\U0001F5FF\U0001F680-\U0001F6FF\U0001F700-\U0001F77F\U0001F780-\U0001F7FF\U0001F800-\U0001F8FF\U0001F900-\U0001F9FF\U0001FA00-\U0001FA6F\U0001FA70-\U0001FAFF\U00002702-\U000027B0\U000024C2-\U0001F251\U0001F004\U0001F0CF\U0001F170-\U0001F251\U0001F600-\U0001F64F\U00002702-\U000027B0\U000024C2-\U0001F251\U0001F300-\U0001F5FF\U0001F680-\U0001F6FF\U0001F700-\U0001F773\U0001F780-\U0001F7D8\U0001F7E0-\U0001F7EB\U0001F7F0-\U0001F7FF\U0001F800-\U0001F80B\U0001F90D-\U0001F9FF\U0001FA70-\U0001FA74\U0001F600-\U0001F64F\U0001F90D-\U0001F971\U0001F973-\U0001F978\U0001F97A-\U0001F9CB\U0001F9CD-\U0001F9FF]+')
    return emoji_class.sub('', text)
|
|
|
|
|
|
def normalize_case(text):
    """Lower-case the whole string (plain ``lower``, not ``casefold``)."""
    return str.lower(text)
|
|
|
|
|
|
def remove_unnecessary_spaces(text):
    """Trim leading/trailing whitespace, then squeeze internal runs to one space."""
    return re.sub(r'\s+', ' ', text.strip())
|
|
|
|
|
|
def remove_punctuation_and_brackets(text):
    """Strip punctuation while keeping word characters, whitespace AND square
    brackets (brackets survive so citation markers like ``[1]`` can be
    removed later by a dedicated step)."""
    kept = re.sub(r'[^\w\s\[\]]', '', text)
    return kept
|
|
|
|
|
|
def remove_numbered_brackets(text):
    """Delete citation-style markers such as ``[1]`` or ``[42]``."""
    marker = re.compile(r'\[\d+\]')
    return marker.sub('', text)
|
|
|
|
|
|
def remove_initial_article(text):
    """Remove one leading article/determiner (e.g. 'the', 'a few') from *text*.

    Returns the remaining words re-joined with single spaces (so internal
    whitespace is normalized, as in the original implementation).

    Fix: the original list contained multi-word phrases ('a few',
    'a lot of', ...) but only ever compared ``words[0]``, so those entries
    could never match.  Phrases are now checked longest-first, so
    'a lot of cats' -> 'cats' rather than 'lot of cats'.
    """
    # Longest phrases first so 'a few' wins over bare 'a'.
    determiners = [
        'a lot of',
        'lots of', 'plenty of', 'a few', 'a little',
        'a', 'an', 'the', 'some', 'many', 'much', 'few', 'little',
        'several', 'this', 'that', 'these', 'those', 'its',
    ]
    words = text.split()
    lowered = [w.lower() for w in words]
    for phrase in determiners:
        parts = phrase.split()
        if lowered[:len(parts)] == parts:
            words = words[len(parts):]
            break
    return ' '.join(words)
|
|
|
|
|
|
def preprocess_text(text):
    """Run the full text-cleaning pipeline over *text* and return the result.

    Fix: the steps are reordered so the structure-aware removals (URLs,
    HTML tags, @mentions, emoticons, ``[n]`` citation markers) run BEFORE
    the destructive character filters.  The original order stripped
    punctuation/special characters first, which destroyed '://', '<', '>',
    '@' and '[n]' so those later removal steps could never match anything.
    """
    text = remove_newlines(text)
    # Structure-aware removals first, while their markers still exist.
    text = remove_urls(text)
    text = remove_html_tags(text)
    text = remove_twitter_mentions(text)
    text = remove_emoticons(text)
    text = remove_numbered_brackets(text)
    # Now the destructive character-level filters.
    text = remove_punctuation_and_brackets(text)
    text = remove_special_characters(text)
    text = remove_numbers(text)
    text = normalize_case(text)
    # Whitespace cleanup after all deletions have left gaps behind.
    text = remove_extra_spaces(text)
    text = remove_unnecessary_spaces(text)
    text = remove_initial_article(text)
    return text
|
|
|
|
|
|
|
|
|
def softmax(x):
    """Numerically stable softmax over axis 0 of array *x*.

    Subtracting the max before exponentiating prevents overflow without
    changing the result.
    """
    shifted = x - np.max(x)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=0)
|
|
|
|
|
|
|
|
|
# Stories already published on the Hub.
dataset1 = load_dataset("Fizzarolli/wattpad2", "default", split="train")

# Freshly scraped stories stored locally as parquet; polars -> pandas ->
# Dataset because Dataset.from_pandas is the supported conversion path.
df2 = pl.read_parquet("wattpad_stories.parquet")

df2 = df2.to_pandas()

dataset2 = Dataset.from_pandas(df2)

# Merge old + new and shuffle with a fixed seed so the row order is
# reproducible across runs.
dataset = concatenate_datasets([dataset1, dataset2]).shuffle(seed=42)
|
|
|
|
|
|
|
|
|
from lingua import LanguageDetectorBuilder

import unicodedata

# Detector over every language lingua supports; preloading the models
# trades slower startup for faster per-example detection inside .map().
detector = LanguageDetectorBuilder.from_all_languages().with_preloaded_language_models().build()
|
|
|
|
|
|
def add_language_column(example):
    """Annotate *example* (a dataset row) with the language of its description.

    Adds two columns:
      - ``language``: ISO 639-3 code of the most likely language
      - ``language_confidence``: confidence value of that top prediction
    """
    # NFKD normalization folds compatibility characters (ligatures,
    # full-width forms) into plain text before detection.
    res = detector.compute_language_confidence_values(
        unicodedata.normalize('NFKD', example["description"])
    )
    # lingua returns the list sorted by descending confidence, so res[0]
    # is both the top language and the maximum confidence — the original
    # re-scanned the whole list with np.max to get the same number.
    top = res[0]
    example["language"] = top.language.iso_code_639_3.name
    example["language_confidence"] = top.value
    return example
|
|
|
|
|
|
# Annotate every example with its detected language.
dataset = dataset.map(add_language_column)

# Sanity check: print the schema and the first example.
print(dataset)

print(dataset[0])
|
|
|
|
|
|
|
|
|
from transformers import AutoTokenizer, AutoModelForSequenceClassification

import torch

# Binary NSFW text classifier (DistilBERT); the model is placed on the GPU
# via device_map so inference in .map() runs on CUDA.
tokenizer = AutoTokenizer.from_pretrained("eliasalbouzidi/distilbert-nsfw-text-classifier")

model = AutoModelForSequenceClassification.from_pretrained("eliasalbouzidi/distilbert-nsfw-text-classifier", device_map="cuda")
|
|
|
|
|
|
def add_nsfw_column(example):
    """Score each chapter of *example* with the NSFW classifier.

    Adds:
      - ``chapter_nsfw_scores``: per-chapter probability from the model
      - ``overall_nsfw_score``: mean of the chapter scores (0.0 if the
        story has no chapters — the original divided by zero there)

    Examples that already carry an ``overall_nsfw_score`` are returned
    unchanged so re-runs skip the GPU work (same skip as the original).
    """
    if "overall_nsfw_score" in example:
        return example

    nsfw_scores = []
    for chapter_text in example["chapter_contents"]:
        cleaned = preprocess_text(chapter_text)
        inputs = tokenizer(
            cleaned,
            return_tensors="pt",
            padding=True,
            truncation=True,
            max_length=512,  # DistilBERT context limit
        ).to("cuda")
        # Inference only: no_grad avoids building autograd graphs, which
        # the original did on every chapter (wasted GPU memory/compute).
        with torch.no_grad():
            logits = model(**inputs).logits
        probs = torch.softmax(logits, dim=1).tolist()[0]
        # NOTE(review): index 1 is assumed to be the 'nsfw' label —
        # confirm against model.config.id2label.
        nsfw_scores.append(probs[1])

    # Guard against stories with no chapters (ZeroDivisionError before).
    if nsfw_scores:
        example["overall_nsfw_score"] = sum(nsfw_scores) / len(nsfw_scores)
    else:
        example["overall_nsfw_score"] = 0.0
    example["chapter_nsfw_scores"] = nsfw_scores
    return example
|
|
|
|
|
|
# Score every story for NSFW content (examples that already carry a score
# are skipped inside add_nsfw_column).
dataset = dataset.map(add_nsfw_column)

# Sanity check: print the schema and the first example.
print(dataset)

print(dataset[0])

# Publish the enriched dataset back to the Hub.
dataset.push_to_hub("Fizzarolli/wattpad2", "default", commit_message="Add more stories")