import polars as pl
import numpy as np
import re
from datasets import load_dataset, Dataset, concatenate_datasets
#region Preprocessing functions
def remove_newlines(text):
    """Delete every newline character from *text* (no replacement space)."""
    return text.translate(str.maketrans('', '', '\n'))
# Compiled once at import time so repeated calls in the per-chapter loop skip
# the regex-cache lookup.  Pattern is byte-identical to the original.
_URL_PATTERN = re.compile(
    r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
)

def remove_urls(text):
    """Strip http:// and https:// URLs from *text*.

    NOTE(review): `[$-_@.&+]` is a character *range* ($ through _), which
    matches far more punctuation than the listed characters suggest, and
    `\\(` inside a character class matches a literal backslash, not an
    escaped paren — confirm intent before tightening the pattern.
    """
    return _URL_PATTERN.sub('', text)
def remove_html_tags(text):
    """Drop anything shaped like an HTML/XML tag: ``<`` ... ``>``."""
    return re.sub(r'<[^>]+>', '', text)
def remove_special_characters(text):
    """Keep only ASCII letters, digits, and whitespace; drop everything else."""
    keep_only = r'[^a-zA-Z0-9\s]'
    cleaned = re.sub(keep_only, '', text)
    return cleaned
def remove_numbers(text):
    """Delete every run of decimal digits."""
    without_digits = re.sub(r'\d+', '', text)
    return without_digits
def remove_extra_spaces(text):
    """Collapse each whitespace run to a single space (ends are NOT stripped)."""
    collapsed = re.sub(r'\s+', ' ', text)
    return collapsed
def remove_twitter_mentions(text):
    """Strip ``@handle`` mentions (letters, digits, underscore after the @)."""
    mention_pattern = r'@([A-Za-z0-9_]+)'
    return re.sub(mention_pattern, '', text)
def remove_emoticons(text):
    # Strip emoji / pictograph codepoints with one large character class.
    # NOTE(review): many of these ranges overlap (several are listed twice),
    # and \U000024C2-\U0001F251 is so broad that it also covers CJK
    # ideographs, Hangul, kana, and most other non-Latin scripts — any story
    # text in those scripts is deleted wholesale here.  Confirm this is
    # intended, since this runs before language detection uses the output.
    emoticon_regex = r'[\U0001F600-\U0001F64F\U0001F300-\U0001F5FF\U0001F680-\U0001F6FF\U0001F700-\U0001F77F\U0001F780-\U0001F7FF\U0001F800-\U0001F8FF\U0001F900-\U0001F9FF\U0001FA00-\U0001FA6F\U0001FA70-\U0001FAFF\U00002702-\U000027B0\U000024C2-\U0001F251\U0001F004\U0001F0CF\U0001F170-\U0001F251\U0001F600-\U0001F64F\U00002702-\U000027B0\U000024C2-\U0001F251\U0001F300-\U0001F5FF\U0001F680-\U0001F6FF\U0001F700-\U0001F773\U0001F780-\U0001F7D8\U0001F7E0-\U0001F7EB\U0001F7F0-\U0001F7FF\U0001F800-\U0001F80B\U0001F90D-\U0001F9FF\U0001FA70-\U0001FA74\U0001F600-\U0001F64F\U0001F90D-\U0001F971\U0001F973-\U0001F978\U0001F97A-\U0001F9CB\U0001F9CD-\U0001F9FF]+'
    return re.sub(emoticon_regex, '', text)
def normalize_case(text):
    """Lower-case the entire string."""
    lowered = text.lower()
    return lowered
def remove_unnecessary_spaces(text):
    """Trim both ends and collapse interior whitespace runs to single spaces.

    ``str.split()`` with no argument splits on any whitespace run and drops
    leading/trailing whitespace, so the join is equivalent to the original
    strip() + re.sub(r'\\s+', ' ') pair.
    """
    return ' '.join(text.split())
def remove_punctuation_and_brackets(text):
    """Remove punctuation but KEEP word chars, whitespace and square brackets.

    Despite the name, ``[`` and ``]`` survive the pass — presumably so that
    bracketed markers like ``[12]`` can still be matched afterwards.
    """
    survivors = r'[^\w\s\[\]]'
    return re.sub(survivors, '', text)
def remove_numbered_brackets(text):
    """Delete citation-style markers such as ``[3]`` or ``[42]``."""
    cleaned = re.sub(r'\[\d+\]', '', text)
    return cleaned
# Determiners/quantifiers dropped when they open a description.  frozenset
# gives O(1) membership instead of a linear scan per call.
# NOTE(review): the multi-word entries ('a few', 'a lot of', ...) can never
# match because only the first whitespace-delimited word is compared; they
# are kept to preserve the original list but are effectively dead.
_LEADING_DETERMINERS = frozenset([
    'a', 'an', 'the', 'some', 'many', 'much', 'few', 'little',
    'several', 'a few', 'a little', 'a lot of', 'lots of', 'plenty of',
    'this', 'that', 'these', 'those', 'its',
])

def remove_initial_article(text):
    """Drop a leading article/determiner ('the', 'an', ...) from *text*.

    Only the first word is examined and the comparison is case-insensitive.
    The remaining words are re-joined with single spaces, so interior
    whitespace runs are collapsed as a side effect (same as the original).
    Empty input returns the empty string.
    """
    words = text.split()
    if words and words[0].lower() in _LEADING_DETERMINERS:
        words.pop(0)
    return ' '.join(words)
def preprocess_text(text):
    """Normalize raw chapter text before NSFW classification.

    Order matters: the pattern-based removers (URLs, HTML tags, @mentions,
    emoticons, ``[n]`` markers) must run *before* the punctuation and digit
    strippers, otherwise the very characters those patterns rely on
    (``://``, ``<>``, ``@``, ``[]``, digits) have already been deleted and
    the patterns can never match.  The previous ordering ran them after,
    so e.g. URL remnants like "https example com" leaked into the output
    and remove_numbered_brackets was a no-op.
    """
    text = remove_newlines(text)
    # Structural removals first, while their marker characters still exist.
    text = remove_urls(text)
    text = remove_html_tags(text)
    text = remove_twitter_mentions(text)
    text = remove_emoticons(text)
    text = remove_numbered_brackets(text)  # needs digits AND brackets intact
    # Character-level stripping.
    text = remove_punctuation_and_brackets(text)
    text = remove_special_characters(text)
    text = remove_numbers(text)
    text = normalize_case(text)
    # Whitespace cleanup last; remove_unnecessary_spaces subsumes the old
    # extra remove_extra_spaces pass (it collapses AND strips the ends).
    text = remove_unnecessary_spaces(text)
    text = remove_initial_article(text)
    return text
#endregion
def softmax(x):
    """Numerically stable softmax along axis 0.

    Subtracting the global max before exponentiating prevents overflow
    without changing the result.
    """
    shifted = x - np.max(x)
    exps = np.exp(shifted)
    return exps / exps.sum(axis=0)
# Load the dataset
# Merge two Wattpad story sources — the hub dataset plus a local parquet
# dump — then shuffle with a fixed seed so the interleaving is reproducible.
dataset1 = load_dataset("Fizzarolli/wattpad2", "default", split="train")
df2 = pl.read_parquet("wattpad_stories.parquet")
# Round-trip through pandas: datasets has a from_pandas constructor but no
# direct polars ingestion here (polars -> pandas -> datasets.Dataset).
df2 = df2.to_pandas()
dataset2 = Dataset.from_pandas(df2)
dataset = concatenate_datasets([dataset1, dataset2]).shuffle(seed=42)
# Language detection
from lingua import LanguageDetectorBuilder
import unicodedata
# One detector covering every language lingua supports; preloading the
# language models trades startup time for faster per-row detection.
detector = LanguageDetectorBuilder.from_all_languages().with_preloaded_language_models().build()
def add_language_column(example):
    """Attach a detected language (ISO 639-3 code) and confidence to a row.

    Runs the module-level lingua ``detector`` on the NFKD-normalized
    ``description`` field.  ``res[0]`` supplies the reported language —
    presumably the candidates come back most-confident first — while the
    stored confidence is the maximum over all candidate values.
    """
    normalized = unicodedata.normalize('NFKD', example["description"])
    res = detector.compute_language_confidence_values(normalized)
    example["language"] = res[0].language.iso_code_639_3.name
    confidences = [candidate.value for candidate in res]
    example["language_confidence"] = np.max(confidences)
    return example
# Tag every row with its detected language; print a summary and the first
# row as a sanity check.
dataset = dataset.map(add_language_column)
print(dataset)
print(dataset[0])
# NSFW detection
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
# Sequence-classification DistilBERT fine-tuned for NSFW text; the weights
# are placed on the GPU (device_map="cuda"), so inputs must be moved there too.
tokenizer = AutoTokenizer.from_pretrained("eliasalbouzidi/distilbert-nsfw-text-classifier")
model = AutoModelForSequenceClassification.from_pretrained("eliasalbouzidi/distilbert-nsfw-text-classifier", device_map="cuda")
def add_nsfw_column(example):
    """Score each chapter of a story with the NSFW classifier.

    Adds two fields:
      chapter_nsfw_scores — per-chapter probability taken from index 1 of
                            the 2-class softmax (presumably the NSFW label;
                            confirm against the model's id2label config).
      overall_nsfw_score  — mean of the chapter scores, or 0.0 when the
                            story has no chapters (the original divided by
                            len() unconditionally and raised
                            ZeroDivisionError on an empty chapter list).

    Rows that already carry ``overall_nsfw_score`` pass through untouched
    so re-running the map does not redo inference.
    """
    if "overall_nsfw_score" not in example:
        nsfw_scores = []
        for chapter_text in example["chapter_contents"]:
            preprocessed_text = preprocess_text(chapter_text)
            inputs = tokenizer(
                preprocessed_text,
                return_tensors="pt",
                padding=True,
                truncation=True,
                max_length=512,
            ).to("cuda")
            # Inference only — disable autograd bookkeeping to cut memory use.
            with torch.no_grad():
                logits = model(**inputs).logits
            probs = torch.softmax(logits, dim=1).tolist()[0]
            nsfw_scores.append(probs[1])
        # Guard against stories with an empty chapter list.
        example["overall_nsfw_score"] = (
            sum(nsfw_scores) / len(nsfw_scores) if nsfw_scores else 0.0
        )
        example["chapter_nsfw_scores"] = nsfw_scores
    return example
# Score every story, sanity-print, then publish the enriched dataset back to
# the same hub repository.
dataset = dataset.map(add_nsfw_column)
print(dataset)
print(dataset[0])
dataset.push_to_hub("Fizzarolli/wattpad2", "default", commit_message="Add more stories")