Fizzarolli committed on
Commit
6da8af3
·
verified ·
1 Parent(s): aade438

Create dataset_filtering.py

Browse files
Files changed (1) hide show
  1. dataset_filtering.py +148 -0
dataset_filtering.py ADDED
@@ -0,0 +1,148 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import polars as pl
2
+ import numpy as np
3
+ import re
4
+ from datasets import load_dataset, Dataset, concatenate_datasets
5
+
6
#region Preprocessing functions
def remove_newlines(text):
    """Drop every newline character from *text*."""
    return ''.join(text.split('\n'))
9
+
10
def remove_urls(text):
    """Blank out http/https URLs (scheme through the last URL-safe character)."""
    pattern = re.compile(
        r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
    )
    return pattern.sub('', text)
13
+
14
def remove_html_tags(text):
    """Strip anything shaped like an HTML/XML tag (<...>)."""
    return re.sub(r'<[^>]+>', '', text)
17
+
18
def remove_special_characters(text):
    """Keep only ASCII letters, digits and whitespace."""
    return re.sub(r'[^a-zA-Z0-9\s]', '', text)
21
+
22
def remove_numbers(text):
    """Delete every run of digits."""
    digit_run = re.compile(r'\d+')
    return digit_run.sub('', text)
24
+
25
def remove_extra_spaces(text):
    """Collapse each whitespace run (spaces, tabs, newlines) to one space."""
    collapsed = re.sub(r'\s+', ' ', text)
    return collapsed
27
+
28
def remove_twitter_mentions(text):
    """Erase @handle mentions (letters, digits, underscores)."""
    mention = re.compile(r'@([A-Za-z0-9_]+)')
    return mention.sub('', text)
30
+
31
def remove_emoticons(text):
    """Remove emoji / pictograph codepoints matched by the ranges below.

    NOTE(review): several ranges in this class overlap or repeat; kept
    byte-identical to preserve behavior.
    """
    emoji_pattern = re.compile(
        r'[\U0001F600-\U0001F64F\U0001F300-\U0001F5FF\U0001F680-\U0001F6FF\U0001F700-\U0001F77F\U0001F780-\U0001F7FF\U0001F800-\U0001F8FF\U0001F900-\U0001F9FF\U0001FA00-\U0001FA6F\U0001FA70-\U0001FAFF\U00002702-\U000027B0\U000024C2-\U0001F251\U0001F004\U0001F0CF\U0001F170-\U0001F251\U0001F600-\U0001F64F\U00002702-\U000027B0\U000024C2-\U0001F251\U0001F300-\U0001F5FF\U0001F680-\U0001F6FF\U0001F700-\U0001F773\U0001F780-\U0001F7D8\U0001F7E0-\U0001F7EB\U0001F7F0-\U0001F7FF\U0001F800-\U0001F80B\U0001F90D-\U0001F9FF\U0001FA70-\U0001FA74\U0001F600-\U0001F64F\U0001F90D-\U0001F971\U0001F973-\U0001F978\U0001F97A-\U0001F9CB\U0001F9CD-\U0001F9FF]+'
    )
    return emoji_pattern.sub('', text)
34
+
35
def normalize_case(text):
    """Lower-case the whole string."""
    lowered = text.lower()
    return lowered
37
+
38
def remove_unnecessary_spaces(text):
    """Trim both ends, then collapse interior whitespace to single spaces."""
    return re.sub(r'\s+', ' ', text.strip())
42
+
43
def remove_punctuation_and_brackets(text):
    """Drop punctuation while keeping word chars, whitespace and square brackets."""
    cleaned = re.sub(r'[^\w\s\[\]]', '', text)
    return cleaned
46
+
47
def remove_numbered_brackets(text):
    """Remove citation-style markers like ``[12]``."""
    return re.sub(r'\[\d+\]', '', text)
50
+
51
+ def remove_initial_article(text):
52
+ words_to_remove = [
53
+ 'a', 'an', 'the', 'some', 'many', 'much', 'few', 'little',
54
+ 'several', 'a few', 'a little', 'a lot of', 'lots of', 'plenty of',
55
+ 'this', 'that', 'these', 'those', 'its' ]
56
+ words = text.split()
57
+ if words and words[0].lower() in words_to_remove:
58
+ words.pop(0)
59
+ return ' '.join(words)
60
+
61
def preprocess_text(text):
    """Normalize raw story text for the classifiers.

    Structure-dependent passes (URLs, HTML tags, @mentions, numbered
    citation brackets) run BEFORE the character-level passes.  The
    original order stripped punctuation/special characters first, which
    destroyed the very patterns (``://``, ``<...>``, ``@``, ``[12]``)
    the later passes match on, making them no-ops.
    """
    text = remove_newlines(text)
    text = remove_urls(text)
    text = remove_html_tags(text)
    text = remove_twitter_mentions(text)
    text = remove_emoticons(text)
    text = remove_numbered_brackets(text)
    # Character-level cleanup after all pattern-based passes.
    text = remove_punctuation_and_brackets(text)
    text = remove_special_characters(text)
    text = remove_numbers(text)
    text = normalize_case(text)
    text = remove_extra_spaces(text)
    text = remove_unnecessary_spaces(text)
    text = remove_initial_article(text)
    return text
#endregion
77
+
78
def softmax(x):
    """Numerically stable softmax along axis 0 (shifts by the max first)."""
    shifted = np.exp(x - np.max(x))
    total = shifted.sum(axis=0)
    return shifted / total
81
+
82
# Load and merge the three wattpad sources into one shuffled dataset.
dataset1 = load_dataset("Fizzarolli/wattpad", split="train")

df2 = pl.read_parquet("wattpad_stories.parquet")
df2 = df2.to_pandas()
dataset2 = Dataset.from_pandas(df2)

df3 = pl.read_parquet("wattpad_stories_2.parquet")
df3 = df3.to_pandas()
# BUG FIX: previously `Dataset.from_pandas(df2)` — duplicated the first
# parquet file and silently dropped wattpad_stories_2.parquet.
dataset3 = Dataset.from_pandas(df3)

# Fixed seed so the shuffle is reproducible across runs.
dataset = concatenate_datasets([dataset1, dataset2, dataset3]).shuffle(seed=42)

print(dataset)
print(dataset[0])
97
+
98
# Language detection
import fasttext

model = fasttext.load_model("glotlid.bin")  # https://huggingface.co/cis-lmu/glotlid

def add_language_column(example):
    """Attach `language` and `language_confidence` columns, predicted
    from the example's title via the GlotLID fastText model (top 3)."""
    scores = {}
    labels, likelihoods = model.predict(example["title"], 3)
    for raw_label, likelihood in zip(labels, likelihoods):
        code = raw_label.replace("__label__", "")
        # Accumulate per label (labels from one predict call are distinct,
        # so this effectively just records each likelihood).
        scores[code] = scores.get(code, 0) + likelihood
    if not scores:
        # No prediction at all: mark as unknown with zero confidence.
        example["language"] = "unk_Unkn"
        example["language_confidence"] = 0.0
        return example
    example["language"] = max(scores, key=scores.get)
    example["language_confidence"] = np.max(softmax(list(scores.values())))
    return example
118
+
119
dataset = dataset.map(add_language_column)

# Sanity check: schema plus one annotated row.
for snapshot in (dataset, dataset[0]):
    print(snapshot)
123
+
124
# NSFW detection
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Binary NSFW text classifier, loaded directly onto the GPU.
# NOTE(review): device_map="cuda" assumes a CUDA device is available — confirm.
# NOTE(review): this rebinds `model`, shadowing the fastText model above; safe
# only because the language-detection map has already run by this point.
tokenizer = AutoTokenizer.from_pretrained("eliasalbouzidi/distilbert-nsfw-text-classifier")
model = AutoModelForSequenceClassification.from_pretrained("eliasalbouzidi/distilbert-nsfw-text-classifier", device_map="cuda")
130
+
131
def add_nsfw_column(example):
    """Score every chapter of a story with the NSFW classifier.

    Adds `chapter_nsfw_scores` (one probability per chapter) and
    `overall_nsfw_score` (their mean; 0.0 when the story has no
    chapters — the original raised ZeroDivisionError in that case).
    """
    nsfw_scores = []
    for chapter_text in example["chapter_contents"]:
        preprocessed_text = preprocess_text(chapter_text)
        inputs = tokenizer(preprocessed_text, return_tensors="pt", padding=True, truncation=True, max_length=512).to("cuda")
        # Inference only: no_grad avoids building an autograd graph per chapter.
        with torch.no_grad():
            outputs = model(**inputs).logits
        probs = torch.softmax(outputs, dim=1).tolist()[0]
        # assumes index 1 is the "nsfw" class — TODO confirm against model config
        nsfw_scores.append(probs[1])
    example["overall_nsfw_score"] = (
        sum(nsfw_scores) / len(nsfw_scores) if nsfw_scores else 0.0
    )
    example["chapter_nsfw_scores"] = nsfw_scores
    return example
142
+
143
dataset = dataset.map(add_nsfw_column)

# Final sanity check before uploading the annotated dataset.
for snapshot in (dataset, dataset[0]):
    print(snapshot)

dataset.push_to_hub("Fizzarolli/wattpad2")