Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- cc-multilingual-main/README.md +56 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/__init__.py +0 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/__pycache__/__init__.cpython-310.pyc +0 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/__pycache__/__init__.cpython-38.pyc +0 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/__pycache__/document.cpython-310.pyc +0 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/__pycache__/document.cpython-38.pyc +0 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/__pycache__/exceptions.cpython-38.pyc +0 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/__pycache__/worker.cpython-310.pyc +0 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/__pycache__/worker.cpython-38.pyc +0 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/constants.py +2 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/data_types.py +45 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/document.py +178 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/exceptions.py +18 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/__init__.py +0 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/__pycache__/__init__.cpython-310.pyc +0 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/__pycache__/__init__.cpython-38.pyc +0 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/__pycache__/base.cpython-310.pyc +0 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/__pycache__/base.cpython-38.pyc +0 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/__pycache__/classifiers.cpython-310.pyc +0 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/__pycache__/classifiers.cpython-38.pyc +0 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/__pycache__/content.cpython-310.pyc +0 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/__pycache__/content.cpython-38.pyc +0 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/__pycache__/importance_weights.cpython-310.pyc +0 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/__pycache__/importance_weights.cpython-38.pyc +0 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/__pycache__/lines.cpython-310.pyc +0 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/__pycache__/lines.cpython-38.pyc +0 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/__pycache__/natural_language.cpython-310.pyc +0 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/__pycache__/natural_language.cpython-38.pyc +0 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/__pycache__/repetitions.cpython-310.pyc +0 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/__pycache__/repetitions.cpython-38.pyc +0 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/base.py +30 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/classifiers.py +114 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/content.py +189 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/importance_weights.py +303 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/lines.py +153 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/natural_language.py +197 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/repetitions.py +205 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/utils/__init__.py +0 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/utils/__pycache__/__init__.cpython-310.pyc +0 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/utils/__pycache__/__init__.cpython-38.pyc +0 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/utils/__pycache__/classifiers.cpython-310.pyc +0 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/utils/__pycache__/classifiers.cpython-38.pyc +0 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/utils/__pycache__/content.cpython-310.pyc +0 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/utils/__pycache__/content.cpython-38.pyc +0 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/utils/__pycache__/dsir.cpython-310.pyc +0 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/utils/__pycache__/dsir.cpython-38.pyc +0 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/utils/__pycache__/stop_words.cpython-310.pyc +0 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/utils/__pycache__/stop_words.cpython-38.pyc +0 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/utils/classifiers.py +15 -0
- cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/utils/content.py +39 -0
cc-multilingual-main/README.md
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# cc-multilingual
|
| 2 |
+
Downloading and dedup indic multi-lingual from CommonCrawl
|
| 3 |
+
### Installation for cc_net
|
| 4 |
+
```sh
|
| 5 |
+
cd cc_net/
|
| 6 |
+
make install .
|
| 7 |
+
```
|
| 8 |
+
### Choose a snapshot: snapshot-id
|
| 9 |
+
|
| 10 |
+
#### Step 1: Edit the config.myconfig.json file
|
| 11 |
+
```json
|
| 12 |
+
"dump": "snapshot-id",
|
| 13 |
+
"num_shards": 1600,
|
| 14 |
+
"lang_whitelist": ["as","bn","gu","kn","hi","ml","mr","ne","or","pb","sa","sd","ta","ur","te","ks","sat","mai","mni","kok","doi","brx"],
|
| 15 |
+
"mine_num_processes": 16,
|
| 16 |
+
"pipeline": [
|
| 17 |
+
"lid",
|
| 18 |
+
"keep_lang",
|
| 19 |
+
"pp_bucket",
|
| 20 |
+
"split_by_lang"
|
| 21 |
+
],
|
| 22 |
+
"target_size": "100M",
|
| 23 |
+
"output_dir": "data",
|
| 24 |
+
"mined_dir": "mined",
|
| 25 |
+
"cache_dir": "wet_cache"
|
| 26 |
+
```
|
| 27 |
+
|
| 28 |
+
#### Step 2: (Optional) Download data into cache
|
| 29 |
+
```sh
|
| 30 |
+
wget wet_file_path
|
| 31 |
+
python3 script.py wet.paths.gz 90 wet_cache/2023-40/
|
| 32 |
+
```
|
| 33 |
+
#### Step 3: Run the pipeline
|
| 34 |
+
```sh
|
| 35 |
+
python3 -m cc_net --config config/myconfig.json
|
| 36 |
+
```
|
| 37 |
+
|
| 38 |
+
## Deduplication
|
| 39 |
+
```
|
| 40 |
+
pip install app/requirements.txt
|
| 41 |
+
```
|
| 42 |
+
#### Step1: Add list of files downloaded from cc_net to listings/file.txt in format lang_shard.json.gz
|
| 43 |
+
|
| 44 |
+
#### Step 2: Computing minhash signatures
|
| 45 |
+
```
|
| 46 |
+
python3 app/src/pipeline.py --input_base_uri "file://path/to/ccnet/data" --output_base_uri "/path/to/output" --artifacts_dir "file:///path/to/empty/artifacts" --input /path/to/listings/file.txt --cc_snapshot_id 2023-50 --langs "hi" --inputs_per_process 5 --minhash_num_permutations 128 --minhash_ngram_size 13
|
| 47 |
+
```
|
| 48 |
+
|
| 49 |
+
#### Step 3: Applying bloomfilter
|
| 50 |
+
```
|
| 51 |
+
python3 app/src/bloomfilter.py --listings /path/to/listings/file.txt --input_base_uri "file://path/to/ccnet/data" --output_dir "/path/to/output" --parallel_readers 32 --batch_size 10
|
| 52 |
+
```
|
| 53 |
+
#### Step 4: Running LSH
|
| 54 |
+
```
|
| 55 |
+
python3 app/src/run_lsh.py --listings "/path/to/minhash-signature/listings/file.txt" --input_base_uri "file:///path/to/minhash-signature/files" --output_dir "/path/to/output" --similarity "0.8" --num_perm "128"
|
| 56 |
+
```
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/__init__.py
ADDED
|
File without changes
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (203 Bytes). View file
|
|
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/__pycache__/__init__.cpython-38.pyc
ADDED
|
Binary file (156 Bytes). View file
|
|
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/__pycache__/document.cpython-310.pyc
ADDED
|
Binary file (5.42 kB). View file
|
|
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/__pycache__/document.cpython-38.pyc
ADDED
|
Binary file (5.46 kB). View file
|
|
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/__pycache__/exceptions.cpython-38.pyc
ADDED
|
Binary file (1.29 kB). View file
|
|
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/__pycache__/worker.cpython-310.pyc
ADDED
|
Binary file (9.47 kB). View file
|
|
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/__pycache__/worker.cpython-38.pyc
ADDED
|
Binary file (9.32 kB). View file
|
|
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/constants.py
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
PRECISION = 8
|
| 2 |
+
CCNET_LABEL = "__label__cc"
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/data_types.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from dataclasses import dataclass
|
| 2 |
+
from msgspec import Struct
|
| 3 |
+
|
| 4 |
+
from typing import List, Tuple, Optional, Dict
|
| 5 |
+
from typing_extensions import TypeAlias
|
| 6 |
+
|
| 7 |
+
ScoreType: TypeAlias = Tuple[int, int, Optional[float]]
|
| 8 |
+
SignalType: TypeAlias = List[ScoreType]
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
@dataclass
|
| 12 |
+
class TextSlice:
|
| 13 |
+
text: str
|
| 14 |
+
start: int
|
| 15 |
+
end: int
|
| 16 |
+
|
| 17 |
+
def __len__(self):
|
| 18 |
+
return len(self.text)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class InputSpec(Struct):
|
| 22 |
+
raw_content: str
|
| 23 |
+
url: str
|
| 24 |
+
nlines: int
|
| 25 |
+
# original_nlines: int
|
| 26 |
+
source_domain: str
|
| 27 |
+
length: int
|
| 28 |
+
# original_length: int
|
| 29 |
+
language: str
|
| 30 |
+
language_score: float
|
| 31 |
+
# perplexity: float
|
| 32 |
+
bucket: str
|
| 33 |
+
digest: str
|
| 34 |
+
cc_segment: str
|
| 35 |
+
date_download: str
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class OutputSpec(Struct):
|
| 39 |
+
id: str
|
| 40 |
+
id_int: int
|
| 41 |
+
metadata: Dict[str, str]
|
| 42 |
+
quality_signals: Dict[str, List[Tuple[int, int, Optional[float]]]]
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/document.py
ADDED
|
@@ -0,0 +1,178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from nltk.tokenize import WordPunctTokenizer
|
| 2 |
+
import re
|
| 3 |
+
from typing import Optional, Tuple, Callable
|
| 4 |
+
|
| 5 |
+
from utilities.text import normalize, form_ngrams
|
| 6 |
+
from core.data_types import TextSlice
|
| 7 |
+
from core.quality_signals.utils.dsir import hash_feature
|
| 8 |
+
|
| 9 |
+
_word_tokenizer = WordPunctTokenizer()
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def _compute_ngrams(text_seq, n):
|
| 13 |
+
return tuple(form_ngrams(iter(text_seq), n))
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def split_paragraphs(
|
| 17 |
+
text: str, normalizer: Callable[[str], str], remove_empty: bool = True
|
| 18 |
+
) -> Tuple[TextSlice]:
|
| 19 |
+
"""
|
| 20 |
+
This function is adapted from dolma: https://github.com/allenai/dolma
|
| 21 |
+
|
| 22 |
+
Split a string into paragraphs. A paragraph is defined as a sequence of
|
| 23 |
+
zero or more characters, followed by a newline character, or a sequence
|
| 24 |
+
of one or more characters, followed by the end of the string.
|
| 25 |
+
"""
|
| 26 |
+
text_slices = tuple(
|
| 27 |
+
TextSlice(normalizer(text[match.start():match.end()]), match.start(),
|
| 28 |
+
match.end())
|
| 29 |
+
for match in re.finditer(r"([^\n]*\n|[^\n]+$)", text)
|
| 30 |
+
)
|
| 31 |
+
|
| 32 |
+
if remove_empty is True:
|
| 33 |
+
text_slices = tuple(
|
| 34 |
+
text_slice for text_slice in text_slices if text_slice[0].strip()
|
| 35 |
+
)
|
| 36 |
+
|
| 37 |
+
return text_slices
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
class Document:
|
| 41 |
+
__slots__ = (
|
| 42 |
+
"_raw_content", "_normalized_content", "_raw_lines",
|
| 43 |
+
"_normalized_lines", "_raw_words", "_normalized_words",
|
| 44 |
+
"_num_raw_words", "_num_normalized_words", "_domain", "_raw_2grams",
|
| 45 |
+
"_raw_3grams", "_norm_2grams", "_norm_3grams", "_norm_4grams",
|
| 46 |
+
"_hash_features"
|
| 47 |
+
)
|
| 48 |
+
|
| 49 |
+
def __init__(
|
| 50 |
+
self, content: str, domain: Optional[str],
|
| 51 |
+
precompute_ngrams: bool = False,
|
| 52 |
+
precompute_hash_features: bool = False,
|
| 53 |
+
dsir_buckets: Optional[int] = None
|
| 54 |
+
):
|
| 55 |
+
self._raw_content = content
|
| 56 |
+
self._domain = domain
|
| 57 |
+
|
| 58 |
+
# the normalized content: lowercased and punctuation removed
|
| 59 |
+
self._normalized_content = normalize(content)
|
| 60 |
+
|
| 61 |
+
# the lines of the document (split by newline)
|
| 62 |
+
self._raw_lines: Tuple[TextSlice] = split_paragraphs(
|
| 63 |
+
text=content, normalizer=lambda x: x, remove_empty=False
|
| 64 |
+
)
|
| 65 |
+
|
| 66 |
+
# the lines of the document (split by newline), normalized
|
| 67 |
+
self._normalized_lines: Tuple[TextSlice] = split_paragraphs(
|
| 68 |
+
text=content, normalizer=normalize, remove_empty=False
|
| 69 |
+
)
|
| 70 |
+
|
| 71 |
+
# the words of the document after normalization
|
| 72 |
+
self._raw_words = tuple(_word_tokenizer.tokenize(self._raw_content))
|
| 73 |
+
|
| 74 |
+
# the normalized words of the document (split by whitespace)
|
| 75 |
+
self._normalized_words = tuple(self._normalized_content.split())
|
| 76 |
+
|
| 77 |
+
# get number of words before and after normalization
|
| 78 |
+
self._num_raw_words = len(self._raw_words)
|
| 79 |
+
self._num_normalized_words = len(self._normalized_words)
|
| 80 |
+
|
| 81 |
+
# precompute ngrams
|
| 82 |
+
if precompute_ngrams:
|
| 83 |
+
# raw grams
|
| 84 |
+
self._raw_2grams = _compute_ngrams(self._raw_words, 2)
|
| 85 |
+
self._raw_3grams = _compute_ngrams(self._raw_words, 3)
|
| 86 |
+
|
| 87 |
+
# normalized grams
|
| 88 |
+
self._norm_2grams = _compute_ngrams(self._normalized_words, 2)
|
| 89 |
+
self._norm_3grams = _compute_ngrams(self._normalized_words, 3)
|
| 90 |
+
self._norm_4grams = _compute_ngrams(self._normalized_words, 4)
|
| 91 |
+
else:
|
| 92 |
+
self._raw_2grams = None
|
| 93 |
+
self._raw_3grams = None
|
| 94 |
+
self._norm_2grams = None
|
| 95 |
+
self._norm_3grams = None
|
| 96 |
+
self._norm_4grams = None
|
| 97 |
+
|
| 98 |
+
# precomupte hash features
|
| 99 |
+
if precompute_hash_features:
|
| 100 |
+
bigrams = self._raw_2grams or _compute_ngrams(self._raw_words, 2)
|
| 101 |
+
self._hash_features = hash_feature(
|
| 102 |
+
unigrams=self._raw_words,
|
| 103 |
+
bigrams=bigrams,
|
| 104 |
+
buckets=dsir_buckets
|
| 105 |
+
)
|
| 106 |
+
else:
|
| 107 |
+
self._hash_features = None
|
| 108 |
+
|
| 109 |
+
def __len__(self):
|
| 110 |
+
return len(self._raw_content)
|
| 111 |
+
|
| 112 |
+
@property
|
| 113 |
+
def raw_content(self):
|
| 114 |
+
return self._raw_content
|
| 115 |
+
|
| 116 |
+
@property
|
| 117 |
+
def normalized_content(self):
|
| 118 |
+
return self._normalized_content
|
| 119 |
+
|
| 120 |
+
@property
|
| 121 |
+
def raw_lines(self):
|
| 122 |
+
return self._raw_lines
|
| 123 |
+
|
| 124 |
+
@property
|
| 125 |
+
def normalized_lines(self):
|
| 126 |
+
return self._normalized_lines
|
| 127 |
+
|
| 128 |
+
@property
|
| 129 |
+
def raw_words(self):
|
| 130 |
+
return self._raw_words
|
| 131 |
+
|
| 132 |
+
@property
|
| 133 |
+
def normalized_words(self):
|
| 134 |
+
return self._normalized_words
|
| 135 |
+
|
| 136 |
+
@property
|
| 137 |
+
def num_raw_words(self):
|
| 138 |
+
return self._num_raw_words
|
| 139 |
+
|
| 140 |
+
@property
|
| 141 |
+
def num_normalized_words(self):
|
| 142 |
+
return self._num_normalized_words
|
| 143 |
+
|
| 144 |
+
@property
|
| 145 |
+
def domain(self):
|
| 146 |
+
return self._domain
|
| 147 |
+
|
| 148 |
+
@property
|
| 149 |
+
def raw_1grams(self):
|
| 150 |
+
return self._raw_words
|
| 151 |
+
|
| 152 |
+
@property
|
| 153 |
+
def raw_2grams(self):
|
| 154 |
+
return self._raw_2grams
|
| 155 |
+
|
| 156 |
+
@property
|
| 157 |
+
def raw_3grams(self):
|
| 158 |
+
return self._raw_3grams
|
| 159 |
+
|
| 160 |
+
@property
|
| 161 |
+
def norm_1grams(self):
|
| 162 |
+
return self._normalized_words
|
| 163 |
+
|
| 164 |
+
@property
|
| 165 |
+
def norm_2grams(self):
|
| 166 |
+
return self._norm_2grams
|
| 167 |
+
|
| 168 |
+
@property
|
| 169 |
+
def norm_3grams(self):
|
| 170 |
+
return self._norm_3grams
|
| 171 |
+
|
| 172 |
+
@property
|
| 173 |
+
def norm_4grams(self):
|
| 174 |
+
return self._norm_4grams
|
| 175 |
+
|
| 176 |
+
@property
|
| 177 |
+
def hash_features(self):
|
| 178 |
+
return self._hash_features
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/exceptions.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
class S3ReadError(Exception):
|
| 2 |
+
def __init__(self, message):
|
| 3 |
+
super().__init__(message)
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class S3WriteError(Exception):
|
| 7 |
+
def __init__(self, message):
|
| 8 |
+
super().__init__(message)
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class LocalReadError(Exception):
|
| 12 |
+
def __init__(self, message):
|
| 13 |
+
super().__init__(message)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class UnknownReadError(Exception):
|
| 17 |
+
def __init__(self, message):
|
| 18 |
+
super().__init__(message)
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/__init__.py
ADDED
|
File without changes
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (219 Bytes). View file
|
|
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/__pycache__/__init__.cpython-38.pyc
ADDED
|
Binary file (172 Bytes). View file
|
|
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/__pycache__/base.cpython-310.pyc
ADDED
|
Binary file (1.38 kB). View file
|
|
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/__pycache__/base.cpython-38.pyc
ADDED
|
Binary file (1.35 kB). View file
|
|
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/__pycache__/classifiers.cpython-310.pyc
ADDED
|
Binary file (4.16 kB). View file
|
|
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/__pycache__/classifiers.cpython-38.pyc
ADDED
|
Binary file (4.25 kB). View file
|
|
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/__pycache__/content.cpython-310.pyc
ADDED
|
Binary file (7.3 kB). View file
|
|
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/__pycache__/content.cpython-38.pyc
ADDED
|
Binary file (7.29 kB). View file
|
|
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/__pycache__/importance_weights.cpython-310.pyc
ADDED
|
Binary file (8.45 kB). View file
|
|
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/__pycache__/importance_weights.cpython-38.pyc
ADDED
|
Binary file (8.84 kB). View file
|
|
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/__pycache__/lines.cpython-310.pyc
ADDED
|
Binary file (6.51 kB). View file
|
|
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/__pycache__/lines.cpython-38.pyc
ADDED
|
Binary file (6.86 kB). View file
|
|
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/__pycache__/natural_language.cpython-310.pyc
ADDED
|
Binary file (7.77 kB). View file
|
|
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/__pycache__/natural_language.cpython-38.pyc
ADDED
|
Binary file (7.84 kB). View file
|
|
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/__pycache__/repetitions.cpython-310.pyc
ADDED
|
Binary file (7.02 kB). View file
|
|
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/__pycache__/repetitions.cpython-38.pyc
ADDED
|
Binary file (7.27 kB). View file
|
|
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/base.py
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from core.document import Document
|
| 2 |
+
from core.data_types import SignalType
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class RPSBase:
|
| 6 |
+
r""" Base class for RP signal functions. Each child class must implement
|
| 7 |
+
the __call__ method. The __call__ method takes a document as input and
|
| 8 |
+
returns a score. """
|
| 9 |
+
DATA_TYPE = SignalType
|
| 10 |
+
|
| 11 |
+
RPS_PREFIX: str = "RPS_"
|
| 12 |
+
|
| 13 |
+
__slots__ = ["__field_name"]
|
| 14 |
+
|
| 15 |
+
def __init__(self, *args, **kwargs): # noqa
|
| 16 |
+
# make sure all classes start with RPS_; this is to ensure that
|
| 17 |
+
# the get_rule_based_signals function works correctly when new signal
|
| 18 |
+
# functions are added
|
| 19 |
+
assert self.__class__.__name__.startswith(self.RPS_PREFIX), \
|
| 20 |
+
f"Name of signal function must" \
|
| 21 |
+
f" start with {self.RPS_PREFIX}; got {self.__class__.__name__}"
|
| 22 |
+
|
| 23 |
+
self.__field_name = self.__class__.__name__.lower()
|
| 24 |
+
|
| 25 |
+
def __call__(self, document: Document):
|
| 26 |
+
raise NotImplementedError
|
| 27 |
+
|
| 28 |
+
@property
|
| 29 |
+
def field_name(self):
|
| 30 |
+
return self.__field_name
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/classifiers.py
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
from typing import List, Tuple, Type
|
| 3 |
+
import fasttext
|
| 4 |
+
|
| 5 |
+
from core.constants import PRECISION, CCNET_LABEL
|
| 6 |
+
from core.quality_signals.base import RPSBase
|
| 7 |
+
from core.document import Document
|
| 8 |
+
from core.data_types import SignalType
|
| 9 |
+
from core.quality_signals.utils.classifiers import \
|
| 10 |
+
preprocess_quality_classifier
|
| 11 |
+
from utilities.register.registry_utils import *
|
| 12 |
+
|
| 13 |
+
__all__ = [
|
| 14 |
+
"register_classifier_callables", "classifier_schema"
|
| 15 |
+
]
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def classifier_schema() -> List[Tuple[str, Type]]:
|
| 19 |
+
r""" Returns a list of signal names and their data types """
|
| 20 |
+
return signal_schema(module=sys.modules[__name__])
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def register_classifier_callables(
|
| 24 |
+
wikiref_model: str,
|
| 25 |
+
palm_model: str,
|
| 26 |
+
wikipedia_model: str
|
| 27 |
+
) -> List[RPSBase]:
|
| 28 |
+
r""" Returns a list of signal functions (i.e., RPSBase instances) that
|
| 29 |
+
are used to extract content signals from a document.
|
| 30 |
+
|
| 31 |
+
Args:
|
| 32 |
+
wikiref_model: A fasttext model trained on Wikipedia references.
|
| 33 |
+
palm_model: A fasttext model trained on ccnet vs
|
| 34 |
+
{books, openwebtext, wikipedia}.
|
| 35 |
+
wikipedia_model: A fasttext model trained on Wikipedia articles.
|
| 36 |
+
|
| 37 |
+
Returns:
|
| 38 |
+
A list of signal function class instances.
|
| 39 |
+
"""
|
| 40 |
+
return list(map(
|
| 41 |
+
lambda cls: cls(
|
| 42 |
+
wikiref_model=wikiref_model,
|
| 43 |
+
palm_model=palm_model,
|
| 44 |
+
wikipedia_model=wikipedia_model,
|
| 45 |
+
),
|
| 46 |
+
get_callables_from_module(module=sys.modules[__name__])
|
| 47 |
+
))
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
class BaseMLSignal(RPSBase):
|
| 51 |
+
__slots__ = "_ft_model"
|
| 52 |
+
|
| 53 |
+
def __init__(self, ft_model_file: str):
|
| 54 |
+
super(BaseMLSignal, self).__init__()
|
| 55 |
+
if ft_model_file is None:
|
| 56 |
+
self._ft_model = None
|
| 57 |
+
else:
|
| 58 |
+
self._ft_model = fasttext.load_model(str(ft_model_file))
|
| 59 |
+
|
| 60 |
+
def __call__(self, document: Document) -> SignalType:
|
| 61 |
+
if self._ft_model is None:
|
| 62 |
+
return [(0, len(document), None)]
|
| 63 |
+
|
| 64 |
+
if len(document.raw_content) == 0:
|
| 65 |
+
return [(0, len(document), None)]
|
| 66 |
+
|
| 67 |
+
text = preprocess_quality_classifier(document=document)
|
| 68 |
+
pred = self._ft_model.predict(text=text)
|
| 69 |
+
|
| 70 |
+
(pred_label, pred_prob) = pred
|
| 71 |
+
pred_label = pred_label[0]
|
| 72 |
+
pred_prob = pred_prob[0]
|
| 73 |
+
|
| 74 |
+
if pred_label == CCNET_LABEL:
|
| 75 |
+
high_quality_score = 1 - pred_prob
|
| 76 |
+
else:
|
| 77 |
+
high_quality_score = pred_prob
|
| 78 |
+
|
| 79 |
+
score = round(float(high_quality_score), PRECISION)
|
| 80 |
+
return [(0, len(document), score)]
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
class RPS_Doc_ML_Wikiref_Score(BaseMLSignal): # noqa
|
| 84 |
+
r""" Fasttext classifier prediction for the document being a Wikipedia
|
| 85 |
+
reference. This is the same fasttext model as in the RedPajama-1T
|
| 86 |
+
dataset."""
|
| 87 |
+
__slots__ = ()
|
| 88 |
+
|
| 89 |
+
def __init__(self, wikiref_model: str, *args, **kwargs): # noqa
|
| 90 |
+
super(RPS_Doc_ML_Wikiref_Score, self).__init__(
|
| 91 |
+
ft_model_file=wikiref_model
|
| 92 |
+
)
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
class RPS_Doc_ML_Palm_Score(BaseMLSignal): # noqa
|
| 96 |
+
r""" Fasttext classifier prediction for the document being a Wikipedia
|
| 97 |
+
article, OpenWebText sample or a RedPajama-V1 book."""
|
| 98 |
+
__slots__ = ()
|
| 99 |
+
|
| 100 |
+
def __init__(self, palm_model: str, *args, **kwargs): # noqa
|
| 101 |
+
super(RPS_Doc_ML_Palm_Score, self).__init__(
|
| 102 |
+
ft_model_file=palm_model
|
| 103 |
+
)
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
class RPS_Doc_ML_Wikipedia_Score(BaseMLSignal): # noqa
|
| 107 |
+
r""" Fasttext classifier prediction for the document being a Wikipedia
|
| 108 |
+
article."""
|
| 109 |
+
__slots__ = ()
|
| 110 |
+
|
| 111 |
+
def __init__(self, wikipedia_model: str, *args, **kwargs): # noqa
|
| 112 |
+
super(RPS_Doc_ML_Wikipedia_Score, self).__init__(
|
| 113 |
+
ft_model_file=wikipedia_model
|
| 114 |
+
)
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/content.py
ADDED
|
@@ -0,0 +1,189 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
import sys
|
| 3 |
+
import operator
|
| 4 |
+
from pathlib import Path
|
| 5 |
+
from typing import List, Tuple, Type
|
| 6 |
+
|
| 7 |
+
from core.constants import PRECISION
|
| 8 |
+
from core.quality_signals.base import RPSBase
|
| 9 |
+
from core.quality_signals.utils.stop_words import get_stop_words
|
| 10 |
+
from core.document import Document
|
| 11 |
+
from core.data_types import SignalType
|
| 12 |
+
from core.quality_signals.utils.content import \
|
| 13 |
+
load_bad_words, load_bad_urls_index
|
| 14 |
+
from utilities.register.registry_utils import *
|
| 15 |
+
from utilities.text import form_ngrams
|
| 16 |
+
|
| 17 |
+
__all__ = ["register_content_callables", "content_schema"]
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def content_schema() -> List[Tuple[str, Type]]:
|
| 21 |
+
r""" Returns a list of signal names and their data types """
|
| 22 |
+
return signal_schema(module=sys.modules[__name__])
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def register_content_callables(
|
| 26 |
+
language: str, bad_urls_dir: str, bad_words_dir: str
|
| 27 |
+
) -> List[RPSBase]:
|
| 28 |
+
r""" Returns a list of signal functions (i.e., RPSBase instances) that
|
| 29 |
+
are used to extract content signals from a document.
|
| 30 |
+
|
| 31 |
+
Args:
|
| 32 |
+
language: The language of the document.
|
| 33 |
+
bad_urls_dir: directory containing the UT1 blacklist.
|
| 34 |
+
bad_words_dir: directory containing the LDNOOBW blacklist.
|
| 35 |
+
|
| 36 |
+
Returns:
|
| 37 |
+
A list of signal function class instances.
|
| 38 |
+
"""
|
| 39 |
+
return list(map(
|
| 40 |
+
lambda cls: cls(
|
| 41 |
+
language=language,
|
| 42 |
+
bad_urls_dir=bad_urls_dir,
|
| 43 |
+
bad_words_dir=bad_words_dir
|
| 44 |
+
),
|
| 45 |
+
get_callables_from_module(module=sys.modules[__name__])
|
| 46 |
+
))
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
class RPS_Doc_LDNOOBW_Words(RPSBase):  # noqa
    r""" The number of sequences of words that are contained in the
    List-of-Dirty-Naughty-Obscene-and-Otherwise-Bad-Words blocklist. The
    blocklist is obtained from
    https://github.com/LDNOOBW/List-of-Dirty-Naughty-Obscene-and-Otherwise-Bad-Words
    """
    __slots__ = ["_block_words", "_gram_vals"]

    def __init__(
            self, bad_words_dir: str, language: str, *args, **kwargs  # noqa
    ):
        super(RPS_Doc_LDNOOBW_Words, self).__init__()
        # language-specific blocklist of bad word sequences
        self._block_words = load_bad_words(
            bad_words_dir=Path(bad_words_dir), lang=language
        )

        # cache the distinct ngram sizes occurring in the blocklist: an
        # entry with k spaces is a (k+1)-gram
        self._gram_vals = set(
            1 + w.count(" ") for w in self._block_words
        )

    def __call__(self, document: Document) -> SignalType:
        if len(document.normalized_content) == 0:
            return [(0, len(document), 0.0)]

        num_dirty = 0

        # for each ngram size, count the document ngrams that appear in
        # the blocklist
        for n in self._gram_vals:
            if n == 1:
                # unigrams are matched directly against the word list
                num_dirty += sum(
                    1 for w in document.normalized_words
                    if w in self._block_words
                )
                continue

            # try to fetch the cached ngrams, otherwise compute them
            # on the fly
            ngrams = (
                getattr(document, f"norm_{n}grams", None)
                or form_ngrams(iter(document.normalized_words), n)
            )
            num_dirty += sum(
                1 for t in ngrams if " ".join(t) in self._block_words
            )

        score = float(num_dirty)
        return [(0, len(document), score)]
|
| 102 |
+
|
| 103 |
+
class RPS_Doc_Lorem_Ipsum(RPSBase):  # noqa
    r""" The ratio between the number of occurences of 'lorem ipsum'
    and the number of characters in the text after normalization. Text is
    normalized by lowercasing and removing punctuation. """
    SEARCH_TEXT = "lorem ipsum"
    SEARCH_REGEX = re.compile(r"lorem ipsum", re.IGNORECASE)

    __slots__ = ()

    def __call__(self, document: Document) -> SignalType:
        content = document.normalized_content
        doc_len = len(document)

        # empty documents trivially score zero
        if len(content) == 0:
            return [(0, doc_len, 0.0)]

        # cheap substring pre-check before running the regex
        if self.SEARCH_TEXT not in content:
            return [(0, doc_len, 0.0)]

        num_occurences = len(self.SEARCH_REGEX.findall(content))

        score = round(float(num_occurences) / len(content), PRECISION)
        return [(0, doc_len, score)]
|
| 128 |
+
|
| 129 |
+
class RPS_Doc_Curly_Bracket(RPSBase):  # noqa
    r""" The ratio between the number of occurences of '{' or '}' and the
    number of characters in the raw text. """
    SEARCH_TEXT = ("{", "}")
    __slots__ = ()

    def __call__(self, document: Document) -> SignalType:
        raw = document.raw_content
        doc_len = len(document)

        if len(raw) == 0:
            return [(0, doc_len, 0.0)]

        # fast path: neither bracket occurs at all
        if not any(symbol in raw for symbol in self.SEARCH_TEXT):
            return [(0, doc_len, 0.0)]

        # total occurrences over both bracket characters
        num_occurences = sum(raw.count(symbol) for symbol in self.SEARCH_TEXT)

        score = round(float(num_occurences) / len(raw), PRECISION)
        return [(0, doc_len, score)]
|
| 152 |
+
|
| 153 |
+
class RPS_Doc_UT1_Blacklist(RPSBase):  # noqa
    r""" A categorical id of the list of categories of the domain of the
    document. Categories are obtained from the UT1 blacklist.
    """
    __slots__ = ["_ut1_mapping"]

    def __init__(self, bad_urls_dir: str, *args, **kwargs):  # noqa
        super(RPS_Doc_UT1_Blacklist, self).__init__()
        # NOTE(review): loading of the UT1 index is currently disabled, so
        # every lookup in __call__ misses and the signal is always None.
        # Re-enable the next line to restore the signal:
        # self._ut1_mapping = load_bad_urls_index(Path(bad_urls_dir))
        self._ut1_mapping = {}

    def __call__(self, document: Document) -> SignalType:
        # the mapping may miss, in which case the score is None; the
        # original `score: int` annotation was inaccurate for that case
        score = self._ut1_mapping.get(document.domain, None)
        return [(0, len(document), score)]
|
| 168 |
+
|
| 169 |
+
class RPS_Doc_Stop_Word_Fraction(RPSBase):  # noqa
    r""" The ratio between the number of stop words and the number of words in
    the document. """
    __slots__ = ["_stop_words"]

    def __init__(self, language: str, *args, **kwargs):  # noqa
        super(RPS_Doc_Stop_Word_Fraction, self).__init__()
        # language-specific stop word collection used for membership tests
        self._stop_words = get_stop_words(language)

    def __call__(self, document: Document) -> SignalType:
        # fix: the original only guarded on normalized_words, but the ratio
        # below divides by num_raw_words — also guard against that being 0
        # to avoid a ZeroDivisionError
        if len(document.normalized_words) == 0 or document.num_raw_words == 0:
            return [(0, len(document), .0)]

        # count raw words that appear in the stop word list
        num_stop_words = sum(
            map(lambda w: w in self._stop_words, document.raw_words)
        )

        score = float(num_stop_words) / document.num_raw_words
        score = round(score, PRECISION)

        return [(0, len(document), score)]
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/importance_weights.py
ADDED
|
@@ -0,0 +1,303 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import scipy.stats as stats
|
| 3 |
+
import sys
|
| 4 |
+
from typing import List, Tuple, Type, Optional
|
| 5 |
+
from pathlib import Path
|
| 6 |
+
|
| 7 |
+
from core.constants import PRECISION
|
| 8 |
+
from core.quality_signals.base import RPSBase
|
| 9 |
+
from core.quality_signals.utils.dsir import hash_feature
|
| 10 |
+
from core.document import Document
|
| 11 |
+
from core.data_types import SignalType
|
| 12 |
+
|
| 13 |
+
from utilities.register.registry_utils import *
|
| 14 |
+
from utilities.text import form_ngrams
|
| 15 |
+
|
| 16 |
+
__all__ = [
|
| 17 |
+
"register_importance_weights_callables",
|
| 18 |
+
"importance_weights_schema"
|
| 19 |
+
]
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def importance_weights_schema() -> List[Tuple[str, Type]]:
    r""" Return the (signal name, data type) pairs for this module's signals. """
    current_module = sys.modules[__name__]
    return signal_schema(module=current_module)
|
| 26 |
+
|
| 27 |
+
def register_importance_weights_callables(
        source_fps: Optional[Tuple[str]],
        wiki_fps: Optional[Tuple[str]],
        openwebtext_fps: Optional[Tuple[str]],
        books_fps: Optional[Tuple[str]],
        language: str
) -> List[RPSBase]:
    r""" Instantiate every importance-weight signal class defined in this
    module.

    Args:
        source_fps: file paths for the source domain model, or None.
        wiki_fps: file paths for the Wikipedia target model, or None.
        openwebtext_fps: file paths for the OpenWebText target model, or None.
        books_fps: file paths for the Books target model, or None.
        language: The language of the document.

    Returns:
        A list of signal function class instances.
    """
    signal_classes = get_callables_from_module(module=sys.modules[__name__])
    return [
        signal_cls(
            language=language,
            source_fps=source_fps,
            wiki_fps=wiki_fps,
            openwebtext_fps=openwebtext_fps,
            books_fps=books_fps,
        )
        for signal_cls in signal_classes
    ]
|
| 51 |
+
|
| 52 |
+
class Base_Importance(RPSBase):  # noqa
    r""" Base class for functions which return the log ratio of the likelihood
    of the document's features with respect to the target domain
    versus the source domain. """

    __slots__ = (
        "_log_diff_dist", "_feature_dim", "_target_lambda",
        "_source_lambda", "_length_correction"
    )

    def __init__(
            self,
            target_fps: Tuple[str, str],
            source_fps: Tuple[str, str],
            language: str,
            length_correction: bool = False
    ):
        r""" Build the log-ratio distribution from hashed ngram counts.

        Args:
            target_fps: (count file, lambda file) pair for the target domain.
            source_fps: (count file, lambda file) pair for the source domain.
            language: language the count files were built for; checked
                against the second dot-separated token of each count
                filename stem.
            length_correction: if True, a Poisson length penalty is added
                to the score in __call__.
        """
        super(Base_Importance, self).__init__()
        self._length_correction = length_correction

        # if either domain is missing, the signal is disabled and
        # __call__ always returns a None score
        if target_fps is None or source_fps is None:
            self._log_diff_dist = None
            self._feature_dim = None
            return

        # fix: the original misspelled this local as `target_lambbda_fp`
        target_count_fp, target_lambda_fp = target_fps
        source_count_fp, source_lambda_fp = source_fps

        assert language == Path(target_count_fp).stem.split(".")[1], \
            f"Language mismatch between {target_count_fp} and {language}"

        # fix: the original error message referenced target_count_fp here
        assert language == Path(source_count_fp).stem.split(".")[1], \
            f"Language mismatch between {source_count_fp} and {language}"

        # load hash counts and normalize them into distributions
        target_counts = np.load(target_count_fp)
        target_dist = target_counts / target_counts.sum()
        source_counts = np.load(source_count_fp)
        source_dist = source_counts / source_counts.sum()

        if length_correction:
            # Poisson rate parameters for the length penalty
            self._target_lambda = np.load(target_lambda_fp)
            self._source_lambda = np.load(source_lambda_fp)
        else:
            self._target_lambda = None
            self._source_lambda = None

        # compute the elementwise log ratio of the two distributions;
        # 1e-8 avoids log(0) on empty buckets
        self._feature_dim = target_counts.shape[0]
        self._log_diff_dist = np.array(
            np.log(target_dist + 1e-8) - np.log(source_dist + 1e-8)
        )

    def __call__(self, document: Document) -> SignalType:
        if self._log_diff_dist is None:
            return [(0, len(document), None)]

        doc_len = len(document)

        if doc_len == 0:
            return [(0, doc_len, None)]

        # try to fetch cached features, if not compute them
        features = (
            document.hash_features
            if document.hash_features is not None
            else
            hash_feature(
                unigrams=document.raw_words,
                # fetch cached bigrams, otherwise compute them on the fly
                bigrams=(
                    document.raw_2grams
                    or
                    tuple(form_ngrams(iter(document.raw_words), 2))
                ),
                buckets=self._feature_dim
            )
        )

        # inner product of the feature counts with the log-ratio table
        logratio = np.inner(features, self._log_diff_dist)
        score = float(logratio)

        if not self._length_correction:
            score = round(score, PRECISION)
            return [(0, doc_len, score)]

        # correct for the length assuming a Poisson distribution
        return self.__add_length_penalty(score, doc_len)

    def __add_length_penalty(self, score, doc_len):
        # correct for the length assuming a Poisson distribution over
        # document lengths in each domain
        len_prob_source = stats.poisson.pmf(doc_len, self._source_lambda)
        len_prob_target = stats.poisson.pmf(doc_len, self._target_lambda)

        len_correction = np.log(len_prob_target + 1e-8) - \
            np.log(len_prob_source + 1e-8)

        score += float(len_correction)
        score = round(score, PRECISION)
        return [(0, doc_len, score)]
|
| 153 |
+
|
| 154 |
+
class RPS_Doc_Wikipedia_Importance(Base_Importance):  # noqa
    r""" Given a bag of {1,2}-wordgram model trained on Wikipedia articles p,
    and a model trained on the source domain q, this is the logarithm of
    the ratio p(doc)/q(doc). No length correction is applied. """
    __slots__ = ()

    def __init__(
            self,
            wiki_fps: Tuple[str, str],
            source_fps: Tuple[str, str],
            language: str,
            *args, **kwargs  # noqa
    ):
        # Wikipedia is the target domain; length penalty disabled
        super().__init__(
            target_fps=wiki_fps,
            source_fps=source_fps,
            language=language,
            length_correction=False
        )
|
| 177 |
+
|
| 178 |
+
class RPS_Doc_Wikipedia_Importance_Length_Correction(  # noqa
    Base_Importance
):
    r""" Given a bag of {1,2}-wordgram model trained on Wikipedia articles p,
    and a model trained on the source domain q, this is the logarithm of
    the ratio p(doc)/q(doc), adjusted by a Poisson length penalty term
    log(p_poisson(len) / q_poisson(len)). """
    __slots__ = ()

    def __init__(
            self,
            wiki_fps: Tuple[str, str],
            source_fps: Tuple[str, str],
            language: str,
            *args, **kwargs  # noqa
    ):
        # Wikipedia is the target domain; length penalty enabled
        super().__init__(
            target_fps=wiki_fps,
            source_fps=source_fps,
            language=language,
            length_correction=True
        )
|
| 204 |
+
|
| 205 |
+
class RPS_Doc_Books_Importance(Base_Importance):  # noqa
    r""" Given a bag of {1,2}-wordgram model trained on Books p,
    and a model trained on the source domain q, this is the logarithm of
    the ratio p(doc)/q(doc). No length correction is applied. """
    __slots__ = ()

    def __init__(
            self,
            books_fps: Tuple[str, str],
            source_fps: Tuple[str, str],
            language: str,
            *args, **kwargs  # noqa
    ):
        # Books is the target domain; length penalty disabled
        super().__init__(
            target_fps=books_fps,
            source_fps=source_fps,
            language=language,
            length_correction=False
        )
|
| 228 |
+
|
| 229 |
+
class RPS_Doc_Books_Importance_Length_Correction(  # noqa
    Base_Importance
):
    r""" Given a bag of {1,2}-wordgram model trained on Books p,
    and a model trained on the source domain q, this is the logarithm of
    the ratio p(doc)/q(doc), adjusted by a Poisson length penalty term
    log(p_poisson(len) / q_poisson(len)). """
    __slots__ = ()

    def __init__(
            self,
            books_fps: Tuple[str, str],
            source_fps: Tuple[str, str],
            language: str,
            *args, **kwargs  # noqa
    ):
        # Books is the target domain; length penalty enabled
        super().__init__(
            target_fps=books_fps,
            source_fps=source_fps,
            language=language,
            length_correction=True
        )
|
| 254 |
+
|
| 255 |
+
class RPS_Doc_OpenWebText_Importance(Base_Importance):  # noqa
    r""" Given a bag of {1,2}-wordgram model trained on OpenWebText p,
    and a model trained on the source domain q, this is the logarithm of
    the ratio p(doc)/q(doc). No length correction is applied. """
    __slots__ = ()

    def __init__(
            self,
            openwebtext_fps: Tuple[str, str],
            source_fps: Tuple[str, str],
            language: str,
            *args, **kwargs  # noqa
    ):
        # OpenWebText is the target domain; length penalty disabled
        super().__init__(
            target_fps=openwebtext_fps,
            source_fps=source_fps,
            language=language,
            length_correction=False
        )
|
| 278 |
+
|
| 279 |
+
class RPS_Doc_OpenWebText_Importance_Length_Correction(  # noqa
    Base_Importance
):
    r""" Given a bag of {1,2}-wordgram model trained on OpenWebText p,
    and a model trained on the source domain q, this is the logarithm of
    the ratio p(doc)/q(doc), adjusted by a Poisson length penalty term
    log(p_poisson(len) / q_poisson(len)). """
    __slots__ = ()

    def __init__(
            self,
            openwebtext_fps: Tuple[str, str],
            source_fps: Tuple[str, str],
            language: str,
            *args, **kwargs  # noqa
    ):
        # OpenWebText is the target domain; length penalty enabled
        super().__init__(
            target_fps=openwebtext_fps,
            source_fps=source_fps,
            language=language,
            length_correction=True
        )
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/lines.py
ADDED
|
@@ -0,0 +1,153 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
from typing import List, Tuple, Type
|
| 3 |
+
|
| 4 |
+
from core.constants import PRECISION
|
| 5 |
+
from core.quality_signals.base import RPSBase
|
| 6 |
+
from core.data_types import SignalType, ScoreType, TextSlice
|
| 7 |
+
from core.document import Document
|
| 8 |
+
from utilities.register.registry_utils import *
|
| 9 |
+
|
| 10 |
+
__all__ = [
|
| 11 |
+
"register_lines_callables", "lines_schema"
|
| 12 |
+
]
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def lines_schema() -> List[Tuple[str, Type]]:
    r""" Return the (signal name, data type) pairs for this module's signals. """
    current_module = sys.modules[__name__]
    return signal_schema(module=current_module)
|
| 19 |
+
|
| 20 |
+
def register_lines_callables() -> List[RPSBase]:
    r""" Instantiate every line signal class defined in this module.

    Returns:
        A list of signal function class instances.
    """
    signal_classes = get_callables_from_module(module=sys.modules[__name__])
    return [signal_cls() for signal_cls in signal_classes]
|
| 32 |
+
|
| 33 |
+
class RPS_Lines_Javascript_Counts(RPSBase):  # noqa
    r""" The number of occurences of the word "javascript" in each line. """
    SEARCH_TEXT = "javascript"
    __slots__ = ()

    def _process_line(self, text_slice: TextSlice) -> ScoreType:
        if len(text_slice.text) == 0:
            return (text_slice.start, text_slice.end, 0.0)

        # count whitespace-delimited tokens equal to the search word
        score = float(text_slice.text.split().count(self.SEARCH_TEXT))
        return (text_slice.start, text_slice.end, score)

    def __call__(self, document: Document) -> SignalType:
        return [self._process_line(ts) for ts in document.normalized_lines]
|
| 51 |
+
|
| 52 |
+
class RPS_Lines_Ending_With_Terminal_Punctution_Mark(RPSBase):  # noqa
    r""" A list of integers indicating whether (1) or not (0) a line ends with
    a terminal punctuation mark. A terminal punctation mark is defined as
    one of the following: ".", "!", "?", "”" """
    TERMINAL_PUNCTUATION_MARKS = (".", "!", "?", "”")
    __slots__ = ()

    def _process_line(self, text_slice: TextSlice) -> ScoreType:
        # str.endswith accepts a tuple of suffixes; trailing whitespace is
        # stripped before the test
        ends_with_mark = text_slice.text.rstrip().endswith(
            self.TERMINAL_PUNCTUATION_MARKS
        )
        return (text_slice.start, text_slice.end, float(ends_with_mark))

    def __call__(self, document: Document) -> SignalType:
        return [self._process_line(ts) for ts in document.raw_lines]
| 69 |
+
|
| 70 |
+
class RPS_Lines_Num_Words(RPSBase):  # noqa
    r""" The number of words in each line. This is computed based on the
    normalized text (lowercased, punctuation removed). """
    __slots__ = ()

    def _process_line(self, text_slice: TextSlice) -> ScoreType:  # noqa
        word_count = len(text_slice.text.split())
        return (text_slice.start, text_slice.end, word_count)

    def __call__(self, document: Document) -> SignalType:
        return [self._process_line(ts) for ts in document.normalized_lines]
|
| 83 |
+
|
| 84 |
+
class RPS_Lines_Uppercase_Letter_Fraction(RPSBase):  # noqa
    r""" The ratio between number of uppercase letters and total number of
    characters in each line. This is based on the raw text. """
    __slots__ = ()

    def _process_line(self, text_slice: TextSlice) -> ScoreType:  # noqa
        slice_len = len(text_slice)
        if slice_len == 0:
            return (text_slice.start, text_slice.end, 0.0)

        num_upper = sum(map(str.isupper, text_slice.text))
        score = round(num_upper / slice_len, PRECISION)
        return (text_slice.start, text_slice.end, score)

    def __call__(self, document: Document) -> SignalType:
        return [self._process_line(ts) for ts in document.raw_lines]
|
| 100 |
+
|
| 101 |
+
class RPS_Lines_Numerical_Chars_Fraction(RPSBase):  # noqa
    r""" The ratio between number of numerical characters and total number of
    characters in each line. This is based on text after lowercasing and
    removing punctuation. """
    __slots__ = ()

    def _process_line(self, text_slice: TextSlice) -> ScoreType:  # noqa
        slice_len = len(text_slice)
        if slice_len == 0:
            return (text_slice.start, text_slice.end, 0.0)

        num_numeric = sum(map(str.isnumeric, text_slice.text))
        score = round(num_numeric / slice_len, PRECISION)
        return (text_slice.start, text_slice.end, score)

    def __call__(self, document: Document) -> SignalType:
        return [self._process_line(ts) for ts in document.normalized_lines]
|
| 118 |
+
|
| 119 |
+
class RPS_Lines_Start_With_Bulletpoint(RPSBase):  # noqa
    r""" Whether the lines that start with a bullet point symbol. The
    following set of unicodes are considered a bullet point:
    \u2022 (bullet point), \u2023 (triangular bullet point), \u25B6 (black
    right pointing triangle), \u25C0 (black left pointing triangle),
    \u25E6 (white bullet point), \u25A0 (black square), \u25A1 (white
    square), \u25AA (black small square), \u25AB (white small square),
    \u2013 (en dash)."""
    BULLET_POINT_SYMBOLS = (
        "\u2022",  # bullet point
        "\u2023",  # triangular bullet point
        "\u25B6",  # black right pointing triangle
        "\u25C0",  # black left pointing triangle
        "\u25E6",  # white bullet point
        "\u25A0",  # black square
        "\u25A1",  # white square
        "\u25AA",  # black small square
        "\u25AB",  # white small square
        "\u2013",  # en dash
    )

    __slots__ = ()

    def _process_line(self, text_slice: TextSlice) -> ScoreType:  # noqa
        # leading whitespace is stripped before testing the first character
        is_bullet = text_slice.text.lstrip().startswith(
            self.BULLET_POINT_SYMBOLS
        )
        return (text_slice.start, text_slice.end, float(is_bullet))

    def __call__(self, document: Document) -> SignalType:
        if len(document.raw_lines) == 0:
            # no lines: emit a single null score spanning the document
            return [(0, len(document), None)]

        return [self._process_line(ts) for ts in document.raw_lines]
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/natural_language.py
ADDED
|
@@ -0,0 +1,197 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections import Counter
|
| 2 |
+
import math
|
| 3 |
+
import re
|
| 4 |
+
import sys
|
| 5 |
+
from typing import List, Tuple, Type
|
| 6 |
+
|
| 7 |
+
from core.constants import PRECISION
|
| 8 |
+
from core.data_types import SignalType
|
| 9 |
+
from core.quality_signals.base import RPSBase
|
| 10 |
+
from core.document import Document
|
| 11 |
+
from utilities.register.registry_utils import *
|
| 12 |
+
|
| 13 |
+
__all__ = [
|
| 14 |
+
"register_natural_language_callables",
|
| 15 |
+
"natural_language_schema"
|
| 16 |
+
]
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def natural_language_schema() -> List[Tuple[str, Type]]:
    r""" Return the (signal name, data type) pairs for this module's signals. """
    current_module = sys.modules[__name__]
    return signal_schema(module=current_module)
|
| 23 |
+
|
| 24 |
+
def register_natural_language_callables() -> List[RPSBase]:
    r""" Instantiate every natural language signal class defined in this
    module.

    Returns:
        A list of signal function class instances.
    """
    signal_classes = get_callables_from_module(module=sys.modules[__name__])
    return [signal_cls() for signal_cls in signal_classes]
|
| 36 |
+
|
| 37 |
+
class RPS_Doc_Num_Sentences(RPSBase):  # noqa
    r""" The number of sentences in the content. This is calculated using
    the regex r'\b[^.!?]+[.!?]*' """
    SENT_PATTERN = re.compile(r'\b[^.!?]+[.!?]*', flags=re.UNICODE)

    __slots__ = ()

    def __call__(self, document: Document) -> SignalType:
        r""" count the number of sentences in the content using regex """
        sentence_count = len(self.SENT_PATTERN.findall(document.raw_content))
        return [(0, len(document), float(sentence_count))]
|
| 49 |
+
|
| 50 |
+
class RPS_Doc_Word_Count(RPSBase):  # noqa
    r""" The number of words in the content after normalization. """
    __slots__ = ()

    def __call__(self, document: Document) -> SignalType:
        word_count = document.num_normalized_words
        return [(0, len(document), word_count)]
|
| 57 |
+
|
| 58 |
+
class RPS_Doc_Mean_Word_Length(RPSBase):  # noqa
    r""" The mean length of words in the content after normalization. """
    __slots__ = ()

    def __call__(self, document: Document) -> SignalType:
        num_words = document.num_normalized_words

        # no words: the mean is undefined, score None
        if num_words == 0:
            return [(0, len(document), None)]

        total_chars = float(sum(len(w) for w in document.normalized_words))
        score = round(total_chars / num_words, PRECISION)
        return [(0, len(document), score)]
|
| 71 |
+
|
| 72 |
+
class RPS_Doc_Symbol_To_Word_Ratio(RPSBase):  # noqa
    r""" The ratio of symbols to words in the content. This is analogous to
    the signal used in Gopher. Symbols are defined "#", "...", and "…". """
    SYMBOLS = ("#", "...", "…")

    __slots__ = ()

    def __call__(self, document: Document) -> SignalType:
        num_words = document.num_raw_words

        # no words: the ratio is undefined, score None
        if num_words == 0:
            return [(0, len(document), None)]

        # total occurrences over all symbol variants
        num_symbols = float(sum(
            map(document.raw_content.count, self.SYMBOLS)
        ))

        score = round(num_symbols / num_words, PRECISION)
        return [(0, len(document), score)]
|
| 94 |
+
|
| 95 |
+
class RPS_Doc_Frac_Lines_End_With_Ellipsis(RPSBase):  # noqa
    r""" The fraction of lines that end with an ellipsis, where an ellipsis
    is defined as either "..." or "…". """
    ELLIPSIS_SYMBOLS = ("...", "…")

    __slots__ = ()

    def __call__(self, document: Document) -> SignalType:
        num_lines = len(document.raw_lines)

        # no lines: the fraction is undefined, score None
        if num_lines == 0:
            return [(0, len(document), None)]

        num_ellipsis_lines = float(sum(
            1 for ts in document.raw_lines
            if ts.text.rstrip().endswith(self.ELLIPSIS_SYMBOLS)
        ))

        score = round(num_ellipsis_lines / num_lines, PRECISION)
        return [(0, len(document), score)]
|
| 117 |
+
|
| 118 |
+
class RPS_Doc_Frac_No_Alph_Words(RPSBase):  # noqa
    r""" Fraction of raw words containing no ASCII alphabetical character. """
    ALPH_REGEX = re.compile(r"[a-zA-Z]")

    __slots__ = ()

    def __call__(self, document: Document) -> SignalType:
        word_count = document.num_raw_words

        # undefined for a document without words
        if word_count == 0:
            return [(0, len(document), None)]

        # hoist the bound method out of the loop
        has_alpha = self.ALPH_REGEX.search
        alpha_word_count = float(sum(
            1 for word in document.raw_words if has_alpha(word) is not None
        ))

        frac = round(1.0 - alpha_word_count / word_count, PRECISION)
        return [(0, len(document), frac)]
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
class RPS_Doc_Frac_Unique_Words(RPSBase):  # noqa
    r""" Fraction of distinct words among the normalized words; also known
    as the degeneracy of a text sample. """
    __slots__ = ()

    def __call__(self, document: Document) -> SignalType:
        total = document.num_normalized_words

        # undefined for a document without words
        if total == 0:
            return [(0, len(document), None)]

        unique = len(set(document.normalized_words))
        frac = round(float(unique) / total, PRECISION)
        return [(0, len(document), frac)]
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
class RPS_Doc_Unigram_Entropy(RPSBase):  # noqa
    r""" Shannon entropy (natural log) of the unigram distribution of the
    normalized (punctuation removed, lowercased) content, i.e.
    sum(-c / total * log(c / total)) over the unique-word counts c. A
    diversity measure of the content. """
    __slots__ = ()

    def __call__(self, document: Document) -> SignalType:
        words = document.normalized_words
        if len(words) == 0:
            return [(0, len(document), None)]

        # unigram frequencies
        counts = Counter(words).values()
        total = sum(counts)

        # Counter counts are always >= 1 here, so every term is well-defined
        entropy = 0.0
        for count in counts:
            p = count / total
            entropy -= p * math.log(p)

        return [(0, len(document), round(entropy, PRECISION))]
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
class RPS_Doc_Frac_All_Caps_Words(RPSBase):  # noqa
    r""" Fraction of raw words for which str.isupper is true, i.e. words
    whose cased characters are all uppercase. """
    __slots__ = ()

    def __call__(self, document: Document) -> SignalType:
        word_count = document.num_raw_words

        # undefined for a document without words
        if word_count == 0:
            return [(0, len(document), None)]

        caps_count = float(sum(1 for word in document.raw_words if word.isupper()))
        frac = round(caps_count / word_count, PRECISION)
        return [(0, len(document), frac)]
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/repetitions.py
ADDED
|
@@ -0,0 +1,205 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections import Counter
|
| 2 |
+
import numpy as np
|
| 3 |
+
import sys
|
| 4 |
+
from typing import List, Tuple, Type
|
| 5 |
+
|
| 6 |
+
from core.constants import PRECISION
|
| 7 |
+
from core.quality_signals.base import RPSBase
|
| 8 |
+
from core.document import Document
|
| 9 |
+
from core.data_types import SignalType
|
| 10 |
+
from utilities.register.registry_utils import *
|
| 11 |
+
from utilities.text import form_ngrams
|
| 12 |
+
|
| 13 |
+
__all__ = [
|
| 14 |
+
"register_repetitions_callables",
|
| 15 |
+
"repetitions_schema"
|
| 16 |
+
]
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def repetitions_schema() -> List[Tuple[str, Type]]:
    r""" Return the (signal name, data type) schema of the repetition
    signals defined in this module. """
    this_module = sys.modules[__name__]
    return signal_schema(module=this_module)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def register_repetitions_callables() -> List[RPSBase]:
    r""" Instantiate every repetition signal class defined in this module.

    Returns:
        A list of signal function class instances (RPSBase subclasses).
    """
    return [
        signal_cls()
        for signal_cls in get_callables_from_module(module=sys.modules[__name__])
    ]
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
class Base_RPS_Frac_Chars_In_Top_NGram(RPSBase):  # noqa
    r""" Base class for calculating the fraction of characters in the
    top (most frequent) word N-gram. This operates on the lower-cased,
    punctuation removed content. Subclasses must set NGRAM_SIZE. """
    # the n in n-gram; must be overridden by subclasses
    NGRAM_SIZE: int = None

    __slots__ = []

    def __call__(self, document: Document) -> SignalType:
        r""" Returns a single span covering the whole document whose value
        is the fraction of normalized characters belonging to the most
        frequent n-gram, or 0.0 when no n-gram repeats. """
        if self.NGRAM_SIZE is None:
            raise NotImplementedError(
                "NGRAM_SIZE must be set in the subclass"
            )

        # get the most common ngram; reuse ngrams cached on the document
        # when present, otherwise compute them on the fly
        most_common_ngram = Counter(
            getattr(document, f"norm_{self.NGRAM_SIZE}grams", None)
            or
            form_ngrams(iter(document.normalized_words), self.NGRAM_SIZE)
        ).most_common(1)

        # document too short to form even a single n-gram
        if len(most_common_ngram) == 0:
            return [(0, len(document), 0.0)]

        ngram, count = most_common_ngram[0]

        # the top ngram occurs only once -> no repetition signal
        if count <= 1:
            return [(0, len(document), 0.0)]

        total_chars = sum(len(w) for w in document.normalized_words)

        # guard against division by zero (e.g., all-empty normalized words);
        # mirrors the guard used by Base_RPS_Frac_Chars_In_Dupe_NGrams
        if total_chars == 0:
            return [(0, len(document), 0.0)]

        score = sum(len(w) for w in ngram) * count / total_chars
        score = round(score, PRECISION)
        return [(0, len(document), score)]
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
class RPS_Doc_Frac_Chars_Top_2gram(Base_RPS_Frac_Chars_In_Top_NGram):  # noqa
    r""" Fraction of characters in the most frequent word bigram of the
    lower-cased, punctuation removed content. """
    NGRAM_SIZE = 2
    __slots__ = []


class RPS_Doc_Frac_Chars_Top_3gram(Base_RPS_Frac_Chars_In_Top_NGram):  # noqa
    r""" Fraction of characters in the most frequent word trigram of the
    lower-cased, punctuation removed content. """
    NGRAM_SIZE = 3
    __slots__ = []


class RPS_Doc_Frac_Chars_Top_4gram(Base_RPS_Frac_Chars_In_Top_NGram):  # noqa
    r""" Fraction of characters in the most frequent word 4-gram of the
    lower-cased, punctuation removed content. """
    NGRAM_SIZE = 4
    __slots__ = []
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
class Base_RPS_Frac_Chars_In_Dupe_NGrams(RPSBase):  # noqa
    r""" Base class for calculating the fraction of characters in
    duplicate word N-grams. This operates on the lower-cased, punctuation
    removed content. The function also ensures that characters in overlapping
    ngrams are only counted once. Subclasses must set NGRAM_SIZE. """
    # the n in n-gram; must be overridden by subclasses
    NGRAM_SIZE: int = None
    __slots__ = []

    def __call__(self, document: Document) -> SignalType:
        # fail loudly if the subclass did not configure the ngram size
        if self.NGRAM_SIZE is None:
            raise NotImplementedError(
                "NGRAM_SIZE must be set in the subclass"
            )

        # too few words to form even one ngram -> no duplication possible
        if len(document.normalized_words) < self.NGRAM_SIZE:
            return [(0, len(document), 0.0)]

        # fetch the ngrams from the document if they exist, otherwise
        # compute them
        doc_n_grams = (
            getattr(document, f"norm_{self.NGRAM_SIZE}grams", None)
            or
            tuple(form_ngrams(
                iter(document.normalized_words), self.NGRAM_SIZE
            ))
        )

        # keep only ngrams which occur at least twice
        ngram_dupes = {
            ngram for ngram, count in Counter(doc_n_grams).items() if count > 1
        }

        # one flag per word position; set to 1 when the word is covered by
        # any duplicated ngram, so words in overlapping duplicated ngrams
        # are only counted once
        duplicated_grams = np.zeros(len(document.normalized_words), dtype=int)

        # i is the index of the first word of the current ngram
        i = 0
        for ngram in doc_n_grams:
            if ngram in ngram_dupes:
                duplicated_grams[i: i + self.NGRAM_SIZE] = 1

            i += 1

        # duplicated characters = total length of the flagged words
        word_lengths = np.array(list(map(len, document.normalized_words)))
        chars_duped = np.sum(word_lengths * duplicated_grams)
        total_chars = np.sum(word_lengths)

        # guard against division by zero (all words empty)
        if total_chars == 0:
            return [(0, len(document), 0.0)]

        score = float(chars_duped / total_chars)
        score = round(score, PRECISION)
        return [(0, len(document), score)]
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
class RPS_Doc_Frac_Chars_Dupe_5Grams(  # noqa
    Base_RPS_Frac_Chars_In_Dupe_NGrams
):
    r""" Fraction of characters in duplicate word 5-grams of the lower-cased,
    punctuation removed content; characters in overlapping ngrams are only
    counted once. """
    NGRAM_SIZE = 5
    __slots__ = []


class RPS_Doc_Frac_Chars_Dupe_6Grams(  # noqa
    Base_RPS_Frac_Chars_In_Dupe_NGrams
):
    r""" Fraction of characters in duplicate word 6-grams of the lower-cased,
    punctuation removed content; characters in overlapping ngrams are only
    counted once. """
    NGRAM_SIZE = 6
    __slots__ = []


class RPS_Doc_Frac_Chars_Dupe_7Grams(  # noqa
    Base_RPS_Frac_Chars_In_Dupe_NGrams
):
    r""" Fraction of characters in duplicate word 7-grams of the lower-cased,
    punctuation removed content; characters in overlapping ngrams are only
    counted once. """
    NGRAM_SIZE = 7
    __slots__ = []


class RPS_Doc_Frac_Chars_Dupe_8Grams(  # noqa
    Base_RPS_Frac_Chars_In_Dupe_NGrams
):
    r""" Fraction of characters in duplicate word 8-grams of the lower-cased,
    punctuation removed content; characters in overlapping ngrams are only
    counted once. """
    NGRAM_SIZE = 8
    __slots__ = []


class RPS_Doc_Frac_Chars_Dupe_9Grams(  # noqa
    Base_RPS_Frac_Chars_In_Dupe_NGrams
):
    r""" Fraction of characters in duplicate word 9-grams of the lower-cased,
    punctuation removed content; characters in overlapping ngrams are only
    counted once. """
    NGRAM_SIZE = 9
    __slots__ = []


class RPS_Doc_Frac_Chars_Dupe_10Grams(  # noqa
    Base_RPS_Frac_Chars_In_Dupe_NGrams
):
    r""" Fraction of characters in duplicate word 10-grams of the lower-cased,
    punctuation removed content; characters in overlapping ngrams are only
    counted once. """
    NGRAM_SIZE = 10
    __slots__ = []
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/utils/__init__.py
ADDED
|
File without changes
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/utils/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (225 Bytes). View file
|
|
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/utils/__pycache__/__init__.cpython-38.pyc
ADDED
|
Binary file (178 Bytes). View file
|
|
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/utils/__pycache__/classifiers.cpython-310.pyc
ADDED
|
Binary file (684 Bytes). View file
|
|
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/utils/__pycache__/classifiers.cpython-38.pyc
ADDED
|
Binary file (637 Bytes). View file
|
|
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/utils/__pycache__/content.cpython-310.pyc
ADDED
|
Binary file (1.61 kB). View file
|
|
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/utils/__pycache__/content.cpython-38.pyc
ADDED
|
Binary file (1.51 kB). View file
|
|
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/utils/__pycache__/dsir.cpython-310.pyc
ADDED
|
Binary file (817 Bytes). View file
|
|
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/utils/__pycache__/dsir.cpython-38.pyc
ADDED
|
Binary file (756 Bytes). View file
|
|
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/utils/__pycache__/stop_words.cpython-310.pyc
ADDED
|
Binary file (34.3 kB). View file
|
|
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/utils/__pycache__/stop_words.cpython-38.pyc
ADDED
|
Binary file (56.4 kB). View file
|
|
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/utils/classifiers.py
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from core.document import Document
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def preprocess_quality_classifier(document: Document):
    r""" Preprocesses a document for quality classification by collapsing
    its raw content onto a single line.

    Args:
        document: A document.

    Returns:
        The raw content with newlines replaced by single spaces and
        leading/trailing whitespace stripped.
    """
    # join the individual lines with spaces, then trim both ends
    single_line = " ".join(document.raw_content.splitlines())
    return single_line.strip()
|
cc-multilingual-main/dedup/RedPajama-Data/app/src/core/quality_signals/utils/content.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
from pathlib import Path
|
| 3 |
+
from typing import Dict, Set
|
| 4 |
+
|
| 5 |
+
_DEFAULT_LANGS = ("en", "fr", "it", "es", "de")
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def load_bad_urls_index(bad_urls_dir: Path) -> Dict[str, int]:
    r""" Load the domain -> category id mapping.

    Args:
        bad_urls_dir: Directory containing ``domain_to_category_id.json``.

    Returns:
        A dict mapping each domain to its integer category id.
    """
    index_fp = bad_urls_dir / "domain_to_category_id.json"
    with open(index_fp, "r") as f:
        return json.load(f)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def load_bad_words(bad_words_dir: Path, lang: str) -> Set[str]:
    r""" load the LDNOOBW word list for a given language

    Source:
        https://github.com/LDNOOBW/List-of-Dirty-Naughty-Obscene-and-Otherwise-Bad-Words

    Args:
        bad_words_dir (Path): The path to the resources directory where the
            list is stored
        lang (str): The language for which to fetch the word list

    Returns:
        A set of words; empty for languages without a bundled word list.

    Raises:
        FileNotFoundError: If the word list file for a supported language
            is missing from ``bad_words_dir``.
    """
    # only the default languages ship a word list
    if lang not in _DEFAULT_LANGS:
        return set()

    ldnoobw_fp = bad_words_dir / f"{lang}.txt"

    if not ldnoobw_fp.exists():
        raise FileNotFoundError(f"LDNOOBW word list {ldnoobw_fp} not found!")

    # read with an explicit encoding so results do not depend on the
    # platform default; iterate the file lazily instead of readlines()
    with open(ldnoobw_fp, 'r', encoding="utf-8") as f:
        data = set(ln.strip() for ln in f)

    # blank lines (e.g. a trailing newline) would otherwise put the empty
    # string into the bad-word set
    data.discard("")

    return data
|