Datasets:
Sub-tasks:
semantic-similarity-classification
Languages:
English
Size:
100K<n<1M
Tags:
text segmentation
document segmentation
topic segmentation
topic shift detection
semantic chunking
chunking
License:
Update sent_ids to ids; Update README
Browse files
- README.md +37 -3
- preprocess_util.py +5 -5
- wiki727k.py +3 -3
README.md
CHANGED
|
@@ -32,7 +32,7 @@ dataset_info:
|
|
| 32 |
features:
|
| 33 |
- name: id
|
| 34 |
dtype: string
|
| 35 |
-
- name: sent_ids
|
| 36 |
sequence: string
|
| 37 |
- name: sentences
|
| 38 |
sequence: string
|
|
@@ -45,7 +45,7 @@ dataset_info:
|
|
| 45 |
class_label:
|
| 46 |
names:
|
| 47 |
'0': semantic-continuity
|
| 48 |
-
'1': semantic-
|
| 49 |
splits:
|
| 50 |
- name: train
|
| 51 |
num_bytes: 4754764877
|
|
@@ -59,4 +59,38 @@ dataset_info:
|
|
| 59 |
download_size: 1569504207
|
| 60 |
dataset_size: 5958006898
|
| 61 |
---
|
| 62 |
-
# Dataset Card for Wiki-727K Dataset
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 32 |
features:
|
| 33 |
- name: id
|
| 34 |
dtype: string
|
| 35 |
+
- name: ids
|
| 36 |
sequence: string
|
| 37 |
- name: sentences
|
| 38 |
sequence: string
|
|
|
|
| 45 |
class_label:
|
| 46 |
names:
|
| 47 |
'0': semantic-continuity
|
| 48 |
+
'1': semantic-shift
|
| 49 |
splits:
|
| 50 |
- name: train
|
| 51 |
num_bytes: 4754764877
|
|
|
|
| 59 |
download_size: 1569504207
|
| 60 |
dataset_size: 5958006898
|
| 61 |
---
|
| 62 |
+
# Dataset Card for Wiki-727K Dataset
|
| 63 |
+
|
| 64 |
+
Wiki-727K is a large dataset for text segmentation, automatically extracted and labeled from Wikipedia. It is designed as a sentence-level sequence labeling task for identifying semantic or topic shift in documents.
|
| 65 |
+
|
| 66 |
+
## Dataset Overview
|
| 67 |
+
|
| 68 |
+
- **Train**: 582k
|
| 69 |
+
- **Validation**: 72k
|
| 70 |
+
- **Test**: 73k
|
| 71 |
+
|
| 72 |
+
## Features
|
| 73 |
+
|
| 74 |
+
- **id (string):** Document ID.
|
| 75 |
+
- **ids (sequence of string):** Sentence IDs for each document.
|
| 76 |
+
- **sentences (sequence of string):** Sentences in each document.
|
| 77 |
+
- **titles_mask (sequence of uint8):** Mask indicating if a sentence is a title (optional).
|
| 78 |
+
- **levels (sequence of uint8):** Hierarchical level of each sentence (optional).
|
| 79 |
+
- **labels (sequence of class):** Binary labels: `semantic-continuity` or `semantic-shift`.
|
| 80 |
+
|
| 81 |
+
## Usage
|
| 82 |
+
|
| 83 |
+
The dataset can be loaded using the HuggingFace `datasets` library:
|
| 84 |
+
|
| 85 |
+
```python
|
| 86 |
+
from datasets import load_dataset
|
| 87 |
+
|
| 88 |
+
titled_dataset = load_dataset('saeedabc/wiki727k', num_proc=8, trust_remote_code=True)
|
| 89 |
+
|
| 90 |
+
untitled_dataset = load_dataset('saeedabc/wiki727k', drop_titles=True, num_proc=8, trust_remote_code=True)
|
| 91 |
+
|
| 92 |
+
```
|
| 93 |
+
|
| 94 |
+
## Dataset Details
|
| 95 |
+
|
| 96 |
+
- **Homepage**: [Wiki-727K GitHub](https://github.com/koomri/text-segmentation)
|
preprocess_util.py
CHANGED
|
@@ -25,7 +25,7 @@ def _parse_article(text: str, id: str, drop_titles: bool = False, prepend_title_
|
|
| 25 |
|
| 26 |
### Parse the sections into sentences
|
| 27 |
doc_id = id
|
| 28 |
-
|
| 29 |
doc_sentences = []
|
| 30 |
doc_levels = []
|
| 31 |
doc_titles_mask = []
|
|
@@ -58,8 +58,8 @@ def _parse_article(text: str, id: str, drop_titles: bool = False, prepend_title_
|
|
| 58 |
|
| 59 |
# Add the title as a single sentence
|
| 60 |
if not drop_titles and non_empty(title_str):
|
| 61 |
-
# doc_sent_ids.append(f'{doc_id}_sec{sec_idx}_title')
|
| 62 |
-
|
| 63 |
doc_sentences.append(title_str)
|
| 64 |
doc_titles_mask.append(1)
|
| 65 |
doc_levels.append(level)
|
|
@@ -67,7 +67,7 @@ def _parse_article(text: str, id: str, drop_titles: bool = False, prepend_title_
|
|
| 67 |
|
| 68 |
# Add the sentences
|
| 69 |
for sent_idx, sent in enumerate(sentences):
|
| 70 |
-
|
| 71 |
doc_sentences.append(sent)
|
| 72 |
doc_titles_mask.append(0)
|
| 73 |
doc_levels.append(level)
|
|
@@ -75,7 +75,7 @@ def _parse_article(text: str, id: str, drop_titles: bool = False, prepend_title_
|
|
| 75 |
|
| 76 |
out = {
|
| 77 |
'id': doc_id,
|
| 78 |
-
'sent_ids': doc_sent_ids,
|
| 79 |
'sentences': doc_sentences,
|
| 80 |
'titles_mask': doc_titles_mask,
|
| 81 |
'levels': doc_levels,
|
|
|
|
| 25 |
|
| 26 |
### Parse the sections into sentences
|
| 27 |
doc_id = id
|
| 28 |
+
doc_ids = []
|
| 29 |
doc_sentences = []
|
| 30 |
doc_levels = []
|
| 31 |
doc_titles_mask = []
|
|
|
|
| 58 |
|
| 59 |
# Add the title as a single sentence
|
| 60 |
if not drop_titles and non_empty(title_str):
|
| 61 |
+
# doc_ids.append(f'{doc_id}_sec{sec_idx}_title')
|
| 62 |
+
doc_ids.append(f'{sec_idx}')
|
| 63 |
doc_sentences.append(title_str)
|
| 64 |
doc_titles_mask.append(1)
|
| 65 |
doc_levels.append(level)
|
|
|
|
| 67 |
|
| 68 |
# Add the sentences
|
| 69 |
for sent_idx, sent in enumerate(sentences):
|
| 70 |
+
doc_ids.append(f'{sec_idx}_{sent_idx}')
|
| 71 |
doc_sentences.append(sent)
|
| 72 |
doc_titles_mask.append(0)
|
| 73 |
doc_levels.append(level)
|
|
|
|
| 75 |
|
| 76 |
out = {
|
| 77 |
'id': doc_id,
|
| 78 |
+
'ids': doc_ids,
|
| 79 |
'sentences': doc_sentences,
|
| 80 |
'titles_mask': doc_titles_mask,
|
| 81 |
'levels': doc_levels,
|
wiki727k.py
CHANGED
|
@@ -18,7 +18,7 @@ See https://github.com/koomri/text-segmentation for more information.
|
|
| 18 |
|
| 19 |
Usage:
|
| 20 |
>>> from datasets import load_dataset
|
| 21 |
-
>>> dataset = load_dataset('saeedabc/wiki727k',
|
| 22 |
"""
|
| 23 |
|
| 24 |
|
|
@@ -121,7 +121,7 @@ class Wiki727k(datasets.GeneratorBasedBuilder):
|
|
| 121 |
features = datasets.Features(
|
| 122 |
{
|
| 123 |
"id": datasets.Value("string"), # document id --> [doc0, doc1, ...]
|
| 124 |
-
"sent_ids": datasets.Sequence( # document sentence ids --> [[doc0_sent0, doc0_sent1, ...], ...]
|
| 125 |
datasets.Value("string")
|
| 126 |
),
|
| 127 |
"sentences": datasets.Sequence(
|
|
@@ -134,7 +134,7 @@ class Wiki727k(datasets.GeneratorBasedBuilder):
|
|
| 134 |
datasets.Value("uint8")
|
| 135 |
),
|
| 136 |
"labels": datasets.Sequence(
|
| 137 |
-
datasets.ClassLabel(num_classes=2, names=['semantic-continuity', 'semantic-
|
| 138 |
),
|
| 139 |
}
|
| 140 |
)
|
|
|
|
| 18 |
|
| 19 |
Usage:
|
| 20 |
>>> from datasets import load_dataset
|
| 21 |
+
>>> dataset = load_dataset('saeedabc/wiki727k', num_proc=8, trust_remote_code=True)
|
| 22 |
"""
|
| 23 |
|
| 24 |
|
|
|
|
| 121 |
features = datasets.Features(
|
| 122 |
{
|
| 123 |
"id": datasets.Value("string"), # document id --> [doc0, doc1, ...]
|
| 124 |
+
"ids": datasets.Sequence( # document sentence ids --> [[doc0_sent0, doc0_sent1, ...], ...]
|
| 125 |
datasets.Value("string")
|
| 126 |
),
|
| 127 |
"sentences": datasets.Sequence(
|
|
|
|
| 134 |
datasets.Value("uint8")
|
| 135 |
),
|
| 136 |
"labels": datasets.Sequence(
|
| 137 |
+
datasets.ClassLabel(num_classes=2, names=['semantic-continuity', 'semantic-shift'])
|
| 138 |
),
|
| 139 |
}
|
| 140 |
)
|