Datasets:
Sub-tasks:
semantic-similarity-classification
Languages:
English
Size:
10K<n<100K
Tags:
text segmentation
document segmentation
topic segmentation
topic shift detection
semantic chunking
chunking
License:
Update column name sent_ids to ids
Browse files
Files changed:
- README.md +8 -8
- preprocess_util.py +3 -3
- wikisection.py +1 -1
README.md
CHANGED
|
@@ -49,16 +49,16 @@ dataset_info:
|
|
| 49 |
'1': semantic-break
|
| 50 |
splits:
|
| 51 |
- name: train
|
| 52 |
- num_bytes:
|
| 53 |
num_examples: 13679
|
| 54 |
- name: validation
|
| 55 |
- num_bytes:
|
| 56 |
num_examples: 1953
|
| 57 |
- name: test
|
| 58 |
- num_bytes:
|
| 59 |
num_examples: 3907
|
| 60 |
download_size: 94042594
|
| 61 |
- dataset_size:
|
| 62 |
- config_name: en_disease
|
| 63 |
features:
|
| 64 |
- name: id
|
|
@@ -79,15 +79,15 @@ dataset_info:
|
|
| 79 |
'1': semantic-break
|
| 80 |
splits:
|
| 81 |
- name: train
|
| 82 |
- num_bytes:
|
| 83 |
num_examples: 2513
|
| 84 |
- name: validation
|
| 85 |
- num_bytes:
|
| 86 |
num_examples: 359
|
| 87 |
- name: test
|
| 88 |
- num_bytes:
|
| 89 |
num_examples: 718
|
| 90 |
download_size: 94042594
|
| 91 |
- dataset_size:
|
| 92 |
---
|
| 93 |
# Dataset Card for WikiSection (en_city, en_disease) Dataset
|
|
|
|
| 49 |
'1': semantic-break
|
| 50 |
splits:
|
| 51 |
- name: train
|
| 52 |
+ num_bytes: 99374519
|
| 53 |
num_examples: 13679
|
| 54 |
- name: validation
|
| 55 |
+ num_bytes: 14819645
|
| 56 |
num_examples: 1953
|
| 57 |
- name: test
|
| 58 |
+ num_bytes: 29405354
|
| 59 |
num_examples: 3907
|
| 60 |
download_size: 94042594
|
| 61 |
+ dataset_size: 143599518
|
| 62 |
- config_name: en_disease
|
| 63 |
features:
|
| 64 |
- name: id
|
|
|
|
| 79 |
'1': semantic-break
|
| 80 |
splits:
|
| 81 |
- name: train
|
| 82 |
+ num_bytes: 21279366
|
| 83 |
num_examples: 2513
|
| 84 |
- name: validation
|
| 85 |
+ num_bytes: 3029140
|
| 86 |
num_examples: 359
|
| 87 |
- name: test
|
| 88 |
+ num_bytes: 5778276
|
| 89 |
num_examples: 718
|
| 90 |
download_size: 94042594
|
| 91 |
+ dataset_size: 30086782
|
| 92 |
---
|
| 93 |
# Dataset Card for WikiSection (en_city, en_disease) Dataset
|
preprocess_util.py
CHANGED
|
@@ -72,7 +72,7 @@ def parse_split(filepath: str, drop_titles: bool = False, sent_tokenize_method:
|
|
| 72 |
doc = {
|
| 73 |
'id': id,
|
| 74 |
'title': title,
|
| 75 |
- 'sent_ids': [],
|
| 76 |
'sentences': [],
|
| 77 |
'titles_mask': [],
|
| 78 |
'labels': [],
|
|
@@ -93,14 +93,14 @@ def parse_split(filepath: str, drop_titles: bool = False, sent_tokenize_method:
|
|
| 93 |
# Add the title as a single sentence
|
| 94 |
if not drop_titles and sec_title:
|
| 95 |
# if not drop_titles and non_empty(sec_title):
|
| 96 |
- doc['sent_ids'].append(f'{sec_idx}')
|
| 97 |
doc['sentences'].append(sec_title)
|
| 98 |
doc['titles_mask'].append(1)
|
| 99 |
doc['labels'].append(0)
|
| 100 |
|
| 101 |
# Add the sentences
|
| 102 |
for sent_idx, sent in enumerate(sentences):
|
| 103 |
- doc['sent_ids'].append(f'{sec_idx}_{sent_idx}')
|
| 104 |
doc['sentences'].append(sent)
|
| 105 |
doc['titles_mask'].append(0)
|
| 106 |
doc['labels'].append(1 if sent_idx == len(sentences) - 1 else 0)
|
|
|
|
| 72 |
doc = {
|
| 73 |
'id': id,
|
| 74 |
'title': title,
|
| 75 |
+ 'ids': [],
|
| 76 |
'sentences': [],
|
| 77 |
'titles_mask': [],
|
| 78 |
'labels': [],
|
|
|
|
| 93 |
# Add the title as a single sentence
|
| 94 |
if not drop_titles and sec_title:
|
| 95 |
# if not drop_titles and non_empty(sec_title):
|
| 96 |
+ doc['ids'].append(f'{sec_idx}')
|
| 97 |
doc['sentences'].append(sec_title)
|
| 98 |
doc['titles_mask'].append(1)
|
| 99 |
doc['labels'].append(0)
|
| 100 |
|
| 101 |
# Add the sentences
|
| 102 |
for sent_idx, sent in enumerate(sentences):
|
| 103 |
+ doc['ids'].append(f'{sec_idx}_{sent_idx}')
|
| 104 |
doc['sentences'].append(sent)
|
| 105 |
doc['titles_mask'].append(0)
|
| 106 |
doc['labels'].append(1 if sent_idx == len(sentences) - 1 else 0)
|
wikisection.py
CHANGED
|
@@ -110,7 +110,7 @@ class WikiSection(datasets.GeneratorBasedBuilder):
|
|
| 110 |
{
|
| 111 |
"id": datasets.Value("string"), # document id --> [doc0, doc1, ...]
|
| 112 |
"title": datasets.Value("string"),
|
| 113 |
- "sent_ids": datasets.Sequence(
|
| 114 |
datasets.Value("string")
|
| 115 |
),
|
| 116 |
"sentences": datasets.Sequence(
|
|
|
|
| 110 |
{
|
| 111 |
"id": datasets.Value("string"), # document id --> [doc0, doc1, ...]
|
| 112 |
"title": datasets.Value("string"),
|
| 113 |
+ "ids": datasets.Sequence( # document sentence ids --> [[doc0_sent0, doc0_sent1, ...], ...]
|
| 114 |
datasets.Value("string")
|
| 115 |
),
|
| 116 |
"sentences": datasets.Sequence(
|