holylovenia committed on
Commit
94afd07
·
verified ·
1 Parent(s): 5754736

Upload crosssum.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. crosssum.py +140 -0
crosssum.py ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ from typing import Dict, List, Tuple
16
+
17
+ import datasets
18
+
19
+ from seacrowd.utils import schemas
20
+ from seacrowd.utils.configs import SEACrowdConfig
21
+ from seacrowd.utils.constants import Licenses, Tasks
22
+
23
+ _CITATION = """
24
+ @inproceedings{bhattacharjee-etal-2023-crosssum,
25
+ author = {Bhattacharjee, Abhik and Hasan, Tahmid and Ahmad, Wasi Uddin and Li, Yuan-Fang and Kang, Yong-Bin and Shahriyar, Rifat},
26
+ title = {CrossSum: Beyond English-Centric Cross-Lingual Summarization for 1,500+ Language Pairs},
27
+ booktitle = {Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics},
28
+ publisher = {Association for Computational Linguistics},
29
+ year = {2023},
30
+ url = {https://aclanthology.org/2023.acl-long.143},
31
+ doi = {10.18653/v1/2023.acl-long.143},
32
+ pages = {2541--2564},
33
+ }
34
+ """
35
+
36
+ _LOCAL = False
37
+ _LANGUAGES = ["ind", "mya", "vie"]
38
+ _DATASETNAME = "crosssum"
39
+ _DESCRIPTION = """
40
+ This is a large-scale cross-lingual summarization dataset containing article-summary samples in 1,500+ language pairs,
41
+ including pairs with the Burmese, Indonesian and Vietnamese languages. Articles in the first language are assigned
42
+ summaries in the second language.
43
+ """
44
+
45
+ _HOMEPAGE = "https://huggingface.co/datasets/csebuetnlp/CrossSum"
46
+ _LICENSE = Licenses.CC_BY_NC_SA_4_0.value
47
+ _URL = "https://huggingface.co/datasets/csebuetnlp/CrossSum"
48
+
49
+
50
+ _SUPPORTED_TASKS = [Tasks.CROSS_LINGUAL_SUMMARIZATION]
51
+ _SOURCE_VERSION = "1.0.0"
52
+ _SEACROWD_VERSION = "2024.06.20"
53
+
54
+
55
+ class CrossSumDataset(datasets.GeneratorBasedBuilder):
56
+ """Dataset of cross-lingual article-summary samples."""
57
+
58
+ SUBSETS = [
59
+ "ind_mya",
60
+ "ind_vie",
61
+ "mya_ind",
62
+ "mya_vie",
63
+ "vie_mya",
64
+ "vie_ind",
65
+ ]
66
+ LANG_CODE_MAPPER = {"ind": "indonesian", "mya": "burmese", "vie": "vietnamese"}
67
+
68
+ BUILDER_CONFIGS = [
69
+ SEACrowdConfig(
70
+ name=f"{_DATASETNAME}_{subset}_source",
71
+ version=datasets.Version(_SOURCE_VERSION),
72
+ description=f"{_DATASETNAME} source schema for {subset} subset",
73
+ schema="source",
74
+ subset_id=f"{_DATASETNAME}_{subset}",
75
+ )
76
+ for subset in SUBSETS
77
+ ] + [
78
+ SEACrowdConfig(
79
+ name=f"{_DATASETNAME}_{subset}_seacrowd_t2t",
80
+ version=datasets.Version(_SEACROWD_VERSION),
81
+ description=f"{_DATASETNAME} SEACrowd schema for {subset} subset",
82
+ schema="seacrowd_t2t",
83
+ subset_id=f"{_DATASETNAME}_{subset}",
84
+ )
85
+ for subset in SUBSETS
86
+ ]
87
+
88
+ DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_ind_mya_source"
89
+
90
+ def _info(self) -> datasets.DatasetInfo:
91
+ if self.config.schema == "source":
92
+ features = datasets.Features(
93
+ {
94
+ "source_url": datasets.Value("string"),
95
+ "target_url": datasets.Value("string"),
96
+ "summary": datasets.Value("string"),
97
+ "text": datasets.Value("string"),
98
+ }
99
+ )
100
+
101
+ elif self.config.schema == "seacrowd_t2t":
102
+ features = schemas.text2text_features
103
+
104
+ return datasets.DatasetInfo(
105
+ description=_DESCRIPTION,
106
+ features=features,
107
+ homepage=_HOMEPAGE,
108
+ license=_LICENSE,
109
+ citation=_CITATION,
110
+ )
111
+
112
+ def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
113
+ """Returns SplitGenerators."""
114
+ # dl_manager not used since dataloader uses HF 'load_dataset'
115
+ return [
116
+ datasets.SplitGenerator(name=split, gen_kwargs={"split": split._name})
117
+ for split in (
118
+ datasets.Split.TRAIN,
119
+ datasets.Split.VALIDATION,
120
+ datasets.Split.TEST,
121
+ )
122
+ ]
123
+
124
+ def _load_hf_data_from_remote(self, split: str) -> datasets.DatasetDict:
125
+ """Load dataset from HuggingFace."""
126
+ source_lang = self.LANG_CODE_MAPPER[self.config.subset_id.split("_")[-2]]
127
+ target_lang = self.LANG_CODE_MAPPER[self.config.subset_id.split("_")[-1]]
128
+ HF_REMOTE_REF = "/".join(_URL.split("/")[-2:])
129
+ _hf_dataset_source = datasets.load_dataset(HF_REMOTE_REF, f"{source_lang}-{target_lang}", split=split)
130
+ return _hf_dataset_source
131
+
132
+ def _generate_examples(self, split: str) -> Tuple[int, Dict]:
133
+ """Yields examples as (key, example) tuples."""
134
+ data = self._load_hf_data_from_remote(split)
135
+ for index, row in enumerate(data):
136
+ if self.config.schema == "source":
137
+ example = row
138
+ elif self.config.schema == "seacrowd_t2t":
139
+ example = {"id": str(index), "text_1": row["text"], "text_2": row["summary"], "text_1_name": "document", "text_2_name": "summary"}
140
+ yield index, example