Datasets:

Modalities:
Text
Formats:
parquet
Languages:
Catalan
DOI:
Libraries:
Datasets
pandas
License:
mmarimon committed on
Commit
5c7e0db
·
1 Parent(s): 9c1a931

Upload caBreu.py

Browse files
Files changed (1) hide show
  1. caBreu.py +129 -0
caBreu.py ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Loading script for the caBreu dataset.
import json

import datasets

logger = datasets.logging.get_logger(__name__)

# Citation not yet published for this dataset; kept empty on purpose.
_CITATION = """"""

_DESCRIPTION = """caBreu is a summarization dataset.
It is extracted from a newswire corpus crawled from the Catalan News Agency ([Agència Catalana de Notícies; ACN](https://www.acn.cat/)), the Catalan news portal [VilaWeb](https://www.vilaweb.cat/) and [NacióDigital](https://www.naciodigital.cat/)

The corpus consists of 3,000 articles and extreme, abstractive and extractive summaries manually done by three annotators.
"""

_HOMEPAGE = """https://github.com/TeMU-BSC/seq-to-seq-catalan"""

# Base URL of the dataset repository; split files are resolved relative to it.
_URL = "https://huggingface.co/datasets/projecte-aina/caBreu/resolve/main/"
_TRAIN_FILE = "train.jsonl"
_VAL_FILE = "dev.jsonl"
_TEST_FILE = "test.jsonl"
class caBreuConfig(datasets.BuilderConfig):
    """Builder config for the caBreu dataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for caBreu.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        # BUG FIX: the original called super(CaSumConfig, self) — CaSumConfig
        # is not defined anywhere in this module, so instantiating the config
        # raised NameError. Plain super() is the correct Python 3 form.
        super().__init__(**kwargs)
31
+
32
+
33
class caBreu(datasets.GeneratorBasedBuilder):
    """caBreu Dataset.

    Catalan news summarization: each article carries extreme, abstractive and
    extractive summaries written by three annotators (a1, a2, a3).
    """

    BUILDER_CONFIGS = [
        # BUG FIX: the original instantiated CaSumConfig, an undefined name
        # (NameError at import time). The config class in this module is
        # caBreuConfig.
        caBreuConfig(
            name="caBreu",
            version=datasets.Version("1.0.0"),
            description="caBreu dataset",
        ),
    ]

    @staticmethod
    def _annotator_features():
        """Return a fresh feature dict for one summary type (three annotators)."""
        return {
            "a1": datasets.Value("string"),
            "a2": datasets.Value("string"),
            "a3": datasets.Value("string"),
        }

    def _info(self):
        """Return dataset metadata (description, feature schema, citation).

        BUG FIX: the original features dict was missing the comma after every
        entry (SyntaxError), and declared ``datasets.Value("list")`` for
        ``category`` — "list" is not a valid Value dtype.
        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "subtitle": datasets.Value("string"),
                    "content": datasets.Value("string"),
                    # NOTE(review): assumes `category` is a list of strings in
                    # the jsonl — TODO confirm against the raw data files.
                    "category": datasets.Sequence(datasets.Value("string")),
                    "source": datasets.Value("string"),
                    "summaries": {
                        "extreme": self._annotator_features(),
                        "abstractive": self._annotator_features(),
                        "extractive": self._annotator_features(),
                    },
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for the train/dev/test jsonl files."""
        urls_to_download = {
            "train": f"{_URL}{_TRAIN_FILE}",
            "dev": f"{_URL}{_VAL_FILE}",
            "test": f"{_URL}{_TEST_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form.

        Yields (key, example) pairs, one per jsonl line, keyed by the
        article's own ``id`` field.
        """
        logger.info("generating examples from = %s", filepath)
        # Explicit encoding: the corpus is Catalan text, so don't rely on the
        # platform default.
        with open(filepath, encoding="utf-8") as f:
            for row in f:
                article = json.loads(row)
                summaries = article["summaries"]
                # BUG FIX: the original read only summaries['extreme'] and
                # emitted those same three annotations for 'abstractive' and
                # 'extractive' as well, discarding two thirds of the data.
                # (It also let the enumerate index be clobbered by the
                # article id — the id alone is the key now.)
                yield article["id"], {
                    "id": article["id"],
                    "title": article["title"],
                    "subtitle": article["subtitle"],
                    "content": article["content"],
                    "category": article["category"],
                    "source": article["source"],
                    "summaries": {
                        summ_type: {
                            annotator: summaries[summ_type][annotator]
                            for annotator in ("a1", "a2", "a3")
                        }
                        for summ_type in ("extreme", "abstractive", "extractive")
                    },
                }