Uri committed on
Commit
8a2bf29
·
1 Parent(s): bf19c4f

Update fs.py

Browse files
Files changed (1) hide show
  1. fs.py +50 -97
fs.py CHANGED
@@ -18,11 +18,11 @@ from citations_and_descriptions import (
18
  class FSConfig(datasets.BuilderConfig):
19
  """BuilderConfig for FS."""
20
 
21
- def __init__(self, data_url, citation, url, max_source_length, tokenizer, **kwargs):
22
  """BuilderConfig for FS.
23
  Args:
24
- features: `list[string]`, list of the features that will appear in the
25
- feature dict. Should not include "label".
26
  data_url: `string`, url to download the zip file from.
27
  citation: `string`, citation for the data set.
28
  url: `string`, url for information about the data set.
@@ -32,29 +32,14 @@ class FSConfig(datasets.BuilderConfig):
32
  **kwargs: keyword arguments forwarded to super.
33
  """
34
  super(FSConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
35
- self.features = ["pid", self.source_key, self.target_key]
36
  self.data_url = data_url
37
  self.citation = citation
38
  self.url = url
39
- self.max_source_length = max_source_length
40
- self.tokenizer = tokenizer
41
-
42
- def remove_redundant_fields(self, example):
43
- for field in self.redundant_fields:
44
- del example[field]
45
-
46
- @abstractmethod
47
- def postprocess(self, s):
48
- pass
49
 
50
  @property
51
  @abstractmethod
52
- def original_source_key(self):
53
- pass
54
-
55
- @property
56
- @abstractmethod
57
- def original_target_key(self):
58
  pass
59
 
60
  @property
@@ -73,25 +58,19 @@ class FSConfig(datasets.BuilderConfig):
73
  pass
74
 
75
  @property
 
76
  def source_key(self):
77
- return "source"
78
-
79
- @property
80
- def target_key(self):
81
- return "target"
82
 
83
  @property
84
  @abstractmethod
85
- def id_key(self):
86
  pass
87
 
88
- @property
89
- def redundant_fields(self):
90
- return []
91
-
92
- def process(self, example): # TODO perhaps we can use this for base
93
- example[self.source_key] = example[self.original_source_key].strip()
94
- example[self.target_key] = example[self.original_target_key].strip() if example[self.original_target_key] else None
95
 
96
 
97
  class ScrollsConfig(FSConfig):
@@ -99,11 +78,11 @@ class ScrollsConfig(FSConfig):
99
  super().__init__(**kwargs)
100
 
101
  @property
102
- def original_source_key(self):
103
  return "input"
104
 
105
  @property
106
- def original_target_key(self):
107
  return "output"
108
 
109
  @property
@@ -122,89 +101,70 @@ class ScrollsConfig(FSConfig):
122
  def id_key(self):
123
  return "pid"
124
 
125
- @property
126
- def redundant_fields(self):
127
- return [self.original_source_key, self.original_target_key, "id"]
128
-
129
-
130
-
131
- def process_input(self, s):
132
- prefix = s.strip()
133
- suffix = "\nSummarize the above:"
134
- prefix = _truncate_prefix(prefix, suffix, self.max_source_length, self.tokenizer)
135
- return prefix + suffix
136
-
137
 
138
  class ArxivConfig(FSConfig):
139
- # TODO properties etc...
140
  def __init__(self, **kwargs):
141
  super().__init__(**kwargs)
142
- self.train_file = "train.txt"
143
- self.validation_file = "val.txt"
144
- self.test_file = "test.txt"
145
 
146
- self.input_key = "article_text"
147
- self.output_key = "abstract_text"
148
- self.id_key = "article_id"
149
- self.redundant_fields = [self.input_key, self.output_key, self.id_key, 'labels', 'section_names', 'sections']
150
 
151
- def process_input(self, s):
152
- prefix = ' '.join(s)
153
- suffix = "\nSummarize the above:"
154
- prefix = _truncate_prefix(prefix, suffix, self.max_source_length, self.tokenizer)
155
- return prefix + suffix
156
 
157
- def process_output(self, s):
158
- # TODO remove "<S>" and "</S>" ?
159
- return ' '.join(s).replace("<S>", "").replace("</S>", "")
160
 
 
 
 
161
 
162
- def _truncate_prefix(prefix, suffix, max_source_length, tokenizer):
163
- encoded_input = tokenizer.encode(prefix + suffix)
 
164
 
165
- while len(encoded_input) > max_source_length:
166
- overflow = len(encoded_input) - max_source_length
167
- tokenized_prefix = tokenizer.encode(prefix, add_special_tokens=False)
168
- if overflow > 0:
169
- tokenized_prefix = tokenized_prefix[:-overflow]
170
- prefix = tokenizer.decode(tokenized_prefix, skip_special_tokens=False).strip()
171
- encoded_input = tokenizer.encode(prefix + suffix)
172
 
173
- return prefix
 
 
 
 
174
 
175
 
176
  class Fs(datasets.GeneratorBasedBuilder):
177
  """The SCROLLS benchmark."""
178
-
179
  DEFAULT_WRITER_BATCH_SIZE = 1000 # because Narrative QA is a rather large dataset
180
  BUILDER_CONFIGS = [
181
  ScrollsConfig(
 
182
  name="summ_screen_fd_debug",
183
  description=_SUMM_SCREEN_DESCRIPTION,
184
  data_url="https://huggingface.co/datasets/tau/fs/resolve/main/data/summ_screen_fd_debug.zip",
185
  citation=_SUMM_SCREEN_CITATION,
186
- url="https://github.com/mingdachen/SummScreen",
187
- max_source_length=None,
188
- tokenizer=None,
189
  ),
190
  ScrollsConfig(
 
191
  name="gov_report",
192
  description=_GOV_REPORT_CITATION,
193
  data_url="https://huggingface.co/datasets/tau/fs/resolve/main/data/gov_report.zip",
194
  citation=_GOV_REPORT_DESCRIPTION,
195
- url="https://gov-report-data.github.io/",
196
- max_source_length=None,
197
- tokenizer=None,
 
 
 
 
 
 
198
  ),
199
- # ArxivConfig(
200
- # name="arxiv_debug",
201
- # description=_ARXIV_CITATION,
202
- # data_url="https://huggingface.co/datasets/tau/fs/resolve/main/data/arxiv_debug.zip",
203
- # citation=_ARXIV_DESCRIPTION,
204
- # url="https://github.com/armancohan/long-summarization",
205
- # max_source_length=None,
206
- # tokenizer=None,
207
- # ),
208
  ]
209
 
210
  def _info(self):
@@ -251,12 +211,5 @@ class Fs(datasets.GeneratorBasedBuilder):
251
  with open(data_file, encoding="utf-8") as f:
252
  for line in f:
253
  row = json.loads(line)
254
-
255
- row["pid"] = row[self.config.id_key]
256
  self.config.process(row)
257
- self.config.remove_redundant_fields(row)
258
- yield row["pid"], row
259
-
260
-
261
- def _get_task_name_from_data_url(data_url):
262
- return data_url.split("/")[-1].split(".")[0]
 
18
  class FSConfig(datasets.BuilderConfig):
19
  """BuilderConfig for FS."""
20
 
21
+ def __init__(self, additional_features, data_url, citation, url, **kwargs):
22
  """BuilderConfig for FS.
23
  Args:
24
+ additional_features: `list[string]`, list of the features that will appear in the feature dict
25
+ additionally to the self.id_key, self.source_key and self.target_key. Should not include "label".
26
  data_url: `string`, url to download the zip file from.
27
  citation: `string`, citation for the data set.
28
  url: `string`, url for information about the data set.
 
32
  **kwargs: keyword arguments forwarded to super.
33
  """
34
  super(FSConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
35
+ self.features = [self.id_key, self.source_key, self.target_key] + additional_features
36
  self.data_url = data_url
37
  self.citation = citation
38
  self.url = url
 
 
 
 
 
 
 
 
 
 
39
 
40
  @property
41
  @abstractmethod
42
+ def id_key(self):
 
 
 
 
 
43
  pass
44
 
45
  @property
 
58
  pass
59
 
60
  @property
61
+ @abstractmethod
62
  def source_key(self):
63
+ pass
 
 
 
 
64
 
65
  @property
66
  @abstractmethod
67
+ def target_key(self):
68
  pass
69
 
70
+ def process(self, example):
71
+ example[self.source_key] = example[self.source_key].strip()
72
+ example[self.target_key] = example[self.target_key].strip() if example[
73
+ self.target_key] else None
 
 
 
74
 
75
 
76
  class ScrollsConfig(FSConfig):
 
78
  super().__init__(**kwargs)
79
 
80
  @property
81
+ def source_key(self):
82
  return "input"
83
 
84
  @property
85
+ def target_key(self):
86
  return "output"
87
 
88
  @property
 
101
  def id_key(self):
102
  return "pid"
103
 
 
 
 
 
 
 
 
 
 
 
 
 
104
 
105
  class ArxivConfig(FSConfig):
 
106
  def __init__(self, **kwargs):
107
  super().__init__(**kwargs)
 
 
 
108
 
109
+ @property
110
+ def id_key(self):
111
+ return "article_id"
 
112
 
113
+ @property
114
+ def source_key(self):
115
+ return "article_text"
 
 
116
 
117
+ @property
118
+ def target_key(self):
119
+ return "abstract_text"
120
 
121
+ @property
122
+ def train_file(self):
123
+ return "train.txt"
124
 
125
+ @property
126
+ def validation_file(self):
127
+ return "val.txt"
128
 
129
+ @property
130
+ def test_file(self):
131
+ return "test.txt"
 
 
 
 
132
 
133
+ def process(self, example):
134
+ example[self.source_key] = " ".join(example[self.source_key])
135
+ example[self.target_key] = " ".join(example[self.target_key]).replace("<S>", "").replace("</S>", "")
136
+ del example["labels"]
137
+ super().process(example)
138
 
139
 
140
  class Fs(datasets.GeneratorBasedBuilder):
141
  """The SCROLLS benchmark."""
 
142
  DEFAULT_WRITER_BATCH_SIZE = 1000 # because Narrative QA is a rather large dataset
143
  BUILDER_CONFIGS = [
144
  ScrollsConfig(
145
+ additional_features=["id"],
146
  name="summ_screen_fd_debug",
147
  description=_SUMM_SCREEN_DESCRIPTION,
148
  data_url="https://huggingface.co/datasets/tau/fs/resolve/main/data/summ_screen_fd_debug.zip",
149
  citation=_SUMM_SCREEN_CITATION,
150
+ url="https://github.com/mingdachen/SummScreen"
 
 
151
  ),
152
  ScrollsConfig(
153
+ additional_features=["id"],
154
  name="gov_report",
155
  description=_GOV_REPORT_CITATION,
156
  data_url="https://huggingface.co/datasets/tau/fs/resolve/main/data/gov_report.zip",
157
  citation=_GOV_REPORT_DESCRIPTION,
158
+ url="https://gov-report-data.github.io/"
159
+ ),
160
+ ArxivConfig(
161
+ additional_features=['section_names', 'sections'],
162
+ name="arxiv_debug",
163
+ description=_ARXIV_CITATION,
164
+ data_url="https://huggingface.co/datasets/tau/fs/resolve/main/data/arxiv_debug.zip",
165
+ citation=_ARXIV_DESCRIPTION,
166
+ url="https://github.com/armancohan/long-summarization"
167
  ),
 
 
 
 
 
 
 
 
 
168
  ]
169
 
170
  def _info(self):
 
211
  with open(data_file, encoding="utf-8") as f:
212
  for line in f:
213
  row = json.loads(line)
 
 
214
  self.config.process(row)
215
+ yield row[self.config.id_key], row