gharshit412 committed on
Commit
9218915
·
verified ·
1 Parent(s): c394758

Delete DeepScholarBench.py

Browse files
Files changed (1) hide show
  1. DeepScholarBench.py +0 -337
DeepScholarBench.py DELETED
@@ -1,337 +0,0 @@
1
- """
2
- DeepScholarBench: Academic papers with extracted related works sections and recovered citations.
3
-
4
- This dataset contains academic papers from ArXiv with their related works sections and recovered citations,
5
- providing a rich resource for research generation and citation analysis tasks.
6
- """
7
-
8
import csv
import json
from typing import Any, Dict, List, Optional

import datasets
11
-
12
-
13
- # Dataset URLs - these would typically point to hosted files
14
- _DESCRIPTION = """\
15
- A comprehensive dataset of academic papers with extracted related works sections and recovered citations,
16
- designed for training and evaluating research generation systems.
17
-
18
- This dataset contains 63 academic papers from ArXiv with their related works sections and 1630 recovered citations,
19
- providing a rich resource for research generation and citation analysis tasks.
20
- """
21
-
22
- _CITATION = """\
23
- @misc{patel2025deepscholarbenchlivebenchmarkautomated,
24
- title={DeepScholar-Bench: A Live Benchmark and Automated Evaluation for Generative Research Synthesis},
25
- author={Liana Patel and Negar Arabzadeh and Harshit Gupta and Ankita Sundar and Ion Stoica and Matei Zaharia and Carlos Guestrin},
26
- year={2025},
27
- eprint={2412.19698},
28
- archivePrefix={arXiv},
29
- primaryClass={cs.CL},
30
- url={https://arxiv.org/abs/2508.20033},
31
- }
32
- """
33
-
34
- _HOMEPAGE = "https://github.com/guestrin-lab/deepscholar-bench"
35
- _LICENSE = "MIT"
36
-
37
- # URLs to the dataset files
38
- _URLS = {
39
- "papers": "papers_with_related_works.csv",
40
- "citations": "recovered_citations.csv",
41
- "important_citations": "important_citations.csv",
42
- }
43
-
44
-
45
class DeepScholarBenchConfig(datasets.BuilderConfig):
    """BuilderConfig for the DeepScholarBench dataset."""

    def __init__(self, name: str, description: str, **kwargs):
        """Create a DeepScholarBench configuration.

        Args:
            name: Configuration name (e.g. "papers", "citations").
            description: Human-readable description of this configuration.
            **kwargs: Extra keyword arguments forwarded to BuilderConfig.
        """
        # Every configuration shares the same dataset version.
        super().__init__(
            name=name,
            description=description,
            version=datasets.Version("1.0.0"),
            **kwargs,
        )
62
-
63
-
64
class DeepScholarBench(datasets.GeneratorBasedBuilder):
    """DeepScholarBench dataset builder.

    Configurations:
      * "papers"              -- papers with extracted related-works sections.
      * "citations"           -- recovered citations with metadata.
      * "important_citations" -- important citations with enhanced metadata.
      * "full"                -- all three tables as separate splits; because
        the tables have different columns, each row is JSON-encoded into a
        uniform {"split", "data"} schema.

    FIX: the original implementation declared {"split", "data"} features for
    the "full" config but yielded per-table rows in _generate_examples, which
    would fail schema validation; "full" rows are now JSON-encoded to match.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        DeepScholarBenchConfig(
            name="papers",
            description="Academic papers with extracted related works sections (63 papers)",
        ),
        DeepScholarBenchConfig(
            name="citations",
            description="Recovered citations with metadata (1630 citations)",
        ),
        DeepScholarBenchConfig(
            name="important_citations",
            description="Important citations with enhanced metadata (1050 citations)",
        ),
        DeepScholarBenchConfig(
            name="full",
            description="Complete dataset with papers, citations, and important citations",
        ),
    ]

    DEFAULT_CONFIG_NAME = "papers"

    # Maps table name -> ordered {column: dtype} schema. Single source of
    # truth for both the Features declared in _info and the row extraction in
    # _generate_examples, so the two can never drift apart.
    _SCHEMAS: Dict[str, Dict[str, str]] = {
        "papers": {
            "arxiv_id": "string",
            "title": "string",
            "authors": "string",
            "abstract": "string",
            "categories": "string",
            "published_date": "string",
            "updated_date": "string",
            "abs_url": "string",
            "arxiv_link": "string",
            "publication_date": "string",
            "raw_latex_related_works": "string",
            "clean_latex_related_works": "string",
            "pdf_related_works": "string",
        },
        "citations": {
            "parent_paper_title": "string",
            "parent_paper_arxiv_id": "string",
            "citation_shorthand": "string",
            "raw_citation_text": "string",
            "cited_paper_title": "string",
            "cited_paper_arxiv_link": "string",
            "cited_paper_abstract": "string",
            "has_metadata": "bool",
            "is_arxiv_paper": "bool",
            "bib_paper_authors": "string",
            "bib_paper_year": "float32",
            "bib_paper_month": "string",
            "bib_paper_url": "string",
            "bib_paper_doi": "string",
            "bib_paper_journal": "string",
            "original_title": "string",
            "search_res_title": "string",
            "search_res_url": "string",
            "search_res_content": "string",
        },
        "important_citations": {
            "parent_paper_title": "string",
            "parent_paper_arxiv_id": "string",
            "citation_shorthand": "string",
            "raw_citation_text": "string",
            "cited_paper_title": "string",
            "cited_paper_arxiv_link": "string",
            "cited_paper_abstract": "string",
            "has_metadata": "bool",
            "is_arxiv_paper": "bool",
            "cited_paper_authors": "string",
            "bib_paper_year": "float32",
            "bib_paper_month": "string",
            "bib_paper_url": "string",
            "bib_paper_doi": "string",
            "bib_paper_journal": "string",
            "original_title": "string",
            "search_res_title": "string",
            "search_res_url": "string",
            "search_res_content": "string",
            "arxiv_id": "string",
            "arxiv_link": "string",
            "publication_date": "string",
            "title": "string",
            "abstract": "string",
            "raw_latex_related_works": "string",
            "related_work_section": "string",
            "pdf_related_works": "string",
            "cited_paper_content": "string",
        },
    }

    def _info(self) -> datasets.DatasetInfo:
        """Return the dataset metadata (features, homepage, license, citation)."""
        if self.config.name == "full":
            # The three tables have heterogeneous columns, so they cannot
            # share one flat schema; rows are JSON-encoded strings tagged
            # with their originating split.
            features = datasets.Features({
                "split": datasets.Value("string"),
                "data": datasets.Value("string"),  # JSON-encoded row
            })
        else:
            features = datasets.Features({
                column: datasets.Value(dtype)
                for column, dtype in self._SCHEMAS[self.config.name].items()
            })

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download the CSV file(s) and declare the dataset splits."""
        if self.config.name == "full":
            # One split per table; each is generated from its own CSV file.
            return [
                datasets.SplitGenerator(
                    name=table,
                    gen_kwargs={
                        "filepath": dl_manager.download_and_extract(_URLS[table]),
                        "split": table,
                    },
                )
                for table in ("papers", "citations", "important_citations")
            ]

        # Single-table configs expose one "train" split.
        data_file = dl_manager.download_and_extract(_URLS[self.config.name])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_file,
                    "split": self.config.name,
                },
            ),
        ]

    @staticmethod
    def _safe_bool(value: Any) -> bool:
        """Parse truthy strings ('true'/'yes'/'1', any case); otherwise bool()."""
        if isinstance(value, str):
            return value.lower() in ("true", "yes", "1")
        return bool(value)

    @staticmethod
    def _safe_float(value: Any) -> Optional[float]:
        """Parse a float, returning None for missing/empty/'nan'/unparseable input."""
        if value is None:
            return None
        if isinstance(value, str):
            # Empty cells and literal 'nan'/'NaN' mean "no year recorded".
            if value.strip() == "" or value.lower() == "nan":
                return None
        try:
            return float(value)
        except (ValueError, TypeError):
            return None

    def _row_to_example(self, row: Dict[str, str], schema: Dict[str, str]) -> Dict[str, Any]:
        """Extract and type-convert one CSV row according to *schema*.

        Missing columns default to "" for strings, False for bools, and
        None for floats (matching the original per-column defaults).
        """
        example: Dict[str, Any] = {}
        for column, dtype in schema.items():
            if dtype == "bool":
                example[column] = self._safe_bool(row.get(column, "False"))
            elif dtype == "float32":
                example[column] = self._safe_float(row.get(column, ""))
            else:
                example[column] = row.get(column, "")
        return example

    def _generate_examples(self, filepath: str, split: str):
        """Yield (key, example) pairs from one CSV file.

        Args:
            filepath: Local path of the downloaded CSV file.
            split: Table name ("papers", "citations", or "important_citations")
                selecting which schema to apply.
        """
        schema = self._SCHEMAS[split]
        # newline="" per the csv module docs (correct embedded-newline handling).
        with open(filepath, encoding="utf-8", newline="") as f:
            reader = csv.DictReader(f)
            for key, row in enumerate(reader):
                example = self._row_to_example(row, schema)
                if self.config.name == "full":
                    # Wrap into the uniform {"split", "data"} schema declared
                    # by _info for the "full" configuration.
                    yield key, {
                        "split": split,
                        "data": json.dumps(example, ensure_ascii=False),
                    }
                else:
                    yield key, example