gharshit412 committed on
Commit
0fe892e
·
1 Parent(s): 7374fac
Files changed (2) hide show
  1. DeepScholarBench.py +6 -10
  2. README.md +43 -4
DeepScholarBench.py CHANGED
@@ -176,12 +176,8 @@ class DeepScholarBench(datasets.GeneratorBasedBuilder):
176
  def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
177
  """Return the dataset splits."""
178
 
179
- # For local files, use the actual file paths
180
- import os
181
- current_dir = os.path.dirname(os.path.abspath(__file__))
182
-
183
  if self.config.name == "papers":
184
- data_file = os.path.join(current_dir, "papers_with_related_works.csv")
185
  return [
186
  datasets.SplitGenerator(
187
  name=datasets.Split.TRAIN,
@@ -192,7 +188,7 @@ class DeepScholarBench(datasets.GeneratorBasedBuilder):
192
  ),
193
  ]
194
  elif self.config.name == "citations":
195
- data_file = os.path.join(current_dir, "recovered_citations.csv")
196
  return [
197
  datasets.SplitGenerator(
198
  name=datasets.Split.TRAIN,
@@ -203,7 +199,7 @@ class DeepScholarBench(datasets.GeneratorBasedBuilder):
203
  ),
204
  ]
205
  elif self.config.name == "important_citations":
206
- data_file = os.path.join(current_dir, "important_citations.csv")
207
  return [
208
  datasets.SplitGenerator(
209
  name=datasets.Split.TRAIN,
@@ -214,9 +210,9 @@ class DeepScholarBench(datasets.GeneratorBasedBuilder):
214
  ),
215
  ]
216
  else: # full config
217
- papers_file = os.path.join(current_dir, "papers_with_related_works.csv")
218
- citations_file = os.path.join(current_dir, "recovered_citations.csv")
219
- important_citations_file = os.path.join(current_dir, "important_citations.csv")
220
  return [
221
  datasets.SplitGenerator(
222
  name="papers",
 
176
  def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
177
  """Return the dataset splits."""
178
 
 
 
 
 
179
  if self.config.name == "papers":
180
+ data_file = dl_manager.download_and_extract(_URLS["papers"])
181
  return [
182
  datasets.SplitGenerator(
183
  name=datasets.Split.TRAIN,
 
188
  ),
189
  ]
190
  elif self.config.name == "citations":
191
+ data_file = dl_manager.download_and_extract(_URLS["citations"])
192
  return [
193
  datasets.SplitGenerator(
194
  name=datasets.Split.TRAIN,
 
199
  ),
200
  ]
201
  elif self.config.name == "important_citations":
202
+ data_file = dl_manager.download_and_extract(_URLS["important_citations"])
203
  return [
204
  datasets.SplitGenerator(
205
  name=datasets.Split.TRAIN,
 
210
  ),
211
  ]
212
  else: # full config
213
+ papers_file = dl_manager.download_and_extract(_URLS["papers"])
214
+ citations_file = dl_manager.download_and_extract(_URLS["citations"])
215
+ important_citations_file = dl_manager.download_and_extract(_URLS["important_citations"])
216
  return [
217
  datasets.SplitGenerator(
218
  name="papers",
README.md CHANGED
@@ -141,15 +141,15 @@ Contains enhanced citations with full paper metadata and content:
141
  from datasets import load_dataset
142
 
143
  # Load papers dataset
144
- papers = load_dataset("deepscholar-bench/DeepScholarBench", name="papers")["train"]
145
  print(f"Loaded {len(papers)} papers")
146
 
147
  # Load citations dataset
148
- citations = load_dataset("deepscholar-bench/DeepScholarBench", name="citations")["train"]
149
  print(f"Loaded {len(citations)} citations")
150
 
151
  # Load important citations with enhanced metadata
152
- important_citations = load_dataset("deepscholar-bench/DeepScholarBench", name="important_citations")["train"]
153
  print(f"Loaded {len(important_citations)} important citations")
154
 
155
  # Convert to pandas for analysis
@@ -157,6 +157,26 @@ papers_df = papers.to_pandas()
157
  citations_df = citations.to_pandas()
158
  ```
159
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
160
  ### Example: Extract Related Works for a Paper
161
 
162
  ```python
@@ -170,6 +190,25 @@ paper_citations = citations_df[citations_df['parent_paper_arxiv_id'] == '2506.02
170
  print(f"Number of citations: {len(paper_citations)}")
171
  ```
172
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
173
  ## 📈 Dataset Statistics
174
 
175
  - **Total Papers**: 63
@@ -179,7 +218,7 @@ print(f"Number of citations: {len(paper_citations)}")
179
 
180
  ## 🔧 Data Collection Process
181
 
182
- This dataset was created using the [Lotus Deep Research](https://github.com/guestrin-lab/deepscholar-bench) pipeline:
183
 
184
  1. **ArXiv Scraping**: Collected papers by category and date range
185
  2. **Author Filtering**: Focused on high-impact researchers (h-index ≥ 25)
 
141
  from datasets import load_dataset
142
 
143
  # Load papers dataset
144
+ papers = load_dataset("deepscholar-bench/DeepScholarBench", name="papers", trust_remote_code=True)["train"]
145
  print(f"Loaded {len(papers)} papers")
146
 
147
  # Load citations dataset
148
+ citations = load_dataset("deepscholar-bench/DeepScholarBench", name="citations", trust_remote_code=True)["train"]
149
  print(f"Loaded {len(citations)} citations")
150
 
151
  # Load important citations with enhanced metadata
152
+ important_citations = load_dataset("deepscholar-bench/DeepScholarBench", name="important_citations", trust_remote_code=True)["train"]
153
  print(f"Loaded {len(important_citations)} important citations")
154
 
155
  # Convert to pandas for analysis
 
157
  citations_df = citations.to_pandas()
158
  ```
159
 
160
+ **Note**: This dataset uses a custom loading script, so you need to include `trust_remote_code=True` when loading.
161
+
162
+ ### Alternative: Direct CSV Loading
163
+
164
+ ```python
165
+ import pandas as pd
166
+
167
+ # Load papers dataset
168
+ papers_df = pd.read_csv('papers_with_related_works.csv')
169
+ print(f"Loaded {len(papers_df)} papers")
170
+
171
+ # Load citations dataset
172
+ citations_df = pd.read_csv('recovered_citations.csv')
173
+ print(f"Loaded {len(citations_df)} citations")
174
+
175
+ # Load important citations
176
+ important_citations_df = pd.read_csv('important_citations.csv')
177
+ print(f"Loaded {len(important_citations_df)} important citations")
178
+ ```
179
+
180
  ### Example: Extract Related Works for a Paper
181
 
182
  ```python
 
190
  print(f"Number of citations: {len(paper_citations)}")
191
  ```
192
 
193
+ ### Example: Working with Important Citations
194
+
195
+ ```python
196
+ # Load important citations (enhanced with paper metadata)
197
+ important_citations = load_dataset("deepscholar-bench/DeepScholarBench", name="important_citations", trust_remote_code=True)["train"]
198
+
199
+ # This configuration includes both citation data AND the parent paper information
200
+ sample = important_citations[0]
201
+ print(f"Citation: {sample['cited_paper_title']}")
202
+ print(f"Parent Paper: {sample['title']}")
203
+ print(f"Paper Abstract: {sample['abstract'][:200]}...")
204
+ print(f"Related Work Section: {sample['related_work_section'][:200]}...")
205
+
206
+ # Analyze citation patterns
207
+ important_df = important_citations.to_pandas()
208
+ print(f"Citations with full paper content: {important_df['cited_paper_content'].notna().sum()}")
209
+ print(f"Citations with related work sections: {important_df['related_work_section'].notna().sum()}")
210
+ ```
211
+
212
  ## 📈 Dataset Statistics
213
 
214
  - **Total Papers**: 63
 
218
 
219
  ## 🔧 Data Collection Process
220
 
221
+ This dataset was created using the [DeepScholarBench](https://github.com/guestrin-lab/deepscholar-bench) pipeline:
222
 
223
  1. **ArXiv Scraping**: Collected papers by category and date range
224
  2. **Author Filtering**: Focused on high-impact researchers (h-index ≥ 25)