cfahlgren1 HF Staff committed on
Commit
65552fd
·
verified ·
1 Parent(s): c9de258

Upload daily_papers_sync.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. daily_papers_sync.py +266 -0
daily_papers_sync.py ADDED
@@ -0,0 +1,266 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ # /// script
3
+ # requires-python = ">=3.10"
4
+ # dependencies = [
5
+ # "datasets",
6
+ # "requests",
7
+ # "huggingface-hub",
8
+ # ]
9
+ # ///
10
+ """
11
+ Daily sync for huggingface/trending-papers-x dataset.
12
+ Indexes new papers and updates GitHub/project URLs via HF Papers API.
13
+
14
+ Run locally: uv run daily_papers_sync.py
15
+ Run as HF Job: hf jobs uv run daily_papers_sync.py --secrets HF_TOKEN
16
+ """
17
+
18
+ from __future__ import annotations
19
+
20
+ import os
21
+ import re
22
+ from datetime import datetime, timedelta, timezone
23
+ from typing import Any, Optional
24
+ from urllib.parse import urlparse
25
+
26
+ import requests
27
+ from datasets import load_dataset
28
+
29
# Sync configuration: which dataset to scan and how much of it per run.
REPO_ID = "huggingface/trending-papers-x"  # dataset streamed for candidate papers
API_BASE = "https://huggingface.co/api"  # Hugging Face Hub REST API root
HARD_LIMIT = 150  # max papers processed in a single run
HOURS_LOOKBACK = 24  # only rows dated within this window are considered
33
+
34
# Patterns used to recognise arXiv identifiers.
_ARXIV_URL_RE = re.compile(r"https?://(?:www\.)?arxiv\.org/(?:abs|pdf)/(?P<id>[^?#]+)", re.I)
_ARXIV_NEW_RE = re.compile(r"^\d{4}\.\d{4,5}$")


def normalize_arxiv_id(value: Any) -> Optional[str]:
    """Extract and validate arXiv ID from various formats."""
    if not value:
        return None
    candidate = str(value).strip()

    # If given an arxiv.org abs/pdf URL, pull out just the ID segment.
    url_match = _ARXIV_URL_RE.search(candidate)
    if url_match is not None:
        candidate = url_match.group("id")

    candidate = candidate.strip().rstrip("/")
    if candidate.lower().endswith(".pdf"):
        candidate = candidate[:-4]
    if candidate.lower().startswith("arxiv:"):
        candidate = candidate[len("arxiv:"):]

    # Drop a trailing version marker such as "v2".
    candidate = re.sub(r"v\d+$", "", candidate)

    # Only new-style identifiers (YYMM.NNNN or YYMM.NNNNN) are accepted.
    if not _ARXIV_NEW_RE.fullmatch(candidate):
        return None

    # Digits 3-4 encode the month; reject impossible values like "2313.*".
    return candidate if 1 <= int(candidate[2:4]) <= 12 else None
65
+
66
+
67
def normalize_github_repo(value: Any) -> Optional[str]:
    """Extract and normalize GitHub repo URL."""
    if not value:
        return None
    raw = str(value).strip()

    # Accept SSH-style and scheme-less spellings by rewriting them to https.
    ssh_prefix = "git@github.com:"
    if raw.startswith(ssh_prefix):
        raw = f"https://github.com/{raw[len(ssh_prefix):]}"
    elif raw.startswith("github.com/"):
        raw = f"https://{raw}"

    parsed = urlparse(raw)
    if parsed.scheme != "http" and parsed.scheme != "https":
        return None

    hostname = (parsed.netloc or "").lower().removeprefix("www.")
    if hostname != "github.com":
        return None

    segments = [segment for segment in parsed.path.split("/") if segment]
    if len(segments) < 2:
        return None

    owner = segments[0]
    repo = segments[1].removesuffix(".git")
    # Canonical form: scheme + host + owner/repo, extra path parts dropped.
    return f"https://github.com/{owner}/{repo}"
92
+
93
+
94
def normalize_url(value: Any) -> Optional[str]:
    """Validate and normalize a URL."""
    if not value:
        return None
    text = str(value).strip()
    parsed = urlparse(text)
    # A usable web URL needs an http(s) scheme and a non-empty host.
    is_web_url = parsed.scheme in ("http", "https") and bool(parsed.netloc)
    return text if is_web_url else None
101
+
102
+
103
def parse_date(value: Any) -> Optional[datetime]:
    """Parse date string into datetime."""
    # Already a datetime: just ensure it is timezone-aware (assume UTC).
    if isinstance(value, datetime):
        if value.tzinfo is None:
            return value.replace(tzinfo=timezone.utc)
        return value
    if not value:
        return None

    text = str(value).strip()
    known_formats = ("%Y-%m-%dT%H:%M:%S.%fZ", "%Y-%m-%dT%H:%M:%SZ", "%Y-%m-%d")
    for fmt in known_formats:
        try:
            parsed = datetime.strptime(text, fmt)
        except ValueError:
            continue
        # The "Z" suffix in the accepted formats means UTC.
        return parsed.replace(tzinfo=timezone.utc)
    return None
117
+
118
+
119
def get_token() -> str:
    """Get HF token from environment or huggingface-cli."""
    env_token = os.environ.get("HF_TOKEN", "").strip()
    if env_token:
        return env_token
    # Fall back to the token stored by `huggingface-cli login`, if any.
    try:
        from huggingface_hub import HfFolder
        stored = HfFolder.get_token()
    except Exception:
        return ""
    return (stored or "").strip()
128
+
129
+
130
def get_paper(session: requests.Session, arxiv_id: str) -> Optional[dict]:
    """Fetch paper from API, returns None if not found."""
    try:
        response = session.get(f"{API_BASE}/papers/{arxiv_id}", timeout=30)
        if response.status_code != 200:
            return None
        return response.json()
    except Exception:
        # Network/JSON failures are treated the same as "not found".
        return None
137
+
138
+
139
def index_paper(session: requests.Session, arxiv_id: str) -> bool:
    """Index a paper by arXiv ID. Returns True on success."""
    try:
        response = session.post(f"{API_BASE}/papers/index", json={"arxivId": arxiv_id}, timeout=30)
    except Exception:
        # Network errors count as a failed indexing attempt.
        return False
    return response.status_code == 200
146
+
147
+
148
def update_paper_links(
    session: requests.Session,
    arxiv_id: str,
    github_repo: Optional[str] = None,
    project_page: Optional[str] = None,
) -> bool:
    """Update GitHub repo and/or project page for a paper."""
    payload: dict[str, str] = {}
    for api_key, link in (("githubRepo", github_repo), ("projectPage", project_page)):
        if link:
            payload[api_key] = link

    # Nothing to send -> report failure without hitting the API.
    if not payload:
        return False

    try:
        response = session.post(f"{API_BASE}/papers/{arxiv_id}/links", json=payload, timeout=30)
    except Exception:
        return False
    return response.status_code == 200
169
+
170
+
171
def main() -> None:
    """Sync recent dataset rows into the HF Papers index.

    Streams the trending-papers dataset, and for each recent row with a
    usable arXiv ID and at least one link: indexes the paper if the Hub
    doesn't know it yet, then fills in any missing GitHub/project links.
    Exits with status 1 when no HF token is available.
    """
    token = get_token()
    if not token:
        print("ERROR: HF token not found. Set HF_TOKEN or run `huggingface-cli login`.")
        # `exit()` is a site.py convenience that may be missing under
        # `python -S` or in frozen builds; SystemExit is the portable form.
        raise SystemExit(1)

    session = requests.Session()
    session.headers.update({
        "Content-Type": "application/json",
        "Authorization": f"Bearer {token}",
    })

    cutoff_time = datetime.now(timezone.utc) - timedelta(hours=HOURS_LOOKBACK)

    print(f"Dataset: {REPO_ID}")
    print(f"Lookback: {HOURS_LOOKBACK}h (since {cutoff_time.strftime('%Y-%m-%d %H:%M UTC')})")
    print(f"Limit: {HARD_LIMIT} papers")
    print("-" * 50)

    # Streaming avoids downloading the whole dataset just to scan recent rows.
    dataset = load_dataset(REPO_ID, split="train", streaming=True)

    stats = {"indexed": 0, "github": 0, "project": 0, "not_found": 0, "skipped": 0}
    processed = 0

    for row in dataset:
        if processed >= HARD_LIMIT:
            break

        # Rows may carry the ID under either column name.
        arxiv_id = normalize_arxiv_id(row.get("arxiv_id") or row.get("paper_id"))
        if not arxiv_id:
            continue

        # Skip rows older than the lookback window; undated rows pass through.
        date_str = row.get("date") or row.get("published_at") or row.get("created_at")
        if (paper_date := parse_date(date_str)) and paper_date < cutoff_time:
            stats["skipped"] += 1
            continue

        # Get links from dataset (again, column names vary).
        github_repo = normalize_github_repo(row.get("github") or row.get("github_url"))
        project_page = normalize_url(row.get("project_page_url") or row.get("project_page"))

        # Nothing to contribute for this paper -> ignore it entirely
        # (doesn't count against HARD_LIMIT).
        if not github_repo and not project_page:
            continue

        processed += 1

        # Check if paper exists on the Hub already.
        paper = get_paper(session, arxiv_id)
        just_indexed = False

        # Index if not found; on failure move on to the next row.
        if paper is None:
            if index_paper(session, arxiv_id):
                stats["indexed"] += 1
                just_indexed = True
                print(f"INDEXED: {arxiv_id}")
            else:
                stats["not_found"] += 1
                print(f"SKIP: {arxiv_id} - could not index")
                continue

        # Determine what to update (if just indexed, paper has no links yet,
        # and `paper` may be None - the just_indexed guard keeps it unread).
        has_github = False if just_indexed else bool(paper.get("githubRepo"))
        has_project = False if just_indexed else bool(paper.get("projectPage"))

        # Only set links the paper doesn't already have.
        github_to_set = github_repo if github_repo and not has_github else None
        project_to_set = project_page if project_page and not has_project else None

        if not github_to_set and not project_to_set:
            print(f"SKIP: {arxiv_id} - already has links")
            continue

        # Update links (single API call for both fields).
        if update_paper_links(session, arxiv_id, github_to_set, project_to_set):
            if github_to_set:
                stats["github"] += 1
                print(f"SET GITHUB: {arxiv_id} -> {github_to_set}")
            if project_to_set:
                stats["project"] += 1
                print(f"SET PROJECT: {arxiv_id} -> {project_to_set}")
        else:
            print(f"ERROR: {arxiv_id} - failed to update links")

    print("-" * 50)
    print(f"Processed: {processed}")
    print(f"Indexed: {stats['indexed']}")
    print(f"GitHub added: {stats['github']}")
    print(f"Project added: {stats['project']}")
    print(f"Not found: {stats['not_found']}")
    print(f"Skipped (old): {stats['skipped']}")


if __name__ == "__main__":
    main()