#!/usr/bin/env python3
# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "datasets",
#     "requests",
#     "huggingface-hub",
# ]
# ///
"""
Daily sync for huggingface/trending-papers-x dataset.
Indexes new papers and updates GitHub/project URLs via HF Papers API.
Run locally: uv run daily_papers_sync.py
Run as HF Job: hf jobs uv run daily_papers_sync.py --secrets HF_TOKEN
"""
from __future__ import annotations

import os
import re
import time
from datetime import datetime, timedelta, timezone
from typing import Any, Optional
from urllib.parse import urlparse

import requests
from datasets import load_dataset

REPO_ID = "huggingface/trending-papers-x"
API_BASE = "https://huggingface.co/api"
HARD_LIMIT = 150
HOURS_LOOKBACK = 24

# Regex patterns for arXiv ID validation
_ARXIV_URL_RE = re.compile(r"https?://(?:www\.)?arxiv\.org/(?:abs|pdf)/(?P<id>[^?#]+)", re.I)
_ARXIV_NEW_RE = re.compile(r"^\d{4}\.\d{4,5}$")

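# Example: normalize_arxiv_id("https://arxiv.org/abs/2401.12345v2") -> "2401.12345".
# Old-style IDs (e.g. "hep-th/9901001") are rejected because _ARXIV_NEW_RE only
# accepts the new-style YYMM.NNNNN format.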
def normalize_arxiv_id(value: Any) -> Optional[str]:
    """Extract and validate arXiv ID from various formats."""
    if not value:
        return None
    s = str(value).strip()

    # Extract from URL if present
    if m := _ARXIV_URL_RE.search(s):
        s = m.group("id")
    s = s.strip().rstrip("/")
    if s.lower().endswith(".pdf"):
        s = s[:-4]
    if s.lower().startswith("arxiv:"):
        s = s[6:]

    # Remove version suffix
    s = re.sub(r"v\d+$", "", s)

    # Validate new-style arXiv ID format
    if not _ARXIV_NEW_RE.fullmatch(s):
        return None

    # Validate month (positions 2-3)
    month = int(s[2:4])
    return s if 1 <= month <= 12 else None

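# Example: both "git@github.com:owner/repo.git" and "github.com/owner/repo/tree/main"
# normalize to "https://github.com/owner/repo"; non-GitHub hosts return None.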
def normalize_github_repo(value: Any) -> Optional[str]:
    """Extract and normalize GitHub repo URL."""
    if not value:
        return None
    s = str(value).strip()
    if s.startswith("git@github.com:"):
        s = f"https://github.com/{s[15:]}"
    elif s.startswith("github.com/"):
        s = f"https://{s}"

    p = urlparse(s)
    if p.scheme not in ("http", "https"):
        return None
    host = (p.netloc or "").lower().removeprefix("www.")
    if host != "github.com":
        return None

    parts = [x for x in p.path.split("/") if x]
    if len(parts) < 2:
        return None
    owner, repo = parts[0], parts[1].removesuffix(".git")
    return f"https://github.com/{owner}/{repo}"


def normalize_url(value: Any) -> Optional[str]:
    """Validate and normalize a URL."""
    if not value:
        return None
    s = str(value).strip()
    p = urlparse(s)
    return s if p.scheme in ("http", "https") and p.netloc else None

def parse_date(value: Any) -> Optional[datetime]:
    """Parse date string into datetime."""
    if isinstance(value, datetime):
        return value.replace(tzinfo=timezone.utc) if value.tzinfo is None else value
    if not value:
        return None
    for fmt in ["%Y-%m-%dT%H:%M:%S.%fZ", "%Y-%m-%dT%H:%M:%SZ", "%Y-%m-%d"]:
        try:
            dt = datetime.strptime(str(value).strip(), fmt)
            return dt.replace(tzinfo=timezone.utc)
        except ValueError:
            continue
    return None

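# Token resolution order: the HF_TOKEN environment variable (e.g. passed to an
# HF Job via --secrets HF_TOKEN), then the token cached by `huggingface-cli login`.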
def get_token() -> str:
    """Get HF token from environment or huggingface-cli."""
    if token := os.environ.get("HF_TOKEN", "").strip():
        return token
    try:
        from huggingface_hub import HfFolder
        return (HfFolder.get_token() or "").strip()
    except Exception:
        return ""

def get_paper(session: requests.Session, arxiv_id: str) -> Optional[dict]:
    """Fetch paper from API, returns None if not found."""
    try:
        r = session.get(f"{API_BASE}/papers/{arxiv_id}", timeout=30)
        return r.json() if r.status_code == 200 else None
    except Exception:
        return None


def index_paper(session: requests.Session, arxiv_id: str) -> bool:
    """Index a paper by arXiv ID. Returns True on success."""
    try:
        r = session.post(f"{API_BASE}/papers/index", json={"arxivId": arxiv_id}, timeout=30)
        return r.status_code == 200
    except Exception:
        return False

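# The links payload shape below ({"githubRepo": ..., "projectPage": ...}) is as
# this script sends it; the field names are assumed from the Papers API response
# checked in main(), not from separate API documentation.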
def update_paper_links(
    session: requests.Session,
    arxiv_id: str,
    github_repo: Optional[str] = None,
    project_page: Optional[str] = None,
) -> bool:
    """Update GitHub repo and/or project page for a paper."""
    payload = {}
    if github_repo:
        payload["githubRepo"] = github_repo
    if project_page:
        payload["projectPage"] = project_page
    if not payload:
        return False
    try:
        r = session.post(f"{API_BASE}/papers/{arxiv_id}/links", json=payload, timeout=30)
        return r.status_code == 200
    except Exception:
        return False

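# Main flow: stream the dataset, keep rows from the lookback window that carry a
# GitHub or project link, index any paper the Papers API does not know yet, then
# attach whichever links it is still missing.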
def main() -> None:
    token = get_token()
    if not token:
        print("ERROR: HF token not found. Set HF_TOKEN or run `huggingface-cli login`.")
        raise SystemExit(1)

    session = requests.Session()
    session.headers.update({
        "Content-Type": "application/json",
        "Authorization": f"Bearer {token}",
    })

    cutoff_time = datetime.now(timezone.utc) - timedelta(hours=HOURS_LOOKBACK)
    print(f"Dataset: {REPO_ID}")
    print(f"Lookback: {HOURS_LOOKBACK}h (since {cutoff_time.strftime('%Y-%m-%d %H:%M UTC')})")
    print(f"Limit: {HARD_LIMIT} papers")
    print("-" * 50)

    dataset = load_dataset(REPO_ID, split="train", streaming=True)
    stats = {"indexed": 0, "github": 0, "project": 0, "not_found": 0, "skipped": 0}
    processed = 0

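    # Row fields read below (the dataset may only populate a subset of these):
    # arxiv_id / paper_id, date / published_at / created_at,
    # github / github_url, project_page_url / project_page.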
    for row in dataset:
        if processed >= HARD_LIMIT:
            break

        arxiv_id = normalize_arxiv_id(row.get("arxiv_id") or row.get("paper_id"))
        if not arxiv_id:
            continue

        # Check date
        date_str = row.get("date") or row.get("published_at") or row.get("created_at")
        if (paper_date := parse_date(date_str)) and paper_date < cutoff_time:
            stats["skipped"] += 1
            continue

        # Get links from dataset
        github_repo = normalize_github_repo(row.get("github") or row.get("github_url"))
        project_page = normalize_url(row.get("project_page_url") or row.get("project_page"))

        # Skip if no links
        if not github_repo and not project_page:
            continue

        processed += 1

        # Check if paper exists
        paper = get_paper(session, arxiv_id)
        just_indexed = False

        # Index if not found
        if paper is None:
            if index_paper(session, arxiv_id):
                stats["indexed"] += 1
                just_indexed = True
                print(f"INDEXED: {arxiv_id}")
                time.sleep(30)
            else:
                stats["not_found"] += 1
                print(f"SKIP: {arxiv_id} - could not index")
                continue

        # Determine what to update (if just indexed, paper has no links yet)
        has_github = False if just_indexed else bool(paper.get("githubRepo"))
        has_project = False if just_indexed else bool(paper.get("projectPage"))
        github_to_set = github_repo if github_repo and not has_github else None
        project_to_set = project_page if project_page and not has_project else None

        if not github_to_set and not project_to_set:
            print(f"SKIP: {arxiv_id} - already has links")
            continue

        # Update links
        if update_paper_links(session, arxiv_id, github_to_set, project_to_set):
            if github_to_set:
                stats["github"] += 1
                print(f"SET GITHUB: {arxiv_id} -> {github_to_set}")
            if project_to_set:
                stats["project"] += 1
                print(f"SET PROJECT: {arxiv_id} -> {project_to_set}")
        else:
            print(f"ERROR: {arxiv_id} - failed to update links")

    print("-" * 50)
    print(f"Processed: {processed}")
    print(f"Indexed: {stats['indexed']}")
    print(f"GitHub added: {stats['github']}")
    print(f"Project added: {stats['project']}")
    print(f"Not found: {stats['not_found']}")
    print(f"Skipped (old): {stats['skipped']}")


if __name__ == "__main__":
    main()