File size: 6,558 Bytes
7527970 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 |
from __future__ import annotations
import argparse
import csv
import re
from pathlib import Path
from typing import Dict, Iterable, List
import sys
# Import project helpers. When this script is run directly (e.g.
# `python scripts/this_script.py`) rather than as a module from the
# project root, the top-level "scripts" package may not be importable;
# in that case add the project root to sys.path and retry the imports.
try:
    from scripts.defextra_markers import (
        normalize_paper_id,
        normalize_arxiv,
        normalize_doi,
    )
    from scripts.defextra_pdf_aliases import candidate_pdf_aliases
except ModuleNotFoundError as exc:
    # Only handle the missing "scripts" package itself; a genuinely
    # missing submodule should still surface as an error.
    if exc.name != "scripts":
        raise
    PROJECT_ROOT = Path(__file__).resolve().parent.parent
    if str(PROJECT_ROOT) not in sys.path:
        sys.path.insert(0, str(PROJECT_ROOT))
    from scripts.defextra_markers import (
        normalize_paper_id,
        normalize_arxiv,
        normalize_doi,
    )
    from scripts.defextra_pdf_aliases import candidate_pdf_aliases
# A Semantic Scholar paper ID is a 40-character hexadecimal SHA-1 digest.
S2_ID_RE = re.compile(r"^[0-9a-f]{40}$", re.IGNORECASE)
def _safe_join(values: Iterable[str]) -> str:
return ";".join(v for v in values if v)
def _semanticscholar_url(paper_id: str) -> str:
    """Return a Semantic Scholar paper URL when *paper_id* looks like an
    S2 sha (40 hex characters); otherwise return the empty string."""
    if not S2_ID_RE.match(paper_id):
        return ""
    return f"https://www.semanticscholar.org/paper/{paper_id}"
def _acl_url(paper_id: str) -> str:
if paper_id.startswith("https://aclanthology.org/"):
return paper_id
if re.match(r"^[0-9]{4}\\.[a-z-]+\\.[0-9]+$", paper_id, re.IGNORECASE):
return f"https://aclanthology.org/{paper_id}"
return ""
def _doi_url(doi: str, paper_id: str) -> str:
if doi:
return f"https://doi.org/{normalize_doi(doi)}"
if paper_id.startswith("10."):
return f"https://doi.org/{normalize_doi(paper_id)}"
if "doi.org/" in paper_id:
return f"https://doi.org/{normalize_doi(paper_id)}"
return ""
def _arxiv_url(arxiv: str, paper_id: str) -> str:
if arxiv:
return f"https://arxiv.org/abs/{normalize_arxiv(arxiv)}"
match = re.search(r"arxiv\\.org/(abs|pdf)/([^?#]+)", paper_id)
if match:
return f"https://arxiv.org/abs/{match.group(2).replace('.pdf', '')}"
return ""
def _collect_papers(rows: List[Dict[str, str]]) -> Dict[str, Dict[str, str]]:
papers: Dict[str, Dict[str, str]] = {}
for row in rows:
paper_id = (row.get("paper_id") or "").strip()
if not paper_id:
continue
record = papers.setdefault(
paper_id,
{
"paper_id": paper_id,
"paper_title": (row.get("paper_title") or "").strip(),
"paper_doi": (row.get("paper_doi") or "").strip(),
"paper_arxiv": (row.get("paper_arxiv") or "").strip(),
},
)
if not record["paper_title"] and row.get("paper_title"):
record["paper_title"] = row.get("paper_title", "").strip()
if not record["paper_doi"] and row.get("paper_doi"):
record["paper_doi"] = row.get("paper_doi", "").strip()
if not record["paper_arxiv"] and row.get("paper_arxiv"):
record["paper_arxiv"] = row.get("paper_arxiv", "").strip()
return papers
def _build_output_rows(papers: Dict[str, Dict[str, str]]) -> List[Dict[str, str]]:
    """Turn deduplicated paper records into flat rows with helper links."""
    output_rows: List[Dict[str, str]] = []
    for paper_id, record in sorted(papers.items()):
        doi = record.get("paper_doi", "")
        arxiv = record.get("paper_arxiv", "")
        normalized_id = normalize_paper_id(paper_id)
        aliases = candidate_pdf_aliases(paper_id, doi, arxiv)
        output_rows.append(
            {
                "paper_id": paper_id,
                "normalized_id": normalized_id,
                "paper_title": record.get("paper_title", ""),
                "paper_doi": doi,
                "paper_arxiv": arxiv,
                "preferred_pdf_name": f"{normalized_id}.pdf",
                "alias_pdf_names": _safe_join(
                    f"{alias}.pdf" for alias in aliases
                ),
                "url_semanticscholar": _semanticscholar_url(paper_id),
                "url_doi": _doi_url(doi, paper_id),
                "url_arxiv": _arxiv_url(arxiv, paper_id),
                "url_acl": _acl_url(paper_id),
            }
        )
    return output_rows


def _write_csv(path: Path, output_rows: List[Dict[str, str]]) -> None:
    """Write *output_rows* to *path* as CSV, creating parent directories."""
    path.parent.mkdir(parents=True, exist_ok=True)
    with path.open("w", encoding="utf-8", newline="") as handle:
        # Rows share one fixed schema, so the first row's keys are the header.
        fieldnames = list(output_rows[0].keys()) if output_rows else []
        writer = csv.DictWriter(handle, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(output_rows)
    print(f"Wrote {len(output_rows)} rows to {path}")


def _write_markdown(path: Path, output_rows: List[Dict[str, str]]) -> None:
    """Write a Markdown checklist of required PDFs with any known links."""
    lines = ["# DefExtra required PDFs", ""]
    for row in output_rows:
        line = f"- {row['paper_id']} — {row['paper_title']}"
        links = [
            link
            for link in (
                row["url_semanticscholar"],
                row["url_doi"],
                row["url_arxiv"],
                row["url_acl"],
            )
            if link
        ]
        if links:
            line += " (" + ", ".join(links) + ")"
        lines.append(line)
    path.parent.mkdir(parents=True, exist_ok=True)
    path.write_text("\n".join(lines) + "\n", encoding="utf-8")
    print(f"Wrote {len(output_rows)} rows to {path}")


def main() -> None:
    """CLI entry point: list required PDFs for DefExtra and emit helper links.

    Reads the legal CSV, deduplicates it by paper, and optionally writes a
    CSV and/or Markdown report plus a short stdout preview.
    """
    parser = argparse.ArgumentParser(
        description=(
            "List required PDFs for DefExtra and generate helper links."
        ),
    )
    parser.add_argument(
        "--legal-csv",
        type=Path,
        default=Path("data/defextra_legal.csv"),
        help="Path to DefExtra legal CSV.",
    )
    parser.add_argument(
        "--output-csv",
        type=Path,
        default=None,
        help="Optional output CSV path.",
    )
    parser.add_argument(
        "--output-md",
        type=Path,
        default=None,
        help="Optional output Markdown list path.",
    )
    parser.add_argument(
        "--limit",
        type=int,
        default=10,
        help="How many entries to print to stdout (0 = none).",
    )
    args = parser.parse_args()

    if not args.legal_csv.exists():
        raise SystemExit(f"Legal CSV not found: {args.legal_csv}")
    with args.legal_csv.open("r", encoding="utf-8", newline="") as handle:
        rows = list(csv.DictReader(handle))

    papers = _collect_papers(rows)
    output_rows = _build_output_rows(papers)

    if args.output_csv:
        _write_csv(args.output_csv, output_rows)
    if args.output_md:
        _write_markdown(args.output_md, output_rows)

    if args.limit > 0:
        for row in output_rows[: args.limit]:
            # Pick the first available link, in order of preference.
            best_url = (
                row["url_semanticscholar"]
                or row["url_doi"]
                or row["url_arxiv"]
                or row["url_acl"]
            )
            print(
                f"{row['paper_id']} | {row['preferred_pdf_name']} | {best_url}"
            )


if __name__ == "__main__":
    main()
|