"""List the PDFs required by the DefExtra dataset and emit helper download links."""
from __future__ import annotations
import argparse
import csv
import re
from pathlib import Path
from typing import Dict, Iterable, List
import sys
try:
    # Preferred path: the project root is already on sys.path (e.g. run as
    # "python -m scripts.<this_module>"), so the "scripts" package imports.
    from scripts.defextra_markers import (
        normalize_paper_id,
        normalize_arxiv,
        normalize_doi,
    )
    from scripts.defextra_pdf_aliases import candidate_pdf_aliases
except ModuleNotFoundError as exc:
    # Only recover when the missing module is the "scripts" package itself
    # (the script was executed directly by path); re-raise anything else so
    # genuinely missing dependencies still surface.
    if exc.name != "scripts":
        raise
    # Fallback: this file lives one level below the project root, so add
    # <root> to sys.path and retry the same imports.
    PROJECT_ROOT = Path(__file__).resolve().parent.parent
    if str(PROJECT_ROOT) not in sys.path:
        sys.path.insert(0, str(PROJECT_ROOT))
    from scripts.defextra_markers import (
        normalize_paper_id,
        normalize_arxiv,
        normalize_doi,
    )
    from scripts.defextra_pdf_aliases import candidate_pdf_aliases
# 40 hex characters (case-insensitive): the shape of a Semantic Scholar
# (S2) paper hash.
S2_ID_RE = re.compile(r"^[0-9a-f]{40}$", re.IGNORECASE)
| def _safe_join(values: Iterable[str]) -> str: | |
| return ";".join(v for v in values if v) | |
| def _semanticscholar_url(paper_id: str) -> str: | |
| if S2_ID_RE.match(paper_id): | |
| return f"https://www.semanticscholar.org/paper/{paper_id}" | |
| return "" | |
| def _acl_url(paper_id: str) -> str: | |
| if paper_id.startswith("https://aclanthology.org/"): | |
| return paper_id | |
| if re.match(r"^[0-9]{4}\\.[a-z-]+\\.[0-9]+$", paper_id, re.IGNORECASE): | |
| return f"https://aclanthology.org/{paper_id}" | |
| return "" | |
| def _doi_url(doi: str, paper_id: str) -> str: | |
| if doi: | |
| return f"https://doi.org/{normalize_doi(doi)}" | |
| if paper_id.startswith("10."): | |
| return f"https://doi.org/{normalize_doi(paper_id)}" | |
| if "doi.org/" in paper_id: | |
| return f"https://doi.org/{normalize_doi(paper_id)}" | |
| return "" | |
| def _arxiv_url(arxiv: str, paper_id: str) -> str: | |
| if arxiv: | |
| return f"https://arxiv.org/abs/{normalize_arxiv(arxiv)}" | |
| match = re.search(r"arxiv\\.org/(abs|pdf)/([^?#]+)", paper_id) | |
| if match: | |
| return f"https://arxiv.org/abs/{match.group(2).replace('.pdf', '')}" | |
| return "" | |
| def _collect_papers(rows: List[Dict[str, str]]) -> Dict[str, Dict[str, str]]: | |
| papers: Dict[str, Dict[str, str]] = {} | |
| for row in rows: | |
| paper_id = (row.get("paper_id") or "").strip() | |
| if not paper_id: | |
| continue | |
| record = papers.setdefault( | |
| paper_id, | |
| { | |
| "paper_id": paper_id, | |
| "paper_title": (row.get("paper_title") or "").strip(), | |
| "paper_doi": (row.get("paper_doi") or "").strip(), | |
| "paper_arxiv": (row.get("paper_arxiv") or "").strip(), | |
| }, | |
| ) | |
| if not record["paper_title"] and row.get("paper_title"): | |
| record["paper_title"] = row.get("paper_title", "").strip() | |
| if not record["paper_doi"] and row.get("paper_doi"): | |
| record["paper_doi"] = row.get("paper_doi", "").strip() | |
| if not record["paper_arxiv"] and row.get("paper_arxiv"): | |
| record["paper_arxiv"] = row.get("paper_arxiv", "").strip() | |
| return papers | |
def main() -> None:
    """Command-line entry point.

    Reads the DefExtra legal CSV, deduplicates papers by paper_id, and for
    each paper emits the preferred/alias PDF file names plus candidate
    download links (Semantic Scholar, DOI, arXiv, ACL Anthology). Results
    may be written to a CSV (--output-csv), a Markdown checklist
    (--output-md), and/or previewed on stdout (--limit).

    Raises:
        SystemExit: if the input legal CSV does not exist.
    """
    parser = argparse.ArgumentParser(
        description=(
            "List required PDFs for DefExtra and generate helper links."
        ),
    )
    parser.add_argument(
        "--legal-csv",
        type=Path,
        default=Path("data/defextra_legal.csv"),
        help="Path to DefExtra legal CSV.",
    )
    parser.add_argument(
        "--output-csv",
        type=Path,
        default=None,
        help="Optional output CSV path.",
    )
    parser.add_argument(
        "--output-md",
        type=Path,
        default=None,
        help="Optional output Markdown list path.",
    )
    parser.add_argument(
        "--limit",
        type=int,
        default=10,
        help="How many entries to print to stdout (0 = none).",
    )
    args = parser.parse_args()
    if not args.legal_csv.exists():
        raise SystemExit(f"Legal CSV not found: {args.legal_csv}")
    with args.legal_csv.open("r", encoding="utf-8", newline="") as handle:
        rows = list(csv.DictReader(handle))
    papers = _collect_papers(rows)
    output_rows: List[Dict[str, str]] = []
    # Sorted by paper_id so every output format is deterministic run-to-run.
    for paper_id, record in sorted(papers.items()):
        doi = record.get("paper_doi", "")
        arxiv = record.get("paper_arxiv", "")
        normalized_id = normalize_paper_id(paper_id)
        aliases = candidate_pdf_aliases(paper_id, doi, arxiv)
        output_rows.append(
            {
                "paper_id": paper_id,
                "normalized_id": normalized_id,
                "paper_title": record.get("paper_title", ""),
                "paper_doi": doi,
                "paper_arxiv": arxiv,
                # Canonical on-disk file name for this paper's PDF.
                "preferred_pdf_name": f"{normalized_id}.pdf",
                # Alternative names the PDF may already be stored under.
                "alias_pdf_names": _safe_join(
                    f"{alias}.pdf" for alias in aliases
                ),
                "url_semanticscholar": _semanticscholar_url(paper_id),
                "url_doi": _doi_url(doi, paper_id),
                "url_arxiv": _arxiv_url(arxiv, paper_id),
                "url_acl": _acl_url(paper_id),
            },
        )
    if args.output_csv:
        args.output_csv.parent.mkdir(parents=True, exist_ok=True)
        with args.output_csv.open("w", encoding="utf-8", newline="") as handle:
            # All rows share the same keys, so the first row's keys serve as
            # the header. NOTE(review): with zero rows this writes an empty
            # file with no header — confirm downstream readers accept that.
            fieldnames = list(output_rows[0].keys()) if output_rows else []
            writer = csv.DictWriter(handle, fieldnames=fieldnames)
            writer.writeheader()
            for row in output_rows:
                writer.writerow(row)
        print(f"Wrote {len(output_rows)} rows to {args.output_csv}")
    if args.output_md:
        lines = ["# DefExtra required PDFs", ""]
        for row in output_rows:
            line = f"- {row['paper_id']} — {row['paper_title']}"
            # Append only the links that were actually derivable.
            links = [
                row["url_semanticscholar"],
                row["url_doi"],
                row["url_arxiv"],
                row["url_acl"],
            ]
            links = [link for link in links if link]
            if links:
                line += " (" + ", ".join(links) + ")"
            lines.append(line)
        args.output_md.parent.mkdir(parents=True, exist_ok=True)
        args.output_md.write_text("\n".join(lines) + "\n", encoding="utf-8")
        print(f"Wrote {len(output_rows)} rows to {args.output_md}")
    if args.limit > 0:
        # Stdout preview: id, target file name, and the first available link.
        for row in output_rows[: args.limit]:
            print(
                f"{row['paper_id']} | {row['preferred_pdf_name']} | "
                f"{row['url_semanticscholar'] or row['url_doi'] or row['url_arxiv'] or row['url_acl']}",
            )
# Script entry point: run the CLI only when executed directly, not on import.
if __name__ == "__main__":
    main()