"""
fetch_gh_meta.py

Pipeline step to fetch repository metadata from GitHub using GraphQL and save it as Parquet.
This script is separate from docs scraping so you can pre-filter repos before downloading docs.

Inputs (configured via a YAML file next to this script: fetch_gh_meta_config.yaml):
- input_parquet: list of parquet files, each with a column 'link' containing GitHub repo URLs
- out_parquet: where to write the output metadata parquet (default: ../output/repometa.parquet)
- resume: skip repos already present in the output parquet and append new rows (default: true)
- batch_size: number of repositories to fetch per GraphQL request (default: 20)
- quiet: reduce logging verbosity

Usage:
    uv run data_collection_utils/fetch_gh_meta.py
"""
from __future__ import annotations

import logging
import os
import sys
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
from urllib.parse import urlparse

import pandas as pd
import yaml
from tqdm import tqdm

from github_api_utils import fetch_repos_metadata_graphql, find_readme_file
|

class TqdmLoggingHandler(logging.Handler):
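    """Logging handler that routes records through tqdm.write() so log output
    does not break an active progress bar."""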

    def emit(self, record):
        try:
            msg = self.format(record)
            tqdm.write(msg)
        except Exception:
            sys.stderr.write(str(record.getMessage()) + "\n")


logger = logging.getLogger("fetch_gh_meta")


def setup_logging(quiet: bool = False):
    logger.setLevel(logging.DEBUG)
    logger.propagate = False
    logger.handlers.clear()
    handler = TqdmLoggingHandler()
    handler.setFormatter(logging.Formatter("[%(levelname)s] %(message)s"))
    handler.setLevel(logging.WARNING if quiet else logging.INFO)
    logger.addHandler(handler)


def ensure_github_token() -> str:
    tok = os.getenv("GITHUB_TOKEN")
    assert tok is not None and tok.strip() != "", (
        "GITHUB_TOKEN is required. Export it in your environment or put it in a .env file."
    )
    return tok


def _parse_owner_repo(s: str) -> Optional[Tuple[str, str]]:
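    """Parse a GitHub repo reference into an (owner, repo) tuple.

    Accepts full github.com URLs (optionally ending in .git) or bare 'owner/repo'
    strings; returns None for anything else.
    """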
    s = s.strip()
    if not s:
        return None
    if s.startswith("http://") or s.startswith("https://"):
        p = urlparse(s)
        if p.netloc != "github.com":
            return None
        parts = [part for part in p.path.split("/") if part]
        if len(parts) < 2:
            return None
        owner, repo = parts[0], parts[1]
        if repo.endswith(".git"):
            repo = repo[:-4]
        return owner, repo
    if "/" in s:
        owner, repo = s.split("/", 1)
        return owner, repo
    return None


def main():
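    # Load the optional YAML config sitting next to this script; a missing file means defaults.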
    cfg: Dict[str, Any] = {}
    cfg_path = Path(__file__).with_name("fetch_gh_meta_config.yaml")
    if cfg_path.exists():
        cfg = yaml.safe_load(cfg_path.read_text(encoding="utf-8")) or {}
|
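    # Resolve relative paths in the config against the config file's directory,
    # not the current working directory.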
    def _resolve_cfg_path(val: Optional[str]) -> Optional[str]:
        if val is None:
            return None
        p = Path(val)
        if not p.is_absolute():
            p = (cfg_path.parent / p).resolve()
        return str(p)

    def _resolve_cfg_paths(val) -> List[str]:
        if val is None:
            return []
        if isinstance(val, (list, tuple)):
            return [_resolve_cfg_path(v) for v in val if v is not None]
        return [_resolve_cfg_path(val)]
|
    input_parquet_values = _resolve_cfg_paths(cfg.get("input_parquet"))
    out_parquet_value = _resolve_cfg_path(
        cfg.get("out_parquet", "../output/repometa.parquet")
    )
    resume = bool(cfg.get("resume", True))
    batch_size = int(cfg.get("batch_size", 20))
    quiet = bool(cfg.get("quiet", False))
|
    setup_logging(quiet=quiet)

    ensure_github_token()
|
    assert input_parquet_values, (
        "input_parquet must be configured in fetch_gh_meta_config.yaml"
    )
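    # Collect unique (owner, repo) pairs from every input parquet's 'link' column.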
    pairs: List[Tuple[str, str]] = []
    seen: set[str] = set()
    for pth in input_parquet_values:
        df = pd.read_parquet(pth)
        assert "link" in df.columns, f"Parquet {pth} must contain 'link' column"
        for u in df["link"].tolist():
            s = str(u).strip()
            if not s:
                continue
            parsed = _parse_owner_repo(s)
            if not parsed:
                continue
            owner, repo = parsed
            key = f"{owner}/{repo}"
            if key in seen:
                continue
            seen.add(key)
            pairs.append((owner, repo))
|
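    # Resume: drop repos that already have a row in the existing output parquet.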
    existing_map = {}
    out_path = Path(out_parquet_value)
    if resume and out_path.exists():
        try:
            existing_df = pd.read_parquet(out_path)
            if {"owner", "repo"}.issubset(existing_df.columns):
                existing_map = {
                    f"{o}/{r}": True
                    for o, r in zip(existing_df["owner"], existing_df["repo"])
                }
        except Exception:
            existing_map = {}
    if existing_map:
        pairs = [(o, r) for (o, r) in pairs if f"{o}/{r}" not in existing_map]
    logger.info(
        f"Total unique repos to fetch: {len(pairs)} (resume={'on' if resume else 'off'})"
    )
|
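    # Fetch metadata in GraphQL batches, probing each repo for a README as we go.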
    records: List[Dict[str, Any]] = []
    run_ts = datetime.utcnow().isoformat()
    for i in tqdm(range(0, len(pairs), batch_size), desc="GraphQL batches"):
        batch = pairs[i : i + batch_size]
        meta = fetch_repos_metadata_graphql(batch)
        for owner, repo in batch:
            key = f"{owner}/{repo}"
            m = meta.get(key) or {}
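            # Probe the repo's default branch for a README file.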
            ri = find_readme_file(owner, repo, ref=m.get("default_branch"))
            records.append(
                {
                    "owner": owner,
                    "repo": repo,
                    "link": f"https://github.com/{owner}/{repo}",
                    "name": m.get("name"),
                    "description": m.get("description"),
                    "stars": m.get("stars"),
                    "default_branch": m.get("default_branch"),
                    "last_commit_date": m.get("last_commit_date"),
                    "language": m.get("language"),
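                    # Topics arrive as a list; flatten to a comma-separated string.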
| | "topics": ( |
| | ",".join(m.get("topics", [])) |
| | if isinstance(m.get("topics"), list) |
| | else None |
| | ), |
| | "is_fork": m.get("is_fork"), |
| | "parent_url": m.get("parent_url"), |
| | "updated_at": run_ts, |
| | "readme_found": bool(ri), |
| | "readme_filename": (ri.get("name") if ri else None), |
| | } |
| | ) |
|
    df_out = pd.DataFrame(records)
    out_path = Path(out_parquet_value)
    out_path.parent.mkdir(parents=True, exist_ok=True)
    if resume and out_path.exists():
        try:
            existing_df = pd.read_parquet(out_path)
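            # Older outputs may predate the updated_at column; backfill before concatenating.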
| | if "updated_at" not in existing_df.columns: |
| | existing_df["updated_at"] = None |
| | combined = pd.concat([existing_df, df_out], ignore_index=True) |
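            # Keep the freshest row per owner/repo; new rows win over existing ones.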
            combined = combined.drop_duplicates(subset=["owner", "repo"], keep="last")
            combined.to_parquet(out_path, index=False)
            logger.info(
                f"Appended {len(df_out)} new repos (resume) to {out_path} (total {len(combined)})"
            )
            return
        except Exception:
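            # Could not merge with the existing file; fall through and overwrite it below.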
            pass
    df_out.to_parquet(out_path, index=False)
    logger.info(f"Wrote metadata for {len(df_out)} repos to {out_path}")


if __name__ == "__main__":
    main()