#!/usr/bin/env python3
"""
fetch_gh_meta.py
Pipeline step to fetch repository metadata from GitHub using GraphQL and save it as Parquet.
This script is separate from docs scraping so you can pre-filter repos before downloading docs.
Inputs (configured via YAML file next to this script: fetch_gh_meta_config.yaml):
- input_parquet: list of parquet files, each with a column 'link' containing GitHub repo URLs
- out_parquet: where to write the output metadata parquet (default: ../output/repometa.parquet)
- batch_size: number of repositories to fetch per GraphQL request (default: 20)
- quiet: reduce logging verbosity
Usage:
uv run data_collection_utils/fetch_gh_meta.py
"""
from __future__ import annotations

import logging
import os
import sys
from datetime import datetime, timezone
from pathlib import Path
from typing import List, Dict, Any, Optional, Tuple
from urllib.parse import urlparse

import pandas as pd
import yaml
from tqdm import tqdm

from github_api_utils import fetch_repos_metadata_graphql, find_readme_file
class TqdmLoggingHandler(logging.Handler):
    """Logging handler that routes records through ``tqdm.write``.

    Emitting via ``tqdm.write`` keeps log lines from garbling any active
    tqdm progress bars. If formatting or writing fails for any reason, the
    raw message is dumped to stderr as a best-effort fallback.
    """

    def emit(self, record):
        try:
            tqdm.write(self.format(record))
        except Exception:
            # Never let logging itself crash the pipeline run.
            sys.stderr.write(str(record.getMessage()) + "\n")
# Module-level logger; handlers/levels are configured by setup_logging().
logger = logging.getLogger("fetch_gh_meta")
def setup_logging(quiet: bool = False):
    """Configure the module logger to emit through tqdm-aware output.

    Replaces any existing handlers on ``logger`` with a single
    TqdmLoggingHandler. The handler passes records at WARNING level when
    ``quiet`` is true, otherwise at INFO; propagation to the root logger
    is disabled.
    """
    logger.setLevel(logging.DEBUG)
    logger.propagate = False
    logger.handlers.clear()
    tqdm_handler = TqdmLoggingHandler()
    tqdm_handler.setLevel(logging.WARNING if quiet else logging.INFO)
    tqdm_handler.setFormatter(logging.Formatter("[%(levelname)s] %(message)s"))
    logger.addHandler(tqdm_handler)
def ensure_github_token() -> str:
    """Return the GitHub API token from the GITHUB_TOKEN environment variable.

    Raises:
        RuntimeError: if the variable is unset or blank. A real exception is
            raised instead of using ``assert``, which is stripped when Python
            runs with ``-O`` and would silently skip the check.
    """
    tok = os.getenv("GITHUB_TOKEN")
    if tok is None or tok.strip() == "":
        raise RuntimeError(
            "GITHUB_TOKEN is required. Export it in your environment or put it in a .env file."
        )
    return tok
def _parse_owner_repo(s: str) -> Optional[Tuple[str, str]]:
s = s.strip()
if not s:
return None
if s.startswith("http://") or s.startswith("https://"):
p = urlparse(s)
if p.netloc != "github.com":
return None
parts = [part for part in p.path.split("/") if part]
if len(parts) < 2:
return None
owner, repo = parts[0], parts[1]
if repo.endswith(".git"):
repo = repo[:-4]
return owner, repo
if "/" in s:
owner, repo = s.split("/", 1)
return owner, repo
return None
def main():
    """Fetch GitHub metadata for every repo referenced in the input parquet
    files and persist the results to a parquet file.

    Configuration is read from fetch_gh_meta_config.yaml located next to this
    script; there are no CLI arguments. When ``resume`` is enabled and the
    output file already exists, repos present in it are skipped and newly
    fetched rows are appended (deduplicated by owner/repo, newest wins).
    """
    # Load YAML config next to this script if present.
    cfg: Dict[str, Any] = {}
    cfg_path = Path(__file__).with_name("fetch_gh_meta_config.yaml")
    if cfg_path.exists():
        cfg = yaml.safe_load(cfg_path.read_text(encoding="utf-8")) or {}

    def _resolve_cfg_path(val: Optional[str]) -> Optional[str]:
        # Relative paths in the config are resolved against the config's dir.
        if val is None:
            return None
        p = Path(val)
        if not p.is_absolute():
            p = (cfg_path.parent / p).resolve()
        return str(p)

    def _resolve_cfg_paths(val) -> List[str]:
        # Accept either a single path or a list of paths.
        if val is None:
            return []
        if isinstance(val, (list, tuple)):
            return [_resolve_cfg_path(v) for v in val if v is not None]
        return [_resolve_cfg_path(val)]

    input_parquet_values = _resolve_cfg_paths(cfg.get("input_parquet"))
    out_parquet_value = _resolve_cfg_path(
        cfg.get("out_parquet", "../output/repometa.parquet")
    )
    resume = bool(cfg.get("resume", True))
    batch_size = int(cfg.get("batch_size", 20))
    quiet = bool(cfg.get("quiet", False))
    setup_logging(quiet=quiet)
    ensure_github_token()
    if not input_parquet_values:
        # Real exception rather than assert: asserts are stripped under -O.
        raise ValueError(
            "input_parquet must be configured in fetch_gh_meta_config.yaml"
        )

    # Collect unique (owner, repo) pairs across all inputs, preserving
    # first-seen order.
    pairs: List[Tuple[str, str]] = []
    seen: set[str] = set()
    for pth in input_parquet_values:
        df = pd.read_parquet(pth)
        if "link" not in df.columns:
            raise ValueError(f"Parquet {pth} must contain 'link' column")
        for u in df["link"].tolist():
            s = str(u).strip()
            if not s:
                continue
            parsed = _parse_owner_repo(s)
            if not parsed:
                continue
            owner, repo = parsed
            key = f"{owner}/{repo}"
            if key in seen:
                continue
            seen.add(key)
            pairs.append((owner, repo))

    # Resume: if output exists and resume=true, skip already-present repos.
    existing_map = {}
    out_path = Path(out_parquet_value)
    if resume and out_path.exists():
        try:
            existing_df = pd.read_parquet(out_path)
            if {"owner", "repo"}.issubset(existing_df.columns):
                existing_map = {
                    f"{o}/{r}": True
                    for o, r in zip(existing_df["owner"], existing_df["repo"])
                }
        except Exception as exc:
            # Unreadable/corrupt output: log why and refetch everything
            # instead of failing silently.
            logger.warning(f"Could not read existing output {out_path}: {exc}")
            existing_map = {}
    if existing_map:
        pairs = [(o, r) for (o, r) in pairs if f"{o}/{r}" not in existing_map]
    logger.info(
        f"Total unique repos to fetch: {len(pairs)} (resume={'on' if resume else 'off'})"
    )

    # Fetch metadata in batches via GraphQL.
    records: List[Dict[str, Any]] = []
    # Timezone-aware timestamp; datetime.utcnow() is naive and deprecated.
    run_ts = datetime.now(timezone.utc).isoformat()
    for i in tqdm(range(0, len(pairs), batch_size), desc="GraphQL batches"):
        batch = pairs[i : i + batch_size]
        meta = fetch_repos_metadata_graphql(batch)
        for owner, repo in batch:
            key = f"{owner}/{repo}"
            m = meta.get(key) or {}
            # Detect README file at repo root to store filename.
            ri = find_readme_file(owner, repo, ref=m.get("default_branch"))
            records.append(
                {
                    "owner": owner,
                    "repo": repo,
                    "link": f"https://github.com/{owner}/{repo}",
                    "name": m.get("name"),
                    "description": m.get("description"),
                    "stars": m.get("stars"),
                    "default_branch": m.get("default_branch"),
                    "last_commit_date": m.get("last_commit_date"),
                    "language": m.get("language"),
                    # Store topics as a comma-joined string for portability in
                    # parquet; split on "," downstream to recover the list.
                    "topics": (
                        ",".join(m.get("topics", []))
                        if isinstance(m.get("topics"), list)
                        else None
                    ),
                    "is_fork": m.get("is_fork"),
                    "parent_url": m.get("parent_url"),
                    "updated_at": run_ts,
                    "readme_found": bool(ri),
                    "readme_filename": (ri.get("name") if ri else None),
                }
            )

    df_out = pd.DataFrame(records)
    out_path = Path(out_parquet_value)
    out_path.parent.mkdir(parents=True, exist_ok=True)
    if resume and out_path.exists():
        try:
            existing_df = pd.read_parquet(out_path)
            # Ensure updated_at exists on existing_df as well.
            if "updated_at" not in existing_df.columns:
                existing_df["updated_at"] = None
            combined = pd.concat([existing_df, df_out], ignore_index=True)
            # Drop duplicates by owner/repo keeping last (newest fetch).
            combined = combined.drop_duplicates(subset=["owner", "repo"], keep="last")
            combined.to_parquet(out_path, index=False)
            logger.info(
                f"Appended {len(df_out)} new repos (resume) to {out_path} (total {len(combined)})"
            )
            return
        except Exception as exc:
            # If the merge fails, log the reason and fall back to overwriting
            # with only the freshly fetched rows (original best-effort intent).
            logger.warning(f"Resume merge failed ({exc}); overwriting {out_path}")
    df_out.to_parquet(out_path, index=False)
    logger.info(f"Wrote metadata for {len(df_out)} repos to {out_path}")
# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()