MRiabov committed on
Commit
fd67ea2
·
1 Parent(s): 24c4174

GraphQL for top1000 repos scraping

Browse files
.windsurf/rules/creating-cli-tooling.md CHANGED
@@ -3,4 +3,4 @@ trigger: model_decision
3
  description: When creating CLI tooling
4
  ---
5
 
6
- CLI tooling is good, but I prefer configuring through YAML. When creating CLI tooling, create a "config.yaml" or "[name_of_the_code_function]_config.yaml", and put all config there. Secrets should be put in a `.env` file, and loaded with `dotenv.load_dotenv()`.
 
3
  description: When creating CLI tooling
4
  ---
5
 
6
+ I prefer configuring scripts through YAML. When creating CLI tooling, create a "[module_name]_config.yaml", and put all config there. Secrets should be put in a `.env` file, and loaded with `dotenv.load_dotenv()`.
data_collection_utils/github_api_utils.py CHANGED
@@ -328,10 +328,15 @@ def fetch_repos_metadata_graphql(
328
  Batch-fetch repository metadata via GitHub GraphQL for a list of (owner, repo) pairs.
329
 
330
  Returns a mapping from "owner/repo" -> {
 
331
  "description": str | None,
332
  "stars": int | None,
333
  "default_branch": str | None,
334
  "last_commit_date": str | None,
 
 
 
 
335
  }
336
 
337
  Notes:
@@ -347,35 +352,40 @@ def fetch_repos_metadata_graphql(
347
  # repo_0: repository(owner: $owner0, name: $name0) { ...fields }
348
  # }
349
  vars: Dict[str, Any] = {}
350
- fields = []
351
  for i, (owner, name) in enumerate(pairs):
352
  vars[f"owner{i}"] = owner
353
  vars[f"name{i}"] = name
354
  alias = f"repo_{i}"
355
- fields.append(
356
- f"""
357
- {alias}: repository(owner: $owner{i}, name: $name{i}) {{
358
- description
359
- stargazerCount
360
- pushedAt
361
- defaultBranchRef {{
362
- name
363
- target {{
364
- ... on Commit {{
365
- history(first: 1) {{
366
- nodes {{
367
- committedDate
368
- }}
369
- }}
370
- }}
371
- }}
372
- }}
373
- }}
374
- """.strip().format(alias=alias)
 
 
 
 
375
  )
 
376
  # Assemble query
377
  var_decls = " ".join([f"$owner{i}: String!, $name{i}: String!" for i in range(len(pairs))])
378
- query = f"query({var_decls}) {{\n" + "\n".join(fields) + "\n}"
379
 
380
  payload = {"query": query, "variables": vars}
381
  max_retries = 3
@@ -428,11 +438,39 @@ def fetch_repos_metadata_graphql(
428
  r = repos.get(alias)
429
  key = f"{owner}/{name}"
430
  if not isinstance(r, dict):
431
- out[key] = {"description": None, "stars": None, "default_branch": None, "last_commit_date": None}
 
 
 
 
 
 
 
 
 
 
432
  continue
 
433
  desc = r.get("description")
434
  stars = r.get("stargazerCount")
435
  pushed_at = r.get("pushedAt")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
436
  dbr = r.get("defaultBranchRef") or {}
437
  default_branch = dbr.get("name") if isinstance(dbr, dict) else None
438
  last_commit_date = pushed_at
@@ -445,9 +483,14 @@ def fetch_repos_metadata_graphql(
445
  if isinstance(lcd, str):
446
  last_commit_date = lcd
447
  out[key] = {
 
448
  "description": desc if isinstance(desc, str) else None,
449
  "stars": stars,
450
  "default_branch": default_branch,
451
  "last_commit_date": last_commit_date,
 
 
 
 
452
  }
453
  return out
 
328
  Batch-fetch repository metadata via GitHub GraphQL for a list of (owner, repo) pairs.
329
 
330
  Returns a mapping from "owner/repo" -> {
331
+ "name": str | None,
332
  "description": str | None,
333
  "stars": int | None,
334
  "default_branch": str | None,
335
  "last_commit_date": str | None,
336
+ "language": str | None,
337
+ "topics": List[str],
338
+ "is_fork": bool | None,
339
+ "parent_url": str | None,
340
  }
341
 
342
  Notes:
 
352
  # repo_0: repository(owner: $owner0, name: $name0) { ...fields }
353
  # }
354
  vars: Dict[str, Any] = {}
355
+ fields: List[str] = []
356
  for i, (owner, name) in enumerate(pairs):
357
  vars[f"owner{i}"] = owner
358
  vars[f"name{i}"] = name
359
  alias = f"repo_{i}"
360
+ header = f"{alias}: repository(owner: $owner{i}, name: $name{i}) "
361
+ body = (
362
+ "{\n"
363
+ " name\n"
364
+ " description\n"
365
+ " stargazerCount\n"
366
+ " pushedAt\n"
367
+ " isFork\n"
368
+ " parent { url nameWithOwner }\n"
369
+ " primaryLanguage { name }\n"
370
+ " repositoryTopics(first: 20) { nodes { topic { name } } }\n"
371
+ " defaultBranchRef {\n"
372
+ " name\n"
373
+ " target {\n"
374
+ " ... on Commit {\n"
375
+ " history(first: 1) {\n"
376
+ " nodes {\n"
377
+ " committedDate\n"
378
+ " }\n"
379
+ " }\n"
380
+ " }\n"
381
+ " }\n"
382
+ " }\n"
383
+ "}\n"
384
  )
385
+ fields.append(header + body)
386
  # Assemble query
387
  var_decls = " ".join([f"$owner{i}: String!, $name{i}: String!" for i in range(len(pairs))])
388
+ query = "query(" + var_decls + ") {\n" + "\n".join(fields) + "\n}"
389
 
390
  payload = {"query": query, "variables": vars}
391
  max_retries = 3
 
438
  r = repos.get(alias)
439
  key = f"{owner}/{name}"
440
  if not isinstance(r, dict):
441
+ out[key] = {
442
+ "name": None,
443
+ "description": None,
444
+ "stars": None,
445
+ "default_branch": None,
446
+ "last_commit_date": None,
447
+ "language": None,
448
+ "topics": [],
449
+ "is_fork": None,
450
+ "parent_url": None,
451
+ }
452
  continue
453
+ repo_name = r.get("name")
454
  desc = r.get("description")
455
  stars = r.get("stargazerCount")
456
  pushed_at = r.get("pushedAt")
457
+ is_fork = r.get("isFork")
458
+ parent = r.get("parent") or {}
459
+ parent_url = parent.get("url") if isinstance(parent, dict) else None
460
+ lang_obj = r.get("primaryLanguage") or {}
461
+ language = lang_obj.get("name") if isinstance(lang_obj, dict) else None
462
+ topics_list: List[str] = []
463
+ rt = r.get("repositoryTopics") or {}
464
+ nodes = rt.get("nodes") if isinstance(rt, dict) else None
465
+ if isinstance(nodes, list):
466
+ for node in nodes:
467
+ t = None
468
+ if isinstance(node, dict):
469
+ topic = node.get("topic")
470
+ if isinstance(topic, dict):
471
+ t = topic.get("name")
472
+ if isinstance(t, str):
473
+ topics_list.append(t)
474
  dbr = r.get("defaultBranchRef") or {}
475
  default_branch = dbr.get("name") if isinstance(dbr, dict) else None
476
  last_commit_date = pushed_at
 
483
  if isinstance(lcd, str):
484
  last_commit_date = lcd
485
  out[key] = {
486
+ "name": repo_name if isinstance(repo_name, str) else None,
487
  "description": desc if isinstance(desc, str) else None,
488
  "stars": stars,
489
  "default_branch": default_branch,
490
  "last_commit_date": last_commit_date,
491
+ "language": language if isinstance(language, str) else None,
492
+ "topics": topics_list,
493
+ "is_fork": is_fork if isinstance(is_fork, bool) else None,
494
+ "parent_url": parent_url if isinstance(parent_url, str) else None,
495
  }
496
  return out
data_collection_utils/top_1000_repos.py CHANGED
@@ -2,10 +2,10 @@ from pathlib import Path
2
  from playwright.sync_api import sync_playwright
3
  from urllib.parse import urlparse
4
  from typing import List, Optional, Dict, Any
5
- from concurrent.futures import ThreadPoolExecutor, as_completed
6
  import argparse
7
  import pandas as pd
8
- from github_api_utils import get_repo_info
 
9
 
10
  URL = "https://top1000repos.com/"
11
 
@@ -54,40 +54,43 @@ def normalize_github_repo_links(links: List[str]) -> List[str]:
54
  return sorted(repos)
55
 
56
 
57
- def resolve_to_original_repo(url: str) -> Optional[str]:
58
- """Return the canonical URL of the original (non-fork) repository.
59
 
60
- If the input URL points to a fork, map it to its parent/original repository.
61
- If the repository metadata cannot be retrieved, return None.
62
- """
63
  p = urlparse(url)
64
  parts = [part for part in p.path.split("/") if part]
65
  if len(parts) < 2:
66
  return None
67
- owner, repo = parts[0], parts[1]
68
- info = get_repo_info(owner, repo)
69
- if not info:
70
- return None
71
- if info.get("fork"):
72
- parent = info.get("parent") or {}
73
- html_url = parent.get("html_url")
74
- if not html_url:
75
- return None
76
- return canonical_repo_url(html_url)
77
- return canonical_repo_url(url)
78
 
79
 
80
- def map_to_original_repos(urls: List[str]) -> List[str]:
81
- originals = set()
 
 
 
 
 
82
  for u in urls:
83
- o = resolve_to_original_repo(u)
84
- if o is not None:
85
- originals.add(o)
86
- else:
87
- # keep the canonical URL if we couldn't resolve
88
- cu = canonical_repo_url(u) or u
89
- originals.add(cu)
90
- return sorted(originals)
 
 
 
 
 
 
 
 
 
 
91
 
92
 
93
  def main() -> None:
@@ -99,6 +102,9 @@ def main() -> None:
99
  )
100
  args = ap.parse_args()
101
 
 
 
 
102
  project_root = Path(__file__).resolve().parents[1]
103
  out_html = Path(__file__).with_name(
104
  "Top 1000 GitHub repositories, updated daily, all on one page..html"
@@ -147,36 +153,47 @@ def main() -> None:
147
  repo_links = normalize_github_repo_links(links)
148
 
149
  # Optionally map any fork links to their original repositories and deduplicate
150
- repo_links = map_to_original_repos(repo_links)
151
 
152
  # Persist github_links.txt for visibility/debug (even if not used downstream)
153
  with out_links.open("w", encoding="utf-8") as f:
154
  f.write("\n".join(repo_links) + "\n")
155
 
156
- # Enrich via GitHub API concurrently
157
- def _one(url: str) -> Dict[str, Any]:
158
- owner_repo = urlparse(url).path.strip("/").split("/")[:2]
159
- owner, repo = owner_repo[0], owner_repo[1]
160
- info = get_repo_info(owner, repo) or {}
161
- name = info.get("name") or repo
162
- desc = info.get("description") or None
163
- stars = info.get("stargazers_count")
164
- return {
165
- "name": name,
166
- "link": f"https://github.com/{owner}/{repo}",
167
- "description": desc,
168
- "stars": int(stars) if isinstance(stars, int) else None,
169
- }
 
 
170
 
171
  rows: List[Dict[str, Any]] = []
172
- with ThreadPoolExecutor(max_workers=args.workers) as ex:
173
- futs = [ex.submit(_one, u) for u in repo_links]
174
- for fut in as_completed(futs):
175
- try:
176
- rows.append(fut.result())
177
- except Exception:
178
- # Skip on error; we aim for stability over strict completeness
179
- continue
 
 
 
 
 
 
 
 
 
180
 
181
  df = pd.DataFrame(rows)
182
  df.to_parquet(out_parquet, index=False)
 
2
  from playwright.sync_api import sync_playwright
3
  from urllib.parse import urlparse
4
  from typing import List, Optional, Dict, Any
 
5
  import argparse
6
  import pandas as pd
7
+ from github_api_utils import fetch_repos_metadata_graphql
8
+ from dotenv import load_dotenv
9
 
10
  URL = "https://top1000repos.com/"
11
 
 
54
  return sorted(repos)
55
 
56
 
57
+ # removed: resolve_to_original_repo (replaced by GraphQL-based mapping)
 
58
 
59
+
60
+ def parse_owner_repo(url: str) -> Optional[tuple[str, str]]:
 
61
  p = urlparse(url)
62
  parts = [part for part in p.path.split("/") if part]
63
  if len(parts) < 2:
64
  return None
65
+ return parts[0], parts[1]
 
 
 
 
 
 
 
 
 
 
66
 
67
 
68
+ def map_to_original_repos_graphql(urls: List[str]) -> List[str]:
69
+ """Resolve forks to their parent repos using a single round of GraphQL metadata.
70
+
71
+ - For each input repo URL, if it's a fork and parent_url is available, map to parent.
72
+ - Returns the sorted unique list of canonical GitHub URLs.
73
+ """
74
+ pairs: List[tuple[str, str]] = []
75
  for u in urls:
76
+ pr = parse_owner_repo(u)
77
+ if pr is None:
78
+ continue
79
+ pairs.append(pr)
80
+ # Batch query
81
+ meta = fetch_repos_metadata_graphql(pairs)
82
+ out: set[str] = set()
83
+ for (owner, repo) in pairs:
84
+ key = f"{owner}/{repo}"
85
+ m = meta.get(key) or {}
86
+ parent_url = m.get("parent_url")
87
+ if m.get("is_fork") and isinstance(parent_url, str):
88
+ cu = canonical_repo_url(parent_url)
89
+ if cu is not None:
90
+ out.add(cu)
91
+ continue
92
+ out.add(f"https://github.com/{owner}/{repo}")
93
+ return sorted(out)
94
 
95
 
96
  def main() -> None:
 
102
  )
103
  args = ap.parse_args()
104
 
105
+ # Load token from .env for GraphQL
106
+ load_dotenv()
107
+
108
  project_root = Path(__file__).resolve().parents[1]
109
  out_html = Path(__file__).with_name(
110
  "Top 1000 GitHub repositories, updated daily, all on one page..html"
 
153
  repo_links = normalize_github_repo_links(links)
154
 
155
  # Optionally map any fork links to their original repositories and deduplicate
156
+ repo_links = map_to_original_repos_graphql(repo_links)
157
 
158
  # Persist github_links.txt for visibility/debug (even if not used downstream)
159
  with out_links.open("w", encoding="utf-8") as f:
160
  f.write("\n".join(repo_links) + "\n")
161
 
162
+ # Enrich via GraphQL in batches
163
+ pairs: List[tuple[str, str]] = []
164
+ for u in repo_links:
165
+ pr = parse_owner_repo(u)
166
+ if pr is not None:
167
+ pairs.append(pr)
168
+ meta_map: Dict[str, Dict[str, Any]] = {}
169
+ batch_size = args.workers
170
+ for i in range(0, len(pairs), batch_size):
171
+ chunk = pairs[i : i + batch_size]
172
+ try:
173
+ mm = fetch_repos_metadata_graphql(chunk)
174
+ except Exception:
175
+ mm = {}
176
+ if mm:
177
+ meta_map.update(mm)
178
 
179
  rows: List[Dict[str, Any]] = []
180
+ for owner, repo in pairs:
181
+ m = meta_map.get(f"{owner}/{repo}") or {}
182
+ name = m.get("name") or repo
183
+ desc = m.get("description")
184
+ stars = m.get("stars")
185
+ language = m.get("language")
186
+ topics = m.get("topics")
187
+ rows.append(
188
+ {
189
+ "name": name,
190
+ "link": f"https://github.com/{owner}/{repo}",
191
+ "description": desc if isinstance(desc, str) else None,
192
+ "stars": stars if isinstance(stars, int) else None,
193
+ "language": language if isinstance(language, str) else None,
194
+ "topics": topics if isinstance(topics, list) else [],
195
+ }
196
+ )
197
 
198
  df = pd.DataFrame(rows)
199
  df.to_parquet(out_parquet, index=False)