MRiabov committed on
Commit
1d44b3f
·
1 Parent(s): fd67ea2

feat: config to yaml

Browse files
data_collection_utils/github_api_utils.py CHANGED
@@ -323,6 +323,7 @@ def _graphql_headers() -> Dict[str, str]:
323
 
324
  def fetch_repos_metadata_graphql(
325
  pairs: List[tuple[str, str]],
 
326
  ) -> Dict[str, Dict[str, Any]]:
327
  """
328
  Batch-fetch repository metadata via GitHub GraphQL for a list of (owner, repo) pairs.
@@ -367,7 +368,9 @@ def fetch_repos_metadata_graphql(
367
  " isFork\n"
368
  " parent { url nameWithOwner }\n"
369
  " primaryLanguage { name }\n"
370
- " repositoryTopics(first: 20) { nodes { topic { name } } }\n"
 
 
371
  " defaultBranchRef {\n"
372
  " name\n"
373
  " target {\n"
 
323
 
324
  def fetch_repos_metadata_graphql(
325
  pairs: List[tuple[str, str]],
326
+ topics_limit: int = 20,
327
  ) -> Dict[str, Dict[str, Any]]:
328
  """
329
  Batch-fetch repository metadata via GitHub GraphQL for a list of (owner, repo) pairs.
 
368
  " isFork\n"
369
  " parent { url nameWithOwner }\n"
370
  " primaryLanguage { name }\n"
371
+ " repositoryTopics(first: "
372
+ + str(topics_limit)
373
+ + ") { nodes { topic { name } } }\n"
374
  " defaultBranchRef {\n"
375
  " name\n"
376
  " target {\n"
data_collection_utils/parse_gh_docs_config.yaml CHANGED
@@ -7,23 +7,35 @@
7
  # - data_collection_utils/awesome_final_repos.py -> awesome-repos.parquet
8
  # - data_collection_utils/top_1000_repos.py -> top-1000-repos.parquet
9
  input_parquet:
10
- - ./awesome-repos.parquet
11
  - ./top-1000-repos.parquet
12
 
13
  # Output directories/files
14
  outdir: ../output
15
  md_failed: ../md-failed.txt
16
  texts_parquet: ../output/texts.parquet
 
 
17
 
18
  # Concurrency and behavior
19
- workers: 16
20
  dry_run: false
21
- no_fetch: true
22
  quiet: false
 
 
 
 
 
 
 
 
23
 
24
  # Auth
25
- # Path to a file that contains your GitHub token (no quotes, single line)
26
- token_file: ./gh_token.secret
 
 
 
27
 
28
  # Strategies
29
  # Use git sparse-checkout to try to download only documentation folders first.
@@ -34,10 +46,12 @@ prefer_zip: false
34
  # Content selection
35
  # Download only Markdown files from any chosen strategy
36
  only_md: true
 
 
37
 
38
  # Filtering
39
  # Skip repos younger than this many years
40
- min_repo_age_years: 3
41
 
42
  # Language filtering for texts parquet
43
  lang_filter: en
 
7
  # - data_collection_utils/awesome_final_repos.py -> awesome-repos.parquet
8
  # - data_collection_utils/top_1000_repos.py -> top-1000-repos.parquet
9
  input_parquet:
 
10
  - ./top-1000-repos.parquet
11
 
12
  # Output directories/files
13
  outdir: ../output
14
  md_failed: ../md-failed.txt
15
  texts_parquet: ../output/texts.parquet
16
+ # Optional: path to write repository-level metadata parquet
17
+ repometa_parquet: ../output/repometa.parquet
18
 
19
  # Concurrency and behavior
20
+ workers: 1
21
  dry_run: false
22
+ no_fetch: false
23
  quiet: false
24
+ # How often to checkpoint partial outputs (in processed repos)
25
+ checkpoint_every: 50
26
+
27
+ # GraphQL batching and metadata
28
+ # Max repos per GraphQL request; keep modest to avoid cost limits
29
+ graphql_batch_size: 50
30
+ # Number of topics to fetch per repository via GraphQL
31
+ topics_limit: 20
32
 
33
  # Auth
34
+ # Secrets are NOT configured here. Put your GitHub token in a .env file (recommended)
35
+ # or export it in your shell environment. Required env var:
36
+ # GITHUB_TOKEN
37
+ # Example .env:
38
+ # GITHUB_TOKEN=ghp_your_token_here
39
 
40
  # Strategies
41
  # Use git sparse-checkout to try to download only documentation folders first.
 
46
  # Content selection
47
  # Download only Markdown files from any chosen strategy
48
  only_md: true
49
+ # Minimum number of .md files for a repo to be considered useful (otherwise marked low-md-count)
50
+ min_docs_md_count: 10
51
 
52
  # Filtering
53
  # Skip repos younger than this many years
54
+ min_repo_age_years: 0
55
 
56
  # Language filtering for texts parquet
57
  lang_filter: en
data_collection_utils/scrape_gh_docs.py CHANGED
@@ -49,7 +49,6 @@ import yaml
49
  from datetime import datetime, timezone
50
  import logging
51
  import langid # https://github.com/saffsd/langid.py
52
- from dotenv import load_dotenv
53
 
54
 
55
  from github_api_utils import (
@@ -62,7 +61,6 @@ from github_api_utils import (
62
  search_repos,
63
  get_repo_tree_paths,
64
  get_repo_tree_md_paths,
65
- fetch_repos_metadata_graphql,
66
  )
67
  from repo_tree import (
68
  build_tree_from_local_dir,
@@ -235,6 +233,7 @@ def compute_md_failed_for_existing(outdir: Path, md_failed_path: Path):
235
  append_line_threadsafe(
236
  md_failed_path, f"{owner}/{repo} # md-count={md_count}\n"
237
  )
 
238
 
239
 
240
  # === Main logic for a single repo ===
@@ -692,7 +691,6 @@ def process_repo_entry(
692
  )
693
  result["status"] = "docs-not-found"
694
  return result
695
-
696
  if not got_any:
697
  # API-driven path (only if neither prefer_sparse nor prefer_zip succeeded)
698
  repo_json = get_repo_info(owner, repo)
@@ -841,124 +839,6 @@ def process_repo_entry(
841
  return result
842
 
843
 
844
- def _checkpoint_persist(
845
- outdir: Path,
846
- md_failed_path: Path,
847
- results_snapshot: List[Dict[str, Any]],
848
- workers_value: int,
849
- lang_filter_value: Optional[str],
850
- min_text_chars_value: int,
851
- texts_parquet_value: Optional[str],
852
- repometa_parquet_value: Optional[str],
853
- ):
854
- """Persist partial outputs to Parquet files as a checkpoint.
855
-
856
- - texts.parquet: rescanned from current outdir contents
857
- - repometa.parquet: built from current results with GraphQL metadata; docs structures are
858
- derived from local files when available to avoid heavy API usage during checkpoints.
859
- """
860
- # 1) Re-scan outdir for .md files and write texts parquet
861
- repo_dirs = [
862
- d for d in outdir.iterdir() if d.is_dir() and "__" in d.name and not d.name.startswith("tmp_")
863
- ]
864
- md_rows: List[Dict[str, Any]] = []
865
- with concurrent.futures.ThreadPoolExecutor(max_workers=workers_value) as executor:
866
- futures = [
867
- executor.submit(
868
- collect_md_rows_for_repo_dir,
869
- d,
870
- outdir,
871
- lang_filter_value,
872
- min_text_chars_value,
873
- )
874
- for d in repo_dirs
875
- ]
876
- for fut in concurrent.futures.as_completed(futures):
877
- try:
878
- rows = fut.result()
879
- except Exception:
880
- rows = []
881
- if rows:
882
- md_rows.extend(rows)
883
-
884
- texts_parquet_path = (
885
- Path(texts_parquet_value) if texts_parquet_value else (outdir / "texts.parquet")
886
- )
887
- df_txt = pd.DataFrame(md_rows)
888
- texts_parquet_path.parent.mkdir(parents=True, exist_ok=True)
889
- df_txt.to_parquet(texts_parquet_path, index=False)
890
-
891
- # 2) Build repometa (metadata via GraphQL; structures from local if possible)
892
- repometa_rows: List[Dict[str, Any]] = []
893
- # Batch GraphQL metadata
894
- pairs: List[tuple[str, str]] = []
895
- for r in results_snapshot:
896
- o = r.get("owner")
897
- n = r.get("repo")
898
- if not o or not n:
899
- continue
900
- pairs.append((o, n))
901
- meta_map: Dict[str, Dict[str, Any]] = {}
902
- batch_size = 30
903
- for i in range(0, len(pairs), batch_size):
904
- chunk = pairs[i : i + batch_size]
905
- mm = fetch_repos_metadata_graphql(chunk)
906
- if mm:
907
- meta_map.update(mm)
908
-
909
- for res in results_snapshot:
910
- owner = res.get("owner")
911
- repo = res.get("repo")
912
- if not owner or not repo:
913
- continue
914
- meta = meta_map.get(f"{owner}/{repo}") or {}
915
- latest_commit_date = meta.get("last_commit_date")
916
- stars = meta.get("stars")
917
- link = f"https://github.com/{owner}/{repo}"
918
- language = meta.get("language")
919
- topics = meta.get("topics")
920
- docs_found_in = res.get("docs_found_in")
921
-
922
- # Attempt to build structures from local files only (avoid remote API during checkpoint)
923
- docs_tree_json = None
924
- docs_folder_rel = res.get("docs_folder")
925
- if docs_folder_rel:
926
- docs_dir = outdir / docs_folder_rel
927
- if docs_dir.exists() and docs_dir.is_dir():
928
- docs_tree = build_tree_from_local_dir(docs_dir, only_md=True)
929
- docs_tree_json = json.dumps(docs_tree, ensure_ascii=False)
930
-
931
- full_tree_json = None
932
- saved_root = outdir / safe_name(f"{owner}__{repo}")
933
- if saved_root.exists():
934
- full_tree = build_tree_from_local_dir(saved_root, only_md=False)
935
- full_tree_json = json.dumps(full_tree, ensure_ascii=False)
936
-
937
- repometa_rows.append(
938
- {
939
- "latest_commit_date": latest_commit_date,
940
- "name": repo,
941
- "parent_org": owner,
942
- "stars": stars,
943
- "link": link,
944
- "language": language,
945
- "topics": topics,
946
- "docs_found_in": docs_found_in,
947
- "docs_repo_structure": docs_tree_json,
948
- "repo_structure_all_files": full_tree_json,
949
- }
950
- )
951
-
952
- repometa_parquet_path = (
953
- Path(repometa_parquet_value)
954
- if repometa_parquet_value
955
- else (outdir / "repometa.parquet")
956
- )
957
- df_meta = pd.DataFrame(repometa_rows)
958
- repometa_parquet_path.parent.mkdir(parents=True, exist_ok=True)
959
- df_meta.to_parquet(repometa_parquet_path, index=False)
960
-
961
-
962
  # === CLI ===
963
 
964
 
@@ -1009,7 +889,11 @@ def main():
1009
  if val is None:
1010
  return []
1011
  if isinstance(val, (list, tuple)):
1012
- return [_resolve_cfg_path(v) for v in val if v is not None]
 
 
 
 
1013
  return [_resolve_cfg_path(val)]
1014
 
1015
  input_parquet_values = _resolve_cfg_paths(cfg.get("input_parquet"))
@@ -1019,8 +903,6 @@ def main():
1019
 
1020
  # Ensure we have a GitHub token before making any requests (unless no-fetch)
1021
  if not no_fetch_value:
1022
- # Load secrets from .env as per CLI tooling rules
1023
- load_dotenv()
1024
  ensure_github_token(token_file_value)
1025
 
1026
  outdir = Path(outdir_value)
@@ -1035,9 +917,7 @@ def main():
1035
  else:
1036
  lines: List[str] = []
1037
  if not input_parquet_values:
1038
- logger.error(
1039
- "'input_parquet' is required. Configure one or more Parquet files with a 'link' column in parse_gh_docs_config.yaml."
1040
- )
1041
  sys.exit(2)
1042
  # Read repositories from one or more Parquet files; use 'link' column
1043
  seen = set()
@@ -1101,30 +981,8 @@ def main():
1101
  max_workers=workers_value
1102
  ) as executor:
1103
  futures = [executor.submit(_run, lr) for lr in lines]
1104
- processed = 0
1105
  for _ in concurrent.futures.as_completed(futures):
1106
  pbar.update(1)
1107
- processed += 1
1108
- # Periodic checkpoint every 300 processed repos
1109
- if processed % 300 == 0:
1110
- try:
1111
- # Snapshot results safely
1112
- with results_lock:
1113
- results_snapshot = list(results)
1114
- # Persist current outputs (texts + repometa)
1115
- _checkpoint_persist(
1116
- outdir,
1117
- md_failed_path,
1118
- results_snapshot,
1119
- workers_value,
1120
- lang_filter_value,
1121
- min_text_chars_value,
1122
- texts_parquet_value,
1123
- repometa_parquet_value,
1124
- )
1125
- logger.info(f"Checkpoint persisted after {processed} repos")
1126
- except Exception as e:
1127
- logger.warning(f"Checkpoint failed after {processed} repos: {e}")
1128
 
1129
  # In no-fetch mode, compute md-failed from existing directories after (skipped) fetch
1130
  if no_fetch_value:
@@ -1217,49 +1075,22 @@ def main():
1217
  }
1218
  )
1219
  else:
1220
- # Pre-fetch repo metadata via GraphQL in batches to reduce REST calls
1221
- meta_map: Dict[str, Dict[str, Any]] = {}
1222
- pairs_unique: List[tuple[str, str]] = []
1223
- seen_pairs = set()
1224
- for r in results:
1225
- o = r.get("owner")
1226
- n = r.get("repo")
1227
- if not o or not n:
1228
- continue
1229
- key = f"{o}/{n}"
1230
- if key in seen_pairs:
1231
- continue
1232
- seen_pairs.add(key)
1233
- pairs_unique.append((o, n))
1234
-
1235
- if pairs_unique:
1236
- batch_size = 30
1237
- for i in range(0, len(pairs_unique), batch_size):
1238
- chunk = pairs_unique[i : i + batch_size]
1239
- try:
1240
- mm = fetch_repos_metadata_graphql(chunk)
1241
- except Exception as e:
1242
- logger.warning(f"GraphQL metadata fetch failed for chunk starting {i}: {e}")
1243
- mm = {}
1244
- if mm:
1245
- meta_map.update(mm)
1246
-
1247
  for res in results:
1248
  owner = res.get("owner")
1249
  repo = res.get("repo")
1250
  if not owner or not repo:
1251
  continue
1252
  try:
1253
- meta = meta_map.get(f"{owner}/{repo}") or {}
1254
- default_branch = (
1255
- res.get("default_branch")
1256
- or meta.get("default_branch")
1257
- or "main"
1258
  )
1259
- latest_commit_date = meta.get("last_commit_date")
1260
- stars = meta.get("stars")
1261
- language = meta.get("language")
1262
- topics = meta.get("topics")
 
 
1263
  link = f"https://github.com/{owner}/{repo}"
1264
  docs_found_in = res.get("docs_found_in")
1265
 
@@ -1348,8 +1179,6 @@ def main():
1348
  "parent_org": owner,
1349
  "stars": stars,
1350
  "link": link,
1351
- "language": language,
1352
- "topics": topics,
1353
  "docs_found_in": docs_found_in,
1354
  "docs_repo_structure": docs_tree_json,
1355
  "repo_structure_all_files": full_tree_json,
 
49
  from datetime import datetime, timezone
50
  import logging
51
  import langid # https://github.com/saffsd/langid.py
 
52
 
53
 
54
  from github_api_utils import (
 
61
  search_repos,
62
  get_repo_tree_paths,
63
  get_repo_tree_md_paths,
 
64
  )
65
  from repo_tree import (
66
  build_tree_from_local_dir,
 
233
  append_line_threadsafe(
234
  md_failed_path, f"{owner}/{repo} # md-count={md_count}\n"
235
  )
236
+ pbar.update(1)
237
 
238
 
239
  # === Main logic for a single repo ===
 
691
  )
692
  result["status"] = "docs-not-found"
693
  return result
 
694
  if not got_any:
695
  # API-driven path (only if neither prefer_sparse nor prefer_zip succeeded)
696
  repo_json = get_repo_info(owner, repo)
 
839
  return result
840
 
841
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
842
  # === CLI ===
843
 
844
 
 
889
  if val is None:
890
  return []
891
  if isinstance(val, (list, tuple)):
892
+ return [
893
+ _resolve_cfg_path(v)
894
+ for v in val
895
+ if v is not None
896
+ ]
897
  return [_resolve_cfg_path(val)]
898
 
899
  input_parquet_values = _resolve_cfg_paths(cfg.get("input_parquet"))
 
903
 
904
  # Ensure we have a GitHub token before making any requests (unless no-fetch)
905
  if not no_fetch_value:
 
 
906
  ensure_github_token(token_file_value)
907
 
908
  outdir = Path(outdir_value)
 
917
  else:
918
  lines: List[str] = []
919
  if not input_parquet_values:
920
+ logger.error("'input_parquet' is required. Configure one or more Parquet files with a 'link' column in parse_gh_docs_config.yaml.")
 
 
921
  sys.exit(2)
922
  # Read repositories from one or more Parquet files; use 'link' column
923
  seen = set()
 
981
  max_workers=workers_value
982
  ) as executor:
983
  futures = [executor.submit(_run, lr) for lr in lines]
 
984
  for _ in concurrent.futures.as_completed(futures):
985
  pbar.update(1)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
986
 
987
  # In no-fetch mode, compute md-failed from existing directories after (skipped) fetch
988
  if no_fetch_value:
 
1075
  }
1076
  )
1077
  else:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1078
  for res in results:
1079
  owner = res.get("owner")
1080
  repo = res.get("repo")
1081
  if not owner or not repo:
1082
  continue
1083
  try:
1084
+ repo_json = get_repo_info(owner, repo) or {}
1085
+ default_branch = res.get("default_branch") or repo_json.get(
1086
+ "default_branch", "main"
 
 
1087
  )
1088
+ # latest commit date via utility (fallback to pushed_at handled inside)
1089
+ latest_commit_date = get_latest_commit_date(
1090
+ owner, repo, default_branch, repo_json
1091
+ )
1092
+
1093
+ stars = repo_json.get("stargazers_count")
1094
  link = f"https://github.com/{owner}/{repo}"
1095
  docs_found_in = res.get("docs_found_in")
1096
 
 
1179
  "parent_org": owner,
1180
  "stars": stars,
1181
  "link": link,
 
 
1182
  "docs_found_in": docs_found_in,
1183
  "docs_repo_structure": docs_tree_json,
1184
  "repo_structure_all_files": full_tree_json,
data_collection_utils/top_1000_repos.py CHANGED
@@ -2,7 +2,7 @@ from pathlib import Path
2
  from playwright.sync_api import sync_playwright
3
  from urllib.parse import urlparse
4
  from typing import List, Optional, Dict, Any
5
- import argparse
6
  import pandas as pd
7
  from github_api_utils import fetch_repos_metadata_graphql
8
  from dotenv import load_dotenv
@@ -94,26 +94,45 @@ def map_to_original_repos_graphql(urls: List[str]) -> List[str]:
94
 
95
 
96
  def main() -> None:
97
- ap = argparse.ArgumentParser(
98
- description="Fetch Top 1000 repos and enrich via GitHub API"
99
- )
100
- ap.add_argument(
101
- "--workers", type=int, default=16, help="Concurrency for GitHub API requests"
102
- )
103
- args = ap.parse_args()
104
-
105
  # Load token from .env for GraphQL
106
  load_dotenv()
107
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
108
  project_root = Path(__file__).resolve().parents[1]
109
- out_html = Path(__file__).with_name(
110
  "Top 1000 GitHub repositories, updated daily, all on one page..html"
111
  )
112
- out_links = project_root / "github_links.txt"
113
- out_parquet = Path(__file__).with_name("top-1000-repos.parquet")
 
 
 
 
 
 
 
 
 
 
 
 
 
114
 
115
  with sync_playwright() as p:
116
- browser = p.chromium.launch(headless=True)
117
  context = browser.new_context()
118
  page = context.new_page()
119
  page.goto(URL, wait_until="networkidle")
@@ -121,7 +140,7 @@ def main() -> None:
121
  page.wait_for_selector('a[href*="https://github.com/"]', timeout=30000)
122
 
123
  # Auto-scroll to force lazy loading/virtualized list to render all items
124
- def _scroll_all(max_iters: int = 200, pause_ms: int = 300) -> None:
125
  prev_count = 0
126
  stable = 0
127
  for _ in range(max_iters):
@@ -136,7 +155,7 @@ def main() -> None:
136
  stable = 0
137
  prev_count = count
138
  # Stop after several iterations without growth or when clearly above 1000 anchors
139
- if stable >= 10 or prev_count >= 1500:
140
  break
141
 
142
  _scroll_all()
@@ -153,7 +172,8 @@ def main() -> None:
153
  repo_links = normalize_github_repo_links(links)
154
 
155
  # Optionally map any fork links to their original repositories and deduplicate
156
- repo_links = map_to_original_repos_graphql(repo_links)
 
157
 
158
  # Persist github_links.txt for visibility/debug (even if not used downstream)
159
  with out_links.open("w", encoding="utf-8") as f:
@@ -166,11 +186,11 @@ def main() -> None:
166
  if pr is not None:
167
  pairs.append(pr)
168
  meta_map: Dict[str, Dict[str, Any]] = {}
169
- batch_size = args.workers
170
  for i in range(0, len(pairs), batch_size):
171
  chunk = pairs[i : i + batch_size]
172
  try:
173
- mm = fetch_repos_metadata_graphql(chunk)
174
  except Exception:
175
  mm = {}
176
  if mm:
 
2
  from playwright.sync_api import sync_playwright
3
  from urllib.parse import urlparse
4
  from typing import List, Optional, Dict, Any
5
+ import yaml
6
  import pandas as pd
7
  from github_api_utils import fetch_repos_metadata_graphql
8
  from dotenv import load_dotenv
 
94
 
95
 
96
  def main() -> None:
 
 
 
 
 
 
 
 
97
  # Load token from .env for GraphQL
98
  load_dotenv()
99
 
100
+ # Load YAML config next to this script if present
101
+ cfg: Dict[str, Any] = {}
102
+ cfg_path = Path(__file__).with_name("top_1000_repos_config.yaml")
103
+ if cfg_path.exists():
104
+ cfg = yaml.safe_load(cfg_path.read_text(encoding="utf-8")) or {}
105
+
106
+ def _resolve_cfg_path(val: Optional[str]) -> Optional[Path]:
107
+ if val is None:
108
+ return None
109
+ p = Path(val)
110
+ if not p.is_absolute():
111
+ p = (cfg_path.parent / p).resolve()
112
+ return p
113
+
114
  project_root = Path(__file__).resolve().parents[1]
115
+ out_html = _resolve_cfg_path(cfg.get("out_html")) or Path(__file__).with_name(
116
  "Top 1000 GitHub repositories, updated daily, all on one page..html"
117
  )
118
+ out_links = _resolve_cfg_path(cfg.get("out_links")) or (project_root / "github_links.txt")
119
+ out_parquet = _resolve_cfg_path(cfg.get("out_parquet")) or Path(__file__).with_name(
120
+ "top-1000-repos.parquet"
121
+ )
122
+
123
+ headless = bool(cfg.get("headless", True))
124
+ # Scrolling config
125
+ scroll_max_iters = int(cfg.get("scroll_max_iters", 200))
126
+ scroll_pause_ms = int(cfg.get("scroll_pause_ms", 300))
127
+ stable_threshold = int(cfg.get("stable_threshold", 10))
128
+ min_anchors = int(cfg.get("min_anchors", 1500))
129
+ # GraphQL config
130
+ graphql_batch_size = int(cfg.get("graphql_batch_size", 30))
131
+ topics_limit = int(cfg.get("topics_limit", 20))
132
+ fork_resolution = bool(cfg.get("fork_resolution", True))
133
 
134
  with sync_playwright() as p:
135
+ browser = p.chromium.launch(headless=headless)
136
  context = browser.new_context()
137
  page = context.new_page()
138
  page.goto(URL, wait_until="networkidle")
 
140
  page.wait_for_selector('a[href*="https://github.com/"]', timeout=30000)
141
 
142
  # Auto-scroll to force lazy loading/virtualized list to render all items
143
+ def _scroll_all(max_iters: int = scroll_max_iters, pause_ms: int = scroll_pause_ms) -> None:
144
  prev_count = 0
145
  stable = 0
146
  for _ in range(max_iters):
 
155
  stable = 0
156
  prev_count = count
157
  # Stop after several iterations without growth or when clearly above 1000 anchors
158
+ if stable >= stable_threshold or prev_count >= min_anchors:
159
  break
160
 
161
  _scroll_all()
 
172
  repo_links = normalize_github_repo_links(links)
173
 
174
  # Optionally map any fork links to their original repositories and deduplicate
175
+ if fork_resolution:
176
+ repo_links = map_to_original_repos_graphql(repo_links)
177
 
178
  # Persist github_links.txt for visibility/debug (even if not used downstream)
179
  with out_links.open("w", encoding="utf-8") as f:
 
186
  if pr is not None:
187
  pairs.append(pr)
188
  meta_map: Dict[str, Dict[str, Any]] = {}
189
+ batch_size = graphql_batch_size
190
  for i in range(0, len(pairs), batch_size):
191
  chunk = pairs[i : i + batch_size]
192
  try:
193
+ mm = fetch_repos_metadata_graphql(chunk, topics_limit=topics_limit)
194
  except Exception:
195
  mm = {}
196
  if mm:
data_collection_utils/top_1000_repos_config.yaml ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Configuration for top_1000_repos.py (YAML-driven)
2
+ # Secrets are not stored here. Put GITHUB_TOKEN in a .env file or export it in the shell.
3
+
4
+ # Outputs (paths can be absolute or relative to this file)
5
+ out_html: ./Top 1000 GitHub repositories, updated daily, all on one page..html
6
+ out_links: ../github_links.txt
7
+ out_parquet: ./top-1000-repos.parquet
8
+
9
+ # Browser
10
+ headless: true
11
+
12
+ # Scrolling behavior to load all repo links
13
+ scroll_max_iters: 200
14
+ scroll_pause_ms: 300
15
+ stable_threshold: 10
16
+ min_anchors: 1500
17
+
18
+ # GraphQL
19
+ graphql_batch_size: 50
20
+ topics_limit: 20
21
+ fork_resolution: true