MRiabov commited on
Commit
5d305bb
·
1 Parent(s): 1d44b3f

feat: add DuckDB persistence and optimize GraphQL batching for repo scraping

Browse files
.windsurf/rules/executing-python-files.md CHANGED
@@ -2,4 +2,4 @@
2
  trigger: always_on
3
  ---
4
 
5
- When executing python files, use python3 instead of python because that adheres to project's venv. Additionally, if you haven't activated venv yet, you have to activate it or else the execution will fail with module not found exception. For this project, it's venv is in another project's, so activate it by running "source ../documentation-qa/.venv/bin/activate" - do NOT modify this command if you are willing to activate. Using bash -lc won't work, use the exact command.
 
2
  trigger: always_on
3
  ---
4
 
5
+ When executing python files, use python3 instead of python because that adheres to the project's venv. Additionally, if you haven't activated the venv yet, you must activate it first or the execution will fail with a module-not-found exception. For this project, its venv lives outside the project directory, so activate it by running `source ../../doc-venv/bin/activate` - do NOT modify this command when activating. Using bash -lc won't work, use the exact command.
data_collection_utils/github_api_utils.py CHANGED
@@ -27,6 +27,7 @@ _thread_local = threading.local()
27
 
28
  def github_headers() -> Dict[str, str]:
29
  token = os.getenv("GITHUB_TOKEN")
 
30
  h = {"Accept": "application/vnd.github.v3+json", "User-Agent": "docs-scraper/1.0"}
31
  if token:
32
  h["Authorization"] = f"token {token}"
@@ -359,6 +360,13 @@ def fetch_repos_metadata_graphql(
359
  vars[f"name{i}"] = name
360
  alias = f"repo_{i}"
361
  header = f"{alias}: repository(owner: $owner{i}, name: $name{i}) "
 
 
 
 
 
 
 
362
  body = (
363
  "{\n"
364
  " name\n"
@@ -368,9 +376,7 @@ def fetch_repos_metadata_graphql(
368
  " isFork\n"
369
  " parent { url nameWithOwner }\n"
370
  " primaryLanguage { name }\n"
371
- " repositoryTopics(first: "
372
- + str(topics_limit)
373
- + ") { nodes { topic { name } } }\n"
374
  " defaultBranchRef {\n"
375
  " name\n"
376
  " target {\n"
 
27
 
28
  def github_headers() -> Dict[str, str]:
29
  token = os.getenv("GITHUB_TOKEN")
30
+ assert token is not None, "Set the Github token to run this script!"
31
  h = {"Accept": "application/vnd.github.v3+json", "User-Agent": "docs-scraper/1.0"}
32
  if token:
33
  h["Authorization"] = f"token {token}"
 
360
  vars[f"name{i}"] = name
361
  alias = f"repo_{i}"
362
  header = f"{alias}: repository(owner: $owner{i}, name: $name{i}) "
363
+ topics_fragment = (
364
+ " repositoryTopics(first: "
365
+ + str(topics_limit)
366
+ + ") { nodes { topic { name } } }\n"
367
+ if topics_limit and topics_limit > 0
368
+ else ""
369
+ )
370
  body = (
371
  "{\n"
372
  " name\n"
 
376
  " isFork\n"
377
  " parent { url nameWithOwner }\n"
378
  " primaryLanguage { name }\n"
379
+ + topics_fragment +
 
 
380
  " defaultBranchRef {\n"
381
  " name\n"
382
  " target {\n"
data_collection_utils/parse_gh_docs_config.yaml CHANGED
@@ -26,7 +26,7 @@ checkpoint_every: 50
26
 
27
  # GraphQL batching and metadata
28
  # Max repos per GraphQL request; keep modest to avoid cost limits
29
- graphql_batch_size: 50
30
  # Number of topics to fetch per repository via GraphQL
31
  topics_limit: 20
32
 
 
26
 
27
  # GraphQL batching and metadata
28
  # Max repos per GraphQL request; keep modest to avoid cost limits
29
+ graphql_batch_size: 20
30
  # Number of topics to fetch per repository via GraphQL
31
  topics_limit: 20
32
 
data_collection_utils/scrape_gh_docs.py CHANGED
@@ -46,6 +46,7 @@ import json
46
  import pandas as pd
47
  import subprocess
48
  import yaml
 
49
  from datetime import datetime, timezone
50
  import logging
51
  import langid # https://github.com/saffsd/langid.py
@@ -839,6 +840,40 @@ def process_repo_entry(
839
  return result
840
 
841
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
842
  # === CLI ===
843
 
844
 
@@ -875,13 +910,15 @@ def main():
875
  workers_value = int(cfg.get("workers", 4))
876
  texts_parquet_value = _resolve_cfg_path(cfg.get("texts_parquet"))
877
  repometa_parquet_value = _resolve_cfg_path(cfg.get("repometa_parquet"))
 
878
  token_file_value = _resolve_cfg_path(cfg.get("token_file"))
879
  prefer_zip_value = bool(cfg.get("prefer_zip", False))
880
  prefer_sparse_value = bool(cfg.get("prefer_sparse", False))
881
  only_md_value = bool(cfg.get("only_md", False))
882
  min_repo_age_years_value = int(cfg.get("min_repo_age_years", 0))
883
  quiet_value = bool(cfg.get("quiet", False))
884
- no_fetch_value = bool(cfg.get("no_fetch", args.no_fetch))
 
885
  lang_filter_value = cfg.get("lang_filter", "en")
886
  min_text_chars_value = int(cfg.get("min_text_chars", 200))
887
 
@@ -889,11 +926,7 @@ def main():
889
  if val is None:
890
  return []
891
  if isinstance(val, (list, tuple)):
892
- return [
893
- _resolve_cfg_path(v)
894
- for v in val
895
- if v is not None
896
- ]
897
  return [_resolve_cfg_path(val)]
898
 
899
  input_parquet_values = _resolve_cfg_paths(cfg.get("input_parquet"))
@@ -908,6 +941,13 @@ def main():
908
  outdir = Path(outdir_value)
909
  outdir.mkdir(parents=True, exist_ok=True)
910
 
 
 
 
 
 
 
 
911
  md_failed_path = Path(md_failed_value)
912
  # create/empty md_failed file
913
  md_failed_path.write_text("")
@@ -917,7 +957,9 @@ def main():
917
  else:
918
  lines: List[str] = []
919
  if not input_parquet_values:
920
- logger.error("'input_parquet' is required. Configure one or more Parquet files with a 'link' column in parse_gh_docs_config.yaml.")
 
 
921
  sys.exit(2)
922
  # Read repositories from one or more Parquet files; use 'link' column
923
  seen = set()
@@ -937,6 +979,7 @@ def main():
937
  md_failed_lock = threading.Lock()
938
  results_lock = threading.Lock()
939
  results: List[Dict[str, Any]] = []
 
940
 
941
  # Process repositories concurrently
942
  with tqdm(total=len(lines), desc="Repos") as pbar:
@@ -957,6 +1000,43 @@ def main():
957
  if res is not None:
958
  with results_lock:
959
  results.append(res)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
960
  except Exception as e:
961
  logger.error(f"Exception while processing {lr}: {e}")
962
  append_line_threadsafe(
@@ -1019,19 +1099,40 @@ def main():
1019
  md_rows.extend(rows)
1020
  pbar.update(1)
1021
 
1022
- # Save per-file dataset (texts only)
1023
  texts_parquet_path = (
1024
  Path(texts_parquet_value) if texts_parquet_value else (outdir / "texts.parquet")
1025
  )
1026
  try:
1027
- df_txt = pd.DataFrame(md_rows)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1028
  texts_parquet_path.parent.mkdir(parents=True, exist_ok=True)
1029
- df_txt.to_parquet(texts_parquet_path, index=False)
 
 
 
1030
  logger.info(
1031
  f"Wrote per-file dataset to {texts_parquet_path} (rows={len(md_rows)})"
1032
  )
1033
  except Exception as e:
1034
- logger.error(f"Failed to write per-file Parquet to {texts_parquet_path}: {e}")
1035
 
1036
  # Build and save repo metadata dataset
1037
  repometa_rows: List[Dict[str, Any]] = []
@@ -1064,11 +1165,13 @@ def main():
1064
 
1065
  repometa_rows.append(
1066
  {
1067
- "latest_commit_date": None, # unknown without network
1068
  "name": repo,
1069
  "parent_org": owner,
1070
- "stars": None, # unknown without network
1071
  "link": link,
 
 
1072
  "docs_found_in": None,
1073
  "docs_repo_structure": docs_tree_json,
1074
  "repo_structure_all_files": full_tree_json,
@@ -1085,29 +1188,23 @@ def main():
1085
  default_branch = res.get("default_branch") or repo_json.get(
1086
  "default_branch", "main"
1087
  )
1088
- # latest commit date via utility (fallback to pushed_at handled inside)
1089
  latest_commit_date = get_latest_commit_date(
1090
  owner, repo, default_branch, repo_json
1091
  )
1092
-
1093
  stars = repo_json.get("stargazers_count")
1094
  link = f"https://github.com/{owner}/{repo}"
1095
  docs_found_in = res.get("docs_found_in")
1096
 
1097
- # docs repo structure (only .md) via Git Trees API; scope it to the actual container that held the docs
1098
- # Examples:
1099
- # - docker: use repo 'docker/docs' (entire repo)
1100
- # - flutter: use 'flutter/flutter' restricted to the 'docs/' folder
1101
  method = res.get("method")
1102
  docs_tree_json = None
1103
  try:
1104
  docs_src_owner = owner
1105
  docs_src_repo = repo
1106
  docs_src_ref = default_branch
1107
- path_filters: List[str] | None = None # prefix filters
1108
 
1109
  if method in ("org_docs_repo_zip", "search_repo_zip"):
1110
- # docs are in a separate repository; parse from docs_found_in URL
1111
  dfi = res.get("docs_found_in")
1112
  if isinstance(dfi, str) and dfi.startswith("http"):
1113
  u = urlparse(dfi)
@@ -1118,14 +1215,10 @@ def main():
1118
  get_repo_info(docs_src_owner, docs_src_repo) or {}
1119
  )
1120
  docs_src_ref = info.get("default_branch", "main")
1121
- # else: fallback to original owner/repo
1122
- # For full separate docs repos, include all .md files (no path filter)
1123
  path_filters = None
1124
  elif method in ("docs_folder_in_repo", "docs_file_in_repo"):
1125
- # Restrict to top-level docs/ folder
1126
  path_filters = ["docs"]
1127
  elif method in ("sparse_docs", "zip_whole_repo"):
1128
- # Include common doc directory names anywhere in the tree
1129
  path_filters = ["docs", "doc", "documentation"]
1130
 
1131
  md_paths_all = get_repo_tree_md_paths(
@@ -1145,7 +1238,6 @@ def main():
1145
  except Exception:
1146
  pass
1147
  if docs_tree_json is None:
1148
- # Fallback to local docs dir if available
1149
  docs_folder_rel = res.get("docs_folder")
1150
  if docs_folder_rel:
1151
  docs_dir = outdir / docs_folder_rel
@@ -1155,7 +1247,7 @@ def main():
1155
  )
1156
  docs_tree_json = json.dumps(docs_tree, ensure_ascii=False)
1157
 
1158
- # full repo structure via GitHub Tree API; fallback to local saved root if API fails
1159
  full_tree_json = None
1160
  try:
1161
  paths = get_repo_tree_paths(owner, repo, default_branch)
@@ -1179,6 +1271,12 @@ def main():
1179
  "parent_org": owner,
1180
  "stars": stars,
1181
  "link": link,
 
 
 
 
 
 
1182
  "docs_found_in": docs_found_in,
1183
  "docs_repo_structure": docs_tree_json,
1184
  "repo_structure_all_files": full_tree_json,
@@ -1187,22 +1285,49 @@ def main():
1187
  except Exception as e:
1188
  logger.warning(f"Failed to build repometa for {owner}/{repo}: {e}")
1189
 
 
1190
  repometa_parquet_path = (
1191
  Path(repometa_parquet_value)
1192
  if repometa_parquet_value
1193
  else (outdir / "repometa.parquet")
1194
  )
1195
  try:
1196
- df_meta = pd.DataFrame(repometa_rows)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1197
  repometa_parquet_path.parent.mkdir(parents=True, exist_ok=True)
1198
- df_meta.to_parquet(repometa_parquet_path, index=False)
 
 
 
1199
  logger.info(
1200
  f"Wrote repo metadata dataset to {repometa_parquet_path} (rows={len(repometa_rows)})"
1201
  )
1202
  except Exception as e:
1203
- logger.error(
1204
- f"Failed to write repo metadata Parquet to {repometa_parquet_path}: {e}"
1205
- )
 
 
 
 
1206
 
1207
  logger.info("Done. Check output directory and md-failed.txt")
1208
 
 
46
  import pandas as pd
47
  import subprocess
48
  import yaml
49
+ import duckdb
50
  from datetime import datetime, timezone
51
  import logging
52
  import langid # https://github.com/saffsd/langid.py
 
840
  return result
841
 
842
 
843
+ def _init_duckdb(con):
844
+ con.execute(
845
+ """
846
+ CREATE TABLE IF NOT EXISTS texts (
847
+ owner TEXT,
848
+ repo TEXT,
849
+ repo_dir TEXT,
850
+ file_rel_repo TEXT,
851
+ file_rel_outdir TEXT,
852
+ size BIGINT,
853
+ mtime BIGINT,
854
+ lang TEXT,
855
+ content TEXT
856
+ );
857
+ """
858
+ )
859
+ con.execute(
860
+ """
861
+ CREATE TABLE IF NOT EXISTS repometa (
862
+ latest_commit_date TEXT,
863
+ name TEXT,
864
+ parent_org TEXT,
865
+ stars BIGINT,
866
+ link TEXT,
867
+ language TEXT,
868
+ topics TEXT,
869
+ docs_found_in TEXT,
870
+ docs_repo_structure TEXT,
871
+ repo_structure_all_files TEXT
872
+ );
873
+ """
874
+ )
875
+
876
+
877
  # === CLI ===
878
 
879
 
 
910
  workers_value = int(cfg.get("workers", 4))
911
  texts_parquet_value = _resolve_cfg_path(cfg.get("texts_parquet"))
912
  repometa_parquet_value = _resolve_cfg_path(cfg.get("repometa_parquet"))
913
+ duckdb_path_value = _resolve_cfg_path(cfg.get("duckdb_path"))
914
  token_file_value = _resolve_cfg_path(cfg.get("token_file"))
915
  prefer_zip_value = bool(cfg.get("prefer_zip", False))
916
  prefer_sparse_value = bool(cfg.get("prefer_sparse", False))
917
  only_md_value = bool(cfg.get("only_md", False))
918
  min_repo_age_years_value = int(cfg.get("min_repo_age_years", 0))
919
  quiet_value = bool(cfg.get("quiet", False))
920
+ # CLI should override YAML for convenience
921
+ no_fetch_value = bool(args.no_fetch or cfg.get("no_fetch", False))
922
  lang_filter_value = cfg.get("lang_filter", "en")
923
  min_text_chars_value = int(cfg.get("min_text_chars", 200))
924
 
 
926
  if val is None:
927
  return []
928
  if isinstance(val, (list, tuple)):
929
+ return [_resolve_cfg_path(v) for v in val if v is not None]
 
 
 
 
930
  return [_resolve_cfg_path(val)]
931
 
932
  input_parquet_values = _resolve_cfg_paths(cfg.get("input_parquet"))
 
941
  outdir = Path(outdir_value)
942
  outdir.mkdir(parents=True, exist_ok=True)
943
 
944
+ # Initialize DuckDB connection (file-backed)
945
+ duckdb_path = (
946
+ Path(duckdb_path_value) if duckdb_path_value else (outdir / "gh_docs.duckdb")
947
+ )
948
+ con = duckdb.connect(str(duckdb_path))
949
+ _init_duckdb(con)
950
+
951
  md_failed_path = Path(md_failed_value)
952
  # create/empty md_failed file
953
  md_failed_path.write_text("")
 
957
  else:
958
  lines: List[str] = []
959
  if not input_parquet_values:
960
+ logger.error(
961
+ "'input_parquet' is required. Configure one or more Parquet files with a 'link' column in parse_gh_docs_config.yaml."
962
+ )
963
  sys.exit(2)
964
  # Read repositories from one or more Parquet files; use 'link' column
965
  seen = set()
 
979
  md_failed_lock = threading.Lock()
980
  results_lock = threading.Lock()
981
  results: List[Dict[str, Any]] = []
982
+ duckdb_lock = threading.Lock()
983
 
984
  # Process repositories concurrently
985
  with tqdm(total=len(lines), desc="Repos") as pbar:
 
1000
  if res is not None:
1001
  with results_lock:
1002
  results.append(res)
1003
+ # Incremental per-repo texts persistence into DuckDB
1004
+ owner = res.get("owner")
1005
+ repo = res.get("repo")
1006
+ if owner and repo and res.get("docs_found"):
1007
+ d = outdir / safe_name(f"{owner}__{repo}")
1008
+ if d.exists() and d.is_dir():
1009
+ rows_one = collect_md_rows_for_repo_dir(
1010
+ d,
1011
+ outdir,
1012
+ lang_filter_value,
1013
+ min_text_chars_value,
1014
+ )
1015
+ if rows_one:
1016
+ cols = [
1017
+ "owner",
1018
+ "repo",
1019
+ "repo_dir",
1020
+ "file_rel_repo",
1021
+ "file_rel_outdir",
1022
+ "size",
1023
+ "mtime",
1024
+ "lang",
1025
+ "content",
1026
+ ]
1027
+ df_one = pd.DataFrame(rows_one, columns=cols)
1028
+ with duckdb_lock:
1029
+ con.execute("BEGIN")
1030
+ con.execute(
1031
+ "DELETE FROM texts WHERE owner = ? AND repo = ?",
1032
+ [owner, repo],
1033
+ )
1034
+ con.register("df_txt_one", df_one)
1035
+ con.execute(
1036
+ "INSERT INTO texts SELECT * FROM df_txt_one"
1037
+ )
1038
+ con.unregister("df_txt_one")
1039
+ con.execute("COMMIT")
1040
  except Exception as e:
1041
  logger.error(f"Exception while processing {lr}: {e}")
1042
  append_line_threadsafe(
 
1099
  md_rows.extend(rows)
1100
  pbar.update(1)
1101
 
1102
+ # Save per-file dataset (texts) into DuckDB and export Parquet
1103
  texts_parquet_path = (
1104
  Path(texts_parquet_value) if texts_parquet_value else (outdir / "texts.parquet")
1105
  )
1106
  try:
1107
+ cols = [
1108
+ "owner",
1109
+ "repo",
1110
+ "repo_dir",
1111
+ "file_rel_repo",
1112
+ "file_rel_outdir",
1113
+ "size",
1114
+ "mtime",
1115
+ "lang",
1116
+ "content",
1117
+ ]
1118
+ df_txt = pd.DataFrame(md_rows, columns=cols)
1119
+ with duckdb_lock:
1120
+ con.execute("BEGIN")
1121
+ con.execute("DELETE FROM texts")
1122
+ con.register("df_txt_all", df_txt)
1123
+ con.execute("INSERT INTO texts SELECT * FROM df_txt_all")
1124
+ con.unregister("df_txt_all")
1125
+ con.execute("COMMIT")
1126
  texts_parquet_path.parent.mkdir(parents=True, exist_ok=True)
1127
+ con.execute(
1128
+ "COPY (SELECT * FROM texts) TO ? (FORMAT PARQUET)",
1129
+ [str(texts_parquet_path)],
1130
+ )
1131
  logger.info(
1132
  f"Wrote per-file dataset to {texts_parquet_path} (rows={len(md_rows)})"
1133
  )
1134
  except Exception as e:
1135
+ logger.error(f"Failed to persist/export texts: {e}")
1136
 
1137
  # Build and save repo metadata dataset
1138
  repometa_rows: List[Dict[str, Any]] = []
 
1165
 
1166
  repometa_rows.append(
1167
  {
1168
+ "latest_commit_date": None,
1169
  "name": repo,
1170
  "parent_org": owner,
1171
+ "stars": None,
1172
  "link": link,
1173
+ "language": None,
1174
+ "topics": None,
1175
  "docs_found_in": None,
1176
  "docs_repo_structure": docs_tree_json,
1177
  "repo_structure_all_files": full_tree_json,
 
1188
  default_branch = res.get("default_branch") or repo_json.get(
1189
  "default_branch", "main"
1190
  )
 
1191
  latest_commit_date = get_latest_commit_date(
1192
  owner, repo, default_branch, repo_json
1193
  )
 
1194
  stars = repo_json.get("stargazers_count")
1195
  link = f"https://github.com/{owner}/{repo}"
1196
  docs_found_in = res.get("docs_found_in")
1197
 
1198
+ # Build docs tree
 
 
 
1199
  method = res.get("method")
1200
  docs_tree_json = None
1201
  try:
1202
  docs_src_owner = owner
1203
  docs_src_repo = repo
1204
  docs_src_ref = default_branch
1205
+ path_filters: List[str] | None = None
1206
 
1207
  if method in ("org_docs_repo_zip", "search_repo_zip"):
 
1208
  dfi = res.get("docs_found_in")
1209
  if isinstance(dfi, str) and dfi.startswith("http"):
1210
  u = urlparse(dfi)
 
1215
  get_repo_info(docs_src_owner, docs_src_repo) or {}
1216
  )
1217
  docs_src_ref = info.get("default_branch", "main")
 
 
1218
  path_filters = None
1219
  elif method in ("docs_folder_in_repo", "docs_file_in_repo"):
 
1220
  path_filters = ["docs"]
1221
  elif method in ("sparse_docs", "zip_whole_repo"):
 
1222
  path_filters = ["docs", "doc", "documentation"]
1223
 
1224
  md_paths_all = get_repo_tree_md_paths(
 
1238
  except Exception:
1239
  pass
1240
  if docs_tree_json is None:
 
1241
  docs_folder_rel = res.get("docs_folder")
1242
  if docs_folder_rel:
1243
  docs_dir = outdir / docs_folder_rel
 
1247
  )
1248
  docs_tree_json = json.dumps(docs_tree, ensure_ascii=False)
1249
 
1250
+ # Build full tree
1251
  full_tree_json = None
1252
  try:
1253
  paths = get_repo_tree_paths(owner, repo, default_branch)
 
1271
  "parent_org": owner,
1272
  "stars": stars,
1273
  "link": link,
1274
+ "language": repo_json.get("language"),
1275
+ "topics": json.dumps(
1276
+ repo_json.get("topics"), ensure_ascii=False
1277
+ )
1278
+ if repo_json.get("topics") is not None
1279
+ else None,
1280
  "docs_found_in": docs_found_in,
1281
  "docs_repo_structure": docs_tree_json,
1282
  "repo_structure_all_files": full_tree_json,
 
1285
  except Exception as e:
1286
  logger.warning(f"Failed to build repometa for {owner}/{repo}: {e}")
1287
 
1288
+ # Persist repo metadata to DuckDB and export Parquet
1289
  repometa_parquet_path = (
1290
  Path(repometa_parquet_value)
1291
  if repometa_parquet_value
1292
  else (outdir / "repometa.parquet")
1293
  )
1294
  try:
1295
+ cols_meta = [
1296
+ "latest_commit_date",
1297
+ "name",
1298
+ "parent_org",
1299
+ "stars",
1300
+ "link",
1301
+ "language",
1302
+ "topics",
1303
+ "docs_found_in",
1304
+ "docs_repo_structure",
1305
+ "repo_structure_all_files",
1306
+ ]
1307
+ df_meta = pd.DataFrame(repometa_rows, columns=cols_meta)
1308
+ with duckdb_lock:
1309
+ con.execute("BEGIN")
1310
+ con.execute("DELETE FROM repometa")
1311
+ con.register("df_meta_all", df_meta)
1312
+ con.execute("INSERT INTO repometa SELECT * FROM df_meta_all")
1313
+ con.unregister("df_meta_all")
1314
+ con.execute("COMMIT")
1315
  repometa_parquet_path.parent.mkdir(parents=True, exist_ok=True)
1316
+ con.execute(
1317
+ "COPY (SELECT * FROM repometa) TO ? (FORMAT PARQUET)",
1318
+ [str(repometa_parquet_path)],
1319
+ )
1320
  logger.info(
1321
  f"Wrote repo metadata dataset to {repometa_parquet_path} (rows={len(repometa_rows)})"
1322
  )
1323
  except Exception as e:
1324
+ logger.error(f"Failed to persist/export repo metadata: {e}")
1325
+
1326
+ # Close DB connection
1327
+ try:
1328
+ con.close()
1329
+ except Exception:
1330
+ pass
1331
 
1332
  logger.info("Done. Check output directory and md-failed.txt")
1333
 
data_collection_utils/top_1000_repos.py CHANGED
@@ -65,11 +65,14 @@ def parse_owner_repo(url: str) -> Optional[tuple[str, str]]:
65
  return parts[0], parts[1]
66
 
67
 
68
- def map_to_original_repos_graphql(urls: List[str]) -> List[str]:
69
- """Resolve forks to their parent repos using a single round of GraphQL metadata.
 
 
70
 
71
  - For each input repo URL, if it's a fork and parent_url is available, map to parent.
72
  - Returns the sorted unique list of canonical GitHub URLs.
 
73
  """
74
  pairs: List[tuple[str, str]] = []
75
  for u in urls:
@@ -77,10 +80,15 @@ def map_to_original_repos_graphql(urls: List[str]) -> List[str]:
77
  if pr is None:
78
  continue
79
  pairs.append(pr)
80
- # Batch query
81
- meta = fetch_repos_metadata_graphql(pairs)
 
 
 
 
 
82
  out: set[str] = set()
83
- for (owner, repo) in pairs:
84
  key = f"{owner}/{repo}"
85
  m = meta.get(key) or {}
86
  parent_url = m.get("parent_url")
@@ -96,6 +104,7 @@ def map_to_original_repos_graphql(urls: List[str]) -> List[str]:
96
  def main() -> None:
97
  # Load token from .env for GraphQL
98
  load_dotenv()
 
99
 
100
  # Load YAML config next to this script if present
101
  cfg: Dict[str, Any] = {}
@@ -115,7 +124,9 @@ def main() -> None:
115
  out_html = _resolve_cfg_path(cfg.get("out_html")) or Path(__file__).with_name(
116
  "Top 1000 GitHub repositories, updated daily, all on one page..html"
117
  )
118
- out_links = _resolve_cfg_path(cfg.get("out_links")) or (project_root / "github_links.txt")
 
 
119
  out_parquet = _resolve_cfg_path(cfg.get("out_parquet")) or Path(__file__).with_name(
120
  "top-1000-repos.parquet"
121
  )
@@ -131,6 +142,7 @@ def main() -> None:
131
  topics_limit = int(cfg.get("topics_limit", 20))
132
  fork_resolution = bool(cfg.get("fork_resolution", True))
133
 
 
134
  with sync_playwright() as p:
135
  browser = p.chromium.launch(headless=headless)
136
  context = browser.new_context()
@@ -140,7 +152,9 @@ def main() -> None:
140
  page.wait_for_selector('a[href*="https://github.com/"]', timeout=30000)
141
 
142
  # Auto-scroll to force lazy loading/virtualized list to render all items
143
- def _scroll_all(max_iters: int = scroll_max_iters, pause_ms: int = scroll_pause_ms) -> None:
 
 
144
  prev_count = 0
145
  stable = 0
146
  for _ in range(max_iters):
@@ -159,10 +173,12 @@ def main() -> None:
159
  break
160
 
161
  _scroll_all()
 
162
 
163
  # Save rendered HTML
164
  html = page.content()
165
  out_html.write_text(html, encoding="utf-8")
 
166
 
167
  # Extract canonical GitHub repo URLs from the DOM after full scroll
168
  links = page.eval_on_selector_all(
@@ -173,9 +189,14 @@ def main() -> None:
173
 
174
  # Optionally map any fork links to their original repositories and deduplicate
175
  if fork_resolution:
176
- repo_links = map_to_original_repos_graphql(repo_links)
 
 
177
 
178
  # Persist github_links.txt for visibility/debug (even if not used downstream)
 
 
 
179
  with out_links.open("w", encoding="utf-8") as f:
180
  f.write("\n".join(repo_links) + "\n")
181
 
@@ -215,6 +236,9 @@ def main() -> None:
215
  }
216
  )
217
 
 
 
 
218
  df = pd.DataFrame(rows)
219
  df.to_parquet(out_parquet, index=False)
220
  print(f"Wrote HTML to {out_html}")
 
65
  return parts[0], parts[1]
66
 
67
 
68
+ def map_to_original_repos_graphql(
69
+ urls: List[str], *, batch_size: int = 30, topics_limit: int = 0
70
+ ) -> List[str]:
71
+ """Resolve forks to their parent repos using batched GraphQL metadata requests.
72
 
73
  - For each input repo URL, if it's a fork and parent_url is available, map to parent.
74
  - Returns the sorted unique list of canonical GitHub URLs.
75
+ - Queries are sent in chunks of ``batch_size`` to avoid oversized GraphQL payloads.
76
  """
77
  pairs: List[tuple[str, str]] = []
78
  for u in urls:
 
80
  if pr is None:
81
  continue
82
  pairs.append(pr)
83
+ # Batch query to avoid 502s on oversized GraphQL requests
84
+ meta: Dict[str, Dict[str, Any]] = {}
85
+ for i in range(0, len(pairs), batch_size):
86
+ chunk = pairs[i : i + batch_size]
87
+ mm = fetch_repos_metadata_graphql(chunk, topics_limit=topics_limit)
88
+ if mm:
89
+ meta.update(mm)
90
  out: set[str] = set()
91
+ for owner, repo in pairs:
92
  key = f"{owner}/{repo}"
93
  m = meta.get(key) or {}
94
  parent_url = m.get("parent_url")
 
104
  def main() -> None:
105
  # Load token from .env for GraphQL
106
  load_dotenv()
107
+ print("[top_1000_repos] Starting script execution. This may take a minute or so...")
108
 
109
  # Load YAML config next to this script if present
110
  cfg: Dict[str, Any] = {}
 
124
  out_html = _resolve_cfg_path(cfg.get("out_html")) or Path(__file__).with_name(
125
  "Top 1000 GitHub repositories, updated daily, all on one page..html"
126
  )
127
+ out_links = _resolve_cfg_path(cfg.get("out_links")) or (
128
+ project_root / "github_links.txt"
129
+ )
130
  out_parquet = _resolve_cfg_path(cfg.get("out_parquet")) or Path(__file__).with_name(
131
  "top-1000-repos.parquet"
132
  )
 
142
  topics_limit = int(cfg.get("topics_limit", 20))
143
  fork_resolution = bool(cfg.get("fork_resolution", True))
144
 
145
+ print("[top_1000_repos] Launching Playwright browser...")
146
  with sync_playwright() as p:
147
  browser = p.chromium.launch(headless=headless)
148
  context = browser.new_context()
 
152
  page.wait_for_selector('a[href*="https://github.com/"]', timeout=30000)
153
 
154
  # Auto-scroll to force lazy loading/virtualized list to render all items
155
+ def _scroll_all(
156
+ max_iters: int = scroll_max_iters, pause_ms: int = scroll_pause_ms
157
+ ) -> None:
158
  prev_count = 0
159
  stable = 0
160
  for _ in range(max_iters):
 
173
  break
174
 
175
  _scroll_all()
176
+ print("[top_1000_repos] Scrolling completed. Extracting links...")
177
 
178
  # Save rendered HTML
179
  html = page.content()
180
  out_html.write_text(html, encoding="utf-8")
181
+ print(f"[top_1000_repos] Saved rendered HTML to {out_html}")
182
 
183
  # Extract canonical GitHub repo URLs from the DOM after full scroll
184
  links = page.eval_on_selector_all(
 
189
 
190
  # Optionally map any fork links to their original repositories and deduplicate
191
  if fork_resolution:
192
+ repo_links = map_to_original_repos_graphql(
193
+ repo_links, batch_size=graphql_batch_size, topics_limit=0
194
+ )
195
 
196
  # Persist github_links.txt for visibility/debug (even if not used downstream)
197
+ print(
198
+ f"[top_1000_repos] Writing {len(repo_links)} repository links to {out_links}"
199
+ )
200
  with out_links.open("w", encoding="utf-8") as f:
201
  f.write("\n".join(repo_links) + "\n")
202
 
 
236
  }
237
  )
238
 
239
+ print(
240
+ f"[top_1000_repos] Enriching repository metadata via GraphQL (batch size {graphql_batch_size})..."
241
+ )
242
  df = pd.DataFrame(rows)
243
  df.to_parquet(out_parquet, index=False)
244
  print(f"Wrote HTML to {out_html}")