beyarkay committed
Commit 4e8b9a5 · 1 Parent(s): 738c14c

Improve download archive.org scraper

src/download_archive_dot_org.py CHANGED
@@ -1,104 +1,307 @@
 
  # /// script
- # requires-python = ">=3.11"
- # dependencies = ["tqdm", "sqlite3"]
  # ///
- import csv
- import urllib.parse
- import urllib.request
- from pathlib import Path
- from datetime import date, timedelta
  from tqdm import tqdm


- # Create an index of archive.org identifiers of possibly useful text documents.
- # Store all the indices in a sqlite3 DB
- # make it parallel, so we can download faster
- # Avoid the 10k item limit by having as large a range as possible, then
- # reducing the range down until we have less than 10k
-
- #
-
- BASE_URL = "https://archive.org/advancedsearch.php"
- FIELDS = ["creator", "date", "downloads", "identifier", "item_size", "subject", "title"]
- ROWS_PER_PAGE = 1000
- QUERY = f'mediatype:(texts) AND (language:eng OR language:"English") AND (date:[1600-01-01 TO 1949-12-31] OR year:[1600 TO 1949]) AND downloads:[5 TO *]'
- QUERY = 'mediatype:(texts) AND (language:eng OR language:"English") AND (date:[1600-01-01 TO 1949-12-31] OR year:[1600 TO 1950]) AND downloads:[5 TO *] AND format:("Full Text" OR "txt" OR "text")'
-
- OUT_ROOT = Path("data/archive-dot-org/indices")
- BY_YEAR = OUT_ROOT / "by_year"
- BY_DAY = OUT_ROOT / "by_day"
- BY_YEAR.mkdir(parents=True, exist_ok=True)
- BY_DAY.mkdir(parents=True, exist_ok=True)
-
- def parse_csv(body: str) -> list[dict]:
-     lines = body.strip().splitlines()
-     if not lines or lines[0].startswith('<!DOCTYPE html>'):
-         return []
-     reader = csv.DictReader(lines)
-     return list(reader)
-
- def fetch_range(start: str, end: str, pbar=None) -> list[dict]:
-     all_rows = []
-     page = 1
-     while True:

-         if pbar is not None:
-             pbar.set_description(f"Fetching {start} page {page}")
-         params = {
-             'q': QUERY,
-             'fl[]': FIELDS,
-             'sort[]': 'date asc',
-             'rows': str(ROWS_PER_PAGE),
-             'page': str(page),
-             'output': 'csv',
-         }
-         url = BASE_URL + '?' + urllib.parse.urlencode(params, doseq=True)
          try:
-             with urllib.request.urlopen(url) as r:
-                 body = r.read().decode('utf-8')
-         except Exception as e:
-             print(f" Failed {start}–{end} page {page}: {e}")
-             break
-         rows = parse_csv(body)
-         if not rows:
              break
-         all_rows.extend(rows)
-         if len(rows) < ROWS_PER_PAGE:
              break
-         page += 1
-     return all_rows
-
- # 1. Pre-1860: yearly
- pbar = tqdm(range(1600, 1860), desc="Year bins")
- for year in pbar:
-     out_file = BY_YEAR / f"{year}.csv"
-     if out_file.exists():
-         continue
-     pbar.set_description(f"Fetching {year}")
-     rows = fetch_range(f"{year}-01-01", f"{year}-12-31", pbar=pbar)
-     if rows:
-         pbar.set_description(f"Fetching {year} (writing {len(rows)} rows)")
-         with out_file.open("w", newline='', encoding="utf-8") as f:
-             writer = csv.DictWriter(f, fieldnames=FIELDS)
-             writer.writeheader()
-             writer.writerows(rows)
-
- # 2. 1860–1949: daily
- start_date = date(1860, 1, 1)
- end_date = date(1950, 1, 1)
- cur = start_date
- pbar = tqdm(total=(end_date - start_date).days, desc="Daily bins")
- while cur < end_date:
-     day_str = cur.isoformat()
-     out_file = BY_DAY / f"{day_str}.csv"
-     if not out_file.exists():
-         pbar.set_description(f"Fetching {day_str}")
-         rows = fetch_range(day_str, day_str, pbar=pbar)
-         if rows:
-             pbar.set_description(f"Fetching {day_str} (writing {len(rows)} rows)")
-             with out_file.open("w", newline='', encoding="utf-8") as f:
-                 writer = csv.DictWriter(f, fieldnames=FIELDS)
-                 writer.writeheader()
-                 writer.writerows(rows)
-     cur += timedelta(days=1)
-     pbar.update(1)
+ #!/usr/bin/env python3
  # /// script
+ # requires-python = ">=3.8"
+ # dependencies = [
+ #     "requests",
+ #     "tqdm",
+ # ]
  # ///
+
+ from __future__ import annotations
+
+ import contextlib
+ import datetime as dt
+ import json
+ import queue
+ import sqlite3
+ import threading
+ from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple
+ from concurrent.futures import ThreadPoolExecutor, as_completed
+ import time as _time
+
+ import requests
+ from requests import Session
  from tqdm import tqdm


+ def build_query(start_date: dt.date, end_date: dt.date) -> str:
+     assert start_date <= end_date
+     start_iso = start_date.isoformat()
+     end_iso = end_date.isoformat()
+     return (
+         'mediatype:(texts) '
+         'AND (language:eng OR language:"English") '
+         f'AND (date:[{start_iso} TO {end_iso}] OR year:[{start_date.year} TO {end_date.year}]) '
+         # 'AND downloads:[5 TO *] '
+         'AND format:("Full Text" OR "txt" OR "text")'
+     )
+

+ def http_get_json(
+     session: Session,
+     url: str,
+     params: Sequence[Tuple[str, Any]],
+     timeout_seconds: float,
+     retries: int,
+ ) -> Dict[str, Any]:
+     print(f"\nRequesting from {url} {params}")
+     backoff = 1.0
+     for attempt in range(retries + 1):
          try:
+             resp = session.get(url, params=params, timeout=timeout_seconds)
+             resp.raise_for_status()
+             return resp.json()
+         except requests.exceptions.Timeout as e:
+             print(f"Exception: {e}, {url=} {params=}")
+             if attempt == retries:
+                 raise
+             _time.sleep(backoff)
+             backoff *= 2.0
+         except requests.exceptions.ConnectionError as e:
+             print(f"Exception: {e}, {url=} {params=}")
+             if attempt == retries:
+                 raise
+             _time.sleep(backoff)
+             backoff *= 2.0
+         except requests.exceptions.HTTPError as e:
+             print(f"Exception: {e}, {url=} {params=}")
+             code = e.response.status_code if e.response is not None else None
+             if code is not None and 500 <= code < 600 and attempt < retries:
+                 _time.sleep(backoff)
+                 backoff *= 2.0
+                 continue
+             raise
+         except json.JSONDecodeError as e:
+             print(f"Exception: {e}, {url=} {params=}")
+             if attempt == retries:
+                 raise
+             _time.sleep(backoff)
+             backoff *= 2.0
+     raise RuntimeError("unreachable")
+
+
+ def scrape_total_only(
+     session: Session,
+     endpoint: str,
+     query: str,
+     timeout_seconds: float,
+     retries: int,
+ ) -> int:
+     params: List[Tuple[str, Any]] = [
+         ("q", query),
+         ("total_only", "true"),
+         # count is optional when total_only=true; include to be explicit and satisfy min constraint.
+         ("count", 100),
+     ]
+     data = http_get_json(session, endpoint, params, timeout_seconds, retries)
+     total = int(data.get("total", 0))
+     assert total >= 0
+     return total
+
+
+ def month_ranges(start_date: dt.date, end_date: dt.date) -> List[Tuple[dt.date, dt.date]]:
+     assert start_date <= end_date
+     ranges: List[Tuple[dt.date, dt.date]] = []
+     y, m = start_date.year, start_date.month
+     end_ym = (end_date.year, end_date.month)
+     while (y, m) <= end_ym:
+         first = dt.date(y, m, 1)
+         if m == 12:
+             next_month = dt.date(y + 1, 1, 1)
+         else:
+             next_month = dt.date(y, m + 1, 1)
+         last = min(next_month - dt.timedelta(days=1), end_date)
+         if first < start_date:
+             first = start_date
+         ranges.append((first, last))
+         if m == 12:
+             y, m = y + 1, 1
+         else:
+             m += 1
+     return ranges
+
+
+ def ensure_schema(connection: sqlite3.Connection) -> None:
+     connection.execute(
+         """
+         CREATE TABLE IF NOT EXISTS items (
+             identifier TEXT PRIMARY KEY,
+             title TEXT,
+             creator TEXT,
+             date TEXT,
+             downloads INTEGER,
+             item_size INTEGER,
+             subject TEXT
+         )
+         """
+     )
+     connection.commit()
+
+
+ def coerce_str(value: Any) -> Optional[str]:
+     if value is None:
+         return None
+     if isinstance(value, list):
+         return "; ".join(str(x) for x in value if x is not None)
+     return str(value)
+
+
+ def coerce_int(value: Any) -> Optional[int]:
+     if value is None:
+         return None
+     try:
+         return int(value)
+     except (ValueError, TypeError):
+         return None
+
+
+ def docs_to_rows(docs: Iterable[Dict[str, Any]]) -> List[Tuple]:
+     rows: List[Tuple] = []
+     for d in docs:
+         identifier = coerce_str(d.get("identifier"))
+         if not identifier:
+             continue
+         title = coerce_str(d.get("title"))
+         creator = coerce_str(d.get("creator"))
+         date_val = coerce_str(d.get("date"))
+         subject = coerce_str(d.get("subject"))
+         downloads = coerce_int(d.get("downloads"))
+         item_size = coerce_int(d.get("item_size"))
+         rows.append((identifier, title, creator, date_val, downloads, item_size, subject))
+     return rows
+
+
+ def writer_loop(
+     database_path: str,
+     input_queue: "queue.Queue[Optional[List[Tuple]]]",
+     commit_batch_size: int,
+ ) -> None:
+     with contextlib.closing(sqlite3.connect(database_path, timeout=30.0)) as conn:
+         ensure_schema(conn)
+         pending: List[Tuple] = []
+         while True:
+             item = input_queue.get()
+             if item is None:
+                 if pending:
+                     conn.executemany(
+                         "INSERT OR REPLACE INTO items(identifier, title, creator, date, downloads, item_size, subject) VALUES(?,?,?,?,?,?,?)",
+                         pending,
+                     )
+                     conn.commit()
+                 break
+             pending.extend(item)
+             if len(pending) >= commit_batch_size:
+                 conn.executemany(
+                     "INSERT OR REPLACE INTO items(identifier, title, creator, date, downloads, item_size, subject) VALUES(?,?,?,?,?,?,?)",
+                     pending,
+                 )
+                 conn.commit()
+                 pending.clear()
+
+
+ def scrape_month_cursor_loop(
+     session: Session,
+     endpoint: str,
+     query: str,
+     fields: Sequence[str],
+     batch_count: int,
+     timeout_seconds: float,
+     retries: int,
+     rows_out: "queue.Queue[Optional[List[Tuple]]]",
+     items_bar: tqdm,
+ ) -> None:
+     print(f"\nRequesting {batch_count} from {endpoint} {query}")
+     params_base: Dict[str, Any] = {
+         "q": query,
+         "fields": ",".join(fields),
+         "count": batch_count,
+     }
+     params = list(params_base.items())
+     try:
+         data = http_get_json(session, endpoint, params, timeout_seconds, retries)
+     except (requests.exceptions.RequestException, json.JSONDecodeError):
+         return
+     while True:
+         docs = data.get("items", [])
+         if docs:
+             rows = docs_to_rows(docs)
+             if rows:
+                 rows_out.put(rows)
+             items_bar.update(len(docs))
+             # items_bar.set_description(f"items ({docs.get('date')})")
+         cursor = data.get("cursor")
+         if not cursor:
              break
+         params = list(params_base.items()) + [("cursor", cursor)]
+         try:
+             data = http_get_json(session, endpoint, params, timeout_seconds, retries)
+         except (requests.exceptions.RequestException, json.JSONDecodeError):
              break
+
+
+ def run() -> None:
+     database_path = "archive.org.db"
+     endpoint = "https://archive.org/services/search/v1/scrape"
+
+     start_date = dt.date(1881, 1, 1)
+     end_date = dt.date(1881, 1, 2)
+
+     batch_count = 10000  # scraping API requires count >= 100
+     request_timeout_seconds = 20.0
+     request_retries = 5
+     writer_commit_batch_size = 500
+     max_workers = 2
+
+     fields = ["creator", "date", "downloads", "identifier", "item_size", "subject", "title"]
+
+     session = requests.Session()
+
+     # Plan monthly queries and totals
+     monthly_queries: List[Tuple[str, int]] = []
+     months = month_ranges(start_date, end_date)
+     with tqdm(desc="planning-months", total=len(months), unit="month") as plan_bar:
+         for m_start, m_end in months:
+             q = build_query(m_start, m_end)
+             try:
+                 total = scrape_total_only(session, endpoint, q, request_timeout_seconds, request_retries)
+             except (requests.exceptions.RequestException, json.JSONDecodeError):
+                 total = 0
+             monthly_queries.append((q, total))
+             plan_bar.update(1)
+
+     total_items = sum(t for _, t in monthly_queries)
+
+     rows_queue: "queue.Queue[Optional[List[Tuple]]]" = queue.Queue(maxsize=max_workers * 4)
+     writer_thread = threading.Thread(
+         target=writer_loop,
+         args=(database_path, rows_queue, writer_commit_batch_size),
+         daemon=True,
+     )
+     writer_thread.start()
+
+     with tqdm(total=total_items if total_items > 0 else None, desc="items", unit="it") as items_bar, \
+             ThreadPoolExecutor(max_workers=max_workers) as pool:
+
+         futures = []
+         for q, total in monthly_queries:
+             futures.append(
+                 pool.submit(
+                     scrape_month_cursor_loop,
+                     session, endpoint, q, fields, batch_count,
+                     request_timeout_seconds, request_retries,
+                     rows_queue, items_bar,
+                 )
+             )
+         for f in as_completed(futures):
+             try:
+                 # f.get(timeout=None)
+                 f.result()
+             except Exception:
+                 raise
+
+     rows_queue.put(None)
+     writer_thread.join()
+
+
+ if __name__ == "__main__":
+     run()
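
For context on how the rewritten scraper pages through results: the scrape endpoint returns a batch of items plus a cursor, and the client keeps re-requesting with the previous cursor until none is returned. Below is a minimal standalone sketch of that loop; the query string, fields, and count are illustrative, while the endpoint, parameter names, and response keys are the same ones the script above reads.

import requests

ENDPOINT = "https://archive.org/services/search/v1/scrape"
params = {
    "q": 'mediatype:(texts) AND (language:eng OR language:"English")',  # example query
    "fields": "identifier,title",
    "count": 100,  # the script above uses 10000; the API minimum is 100
}

session = requests.Session()
identifiers = []
cursor = None
while True:
    if cursor is not None:
        params["cursor"] = cursor  # resume from where the previous batch ended
    data = session.get(ENDPOINT, params=params, timeout=20).json()
    identifiers.extend(doc.get("identifier") for doc in data.get("items", []))
    cursor = data.get("cursor")
    if not cursor:  # no cursor in the response means this was the last page
        break
print(f"fetched {len(identifiers)} identifiers")
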
src/download_archive_dot_org2.py DELETED
@@ -1,378 +0,0 @@
- #!/usr/bin/env python3
- # /// script
- # requires-python = ">=3.8"
- # dependencies = [
- #     "requests",
- #     "tqdm",
- # ]
- # ///
-
- from __future__ import annotations
-
- import contextlib
- import datetime as dt
- import json
- import math
- import queue
- import sqlite3
- import threading
- from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple
- from concurrent.futures import ThreadPoolExecutor, as_completed
- import time as _time
-
- import requests
- from requests import Session
- from tqdm import tqdm
-
-
- def build_query(start_date: dt.date, end_date: dt.date) -> str:
-     assert start_date <= end_date
-     start_iso = start_date.isoformat()
-     end_iso = end_date.isoformat()
-     return (
-         'mediatype:(texts) '
-         'AND (language:eng OR language:"English") '
-         f'AND (date:[{start_iso} TO {end_iso}] OR year:[{start_date.year} TO {end_date.year}]) '
-         'AND downloads:[5 TO *] '
-         'AND format:("Full Text" OR "txt" OR "text")'
-     )
-
-
- def http_get_json(
-     session: Session,
-     url: str,
-     params: Sequence[Tuple[str, Any]],
-     timeout_seconds: float,
-     retries: int,
- ) -> Dict[str, Any]:
-     print(f"\nFetching {url} {params}")
-     backoff = 1.0
-     for attempt in range(retries + 1):
-         try:
-             resp = session.get(url, params=params, timeout=timeout_seconds)
-             resp.raise_for_status()
-             return resp.json()
-         except requests.exceptions.Timeout as e:
-             if attempt == retries:
-                 raise
-             print(f"\n Timed out requesting {url}, sleeping")
-             _sleep(backoff)
-             backoff *= 2.0
-         except requests.exceptions.ConnectionError as e:
-             if attempt == retries:
-                 raise
-             print(f"\n Connection error requesting {url}, sleeping")
-             _sleep(backoff)
-             backoff *= 2.0
-         except requests.exceptions.HTTPError as e:
-             code = e.response.status_code if e.response is not None else None
-             if code is not None and 500 <= code < 600 and attempt < retries:
-                 print(f"\n HTTP Error, sleeping {url} {params}: {e}")
-                 _sleep(backoff)
-                 backoff *= 2.0
-                 continue
-             raise
-         except json.JSONDecodeError as e:
-             print(f"\n JSON decode error requesting {url}, sleeping")
-             if attempt == retries:
-                 raise
-             _sleep(backoff)
-             backoff *= 2.0
-     raise RuntimeError("unreachable after retry loop")
-
-
- def _sleep(seconds: float) -> None:
-     # Local import to avoid global state
-     _time.sleep(seconds)
-
-
- def probe_count(session: Session, query: str, timeout_seconds: float, retries: int) -> int:
-     url = "https://archive.org/advancedsearch.php"
-     params: List[Tuple[str, Any]] = [
-         ("q", query),
-         ("rows", 0),
-         ("page", 1),
-         ("output", "json"),
-     ]
-     data = http_get_json(session, url, params, timeout_seconds, retries)
-     response = data.get("response", {})
-     num_found = int(response.get("numFound", 0))
-     assert num_found >= 0
-     return num_found
-
-
- def enumerate_pages(num_found: int, rows_per_page: int) -> List[int]:
-     assert rows_per_page > 0
-     if num_found == 0:
-         return []
-     return list(range(1, math.ceil(num_found / rows_per_page) + 1))
-
-
- def fetch_page_docs(
-     session: Session,
-     query: str,
-     page_number: int,
-     rows_per_page: int,
-     timeout_seconds: float,
-     retries: int,
-     fields: Sequence[str],
- ) -> List[Dict[str, Any]]:
-     url = "https://archive.org/advancedsearch.php"
-     params: List[Tuple[str, Any]] = [
-         ("q", query),
-         ("rows", rows_per_page),
-         ("page", page_number),
-         ("output", "json"),
-     ] + [("fl[]", f) for f in fields]
-     data = http_get_json(session, url, params, timeout_seconds, retries)
-     docs = data.get("response", {}).get("docs", [])
-     assert isinstance(docs, list)
-     return docs
-
-
- def month_ranges(start_date: dt.date, end_date: dt.date) -> List[Tuple[dt.date, dt.date]]:
-     assert start_date <= end_date
-     ranges: List[Tuple[dt.date, dt.date]] = []
-     y, m = start_date.year, start_date.month
-     end_ym = (end_date.year, end_date.month)
-     while (y, m) <= end_ym:
-         first = dt.date(y, m, 1)
-         if m == 12:
-             next_month = dt.date(y + 1, 1, 1)
-         else:
-             next_month = dt.date(y, m + 1, 1)
-         last = min(next_month - dt.timedelta(days=1), end_date)
-         if first < start_date:
-             first = start_date
-         ranges.append((first, last))
-         if m == 12:
-             y, m = y + 1, 1
-         else:
-             m += 1
-     return ranges
-
-
- def split_date_range(start_date: dt.date, end_date: dt.date) -> Tuple[Tuple[dt.date, dt.date], Tuple[dt.date, dt.date]]:
-     assert start_date <= end_date
-     if start_date == end_date:
-         raise ValueError("cannot split a single day further")
-     total_days = (end_date - start_date).days
-     mid = start_date + dt.timedelta(days=total_days // 2)
-     left = (start_date, mid)
-     right = (mid + dt.timedelta(days=1), end_date)
-     return left, right
-
-
- def plan_queries_by_date(
-     session: Session,
-     start_date: dt.date,
-     end_date: dt.date,
-     timeout_seconds: float,
-     retries: int,
- ) -> List[Tuple[str, int]]:
-     planned: List[Tuple[str, int]] = []
-     for m_start, m_end in month_ranges(start_date, end_date):
-         _plan_queries_recursive(session, m_start, m_end, planned, timeout_seconds, retries)
-     return planned
-
-
- def _plan_queries_recursive(
-     session: Session,
-     start_date: dt.date,
-     end_date: dt.date,
-     planned: List[Tuple[str, int]],
-     timeout_seconds: float,
-     retries: int,
- ) -> None:
-     q = build_query(start_date, end_date)
-     count = probe_count(session, q, timeout_seconds, retries)
-     if count == 10000 and start_date < end_date:
-         left, right = split_date_range(start_date, end_date)
-         _plan_queries_recursive(session, left[0], left[1], planned, timeout_seconds, retries)
-         _plan_queries_recursive(session, right[0], right[1], planned, timeout_seconds, retries)
-     else:
-         # If single day still 10k, accept loss; advancedsearch caps results.
-         planned.append((q, count))
-
-
- def ensure_schema(connection: sqlite3.Connection) -> None:
-     connection.execute(
-         """
-         CREATE TABLE IF NOT EXISTS items (
-             identifier TEXT PRIMARY KEY,
-             title TEXT,
-             creator TEXT,
-             date TEXT,
-             downloads INTEGER,
-             item_size INTEGER,
-             subject TEXT
-         )
-         """
-     )
-     connection.commit()
-
-
- def coerce_str(value: Any) -> Optional[str]:
-     if value is None:
-         return None
-     if isinstance(value, list):
-         return "; ".join(str(x) for x in value if x is not None)
-     return str(value)
-
-
- def coerce_int(value: Any) -> Optional[int]:
-     if value is None:
-         return None
-     try:
-         return int(value)
-     except (ValueError, TypeError):
-         return None
-
-
- def docs_to_rows(docs: Iterable[Dict[str, Any]]) -> List[Tuple]:
-     rows: List[Tuple] = []
-     for d in docs:
-         identifier = coerce_str(d.get("identifier"))
-         if not identifier:
-             continue
-         title = coerce_str(d.get("title"))
-         creator = coerce_str(d.get("creator"))
-         date_val = coerce_str(d.get("date"))
-         subject = coerce_str(d.get("subject"))
-         downloads = coerce_int(d.get("downloads"))
-         item_size = coerce_int(d.get("item_size"))
-         rows.append((identifier, title, creator, date_val, downloads, item_size, subject))
-     return rows
-
-
- def writer_loop(
-     database_path: str,
-     input_queue: "queue.Queue[Optional[List[Tuple]]]",
-     commit_batch_size: int,
- ) -> None:
-     with contextlib.closing(sqlite3.connect(database_path, timeout=30.0)) as conn:
-         ensure_schema(conn)
-         pending: List[Tuple] = []
-         while True:
-             item = input_queue.get()
-             if item is None:
-                 if pending:
-                     try:
-                         conn.executemany(
-                             "INSERT OR REPLACE INTO items(identifier, title, creator, date, downloads, item_size, subject) VALUES(?,?,?,?,?,?,?)",
-                             pending,
-                         )
-                     except sqlite3.DatabaseError as e:
-                         raise
-                     conn.commit()
-                 break
-             pending.extend(item)
-             if len(pending) >= commit_batch_size:
-                 try:
-                     conn.executemany(
-                         "INSERT OR REPLACE INTO items(identifier, title, creator, date, downloads, item_size, subject) VALUES(?,?,?,?,?,?,?)",
-                         pending,
-                     )
-                 except sqlite3.DatabaseError as e:
-                     raise
-                 conn.commit()
-                 pending.clear()
-
-
- def fetch_all_pages_for_query(
-     session: Session,
-     query: str,
-     count: int,
-     rows_per_page: int,
-     timeout_seconds: float,
-     retries: int,
-     fields: Sequence[str],
-     rows_out: "queue.Queue[Optional[List[Tuple]]]",
-     items_bar: tqdm,
-     pages_bar: tqdm,
- ) -> None:
-     for page in enumerate_pages(count, rows_per_page):
-         try:
-             docs = fetch_page_docs(session, query, page, rows_per_page, timeout_seconds, retries, fields)
-         except requests.exceptions.RequestException as e:
-             # Skip this page; keep going
-             continue
-         except json.JSONDecodeError as e:
-             continue
-         rows = docs_to_rows(docs)
-         if rows:
-             rows_out.put(rows)
-         items_bar.update(len(docs))
-         pages_bar.update(1)
-
-
- def run() -> None:
-     # Configuration (no globals)
-     database_path = "archive.org.db"
-     # start_date = dt.date(1600, 1, 1)
-     # end_date = dt.date(1949, 12, 31)
-
-     start_date = dt.date(1880, 1, 1)
-     end_date = dt.date(1881, 12, 31)
-
-     rows_per_page = 1000
-     request_timeout_seconds = 5.0
-     request_retries = 2
-     writer_commit_batch_size = 500
-
-     fields = ["creator", "date", "downloads", "identifier", "item_size", "subject", "title"]
-
-     session = requests.Session()
-
-     # Plan queries by date with progress
-     planned: List[Tuple[str, int]] = []
-     with tqdm(desc="building recursive queries", total=None, unit=" queries") as plan_bar:
-         for m_start, m_end in month_ranges(start_date, end_date):
-             _plan_queries_recursive(session, m_start, m_end, planned, request_timeout_seconds, request_retries)
-             plan_bar.update(1)
-
-     total_docs_planned = sum(max(0, c) for _, c in planned)
-     total_pages_planned = sum(len(enumerate_pages(c, rows_per_page)) for _, c in planned)
-
-     rows_queue: "queue.Queue[Optional[List[Tuple]]]" = queue.Queue(maxsize=128)
-     writer_thread = threading.Thread(
-         target=writer_loop,
-         args=(database_path, rows_queue, writer_commit_batch_size),
-         daemon=True,
-     )
-     writer_thread.start()
-
-     # Parallel fetch using threads from tqdm via map with chunks
-     # Use per-query parallelism to avoid oversubscribing
-     max_workers = 2
-
-     with tqdm(total=total_docs_planned, desc="items", unit="items") as items_bar, \
-             tqdm(total=total_pages_planned, desc="pages", unit="pages") as pages_bar, \
-             ThreadPoolExecutor(max_workers=max_workers) as pool:
-
-         futures = []
-         for query, count in planned:
-             if count <= 0:
-                 continue
-             futures.append(
-                 pool.submit(
-                     fetch_all_pages_for_query,
-                     session, query, count, rows_per_page,
-                     request_timeout_seconds, request_retries,
-                     fields, rows_queue,
-                     items_bar, pages_bar,
-                 )
-             )
-         for f in as_completed(futures):
-             try:
-                 f.result()
-             except Exception as e:
-                 # Fail fast on unexpected exceptions
-                 raise
-
-     rows_queue.put(None)
-     writer_thread.join()
-
-
- if __name__ == "__main__":
-     run()
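
The deleted script sidestepped the advancedsearch 10k-result cap differently: it probed each month's numFound and recursively halved any date range that hit the cap. A toy sketch of that planning step follows, with the count probe stubbed out (the real script called advancedsearch.php with rows=0 and read numFound, and checked for exactly 10000; the >= comparison and the fake per-day count here are illustrative).

import datetime as dt
from typing import Callable, List, Tuple

def plan(start: dt.date, end: dt.date,
         count: Callable[[dt.date, dt.date], int],
         planned: List[Tuple[dt.date, dt.date, int]]) -> None:
    n = count(start, end)
    if n >= 10_000 and start < end:
        # Cap hit: split the range roughly in half and recurse on both halves.
        mid = start + dt.timedelta(days=(end - start).days // 2)
        plan(start, mid, count, planned)
        plan(mid + dt.timedelta(days=1), end, count, planned)
    else:
        # Small enough (or down to a single day): accept this sub-range.
        planned.append((start, end, n))

def fake_count(s: dt.date, e: dt.date) -> int:
    return ((e - s).days + 1) * 400  # pretend ~400 items per day

out: List[Tuple[dt.date, dt.date, int]] = []
plan(dt.date(1880, 1, 1), dt.date(1880, 12, 31), fake_count, out)
print(len(out), "sub-ranges planned")
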