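"""Download Wikimedia Enterprise HTML snapshot chunks and upload them to S3.

Reads the snapshot list from available_snapshots.json, authenticates with a
cached (or freshly obtained) bearer token, and streams each chunk tarball to
S3_DESTINATION via datatrove, skipping chunks that already exist.
"""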
import json
import os
from pathlib import Path

import requests
from tqdm.auto import tqdm
from datatrove.io import get_datafolder

AUTH_BASE = "https://auth.enterprise.wikimedia.com/v1"
API_BASE = "https://api.enterprise.wikimedia.com/v2"

# Configuration constants
S3_DESTINATION = 's3://wikipedia-bucket/wikipedia/raw_html_dumps/'

# Files are relative to this script directory
HERE = Path(__file__).resolve().parent
SNAPSHOTS_PATH = HERE / "available_snapshots.json"
TOKEN_PATH = HERE / "wm_enterprise_token.json"

with SNAPSHOTS_PATH.open() as f:
    snapshots = json.load(f)
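# Expected shape of each snapshot entry (inferred from the fields used below):
#   'identifier':  snapshot ID used in API paths and output filenames
#   'chunks':      list of chunk IDs available for download
#   'is_part_of':  {'identifier': <parent project>}, used by the optional filter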

def load_access_token():
    """Load the access token from the cache file if present; otherwise log in."""
    if TOKEN_PATH.exists():
        try:
            return json.loads(TOKEN_PATH.read_text())["access_token"]
        except (json.JSONDecodeError, KeyError):
            pass  # unreadable cache; fall through to a fresh login
    # Fresh login against the Enterprise auth API. The WME_USERNAME/WME_PASSWORD
    # env var names are this script's convention, not part of the API.
    creds = {"username": os.environ["WME_USERNAME"], "password": os.environ["WME_PASSWORD"]}
    resp = requests.post(f"{AUTH_BASE}/login", json=creds, timeout=30)
    resp.raise_for_status()
    tok = resp.json()
    TOKEN_PATH.write_text(json.dumps(tok))  # cache for future runs
    return tok["access_token"]

def auth_headers():
    """Default request headers carrying the Enterprise bearer token."""
    return {"Authorization": f"Bearer {load_access_token()}", "Accept": "application/json"}

# datatrove's get_datafolder returns an fsspec-backed DataFolder, so the S3
# destination supports exists()/open() much like a local directory.
out_df = get_datafolder(S3_DESTINATION)

for wiki in snapshots:
    # Optional filter: uncomment to skip sister projects and keep only
    # Wikipedia-family wikis.
    # if any(wiki['is_part_of']['identifier'].endswith(y) for y in
    #        ['wikibooks', 'wiktionary', 'wikiquote', 'wikivoyage',
    #         'wikiversity', 'wikisource', 'wikinews']):
    #     continue

    for chunk_id in wiki['chunks']:
        url = f"{API_BASE}/snapshots/{wiki['identifier']}/chunks/{chunk_id}/download"
        filename = f"{wiki['identifier']}_{chunk_id}.json.tar.gz"
        out_path = f"{wiki['identifier']}/{filename}"

        # Resumable: skip chunks already uploaded by a previous run.
        if out_df.exists(out_path):
            continue

        headers = auth_headers()
        headers["Accept"] = "*/*"  # the download endpoint serves a binary tarball

        # Stream the body; chunk tarballs can be large, so never buffer them
        # fully in memory.
        with requests.get(url, headers=headers, stream=True, timeout=600) as r:
            r.raise_for_status()
            
            chunk_size = 20 * 1024 * 1024  # stream/upload in 20 MiB chunks
            total_header = r.headers.get("Content-Length")
            total_bytes = int(total_header) if total_header and total_header.isdigit() else None

            with out_df.open(out_path, "wb") as f, tqdm(
                total=total_bytes,
                unit="B",
                unit_scale=True,
                unit_divisor=1024,
                desc=filename,
                dynamic_ncols=True,
            ) as pbar:
                for chunk in r.iter_content(chunk_size=chunk_size):
                    if chunk:
                        f.write(chunk)
                        pbar.update(len(chunk))

        print(f"Saved → {out_path} ({pbar.n:,} bytes)")
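
# For reference, available_snapshots.json can be produced with a call along
# these lines (a sketch; the response shape is inferred from the fields used above):
#
#   resp = requests.get(f"{API_BASE}/snapshots", headers=auth_headers(), timeout=60)
#   resp.raise_for_status()
#   SNAPSHOTS_PATH.write_text(json.dumps(resp.json()))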