File size: 2,548 Bytes
1da30d6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
#!/usr/bin/env python3
"""Download 300GB from HuggingFace datasets - FineWeb, RedPajama, etc."""
import os
import json
from datasets import load_dataset
from huggingface_hub import snapshot_download  # NOTE(review): currently unused in this file
import time

# Stop downloading once this many gigabytes are on disk under OUTPUT_DIR.
TARGET_GB = 300
# All JSONL shards are written here; created up front so size checks never fail.
OUTPUT_DIR = "/workspace/scraped_data"
os.makedirs(OUTPUT_DIR, exist_ok=True)

# Best large text datasets on HF
# Each entry is (dataset repo id, config name or None, split or None);
# the __main__ loop consumes these in order until TARGET_GB is reached.
DATASETS = [
    ("HuggingFaceFW/fineweb", "sample-10BT", None),  # ~50GB sample
    ("togethercomputer/RedPajama-Data-V2", "sample", "en_head_middle"),  # ~30GB
    ("allenai/dolma", "v1_6-sample", None),  # ~40GB
    ("cerebras/SlimPajama-627B", None, None),  # Big
]

def get_size_gb():
    """Return the total size, in gigabytes, of all files under OUTPUT_DIR.

    Walks the whole output tree on every call; yields 0.0 when the
    directory is empty or missing (os.walk just produces nothing).
    """
    total_bytes = 0
    for dirpath, _dirnames, filenames in os.walk(OUTPUT_DIR):
        total_bytes += sum(
            os.path.getsize(os.path.join(dirpath, fname)) for fname in filenames
        )
    return total_bytes / 1e9

def download_streaming(name, config, split):
    """Stream a HuggingFace dataset into numbered JSONL shards under OUTPUT_DIR.

    Uses streaming mode so the dataset is never fully loaded into memory.
    Examples are buffered and flushed to disk 10,000 at a time; after each
    flush the on-disk total is checked against TARGET_GB.

    Args:
        name: HF dataset repo id, e.g. "HuggingFaceFW/fineweb".
        config: dataset config name, or None for the default config.
        split: split to stream, or None to default to "train".

    Returns:
        True if TARGET_GB was reached while streaming this dataset,
        False if the stream was exhausted or an error occurred.
    """
    print(f"\n📥 Downloading {name} ({config or 'default'})...")
    # Hoisted out of the loop: shard filename prefix derived from the repo id.
    prefix = name.replace('/', '_')
    try:
        ds = load_dataset(name, config, split=split or "train", streaming=True, trust_remote_code=True)

        shard_num = 0
        batch = []
        batch_size = 10000

        for i, example in enumerate(ds):
            # Different datasets expose text under different keys; fall back
            # to the stringified record if neither common key is present.
            text = example.get("text") or example.get("content") or str(example)
            batch.append({"text": text, "source": name})

            if len(batch) >= batch_size:
                _write_shard(prefix, shard_num, batch)
                batch = []
                shard_num += 1

                size_gb = get_size_gb()
                print(f"  Progress: {size_gb:.1f} GB / {TARGET_GB} GB ({i:,} examples)")

                if size_gb >= TARGET_GB:
                    print(f"✅ Target reached!")
                    return True

        # BUG FIX: the original discarded any trailing partial batch when the
        # stream ended before filling a full shard — flush it here.
        if batch:
            _write_shard(prefix, shard_num, batch)

    except Exception as e:
        # Deliberately broad: one failing dataset must not abort the whole
        # run — the caller just moves on to the next entry in DATASETS.
        print(f"  Error: {e}")
    return False


def _write_shard(prefix, shard_num, batch):
    """Write one batch of {"text", "source"} records to a numbered JSONL shard."""
    outfile = f"{OUTPUT_DIR}/{prefix}_{shard_num:05d}.jsonl"
    with open(outfile, 'w') as f:
        for item in batch:
            f.write(json.dumps(item) + "\n")

if __name__ == "__main__":
    print(f"🚀 Goddess HF Scraper - Target: {TARGET_GB} GB")
    print(f"Output: {OUTPUT_DIR}")
    
    start = time.time()
    
    for name, config, split in DATASETS:
        if get_size_gb() >= TARGET_GB:
            break
        done = download_streaming(name, config, split)
        if done:
            break
    
    elapsed = time.time() - start
    final_size = get_size_gb()
    print(f"\n✨ Done! {final_size:.1f} GB in {elapsed/3600:.1f} hours")