"""

OpenWebText Data Extraction Pipeline

====================================



This module processes compressed OpenWebText dataset files (.xz) in parallel,

extracting text content and building character vocabularies for GPT training.



Features:

- Parallel processing using ProcessPoolExecutor

- 90/10 train/validation split

- Character-level vocabulary extraction

- Windows multiprocessing support



Author: Your Name

Date: September 2025

"""

import os
import lzma
from tqdm import tqdm
from multiprocessing import Pool, cpu_count, freeze_support
import concurrent.futures

def process_file(args):
    """
    Decompress one .xz archive and append its text to an output file.

    Args:
        args (tuple): (directory, filename, output_file, vocab)
            directory (str): folder holding the archive
            filename (str): name of the .xz file to read
            output_file (str): path the decompressed text is appended to
            vocab (set): carried only for interface compatibility; each
                worker process receives a pickled copy, so it is neither
                read nor usefully mutated here

    Returns:
        set: unique characters found in the decompressed text
    """
    # NOTE(review): several worker processes append to the same output
    # file concurrently; very large writes could interleave — confirm
    # this is acceptable for the training corpus.
    directory, filename, output_file, _vocab = args
    source_path = os.path.join(directory, filename)

    with lzma.open(source_path, "rt", encoding="utf-8") as compressed:
        contents = compressed.read()

    with open(output_file, "a", encoding="utf-8") as sink:
        sink.write(contents)

    return set(contents)

def xz_files_in_dir(directory):
    """
    List the .xz files directly inside a directory, sorted by name.

    Sorting matters: ``os.listdir`` returns names in arbitrary,
    platform-dependent order, which previously made the downstream
    90/10 train/validation split differ between runs. A sorted list
    makes the split reproducible while remaining backward-compatible
    (callers still receive a list of .xz filenames).

    Args:
        directory (str): path of the directory to scan

    Returns:
        list: sorted names of plain .xz files (subdirectories whose
            names end in ".xz" are excluded by the isfile check)
    """
    return sorted(
        filename
        for filename in os.listdir(directory)
        if filename.endswith(".xz") and os.path.isfile(os.path.join(directory, filename))
    )

def process_files_in_parallel(files, folder_path, output_file):
    """
    Decompress many .xz files concurrently, appending text to one output file.

    Args:
        files (list): .xz filenames to process
        folder_path (str): directory containing the files
        output_file (str): destination file that workers append text to

    Returns:
        set: union of every character seen across all processed files
    """
    vocab = set()
    with concurrent.futures.ProcessPoolExecutor(max_workers=cpu_count()) as pool:
        # Each task tuple is pickled to its worker, so the `vocab` member is
        # a per-worker copy; the real accumulation happens in this loop.
        tasks = [(folder_path, name, output_file, vocab) for name in files]
        for characters in tqdm(pool.map(process_file, tasks), total=len(files)):
            vocab |= characters
    return vocab

def main():
    """
    Run the OpenWebText extraction pipeline end to end.

    Process flow:
    1. Scan the 'openwebtext' directory for .xz archives
    2. Split the file list 90% training / 10% validation
    3. Decompress each split in parallel, appending text to its output file
    4. Union the per-file character sets into one vocabulary
    5. Write the vocabulary to vocab.txt, one character per line

    Output files:
    - output_train.txt: training text data
    - output_val.txt: validation text data
    - vocab.txt: character vocabulary (one char per line)
    """
    folder_path = "openwebtext"
    output_file_train = "output_train.txt"
    output_file_val = "output_val.txt"
    vocab_file = "vocab.txt"

    # Sort so the 90/10 split is the same on every run —
    # os.listdir (inside xz_files_in_dir) returns arbitrary order.
    files = sorted(xz_files_in_dir(folder_path))
    total_files = len(files)
    split_index = int(total_files * 0.9)  # 90% for training
    files_train = files[:split_index]
    files_val = files[split_index:]

    # Truncate output files up front; the workers open them in append mode.
    open(output_file_train, 'w', encoding="utf-8").close()
    open(output_file_val, 'w', encoding="utf-8").close()

    # Process the training files, then the validation files
    vocab_train = process_files_in_parallel(files_train, folder_path, output_file_train)
    vocab_val = process_files_in_parallel(files_val, folder_path, output_file_val)

    # Combine vocabularies and write to vocab.txt.
    # NOTE(review): if '\n' itself appears in the corpus it is written as an
    # empty-looking entry followed by its separator — any reader of vocab.txt
    # must account for that line.
    vocab = vocab_train.union(vocab_val)
    with open(vocab_file, "w", encoding="utf-8") as vfile:
        for char in sorted(vocab):
            vfile.write(char + '\n')

if __name__ == '__main__':
    # Required on Windows (spawn start method / frozen executables) before
    # any multiprocessing pools start; a harmless no-op elsewhere.
    freeze_support()
    main()