File size: 8,647 Bytes
31a8692
36d9d14
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31a8692
 
36d9d14
 
31a8692
 
36d9d14
 
 
 
 
31a8692
 
36d9d14
31a8692
36d9d14
 
 
31a8692
 
 
36d9d14
 
 
 
 
31a8692
 
 
36d9d14
ff230a0
 
 
 
 
31a8692
 
 
 
 
ff230a0
31a8692
ff230a0
 
 
 
 
 
 
 
31a8692
 
ff230a0
 
 
31a8692
 
ff230a0
 
 
31a8692
 
36d9d14
 
31a8692
 
 
 
36d9d14
 
 
ff230a0
31a8692
 
36d9d14
 
 
 
 
 
 
31a8692
 
 
 
 
 
 
 
 
 
 
36d9d14
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
df2c6bb
 
36d9d14
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
"""Zstd dataset based on Common Crawl."""


import gzip
import json

import datasets
import more_itertools
import numpy as np
import io
import json
import dataclasses
import pyzstd
from tensorflow.io import gfile


logger = datasets.logging.get_logger(__name__)


@dataclasses.dataclass
class State:
    """Resume cursor for ZstdReader: which file, and how far into it.

    Both fields pack into one int so the position can travel through the
    dataset as a single ``file_loc`` value: the low ``_INDEX_BITS`` bits
    hold ``file_index``; the remaining high bits hold ``file_position``.
    """

    file_index: int  # index into the worker's filepath list
    file_position: int  # offset into the decompressed stream of that file

    # Low-bit budget for file_index; bounds the files-per-worker count.
    _INDEX_BITS = 17

    def to_int(self) -> int:
        """Pack (file_index, file_position) into a single int.

        Raises:
            ValueError: if ``file_index`` does not fit in ``_INDEX_BITS``
                bits (previously this silently corrupted the encoding).
        """
        if not 0 <= self.file_index < (1 << self._INDEX_BITS):
            raise ValueError(
                f"file_index {self.file_index} does not fit in "
                f"{self._INDEX_BITS} bits"
            )
        return (self.file_position << self._INDEX_BITS) | self.file_index

    @classmethod
    def from_int(cls, value: int) -> "State":
        """Inverse of :meth:`to_int`."""
        file_position = value >> cls._INDEX_BITS
        file_index = value & ((1 << cls._INDEX_BITS) - 1)
        return cls(file_index, file_position)
    
class ZstdReader:
    """Streams deserialized records out of zstd-compressed shard files.

    The shard list is partitioned across ``num_workers`` workers.  Each
    yielded example is tagged with a packed ``file_loc`` int (see
    ``State``) from which a later run can resume via the ``file_loc``
    constructor argument.
    """

    def __init__(self,
                 filepaths,
                 worker_id,
                 num_workers,
                 file_loc=0,
                 buffer_size=65536,
                 deserialize_func=None,
                 ):
        """
        Args:
            filepaths: all shard paths, before worker partitioning.
            worker_id: this worker's rank, must be < ``num_workers``.
            num_workers: total number of workers sharing the shards.
            file_loc: packed resume state produced by ``State.to_int``.
            buffer_size: chunk size handed to MultiBytesIOReader.
            deserialize_func: converts one raw line (bytes) into an
                example dict; defaults to the module-level JSON
                ``deserialize``.
        """
        self.filepaths = filepaths
        n_shards = len(self.filepaths)
        if n_shards == 0:
            raise ValueError("No shards found")

        logger.info(f"Found {len(self.filepaths)} files")
        self.buffer_size = buffer_size
        self.state = State.from_int(file_loc)
        self.worker_id = worker_id
        self.num_workers = num_workers
        assert worker_id < num_workers, "worker_id must be less than num_workers"
        if n_shards < num_workers:
            # More workers than shards: several workers share one shard and
            # split its lines round-robin inside __iter__.
            # NOTE(review): if num_workers % n_shards != 0,
            # internal_worker_id (= worker_id // n_shards) can equal
            # workers_per_shard (= num_workers // n_shards) and would index
            # past distribute()'s result in __iter__ — confirm callers
            # guarantee num_workers is a multiple of n_shards.
            self.workers_per_shard = num_workers // n_shards
            self.filepaths = [self.filepaths[worker_id % n_shards]]
            self.internal_worker_id = int(worker_id // n_shards)
        else:
            # At least one shard per worker: round-robin the shard list so
            # this worker keeps every num_workers-th path.
            self.workers_per_shard = None
            self.filepaths = list(more_itertools.distribute(num_workers, self.filepaths)[worker_id])
        logger.info(f"Using {len(self.filepaths)} files")

        if deserialize_func is None:
            self.deserialize_func = deserialize
        else:
            self.deserialize_func = deserialize_func

    def __iter__(self):
        """This function returns the examples in the raw (text) form by iterating on all the files."""
        while self.state.file_index<len(self.filepaths):
            filepath = self.filepaths[self.state.file_index]
            with gfile.GFile(filepath, 'rb') as f:
                with pyzstd.ZstdFile(f, 'rb') as ifo:
                    raw_reader = MultiBytesIOReader(ifo,
                                                buffer_size=self.buffer_size,
                                                file_position=self.state.file_position)
                    if self.workers_per_shard is not None:
                        # Shared shard: keep every workers_per_shard-th line.
                        reader = more_itertools.distribute(self.workers_per_shard, raw_reader)[self.internal_worker_id]
                    else:
                        reader = raw_reader
                    for example in reader:
                        # Record the offset just past this line so a resume
                        # continues at the next one.
                        # NOTE(review): when distribute() is in play,
                        # tell() also counts lines buffered for sibling
                        # sub-workers, so a resumed position may skip or
                        # repeat lines — verify the resume path.
                        self.state.file_position = raw_reader.tell()
                        example = self.deserialize_func(example)
                        example["file_loc"]  = self.state.to_int()
                        yield  example
            # Finished this file: advance to the start of the next one.
            self.state.file_position = 0
            self.state.file_index += 1
        # Exhausted every file: reset so a fresh iteration starts over.
        self.state = State.from_int(0)
        



def deserialize(data_point):
    """Decode one raw JSON line (UTF-8 bytes) into a Python object."""
    text = data_point.decode('utf-8')
    return json.loads(text)

class MultiBytesIOReader:
    """Splits a seekable byte stream into newline-delimited records.

    Reads the stream in fixed-size chunks and yields one line at a time
    (without the trailing ``\\n``).  ``tell()`` reports the offset just
    past the last line yielded, so iteration can later be resumed by
    passing that offset as ``file_position``.
    """

    def __init__(self,
                 decompressedStream: io.BytesIO,
                 buffer_size=65536,
                 file_position=0,
                 ):
        self.decompressedStream = decompressedStream
        self.buffer_size = buffer_size
        self.incomplete_line = bytearray()
        self.position = file_position

    def seek(self, position):
        # Only records the offset; the underlying stream is repositioned
        # lazily when iteration starts.
        self.position = position

    def tell(self):
        return self.position

    def __iter__(self):
        self.decompressedStream.seek(self.position)
        while chunk := self.decompressedStream.read(self.buffer_size):
            pieces = (self.incomplete_line + chunk).split(b'\n')
            self.incomplete_line = bytearray()
            # A non-empty final piece means the chunk ended mid-line;
            # hold it back until the next read completes it.
            if pieces[-1]:
                self.incomplete_line = pieces.pop()
            for record in pieces:
                if record:
                    self.position += len(record) + 1
                    yield record
        # Stream ended without a trailing newline: emit the remainder.
        if self.incomplete_line:
            self.position += len(self.incomplete_line)
            yield self.incomplete_line


class ZstdConfig(datasets.BuilderConfig):
    """BuilderConfig for Zstd."""

    def __init__(self,
                 data_url=None,
                 cluster_layout=None,
                 worker_id=None,
                 n_workers=None,
                 filepaths=None,
                 file_loc=None,
                 buffer_size=65536,
                 cluster_spec=None,
                 features=None,
                 **kwargs):
        """BuilderConfig for Zstd.
        Args:
          data_url: mapping of config name -> shard URL template (see
            `_get_filepaths`).
          cluster_layout: mapping of config name -> cluster dimension sizes.
          worker_id: this worker's rank (None means 0).
          n_workers: total worker count (None means 1).
          filepaths: explicit shard paths; mutually exclusive with
            cluster_spec.
          file_loc: packed resume state (see State.to_int); None means
            start from scratch.
          buffer_size: read-chunk size for the line reader.
          cluster_spec: fixed cluster coordinates selecting a shard subset;
            mutually exclusive with filepaths.
          features: optional datasets.Features overriding the default schema.
          **kwargs: keyword arguments forwarded to super.
        Raises:
          ValueError: if both cluster_spec and filepaths are provided.
        """
        super().__init__(**kwargs)
        self.cluster_layout = cluster_layout
        self.data_url = data_url
        self.worker_id = worker_id
        self.n_workers = n_workers
        self.filepaths = filepaths
        self.file_loc = file_loc
        self.buffer_size = buffer_size
        self.cluster_spec = cluster_spec
        self.features = features
        # Real exception instead of `assert`, which is stripped under -O.
        if self.cluster_spec is not None and self.filepaths is not None:
            raise ValueError("Provide at most one of cluster_spec and filepaths")




import numpy as np
import itertools
def get_cluster_tuples(cluster_layout, cluster_spec):
    """Enumerate coordinate tuples for a cluster layout.

    Dimension i contributes the single value ``cluster_spec[i]`` when it
    is given and not None, otherwise the full ``range(cluster_layout[i])``;
    the result is the cross product over all dimensions.
    """
    if cluster_spec is None:
        cluster_spec = (None,) * len(cluster_layout)
    # A shorter spec (e.g. 2d spec against a 3d layout) is padded with
    # Nones so every layout dimension is covered.
    padded_spec = cluster_spec + (None,) * (len(cluster_layout) - len(cluster_spec))
    axes = [
        range(size) if fixed is None else [fixed]
        for size, fixed in zip(cluster_layout, padded_spec)
    ]
    return list(itertools.product(*axes))

def _get_filepaths(config):
    cluster_layout = config.cluster_layout[config.name]
    cluster_spec = config.cluster_spec
    if config.filepaths is not None:
        filepaths = config.filepaths
    else:
        tuples = get_cluster_tuples(cluster_layout, cluster_spec)
        filepaths = [config.data_url[config.name].format(*tup) for tup in tuples]
    return filepaths
        

class Zstd(datasets.GeneratorBasedBuilder):
    """Dataset builder streaming JSON-line examples from zstd shards."""

    BUILDER_CONFIGS = [ZstdConfig()]

    def _info(self):
        """Validate the config, cache its values, and build DatasetInfo."""
        self.data_url = self.config.data_url
        self.cluster_layout = self.config.cluster_layout
        # Fail fast with real exceptions (`assert` is stripped under -O).
        if self.cluster_layout is None:
            raise ValueError("cluster_layout must be set on the config")
        if self.data_url is None:
            raise ValueError("data_url must be set on the config")
        self.worker_id = self.config.worker_id
        self.n_workers = self.config.n_workers
        self.buffer_size = self.config.buffer_size
        self.filepaths = _get_filepaths(self.config)
        if self.config.features is None:
            # Default record schema when the config does not override it.
            self.features = datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "source": datasets.Value("string"),
                    "url": datasets.Value("string"),
                    "id": datasets.Value("int32"),
                    "file_loc": datasets.Value("int64"),
                }
            )
        else:
            self.features = self.config.features

        # Packed resume point (see State.to_int); 0 means start fresh.
        if self.config.file_loc is not None:
            self.file_loc = self.config.file_loc
        else:
            self.file_loc = 0

        return datasets.DatasetInfo(
            features=self.features,
            supervised_keys=None,
        )

    def _split_generators(self, _):
        # Single TRAIN split over every resolved shard path.
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": self.filepaths}),
        ]

    def _generate_examples(self, filepaths):
        """This function returns the examples in the raw (text) form by iterating on all the files."""
        id_ = 0
        dataset = ZstdReader(filepaths=list(filepaths),
                             worker_id=self.worker_id if self.worker_id is not None else 0,
                             num_workers=self.n_workers if self.n_workers is not None else 1,
                             file_loc=self.file_loc,
                             buffer_size=self.buffer_size,
                             )
        for example in dataset:
            # The record's original "id" field is moved to "url" and "id"
            # becomes a sequential counter.  NOTE(review): presumably the
            # source records carry a URL in "id" — confirm upstream schema.
            url = example["id"]
            example["id"] = id_
            example["url"] = url
            if id_ == 0:
                # Was a debug print(); route through the module logger.
                logger.info("first example keys: %s", list(example.keys()))
            id_ += 1
            # NOTE(review): keys run 1..N while example["id"] runs 0..N-1;
            # preserved as-is since datasets keys only need to be unique.
            yield id_, example