"""C5 dataset based on Common Crawl."""
import io
import json

import datasets
import pyzstd
from tensorflow.io import gfile
logger = datasets.logging.get_logger(__name__)
_DESCRIPTION = """\
A colossal, cleaned version of Common Crawl's web crawl corpus.
Based on the Common Crawl dataset: https://commoncrawl.org.
Built on AllenAI's processed version of Google's C4 dataset.
"""
_CITATION = """
@article{2019t5,
author = {Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu},
title = {Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer},
journal = {arXiv e-prints},
year = {2019},
archivePrefix = {arXiv},
eprint = {1910.10683},
}
"""
_URL = "https://github.com/allenai/allennlp/discussions/5056"
_VARIANTS = ["en", "other"]
buffer_size = 1024  # default read-chunk size, in bytes
# _DATA_URL = "https://huggingface.co/datasets/allenai/c4/resolve/1ddc917116b730e1859edef32896ec5c16be51d0/{name}/c4-{split}.{index:05d}-of-{n_shards:05d}.json.gz"
def reverseMultiBytesIO(decompressedStream, buffer_size=1024):
    """Incrementally parse a newline-delimited JSON stream.

    Reads the binary stream in chunks of ``buffer_size`` bytes, re-assembles
    lines that span chunk boundaries, and yields each complete line as a
    parsed JSON object.
    """
    incomplete_line = bytearray()
    while True:
        # Read the next chunk from the decompressed stream.
        buffer = decompressedStream.read(buffer_size)
        if not buffer:
            # EOF: stop reading; a trailing partial line is flushed below.
            break
        # Prepend any partial line carried over from the previous chunk.
        buffer = incomplete_line + buffer
        incomplete_line = bytearray()
        # Split the chunk into lines on the newline character.
        lines = buffer.split(b'\n')
        # If the chunk did not end with a newline, the last piece is an
        # incomplete line; carry it over to the next iteration.
        if lines and lines[-1]:
            incomplete_line = lines.pop()
        # Decode and parse each complete line.
        for line in lines:
            if line:
                yield json.loads(line.decode('utf-8'))
    # Flush a final line that was not terminated by a newline.
    if incomplete_line:
        yield json.loads(incomplete_line.decode('utf-8'))
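# A minimal usage sketch of the chunked JSONL parser above (illustrative
# comment only, not executed); the tiny buffer forces lines to straddle
# chunk boundaries:
#
#   stream = io.BytesIO(b'{"text": "a"}\n{"text": "b"}\n')
#   records = list(reverseMultiBytesIO(stream, buffer_size=4))
#   assert [r["text"] for r in records] == ["a", "b"]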
class C5Config(datasets.BuilderConfig):
    """BuilderConfig for C5."""

    def __init__(self, name, shard_id=None, subshard_id=None, buffer_size=1024, **kwargs):
        """BuilderConfig for C5.

        Args:
            name: configuration name, one of ``_VARIANTS``.
            shard_id: load only this shard; ``None`` loads all shards.
            subshard_id: load only this subshard (used by the "en" config);
                ``None`` loads all subshards.
            buffer_size: read-chunk size, in bytes, for streaming decompression.
            **kwargs: keyword arguments forwarded to super.
        """
        super(C5Config, self).__init__(name=name, **kwargs)
        self.shard_id = shard_id
        self.subshard_id = subshard_id
        self.buffer_size = buffer_size
_DATA_URL_en = "gs://meliad2_us2/EasyLM/cluster/c5_{split}/shard_{shard_id}/subshard_{subshard_id}/text.jsonl"
_DATA_URL_non_en = "gs://meliad2_us2/EasyLM/v1/c5_{split}/shard_{shard_id}.jsonl"
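# For example, with split="train", shard_id=3 and subshard_id=17, _DATA_URL_en
# expands to "gs://meliad2_us2/EasyLM/cluster/c5_train/shard_3/subshard_17/text.jsonl".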
class C5(datasets.GeneratorBasedBuilder):
"""C5, a colossal, cleaned version of Common Crawl's web crawl corpus."""
BUILDER_CONFIGS = [C5Config(name) for name in _VARIANTS]
    def _info(self):
        self.shard_id = self.config.shard_id
        self.subshard_id = self.config.subshard_id
        self.buffer_size = self.config.buffer_size
        logger.info("building dataset info for config %s", self.config.name)
        if self.config.name == "en":
            features = datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "timestamp": datasets.Value("string"),
                    "url": datasets.Value("string"),
                }
            )
        else:
            features = datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "cluster_id": datasets.Value("int16"),
                }
            )
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=None,
homepage=_URL,
citation=_CITATION,
)
    def _split_generators(self, dl_manager):
        data_urls = {}
        if self.config.name == "en":
            allowed_shards = range(64) if self.shard_id is None else [self.shard_id]
            allowed_subshards = range(64) if self.subshard_id is None else [self.subshard_id]
            all_allowed = [(shard_id, subshard_id) for shard_id in allowed_shards for subshard_id in allowed_subshards]
            format_data_url = lambda split, idx: _DATA_URL_en.format(split=split, shard_id=idx[0], subshard_id=idx[1])
        else:
            all_allowed = range(1024) if self.shard_id is None else [self.shard_id]
            format_data_url = lambda split, idx: _DATA_URL_non_en.format(split=split, shard_id=idx)
        for split in ["train"]:
            data_urls[split] = [format_data_url(split, idx) for idx in all_allowed]
        # The files live on GCS and are opened directly with gfile in
        # _generate_examples, so no download step is needed here.
        train_downloaded_files = data_urls["train"]
        # train_downloaded_files = dl_manager.download()
        # validation_downloaded_files = dl_manager.download(data_urls["validation"])
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_downloaded_files}),
            # datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": validation_downloaded_files}),
        ]
    def _generate_examples(self, filepaths):
        """Yields examples in raw (text) form, iterating over all the files."""
        id_ = 0
        for filepath in filepaths:
            logger.info("generating examples from = %s", filepath)
            # Despite the .jsonl extension, the files are zstd-compressed, so
            # the GFile handle is wrapped in a streaming zstd decompressor.
            with gfile.GFile(filepath, 'rb') as f:
                with pyzstd.ZstdFile(f, 'rb') as ifo:
                    for example in reverseMultiBytesIO(ifo, buffer_size=self.buffer_size):
                        yield id_, example
                        id_ += 1
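# A minimal usage sketch (an illustration added here, not part of the original
# loader): stream a single shard of the "en" config. The local script path and
# the shard/subshard values are assumptions chosen for the example.
if __name__ == "__main__":
    ds = datasets.load_dataset(
        "c5.py",         # path to this loader script (assumed to be local)
        "en",            # config name; one of _VARIANTS
        shard_id=0,      # forwarded to C5Config: restrict to one shard
        subshard_id=0,   # forwarded to C5Config: restrict to one subshard
        streaming=True,  # iterate without materializing the data on disk
    )
    for i, example in enumerate(ds["train"]):
        print(example["url"])
        if i >= 2:
            break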