| import json |
| import os |
| import zstandard as zstd |
|
|
| import datasets |
|
|
| _CITATION="""\ |
| @article{azerbayev2023llemma, |
| title={Llemma: an open language model for mathematics}, |
| author={Zhangir Azerbayev and Hailey Schoelkopf and Keiran Paster and Marco Dos Santos and Stephen McAleer and Albert Q. Jiang and Jia Deng and Stella Biderman and Sean Welleck}, |
| eprint={xyz.xyz}, |
| archivePrefix={arXiv} |
| year={2023} |
| } |
| """ |
|
|
# Short human-readable summary surfaced in the DatasetInfo metadata.
_DESCRIPTION = """\
A dataset of high quality mathematical text. """
# Project repository hosting the dataset's code and documentation.
_HOMEPAGE = "https://github.com/EleutherAI/math-lm"
|
|
|
|
class ProofPile2Config(datasets.BuilderConfig):
    """BuilderConfig for ProofPile2.

    Fixes over the original: the class docstring wrongly described this as a
    "RedPajama sample" config (copy-paste error), and positional ``*args``
    were accepted but silently dropped instead of being forwarded to the
    parent ``BuilderConfig``.
    """

    def __init__(self, *args, subsets, **kwargs):
        """Initialize a ProofPile2 builder configuration.

        Args:
            subsets: list of subset directory names (e.g. ``'arxiv'``,
                ``'open-web-math'``, ``'algebraic-stack'``) whose files this
                config should load.
            *args: positional arguments forwarded to super.
            **kwargs: keyword arguments forwarded to super.
        """
        # Forward *args as well — the original dropped them on the floor.
        super().__init__(*args, **kwargs)
        self.subsets = subsets
|
|
|
|
class ProofPile2(datasets.GeneratorBasedBuilder):
    """A large dataset of mathematical text.

    Examples are JSON-lines records stored in zstd-compressed files laid out
    as ``<subset>/<split>/...``; each record has a ``text`` field and an
    optional ``meta`` field.
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        ProofPile2Config(
            name='default',
            subsets=['arxiv', 'open-web-math', 'algebraic-stack'],
            version=VERSION,
            description="All subsets"
        ),
        ProofPile2Config(
            name='arxiv',
            subsets=["arxiv"],
            version=VERSION,
            description="ArXiv subset"
        ),
        ProofPile2Config(
            name='open-web-math',
            subsets=['open-web-math'],
            version=VERSION,
            description="OpenWebMath"
        ),
        ProofPile2Config(
            name='algebraic-stack',
            subsets=['algebraic-stack'],
            version=VERSION,
            description="Code subset"
        ),
    ]

    def _info(self):
        """Return dataset metadata: each example is a text string plus a
        JSON-encoded ``meta`` string."""
        features = datasets.Features(
            {
                "text": datasets.Value("string"),
                "meta": datasets.Value("string")
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Build train/validation/test splits.

        For each split, downloads every file found under ``<subset>/<split>``
        for each subset selected by the active config.
        """
        return [
            datasets.SplitGenerator(
                name=split_obj,
                gen_kwargs={
                    # Comprehension instead of list(map(...)) for clarity.
                    "data_files": [
                        dl_manager.download(path)
                        for subset in self.config.subsets
                        for path in dl_manager.iter_files(os.path.join(subset, split))
                    ]
                },
            )
            for split, split_obj in zip(
                ("train", "validation", "test"),
                (datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST)
            )
        ]

    def _generate_examples(self, data_files):
        """Yield ``(key, example)`` pairs from zstd-compressed JSON-lines files.

        ``meta`` is re-serialized with ``json.dumps`` so the feature schema
        stays a plain string; records missing ``meta`` get an empty dict.
        """
        key = 0
        for name in data_files:
            with zstd.open(open(name, "rb"), "rt", encoding="utf-8") as f:
                # Stream line-by-line instead of f.readlines(), which loaded
                # the entire decompressed file into memory at once.
                for line in f:
                    instance = json.loads(line)
                    if instance:  # skip empty/falsy records
                        if "meta" not in instance:
                            instance["meta"] = dict()
                        yield key, {"text": instance["text"], "meta": json.dumps(instance["meta"])}
                        key += 1
|
|