proof-pile-2-fixed / README.md
aklein4's picture
Update README.md
834ac90 verified
metadata
# Dataset card metadata: per-config column schema and split statistics.
# NOTE(review): presumably auto-generated by `datasets` during push_to_hub —
# edit with care; uploads may overwrite manual changes.
dataset_info:
  # Schema and split sizes for the algebraic-stack subset.
  - config_name: algebraic-stack
    features:
      - name: text
        dtype: string
      # Per-example provenance metadata carried over from the original dataset.
      - name: meta
        struct:
          - name: alphanum_fraction
            dtype: float64
          - name: author
            dtype: string
          - name: avg_line_length
            dtype: float64
          - name: converted
            dtype: bool
          - name: ext
            dtype: string
          - name: file
            dtype: string
          - name: hexsha
            dtype: string
          - name: include
            dtype: bool
          - name: lang
            dtype: string
          - name: length
            dtype: int64
          - name: llama_tokens
            dtype: int64
          - name: mathlib_filename
            dtype: string
          - name: max_forks_count
            dtype: float64
          - name: max_forks_repo_forks_event_max_datetime
            dtype: string
          - name: max_forks_repo_forks_event_min_datetime
            dtype: string
          - name: max_forks_repo_head_hexsha
            dtype: string
          - name: max_forks_repo_licenses
            sequence: string
          - name: max_forks_repo_name
            dtype: string
          - name: max_forks_repo_path
            dtype: string
          - name: max_issues_count
            dtype: float64
          - name: max_issues_repo_head_hexsha
            dtype: string
          - name: max_issues_repo_issues_event_max_datetime
            dtype: string
          - name: max_issues_repo_issues_event_min_datetime
            dtype: string
          - name: max_issues_repo_licenses
            sequence: string
          - name: max_issues_repo_name
            dtype: string
          - name: max_issues_repo_path
            dtype: string
          - name: max_line_length
            dtype: int64
          - name: max_stars_count
            dtype: float64
          - name: max_stars_repo_head_hexsha
            dtype: string
          - name: max_stars_repo_licenses
            sequence: string
          - name: max_stars_repo_name
            dtype: string
          - name: max_stars_repo_path
            dtype: string
          - name: max_stars_repo_stars_event_max_datetime
            dtype: string
          - name: max_stars_repo_stars_event_min_datetime
            dtype: string
          - name: num_tokens
            dtype: int64
          - name: path
            dtype: string
          - name: reason
            dtype: string
          - name: repo
            dtype: string
          - name: save_path
            dtype: string
          - name: sha
            dtype: string
          - name: size
            dtype: int64
    # Uncompressed byte and example counts per split.
    splits:
      - name: train
        num_bytes: 31797979222
        num_examples: 3404654
      - name: validation
        num_bytes: 165884973
        num_examples: 18040
      - name: test
        num_bytes: 162752298
        num_examples: 18000
    download_size: 11905060512
    dataset_size: 32126616493
  # Schema and split sizes for the arxiv subset.
  - config_name: arxiv
    features:
      - name: text
        dtype: string
      # Per-example provenance metadata carried over from the original dataset.
      - name: meta
        struct:
          - name: arxiv_id
            dtype: string
          - name: language
            dtype: string
          - name: timestamp
            dtype: string
          - name: url
            dtype: string
          - name: yymm
            dtype: string
    # Uncompressed byte and example counts per split.
    splits:
      - name: train
        num_bytes: 88423197439
        num_examples: 1542673
      - name: validation
        num_bytes: 463620511
        num_examples: 7793
      - name: test
        num_bytes: 473629411
        num_examples: 7840
    download_size: 40591755551
    dataset_size: 89360447361
  # Schema and split sizes for the open-web-math subset.
  # Note: this subset has flat columns (no nested meta struct).
  - config_name: open-web-math
    features:
      - name: url
        dtype: string
      - name: text
        dtype: string
      - name: date
        dtype: string
      - name: metadata
        dtype: string
    # Uncompressed byte and example counts per split.
    splits:
      - name: train
        num_bytes: 56086326272
        num_examples: 6252080
      - name: validation
        num_bytes: 281648006
        num_examples: 31576
      - name: test
        num_bytes: 284020779
        num_examples: 31577
    download_size: 27232284726
    dataset_size: 56651995057
# Maps each config name to its parquet shard glob patterns in the repo.
configs:
  - config_name: algebraic-stack
    data_files:
      - split: train
        path: algebraic-stack/train-*
      - split: validation
        path: algebraic-stack/validation-*
      - split: test
        path: algebraic-stack/test-*
  - config_name: arxiv
    data_files:
      - split: train
        path: arxiv/train-*
      - split: validation
        path: arxiv/validation-*
      - split: test
        path: arxiv/test-*
  - config_name: open-web-math
    data_files:
      - split: train
        path: open-web-math/train-*
      - split: validation
        path: open-web-math/validation-*
      - split: test
        path: open-web-math/test-*
# Hub task tag for discoverability.
task_categories:
  - text-generation

The header image of EleutherAI/proof-pile-2 edited to say "fixed"

The original EleutherAI/proof-pile-2 dataset uses a custom Python loading script and .jsonl.zst files, which some versions of the `datasets` library struggle with.

This dataset contains the same data, subsets, and splits as EleutherAI/proof-pile-2, converted into standard parquet format.

Each subset and split was also shuffled so that you can directly train on the data without issue.

Conversion was performed using the following script:

import os
import zstandard as zstd
import json
import pandas as pd
from tqdm import tqdm

import datasets
import huggingface_hub as hf


# Source dataset repo id on the Hugging Face Hub.
DATA_URL = "EleutherAI/proof-pile-2"
# Config (subset) names present in the source dataset.
SUBSETS = [
    "algebraic-stack",
    "arxiv",
    "open-web-math"
]
# Split names present in every subset.
SPLITS = [
    "train",
    "validation",
    "test"
]

# Local scratch directory where raw .jsonl.zst shards are downloaded.
LOCAL_DIR = "./local_data/proof-pile-2"

# Destination repo id for the converted parquet dataset.
OUT_URL = 'aklein4/proof-pile-2-fixed'


def download_data(
    url: str,
    subset: str,
    split: str,
):
    """Fetch the raw shards for one subset/split into LOCAL_DIR.

    Args:
        url: Source dataset repo id on the Hugging Face Hub.
        subset: Config/subset name (e.g. "arxiv").
        split: Split name ("train", "validation", or "test").

    Returns:
        Local directory path containing the downloaded shards.
    """
    # Restrict the snapshot to just the files for this subset/split.
    pattern = f"{subset}/{split}/*"
    hf.snapshot_download(
        repo_id=url,
        repo_type="dataset",
        allow_patterns=[pattern],
        local_dir=LOCAL_DIR,
    )

    # The snapshot mirrors the repo layout, so the files land here.
    return os.path.join(LOCAL_DIR, subset, split)


def format_data(
    url: str,
    subset: str,
    split: str,
):
    """Download one subset/split, decode it, shuffle it, and push it to the Hub.

    Args:
        url: Source dataset repo id on the Hugging Face Hub.
        subset: Config/subset name (e.g. "arxiv").
        split: Split name ("train", "validation", or "test").
    """
    # download the data
    folder = download_data(url, subset, split)

    # Sort shard paths so iteration order is deterministic — os.listdir
    # order is arbitrary, and a stable order makes the seed-42 shuffle
    # below reproducible across runs/machines.
    data_files = sorted(
        os.path.join(folder, f)
        for f in os.listdir(folder)
        if f.endswith(".zst")
    )

    # read all of the .jsonl.zst files
    examples = []
    for file_path in tqdm(data_files):

        # Pass the path directly so zstd.open owns (and closes) the
        # underlying file handle; wrapping a manually opened file would
        # leave that inner handle unclosed.
        with zstd.open(file_path, "rt", encoding="utf-8") as f:
            # Iterate the stream lazily instead of f.readlines(), which
            # would hold a second full copy of the decompressed shard in
            # memory; skip blank lines (e.g. a trailing newline).
            for line in f:
                if line.strip():
                    examples.append(json.loads(line))

    # get the dataset
    df = pd.DataFrame(examples)
    dataset = datasets.Dataset.from_pandas(df)
    dataset = dataset.shuffle(seed=42)

    # Upload as parquet under the same config/split names.
    dataset.push_to_hub(
        OUT_URL,
        config_name=subset,
        split=split,
        private=False
    )


def main():
    """Convert and upload every subset/split combination of the source dataset."""
    for config in SUBSETS:
        for part in SPLITS:
            format_data(DATA_URL, config, part)


if __name__ == "__main__":
    main()