import json
import os
import traceback

import datasets

logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = """\
RedPajama is a clean-room, fully open-source implementation of the LLaMa dataset.
"""

# Programming-language subsets available in this dataset; one builder config
# is generated per entry.
_LANGUAGE_SUBSETS = [
    "csharp", "cplusplus", "c", "go", "html", "haskell", "java",
    "javascript", "jupyter", "kotlin", "php", "perl", "python", "ruby",
    "rust", "shell", "swift", "typescript", "v",
]

# One URL-list file (a .jsonl of data-file URLs) per language subset.
# NOTE: '/resolve/main/' (not '/blob/main/') is required to download the raw
# file from the Hugging Face Hub — '/blob/' URLs return an HTML viewer page.
_URL_LISTS = {
    language: (
        "https://huggingface.co/datasets/AtlasUnified/"
        f"atlas-preprocessed-code/resolve/main/{language}.jsonl"
    )
    for language in _LANGUAGE_SUBSETS
}

_URL_BASE = 'https://data.together.xyz/redpajama-data-1T/v1.0.0'

# Optional local mirror: when RED_PAJAMA_DATA_DIR is set, data files are read
# from that directory instead of being downloaded.
_DATA_DIR = os.environ.get('RED_PAJAMA_DATA_DIR', None)


class RedPajama1TConfig(datasets.BuilderConfig):
    """BuilderConfig for RedPajama sample."""

    def __init__(self, *args, subsets, **kwargs):
        """BuilderConfig for RedPajama.

        Args:
            subsets: list of subset names this config should load.
            *args: positional arguments forwarded to super.
            **kwargs: keyword arguments forwarded to super.
        """
        # Forward *args as well — the original accepted but silently
        # dropped positional arguments.
        super(RedPajama1TConfig, self).__init__(*args, **kwargs)
        self.subsets = subsets


class RedPajama1T(datasets.GeneratorBasedBuilder):
    """RedPajama: Reproducing the LLaMA training dataset of over 1.2 trillion tokens.

    Version 1.0.0."""

    # One config per language subset, each selecting exactly that subset.
    BUILDER_CONFIGS = [
        RedPajama1TConfig(
            name=language,
            subsets=[language],
            version=datasets.Version("1.0.0", ""),
            description=f"RedPajama1T {language} subset",
        )
        for language in _LANGUAGE_SUBSETS
    ]

    def _info(self):
        """Return dataset metadata: description and the per-example schema."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "meta": datasets.Value("string"),
                    "red_pajama_subset": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Download the URL-list files, then resolve the data files.

        Each URL-list file contains one data-file URL per line. Data files
        are either downloaded via ``dl_manager`` or, when
        ``RED_PAJAMA_DATA_DIR`` is set, mapped to paths under that local
        directory (mirroring the URL path relative to ``_URL_BASE``).
        """
        url_lists = dl_manager.download_and_extract({
            subset: _URL_LISTS[subset] for subset in self.config.subsets
        })

        urls = {}
        for subset, url_list in url_lists.items():
            with open(url_list, encoding="utf-8") as f:
                urls[subset] = [line.strip() for line in f]

        if _DATA_DIR is not None:
            logger.info('Reading data from %s', _DATA_DIR)
            # Number of path components in the URL prefix to strip when
            # mapping a remote URL onto the local mirror directory.
            url_prefix_slashes = len(_URL_BASE.split('/'))
            downloaded_files = {
                subset: [
                    os.path.join(_DATA_DIR, *url.split('/')[url_prefix_slashes:])
                    for url in url_list
                ]
                for subset, url_list in urls.items()
            }
        else:
            downloaded_files = dl_manager.download(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "files": {
                        subset: downloaded_files[subset]
                        for subset in self.config.subsets
                    }
                },
            )
        ]

    def _generate_examples(self, files):
        """Yield ``(key, example)`` pairs from the given JSONL files.

        Args:
            files: mapping of subset name -> list of local JSONL file paths.

        Each JSON row must contain a "text" field. Rows without a "meta"
        field have their remaining fields (minus "text") serialized into
        "meta" so the output schema is uniform.
        """
        key = 0
        for subset in files:
            for path in files[subset]:
                with open(path, encoding="utf-8") as f:
                    for i, row in enumerate(f):
                        try:
                            data = json.loads(row)
                            if "meta" not in data:
                                # Fold all non-text fields into the "meta"
                                # string to match the declared schema.
                                text = data["text"]
                                del data["text"]
                                yield key, {
                                    "text": text,
                                    "meta": json.dumps(data),
                                    "red_pajama_subset": subset,
                                }
                            else:
                                yield key, {
                                    "text": data["text"],
                                    "meta": data["meta"],
                                    "red_pajama_subset": subset,
                                }
                            key += 1
                        except Exception as e:
                            # Log context for the offending row, then
                            # re-raise — malformed data should fail loudly.
                            logger.error('Subset: %s', subset)
                            logger.error('Path: %s', path)
                            logger.error('Row: %s', row)
                            traceback.print_exc()
                            raise e