import pyarrow as pa
import pyarrow.parquet as pq

import datasets


_REPO_NAME = "Fsoft-AIC/the-vault"  # Hugging Face Hub repository for this dataset


# Supported languages, mapped to the names used in the data file paths.
_LANG_TO_TEXT = {
    "python": "python",
    "c": "c",
}


_DESCRIPTION = """The Vault: a multilingual dataset of code-docstring pairs for code understanding and generation."""

_HOMEPAGE = "https://huggingface.co/Fsoft-AIC"


# Reverse mapping, from the path name back to the language key.
_TEXT_TO_LANG = {text: lang for lang, text in _LANG_TO_TEXT.items()}

_LANG_CONFIGS = ["all"] + list(_TEXT_TO_LANG.keys())


# Number of parquet shards for each split/language pair, used to build the data file paths.
num_shard_split = {
    "train/small/python": 1,
    "train/medium/python": 1,
    "train/small/c": 1,
    "train/medium/c": 1,
}

_SPLIT_CONFIGS = ["all", "train", "train/small", "train/medium"]
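

# Config names follow "<split_set>-<languages>" with "/" replaced by "_",
# e.g. "train_small-python" or "all-all" (see the name= argument below).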
class TheVaultFunctionConfig(datasets.BuilderConfig):
    """BuilderConfig for The Vault dataset."""

    def __init__(self, *args, languages=("all",), split_set=("all",), **kwargs):
        """BuilderConfig for The Vault dataset.

        Args:
            split_set (:obj:`List[str]`): List of split sets to load.
            languages (:obj:`List[str]`): List of languages to load.
            **kwargs: Keyword arguments forwarded to super.
        """
        super().__init__(
            *args,
            name="+".join([split.replace("/", "_") for split in split_set]) + "-" + "+".join(languages),
            **kwargs,
        )

        languages = set(lang.lower() for lang in languages)
        split_set = set(split.lower() for split in split_set)

        assert all(language in _LANG_CONFIGS for language in languages), f"languages {languages} contains a language not in {_LANG_CONFIGS}."
        assert all(split in _SPLIT_CONFIGS for split in split_set), f"split_set {split_set} contains an element not in {_SPLIT_CONFIGS}."

        if "all" in split_set:
            assert len(split_set) == 1, f"Passed 'all' together with other split sets: {split_set}"
        elif "train" in split_set:
            for split in split_set:
                if "train" in split and split != "train":
                    raise ValueError(f"Split set 'train' already includes '{split}'. Please pass only one of them.")

        if "all" in languages:
            assert len(languages) == 1, f"Passed 'all' together with other languages: {languages}"

        self.languages = languages
        self.split_set = split_set


class TheVaultFunction(datasets.GeneratorBasedBuilder):
    """The Vault dataset."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIG_CLASS = TheVaultFunctionConfig
    BUILDER_CONFIGS = [TheVaultFunctionConfig(languages=[lang], split_set=[spl]) for lang in _LANG_CONFIGS for spl in _SPLIT_CONFIGS]
    DEFAULT_CONFIG_NAME = "all-all"
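    # One config is pre-registered per (language, split_set) pair; "all-all"
    # (every language, every split) is the default.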

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "repo": datasets.Value("string"),
                "path": datasets.Value("string"),
                "license": datasets.Value("string"),
                "language": datasets.Value("string"),
                "identifier": datasets.Value("string"),
                "return_type": datasets.Value("string"),
                "original_docstring": datasets.Value("string"),
                "docstring": datasets.Value("string"),
                "code": datasets.Value("string"),
                # The *_tokens columns hold lists of strings in the parquet shards,
                # so they are declared as sequences rather than plain strings.
                "code_tokens": datasets.Sequence(datasets.Value("string")),
                "docstring_tokens": datasets.Sequence(datasets.Value("string")),
                "short_docstring": datasets.Value("string"),
                "short_docstring_tokens": datasets.Sequence(datasets.Value("string")),
                "comment": datasets.Value("string"),
            }),
            supervised_keys=None,
            homepage=_HOMEPAGE,
        )
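
    # Shards follow the layout data/<split>/<language>-<index>-of-<total>.parquet,
    # e.g. data/train/small/python-00000-of-00001.parquet.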
    def _split_generators(self, dl_manager):
        generators = []
        split_set = list(self.config.split_set)
        languages = list(self.config.languages)

        # Expand the aggregate configs into concrete splits, avoiding duplicates
        # when "all" has already pulled in both train subsets.
        if "all" in split_set:
            split_set = _SPLIT_CONFIGS[1:]
        if "train" in split_set:
            split_set.remove("train")
            for subset in ("train/small", "train/medium"):
                if subset not in split_set:
                    split_set.append(subset)

        if "all" in languages:
            languages = _LANG_CONFIGS[1:]

        for split in split_set:
            split_files = []
            for language in languages:
                num_shards = num_shard_split[f"{split}/{language}"]
                data_files = [
                    f"data/{split}/{language}-{_index:05d}-of-{num_shards:05d}.parquet"
                    for _index in range(num_shards)
                ]
                files = dl_manager.download(data_files)
                split_files.extend(files)

            generators.append(
                datasets.SplitGenerator(
                    name=split.replace("/", "_"),
                    gen_kwargs={
                        "files": split_files,
                    },
                ),
            )
        return generators

    def _generate_examples(self, files):
        key = 0
        for file in files:
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                # Stream the file in record batches so a whole shard is never
                # materialised in memory at once.
                for record_batch in parquet_file.iter_batches(batch_size=10_000):
                    pa_table = pa.Table.from_batches([record_batch])
                    for row_index in range(pa_table.num_rows):
                        row = pa_table.slice(row_index, 1).to_pydict()

                        yield key, {
                            "repo": row["repo"][0],
                            "path": row["path"][0],
                            "license": row["license"][0],
                            "language": row["language"][0],
                            "identifier": row["identifier"][0],
                            "return_type": row["return_type"][0],
                            "original_docstring": row["original_docstring"][0],
                            "docstring": row["docstring"][0],
                            "docstring_tokens": row["docstring_tokens"][0],
                            "code": row["code"][0],
                            "code_tokens": row["code_tokens"][0],
                            "short_docstring": row["short_docstring"][0],
                            "short_docstring_tokens": row["short_docstring_tokens"][0],
                            "comment": row["comment"][0],
                        }
                        key += 1
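

# Usage sketch (hypothetical; assumes this script is the dataset repository's
# loading script and that the shards listed in num_shard_split exist under data/):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("Fsoft-AIC/the-vault", languages=["python"], split_set=["train/small"])
#     print(ds["train_small"][0]["identifier"])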