import gzip
import json
import datasets
logger = datasets.logging.get_logger(__name__)
_HOMEPAGE = "https://github.com/atlasunified"
_DESCRIPTION = "\
Your description here.\
"
_LICENSE = "odc-by"
_VARIANTS = {
"C#": {
"version": "1.0.0",
"download_size": 12345678, # replace with actual size
"dataset_size": 12345678, # replace with actual size
"splits": {
"train": {
"num_bytes": 12345678, # replace with actual size
"num_examples": 12345678, # replace with actual size
"files": [
"data/C#.jsonl",
# continue with other file paths...
],
},
},
},
"C++": {
# similar structure as above, replace with actual data
},
# continue for other languages...
}
# Flat all-string schema shared by every variant.
_FEATURES = datasets.Features(
    {
        "added": datasets.Value("string"),
        "created": datasets.Value("string"),
        "id": datasets.Value("string"),
        "source": datasets.Value("string"),
        "text": datasets.Value("string"),
        "version": datasets.Value("string"),
    }
)
_CITATION = """\
Your citation here.
"""
class MyHFRepo(datasets.GeneratorBasedBuilder):
    """Template builder that streams JSON-lines files, one config per language."""

    # One BuilderConfig per language declared in _VARIANTS.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=name, version=config["version"])
        for name, config in _VARIANTS.items()
    ]
    DEFAULT_CONFIG_NAME = "C#"  # replace with the language you want as default

    def _info(self):
        """Return dataset metadata (features, sizes, license) for the active config."""
        variant = _VARIANTS[self.config.name]
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=_FEATURES,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            dataset_size=variant["dataset_size"],
            download_size=variant["download_size"],
        )

    def _split_generators(self, dl_manager):
        """Download the active config's train files and declare the train split."""
        train_downloaded_files = dl_manager.download(
            _VARIANTS[self.config.name]["splits"]["train"]["files"]
        )
        return [
            datasets.SplitGenerator(
                name=str(datasets.Split.TRAIN),
                gen_kwargs={"filepaths": train_downloaded_files},
            ),
        ]

    def _generate_examples(self, filepaths):
        """Yield (id, example) pairs by iterating every downloaded file.

        Files ending in ``.gz`` are transparently decompressed; plain files
        (e.g. the ``data/*.jsonl`` paths listed in _VARIANTS) are read
        directly.  The previous implementation wrapped every file in
        ``gzip.open`` — which fails on uncompressed input — and leaked the
        inner ``open(filepath, "rb")`` handle, since closing a GzipFile does
        not close the file object it wraps.
        """
        id_ = 0
        for filepath in filepaths:
            logger.info("generating examples from = %s", filepath)
            # Pick the opener from the file suffix; both accept text mode.
            opener = gzip.open if str(filepath).endswith(".gz") else open
            with opener(filepath, "rt", encoding="utf-8") as f:
                for line in f:
                    if line:  # skip empty lines (e.g. trailing newline)
                        example = json.loads(line)
                        yield id_, example
                        id_ += 1
|