Datasets:
kernelmachine
committed on
Commit
·
47f7c92
1
Parent(s):
7bcc487
update
Browse files — open-license-corpus.py: +19 −23
open-license-corpus.py
CHANGED
|
@@ -60,17 +60,16 @@ N_SHARDS_PER_SPLIT = {
|
|
| 60 |
DATA_URL = 'https://huggingface.co/datasets/kernelmachine/open-license-corpus/resolve/main/data/{name}/{split}-{index:05d}-of-{n_shards:05d}.jsonl.gz'
|
| 61 |
|
| 62 |
class OpenLicenseCorpusConfig(datasets.BuilderConfig):
|
| 63 |
-
def __init__(self,
|
| 64 |
-
super(
|
| 65 |
-
self.subsets = subsets
|
| 66 |
|
| 67 |
|
| 68 |
class OpenLicenseCorpus(datasets.GeneratorBasedBuilder):
|
| 69 |
|
| 70 |
BUILDER_CONFIGS = [
|
| 71 |
-
|
| 72 |
for name in OLC_SUBSET_NAMES
|
| 73 |
-
]
|
| 74 |
|
| 75 |
def _info(self):
|
| 76 |
return datasets.DatasetInfo(
|
|
@@ -88,27 +87,24 @@ class OpenLicenseCorpus(datasets.GeneratorBasedBuilder):
|
|
| 88 |
def _split_generators(self, dl_manager):
|
| 89 |
data_urls = {}
|
| 90 |
for split in ["train"]:
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
|
| 94 |
-
|
| 95 |
-
|
| 96 |
-
]
|
| 97 |
-
}
|
| 98 |
|
| 99 |
-
train_downloaded_files =
|
| 100 |
|
| 101 |
-
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths":
|
| 102 |
|
| 103 |
def _generate_examples(self, filepaths):
|
| 104 |
"""This function returns the examples in the raw (text) form by iterating on all the files."""
|
| 105 |
id_ = 0
|
| 106 |
-
for
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
id_ += 1
|
|
|
|
| 60 |
# URL template for one gzipped-JSONL shard of a subset. Placeholders:
# {name} = subset name, {split} = split name, {index}/{n_shards} = zero-padded
# 5-digit shard fields (see _split_generators for how they are filled in).
DATA_URL = 'https://huggingface.co/datasets/kernelmachine/open-license-corpus/resolve/main/data/{name}/{split}-{index:05d}-of-{n_shards:05d}.jsonl.gz'
|
| 61 |
|
| 62 |
class OpenLicenseCorpusConfig(datasets.BuilderConfig):
    """BuilderConfig for the Open License Corpus dataset.

    Args:
        features: optional ``datasets.Features`` describing the examples.
        citation: optional citation string for this subset.
        **kwargs: forwarded verbatim to ``datasets.BuilderConfig``.
    """

    def __init__(self, features=None, citation=None, **kwargs):
        super().__init__(**kwargs)
        # Fix: the original accepted `features` and `citation` but silently
        # discarded them; keep them on the config so the builder can read
        # them. `None` defaults keep all existing callers working.
        self.features = features
        self.citation = citation
|
|
|
|
| 65 |
|
| 66 |
|
| 67 |
class OpenLicenseCorpus(datasets.GeneratorBasedBuilder):
|
| 68 |
|
| 69 |
# One plain config per OLC subset; each subset is addressed purely by name.
BUILDER_CONFIGS = [datasets.BuilderConfig(name=subset) for subset in OLC_SUBSET_NAMES]
|
| 73 |
|
| 74 |
def _info(self):
|
| 75 |
return datasets.DatasetInfo(
|
|
|
|
| 87 |
def _split_generators(self, dl_manager):
    """Build the shard-URL list for the train split and download it.

    Returns a single TRAIN SplitGenerator whose gen_kwargs carry the local
    paths of the downloaded shard files.
    """
    shard_urls = {}
    for split in ["train"]:
        # NOTE(review): n_shards is the configured shard count minus one,
        # and range(n_shards) then stops one index short of that value —
        # confirm against the actual shard file names that the last shard
        # is not being skipped.
        n_shards = N_SHARDS_PER_SPLIT[self.config.name][split] - 1
        shard_urls[split] = [
            DATA_URL.format(
                name=self.config.name,
                split=split,
                index=shard_index,
                n_shards=n_shards,
            )
            for shard_index in range(n_shards)
        ]

    downloaded_paths = dl_manager.download(shard_urls["train"])
    return [
        datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={"filepaths": downloaded_paths},
        )
    ]
|
| 99 |
|
| 100 |
def _generate_examples(self, filepaths):
    """This function returns the examples in the raw (text) form by iterating on all the files.

    Args:
        filepaths: local paths of downloaded gzipped-JSONL shard files.

    Yields:
        (id_, example) pairs, where id_ is a running integer key and
        example is the parsed JSON object of one non-empty line.
    """
    id_ = 0
    for filepath in filepaths:
        logger.info("generating examples from = %s", filepath)
        # Fix: the original did gzip.open(open(filepath, "rb"), ...), which
        # leaks the inner binary handle — closing a GzipFile does NOT close
        # a caller-supplied fileobj. Passing the path lets gzip.open own
        # (and close) the underlying file.
        with gzip.open(filepath, "rt", encoding="utf-8") as f:
            for line in f:
                if line:  # skip blank lines between records
                    example = json.loads(line)
                    yield id_, example
                    id_ += 1
|
|