Datasets:
Tasks:
Token Classification
Modalities:
Text
Languages:
English
Size:
100K - 1M
ArXiv:
Tags:
abbreviation-detection
License:
Commit
·
fabacba
1
Parent(s):
a87ff0e
Changes
Browse files- PLOD-filtered.py +12 -57
- random.ipynb +0 -0
PLOD-filtered.py
CHANGED
|
@@ -2,6 +2,7 @@ import os
|
|
| 2 |
|
| 3 |
import datasets
|
| 4 |
from typing import List
|
|
|
|
| 5 |
|
| 6 |
logger = datasets.logging.get_logger(__name__)
|
| 7 |
|
|
@@ -81,13 +82,6 @@ class PLODfilteredConfig(datasets.GeneratorBasedBuilder):
|
|
| 81 |
homepage="https://github.com/surrey-nlp/PLOD-AbbreviationDetection",
|
| 82 |
citation=_CITATION,
|
| 83 |
)
|
| 84 |
-
# _TRAINING_FILE_URL = "https://huggingface.co/datasets/surrey-nlp/PLOD-filtered/resolve/main/data/PLOS-train70-filtered-pos_bio.json"
|
| 85 |
-
# _DEV_FILE_URL = "https://huggingface.co/datasets/surrey-nlp/PLOD-filtered/resolve/main/data/PLOS-val15-filtered-pos_bio.json"
|
| 86 |
-
# _TEST_FILE_URL = "https://huggingface.co/datasets/surrey-nlp/PLOD-filtered/resolve/main/data/PLOS-test15-filtered-pos_bio.json"
|
| 87 |
-
|
| 88 |
-
# _TRAINING_FILE = "PLOS-train70-filtered-pos_bio.json"
|
| 89 |
-
# _DEV_FILE = "PLOS-val15-filtered-pos_bio.json"
|
| 90 |
-
# _TEST_FILE = "PLOS-test15-filtered-pos_bio.json"
|
| 91 |
|
| 92 |
_URL = "https://huggingface.co/datasets/surrey-nlp/PLOD-filtered/resolve/main/data/"
|
| 93 |
_URLS = {
|
|
@@ -106,54 +100,15 @@ class PLODfilteredConfig(datasets.GeneratorBasedBuilder):
|
|
| 106 |
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]})
|
| 107 |
]
|
| 108 |
|
| 109 |
-
# def _split_generators(self, dl_manager):
|
| 110 |
-
# """Returns SplitGenerators."""
|
| 111 |
-
# downloaded_train = dl_manager.download_and_extract(_TRAINING_FILE_URL)
|
| 112 |
-
# downloaded_val = dl_manager.download_and_extract(_DEV_FILE_URL)
|
| 113 |
-
# downloaded_test = dl_manager.download_and_extract(_TEST_FILE_URL)
|
| 114 |
-
|
| 115 |
-
# data_files = {
|
| 116 |
-
# "train": _TRAINING_FILE,
|
| 117 |
-
# "dev": _DEV_FILE,
|
| 118 |
-
# "test": _TEST_FILE,
|
| 119 |
-
# }
|
| 120 |
-
|
| 121 |
-
# return [
|
| 122 |
-
# datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}),
|
| 123 |
-
# datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["dev"]}),
|
| 124 |
-
# datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
|
| 125 |
-
# ]
|
| 126 |
-
|
| 127 |
def _generate_examples(self, filepath):
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
"tokens": tokens,
|
| 140 |
-
"pos_tags": pos_tags,
|
| 141 |
-
"ner_tags": ner_tags,
|
| 142 |
-
}
|
| 143 |
-
guid += 1
|
| 144 |
-
tokens = []
|
| 145 |
-
pos_tags = []
|
| 146 |
-
ner_tags = []
|
| 147 |
-
else:
|
| 148 |
-
# conll2003 tokens are space separated
|
| 149 |
-
splits = line.split(" ")
|
| 150 |
-
tokens.append(splits[0])
|
| 151 |
-
pos_tags.append(splits[1].strip())
|
| 152 |
-
ner_tags.append(splits[2].strip())
|
| 153 |
-
# last example
|
| 154 |
-
yield guid, {
|
| 155 |
-
"id": str(guid),
|
| 156 |
-
"tokens": tokens,
|
| 157 |
-
"pos_tags": pos_tags,
|
| 158 |
-
"ner_tags": ner_tags,
|
| 159 |
-
}
|
|
|
|
| 2 |
|
| 3 |
import datasets
|
| 4 |
from typing import List
|
| 5 |
+
import json
|
| 6 |
|
| 7 |
logger = datasets.logging.get_logger(__name__)
|
| 8 |
|
|
|
|
| 82 |
homepage="https://github.com/surrey-nlp/PLOD-AbbreviationDetection",
|
| 83 |
citation=_CITATION,
|
| 84 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 85 |
|
| 86 |
_URL = "https://huggingface.co/datasets/surrey-nlp/PLOD-filtered/resolve/main/data/"
|
| 87 |
_URLS = {
|
|
|
|
| 100 |
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]})
|
| 101 |
]
|
| 102 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 103 |
def _generate_examples(self, filepath):
    """Yield (key, example) pairs for one split, read from a JSON file.

    Args:
        filepath: Local path (supplied by ``_split_generators``) to a JSON
            file whose top-level ``"data"`` list holds pre-tokenised records
            with ``"id"``, ``"tokens"``, ``"pos_tags"`` and ``"ner_tags"``.

    Yields:
        ``(id_, example)`` tuples where ``id_`` is the record id as an int
        and ``example`` is a dict matching the dataset's features.
    """
    logger.info("generating examples from = %s", filepath)
    # Explicit encoding so the read does not depend on the platform default.
    with open(filepath, encoding="utf-8") as f:
        plod = json.load(f)
    # `record` instead of `object` — avoid shadowing the builtin.
    for record in plod["data"]:
        id_ = int(record["id"])
        yield id_, {
            "id": str(id_),
            # FIX: the JSON rewrite dropped "tokens", but this is a
            # token-classification dataset and the previous generator
            # (and, presumably, the unchanged features spec) include it —
            # without it every example fails feature casting. TODO(review):
            # confirm against the features definition earlier in the file.
            "tokens": record["tokens"],
            "pos_tags": record["pos_tags"],
            "ner_tags": record["ner_tags"],
        }
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
random.ipynb
CHANGED
|
The diff for this file is too large to render.
See raw diff
|
|
|