| """Survey Variable Identification (SV-Ident) Corpus.""" |
|
|
| import csv |
| import random |
|
|
| import datasets |
|
|
|
|
| |

_CITATION = """\
@misc{sv-ident,
    author={vadis-project},
    title={SV-Ident},
    year={2022},
    url={https://github.com/vadis-project/sv-ident},
}
"""

_DESCRIPTION = """\
The SV-Ident corpus (version 0.3) is a collection of 4,248 expert-annotated English
and German sentences from social science publications, supporting the task of
multi-label text classification.
"""

_HOMEPAGE = "https://github.com/vadis-project/sv-ident"

_URL = "https://raw.githubusercontent.com/vadis-project/sv-ident/a8e71bba570f628c460e2b542d4cc645e4eb7d03/data/train/"
_URLS = {
    "train": _URL + "train.tsv",
    "dev": _URL + "val.tsv",
}


class SVIdent(datasets.GeneratorBasedBuilder):
    """Survey Variable Identification (SV-Ident) Corpus."""

    VERSION = datasets.Version("0.3.0")

    def _info(self):
        features = datasets.Features(
            {
                # Input sentence and binary label: does the sentence mention a survey variable?
                "sentence": datasets.Value("string"),
                "is_variable": datasets.ClassLabel(names=["0", "1"]),
                # IDs of the mentioned variables and of the research datasets they belong to.
                "variable": datasets.Sequence(datasets.Value(dtype="string")),
                "research_data": datasets.Sequence(datasets.Value(dtype="string")),
                # Source document ID, unique sentence ID, and sentence language ("en" or "de").
                "doc_id": datasets.Value("string"),
                "uuid": datasets.Value("string"),
                "lang": datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=("sentence", "is_variable"),
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        downloaded_files = dl_manager.download(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": downloaded_files["train"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": downloaded_files["dev"],
                },
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields examples."""
        # Read the TSV file; the columns are consumed in this order:
        # sentence, is_variable, variable, research_data, doc_id, uuid, lang.
        data = []
        with open(filepath, newline="", encoding="utf-8") as csvfile:
            reader = csv.reader(csvfile, delimiter="\t")
            next(reader, None)  # skip the header row
            for row in reader:
                data.append(row)

        # Shuffle the examples with a fixed seed so the order is reproducible across runs.
        seed = 42
        random.seed(seed)
        random.shuffle(data)

        for id_, example in enumerate(data):
            sentence = example[0]
            is_variable = example[1]
            # "variable" and "research_data" hold zero or more ";"-separated IDs.
            variable = example[2] if example[2] != "" else []
            if variable:
                variable = variable.split(";") if ";" in variable else [variable]
            research_data = example[3] if example[3] != "" else []
            if research_data:
                research_data = research_data.split(";") if ";" in research_data else [research_data]
            doc_id = example[4]
            uuid = example[5]
            lang = example[6]

            yield id_, {
                "sentence": sentence,
                "is_variable": is_variable,
                "variable": variable,
                "research_data": research_data,
                "doc_id": doc_id,
                "uuid": uuid,
                "lang": lang,
            }
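

# A minimal smoke test, not part of the original loader: running this file directly
# builds both splits with the SVIdent builder defined above and prints basic statistics.
# This is a sketch only; end users would normally obtain the data through
# `datasets.load_dataset` on the published dataset rather than by executing this script.
if __name__ == "__main__":
    builder = SVIdent()
    builder.download_and_prepare()
    for split in (datasets.Split.TRAIN, datasets.Split.VALIDATION):
        ds = builder.as_dataset(split=split)
        print(split, ds.num_rows, ds.features)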