Tasks: Text Classification
Sub-tasks: fact-checking
Languages: Danish
Size: 1K<n<10K
Tags: stance-detection
License:

# coding=utf-8
# Copyright 2022 Leon Derczynski, HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Danish Stance Dataset DAST"""

import json
from collections import defaultdict

import datasets

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{lillie-etal-2019-joint,
    title = "Joint Rumour Stance and Veracity Prediction",
    author = "Lillie, Anders Edelbo and
      Middelboe, Emil Refsgaard and
      Derczynski, Leon",
    booktitle = "Proceedings of the 22nd Nordic Conference on Computational Linguistics",
    month = sep # "{--}" # oct,
    year = "2019",
    address = "Turku, Finland",
    publisher = {Link{\\"o}ping University Electronic Press},
    url = "https://aclanthology.org/W19-6122",
    pages = "208--221",
}
"""

_DESCRIPTION = """\
This dataset presents a series of stories on Reddit and the conversation around
them, annotated for stance. Stories are also annotated for veracity.
For more details see https://aclanthology.org/W19-6122/
"""

_URL = "dast.jsonl"
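
# Each line of dast.jsonl is expected to hold one JSON object shaped roughly
# as below. The field names are taken from the parsing code that follows;
# the exact nesting of "branches" is an assumption (unpack recurses through
# arbitrarily nested lists of comment dicts):
#
#   {"redditSubmission": {"submission_id": ..., "title": ...},
#    "branches": [[{"comment": {"comment_id": ..., "text": ...,
#                               "parent_id": ..., "submission_id": ...,
#                               "SDQC_Parent": ..., "SDQC_Submission": ...}},
#                  ...], ...]}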


class DastConfig(datasets.BuilderConfig):
    """BuilderConfig for DAST."""

    def __init__(self, **kwargs):
        """BuilderConfig for DAST.

        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super(DastConfig, self).__init__(**kwargs)


class Dast(datasets.GeneratorBasedBuilder):
    """Dast dataset."""

    BUILDER_CONFIGS = [
        DastConfig(name="dkstance", version=datasets.Version("1.0.0"), description="Danish Stance"),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "native_id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "parent_id": datasets.Value("string"),
                    "parent_text": datasets.Value("string"),
                    "parent_stance": datasets.features.ClassLabel(
                        names=[
                            "Supporting",
                            "Denying",
                            "Querying",
                            "Commenting",
                        ]
                    ),
                    "source_id": datasets.Value("string"),
                    "source_text": datasets.Value("string"),
                    "source_stance": datasets.features.ClassLabel(
                        names=[
                            "Supporting",
                            "Denying",
                            "Querying",
                            "Commenting",
                        ]
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://aclanthology.org/W19-6122/",
            citation=_CITATION,
        )
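
    # Both ClassLabel features above encode the four SDQC stance labels
    # (Supporting, Denying, Querying, Commenting) as integers 0-3 in the
    # encoded dataset; the raw jsonl stores them as strings.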

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        downloaded_file = dl_manager.download_and_extract(_URL)
        data_files = {
            "dast": downloaded_file,
        }

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["dast"], "split": "train"}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["dast"], "split": "validation"}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["dast"], "split": "test"}),
        ]
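
    # Note: all three splits read the same dast.jsonl file; the actual
    # train/validation/test partitioning happens in _generate_examples,
    # which keeps only instances whose source submission id appears in the
    # split's hard-coded id tuple.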

    def unpack(self, entry, parent_id=None, source_id=None):
        # Branches arrive as (possibly nested) lists of comment dicts;
        # recurse through the lists and emit one instance per comment.
        if isinstance(entry, dict):
            e = entry["comment"]
            original_id = e["comment_id"]
            text = e["text"]
            parent_id = e["parent_id"]
            parent_stance = e["SDQC_Parent"]
            source_id = e["submission_id"]
            source_stance = e["SDQC_Submission"]
            self.texts[original_id] = text
            instance = {
                "id": str(self.guid),  # declared as a string feature in _info
                "native_id": original_id,
                "text": text,
                "parent_id": parent_id,
                # self.texts is a defaultdict, so a parent missing from the
                # data yields "[deleted]" as parent_text.
                "parent_text": self.texts[parent_id],
                "parent_stance": parent_stance,
                "source_id": source_id,
                "source_text": self.texts[source_id],
                "source_stance": source_stance,
            }
            self.id_mapper[original_id] = self.guid
            self.guid += 1
            yield instance
        elif isinstance(entry, list):
            for sub_entry in entry:
                yield from self.unpack(sub_entry, parent_id=parent_id, source_id=source_id)

    def process_block(self, block):
        j = json.loads(block)
        s = j["redditSubmission"]
        source_id = s["submission_id"]
        # Register the source submission's title so comments can refer back
        # to it via self.texts, then walk the comment branches.
        self.id_mapper[source_id] = self.guid
        self.guid += 1
        self.texts[source_id] = s["title"]
        # The 0 placeholders are overwritten per-comment inside unpack.
        yield from self.unpack(j["branches"], source_id=0, parent_id=0)

    def _generate_examples(self, filepath, split):
        logger.info("⏳ Generating %s examples from %s", split, filepath)

        def _deleted():
            return "[deleted]"

        self.guid = 0
        self.id_mapper = {}
        self.texts = defaultdict(_deleted)

        partition_sources = ()
        if split == "train":
            partition_sources = ('8sjevz', 'a0954m', 'a1gsmt', 'a2fpjr', 'a6o3us', 'ax70y5', 'axnshu', 'b23eat', 'b2xrgd', 'b72gok', 'b7aybw', 'b7ohqt', 'bb9iqt')
        elif split == "validation":
            partition_sources = ('6v1ivh', '76y6rb', '7r9ouo', '8192oe', '83l9nm', '8agt1s', '8clb74', '8k6lcb')
        elif split == "test":
            partition_sources = ('3qc12m', '3ud5z9', '53u5j7', '5emjyw', '5pfq1r', '5t1h6y', '60il0b', '67c2zf', '6jqtkm', '6nz7dy', '6szxwj', '6tm5kp')

        with open(filepath, "r", encoding="utf-8") as dastfile:
            for line in dastfile:
                for instance in self.process_block(line.strip()):
                    if instance["source_id"] in partition_sources:
                        yield instance["id"], instance
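

if __name__ == "__main__":
    # Minimal usage sketch (not part of the loader): load the dataset through
    # this script and print one example per split. This assumes dast.jsonl
    # sits next to this file, as _URL above expects; newer versions of the
    # datasets library may also require trust_remote_code=True for
    # script-based datasets.
    ds = datasets.load_dataset(__file__, "dkstance")
    for split_name, split in ds.items():
        example = split[0]
        print(split_name, len(split), example["source_stance"], example["text"][:60])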