Datasets:

Modalities:
Text
Formats:
parquet
Languages:
English
ArXiv:
Libraries:
Datasets
pandas
License:
albertvillanova HF Staff committed on
Commit
98175af
·
verified ·
1 Parent(s): 0501e18

Delete loading script

Browse files
Files changed (1) hide show
  1. per_sent.py +0 -149
per_sent.py DELETED
@@ -1,149 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """ **Person SenTiment, a challenge dataset for author sentiment prediction in the news domain **
16
-
17
- PerSenT is a crowd-sourced dataset that captures the sentiment of an author towards the main entity in a news article. This dataset contains annotation for 5.3k documents and 38k paragraphs covering 3.2k unique entities.
18
-
19
- """
20
-
21
-
22
- import csv
23
-
24
- import datasets
25
- from datasets.splits import NamedSplit
26
-
27
-
28
# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
# BibTeX entry surfaced through DatasetInfo.citation.
_CITATION = """\
@inproceedings{bastan2020authors,
title={Author's Sentiment Prediction},
author={Mohaddeseh Bastan and Mahnaz Koupaee and Youngseo Son and Richard Sicoli and Niranjan Balasubramanian},
year={2020},
eprint={2011.06128},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""

# Long-form dataset description surfaced through DatasetInfo.description.
_DESCRIPTION = """\
Person SenTiment (PerSenT) is a crowd-sourced dataset that captures the sentiment of an author towards the main entity in a news article. This dataset contains annotation for 5.3k documents and 38k paragraphs covering 3.2k unique entities.

The dataset consists of sentiment annotations on news articles about people. For each article, annotators judge what the author’s sentiment is towards the main (target) entity of the article. The annotations also include similar judgments on paragraphs within the article.

To split the dataset, entities into 4 mutually exclusive sets. Due to the nature of news collections, some entities tend to dominate the collection. In the collection, there were four entities which were the main entity in nearly 800 articles. To avoid these entities from dominating the train or test splits, we moved them to a separate test collection. We split the remaining into a training, dev, and test sets at random. Thus our collection includes one standard test set consisting of articles drawn at random (Test Standard -- `test_random`), while the other is a test set which contains multiple articles about a small number of popular entities (Test Frequent -- `test_fixed`).
"""

# License string surfaced through DatasetInfo.license.
_LICENSE = "Creative Commons Attribution 4.0 International License"

# One raw-GitHub CSV file per split. Keys are the internal split identifiers
# used elsewhere in this file; note that the "test_random"/"test_fixed" keys
# map to the repository's random_test.csv / fixed_test.csv file names.
_URLs = {
    "train": "https://raw.githubusercontent.com/MHDBST/PerSenT/main/train.csv",
    "dev": "https://raw.githubusercontent.com/MHDBST/PerSenT/main/dev.csv",
    "test_random": "https://raw.githubusercontent.com/MHDBST/PerSenT/main/random_test.csv",
    "test_fixed": "https://raw.githubusercontent.com/MHDBST/PerSenT/main/fixed_test.csv",
}
57
-
58
-
59
class PerSent(datasets.GeneratorBasedBuilder):
    """Person SenTiment (PerSenT) is a crowd-sourced dataset that captures the sentiment of an author towards the main entity in a news article. This dataset contains annotations for 5.3k documents and 38k paragraphs covering 3.2k unique entities."""

    VERSION = datasets.Version("1.1.0")
    # The three sentiment classes annotators could assign.
    LABELS = ["Negative", "Neutral", "Positive"]
    # Document-level label plus one per-paragraph label column (Paragraph0..Paragraph15).
    LABEL_COLS = ["TRUE_SENTIMENT"] + [f"Paragraph{i}" for i in range(16)]

    def _info(self):
        """Return the dataset schema: five metadata columns plus one ClassLabel per label column."""
        sentiment = datasets.features.ClassLabel(names=self.LABELS)
        feature_dict = {
            "DOCUMENT_INDEX": datasets.Value("int64"),
            "TITLE": datasets.Value("string"),
            "TARGET_ENTITY": datasets.Value("string"),
            "DOCUMENT": datasets.Value("string"),
            "MASKED_DOCUMENT": datasets.Value("string"),
            # Every label column shares the same 3-class ClassLabel feature.
            **{col: sentiment for col in self.LABEL_COLS},
        }

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(feature_dict),
            supervised_keys=None,
            homepage="https://stonybrooknlp.github.io/PerSenT",
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the four CSV files and map each to its split generator."""
        paths = {split: dl_manager.download(url) for split, url in _URLs.items()}

        def make_split(name, key):
            # gen_kwargs are forwarded verbatim to _generate_examples.
            return datasets.SplitGenerator(
                name=name,
                gen_kwargs={"filepath": paths[key], "split": key},
            )

        return [
            make_split(datasets.Split.TRAIN, "train"),
            # Two custom test splits: one drawn at random, one over frequent entities.
            make_split(NamedSplit("test_random"), "test_random"),
            make_split(NamedSplit("test_fixed"), "test_fixed"),
            make_split(datasets.Split.VALIDATION, "dev"),
        ]

    def _generate_examples(self, filepath, split):
        """Yield (key, example) pairs from one split's CSV file.

        For examples with missing labels (empty strings in the original files),
        the label value is replaced with -1.
        """
        valid_labels = set(self.LABELS)

        with open(filepath, encoding="utf-8") as csv_file:
            rows = csv.reader(csv_file)

            # Discard the header row.
            next(rows)

            for key, record in enumerate(rows):
                doc_idx, title, target, doc, masked_doc, *raw_labels = record

                example = {
                    "DOCUMENT_INDEX": doc_idx,
                    "TITLE": title,
                    "TARGET_ENTITY": target,
                    "DOCUMENT": doc,
                    "MASKED_DOCUMENT": masked_doc,
                }
                # Anything outside the known label set (e.g. "") becomes -1,
                # which ClassLabel treats as a missing value.
                for col, raw in zip(self.LABEL_COLS, raw_labels):
                    example[col] = raw if raw in valid_labels else -1

                yield key, example