DominguesPH committed on
Commit c956bfb · 1 Parent(s): 47c933e

Upload BPSAD.py


Loading script for BPSAD.
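
The script expects the concatenated.csv file from the Kaggle page to be downloaded manually and passed via data_dir. A minimal usage sketch, assuming the CSV has been placed in ./data (an illustrative path, not part of the script):

    import datasets

    data = datasets.load_dataset(
        path="BPSAD.py",    # this loading script
        name="Polarity",    # or "Rating"
        data_dir="./data",  # directory containing concatenated.csv (assumed location)
    )
    print(data)             # DatasetDict with train, validation and test splits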

Files changed (1)
  1. BPSAD.py +235 -0
BPSAD.py ADDED
@@ -0,0 +1,235 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset
+ # script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Brazilian Portuguese Sentiment Analysis Datasets (BPSAD)."""
+
+ import csv
+ import os
+ import re
+
+ import datasets
+ import pandas as pd
+
+
+ # Helper functions
+ def get_text(text):
+     """Rebuild each review as a plain string from its stringified token list."""
+     preproc_text = []
+     for sentence in text:
+         # 'review_text_tokenized' stores each review as "['tok1', 'tok2', ...]";
+         # extract the quoted tokens and join them with spaces.
+         preproc_sentence = re.findall("'([^']*)'", sentence)
+         preproc_sentence = ' '.join(preproc_sentence)
+         preproc_text.append(preproc_sentence)
+     return preproc_text
+
+
+ def get_kfold(text, label, kfold_ref, kfolds):
+     """Keep only the examples whose k-fold index is in `kfolds`."""
+     output_dictionary = {}
+     boolean_vec = [kfold_ref[i] in kfolds for i in range(len(kfold_ref))]
+     output_dictionary['text'] = [text[i] for i in range(len(text)) if boolean_vec[i]]
+     output_dictionary['label'] = [int(label[i]) for i in range(len(label)) if boolean_vec[i]]
+     output_dictionary['kfold'] = [int(kfold_ref[i]) for i in range(len(text)) if boolean_vec[i]]
+     return output_dictionary
+
+
+ def load_bpsad_p(address):
+     """Load the polarity task: text, polarity label and k-fold index."""
+     table = pd.read_csv(address, low_memory=False)
+
+     # We'll use 'review_text_tokenized' and 'polarity'
+     text = table['review_text_tokenized'].to_list()
+     text = get_text(text)
+     label = table['polarity'].to_list()
+     kfold = table['kfold_polarity'].to_list()
+     # Split by k-fold index (rows with NaN polarity/fold are dropped):
+     # folds 1-8 -> train, fold 9 -> dev, fold 10 -> test.
+     data_train = get_kfold(text, label, kfold, [1, 2, 3, 4, 5, 6, 7, 8])
+     data_dev = get_kfold(text, label, kfold, [9])
+     data_test = get_kfold(text, label, kfold, [10])
+     return data_train, data_dev, data_test
+
+
+ def load_bpsad_r(address):
+     """Load the rating task: text, rating label and k-fold index."""
+     table = pd.read_csv(address, low_memory=False)
+
+     # We'll use 'review_text_tokenized' and 'rating'
+     text = table['review_text_tokenized'].to_list()
+     text = get_text(text)
+     label = table['rating'].to_list()
+     kfold = table['kfold_polarity'].to_list()
+     # Split by k-fold index (rows with NaN polarity/fold are dropped):
+     # folds 1-8 -> train, fold 9 -> dev, fold 10 -> test.
+     data_train = get_kfold(text, label, kfold, [1, 2, 3, 4, 5, 6, 7, 8])
+     data_dev = get_kfold(text, label, kfold, [9])
+     data_test = get_kfold(text, label, kfold, [10])
+     return data_train, data_dev, data_test
+
+
+ _HOMEPAGE = "https://www.kaggle.com/datasets/fredericods/ptbr-sentiment-analysis-datasets"
+
+ _DESCRIPTION = """
+ The Brazilian Portuguese Sentiment Analysis Dataset (BPSAD) is the concatenation
+ of 5 different sources (Olist, B2W Digital, Buscapé, UTLC-Apps and UTLC-Movies),
+ each one composed of review sentences classified according to polarity
+ (0: negative; 1: positive) and rating (1, 2, 3, 4 and 5 stars).
+ """
+
+ _CITATION = r"""
+ @misc{bpsadV1,
+     title={Brazilian Portuguese Sentiment Analysis Datasets},
+     author={Dias, Frederico},
+     howpublished={\url{https://www.kaggle.com/datasets/fredericods/ptbr-sentiment-analysis-datasets}},
+     year={2021},
+     note={Version 1},
+ }
+ """
+
+ _LICENSE = ""
+
+ _MANUAL_DOWNLOAD_INSTRUCTIONS = """
+ data = datasets.load_dataset(
+     path='BPSAD.py',
+     name='Polarity',  # or 'Rating'
+     data_dir='path to the directory containing concatenated.csv')
+ """
+
+
+ class BPSADPolarity(datasets.GeneratorBasedBuilder):
+     """BPSAD: polarity and rating classification tasks for the BPSAD dataset."""
+
+     VERSION = datasets.Version("1.0.0")
+
+     # Two configurations are available. Select one with, e.g.:
+     #   datasets.load_dataset('BPSAD.py', 'Polarity', data_dir=...)
+     #   datasets.load_dataset('BPSAD.py', 'Rating', data_dir=...)
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="Polarity", version=VERSION, description="Polarity classification of the Brazilian Portuguese Sentiment Analysis Datasets (BPSAD)"),
+         datasets.BuilderConfig(name="Rating", version=VERSION, description="Rating classification of the Brazilian Portuguese Sentiment Analysis Datasets (BPSAD)"),
+     ]
+
+     DEFAULT_CONFIG_NAME = "Polarity"  # A default configuration is optional; use one if it makes sense.
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "text": datasets.Value("string"),
+                 "label": datasets.Value("int8"),
+                 "kfold": datasets.Value("int8"),
+             }
+         )
+
+         return datasets.DatasetInfo(
+             # Description shown on the dataset page.
+             description=_DESCRIPTION,
+             # Column names and types of the dataset.
+             features=features,
+             # Homepage of the dataset for documentation.
+             homepage=_HOMEPAGE,
+             # License for the dataset, if available.
+             license=_LICENSE,
+             # Citation for the dataset.
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
+         # Check that the manual folder exists.
+         if not os.path.exists(data_dir):
+             raise FileNotFoundError(
+                 f"{data_dir} does not exist. Make sure you pass a manual dir via "
+                 f"`datasets.load_dataset('bpsad', data_dir=...)`. "
+                 f"Manual download instructions: {_MANUAL_DOWNLOAD_INSTRUCTIONS}"
+             )
+
+         data_file = os.path.join(data_dir, "concatenated.csv")
+         # Check that the dataset file exists.
+         if not os.path.exists(data_file):
+             raise FileNotFoundError(
+                 f"{data_file} does not exist. Make sure the downloaded data is inside the manual dir "
+                 f"passed via `datasets.load_dataset('bpsad', data_dir=...)`. "
+                 f"Manual download instructions: {_MANUAL_DOWNLOAD_INSTRUCTIONS}"
+             )
+
+         if self.config.name == "Polarity":
+             data_train, data_dev, data_test = load_bpsad_p(data_file)
+         else:
+             data_train, data_dev, data_test = load_bpsad_r(data_file)
+
+         pd.DataFrame(data_train).to_csv(
+             os.path.join(data_dir, "train.csv"), index=False, header=False)
+         pd.DataFrame(data_dev).to_csv(
+             os.path.join(data_dir, "dev.csv"), index=False, header=False)
+         pd.DataFrame(data_test).to_csv(
+             os.path.join(data_dir, "test.csv"), index=False, header=False)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples.
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "train.csv"),
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "dev.csv"),
+                     "split": "dev",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "test.csv"),
+                     "split": "test",
+                 },
+             ),
+         ]
+
+     # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
+     def _generate_examples(self, filepath, split):
+         with open(filepath, "r", encoding="utf-8", newline="") as f:
+             reader = csv.reader(f)
+             for key, row in enumerate(reader):
+                 # Yields examples as (key, example) tuples.
+                 yield key, {
+                     "text": row[0],
+                     "label": int(row[1]),
+                     "kfold": int(row[2]),
+                 }
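
Note that both configurations derive their splits from the kfold_polarity column: folds 1 to 8 go to train, fold 9 to validation and fold 10 to test. A quick sketch of how the resulting splits could be inspected, assuming the dataset was loaded with the illustrative ./data directory from above:

    import datasets

    data = datasets.load_dataset("BPSAD.py", "Polarity", data_dir="./data")  # assumed path
    print(data["train"].features)                    # text: string, label: int8, kfold: int8
    print(sorted(set(data["train"]["kfold"])))       # expected folds 1-8
    print(sorted(set(data["validation"]["kfold"])))  # expected fold 9
    print(sorted(set(data["test"]["kfold"])))        # expected fold 10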