Datasets:

Languages:
Indonesian
ArXiv:
holylovenia committed on
Commit
1c548c2
·
verified ·
1 Parent(s): fce6349

Upload indolem_tweet_ordering.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. indolem_tweet_ordering.py +24 -24
indolem_tweet_ordering.py CHANGED
@@ -28,7 +28,7 @@ To create a dataset loading script you will create a class and implement 3 metho
28
 
29
  TODO: Before submitting your script, delete this doc string and replace it with a description of your dataset.
30
 
31
- [nusantara_schema_name] = (kb, pairs, qa, text, t2t, entailment)
32
  """
33
  from base64 import encode
34
  import json
@@ -37,10 +37,10 @@ from typing import Dict, List, Tuple
37
 
38
  import datasets
39
 
40
- from nusacrowd.utils import schemas
41
- from nusacrowd.utils.common_parser import load_conll_data
42
- from nusacrowd.utils.configs import NusantaraConfig
43
- from nusacrowd.utils.constants import Tasks
44
 
45
  # TODO: Add BibTeX citation
46
  _CITATION = """\
@@ -93,7 +93,7 @@ _LICENSE = "Creative Commons Attribution 4.0"
93
  # For local datasets, this variable can be an empty dictionary.
94
 
95
  # For publicly available datasets you will most likely end up passing these URLs to dl_manager in _split_generators.
96
- # In most cases the URLs will be the same for the source and nusantara config.
97
  # However, if you need to access different files for each config you can have multiple entries in this dict.
98
  # This can be an arbitrarily nested dict/list of URLs (see below in `_split_generators` method)
99
  _URLS = {
@@ -112,7 +112,7 @@ _SUPPORTED_TASKS = [Tasks.SENTENCE_ORDERING] # example: [Tasks.TRANSLATION, Tas
112
  # provided by the original dataset as a version goes.
113
  _SOURCE_VERSION = "1.0.0"
114
 
115
- _NUSANTARA_VERSION = "1.0.0"
116
 
117
 
118
  # TODO: Name the dataset class to match the script name using CamelCase instead of snake_case
@@ -120,40 +120,40 @@ class IndolemTweetOrderingDataset(datasets.GeneratorBasedBuilder):
120
  """This task is based on the sentence ordering task of Barzilay and Lapata (2008) to assess text relatedness. We construct the data by shuffling Twitter threads (containing 3 to 5 tweets), and assessing the predicted ordering in terms of rank correlation (p) with the original."""
121
  label_classes = [0,1,2,3,4]
122
  SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
123
- NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)
124
 
125
- # You will be able to load the "source" or "nusanrata" configurations with
126
  # ds_source = datasets.load_dataset('my_dataset', name='source')
127
- # ds_nusantara = datasets.load_dataset('my_dataset', name='nusantara')
128
 
129
  # For local datasets you can make use of the `data_dir` and `data_files` kwargs
130
  # https://huggingface.co/docs/datasets/add_dataset.html#downloading-data-files-and-organizing-splits
131
  # ds_source = datasets.load_dataset('my_dataset', name='source', data_dir="/path/to/data/files")
132
- # ds_nusantara = datasets.load_dataset('my_dataset', name='nusantara', data_dir="/path/to/data/files")
133
 
134
  # TODO: For each dataset, implement Config for Source and Nusantara;
135
- # If dataset contains more than one subset (see nusantara/nusa_datasets/smsa.py) implement for EACH of them.
136
  # Each of them should contain:
137
- # - name: should be unique for each dataset config eg. smsa_(source|nusantara)_[nusantara_schema_name]
138
- # - version: option = (SOURCE_VERSION|NUSANTARA_VERSION)
139
  # - description: one line description for the dataset
140
- # - schema: options = (source|nusantara_[nusantara_schema_name])
141
  # - subset_id: subset id is the canonical name for the dataset (eg. smsa)
142
- # where [nusantara_schema_name] = (kb, pairs, qa, text, t2t)
143
 
144
  BUILDER_CONFIGS = [
145
- NusantaraConfig(
146
  name="indolem_tweet_ordering_source",
147
  version=SOURCE_VERSION,
148
  description="indolem_tweet_ordering source schema",
149
  schema="source",
150
  subset_id="indolem_tweet_ordering",
151
  ),
152
- NusantaraConfig(
153
- name="indolem_tweet_ordering_nusantara_seq_label",
154
- version=NUSANTARA_VERSION,
155
  description="indolem_tweet_ordering Nusantara schema",
156
- schema="nusantara_seq_label",
157
  subset_id="indolem_tweet_ordering",
158
  ),
159
  ]
@@ -168,7 +168,7 @@ class IndolemTweetOrderingDataset(datasets.GeneratorBasedBuilder):
168
 
169
  if self.config.schema == "source":
170
  features = datasets.Features({"tweets":[datasets.Value("string")], "order": [datasets.Value("int32")]})
171
- elif self.config.schema == "nusantara_seq_label":
172
  features = schemas.seq_label_features(self.label_classes)
173
 
174
  return datasets.DatasetInfo(
@@ -182,7 +182,7 @@ class IndolemTweetOrderingDataset(datasets.GeneratorBasedBuilder):
182
  def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
183
  """Returns SplitGenerators."""
184
  # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
185
- # If you need to access the "source" or "nusantara" config choice, that will be in self.config.name
186
  # LOCAL DATASETS: You do not need the dl_manager; you can ignore this argument. Make sure `gen_kwargs` in the return gets passed the right filepath
187
  # PUBLIC DATASETS: Assign your data-dir based on the dl_manager.
188
  # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs; many examples use the download_and_extract method; see the DownloadManager docs here: https://huggingface.co/docs/datasets/package_reference/builder_classes.html#datasets.DownloadManager
@@ -236,7 +236,7 @@ class IndolemTweetOrderingDataset(datasets.GeneratorBasedBuilder):
236
  ex = {}
237
  if self.config.schema == 'source':
238
  ex = {'tweets': data[i]['tweets'], 'order': data[i]['order']}
239
- elif self.config.schema == 'nusantara_seq_label':
240
  ex = {"id": str(i), "tokens": data[i]['tweets'], "labels": data[i]['order']}
241
  else:
242
  raise ValueError(f"Invalid config: {self.config.name}")
 
28
 
29
  TODO: Before submitting your script, delete this doc string and replace it with a description of your dataset.
30
 
31
+ [seacrowd_schema_name] = (kb, pairs, qa, text, t2t, entailment)
32
  """
33
  from base64 import encode
34
  import json
 
37
 
38
  import datasets
39
 
40
+ from seacrowd.utils import schemas
41
+ from seacrowd.utils.common_parser import load_conll_data
42
+ from seacrowd.utils.configs import SEACrowdConfig
43
+ from seacrowd.utils.constants import Tasks
44
 
45
  # TODO: Add BibTeX citation
46
  _CITATION = """\
 
93
  # For local datasets, this variable can be an empty dictionary.
94
 
95
  # For publicly available datasets you will most likely end up passing these URLs to dl_manager in _split_generators.
96
+ # In most cases the URLs will be the same for the source and seacrowd config.
97
  # However, if you need to access different files for each config you can have multiple entries in this dict.
98
  # This can be an arbitrarily nested dict/list of URLs (see below in `_split_generators` method)
99
  _URLS = {
 
112
  # provided by the original dataset as a version goes.
113
  _SOURCE_VERSION = "1.0.0"
114
 
115
+ _SEACROWD_VERSION = "2024.06.20"
116
 
117
 
118
  # TODO: Name the dataset class to match the script name using CamelCase instead of snake_case
 
120
  """This task is based on the sentence ordering task of Barzilay and Lapata (2008) to assess text relatedness. We construct the data by shuffling Twitter threads (containing 3 to 5 tweets), and assessing the predicted ordering in terms of rank correlation (p) with the original."""
121
  label_classes = [0,1,2,3,4]
122
  SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
123
+ SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
124
 
125
+ # You will be able to load the "source" or "seacrowd" configurations with
126
  # ds_source = datasets.load_dataset('my_dataset', name='source')
127
+ # ds_seacrowd = datasets.load_dataset('my_dataset', name='seacrowd')
128
 
129
  # For local datasets you can make use of the `data_dir` and `data_files` kwargs
130
  # https://huggingface.co/docs/datasets/add_dataset.html#downloading-data-files-and-organizing-splits
131
  # ds_source = datasets.load_dataset('my_dataset', name='source', data_dir="/path/to/data/files")
132
+ # ds_seacrowd = datasets.load_dataset('my_dataset', name='seacrowd', data_dir="/path/to/data/files")
133
 
134
  # TODO: For each dataset, implement Config for Source and Nusantara;
135
+ # If dataset contains more than one subset (see seacrowd/sea_datasets/smsa.py) implement for EACH of them.
136
  # Each of them should contain:
137
+ # - name: should be unique for each dataset config eg. smsa_(source|seacrowd)_[seacrowd_schema_name]
138
+ # - version: option = (SOURCE_VERSION|SEACROWD_VERSION)
139
  # - description: one line description for the dataset
140
+ # - schema: options = (source|seacrowd_[seacrowd_schema_name])
141
  # - subset_id: subset id is the canonical name for the dataset (eg. smsa)
142
+ # where [seacrowd_schema_name] = (kb, pairs, qa, text, t2t)
143
 
144
  BUILDER_CONFIGS = [
145
+ SEACrowdConfig(
146
  name="indolem_tweet_ordering_source",
147
  version=SOURCE_VERSION,
148
  description="indolem_tweet_ordering source schema",
149
  schema="source",
150
  subset_id="indolem_tweet_ordering",
151
  ),
152
+ SEACrowdConfig(
153
+ name="indolem_tweet_ordering_seacrowd_seq_label",
154
+ version=SEACROWD_VERSION,
155
  description="indolem_tweet_ordering Nusantara schema",
156
+ schema="seacrowd_seq_label",
157
  subset_id="indolem_tweet_ordering",
158
  ),
159
  ]
 
168
 
169
  if self.config.schema == "source":
170
  features = datasets.Features({"tweets":[datasets.Value("string")], "order": [datasets.Value("int32")]})
171
+ elif self.config.schema == "seacrowd_seq_label":
172
  features = schemas.seq_label_features(self.label_classes)
173
 
174
  return datasets.DatasetInfo(
 
182
  def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
183
  """Returns SplitGenerators."""
184
  # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
185
+ # If you need to access the "source" or "seacrowd" config choice, that will be in self.config.name
186
  # LOCAL DATASETS: You do not need the dl_manager; you can ignore this argument. Make sure `gen_kwargs` in the return gets passed the right filepath
187
  # PUBLIC DATASETS: Assign your data-dir based on the dl_manager.
188
  # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs; many examples use the download_and_extract method; see the DownloadManager docs here: https://huggingface.co/docs/datasets/package_reference/builder_classes.html#datasets.DownloadManager
 
236
  ex = {}
237
  if self.config.schema == 'source':
238
  ex = {'tweets': data[i]['tweets'], 'order': data[i]['order']}
239
+ elif self.config.schema == 'seacrowd_seq_label':
240
  ex = {"id": str(i), "tokens": data[i]['tweets'], "labels": data[i]['order']}
241
  else:
242
  raise ValueError(f"Invalid config: {self.config.name}")