Datasets:
Add the capability to load the entire corpus using the 'all' subset.
Browse files- MASSIVE.py +74 -57
- test_MASSIVE.py +14 -1
MASSIVE.py
CHANGED
|
@@ -260,7 +260,7 @@ _SCENARIOS = ['social', 'transport', 'calendar', 'play', 'news', 'datetime', 're
|
|
| 260 |
|
| 261 |
_INTENTS = ['datetime_query', 'iot_hue_lightchange', 'transport_ticket', 'takeaway_query', 'qa_stock', 'general_greet', 'recommendation_events', 'music_dislikeness', 'iot_wemo_off', 'cooking_recipe', 'qa_currency', 'transport_traffic', 'general_quirky', 'weather_query', 'audio_volume_up', 'email_addcontact', 'takeaway_order', 'email_querycontact', 'iot_hue_lightup', 'recommendation_locations', 'play_audiobook', 'lists_createoradd', 'news_query', 'alarm_query', 'iot_wemo_on', 'general_joke', 'qa_definition', 'social_query', 'music_settings', 'audio_volume_other', 'calendar_remove', 'iot_hue_lightdim', 'calendar_query', 'email_sendemail', 'iot_cleaning', 'audio_volume_down', 'play_radio', 'cooking_query', 'datetime_convert', 'qa_maths', 'iot_hue_lightoff', 'iot_hue_lighton', 'transport_query', 'music_likeness', 'email_query', 'play_music', 'audio_volume_mute', 'social_post', 'alarm_set', 'qa_factoid', 'calendar_set', 'play_game', 'alarm_remove', 'lists_remove', 'transport_taxi', 'recommendation_movies', 'iot_coffee', 'music_query', 'play_podcasts', 'lists_query']
|
| 262 |
|
| 263 |
-
_TAGS = ['O', '
|
| 264 |
|
| 265 |
class MASSIVE(datasets.GeneratorBasedBuilder):
|
| 266 |
"""MASSIVE: A 1M-Example Multilingual Natural Language Understanding Dataset with 51 Typologically-Diverse Languages"""
|
|
@@ -270,9 +270,15 @@ class MASSIVE(datasets.GeneratorBasedBuilder):
|
|
| 270 |
name = name,
|
| 271 |
version = datasets.Version("1.0.0"),
|
| 272 |
description = f"The MASSIVE corpora for {name}",
|
| 273 |
-
) for name in _LANGUAGE_PAIRS
|
| 274 |
]
|
| 275 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 276 |
DEFAULT_CONFIG_NAME = "en-US"
|
| 277 |
|
| 278 |
def _info(self):
|
|
@@ -396,65 +402,76 @@ class MASSIVE(datasets.GeneratorBasedBuilder):
|
|
| 396 |
|
| 397 |
def _generate_examples(self, filepath, split, lang):
|
| 398 |
|
| 399 |
-
|
| 400 |
|
| 401 |
-
|
|
|
|
|
|
|
|
|
|
| 402 |
|
| 403 |
-
|
| 404 |
-
f = open(filepath,"r")
|
| 405 |
-
lines = f.read().split("\n")
|
| 406 |
-
f.close()
|
| 407 |
|
| 408 |
-
|
| 409 |
|
| 410 |
-
|
| 411 |
|
| 412 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 413 |
|
| 414 |
-
|
| 415 |
-
continue
|
| 416 |
|
| 417 |
-
|
| 418 |
-
|
| 419 |
-
|
| 420 |
-
|
| 421 |
-
|
| 422 |
-
|
| 423 |
-
|
| 424 |
-
|
| 425 |
-
|
| 426 |
-
|
| 427 |
-
|
| 428 |
-
|
| 429 |
-
|
| 430 |
-
|
| 431 |
-
|
| 432 |
-
|
| 433 |
-
|
| 434 |
-
|
| 435 |
-
|
| 436 |
-
|
| 437 |
-
|
| 438 |
-
|
| 439 |
-
|
| 440 |
-
|
| 441 |
-
|
| 442 |
-
|
| 443 |
-
|
| 444 |
-
|
| 445 |
-
|
| 446 |
-
|
| 447 |
-
|
| 448 |
-
|
| 449 |
-
|
| 450 |
-
|
| 451 |
-
|
| 452 |
-
|
| 453 |
-
|
| 454 |
-
|
| 455 |
-
|
| 456 |
-
|
| 457 |
-
|
| 458 |
-
|
| 459 |
-
|
| 460 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 260 |
|
| 261 |
_INTENTS = ['datetime_query', 'iot_hue_lightchange', 'transport_ticket', 'takeaway_query', 'qa_stock', 'general_greet', 'recommendation_events', 'music_dislikeness', 'iot_wemo_off', 'cooking_recipe', 'qa_currency', 'transport_traffic', 'general_quirky', 'weather_query', 'audio_volume_up', 'email_addcontact', 'takeaway_order', 'email_querycontact', 'iot_hue_lightup', 'recommendation_locations', 'play_audiobook', 'lists_createoradd', 'news_query', 'alarm_query', 'iot_wemo_on', 'general_joke', 'qa_definition', 'social_query', 'music_settings', 'audio_volume_other', 'calendar_remove', 'iot_hue_lightdim', 'calendar_query', 'email_sendemail', 'iot_cleaning', 'audio_volume_down', 'play_radio', 'cooking_query', 'datetime_convert', 'qa_maths', 'iot_hue_lightoff', 'iot_hue_lighton', 'transport_query', 'music_likeness', 'email_query', 'play_music', 'audio_volume_mute', 'social_post', 'alarm_set', 'qa_factoid', 'calendar_set', 'play_game', 'alarm_remove', 'lists_remove', 'transport_taxi', 'recommendation_movies', 'iot_coffee', 'music_query', 'play_podcasts', 'lists_query']
|
| 262 |
|
| 263 |
+
_TAGS = ['O', 'B-food_type', 'B-movie_type', 'B-person', 'B-change_amount', 'I-relation', 'I-game_name', 'B-date', 'B-movie_name', 'I-person', 'I-place_name', 'I-podcast_descriptor', 'I-audiobook_name', 'B-email_folder', 'B-coffee_type', 'B-app_name', 'I-time', 'I-coffee_type', 'B-transport_agency', 'B-podcast_descriptor', 'I-playlist_name', 'B-media_type', 'B-song_name', 'I-music_descriptor', 'I-song_name', 'B-event_name', 'I-timeofday', 'B-alarm_type', 'B-cooking_type', 'I-business_name', 'I-color_type', 'B-podcast_name', 'I-personal_info', 'B-weather_descriptor', 'I-list_name', 'B-transport_descriptor', 'I-game_type', 'I-date', 'B-place_name', 'B-color_type', 'B-game_name', 'I-artist_name', 'I-drink_type', 'B-business_name', 'B-timeofday', 'B-sport_type', 'I-player_setting', 'I-transport_agency', 'B-game_type', 'B-player_setting', 'I-music_album', 'I-event_name', 'I-general_frequency', 'I-podcast_name', 'I-cooking_type', 'I-radio_name', 'I-joke_type', 'I-meal_type', 'I-transport_type', 'B-joke_type', 'B-time', 'B-order_type', 'B-business_type', 'B-general_frequency', 'I-food_type', 'I-time_zone', 'B-currency_name', 'B-time_zone', 'B-ingredient', 'B-house_place', 'B-audiobook_name', 'I-ingredient', 'I-media_type', 'I-news_topic', 'B-music_genre', 'I-definition_word', 'B-list_name', 'B-playlist_name', 'B-email_address', 'I-currency_name', 'I-movie_name', 'I-device_type', 'I-weather_descriptor', 'B-audiobook_author', 'I-audiobook_author', 'I-app_name', 'I-order_type', 'I-transport_name', 'B-radio_name', 'I-business_type', 'B-definition_word', 'B-artist_name', 'I-movie_type', 'B-transport_name', 'I-email_folder', 'B-music_album', 'I-house_place', 'I-music_genre', 'B-drink_type', 'I-alarm_type', 'B-music_descriptor', 'B-news_topic', 'B-meal_type', 'I-transport_descriptor', 'I-email_address', 'I-change_amount', 'B-device_type', 'B-transport_type', 'B-relation', 'I-sport_type', 'B-personal_info']
|
| 264 |
|
| 265 |
class MASSIVE(datasets.GeneratorBasedBuilder):
|
| 266 |
"""MASSIVE: A 1M-Example Multilingual Natural Language Understanding Dataset with 51 Typologically-Diverse Languages"""
|
|
|
|
| 270 |
name = name,
|
| 271 |
version = datasets.Version("1.0.0"),
|
| 272 |
description = f"The MASSIVE corpora for {name}",
|
| 273 |
+
) for name in _LANGUAGE_PAIRS
|
| 274 |
]
|
| 275 |
|
| 276 |
+
BUILDER_CONFIGS.append(datasets.BuilderConfig(
|
| 277 |
+
name = "all",
|
| 278 |
+
version = datasets.Version("1.0.0"),
|
| 279 |
+
description = f"The MASSIVE corpora for entire corpus",
|
| 280 |
+
))
|
| 281 |
+
|
| 282 |
DEFAULT_CONFIG_NAME = "en-US"
|
| 283 |
|
| 284 |
def _info(self):
|
|
|
|
| 402 |
|
| 403 |
def _generate_examples(self, filepath, split, lang):
    """Yield ``(key, example)`` pairs for the requested split.

    Args:
        filepath: Root directory of the extracted MASSIVE archive; the
            per-locale data is expected at ``<filepath>/1.0/data/<locale>.jsonl``.
        split: Partition to keep — examples whose ``"partition"`` field
            differs from this value are skipped.
        lang: A single locale code (e.g. ``"en-US"``) or ``"all"`` to
            iterate over every locale in ``_LANGUAGE_PAIRS``.

    Yields:
        Tuples of a monotonically increasing integer key and a dict with
        the example's fields (id, locale, partition, scenario, intent,
        utt, annot_utt, tokens, ner_tags, worker_id, slot_method,
        judgments).
    """
    key_ = 0

    # "all" expands to the full locale list; otherwise wrap the single
    # locale so both cases iterate the same way. Bind to a new name
    # instead of rebinding (and shadowing) the `lang` parameter.
    locales = _LANGUAGE_PAIRS if lang == "all" else [lang]

    logger.info("⏳ Generating examples from = %s", ", ".join(locales))

    for locale in locales:
        path = filepath + "/1.0/data/" + locale + ".jsonl"

        # Read the file; `with` guarantees the handle is closed even if
        # reading raises (the original bare open()/close() leaked it).
        with open(path, "r", encoding="utf-8") as f:
            lines = f.read().split("\n")

        for line in lines:
            # Splitting on "\n" leaves a trailing empty string (and any
            # blank lines); json.loads would raise on those — skip them.
            if not line.strip():
                continue

            data = json.loads(line)

            if data["partition"] != split:
                continue

            # Slot method (absent for some locales/partitions).
            if "slot_method" in data:
                slot_method = [
                    {
                        "slot": s["slot"],
                        "method": s["method"],
                    } for s in data["slot_method"]
                ]
            else:
                slot_method = []

            # Judgments (absent for some locales/partitions).
            if "judgments" in data:
                judgments = [
                    {
                        "worker_id": j["worker_id"],
                        "intent_score": j["intent_score"],
                        "slots_score": j["slots_score"],
                        "grammar_score": j["grammar_score"],
                        "spelling_score": j["spelling_score"],
                        # Older records lack this key; "target" is the default.
                        "language_identification": j["language_identification"] if "language_identification" in j else "target",
                    } for j in data["judgments"]
                ]
            else:
                judgments = []

            # Project helper: converts the annotated utterance into
            # BIO-format token/tag sequences.
            tokens, tags = self._getBioFormat(data["annot_utt"])

            yield key_, {
                "id": data["id"],
                "locale": data["locale"],
                "partition": data["partition"],
                "scenario": data["scenario"],
                "intent": data["intent"],
                "utt": data["utt"],
                "annot_utt": data["annot_utt"],
                "tokens": tokens,
                "ner_tags": tags,
                "worker_id": data["worker_id"],
                "slot_method": slot_method,
                "judgments": judgments,
            }

            key_ += 1
test_MASSIVE.py
CHANGED
|
@@ -5,7 +5,9 @@ set_caching_enabled(False)
|
|
| 5 |
source = "MASSIVE.py"
|
| 6 |
# source = "qanastek/MASSIVE"
|
| 7 |
|
| 8 |
-
dataset = load_dataset(source, "
|
|
|
|
|
|
|
| 9 |
# dataset = load_dataset(source, "fr-FR", download_mode="force_redownload")
|
| 10 |
# print(dataset)
|
| 11 |
|
|
@@ -17,3 +19,14 @@ print(dataset)
|
|
| 17 |
# print(dataset[0])
|
| 18 |
f = dataset["train"][0]
|
| 19 |
print(f)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 5 |
source = "MASSIVE.py"
|
| 6 |
# source = "qanastek/MASSIVE"
|
| 7 |
|
| 8 |
+
dataset = load_dataset(source, "all")
|
| 9 |
+
# dataset = load_dataset(source, "zh-CN")
|
| 10 |
+
# dataset = load_dataset(source, "fr-FR")
|
| 11 |
# dataset = load_dataset(source, "fr-FR", download_mode="force_redownload")
|
| 12 |
# print(dataset)
|
| 13 |
|
|
|
|
| 19 |
# print(dataset[0])
|
| 20 |
f = dataset["train"][0]
|
| 21 |
print(f)
|
| 22 |
+
|
| 23 |
+
# tags = []
|
| 24 |
+
|
| 25 |
+
# for e in dataset["train"]:
|
| 26 |
+
# tags.extend(
|
| 27 |
+
# e["ner_tags"]
|
| 28 |
+
# )
|
| 29 |
+
|
| 30 |
+
# print("#"*50)
|
| 31 |
+
|
| 32 |
+
# print(list(set(tags)))
|