| |
| |
|
|
| import datasets |
| import pandas as pd |
| from datasets import ClassLabel |
| import os |
|
|
"""Loading script for the MELD (Multimodal EmotionLines Dataset) audio dataset."""
|
|
# Project page for the MELD dataset.
_HOMEPAGE = "https://affective-meld.github.io/"


# BibTeX entries for the MELD and EmotionLines papers.
_CITATION = """\
@article{poria2018meld,
  title={Meld: A multimodal multi-party dataset for emotion recognition in conversations},
  author={Poria, Soujanya and Hazarika, Devamanyu and Majumder, Navonil and Naik, Gautam and Cambria, Erik and Mihalcea, Rada},
  journal={arXiv preprint arXiv:1810.02508},
  year={2018}
}
@article{chen2018emotionlines,
  title={Emotionlines: An emotion corpus of multi-party conversations},
  author={Chen, Sheng-Yeh and Hsu, Chao-Chun and Kuo, Chuan-Chun and Ku, Lun-Wei and others},
  journal={arXiv preprint arXiv:1802.08379},
  year={2018}
}
"""


# Human-readable summary shown on the dataset card.
_DESCRIPTION = """\
Multimodal EmotionLines Dataset (MELD) has been created by enhancing and extending EmotionLines dataset.
MELD contains the same dialogue instances available in EmotionLines, but it also encompasses audio and
visual modality along with text. MELD has more than 1400 dialogues and 13000 utterances from Friends TV series.
Multiple speakers participated in the dialogues. Each utterance in a dialogue has been labeled by any of these
seven emotions -- Anger, Disgust, Sadness, Joy, Neutral, Surprise and Fear. MELD also has sentiment (positive,
negative and neutral) annotation for each utterance.
This dataset is modified from https://huggingface.co/datasets/zrr1999/MELD_Text_Audio.
The audio is extracted from MELD mp4 files while the audio only has one channel with sample rate 16khz.
"""


# License identifier for the dataset.
_LICENSE = "gpl-3.0"
|
|
|
|
class MELD_Audio(datasets.GeneratorBasedBuilder):
    """Dataset builder for MELD audio: 16 kHz mono clips paired with utterance
    text, emotion, and sentiment labels from the per-split CSV metadata."""

    # Audio examples are large; a smaller writer batch keeps memory bounded.
    DEFAULT_WRITER_BATCH_SIZE = 256
    DEFAULT_CONFIG_NAME = "MELD_Audio"
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="MELD_Audio", version=datasets.Version("0.0.1"), description="MELD audio"),
    ]

    def _info(self):
        """Return dataset metadata and the feature schema for one example."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "path": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16000),
                    "emotion": datasets.Value("string"),
                    "sentiment": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Per-split CSV files with utterance text and labels.
        metadata_dir: dict[str, str] = dl_manager.download_and_extract(
            {"train": "train.csv", "dev": "dev.csv", "test": "test.csv"}
        )
        # Tar archives containing the FLAC clips for each split.
        data_path: dict[str, str] = dl_manager.download(
            {
                "train": "archive/train.tar.gz",
                "dev": "archive/dev.tar.gz",
                "test": "archive/test.tar.gz",
            }
        )
        # In streaming mode the archives are never extracted to disk; examples
        # are then read directly from the tar stream via iter_archive below,
        # and the extracted-path entries stay None.
        local_extracted_archive: dict[str, str] = (
            dl_manager.extract(data_path)
            if not dl_manager.is_streaming
            else {
                "train": None,
                "dev": None,
                "test": None,
            }
        )

        # All three splits share the same kwargs shape; only the split key and
        # the datasets.Split name differ.
        split_names = {
            "train": datasets.Split.TRAIN,
            "dev": datasets.Split.VALIDATION,
            "test": datasets.Split.TEST,
        }
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "filepath": metadata_dir[split],
                    "split": split,
                    "local_extracted_archive": local_extracted_archive[split],
                    "audio_files": dl_manager.iter_archive(data_path[split]),
                },
            )
            for split, split_name in split_names.items()
        ]

    def _generate_examples(self, filepath, split, local_extracted_archive, audio_files):
        """Yields examples.

        Matches each FLAC member of the split archive against the CSV metadata
        keyed by its in-archive path (``{split}/dia{D}_utt{U}.flac``) and emits
        one example per matched clip.
        """
        metadata_df = pd.read_csv(filepath, sep=",", index_col=0, header=0)
        # Index metadata rows by the path the clip has inside the tar archive.
        metadata = {}
        for _, row in metadata_df.iterrows():
            clip_id = f"dia{row['Dialogue_ID']}_utt{row['Utterance_ID']}"
            metadata[f"{split}/{clip_id}.flac"] = row

        key = 0
        for path, f in audio_files:
            if path in metadata:
                row = metadata[path]
                # When the archive was extracted locally, expose the absolute
                # on-disk path; in streaming mode keep the in-archive path.
                path = os.path.join(local_extracted_archive, path) if local_extracted_archive else path
                # Bug fix: the Audio feature expects the string key "bytes";
                # the original used the builtin `bytes` type as the dict key,
                # so the decoded audio payload was silently dropped.
                audio = {"path": path, "bytes": f.read()}
                yield (
                    key,
                    {
                        "text": row["Utterance"],
                        "path": path,
                        "audio": audio,
                        "emotion": row["Emotion"],
                        "sentiment": row["Sentiment"],
                    },
                )
                key += 1
|
|