Dataset Viewer
The dataset viewer is not available for this subset.
Cannot get the split names for the config 'default' of the dataset.
Exception:    SplitsNotFoundError
Message:      The split names could not be parsed from the dataset config.
Traceback:    Traceback (most recent call last):
                File "/usr/local/lib/python3.12/site-packages/datasets/inspect.py", line 289, in get_dataset_config_info
                  for split_generator in builder._split_generators(
                                         ^^^^^^^^^^^^^^^^^^^^^^^^^^
                File "/usr/local/lib/python3.12/site-packages/datasets/packaged_modules/folder_based_builder/folder_based_builder.py", line 191, in _split_generators
                  for pa_metadata_table in self._read_metadata(downloaded_metadata_file, metadata_ext=metadata_ext):
                                           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
                File "/usr/local/lib/python3.12/site-packages/datasets/packaged_modules/folder_based_builder/folder_based_builder.py", line 296, in _read_metadata
                  for df in csv_file_reader:
                            ^^^^^^^^^^^^^^^
                File "/usr/local/lib/python3.12/site-packages/pandas/io/parsers/readers.py", line 1843, in __next__
                  return self.get_chunk()
                         ^^^^^^^^^^^^^^^^
                File "/usr/local/lib/python3.12/site-packages/pandas/io/parsers/readers.py", line 1985, in get_chunk
                  return self.read(nrows=size)
                         ^^^^^^^^^^^^^^^^^^^^^
                File "/usr/local/lib/python3.12/site-packages/pandas/io/parsers/readers.py", line 1923, in read
                  ) = self._engine.read(  # type: ignore[attr-defined]
                      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
                File "/usr/local/lib/python3.12/site-packages/pandas/io/parsers/c_parser_wrapper.py", line 234, in read
                  chunks = self._reader.read_low_memory(nrows)
                           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
                File "pandas/_libs/parsers.pyx", line 850, in pandas._libs.parsers.TextReader.read_low_memory
                File "pandas/_libs/parsers.pyx", line 905, in pandas._libs.parsers.TextReader._read_rows
                File "pandas/_libs/parsers.pyx", line 874, in pandas._libs.parsers.TextReader._tokenize_rows
                File "pandas/_libs/parsers.pyx", line 891, in pandas._libs.parsers.TextReader._check_tokenize_status
                File "pandas/_libs/parsers.pyx", line 2061, in pandas._libs.parsers.raise_parser_error
              pandas.errors.ParserError: Error tokenizing data. C error: Expected 1 fields in line 6, saw 3
              
              
              The above exception was the direct cause of the following exception:
              
              Traceback (most recent call last):
                File "/src/services/worker/src/worker/job_runners/config/split_names.py", line 65, in compute_split_names_from_streaming_response
                  for split in get_dataset_split_names(
                               ^^^^^^^^^^^^^^^^^^^^^^^^
                File "/usr/local/lib/python3.12/site-packages/datasets/inspect.py", line 343, in get_dataset_split_names
                  info = get_dataset_config_info(
                         ^^^^^^^^^^^^^^^^^^^^^^^^
                File "/usr/local/lib/python3.12/site-packages/datasets/inspect.py", line 294, in get_dataset_config_info
                  raise SplitsNotFoundError("The split names could not be parsed from the dataset config.") from err
              datasets.inspect.SplitsNotFoundError: The split names could not be parsed from the dataset config.

Need help to make the dataset viewer work? Make sure to review how to configure the dataset viewer, and open a discussion for direct support.

# NOTE(review): this script was recovered from a rendered dataset-card page;
# the import statements were originally collapsed onto a single (syntactically
# invalid) line and have been restored to valid Python here.
import logging

import datasets
from datasets import Audio, DatasetDict, DatasetInfo, Features, Sequence, Value
from transformers import pipeline

# Logging setup (original comment: "Настройка логирования" — it had lost its '#').
logging.basicConfig(level=logging.INFO)
# Bug fix: the original called logging.getLogger(name) — `name` is undefined at
# module level; the conventional module-scoped logger uses `__name__`.
logger = logging.getLogger(__name__)

# Dataset-card metadata surfaced through _info().
_DESCRIPTION = """Расширенная версия VALL-E-X_Dataset с автоматическим распознаванием речи (ASR)"""

_HOMEPAGE = "https://huggingface.co/datasets/Kremon96/VALL-E-X_Dataset"

_LICENSE = "mit"

class VallEXWithASR(datasets.GeneratorBasedBuilder):
    """VALL-E-X dataset with automatic audio transcription via Whisper ASR.

    (Original docstring: "Датасет VALL-E-X с автоматической транскрипцией аудио.")
    Wraps ``Kremon96/VALL-E-X_Dataset`` and adds a ``transcription`` column
    produced by ``openai/whisper-base``.
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Declare the dataset schema: 16 kHz audio plus ASR transcription."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "audio": datasets.Audio(sampling_rate=16000),
                "transcription": datasets.Value("string"),
                "duration": datasets.Value("float32"),
                "original_metadata": datasets.Value("string"),
            }),
            homepage=_HOMEPAGE,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Load the upstream dataset and expose it as a single TRAIN split.

        ``trust_remote_code=True`` is required because the upstream repo ships
        its own loading script.
        """
        original_dataset = datasets.load_dataset(
            "Kremon96/VALL-E-X_Dataset",
            split="train",
            trust_remote_code=True,
        )

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"dataset": original_dataset},
            )
        ]

    def _load_asr_pipeline(self):
        """Attempt to load the Whisper ASR pipeline once; return None on failure."""
        logger.info("Загрузка модели Whisper для ASR...")
        try:
            return pipeline(
                "automatic-speech-recognition",
                model="openai/whisper-base",
                device=-1,  # -1 = CPU, 0 = first GPU
            )
        except Exception as e:
            logger.error(f"Ошибка загрузки модели: {e}")
            return None

    def _generate_examples(self, dataset):
        """Yield ``(idx, example)`` pairs, adding a Whisper transcription.

        Bug fix vs. the original: the ASR pipeline is loaded exactly once,
        before the loop. The original lazily re-attempted the expensive model
        load on *every* iteration after a failure (``asr_pipeline`` was reset
        to None inside the loop's except branch).
        """
        asr_pipeline = self._load_asr_pipeline()

        for idx, example in enumerate(dataset):
            audio_data = example["audio"]

            # Transcribe only when both the model and the waveform are available.
            transcription = ""
            if asr_pipeline is not None and audio_data["array"] is not None:
                try:
                    result = asr_pipeline({
                        "array": audio_data["array"],
                        "sampling_rate": audio_data["sampling_rate"],
                    })
                    transcription = result["text"]
                    logger.info(f"Образец {idx}: Транскрибировано успешно")
                except Exception as e:
                    logger.warning(f"Ошибка транскрипции для образца {idx}: {e}")
                    # Embed a truncated error marker so per-sample failures are
                    # visible in the data instead of silently empty.
                    transcription = f"[Ошибка ASR: {str(e)[:50]}]"

            yield idx, {
                "audio": {
                    "path": None,  # in-memory audio has no backing file path
                    "array": audio_data["array"],
                    # NOTE(review): hard-coded 16000 assumes the source audio is
                    # already 16 kHz — confirm, or propagate
                    # audio_data["sampling_rate"] instead.
                    "sampling_rate": 16000,
                },
                "transcription": transcription,
                "duration": example.get("duration", 0.0),
                "original_metadata": str(example),
            }
Downloads last month
56