Dataset Preview
Duplicate
The full dataset viewer is not available (click to read why). Only showing a preview of the rows.
The dataset generation failed
Error code:   DatasetGenerationError
Exception:    GatedRepoError
Message:      401 Client Error. (Request ID: Root=1-684f191d-57e914571d895259332d1ea8;15c0674a-83df-4198-87a5-e11d7be394be)

Cannot access gated repo for url https://huggingface.co/datasets/subratasarkar32/IndicVoices_bengali/resolve/f7c35e27d6ba579c6b5bbcd15d6c74ba6c7ca7eb/train/batch_00001/aud_00000010.wav.
Access to dataset subratasarkar32/IndicVoices_bengali is restricted. You must have access to it and be authenticated to access it. Please log in.
Traceback:    Traceback (most recent call last):
                File "/src/services/worker/.venv/lib/python3.9/site-packages/huggingface_hub/utils/_http.py", line 409, in hf_raise_for_status
                  response.raise_for_status()
                File "/src/services/worker/.venv/lib/python3.9/site-packages/requests/models.py", line 1024, in raise_for_status
                  raise HTTPError(http_error_msg, response=self)
              requests.exceptions.HTTPError: 401 Client Error: Unauthorized for url: https://huggingface.co/datasets/subratasarkar32/IndicVoices_bengali/resolve/f7c35e27d6ba579c6b5bbcd15d6c74ba6c7ca7eb/train/batch_00001/aud_00000010.wav
              
              The above exception was the direct cause of the following exception:
              
              Traceback (most recent call last):
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1626, in _prepare_split_single
                  writer.write(example, key)
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 552, in write
                  self.write_examples_on_file()
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 510, in write_examples_on_file
                  self.write_batch(batch_examples=batch_examples)
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 630, in write_batch
                  self.write_table(pa_table, writer_batch_size)
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 645, in write_table
                  pa_table = embed_table_storage(pa_table)
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/table.py", line 2271, in embed_table_storage
                  arrays = [
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/table.py", line 2272, in <listcomp>
                  embed_array_storage(table[name], feature) if require_storage_embed(feature) else table[name]
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/table.py", line 1796, in wrapper
                  return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks])
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/table.py", line 1796, in <listcomp>
                  return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks])
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/table.py", line 2141, in embed_array_storage
                  return feature.embed_storage(array)
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/features/audio.py", line 263, in embed_storage
                  [
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/features/audio.py", line 264, in <listcomp>
                  (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/utils/py_utils.py", line 311, in wrapper
                  return func(value) if value is not None else None
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/features/audio.py", line 259, in path_to_bytes
                  bytes_ = f.read()
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/utils/file_utils.py", line 827, in read_with_retries
                  out = read(*args, **kwargs)
                File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 809, in track_read
                  out = f_read(*args, **kwargs)
                File "/src/services/worker/.venv/lib/python3.9/site-packages/huggingface_hub/hf_file_system.py", line 1012, in read
                  return f.read()
                File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 809, in track_read
                  out = f_read(*args, **kwargs)
                File "/src/services/worker/.venv/lib/python3.9/site-packages/huggingface_hub/hf_file_system.py", line 1076, in read
                  hf_raise_for_status(self.response)
                File "/src/services/worker/.venv/lib/python3.9/site-packages/huggingface_hub/utils/_http.py", line 426, in hf_raise_for_status
                  raise _format(GatedRepoError, message, response) from e
              huggingface_hub.errors.GatedRepoError: 401 Client Error. (Request ID: Root=1-684f191d-5e59a522080816fc390fc6b3;036e8ba4-2cc2-4360-ad7d-49bc9d3489c6)
              
              Cannot access gated repo for url https://huggingface.co/datasets/subratasarkar32/IndicVoices_bengali/resolve/f7c35e27d6ba579c6b5bbcd15d6c74ba6c7ca7eb/train/batch_00001/aud_00000010.wav.
              Access to dataset subratasarkar32/IndicVoices_bengali is restricted. You must have access to it and be authenticated to access it. Please log in.
              
              During handling of the above exception, another exception occurred:
              
              Traceback (most recent call last):
                File "/src/services/worker/.venv/lib/python3.9/site-packages/huggingface_hub/utils/_http.py", line 409, in hf_raise_for_status
                  response.raise_for_status()
                File "/src/services/worker/.venv/lib/python3.9/site-packages/requests/models.py", line 1024, in raise_for_status
                  raise HTTPError(http_error_msg, response=self)
              requests.exceptions.HTTPError: 401 Client Error: Unauthorized for url: https://huggingface.co/datasets/subratasarkar32/IndicVoices_bengali/resolve/f7c35e27d6ba579c6b5bbcd15d6c74ba6c7ca7eb/train/batch_00001/aud_00000010.wav
              
              The above exception was the direct cause of the following exception:
              
              Traceback (most recent call last):
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1635, in _prepare_split_single
                  num_examples, num_bytes = writer.finalize()
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 657, in finalize
                  self.write_examples_on_file()
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 510, in write_examples_on_file
                  self.write_batch(batch_examples=batch_examples)
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 630, in write_batch
                  self.write_table(pa_table, writer_batch_size)
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 645, in write_table
                  pa_table = embed_table_storage(pa_table)
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/table.py", line 2271, in embed_table_storage
                  arrays = [
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/table.py", line 2272, in <listcomp>
                  embed_array_storage(table[name], feature) if require_storage_embed(feature) else table[name]
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/table.py", line 1796, in wrapper
                  return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks])
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/table.py", line 1796, in <listcomp>
                  return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks])
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/table.py", line 2141, in embed_array_storage
                  return feature.embed_storage(array)
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/features/audio.py", line 263, in embed_storage
                  [
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/features/audio.py", line 264, in <listcomp>
                  (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/utils/py_utils.py", line 311, in wrapper
                  return func(value) if value is not None else None
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/features/audio.py", line 259, in path_to_bytes
                  bytes_ = f.read()
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/utils/file_utils.py", line 827, in read_with_retries
                  out = read(*args, **kwargs)
                File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 809, in track_read
                  out = f_read(*args, **kwargs)
                File "/src/services/worker/.venv/lib/python3.9/site-packages/huggingface_hub/hf_file_system.py", line 1012, in read
                  return f.read()
                File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 809, in track_read
                  out = f_read(*args, **kwargs)
                File "/src/services/worker/.venv/lib/python3.9/site-packages/huggingface_hub/hf_file_system.py", line 1076, in read
                  hf_raise_for_status(self.response)
                File "/src/services/worker/.venv/lib/python3.9/site-packages/huggingface_hub/utils/_http.py", line 426, in hf_raise_for_status
                  raise _format(GatedRepoError, message, response) from e
              huggingface_hub.errors.GatedRepoError: 401 Client Error. (Request ID: Root=1-684f191d-57e914571d895259332d1ea8;15c0674a-83df-4198-87a5-e11d7be394be)
              
              Cannot access gated repo for url https://huggingface.co/datasets/subratasarkar32/IndicVoices_bengali/resolve/f7c35e27d6ba579c6b5bbcd15d6c74ba6c7ca7eb/train/batch_00001/aud_00000010.wav.
              Access to dataset subratasarkar32/IndicVoices_bengali is restricted. You must have access to it and be authenticated to access it. Please log in.
              
              The above exception was the direct cause of the following exception:
              
              Traceback (most recent call last):
                File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1431, in compute_config_parquet_and_info_response
                  parquet_operations, partial, estimated_dataset_info = stream_convert_to_parquet(
                File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 992, in stream_convert_to_parquet
                  builder._prepare_split(
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1487, in _prepare_split
                  for job_id, done, content in self._prepare_split_single(
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1644, in _prepare_split_single
                  raise DatasetGenerationError("An error occurred while generating the dataset") from e
              datasets.exceptions.DatasetGenerationError: An error occurred while generating the dataset

Need help to make the dataset viewer work? Make sure to review how to configure the dataset viewer, and open a discussion for direct support.

audio
audio
label
class label
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
0batch_00001
End of preview.

IndicVoices_bengali

This dataset has been created from ai4bharat/IndicVoices. Directly loading the Bengali split of IndicVoices failed because of errors in some files; this dataset fixes the problem by removing those files.

To use this dataset with lazy loading for training speech-to-text models, sample code using Wav2Vec2 is shown below.

import os

import numpy as np
import pandas as pd
import torch
import torchaudio
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader, Dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor


# === Load CSV ===
df = pd.read_csv("updated_train_data.csv")
print(df.columns)
print(df.head())

# 70/15/15 chronological split: carve off 30% of the rows, then halve
# that holdout into validation and test (shuffle=False keeps file order).
train, test = train_test_split(df, test_size=0.3, shuffle=False)
val, test = train_test_split(test, test_size=0.5, shuffle=False)


# === Custom Dataset ===
# === Custom Dataset ===
class ASRDataset(Dataset):
    """Lazily yields (audio features, label ids) pairs for CTC training.

    Each row of *df* is expected to provide 'fname' (path to an audio
    file) and 'text' (its transcript); audio is resampled on the fly to
    *sample_rate* before being passed through *processor*.
    """

    def __init__(self, df, processor, sample_rate=16000):
        self.df = df
        self.processor = processor
        self.sample_rate = sample_rate

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        row = self.df.iloc[idx]
        text = row['text']  # swap in row['phonetic_text'] to train on phonetic transcripts

        # Load the clip and resample only when the file rate differs.
        waveform, sr = torchaudio.load(row['fname'])
        if sr != self.sample_rate:
            resampler = torchaudio.transforms.Resample(sr, self.sample_rate)
            waveform = resampler(waveform)
        waveform = waveform.squeeze()

        # Raw audio -> normalized model input features.
        features = self.processor(
            waveform, sampling_rate=self.sample_rate, return_attention_mask=False
        )
        input_values = features.input_values[0]

        # Transcript -> flat list of token ids (CTC targets).
        labels = (
            self.processor.tokenizer(text, return_tensors="pt", padding=True)
            .input_ids.squeeze(0)
            .tolist()
        )

        return {"input_values": input_values, "labels": labels}

# === Processor and Model ===
# === Processor and Model ===
processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")

# CTC model whose pad token id matches the processor's tokenizer; "mean"
# reduction averages the CTC loss over the batch.
model = Wav2Vec2ForCTC.from_pretrained(
    "facebook/wav2vec2-base-960h",
    pad_token_id=processor.tokenizer.pad_token_id,
    ctc_loss_reduction="mean",
)
model.to("cuda" if torch.cuda.is_available() else "cpu")


# === Collate Function ===
# === Collate Function ===
def collate_fn(batch):
    """Pad a list of {"input_values", "labels"} examples into one batch.

    Relies on the module-level ``processor`` for both the audio padding
    and the tokenizer-side label padding; padded label ids are attached
    under the "labels" key of the returned batch.
    """
    audio = [example["input_values"] for example in batch]
    label_ids = [example["labels"] for example in batch]

    # Pad the raw audio features to the longest clip in the batch.
    padded = processor.pad(
        {"input_values": audio},
        padding=True,
        return_tensors="pt",
    )

    # Pad the token-id sequences with the tokenizer and attach them.
    padded["labels"] = processor.tokenizer.pad(
        {"input_ids": label_ids},
        padding=True,
        return_tensors="pt",
    )["input_ids"]

    return padded


# === DataLoader ===
# === DataLoader ===
def _make_split(split_df):
    # One dataset/loader pair per split: batches of 4, reshuffled each epoch.
    # NOTE(review): shuffling val/test is unusual — evaluation loaders
    # typically use shuffle=False; confirm this is intended.
    dataset = ASRDataset(split_df, processor)
    loader = DataLoader(dataset, batch_size=4, collate_fn=collate_fn, shuffle=True)
    return dataset, loader

train_dataset, train_dataloader = _make_split(train)
val_dataset, val_dataloader = _make_split(val)
test_dataset, test_dataloader = _make_split(test)

If you find any errors in this repo, feel free to reach out to subrotosarkar32@gmail.com for corrections.

Downloads last month
1,893