import csv
import os
import urllib.parse

import requests

import datasets
from datasets.utils.py_utils import size_str
from huggingface_hub import HfApi, HfFolder

from .release_stats import STATS
| |
|
| |
|
| |
|
| | |
| |
|
# Landing page for Common Voice dataset releases.
_HOMEPAGE = "https://commonvoice.mozilla.org/en/datasets"

# Common Voice audio is released under CC0 (public-domain dedication).
_LICENSE = "https://creativecommons.org/publicdomain/zero/1.0/"

# Base URL of the Common Voice REST API (signed bundle URLs, download logging).
_API_URL = "https://commonvoice.mozilla.org/api/v1"
| |
|
| |
|
| |
|
| |
|
| |
|
class CommonVoiceConfig(datasets.BuilderConfig):
    """BuilderConfig for CommonVoice.

    Carries the per-release statistics of a Common Voice language bundle.
    The statistics were previously hard-coded for the Bangla 2022-04-27
    release and any extra keywords leaked into ``datasets.BuilderConfig``
    (which rejects them). They are now keyword parameters whose defaults
    reproduce the old values, so existing call sites keep working.
    """

    def __init__(
        self,
        name,
        version,
        language="bn",
        release_date="2022-04-27",
        num_clips=231120,
        num_speakers=19863,
        validated_hr=56.61,
        total_hr=399.47,
        size_bytes=8262390506,
        **kwargs,
    ):
        """Create a config.

        Args:
            name: Config name (the locale code, e.g. ``"bn"``).
            version: Release version string, e.g. ``"9.0.0"``.
            language: Language label for this bundle.
            release_date: Bundle release date (``YYYY-MM-DD``).
            num_clips: Number of audio clips in the release.
            num_speakers: Number of distinct speakers.
            validated_hr: Hours of validated transcribed speech.
            total_hr: Total recorded hours.
            size_bytes: Bundle size in bytes.
            **kwargs: Forwarded to ``datasets.BuilderConfig``.
        """
        self.language = language
        self.release_date = release_date
        self.num_clips = num_clips
        self.num_speakers = num_speakers
        self.validated_hr = validated_hr
        self.total_hr = total_hr
        self.size_bytes = size_bytes
        self.size_human = size_str(self.size_bytes)
        description = (
            f"Common Voice speech to text dataset in {self.language} released on {self.release_date}. "
            f"The dataset comprises {self.validated_hr} hours of validated transcribed speech data "
            f"out of {self.total_hr} hours in total from {self.num_speakers} speakers. "
            f"The dataset contains {self.num_clips} audio clips and has a size of {self.size_human}."
        )
        # Consume the statistics here so BuilderConfig only receives the
        # keywords it understands (name/version/description/...).
        super().__init__(
            name=name,
            version=datasets.Version(version),
            description=description,
            **kwargs,
        )
| |
|
| |
|
class CommonVoice(datasets.GeneratorBasedBuilder):
    """Dataset builder for the Common Voice Bangla (bn) speech corpus."""

    DEFAULT_CONFIG_NAME = "bn"
    # Audio examples are large; flush every 1000 rows to bound memory use.
    DEFAULT_WRITER_BATCH_SIZE = 1000

    BUILDER_CONFIGS = [
        # Fixed: the original call was missing commas between its arguments
        # (a SyntaxError) and passed release-statistic keywords that were
        # forwarded to datasets.BuilderConfig, which rejects them. The
        # statistics are supplied by CommonVoiceConfig's defaults.
        CommonVoiceConfig(name="bn", version="9.0.0"),
    ]

    def _info(self):
        """Return the DatasetInfo: description, features, license, version."""
        description = (
            "Common Voice Bangla is bengali AI's initiative to help teach machines how real people speak in Bangla. "
            "The dataset is for initial training of a general speech recognition model for Bangla."
        )
        features = datasets.Features(
            {
                "client_id": datasets.Value("string"),
                "path": datasets.Value("string"),
                "audio": datasets.features.Audio(sampling_rate=48_000),
                "sentence": datasets.Value("string"),
                "up_votes": datasets.Value("int64"),
                "down_votes": datasets.Value("int64"),
                "age": datasets.Value("string"),
                "gender": datasets.Value("string"),
                "accent": datasets.Value("string"),
                "locale": datasets.Value("string"),
                "segment": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=description,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,  # was defined at module level but never attached
            license=_LICENSE,
            version=self.config.version,
        )

    def _get_bundle_url(self, locale, url_template):
        """Resolve the signed download URL for this locale's audio bundle.

        The template contains a ``{locale}`` placeholder; the filled-in path
        is percent-encoded and exchanged for a temporary URL via the Common
        Voice bucket API.
        """
        path = url_template.replace("{locale}", locale)
        path = urllib.parse.quote(path.encode("utf-8"), safe="~()*!.'")
        response = requests.get(
            f"{_API_URL}/bucket/dataset/{path}", timeout=10.0
        ).json()
        return response["url"]

    def _log_download(self, locale, bundle_version, auth_token):
        """Report this download to the Common Voice stats endpoint."""
        if isinstance(auth_token, bool):
            # use_auth_token=True means "use the locally stored HF token".
            auth_token = HfFolder().get_token()
        whoami = HfApi().whoami(auth_token)
        email = whoami["email"] if "email" in whoami else ""
        payload = {"email": email, "locale": locale, "dataset": bundle_version}
        requests.post(f"{_API_URL}/{locale}/downloaders", json=payload).json()

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for train/test/validation."""
        hf_auth_token = dl_manager.download_config.use_auth_token
        if hf_auth_token is None:
            raise ConnectionError(
                "Please set use_auth_token=True or use_auth_token='<TOKEN>' to download this dataset"
            )

        bundle_url_template = STATS["bundleURLTemplate"]
        bundle_version = bundle_url_template.split("/")[0]
        dl_manager.download_config.ignore_url_params = True

        self._log_download(self.config.name, bundle_version, hf_auth_token)
        archive_path = dl_manager.download(
            self._get_bundle_url(self.config.name, bundle_url_template)
        )
        local_extracted_archive = (
            dl_manager.extract(archive_path) if not dl_manager.is_streaming else None
        )

        if self.config.version < datasets.Version("5.0.0"):
            path_to_data = ""
        else:
            path_to_data = "/".join([bundle_version, self.config.name])
        path_to_clips = "/".join([path_to_data, "clips"]) if path_to_data else "clips"

        # Fixed: the original built "/bengali_ai_tsv/" and joined it with
        # another "/", yielding "/bengali_ai_tsv//train.tsv". Members yielded
        # by iter_archive are relative paths with no leading slash, so the
        # metadata TSV could never be matched inside _generate_examples.
        path_to_tsvs = "bengali_ai_tsv"

        split_to_tsv = [
            (datasets.Split.TRAIN, "train.tsv"),
            (datasets.Split.TEST, "test.tsv"),
            (datasets.Split.VALIDATION, "dev.tsv"),
        ]
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive,
                    # Each split needs its own pass over the archive.
                    "archive_iterator": dl_manager.iter_archive(archive_path),
                    "metadata_filepath": "/".join([path_to_tsvs, tsv_name]),
                    "path_to_clips": path_to_clips,
                },
            )
            for split, tsv_name in split_to_tsv
        ]

    def _generate_examples(
        self,
        local_extracted_archive,
        archive_iterator,
        metadata_filepath,
        path_to_clips,
    ):
        """Yields examples.

        Relies on archive ordering: the metadata TSV must appear before the
        audio clips (asserted below). Rows are indexed by clip path, then
        clips are streamed and paired with their metadata.
        """
        data_fields = list(self._info().features.keys())
        metadata = {}
        metadata_found = False
        for path, f in archive_iterator:
            if path == metadata_filepath:
                metadata_found = True
                lines = (line.decode("utf-8") for line in f)
                reader = csv.DictReader(lines, delimiter="\t", quoting=csv.QUOTE_NONE)
                for row in reader:
                    # Normalize the clip reference to "<clips dir>/<name>.mp3".
                    if not row["path"].endswith(".mp3"):
                        row["path"] += ".mp3"
                    row["path"] = os.path.join(path_to_clips, row["path"])
                    # Newer releases renamed the column "accent" -> "accents";
                    # map it back to the declared feature name.
                    if "accents" in row:
                        row["accent"] = row["accents"]
                        del row["accents"]
                    # Guarantee every declared feature has a value.
                    for field in data_fields:
                        if field not in row:
                            row[field] = ""
                    metadata[row["path"]] = row
            elif path.startswith(path_to_clips):
                assert metadata_found, "Found audio clips before the metadata TSV file."
                if not metadata:
                    break
                if path in metadata:
                    result = metadata[path]
                    # Prefer the extracted on-disk path when available.
                    path = (
                        os.path.join(local_extracted_archive, path)
                        if local_extracted_archive
                        else path
                    )
                    result["audio"] = {"path": path, "bytes": f.read()}
                    # In streaming mode bytes are inlined; no local file path.
                    result["path"] = path if local_extracted_archive else None

                    yield path, result
| |
|
| |
|
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| |
|