{ "cells": [ { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "%pip install \"datasets[Audio]\" sprakbanken_normalizer " ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "from datasets import load_dataset, Audio, Dataset, DatasetDict\n", "from datasets import DownloadManager\n", "\n", "\n", "import pandas as pd" ] }, { "cell_type": "code", "execution_count": 19, "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "4a271a8428ed40e6bea96304d9655d8b", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Downloading builder script: 0%| | 0.00/10.5k [00:00\", text)\n", "\n", "def remove_metatags(text): \n", " \"\"\"Remove metatags for hesitations, laughter, paralinguistic sounds etc.\"\"\"\n", " return re.sub(r\"%\\w{3}\\s\", \"\", text)\n", "\n", "def remove_percentage_sign(text):\n", " \"\"\"Remove percentage sign.\"\"\"\n", " return re.sub(r\"%\", \"\", text)\n", "\n", "def remove_false_starts(text):\n", " \"\"\"Remove annotations of false starts and interruptions.\"\"\"\n", " return re.sub(r\"\\s\\w+£\", \"\", text)\n", "\n", "def remove_pound_sign(text):\n", " \"\"\"Remove pound sign.\"\"\"\n", " return re.sub(r\"£\", \"\", text)\n", "\n", "def replace_underscore(text):\n", " \"\"\"Replace underscore with a single whitespace.\"\"\"\n", " return re.sub(r\"_\", \" \", text)\n", "\n", "def remove_punctuation(text):\n", " \"\"\"Remove punctuation.\"\"\"\n", " return re.sub(r\"[,\\.\\!\\'-]\", \"\", text)\n", "\n", "def normalize_number_words(text):\n", " \"\"\"Normalize number words to integers.\"\"\"\n", " inv_norm = inv_normalize(text)\n", " return inv_norm\n", "\n", " # TODO: convert hyphenated year-words to integers\n", " # TODO: deal with punctuation at the end \n", "\n", "# TODO: combine hyphenated words with numbers (e.g. CO2, C_O-to\\CO2)\n" ] }, { "cell_type": "code", "execution_count": 60, "metadata": {}, "outputs": [], "source": [ "df[\"no_punct\"] = df.transcription.str.replace('[^\\w\\s]','')" ] }, { "cell_type": "code", "execution_count": 65, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "0 hallo og velkommen hit til Nasjonalbiblioteket.\n", "1 det er kulturnatt, og det skjer en bråte ting ...\n", "2 vi har ett arrangement til i kveld klokka tjue...\n", "3 vi snakker matkultur og det eksotiske åttitall...\n", "4 og mer spennende enn kokte poteter tør jeg ogs...\n", "Name: no_punct, dtype: object" ] }, "execution_count": 65, "metadata": {}, "output_type": "execute_result" } ], "source": [ "df.no_punct.head()" ] }, { "cell_type": "code", "execution_count": 57, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "0 hallo og velkommen hit til Nasjonalbiblioteket\n", "1 det er kulturnatt og det skjer en bråte ting o...\n", "2 vi har ett arrangement til i kveld klokka tjue...\n", "3 vi snakker matkultur og det eksotiske åttitall...\n", "4 og mer spennende enn kokte poteter tør jeg ogs...\n", "Name: transcription, dtype: object" ] }, "execution_count": 57, "metadata": {}, "output_type": "execute_result" } ], "source": [ "texts[texts.str.contains(\".\")].head()" ] }, { "cell_type": "code", "execution_count": 79, "metadata": {}, "outputs": [], "source": [ "\n", "\n", "def normalize_transcription(transcription: str, config=\"annotations\"):\n", " \"\"\"Normalize transcriptions according to orthographic standards, or verbatim.\"\"\"\n", " t = transcription\n", " if config == \"annotations\":\n", " # Nothing do, return as is\n", " return t\n", " if config == \"orthographic\":\n", " t = remove_metatags(t)\n", " t = remove_false_starts(t)\n", " t = re.sub(r\"CO-to\", \"CO2\", t)\n", " t = filter_backslash(t, left=False)\n", " t = normalize_number_words(t)\n", " elif config == \"verbatim\":\n", " t = bracket_metatags(t)\n", " t = remove_percentage_sign(t)\n", " t = remove_pound_sign(t)\n", " t = re.sub(r\"C_O-to\", \"C O to\", t)\n", " t = filter_backslash(t, left=True)\n", " t = remove_punctuation(t)\n", " # For both, at the end:\n", " t = replace_underscore(t)\n", " return t\n", "\n", "\n" ] }, { "cell_type": "code", "execution_count": 67, "metadata": {}, "outputs": [], "source": [ "\n", "hyphenated = \"og han har et prosjekt der hvor han tenker seg at algene kan spise opp C_O-to-en\\\\CO-to-en og så kan mengden alger bli en del av fôret for krill og og det var vel i hovedsak krill sånn som jeg har forstått det og inn i dette nå skjønner de at en algeforsker holder på med sånne type ting.\"" ] }, { "cell_type": "code", "execution_count": 80, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "'og han har et prosjekt der hvor han tenker seg at algene kan spise opp C O toen og så kan mengden alger bli en del av fôret for krill og og det var vel i hovedsak krill sånn som jeg har forstått det og inn i dette nå skjønner de at en algeforsker holder på med sånne type ting'" ] }, "execution_count": 80, "metadata": {}, "output_type": "execute_result" } ], "source": [ "\n", "normalize_transcription(hyphenated, config=\"verbatim\")" ] }, { "cell_type": "code", "execution_count": 173, "metadata": {}, "outputs": [], "source": [ "texts = map_substrings(df.transcription.copy())" ] }, { "cell_type": "code", "execution_count": 174, "metadata": {}, "outputs": [], "source": [ "\n", "#df[\"normalized\"] = texts.apply(normalize_transcription, verbatim=False)\n", "\n", "\n", "#apply(normalize_transcription )\n" ] }, { "cell_type": "code", "execution_count": 181, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "'vi har ett arrangement til i kveld klokka 21 null null om norsk fankultur og Wenche Myhre og det hele, men aller først har vi noe ganske anna på menyen.'" ] }, "execution_count": 181, "metadata": {}, "output_type": "execute_result" } ], "source": [ "df.normalized.iloc[2]" ] }, { "cell_type": "code", "execution_count": 90, "metadata": {}, "outputs": [], "source": [ "#df[\"verbatim\"] = df.transcription.apply(normalize_transcription, verbatim=True)" ] }, { "cell_type": "code", "execution_count": 37, "metadata": {}, "outputs": [], "source": [ "pd.set_option('display.max_colwidth', None)\n", "\n", "#df[[\"transcription\", \"normalized\", \"verbatim\"]].iloc[1]" ] }, { "cell_type": "code", "execution_count": 70, "metadata": {}, "outputs": [], "source": [ "df[\"characters\"] = df.transcription.apply(lambda x: list(x))\n", "df = df.explode(\"characters\")" ] }, { "cell_type": "code", "execution_count": 73, "metadata": {}, "outputs": [], "source": [ "df.characters.value_counts().to_csv(\"data/characters.csv\")" ] }, { "cell_type": "code", "execution_count": 42, "metadata": {}, "outputs": [], "source": [ "#df[\"metatags\"] = df.transcription.str.extractall(r\"(\\%\\w+)\", re.IGNORECASE)\n", "metatags = df.transcription.str.extractall(r\"(\\%\\w+)\")\n", "\n", "metatags.columns = [\"metatags\"]" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "segments_to_correct =[\n", " \"50f-18_0343045-0343993.wav\", # € mappes til £ \n", " \"50f-6_1310935-1312226.wav\", # tankestrek til mellomrom \n", " \"nb-4_4086270-4092450.wav\", # èn til én \n", " \"nb-4_5245120-5268300.wav\", # èn til én\n", " \n", "\n", "]\n", "\n", "#TODO: mapping mellom uvanlige symboler \n", "unusual_symbols = \"\"\"\n", "\n", "á,a\n", "ä,a\n", "ô,o\n", "à,a\n", "ê,e\n", "Á,A\n", "š,s\n", "đ,d\n", "č,c\n", "ž,z\n", "€,£\n", "–,\n", "\"\"\"\n", "# normalisert + verbatim skal ikke ha samiske tegn, ü, osv. \n", "# verbatim skal heller ikke ha de minst vanlige bokstavene med aksent. \n", "\n", "# verbatim: fjerne skilletegn " ] }, { "cell_type": "code", "execution_count": 82, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "'det stod faktisk folk bak disken og dei importerte jo massevis av spennande %eee råvarer og produkt frå Amerika til oljeamerikanarane, så %eee vi var nok litt foran på ein på eit vis %eee ja så men du nemnde òg peparsteik.'" ] }, "execution_count": 82, "metadata": {}, "output_type": "execute_result" } ], "source": [ "df.transcription[df.transcription.str.contains(\"ò\")].iloc[0]" ] }, { "cell_type": "code", "execution_count": 43, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "metatags\n", "%eee 7666\n", "%heh 802\n", "%emm 615\n", "%mmm 352\n", "%overlapping 337\n", "%unint 318\n", "%foreign 215\n", "%ttt 108\n", "%qqq 67\n", "%unk 8\n", "Name: count, dtype: int64" ] }, "execution_count": 43, "metadata": {}, "output_type": "execute_result" } ], "source": [ "\n", "metatags.metatags.value_counts()" ] }, { "cell_type": "code", "execution_count": 62, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Sentence: \n", " det skulle vere på ein måte utanlandsk, vere importerte råvarer, utanlandske namn, og så vidare og så vidare. \n", "det skulle vere på ein måte utanlandsk, vere importerte råvarer, utanlandske namn, osv. og så vidare.\n", "det skulle vere på ein måte utanlandsk, vere importerte råvarer, utanlandske namn, osv. og så vidare.\n", "det skulle vere på ein måte utanlandsk, vere importerte råvarer, utanlandske namn, osv. og så vidare.\n", "det skulle vere på ein måte utanlandsk, vere importerte råvarer, utanlandske namn, osv. og så vidare.\n" ] } ], "source": [ "#from sprakbanken_normalizer.inverse_text_normalizer import yeargrammar\n", "\n", "\n", "df[\"last_punct\"] = df.transcription.str.extract(r\"\\w+(.)$\")\n", "\n", "#last_punct.replace(\"nan\", \"\", inplace=True)\n", "\n", "nonstandard_punct = df[\"transcription\"][df[\"last_punct\"] != \".\"].tolist()\n", "\n", "idx = 7\n", "\n", "punct = \".\"\n", "\n", "sentence = nonstandard_punct[idx]\n", "print(\"Sentence: \" + \"\\n\", sentence)\n", "\n", "print(inv_normalize(sentence))\n", "\n", "standard_orth = right_backslash(sentence)\n", "\n", "print(inv_normalize(standard_orth))\n", "\n", "no_punct = standard_orth.strip(punct)\n", "\n", "print(inv_normalize(no_punct))\n", "\n", "token_separated = no_punct.replace(\"åttitallet\", \"åtti - tallet\")\n", "print(inv_normalize(token_separated))\n", "\n", "\n", "#convert_nums(mynumstring_nn, nn=True, reverse=True),\n", "# convert_ords(mynumstring_nn, nn=True, reverse=True)," ] }, { "cell_type": "code", "execution_count": 19, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "'vi har ett arrangement til i kveld klokka tjueen null null om norsk fankultur og Wenche Myhre og det hele, men aller først har vi noe ganske anna på menyen.'" ] }, "execution_count": 19, "metadata": {}, "output_type": "execute_result" } ], "source": [ "\n", "inv_normalize(t.strip(\".\")) + \".\"" ] }, { "cell_type": "code", "execution_count": 65, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "BEFORE: dagens dato er tjuende juni tjuetjueto\n", "AFTER: dagens dato er 20. juni 2022\n", "\n", "BEFORE: dette er for eksempel en forkortelse\n", "AFTER: dette er f.eks. en forkortelse\n", "\n", "BEFORE: renta er på to komma fire prosent\n", "AFTER: renta er på 2,4%\n", "\n", "BEFORE: dette tallet er tre hundre tusen fire hundre og tjueto\n", "AFTER: dette tallet er 300422\n", "\n", "BEFORE: denne setninga skal ikke normaliseres og inneholder æøå-\n", "AFTER: denne setninga skal ikke normaliseres og inneholder æøå-\n", "\n", "BEFORE: blant annet satt hun i Statoils sitt styre store deler av nittenåtti-tallet\n", "AFTER: bl.a. satt hun i Statoils sitt styre store deler av nittenåtti-tallet\n", "\n", "BEFORE: tjuesekstiåtte\n", "AFTER: 2068\n", "\n", "BEFORE: totusenogén\n", "AFTER: 2001\n", "\n", "BEFORE: nittenhundreogfire\n", "AFTER: 1904\n", "\n", "BEFORE: ni og et halvt tusen\n", "AFTER: 9500\n", "\n", "BEFORE: av den årsak er det viktig å behandle denne loven i dag slik at de også etter den første oktober har en lov til å forho forholde oss til i tilknytning til den her saken\n", "AFTER: av den årsak er det viktig å behandle denne loven i dag slik at de også etter 1. oktober har en lov til å forho forholde oss til i tilknytning til den her saken\n", "\n", "BEFORE: os Kartverkets tinglysning så tinglyses omkring én komma seks millioner dokumenter i året\n", "AFTER: os Kartverkets tinglysning så tinglyses omkring 1,6 millioner dokumenter i året\n", "\n", "BEFORE: vi har ett arrangement til i kveld klokka tjueen null null om norsk fankultur og Wenche Myhre og det hele.\n", "AFTER: vi har ett arrangement til i kveld klokka 21 null null om norsk fankultur og Wenche Myhre og det hele.\n", "\n" ] } ], "source": [ "teststrings = [\n", " \"dagens dato er tjuende juni tjuetjueto\",\n", " \"dette er for eksempel en forkortelse\",\n", " \"renta er på to komma fire prosent\",\n", " \"dette tallet er tre hundre tusen fire hundre og tjueto\",\n", " \"denne setninga skal ikke normaliseres og inneholder æøå-\",\n", " \"blant annet satt hun i Statoils sitt styre store deler av nittenåtti-tallet\",\n", " \"tjuesekstiåtte\",\n", " \"totusenogén\",\n", " \"nittenhundreogfire\",\n", " \"ni og et halvt tusen\",\n", " \"av den årsak er det viktig å behandle denne loven i dag slik at de også etter den første oktober har en lov til å forho forholde oss til i tilknytning til den her saken\",\n", " \"os Kartverkets tinglysning så tinglyses omkring én komma seks millioner dokumenter i året\",\n", " \"vi har ett arrangement til i kveld klokka tjueen null null om norsk fankultur og Wenche Myhre og det hele.\"\n", "]\n", "for test in teststrings:\n", " print(\"BEFORE: \" + test)\n", " print(\"AFTER: \" + inv_normalize(test))\n", " print()" ] }, { "cell_type": "code", "execution_count": 13, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "BEFORE: vi har ett arrangement til i kveld klokka tjueen null null om norsk fankultur og Wenche Myhre og det hele.\n", "AFTER: vi har ett arrangement til i kveld klokka 21 null null om norsk fankultur og Wenche Myhre og det hele.\n" ] } ], "source": [ "teststring = \"vi har ett arrangement til i kveld klokka tjueen null null om norsk fankultur og Wenche Myhre og det hele.\"\n", "print(\"BEFORE: \" + teststring)\n", "print(\"AFTER: \" + inv_normalize(teststring))" ] }, { "cell_type": "code", "execution_count": 15, "metadata": {}, "outputs": [], "source": [ "# TODO: filter bakstrekord (funksjonsord)\n", "\n", "from pathlib import Path\n", "from collections import defaultdict\n", "\n", "\n", "def construct_backslash_lookup(filepath: str = \"baktrekede_funksjonsord.txt\") -> dict:\n", " \"\"\"Construct a lookup table for backslash notation from a text file.\"\"\"\n", " backslash_list = Path(filepath).read_text().splitlines()\n", " backslash_words = defaultdict(list)\n", "\n", " for line in backslash_list:\n", " if not line: continue\n", " if line.endswith(\":\"):\n", " lang = line.strip(\":\").lower()\n", " else: \n", " first, last = line.split(\"\\\\\")\n", " backslash_words[lang].append({\"verbatim\": first, \"standard\": last})\n", " return backslash_words\n", "\n", "def get_backslash_wordset(backslash_words: dict, orth: str, normalize: bool = True) -> set:\n", " \"\"\"Get a set of verbatim or standardized words for a given orthographic standard.\"\"\"\n", " style = \"standard\" if normalize else \"verbatim\"\n", " return set(item[style] for item in backslash_words[orth])\n", "\n" ] }, { "cell_type": "code", "execution_count": 16, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "{'demmes',\n", " 'dokk',\n", " 'dokker',\n", " 'dokkers',\n", " 'hen',\n", " 'hva',\n", " 'kass',\n", " 'korleis',\n", " 'korsen',\n", " 'koss',\n", " 'kossen',\n", " 'kva',\n", " 'vart'}" ] }, "execution_count": 16, "metadata": {}, "output_type": "execute_result" } ], "source": [ "backslash_words = construct_backslash_lookup()\n", "get_backslash_wordset(backslash_words, \"bokmål\", normalize=False)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# generate dataset card from the loader script\n", "#datasets-cli test path/to/ --save_info --all_configs" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "### COPIED FROM NPSC\n", "\n", "def _generate_examples(self, filepaths):\n", " \"\"\"Yields examples from NPSC.\"\"\"\n", " data_fields = list(self._info().features.keys())\n", " data_fields.remove(\"audio\")\n", " lang_code = None\n", " if self.config.name.endswith(\"bokmaal\"):\n", " lang_code = \"nb-no\"\n", " elif self.config.name.endswith(\"nynorsk\"):\n", " lang_code = \"nn-no\"\n", " for metadata_path, archive_path in filepaths:\n", " metadata = {}\n", " with open(metadata_path) as metadata_file:\n", " for line in metadata_file.read().split(\"\\n\"):\n", " if line:\n", " metadata_object = json.loads(line)\n", " if \"path\" in metadata_object:\n", " metadata_key = metadata_object[\"path\"].split(\"/\", 1)[-1]\n", " metadata[metadata_key] = metadata_object\n", " with open(archive_path, \"rb\") as archive_fs:\n", " archive_bytes = io.BytesIO(archive_fs.read())\n", " with tarfile.open(fileobj=archive_bytes, mode=\"r\") as tar:\n", " for audio_file in tar.getmembers():\n", " if audio_file.isfile():\n", " metadata_key = audio_file.name.split(\".mp3\", 1)[0].split(\"/\", 1)[-1]\n", " audio_bytes = tar.extractfile(audio_file).read()\n", " audio_dict = {\"bytes\": audio_bytes, \"path\": audio_file.name}\n", " fields = {key: metadata[metadata_key][key] for key in data_fields}\n", " if lang_code:\n", " if lang_code == fields.get(\"sentence_language_code\", \"\").lower():\n", " yield metadata_key, {\"audio\": audio_dict, **fields}\n", " else:\n", " yield metadata_key, {\"audio\": audio_dict, **fields}" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Load dataset splits" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def split_file_path(row):\n", " path = row[\"file_name\"]\n", " pathlist = path.split(\"/\")\n", " dir_path = \"/\".join(pathlist[:-1])\n", " splitname = pathlist[1]\n", " filename = pathlist[-1]\n", " return path, dir_path, splitname, filename\n", "\n", "\n", "def load_metadata(filepath):\n", " metadata = pd.read_json(filepath, lines=True)\n", " meta = metadata.dropna() # drop segments from the dataset metadata file that have NaN-values (music and noise don't have values for gender and dialect)\n", " meta[['audio', 'file_path', 'split', 'file_name']] = meta.apply(split_file_path, axis=1, result_type='expand')\n", " return meta " ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "import datasets\n", "import json\n", "from nb_samtale import _DESCRIPTION, _HOMEPAGE, _LICENSE, _CITATION\n", "\n", "# Define dataset info\n", "dataset_info = datasets.DatasetInfo(\n", " # This is the description that will appear on the datasets page.\n", " description=_DESCRIPTION,\n", " # This defines the different columns of the dataset and their types\n", " # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and\n", " # specify them. They'll be used if as_supervised=True in builder.as_dataset.\n", " # supervised_keys=(\"sentence\", \"label\"),\n", " # Homepage of the dataset for documentation\n", " homepage=_HOMEPAGE,\n", " # License for the dataset if available\n", " license=_LICENSE,\n", " # Citation for the dataset\n", " citation=_CITATION\n", " )" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "\n", "\n", "def create_split_datasets(splitname: str, meta_df: pd.DataFrame):\n", " \"\"\"Create a huggingface dataset for a split with the metadata and push it to huggingface datasets hub.\"\"\"\n", " split_df = meta_df[meta_df[\"split\"] == splitname]\n", "\n", " # Write the split metadata to a jsonl file\n", " #split_df.drop(\"audio\", axis=1).to_json(f\"data/{splitname}/metadata.jsonl\", orient='records', lines=True, force_ascii=False)\n", "\n", " # Create a huggingface dataset from the split metadata\n", " dataset = Dataset.from_pandas(split_df, split=splitname, info=dataset_info, preserve_index=False).cast_column(\"audio\", Audio(sampling_rate=16000))\n", "\n", " # cast the features to the correct type\n", " for colname in [\"dialect\", \"orthography\", \"source_type\", \"gender\"]: \n", " dataset = dataset.class_encode_column(colname)\n", " return dataset\n", "\n", "\n", "def preprocess_splits(metadata_filepath: str):\n", " \"\"\"Preprocess the metadata and create the huggingface datasets for the splits.\"\"\"\n", " metadata = load_metadata(metadata_filepath)\n", " splits = metadata[\"split\"].unique()\n", " datasplits = {}\n", " for split in splits:\n", " dataset = create_split_datasets(split, metadata)\n", " dataset.to_pandas().drop(\"audio\", axis=1).to_json(f\"data/{split}/metadata.jsonl\", orient='records', lines=True, force_ascii=False)\n", " datasplits[split] = dataset\n", " # dataset.push_to_hub('Sprakbanken/nb_samtale', split=split)\n", " return datasplits" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Casting to class labels: 100%|██████████| 9548/9548 [00:00<00:00, 189643.53 examples/s]\n", "Casting to class labels: 100%|██████████| 9548/9548 [00:00<00:00, 164263.24 examples/s]\n", "Casting to class labels: 100%|██████████| 9548/9548 [00:00<00:00, 135115.25 examples/s]\n", "Casting to class labels: 100%|██████████| 9548/9548 [00:00<00:00, 191127.87 examples/s]\n", "Casting to class labels: 100%|██████████| 1195/1195 [00:00<00:00, 66483.53 examples/s]\n", "Casting to class labels: 100%|██████████| 1195/1195 [00:00<00:00, 78831.62 examples/s]\n", "Casting to class labels: 100%|██████████| 1195/1195 [00:00<00:00, 93411.73 examples/s]\n", "Casting to class labels: 100%|██████████| 1195/1195 [00:00<00:00, 99626.18 examples/s]\n", "Casting to class labels: 100%|██████████| 1194/1194 [00:00<00:00, 86298.68 examples/s]\n", "Casting to class labels: 100%|██████████| 1194/1194 [00:00<00:00, 62575.74 examples/s]\n", "Casting to class labels: 100%|██████████| 1194/1194 [00:00<00:00, 63845.78 examples/s]\n", "Casting to class labels: 100%|██████████| 1194/1194 [00:00<00:00, 66885.69 examples/s]\n" ] } ], "source": [ "metafilepath = 'data/metadata.jsonl'\n", "\n", "dataset = preprocess_splits(metafilepath)\n", "\n" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "{'source_file_id': 'nb-1',\n", " 'segment_id': '0100800-0104784',\n", " 'segment_order': 9,\n", " 'duration': 3.984,\n", " 'overlap_previous': False,\n", " 'overlap_next': False,\n", " 'speaker_id': 'P48',\n", " 'gender': 0,\n", " 'dialect': 0,\n", " 'orthography': 0,\n", " 'source_type': 0,\n", " 'file_name': 'nb-1_0100800-0104784.wav',\n", " 'transcription': 'vi er altså mer spennende enn %eee kokte poteter.',\n", " 'audio': {'path': 'data/dev/bm/nb-1_0100800-0104784.wav',\n", " 'array': array([-0.00192261, 0.00140381, 0.00299072, ..., 0.00524902,\n", " 0.00448608, 0.00799561]),\n", " 'sampling_rate': 16000},\n", " 'file_path': 'data/dev/bm',\n", " 'split': 'dev'}" ] }, "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ "dataset[\"dev\"][0]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "---" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## create shards \n" ] }, { "cell_type": "code", "execution_count": 288, "metadata": {}, "outputs": [], "source": [ "from typing import Generator \n", "\n", "\n", "def ceildiv(a, b):\n", " return -(a // -b)\n", "\n", "def human_readable_size(size: int, round_off: bool = True):\n", " \"\"\"Convert a file size in bytes to a human readable format.\"\"\"\n", " for unit in [\"B\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\"]:\n", " if size < 1024.0:\n", " break\n", " size /= 1024.0\n", " if round_off:\n", " return f\"{round(size)}{unit}\"\n", " return f\"{size:.2f}{unit}\"\n", "\n", "def get_foldersize(dir_path: str):\n", " \"\"\"Get the total byte size of a folder.\"\"\"\n", " return sum_filesizes(Path(dir_path).iterdir())\n", "\n", "def sum_filesizes(filelist: list):\n", " \"\"\"Get the total byte size of all files in a file list.\"\"\"\n", " total_size = 0\n", " for filename in filelist:\n", " total_size += os.path.getsize(filename)\n", " return total_size\n", "\n", "def create_file_batches(file_dir: str, num: int, size: int) -> Generator:\n", " \"\"\"Create a list of files to include in a shard.\"\"\"\n", " batch_size = 0\n", " filelist = []\n", " for f in Path(file_dir).iterdir():\n", " if f.is_file():\n", " if batch_size + f.stat().st_size >= size:\n", " yield filelist\n", " filelist = []\n", " batch_size = 0\n", " batch_size += f.stat().st_size\n", " filelist.append(f)\n", " return filelist" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": 292, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "['499.97MB', '499.97MB', '499.76MB']" ] }, "execution_count": 292, "metadata": {}, "output_type": "execute_result" } ], "source": [ "#[(i, human_readable_size(f.stat().st_size)) for i, f in enumerate(Path(data_dir).iterdir())][:10]\n", "shards = create_file_batches(\"data/train/bm\", 10, 500*1024*1024)\n", "\n", "[human_readable_size(sum_filesizes(sh), round_off=False) for sh in shards]\n", "\n" ] }, { "cell_type": "code", "execution_count": 269, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Total shards: 4\n", "Total size: 1.57GB\n" ] } ], "source": [ "import os \n", "import tarfile\n", "\n", "source_dir = \"data\"\n", "target_dir = \"shards\"\n", "max_size = 500 * 1024 * 1024 # ~500MB\n", "\n", "metadata_file = \"metadata.jsonl\"\n", "metadf = load_metadata(Path(source_dir) / metadata_file)\n", "\n", "splits = [\"train\", \"dev\", \"test\"]\n", "langs = [\"bm\", \"nn\"]\n", "\n", "split = \"train\"\n", "lang = \"bm\" \n", "# the smallest dataset partition is dev/nn, ~60MB, and the largest is train/bm, ~1.6GB\n", "\n", "data_dir = os.path.join(source_dir, split, lang)\n", "total_shards = ceildiv(get_foldersize(data_dir), max_size)\n", "\n", "print(f\"Total shards: {total_shards}\")\n", "print(f\"Total size of files in folder: {human_readable_size(get_foldersize(data_dir), round_off=False)}\")\n", "\n", "\n", "#create_shards(dataset[split], audio_dir, shard_dir, shard_size, split, langs, metadata_file)\n", "#!tar -czvf $split_$lang_$shardnum.tar.gz data/$split/$lang/*.wav\n" ] }, { "cell_type": "code", "execution_count": 238, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "'shards/dev_nn-0001.tar.gz'" ] }, "execution_count": 238, "metadata": {}, "output_type": "execute_result" } ], "source": [ "shard_count = 0\n", "shard_name = f\"{split}_{lang}-{(shard_count + 1):04d}.tar.gz\"\n", "shard_path = os.path.join(target_dir, shard_name)\n", "\n", "shard_path\n", "#for idx, filename in enumerate(os.listdir(data_dir)): \n", " #print(idx, filename)" ] }, { "cell_type": "code", "execution_count": 223, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "'4.75MB'" ] }, "execution_count": 223, "metadata": {}, "output_type": "execute_result" } ], "source": [ "human_readable_size(os.path.getsize(os.path.join(source_dir, \"metadata.jsonl\")), round_off=False)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Draft dataset loader script" ] }, { "cell_type": "code", "execution_count": 243, "metadata": {}, "outputs": [], "source": [ "_DATA_URL = \"https://huggingface.co/datasets/Sprakbanken/nb_samtale/resolve/main/data/{batch}\"\n", "\n", "_METADATA_URL = \"https://huggingface.co/datasets/Sprakbanken/nb_samtale/resolve/dev/data/{split}/metadata.jsonl\"\n", "\n", "_DATA_SPLITS = {\n", " \"train\": [\n", " \"train-00000-of-00005-061bdbf053384eef.parquet\",\n", " \"train-00001-of-00005-614293dd049991fc.parquet\",\n", " \"train-00002-of-00005-d2365f0dd5270964.parquet\",\n", " \"train-00003-of-00005-f4a44d9345ca98f6.parquet\",\n", " \"train-00004-of-00005-8b07130ce556d0ce.parquet\",\n", " ],\n", " \"dev\": [\"dev-00000-of-00001-8ddd802c462e8165.parquet\"],\n", " \"test\": [\"test-00000-of-00001-7cad5ef6694eb6a0.parquet\"],\n", "\n", "}" ] }, { "cell_type": "code", "execution_count": 240, "metadata": {}, "outputs": [], "source": [ "from huggingface_hub import hf_hub_url, hf_hub_download\n", "\n", "#mdata = hf_hub_download(\n", "# repo_id=\"Sprakbanken/nb_samtale\", filename=\"data/train/metadata.jsonl\", repo_type=\"dataset\", revision=\"dev\")\n", "batchfile = _DATA_SPLITS[\"dev\"][0]\n", "data = hf_hub_download(\n", " repo_id=\"Sprakbanken/nb_samtale\", \n", " filename=f\"data/{batchfile}\", \n", " repo_type=\"dataset\", \n", " revision=\"main\"\n", ")" ] }, { "cell_type": "code", "execution_count": 102, "metadata": {}, "outputs": [], "source": [ "\n", "dl_manager = DownloadManager()" ] }, { "cell_type": "code", "execution_count": 244, "metadata": {}, "outputs": [], "source": [ " def _split_generator( dl_manager):\n", " \"\"\"Returns SplitGenerators.\"\"\"\n", " #config_name = self.config.name\n", " #if config_name.endswith(\"bokmaal\") or config_name.endswith(\"nynorsk\"):\n", " # config_name, *_ = config_name.rsplit(\"_\", 1)\n", " metadata_path = {}\n", " audio_path = {}\n", " split_type = {\"train\": datasets.Split.TRAIN, \"test\": datasets.Split.TEST, \"dev\": datasets.Split.VALIDATION}\n", " for split in split_type:\n", " metadata_path[split] = dl_manager.download(_METADATA_URL.format(split=split))\n", " audio_path[split] = dl_manager.download_and_extract([_DATA_URL.format(batch=batch) for batch in _DATA_SPLITS[split]])\n", "\n", " return [\n", " datasets.SplitGenerator(\n", " name=split_type[split],\n", " gen_kwargs={\n", " \"audio_files\": dl_manager.iter_archive(audio_path[split]),\n", " \"metadata_path\": dl_manager.download_and_extract(metadata_path[split]),\n", " \"split\": split,\n", " },\n", " ) for split in split_type\n", " ]" ] }, { "cell_type": "code", "execution_count": 242, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Downloading data: 100%|██████████| 4.10M/4.10M [00:00<00:00, 6.36MB/s]\n", "Downloading data files: 100%|██████████| 5/5 [00:04<00:00, 1.15it/s]\n", "Computing checksums: 100%|██████████| 5/5 [00:06<00:00, 1.32s/it]\n", "Extracting data files: 100%|██████████| 5/5 [00:00<00:00, 2118.98it/s]\n", "Downloading data: 100%|██████████| 509k/509k [00:00<00:00, 2.57MB/s]\n", "Downloading data files: 100%|██████████| 1/1 [00:00<00:00, 1.43it/s]\n", "Extracting data files: 100%|██████████| 1/1 [00:00<00:00, 1244.97it/s]\n", "Downloading data: 100%|██████████| 503k/503k [00:00<00:00, 1.67MB/s]\n", "Downloading data files: 100%|██████████| 1/1 [00:00<00:00, 2.94it/s]\n", "Extracting data files: 100%|██████████| 1/1 [00:00<00:00, 1512.55it/s]\n" ] } ], "source": [ "gen = _split_generator(dl_manager)" ] }, { "cell_type": "code", "execution_count": 248, "metadata": {}, "outputs": [], "source": [ "kwargs = gen[0].gen_kwargs\n", "# for key, data in kwargs.items():\n", " # print(key, data)\n", "metadata_path = kwargs.get(\"metadata_path\")\n", "audio_files = kwargs[\"audio_files\"]\n", "split = kwargs[\"split\"]\n", "config = \"annotations\"" ] }, { "cell_type": "code", "execution_count": 250, "metadata": {}, "outputs": [], "source": [ " def _generate_tables( metadata_path, audio_files, split):\n", " for file_idx, file in enumerate(audio_files):\n", " with open(file, \"rb\") as f:\n", " parquet_file = pq.ParquetFile(f)\n", " try:\n", " for batch_idx, record_batch in enumerate(\n", " parquet_file.iter_batches(batch_size=10_000)\n", " ):\n", " pa_table = pa.Table.from_batches([record_batch])\n", " # Uncomment for debugging (will print the Arrow table size and elements)\n", " # logger.warning(f\"pa_table: {pa_table} num rows: {pa_table.num_rows}\")\n", " # logger.warning('\\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))\n", " yield f\"{file_idx}_{batch_idx}\", pa_table\n", " except ValueError as e:\n", " #logger.error(f\"Failed to read file '{file}' with error {type(e)}: {e}\")\n", " raise" ] }, { "cell_type": "code", "execution_count": 251, "metadata": {}, "outputs": [], "source": [ "_generator = _generate_tables(metadata_path, audio_files, split)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "#!/usr/bin/python\n", "\n", "import datasets\n", "\n", "import pyarrow as pa\n", "import pyarrow.parquet as pq\n", "\n", "_DATA_FILES = ['data/combined-00009-of-00013-97a88bccf4215954.parquet',\n", " 'data/combined-00004-of-00013-119d653561443d7b.parquet',\n", " 'data/combined-00007-of-00013-ab54cce4ee6331d0.parquet',\n", " 'data/combined-00002-of-00013-149f5d0d22fe8f52.parquet',\n", " 'data/combined-00003-of-00013-426af6f6064e67dd.parquet',\n", " 'data/combined-00010-of-00013-89d7565c5f0d2e4e.parquet',\n", " 'data/combined-00000-of-00013-36d239509fb9e430.parquet',\n", " 'data/combined-00005-of-00013-363bba92a2b7f737.parquet',\n", " 'data/combined-00006-of-00013-4d4d574c9d87176e.parquet',\n", " 'data/combined-00001-of-00013-d5b44e96ad7d2927.parquet',\n", " 'data/combined-00012-of-00013-84cf41ef75dd5b76.parquet',\n", " 'data/combined-00011-of-00013-4c21766cedd5a4a0.parquet',\n", " 'data/combined-00008-of-00013-674f74b6f2288c61.parquet']\n", "\n", "class OOMethodTestDataset(datasets.ArrowBasedBuilder):\n", " def __init__(self, *args, **kwargs):\n", " super().__init__(*args, **kwargs)\n", "\n", " def _info(self):\n", " return datasets.DatasetInfo()\n", "\n", " def _split_generators(self, dl_manager):\n", " files = _DATA_FILES\n", " downloaded_files = dl_manager.download(files)\n", "\n", " #print(files)\n", " #print(downloaded_files)\n", "\n", " return [\n", " datasets.SplitGenerator(\n", " name=\"combined\",\n", " gen_kwargs={\n", " \"files\": downloaded_files,\n", " },\n", " ),\n", " ]\n", " \n", " def _generate_tables(self, files):\n", " for file_idx, file in enumerate(files):\n", " with open(file, \"rb\") as f:\n", " parquet_file = pq.ParquetFile(f)\n", " try:\n", " for batch_idx, record_batch in enumerate(\n", " parquet_file.iter_batches(batch_size=10_000)\n", " ):\n", " pa_table = pa.Table.from_batches([record_batch])\n", " # Uncomment for debugging (will print the Arrow table size and elements)\n", " # logger.warning(f\"pa_table: {pa_table} num rows: {pa_table.num_rows}\")\n", " # logger.warning('\\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))\n", " yield f\"{file_idx}_{batch_idx}\", pa_table\n", " except ValueError as e:\n", " #logger.error(f\"Failed to read file '{file}' with error {type(e)}: {e}\")\n", " raise" ] }, { "cell_type": "code", "execution_count": 230, "metadata": {}, "outputs": [], "source": [ "examples = {}\n", "with open(metadata, encoding=\"utf-8\") as f:\n", " for row in f:\n", " data = json.loads(row)\n", " fname = data[\"file_name\"]\n", " examples[fname] = {\n", " **data\n", " }\n", " #if self.config.name == \"annotations\": # Nothing special to do here \n", " #transcription = data[\"transcription\"]\n", " # yield examples\n", " if config == \"normalized\":\n", " examples[\"transcription\"] = normalize_transcription(data[\"transcription\"])\n", " # yield examples\n", " elif config == \"verbatim\":\n", " examples[\"transcription\"] = verbatim_transcription(data[\"transcription\"])\n", " # yield examples" ] }, { "cell_type": "code", "execution_count": 234, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "<_io.BytesIO object at 0x7fa9125b1990>\n", "<_io.BytesIO object at 0x7fa4fadb2160>\n" ] } ], "source": [ "import tarfile\n", "import io\n", "import pyarrow as pa\n", "import pyarrow.parquet as pq\n", "\n", "id_ = 1\n", "for path in audio_files:\n", " with open(path, \"rb\") as f:\n", " audio = {\"path\": path, \"bytes\": f.read()}\n", " print(id_, {**examples[path], \"audio\": audio})\n", " \n", " id_ += 1\n", " if id_ > 2:\n", " break\n", "\n", "\n", "#####\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# WIP \n", "for metadata_path, archive_path in filepaths:\n", " metadata = {}\n", " with open(metadata_path) as metadata_file:\n", " for line in metadata_file.read().split(\"\\n\"):\n", " if line:\n", " metadata_object = json.loads(line)\n", " if \"path\" in metadata_object:\n", " metadata_key = metadata_object[\"path\"].split(\"/\", 1)[-1]\n", " metadata[metadata_key] = metadata_object\n", " with open(archive_path, \"rb\") as archive_fs:\n", " archive_bytes = io.BytesIO(archive_fs.read())\n", " with tarfile.open(fileobj=archive_bytes, mode=\"r\") as tar:\n", " for audio_file in tar.getmembers():\n", " if audio_file.isfile():\n", " metadata_key = audio_file.name.split(\".mp3\", 1)[0].split(\"/\", 1)[-1]\n", " audio_bytes = tar.extractfile(audio_file).read()\n", " audio_dict = {\"bytes\": audio_bytes, \"path\": audio_file.name}\n", " fields = {key: metadata[metadata_key][key] for key in data_fields}\n", " if lang_code:\n", " if lang_code == fields.get(\"sentence_language_code\", \"\").lower():\n", " yield metadata_key, {\"audio\": audio_dict, **fields}\n", " else:\n", " yield metadata_key, {\"audio\": audio_dict, **fields}" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# WORKING FUNCTION\n", " # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`\n", " def _generate_examples(self, metadata, filepaths, split):\n", " # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.\n", " # The `id_` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.\n", " examples = {}\n", " with open(metadata, encoding=\"utf-8\") as f:\n", " for row in f:\n", " data = json.loads(row)\n", " audio_path = data[\"file_name\"].split(\"/\")[-1] # get the filename from the filepath\n", " examples[audio_path] = {\n", " **data\n", " }\n", " if self.config.name == \"annotations\": # Nothing special to do here \n", " #transcription = data[\"transcription\"]\n", " yield examples\n", " if self.config.name == \"normalized\":\n", " examples[\"transcription\"] = normalize_transcription(data[\"transcription\"])\n", " yield examples\n", " elif self.config.name == \"verbatim\":\n", " examples[\"transcription\"] = verbatim_transcription(data[\"transcription\"])\n", " yield examples\n", "\n" ] } ], "metadata": { "kernelspec": { "display_name": "talenv", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.4" }, "orig_nbformat": 4 }, "nbformat": 4, "nbformat_minor": 2 }