Commit 32fbd07 · committed by mj-new
Parent(s): f331a20

Working analysis of size and text/audio derived basic features
Files changed:
- .gitignore +3 -0
- .python-version +1 -0
- README.md +4 -4
- app.py +148 -0
- constants.py +19 -0
- playground-amu-dash.ipynb +0 -0
- requirements.txt +6 -0
- run-analysis.py +101 -0
- utils.py +601 -0
.gitignore
ADDED
@@ -0,0 +1,3 @@
+plots
+reports
+__pycache__
.python-version
ADDED
@@ -0,0 +1 @@
+streamlit
README.md
CHANGED
@@ -1,10 +1,10 @@
 ---
 title: Amu Bigos Data Dash
-emoji:
-colorFrom:
-colorTo:
+emoji: 🐨
+colorFrom: indigo
+colorTo: purple
 sdk: streamlit
-sdk_version: 1.
+sdk_version: 1.33.0
 app_file: app.py
 pinned: false
 license: cc-by-nc-nd-4.0
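This front matter is the Hugging Face Spaces configuration header: the commit fills in the card metadata (emoji and colors), pins the Streamlit SDK to 1.33.0, and keeps app.py as the Space entry point.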
app.py
ADDED
@@ -0,0 +1,148 @@
+import pandas as pd
+import streamlit as st
+
+import matplotlib.pyplot as plt
+import seaborn as sns
+import os
+import json
+
+from utils import read_reports, dict_to_multindex_df
+#add_test_split_stats_from_secret_dataset, dict_to_multindex_df_all_splits
+from utils import extract_stats_to_agg, extract_stats_all_splits, extract_stats_for_dataset_card
+from constants import BIGOS_INFO, PELCRA_INFO, ABOUT_INFO
+from datasets import get_dataset_config_names
+
+# PL ASR BIGOS analysis
+# PL ASR Diagnostic analysis
+# PELCRA analysis
+# TODO - compare the datasets
+
+st.set_page_config(layout="wide")
+
+about, analysis_bigos, analysis_bigos_pelcra = st.tabs(["About BIGOS datasets", "BIGOS V2 analysis", "PELCRA for BIGOS analysis"])
+#analysis_bigos_diagnostic
+######################################### BIGOS ################################################
+with about:
+    st.title("About BIGOS project")
+    st.markdown(ABOUT_INFO, unsafe_allow_html=True)
+    # TODO - load and display info about the BIGOS benchmark
+
+with analysis_bigos:
+    dataset_name = "amu-cai/pl-asr-bigos-v2"
+    #dataset_secret = "amu-cai/pl-asr-bigos-v2-secret"
+    dataset_short_name = "BIGOS"
+    dataset_version = "V2"
+
+    dataset_configs = get_dataset_config_names(dataset_name, trust_remote_code=True)
+    # remove the "all" subset, which is always the last config
+    dataset_configs.pop()
+    print(dataset_configs)
+    # read the reports for the public and secret datasets
+    [stats_dict_public, contents_dict_public] = read_reports(dataset_name)
+
+    # update the metrics for the test split with the secret dataset statistics
+    #stats_dict_public = add_test_split_stats_from_secret_dataset(stats_dict_public, stats_dict_secret)
+    df_multindex_for_agg = dict_to_multindex_df(stats_dict_public, all_splits=False)
+    df_multindex_all_splits = dict_to_multindex_df(stats_dict_public, all_splits=True)
+
+    # extract metrics from the dictionary and convert to various dataframes for easier analysis and visualization
+    #st.header("Summary statistics")
+
+    st.header("Dataset level metrics")
+    metrics_size = ["samples", "audio[h]", "speakers", "words", "chars"]
+    df_sum_stats_agg = extract_stats_to_agg(df_multindex_for_agg, metrics_size)
+
+    # split the dataframe into separate dataframes for easier analysis and visualization
+    st.subheader("Dataset size (audio)")
+    df_sum_stats_audio = df_sum_stats_agg[["audio[h]", "samples", "speakers"]]
+    st.dataframe(df_sum_stats_audio)
+
+    st.subheader("Dataset size (text)")
+    df_sum_stats_text = df_sum_stats_agg[["samples", "words", "chars"]]
+    st.dataframe(df_sum_stats_text)
+
+    metrics_features = ["utts_unique", "words_unique", "chars_unique", "words_per_sec", "chars_per_sec"]
+
+    df_sum_stats_all_splits = extract_stats_all_splits(df_multindex_all_splits, metrics_features)
+
+    st.subheader("Dataset features (text)")
+    df_sum_stats_feats_text = df_sum_stats_all_splits[metrics_features[0:2]]
+    st.dataframe(df_sum_stats_feats_text)
+
+    st.subheader("Dataset features (audio)")
+    df_sum_stats_feats_audio = df_sum_stats_all_splits[metrics_features[3:]]
+    st.dataframe(df_sum_stats_feats_audio)
+
+    st.header("BIGOS subsets (source datasets) cards")
+    for subset in dataset_configs:
+        st.subheader("Dataset card for: {}".format(subset))
+        df_metrics_subset_size = extract_stats_for_dataset_card(df_multindex_for_agg, subset, metrics_size, add_total=True)
+        st.dataframe(df_metrics_subset_size)
+        df_metrics_subset_features = extract_stats_for_dataset_card(df_multindex_for_agg, subset, metrics_features, add_total=False)
+        st.dataframe(df_metrics_subset_features)
+
+######################################### PELCRA ################################################
+with analysis_bigos_pelcra:
+    dataset_name = "pelcra/pl-asr-pelcra-for-bigos"
+    #dataset_secret = "pelcra/pl-asr-pelcra-for-bigos-secret"
+
+    dataset_short_name = "PELCRA"
+
+    dataset_configs = get_dataset_config_names(dataset_name, trust_remote_code=True)
+    # remove the "all" subset, which is always the last config
+    dataset_configs.pop()
+    print(dataset_configs)
+    # read the reports for the public and secret datasets
+    [stats_dict_public, contents_dict_public] = read_reports(dataset_name)
+
+    # update the metrics for the test split with the secret dataset statistics
+    #stats_dict_public = add_test_split_stats_from_secret_dataset(stats_dict_public, stats_dict_secret)
+    df_multindex_for_agg = dict_to_multindex_df(stats_dict_public, all_splits=False)
+    df_multindex_all_splits = dict_to_multindex_df(stats_dict_public, all_splits=True)
+
+    # extract metrics from the dictionary and convert to various dataframes for easier analysis and visualization
+    #st.header("Summary statistics")
+
+    st.header("Dataset level metrics")
+    metrics_size = ["samples", "audio[h]", "speakers", "words", "chars"]
+    df_sum_stats_agg = extract_stats_to_agg(df_multindex_for_agg, metrics_size)
+
+    #st.dataframe(df_sum_stats_agg)
+    #print(df_sum_stats.columns)
+
+    # split the dataframe into separate dataframes for easier analysis and visualization
+    st.subheader("Dataset size (audio)")
+    df_sum_stats_audio = df_sum_stats_agg[["audio[h]", "samples", "speakers"]]
+    st.dataframe(df_sum_stats_audio)
+
+    st.subheader("Dataset size (text)")
+    df_sum_stats_text = df_sum_stats_agg[["samples", "words", "chars"]]
+    st.dataframe(df_sum_stats_text)
+
+    metrics_features = ["utts_unique", "words_unique", "chars_unique", "words_per_sec", "chars_per_sec"]
+
+    df_sum_stats_all_splits = extract_stats_all_splits(df_multindex_all_splits, metrics_features)
+
+    st.subheader("Dataset features (text)")
+    df_sum_stats_feats_text = df_sum_stats_all_splits[metrics_features[0:2]]
+    st.dataframe(df_sum_stats_feats_text)
+
+    st.subheader("Dataset features (audio)")
+    df_sum_stats_feats_audio = df_sum_stats_all_splits[metrics_features[3:]]
+    st.dataframe(df_sum_stats_feats_audio)
+
+    st.header("PELCRA for BIGOS subsets (source datasets) cards")
+    for subset in dataset_configs:
+        st.subheader("Dataset card for: {}".format(subset))
+        df_metrics_subset_size = extract_stats_for_dataset_card(df_multindex_for_agg, subset, metrics_size, add_total=True)
+        st.dataframe(df_metrics_subset_size)
+        df_metrics_subset_features = extract_stats_for_dataset_card(df_multindex_for_agg, subset, metrics_features, add_total=False)
+        st.dataframe(df_metrics_subset_features)
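The tabs above consume the JSON reports generated offline by run-analysis.py (see below). Those reports are nested dicts keyed by dataset config, metric, and split; dict_to_multindex_df flattens them into a pandas MultiIndex DataFrame. A minimal sketch of the expected shape (the config name and counts here are made up for illustration):

    # hypothetical excerpt of reports/<dataset>/dataset_statistics.json
    stats_dict_example = {
        "example-subset-00": {                # dataset config (subset) name
            "samples":  {"train": 80, "validation": 10, "test": 10, "all_splits": 100},
            "audio[h]": {"train": 0.8, "validation": 0.1, "test": 0.1, "all_splits": 1.0},
        }
    }

dict_to_multindex_df(stats_dict_example, all_splits=False) keeps one row per (dataset, metric, split), while all_splits=True keeps only the aggregated "all_splits" rows.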
constants.py
ADDED
@@ -0,0 +1,19 @@
+ABOUT_INFO = "Welcome to the BIGOS (Benchmark Intended Grouping of Open Speech) dataset analysis dashboard! <br> \
+Here you can learn more about the contents of the BIGOS speech datasets for Polish: <br> \
+* **BIGOS V2 dataset** [HF datasets hub](https://huggingface.co/datasets/amu-cai/pl-asr-bigos-v2) <br> \
+* **BIGOS V2 diagnostic dataset** [HF datasets hub](https://huggingface.co/datasets/amu-cai/pl-asr-bigos-v2-diagnostic) <br> \
+* **PELCRA for BIGOS dataset** [HF datasets hub](https://huggingface.co/datasets/pelcra/pl-asr-pelcra-for-bigos) <br> \
+Please visit the respective tab to learn more about the contents of a specific dataset. <br><br> \
+A survey of Polish ASR speech datasets and benchmarks is available here: [Polish ASR survey](https://huggingface.co/spaces/amu-cai/pl-asr-survey) <br><br>\
+The latest and most comprehensive ASR benchmark for Polish is available here: [BIGOS/PELCRA ASR leaderboard](https://huggingface.co/spaces/amu-cai/pl-asr-bigos-bench-dash). <br><br> \
+You can also contact the author via [email](mailto:michal.junczyk@amu.edu.pl) or [LinkedIn](https://www.linkedin.com/in/michaljunczyk/).<br>"
+
+BIGOS_INFO = "TODO"
+PELCRA_INFO = "TODO"
+
+CITATION_MAIN = "@misc{junczyk-2024-pl-asr-bigos-dataset-analysis, <br> \
+title = {Analysis of BIGOS Dataset for Polish ASR evaluation.}, <br> \
+author = {Michał Junczyk}, <br> \
+year = {2024}, <br> \
+publisher = {Hugging Face}, <br> \
+url = {https://huggingface.co/spaces/amu-cai/amu-bigos-data-dash} }"
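Note that ABOUT_INFO mixes Markdown links with literal <br> tags; the tags only render because app.py passes unsafe_allow_html=True to st.markdown(ABOUT_INFO, ...).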
playground-amu-dash.ipynb
ADDED
The diff for this file is too large to render. See raw diff.
requirements.txt
ADDED
@@ -0,0 +1,6 @@
+datasets
+pandas
+streamlit
+seaborn
+matplotlib
+librosa
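For a local run of the same analysis, something like pip install -r requirements.txt followed by streamlit run app.py should suffice; on Spaces the requirements file is installed automatically by the SDK.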
run-analysis.py
ADDED
@@ -0,0 +1,101 @@
+import os
+import json
+from datasets import load_dataset, get_dataset_config_names, Features, Value
+from utils import num_of_samples_per_split, uniq_utts_per_split, words_per_split, uniq_words_per_split, chars_per_split, uniq_chars_per_split
+from utils import audio_duration_per_split, speakers_per_split, meta_cov_per_split
+#, uniq_utts_per_speaker
+from utils import meta_distribution_text, meta_distribution_violin_plot, recordings_per_speaker, speech_rate_words_per_split, speech_rate_chars_per_split
+import argparse
+
+# TODO move to constants
+output_dir_plots = "./plots"
+output_dir_reports = "./reports"
+os.makedirs(output_dir_plots, exist_ok=True)
+os.makedirs(output_dir_reports, exist_ok=True)
+
+# read configuration from command line arguments
+parser = argparse.ArgumentParser()
+parser.add_argument("--dataset", type=str, required=True, help="Name of the dataset to generate reports for")
+# caveat: argparse's type=bool treats any non-empty string as True
+parser.add_argument("--secret_test_split", default=True, type=bool, help="Should references for the test split be retrieved from the secret distribution?")
+
+args = parser.parse_args()
+
+dataset_name = args.dataset
+print("Generating reports for dataset: {}".format(dataset_name))
+if (args.secret_test_split):
+    dataset_name_secret = str.join("-", [dataset_name, "secret"])
+    # check if the secret repo exists
+    print(dataset_name_secret)
+    try:
+        dataset_configs_secret = get_dataset_config_names(dataset_name_secret)
+    except Exception:
+        print("Config for secret dataset {} cannot be retrieved!".format(dataset_name_secret))
+
+#dataset_name = "amu-cai/pl-asr-bigos-v2"
+output_dir_reports_dataset = os.path.join(output_dir_reports, dataset_name)
+os.makedirs(output_dir_reports_dataset, exist_ok=True)
+
+# get dataset config names
+dataset_configs = get_dataset_config_names(dataset_name)
+
+# initialize output structures
+dataset_statistics = {}
+output_fn_stats = os.path.join(output_dir_reports_dataset, "dataset_statistics.json")
+
+dataset_contents = {}
+output_fn_contents = os.path.join(output_dir_reports_dataset, "dataset_contents.json")
+
+# specify features to load; skip loading of audio data
+features_to_load = Features({
+    'audioname': Value(dtype='string', id=None),
+    'split': Value(dtype='string', id=None),
+    'dataset': Value(dtype='string', id=None),
+    'speaker_id': Value(dtype='string', id=None),
+    'ref_orig': Value(dtype='string', id=None),
+    'audio_duration_samples': Value(dtype='int32', id=None),
+    'audio_duration_seconds': Value(dtype='float32', id=None),
+    'samplingrate_orig': Value(dtype='int32', id=None),
+    'sampling_rate': Value(dtype='int32', id=None),
+    'audiopath_bigos': Value(dtype='string', id=None),
+    'audiopath_local': Value(dtype='string', id=None),
+    'speaker_age': Value(dtype='string', id=None),
+    'speaker_sex': Value(dtype='string', id=None)})
+
+for config_name in dataset_configs:
+    print("Generating stats for {}".format(config_name))
+
+    dataset_statistics[config_name] = {}
+    dataset_contents[config_name] = {}
+
+    dataset_hf_subset = load_dataset(dataset_name, config_name, features=features_to_load, trust_remote_code=True)
+    if (args.secret_test_split):
+        dataset_hf_subset_secret = load_dataset(dataset_name_secret, config_name, features=features_to_load, trust_remote_code=True)
+    else:
+        # without the secret distribution, fall back to the public references for the test split
+        dataset_hf_subset_secret = dataset_hf_subset
+
+    dataset_statistics[config_name]["samples"] = num_of_samples_per_split(dataset_hf_subset)
+    dataset_statistics[config_name]["audio[h]"] = audio_duration_per_split(dataset_hf_subset)
+    dataset_statistics[config_name]["speakers"] = speakers_per_split(dataset_hf_subset)
+
+    # metrics based on transcriptions (references) - requires reading the secret repo for the test split
+    dataset_statistics[config_name]["utts_unique"], dataset_contents[config_name]["unique_utts"] = uniq_utts_per_split(dataset_hf_subset, dataset_hf_subset_secret)
+    dataset_statistics[config_name]["words"] = words_per_split(dataset_hf_subset, dataset_hf_subset_secret)
+    dataset_statistics[config_name]["words_unique"], dataset_contents[config_name]["unique_words"] = uniq_words_per_split(dataset_hf_subset, dataset_hf_subset_secret)
+    dataset_statistics[config_name]["chars"] = chars_per_split(dataset_hf_subset, dataset_hf_subset_secret)
+    dataset_statistics[config_name]["chars_unique"], dataset_contents[config_name]["unique_chars"] = uniq_chars_per_split(dataset_hf_subset, dataset_hf_subset_secret)
+    dataset_statistics[config_name]["words_per_sec"] = speech_rate_words_per_split(dataset_hf_subset, dataset_hf_subset_secret)
+    dataset_statistics[config_name]["chars_per_sec"] = speech_rate_chars_per_split(dataset_hf_subset, dataset_hf_subset_secret)
+
+    # metadata coverage per subset in percent - speaker sex and age
+    dataset_statistics[config_name]["meta_cov_sex"] = meta_cov_per_split(dataset_hf_subset, 'speaker_sex')
+    dataset_statistics[config_name]["meta_cov_age"] = meta_cov_per_split(dataset_hf_subset, 'speaker_age')
+
+    # metadata value distribution per subset
+    dataset_statistics[config_name]["meta_dist_sex"] = meta_distribution_text(dataset_hf_subset, 'speaker_sex')
+    dataset_statistics[config_name]["meta_dist_age"] = meta_distribution_text(dataset_hf_subset, 'speaker_age')
+
+    dataset_statistics[config_name]["samples_per_spk"], dataset_contents[config_name]["samples_per_spk"] = recordings_per_speaker(dataset_hf_subset)
+    # dataset_statistics[config_name] = uniq_utts_per_speaker(dataset_hf_subset)
+    # TODO: number of words per speaker (min, max, med, avg, std)
+
+    # distribution of audio duration per sex
+    output_dir_plots_subset = os.path.join(output_dir_plots, config_name)
+    meta_distribution_violin_plot(dataset_hf_subset, output_dir_plots_subset, 'audio_duration_seconds', 'speaker_sex')
+
+    # distribution of audio duration per age
+    meta_distribution_violin_plot(dataset_hf_subset, output_dir_plots_subset, 'audio_duration_seconds', 'speaker_age')
+
+# save the dataset statistics dict to storage as a JSON file
+with open(output_fn_stats, 'w') as f:
+    json.dump(dataset_statistics, f)
+
+# save the dataset content analysis to storage
+with open(output_fn_contents, 'w') as f:
+    json.dump(dataset_contents, f)
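For reference, a typical invocation might look like python run-analysis.py --dataset amu-cai/pl-asr-bigos-v2 (retrieving test-split references additionally assumes read access to the corresponding "-secret" repo). The script writes dataset_statistics.json and dataset_contents.json under ./reports/<dataset> and violin plots under ./plots/<config>; the two JSON files are what read_reports() in utils.py later loads.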
utils.py
ADDED
@@ -0,0 +1,601 @@
+import json
+
+import seaborn as sns
+import matplotlib.pyplot as plt
+import pandas as pd
+import os
+import numpy as np
+
+# TODO move to consts
+buckets_age = ['teens', 'twenties', 'thirties', 'fourties', 'fifties', 'sixties', 'seventies', 'eighties', 'nineties']
+buckets_sex = ["male", "female"]
+
+def load_bigos_analyzer_report(fp: str) -> dict:
+    with open(fp, 'r') as f:
+        data = json.load(f)
+    return data
+
+def num_of_samples_per_split(dataset_hf):
+    # input - huggingface dataset object
+    # output - dictionary with the number of samples per split
+    out_dict = {}
+    metric = "samples"
+    print("Calculating {}".format(metric))
+
+    for split in dataset_hf.keys():
+        samples = dataset_hf[split].num_rows
+        #print(split, samples)
+        out_dict[split] = samples
+    # add the number of samples for all splits
+    out_dict["all_splits"] = sum(out_dict.values())
+
+    return out_dict
+
+def audio_duration_per_split(dataset_hf):
+    # input - huggingface dataset object
+    # output - dictionary with the audio duration in hours per split
+    out_dict = {}
+    metric = "audio[h]"
+    print("Calculating {}".format(metric))
+
+    for split in dataset_hf.keys():
+        #sampling_rate = dataset_hf[split]["sampling_rate"][0]
+        #audio_total_length_samples = sum(len(audio_file["array"]) for audio_file in dataset_hf["test"]["audio"])
+        audio_total_length_seconds = sum(dataset_hf[split]["audio_duration_seconds"])
+        audio_total_length_hours = round(audio_total_length_seconds / 3600, 2)
+        out_dict[split] = audio_total_length_hours
+        #print(split, audio_total_length_hours)
+    # add the duration for all splits
+    out_dict["all_splits"] = sum(out_dict.values())
+    return out_dict
+
+def speakers_per_split(dataset_hf):
+    # input - huggingface dataset object
+    # output - dictionary with the number of unique speakers per split
+    out_dict = {}
+    metric = "speakers"
+    print("Calculating {}".format(metric))
+
+    for split in dataset_hf.keys():
+        # extract speaker ids from the file_id (fifth dash-separated field)
+        speakers_ids_all = [str(fileid).split("-")[4] for fileid in dataset_hf[split]["audioname"]]
+        speakers_ids_uniq = list(set(speakers_ids_all))
+        speakers_count = len(speakers_ids_uniq)
+        #print(split, speakers_count)
+        out_dict[split] = speakers_count
+    # add the number of speakers for all splits
+    out_dict["all_splits"] = sum(out_dict.values())
+    return out_dict
+
+def uniq_utts_per_split(dataset_hf, dataset_hf_secret):
+    # input - huggingface dataset objects (public and secret)
+    # output - dictionary with the number of unique utterances per split, plus the list of all utterances
+    out_dict = {}
+    metric = "utts_unique"
+    print("Calculating {}".format(metric))
+    utts_all = []
+    for split in dataset_hf.keys():
+        # references for the test split come from the secret dataset
+        if (split == "test"):
+            utts_split = dataset_hf_secret[split]["ref_orig"]
+        else:
+            utts_split = dataset_hf[split]["ref_orig"]
+        utts_all = utts_all + utts_split
+        utts_uniq = list(set(utts_split))
+        utts_uniq_count = len(utts_uniq)
+        #print(split, utts_uniq_count)
+        out_dict[split] = utts_uniq_count
+    # add the number of unique utterances for all splits
+    out_dict["all_splits"] = len(list(set(utts_all)))
+    return out_dict, utts_all
+
+def words_per_split(dataset_hf, dataset_hf_secret):
+    # input - huggingface dataset objects (public and secret)
+    # output - dictionary with the number of words per split
+    out_dict = {}
+    metric = "words"
+    print("Calculating {}".format(metric))
+
+    for split in dataset_hf.keys():
+        # references for the test split come from the secret dataset
+        if (split == "test"):
+            utts_all = dataset_hf_secret[split]["ref_orig"]
+        else:
+            utts_all = dataset_hf[split]["ref_orig"]
+        utts_lengths = [len(utt.split(" ")) for utt in utts_all]
+        words_all_count = sum(utts_lengths)
+        #print(split, words_all_count)
+        out_dict[split] = words_all_count
+    # add the number of words for all splits
+    out_dict["all_splits"] = sum(out_dict.values())
+    return out_dict
+
+def uniq_words_per_split(dataset_hf, dataset_hf_secret):
+    # input - huggingface dataset objects (public and secret)
+    # output - dictionary with the number of unique words per split, plus the list of unique words
+    out_dict = {}
+    out_words_list = []
+    metric = "words_unique"
+    print("Calculating {}".format(metric))
+
+    for split in dataset_hf.keys():
+        # references for the test split come from the secret dataset
+        if (split == "test"):
+            utts_all = dataset_hf_secret[split]["ref_orig"]
+        else:
+            utts_all = dataset_hf[split]["ref_orig"]
+
+        words_all = " ".join(utts_all).split(" ")
+        words_uniq = list(set(words_all))
+        out_words_list = out_words_list + words_uniq
+        words_uniq_count = len(words_uniq)
+        #print(split, words_uniq_count)
+        out_dict[split] = words_uniq_count
+
+    # add the number of unique words for all splits
+    out_words_uniq = list(set(out_words_list))
+    out_words_uniq_count = len(out_words_uniq)
+    out_dict["all_splits"] = out_words_uniq_count
+    #print("all", out_words_uniq_count)
+
+    return out_dict, out_words_uniq
+
+def chars_per_split(dataset_hf, dataset_hf_secret):
+    # input - huggingface dataset objects (public and secret)
+    # output - dictionary with the number of characters per split
+    out_dict = {}
+
+    metric = "chars"
+    print("Calculating {}".format(metric))
+
+    for split in dataset_hf.keys():
+        # references for the test split come from the secret dataset
+        if (split == "test"):
+            utts_all = dataset_hf_secret[split]["ref_orig"]
+        else:
+            utts_all = dataset_hf[split]["ref_orig"]
+        words_all = " ".join(utts_all).split(" ")
+        chars_all = " ".join(words_all)
+        chars_all_count = len(chars_all)
+        #print(split, chars_all_count)
+        out_dict[split] = chars_all_count
+    # add the number of characters for all splits
+    out_dict["all_splits"] = sum(out_dict.values())
+    return out_dict
+
+def uniq_chars_per_split(dataset_hf, dataset_hf_secret):
+    # input - huggingface dataset objects (public and secret)
+    # output - dictionary with the number of unique characters per split, plus the list of unique characters
+    out_dict = {}
+    out_chars_list = []
+    metric = "chars_unique"
+    print("Calculating {}".format(metric))
+
+    for split in dataset_hf.keys():
+        # references for the test split come from the secret dataset
+        if (split == "test"):
+            utts_all = dataset_hf_secret[split]["ref_orig"]
+        else:
+            utts_all = dataset_hf[split]["ref_orig"]
+        words_all = " ".join(utts_all).split(" ")
+        words_uniq = list(set(words_all))
+        chars_uniq = list(set("".join(words_uniq)))
+        chars_uniq_count = len(chars_uniq)
+        #print(split, chars_uniq_count)
+        out_dict[split] = chars_uniq_count
+        out_chars_list = out_chars_list + chars_uniq
+    # add the number of unique characters for all splits
+    out_chars_uniq = list(set(out_chars_list))
+    out_chars_uniq_count = len(out_chars_uniq)
+    out_dict["all_splits"] = out_chars_uniq_count
+    #print("all", out_chars_uniq_count)
+
+    return out_dict, out_chars_uniq
+
+def meta_cov_per_split(dataset_hf, meta_field):
+    # input - huggingface dataset object
+    # output - dictionary with metadata coverage (fraction of non-empty values) per split
+    no_meta = False
+    # TODO move to config
+    if meta_field == 'speaker_age':
+        buckets = buckets_age
+    if meta_field == 'speaker_sex':
+        buckets = buckets_sex
+    out_dict = {}
+    metric = "meta_cov_" + meta_field
+    print("Calculating {}".format(metric))
+
+    meta_info_all = 0
+    meta_info_not_null_all = 0
+    for split in dataset_hf.keys():
+        meta_info = dataset_hf[split][meta_field]
+        meta_info_count = len(meta_info)
+        meta_info_all += meta_info_count
+        # calculate coverage
+        meta_info_not_null_count = len([x for x in meta_info if x != "N/A"])
+        if meta_info_not_null_count == 0:
+            out_dict[split] = "N/A"
+            continue
+        meta_info_not_null_all += meta_info_not_null_count
+        meta_info_coverage = round(meta_info_not_null_count / meta_info_count, 2)
+        #print(split, meta_info_coverage)
+        out_dict[split] = meta_info_coverage
+
+    # add the coverage for all splits
+    if (meta_info_not_null_all == 0):
+        out_dict["all_splits"] = "N/A"
+    else:
+        out_dict["all_splits"] = round(meta_info_not_null_all / meta_info_all, 2)
+    return out_dict
+
+def speech_rate_words_per_split(dataset_hf, dataset_hf_secret):
+    # input - huggingface dataset objects (public and secret)
+    # output - dictionary with the speech rate in words per second per split
+    out_dict = {}
+    metric = "words_per_second"
+    print("Calculating {}".format(metric))
+
+    words_all_count = 0
+    audio_total_length_seconds = 0
+
+    for split in dataset_hf.keys():
+        # references for the test split come from the secret dataset
+        if (split == "test"):
+            utts_split = dataset_hf_secret[split]["ref_orig"]
+        else:
+            utts_split = dataset_hf[split]["ref_orig"]
+        words_split = " ".join(utts_split).split(" ")
+        words_split_count = len(words_split)
+        words_all_count += words_split_count
+        audio_split_length_seconds = sum(dataset_hf[split]["audio_duration_seconds"])
+        audio_total_length_seconds += audio_split_length_seconds
+        speech_rate = round(words_split_count / audio_split_length_seconds, 2)
+        #print(split, speech_rate)
+        out_dict[split] = speech_rate
+    # add the speech rate for all splits
+    out_dict["all_splits"] = round(words_all_count / audio_total_length_seconds, 2)
+    return out_dict
+
+def speech_rate_chars_per_split(dataset_hf, dataset_hf_secret):
+    # input - huggingface dataset objects (public and secret)
+    # output - dictionary with the speech rate in characters per second per split
+    out_dict = {}
+    metric = "chars_per_second"
+    print("Calculating {}".format(metric))
+
+    chars_all_count = 0
+    audio_total_length_seconds = 0
+
+    for split in dataset_hf.keys():
+        # references for the test split come from the secret dataset
+        if (split == "test"):
+            utts_split = dataset_hf_secret[split]["ref_orig"]
+        else:
+            utts_split = dataset_hf[split]["ref_orig"]
+        words_split = " ".join(utts_split).split(" ")
+        chars_split_count = len("".join(words_split))
+        chars_all_count += chars_split_count
+        audio_split_length_seconds = sum(dataset_hf[split]["audio_duration_seconds"])
+        audio_total_length_seconds += audio_split_length_seconds
+        speech_rate = round(chars_split_count / audio_split_length_seconds, 2)
+        #print(split, speech_rate)
+        out_dict[split] = speech_rate
+    # add the speech rate for all splits
+    out_dict["all_splits"] = round(chars_all_count / audio_total_length_seconds, 2)
+    return out_dict
+
+# distribution of metadata values (e.g. speaker age or sex)
+def meta_distribution_text(dataset_hf, meta_field):
+    no_meta = False
+    if meta_field == 'speaker_age':
+        buckets = buckets_age
+    if meta_field == 'speaker_sex':
+        buckets = buckets_sex
+
+    # input - huggingface dataset object
+    # output - dictionary with the distribution of metadata values per split
+    out_dict = {}
+    metric = "distribution_" + meta_field
+    print("Calculating {}".format(metric))
+
+    values_count_total = {}
+    for bucket in buckets:
+        values_count_total[bucket] = 0
+
+    for split in dataset_hf.keys():
+        out_dict[split] = {}
+        meta_info = dataset_hf[split][meta_field]
+        meta_info_not_null = [x for x in meta_info if x != "N/A"]
+
+        if len(meta_info_not_null) == 0:
+            out_dict[split] = "N/A"
+            no_meta = True
+            continue
+        for bucket in buckets:
+            values_count = meta_info_not_null.count(bucket)
+            values_count_total[bucket] += values_count
+            out_dict[split][bucket] = round(values_count / len(meta_info_not_null), 2)
+        #print(split, out_dict[split])
+
+    # add the distribution for all splits
+    if (no_meta):
+        out_dict["all_splits"] = "N/A"
+        return out_dict
+
+    out_dict["all_splits"] = {}
+    # calculate the total number of samples across buckets
+    total_samples = sum(values_count_total.values())
+    for bucket in buckets:
+        out_dict["all_splits"][bucket] = round(values_count_total[bucket] / total_samples, 2)
+    return out_dict
+
+def recordings_per_speaker(dataset_hf):
+    # input - huggingface dataset object
+    # output - dictionaries with per-split statistics and per-speaker recording counts
+    out_dict_stats = {}
+    out_dict_contents = {}
+
+    metric = "recordings_per_speaker"
+    print("Calculating {}".format(metric))
+
+    recordings_per_speaker_stats_dict_all = {}
+    recordings_total = 0
+    speakers_total = 0
+
+    for split in dataset_hf.keys():
+        # extract speaker prefixes from the file_id (first five dash-separated fields)
+        audiopaths = dataset_hf[split]["audioname"]
+        speaker_prefixes = [str(fileid).split("-")[0:5] for fileid in audiopaths]
+
+        # create a dictionary with the list of audio paths matching each speaker prefix
+        speakers_dict_split = {}
+
+        # Create initial dictionary keys from speaker prefixes
+        for speaker_prefix in speaker_prefixes:
+            speaker_prefix_str = "-".join(speaker_prefix)
+            speakers_dict_split[speaker_prefix_str] = []
+
+        # Populate the dictionary with matching audio paths
+        for audio_path in audiopaths:
+            for speaker_prefix_str in speakers_dict_split.keys():
+                if speaker_prefix_str in audio_path:
+                    speakers_dict_split[speaker_prefix_str].append(audio_path)
+
+        # iterate over speaker prefixes and calculate the number of recordings per speaker
+        recordings_per_speaker_stats_dict_split = {}
+        for speaker_prefix_str in speakers_dict_split.keys():
+            recordings_per_speaker_stats_dict_split[speaker_prefix_str] = len(speakers_dict_split[speaker_prefix_str])
+
+        out_dict_contents[split] = recordings_per_speaker_stats_dict_split
+
+        # use the per-speaker recording counts to calculate statistics: min, max, avg, median, std
+        out_dict_stats[split] = {}
+        speakers_split = len(list(recordings_per_speaker_stats_dict_split.keys()))
+        speakers_total += speakers_split
+
+        recordings_split = len(audiopaths)
+        recordings_total += recordings_split
+
+        average_recordings_per_speaker = round(recordings_split / speakers_split, 2)
+        out_dict_stats[split]["average"] = average_recordings_per_speaker
+        out_dict_stats[split]["std"] = round(np.std(list(recordings_per_speaker_stats_dict_split.values())), 2)
+        out_dict_stats[split]["median"] = np.median(list(recordings_per_speaker_stats_dict_split.values()))
+        out_dict_stats[split]["min"] = min(recordings_per_speaker_stats_dict_split.values())
+        out_dict_stats[split]["max"] = max(recordings_per_speaker_stats_dict_split.values())
+
+        recordings_per_speaker_stats_dict_all = recordings_per_speaker_stats_dict_all | recordings_per_speaker_stats_dict_split
+
+    # add statistics for all splits
+    average_recordings_per_speaker_all = round(recordings_total / speakers_total, 2)
+    out_dict_stats["all_splits"] = {}
+    out_dict_stats["all_splits"]["average"] = average_recordings_per_speaker_all
+    out_dict_stats["all_splits"]["std"] = round(np.std(list(recordings_per_speaker_stats_dict_all.values())), 2)
+    out_dict_stats["all_splits"]["median"] = np.median(list(recordings_per_speaker_stats_dict_all.values()))
+    out_dict_stats["all_splits"]["min"] = min(recordings_per_speaker_stats_dict_all.values())
+    out_dict_stats["all_splits"]["max"] = max(recordings_per_speaker_stats_dict_all.values())
+    out_dict_contents["all_splits"] = recordings_per_speaker_stats_dict_all
+    return out_dict_stats, out_dict_contents
+
+def meta_distribution_bar_plot(dataset_hf, output_dir, dimension="speaker_sex"):
+    pass
+
+def meta_distribution_violin_plot(dataset_hf, output_dir, metric="audio_duration_seconds", dimension="speaker_sex"):
+    # input - huggingface dataset object
+    # output - violin plot of the metric (e.g. audio duration) per dimension (e.g. sex), saved as PNG
+    print("Generating violin plot for metric {} for dimension {}".format(metric, dimension))
+
+    for split in dataset_hf.keys():
+        df_dataset = pd.DataFrame(dataset_hf[split])
+
+        # drop samples for which the dimension column is "N/A", "other" or "unknown"
+        df_filtered = df_dataset[df_dataset[dimension] != "N/A"]
+        df_filtered = df_filtered[df_filtered[dimension] != "other"]
+        df_filtered = df_filtered[df_filtered[dimension] != "unknown"]
+        # if df_filtered is empty, skip violin plot generation for this split and dimension
+        if df_filtered.empty:
+            print("No data for split {} and dimension {}".format(split, dimension))
+            continue
+
+        if (len(df_filtered) >= 5000):
+            sample_size = 5000
+            print("Selecting sample of size {}".format(sample_size))
+        else:
+            sample_size = len(df_filtered)
+            print("Selecting full split of size {}".format(sample_size))
+
+        df = df_filtered.sample(sample_size)
+
+        print("Generating plot")
+        plt.figure(figsize=(20, 15))
+        plot = sns.violinplot(data=df, hue=dimension, x='dataset', y=metric, split=True, fill=False, inner='quart', legend='auto', common_norm=True)
+        plot.set_xticklabels(plot.get_xticklabels(), rotation=30, horizontalalignment='right')
+
+        plt.title('Violin plot of {} by {} for split {}'.format(metric, dimension, split))
+        plt.xlabel(dimension)
+        plt.ylabel(metric)
+
+        #plt.show()
+        # save the figure to a file
+        os.makedirs(output_dir, exist_ok=True)
+        output_fn = os.path.join(output_dir, metric + "-" + dimension + "-" + split + ".png")
+        plt.savefig(output_fn)
+    print("Plot generation completed")
+
+def read_reports(dataset_name):
+    json_contents = "./reports/{}/dataset_contents.json".format(dataset_name)
+    json_stats = "./reports/{}/dataset_statistics.json".format(dataset_name)
+
+    with open(json_contents, 'r') as file:
+        contents_dict = json.load(file)
+
+    with open(json_stats, 'r') as file:
+        stats_dict = json.load(file)
+
+    return (stats_dict, contents_dict)
+
+def add_test_split_stats_from_secret_dataset(stats_dict_public, stats_dict_secret):
+    # overwrite the public test-split values for utts, words, words_unique, chars, chars_unique
+    # and speech rate with the statistics computed on the secret dataset
+    for dataset in stats_dict_public.keys():
+        print(dataset)
+        for metric in stats_dict_secret[dataset].keys():
+            for split in stats_dict_secret[dataset][metric].keys():
+                if split == "test":
+                    stats_dict_public[dataset][metric][split] = stats_dict_secret[dataset][metric][split]
+
+    return (stats_dict_public)
+
+def dict_to_multindex_df(dict_in, all_splits=False):
+    # Creating a MultiIndex DataFrame
+    rows = []
+    for dataset, metrics in dict_in.items():
+        if (dataset == "all"):
+            continue
+        for metric, splits in metrics.items():
+            for split, value in splits.items():
+                if (all_splits):
+                    # keep only the aggregated "all_splits" rows
+                    if (split == "all_splits"):
+                        rows.append((dataset, metric, split, value))
+                else:
+                    # keep only the per-split rows
+                    if (split == "all_splits"):
+                        continue
+                    rows.append((dataset, metric, split, value))
+
+    # Convert to DataFrame
+    df = pd.DataFrame(rows, columns=['dataset', 'metric', 'split', 'value'])
+    df.set_index(['dataset', 'metric', 'split'], inplace=True)
+
+    return (df)
+
+def dict_to_multindex_df_all_splits(dict_in):
+    # Creating a MultiIndex DataFrame with the aggregated "all_splits" rows only
+    rows = []
+    for dataset, metrics in dict_in.items():
+        if (dataset == "all"):
+            continue
+        for metric, splits in metrics.items():
+            for split, value in splits.items():
+                if (split == "all_splits"):
+                    rows.append((dataset, metric, split, value))
+
+    # Convert to DataFrame
+    df = pd.DataFrame(rows, columns=['dataset', 'metric', 'split', 'value'])
+    df.set_index(['dataset', 'metric', 'split'], inplace=True)
+
+    return (df)
+
+def extract_stats_to_agg(df_multindex_per_split, metrics):
+    # input - multiindex dataframe with three index levels - dataset, metric, split
+
+    # select only the relevant metrics
+    df_agg_splits = df_multindex_per_split.loc[(slice(None), metrics), :]
+
+    # unstack - move per-split rows to columns
+    df_agg_splits = df_agg_splits.unstack(level='split')
+
+    # aggregate values across all splits
+    df_agg_splits['value', 'total'] = df_agg_splits['value'].sum(axis=1)
+    # drop the per-split columns
+    df_agg_splits.columns = df_agg_splits.columns.droplevel(0)
+    columns_to_drop = ['test', 'train', 'validation']
+    df_agg_splits.drop(columns=columns_to_drop, inplace=True)
+
+    # move rows corresponding to specific metrics into specific columns
+    df_agg_splits = df_agg_splits.unstack(level='metric')
+    df_agg_splits.columns = df_agg_splits.columns.droplevel(0)
+
+    return (df_agg_splits)
+
+def extract_stats_all_splits(df_multiindex_all_splits, metrics):
+    df_all_splits = df_multiindex_all_splits.loc[(slice(None), metrics), :]
+
+    df_all_splits = df_all_splits.unstack(level='metric')
+    df_all_splits.columns = df_all_splits.columns.droplevel(0)
+
+    #print(df_all_splits)
+    df_all_splits = df_all_splits.droplevel('split', axis=0)
+
+    return (df_all_splits)
+
+def extract_stats_for_dataset_card(df_multindex_per_split, subset, metrics, add_total=False):
+    print(df_multindex_per_split)
+    df_metrics_subset = df_multindex_per_split
+
+    df_metrics_subset = df_metrics_subset.unstack(level='split')
+    df_metrics_subset.columns = df_metrics_subset.columns.droplevel(0)
+
+    df_metrics_subset = df_metrics_subset.loc[(slice(None), metrics), :]
+
+    df_metrics_subset = df_metrics_subset.query("dataset == '{}'".format(subset))
+    # change the order of columns to train, validation, test
+    df_metrics_subset.reset_index(inplace=True)
+    if (add_total):
+        new_columns = ['metric', 'train', 'validation', 'test', 'total']
+        total = df_metrics_subset[['train', 'validation', 'test']].sum(axis=1)
+        df_metrics_subset['total'] = total
+    else:
+        new_columns = ['metric', 'train', 'validation', 'test']
+
+    df_metrics_subset = df_metrics_subset.reindex(columns=new_columns)
+    df_metrics_subset.set_index('metric', inplace=True)
+
+    return (df_metrics_subset)
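To illustrate how the reshaping helpers above fit together, here is a minimal, self-contained sketch. The subset name and counts are hypothetical; it assumes utils.py is importable (i.e. you run it from the repo root) and that every metric has train/validation/test splits, since extract_stats_to_agg drops exactly those columns.

    # usage sketch for dict_to_multindex_df and extract_stats_to_agg
    from utils import dict_to_multindex_df, extract_stats_to_agg

    stats = {
        "example-subset-00": {   # hypothetical config name
            "samples": {"train": 80, "validation": 10, "test": 10, "all_splits": 100},
            "words":   {"train": 800, "validation": 95, "test": 105, "all_splits": 1000},
        }
    }

    # one row per (dataset, metric, split); the aggregated "all_splits" rows are dropped
    df = dict_to_multindex_df(stats, all_splits=False)
    print(df.loc[("example-subset-00", "samples", "train"), "value"])  # -> 80

    # one row per subset, one column per metric, values summed over the splits
    df_agg = extract_stats_to_agg(df, ["samples", "words"])
    print(df_agg.loc["example-subset-00", "samples"])  # -> 100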