"""Convert GigaSpeech parquet rows into mumospee metadata records and save them as CSV."""

import pandas as pd

from process_from_parquet import read_parquet_file, process_parquet_df, save_to_csv

# Column order of the metadata CSV produced by ``process_partition``.
FIELD_NAMES = [
    "path", "url", "type", "duration", "language",
    "transcript", "tag", "split", "license",
]

# Location on the Hugging Face hub where the source parquet files are hosted.
_PARQUET_URL_BASE = (
    "https://huggingface.co/datasets/meetween/mumospee_gigaspeech/"
    "resolve/main/gigaspeech-parquet"
)


def process_partition(partition, process_row_with_params):
    """Apply ``process_row_with_params`` to each row of an audio partition.

    Args:
        partition: DataFrame chunk of the source parquet file.
        process_row_with_params: callable taking one row and returning a
            metadata dict (typically ``process_row`` with the file name bound).

    Returns:
        pd.DataFrame: one metadata row per input row, columns ``FIELD_NAMES``.
    """
    result = partition.apply(process_row_with_params, axis=1)
    # ``apply`` with axis=1 yields a Series of dicts; expand it into a
    # DataFrame so downstream processing sees proper columns.
    return pd.DataFrame(result.tolist(), columns=FIELD_NAMES)


def _get_split(parquet_file):
    """Infer the dataset split ("train"/"test"/"validation") from a file name.

    Falls back to "train" when no known split keyword appears in the name.
    """
    for split in ("train", "test", "validation"):
        if split in parquet_file:
            return split
    return "train"


def process_row(row, parquet_file_name):
    """Build the metadata dict for one dataframe row.

    Args:
        row: mapping-like row with keys ``segment_id``, ``begin_time``,
            ``end_time`` and ``text_transformed``.
        parquet_file_name: name of the parquet file the row came from; used
            for the download URL and to derive the split.

    Returns:
        dict: metadata keyed by ``FIELD_NAMES``.
    """
    try:
        duration = str(round(float(row["end_time"]) - float(row["begin_time"]), 2))
    except (KeyError, TypeError, ValueError):
        # Missing or non-numeric timestamps: mark the duration as unavailable
        # rather than failing the whole partition.
        duration = "n/a"
    return {
        # NOTE: single quotes inside the f-string — double quotes here are a
        # SyntaxError on Python < 3.12 (PEP 701).
        "path": f"{row['segment_id']}.wav",
        "url": f"{_PARQUET_URL_BASE}/{parquet_file_name}",
        "type": "audio",
        "duration": duration,
        "language": "en",
        "transcript": row["text_transformed"],
        "tag": "GigaSpeech",
        "split": _get_split(parquet_file_name),
        "license": "apache-2.0",
    }


def main(config):
    """Read the parquet file, convert every row to metadata, and write a CSV.

    Args:
        config: dict with keys ``parquet_file_path``, ``npartitions``,
            ``top`` and ``final_path``.
    """
    parquet_df, file_name = read_parquet_file(
        config["parquet_file_path"],
        npartitions=config["npartitions"],
        top=config["top"],
    )
    processed_df = process_parquet_df(
        parquet_df=parquet_df,
        file_name=file_name,
        process_row_func=process_row,
        process_partition=process_partition,
    )
    save_to_csv(processed_df, final_path=config["final_path"])