File size: 2,097 Bytes
e3b7714
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
import pandas as pd
from process_from_parquet import read_parquet_file, process_parquet_df, save_to_csv

def process_partition(partition, process_row_with_params):
    """
    Apply the row-processing callable to every row of a partition and
    expand the resulting Series of metadata dicts into a DataFrame with
    the fixed audio-metadata column order.
    """
    columns = ["path", "url" ,"type", "duration", "language", "transcript", "tag", "split", "license"]
    per_row_metadata = partition.apply(process_row_with_params, axis=1)
    # .tolist() turns the Series of dicts into a list so pandas builds one row per dict
    return pd.DataFrame(per_row_metadata.tolist(), columns=columns)

def _get_split(parquet_file):
    if "train" in parquet_file:
        return "train"
    elif "test" in parquet_file:
        return "test"
    elif "validation" in parquet_file:
        return "validation"
    else:
        return "train"

def process_row(row, parquet_file_name):
    """
    The function to process each row from dataframe.
    Return the metadata as dictionary. 
    
    """
    metadata = {}
    metadata["path"] = f"{row["segment_id"]}.wav"
    metadata["url"] = f"https://huggingface.co/datasets/meetween/mumospee_gigaspeech/resolve/main/gigaspeech-parquet/{parquet_file_name}"
    metadata["type"] = "audio"
    try:
        metadata["duration"] = str(round(float(row['end_time']) - float(row['begin_time']), 2))
    except Exception as e:
        metadata["duration"] = "n/a"
    metadata["language"] = "en"
    metadata["transcript"] = row["text_transformed"]
    metadata["tag"] = "GigaSpeech"
    metadata["split"] = _get_split(parquet_file_name)
    metadata["license"] = "apache-2.0"

    return metadata

def main(config):
    """
    Pipeline entry point: read the parquet file named in *config*,
    process it partition-by-partition into audio metadata rows, and
    write the result to CSV at config["final_path"].
    """
    parquet_df, file_name = read_parquet_file(
        config["parquet_file_path"],
        npartitions=config["npartitions"],
        top=config["top"],
    )

    processed_df = process_parquet_df(
        parquet_df=parquet_df,
        file_name=file_name,
        process_row_func=process_row,
        process_partition=process_partition,
    )

    save_to_csv(processed_df, final_path=config["final_path"])