jayliqinzhang commited on
Commit
e3b7714
·
verified ·
1 Parent(s): bda21eb

Upload script/gigaspeech.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. script/gigaspeech.py +56 -0
script/gigaspeech.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+ from process_from_parquet import read_parquet_file, process_parquet_df, save_to_csv
3
+
4
def process_partition(partition, process_row_with_params):
    """
    Apply a per-row processing function to one dataframe partition.

    Each row is mapped to a metadata dict by ``process_row_with_params``;
    the resulting Series of dicts is then materialized as a DataFrame with
    a fixed metadata column order.

    Parameters
    ----------
    partition : pandas.DataFrame
        One partition of the parquet dataframe.
    process_row_with_params : callable
        Row -> dict function (typically ``process_row`` with bound params).

    Returns
    -------
    pandas.DataFrame
        One row of metadata per input row, columns in canonical order.
    """
    columns = [
        "path", "url", "type", "duration", "language",
        "transcript", "tag", "split", "license",
    ]
    row_dicts = partition.apply(process_row_with_params, axis=1).tolist()
    # Materialize the Series of dicts as a DataFrame with a stable column order.
    return pd.DataFrame(row_dicts, columns=columns)
14
+ def _get_split(parquet_file):
15
+ if "train" in parquet_file:
16
+ return "train"
17
+ elif "test" in parquet_file:
18
+ return "test"
19
+ elif "validation" in parquet_file:
20
+ return "validation"
21
+ else:
22
+ return "train"
23
+
24
def process_row(row, parquet_file_name):
    """
    Build the metadata dictionary for a single dataframe row.

    Parameters
    ----------
    row : mapping (pandas Series or dict)
        Must provide "segment_id" and "text_transformed"; "begin_time" and
        "end_time" are used for the duration when present and numeric.
    parquet_file_name : str
        Source parquet file name; embedded in the URL and used to infer
        the split via ``_get_split``.

    Returns
    -------
    dict
        Keys: path, url, type, duration, language, transcript, tag,
        split, license (all string values).
    """
    metadata = {}
    # FIX: the original used f"{row["segment_id"]}.wav" — nested double
    # quotes inside an f-string are a SyntaxError before Python 3.12
    # (PEP 701); single quotes parse everywhere.
    metadata["path"] = f"{row['segment_id']}.wav"
    metadata["url"] = f"https://huggingface.co/datasets/meetween/mumospee_gigaspeech/resolve/main/gigaspeech-parquet/{parquet_file_name}"
    metadata["type"] = "audio"
    try:
        metadata["duration"] = str(round(float(row['end_time']) - float(row['begin_time']), 2))
    except (KeyError, TypeError, ValueError):
        # Missing or non-numeric timestamps: keep a sentinel value rather
        # than aborting the whole partition (narrowed from a blanket
        # `except Exception` that also swallowed unrelated bugs).
        metadata["duration"] = "n/a"
    metadata["language"] = "en"
    metadata["transcript"] = row["text_transformed"]
    metadata["tag"] = "GigaSpeech"
    metadata["split"] = _get_split(parquet_file_name)
    metadata["license"] = "apache-2.0"

    return metadata
46
def main(config):
    """
    End-to-end driver: read a parquet file, turn each row into metadata,
    and write the result out as CSV.

    Parameters
    ----------
    config : mapping
        Must supply "parquet_file_path", "npartitions", "top", and
        "final_path".
    """
    source_path = config["parquet_file_path"]
    parquet_df, file_name = read_parquet_file(
        source_path,
        npartitions=config["npartitions"],
        top=config["top"],
    )

    # Per-row processing is delegated to process_row; partition-level
    # assembly to process_partition.
    processed_df = process_parquet_df(
        parquet_df=parquet_df,
        file_name=file_name,
        process_row_func=process_row,
        process_partition=process_partition,
    )

    save_to_csv(processed_df, final_path=config["final_path"])