jayliqinzhang commited on
Commit
4db4715
·
verified ·
1 Parent(s): ee81d8b

Upload script/libritts.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. script/libritts.py +62 -0
script/libritts.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from process_from_parquet import read_parquet_file, process_parquet_df, save_to_csv
2
+
3
+ import pandas as pd
4
+
5
def process_partition(partition, process_row_with_params):
    """Apply the per-row processor to every row of *partition*.

    Each row is mapped to a metadata dict by ``process_row_with_params``;
    the resulting Series of dicts is then materialised as a DataFrame with
    a fixed column order so downstream audio-partition processing sees a
    stable schema.
    """
    columns = ["path", "url" ,"type", "duration", "language", "transcript", "tag", "split", "license"]
    processed = partition.apply(process_row_with_params, axis=1)
    # Series of dicts -> DataFrame with the canonical column ordering.
    return pd.DataFrame(list(processed), columns=columns)
14
+
15
+
16
+ def _get_split(parquet_file):
17
+ if "train" in parquet_file:
18
+ return "train"
19
+ elif "test" in parquet_file:
20
+ return "test"
21
+ elif "dev" in parquet_file:
22
+ return "validation"
23
+ else:
24
+ return "train"
25
+
26
+
27
+ def _modify_value(value):
28
+ a = round(value, 2)
29
+ return a
30
+
31
def process_row(row, parquet_file_name):
    """Build the metadata dict for one dataframe row.

    The row must expose ``row["original"]`` (the transcript text) and
    ``row["duration"]`` (seconds, rounded to two decimals here). The split
    is derived from the parquet file name; everything else is constant for
    the LibriTTS dataset.
    """
    return {
        "path": "n/a",
        "url": f"https://huggingface.co/datasets/meetween/mumospee_libritts/resolve/main/libritts-parquet/{parquet_file_name}",
        "transcript": row["original"],
        "type": "audio",
        "duration": _modify_value(row["duration"]),
        "language": "en",
        "tag": "LibriTTS",
        "split": _get_split(parquet_file_name),
        "license": "CC-BY-4.0",
    }
52
+
53
+
54
def main(config):
    """Read a parquet file, convert its rows to metadata, and save a CSV.

    Expects ``config`` to provide "parquet_file_path", "top" (row limit
    for the read), and "final_path" (CSV destination).
    """
    frame, name = read_parquet_file(config["parquet_file_path"], top=config["top"])

    metadata_df = process_parquet_df(
        parquet_df=frame,
        file_name=name,
        process_row_func=process_row,
        process_partition=process_partition,
    )

    save_to_csv(metadata_df, final_path=config["final_path"])