jerpint committed on
Commit
f5f457e
·
1 Parent(s): e57b7be

remove unnecessary columns

Browse files
Files changed (2) hide show
  1. create_valid_csv.py +15 -12
  2. metadata-valid.csv +2 -2
create_valid_csv.py CHANGED
@@ -50,37 +50,40 @@ if __name__ == "__main__":
50
  output_json = "files.json"
51
  output_csv = "metadata-valid.csv"
52
  metadata_csv = "metadata-balanced.csv"
53
- models = ["commonvoice", "metavoice", "playht", "stylettsv2", "xttsv2"]
 
 
54
 
55
  # Load the metadata
56
  df = pd.read_csv(metadata_csv)
 
57
 
58
  # Create the JSON file
59
- data = create_json(df, models, output_json)
60
 
61
  # Get paths that are only available for all models
62
- valid_paths = [path for path in data if len(data[path]) == len(models)]
63
 
64
  # Filter dataframe to only include valid paths
65
  valid_df = df[df.path.isin(valid_paths)]
66
 
67
  # Create an entry for each model in csv
68
  all_dfs = []
69
- for model in models:
70
- valid_df_model = valid_df.copy()
71
- valid_df_model["source"] = model
72
- all_dfs.append(valid_df_model)
73
 
74
  # Add is_cloned_voice column
75
- is_cloned_voice = model != "commonvoice"
76
- valid_df_model["is_cloned_voice"] = is_cloned_voice
77
 
78
  # Add fname column
79
- valid_df_model["filename"] = valid_df_model["path"]
80
 
81
  # Add path column
82
- valid_df_model["path"] = valid_df_model["path"].apply(
83
- lambda path: os.path.join(model, path)
84
  )
85
 
86
  all_df = pd.concat(all_dfs, ignore_index=True)
 
50
  output_json = "files.json"
51
  output_csv = "metadata-valid.csv"
52
  metadata_csv = "metadata-balanced.csv"
53
+ sources = ["commonvoice", "metavoice", "playht", "stylettsv2", "xttsv2"]
54
+ columns_to_keep = ["path", "age", "gender", "accents", "sentence"] # from the original metadata of commonvoice
55
+
56
 
57
  # Load the metadata
58
  df = pd.read_csv(metadata_csv)
59
+ df = df[columns_to_keep]
60
 
61
  # Create the JSON file
62
+ data = create_json(df, sources, output_json)
63
 
64
  # Get paths that are only available for all models
65
+ valid_paths = [path for path in data if len(data[path]) == len(sources)]
66
 
67
  # Filter dataframe to only include valid paths
68
  valid_df = df[df.path.isin(valid_paths)]
69
 
70
  # Create an entry for each model in csv
71
  all_dfs = []
72
+ for source in sources:
73
+ valid_df_source = valid_df.copy()
74
+ valid_df_source["source"] = source
75
+ all_dfs.append(valid_df_source)
76
 
77
  # Add is_cloned_voice column
78
+ is_cloned_voice = source != "commonvoice"
79
+ valid_df_source["is_cloned_voice"] = is_cloned_voice
80
 
81
  # Add fname column
82
+ valid_df_source["filename"] = valid_df_source["path"]
83
 
84
  # Add path column
85
+ valid_df_source["path"] = valid_df_source["path"].apply(
86
+ lambda path: os.path.join(source, path)
87
  )
88
 
89
  all_df = pd.concat(all_dfs, ignore_index=True)
metadata-valid.csv CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:cf161465f3cc561871b02471f833fada89a767b6546ab48c0026696a0fd8b10b
3
- size 1814016
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2d078991bedfe4aecaca197f17f857bd395c80a3cfc39dab4a7330fa42834059
3
+ size 849080