Xinsheng-Wang committed on
Commit
3ff32f6
·
verified ·
1 Parent(s): dfdb5bd

Delete merge_metadata.py

Browse files
Files changed (1) hide show
  1. merge_metadata.py +0 -113
merge_metadata.py DELETED
@@ -1,113 +0,0 @@
1
#!/usr/bin/env python3
"""
Consolidate the JSONL files under gmo-svs/annotation and
soulx-singer-eval/annotation into a single metadata.csv file.
"""

import json
import csv
from pathlib import Path
12
def load_jsonl(filepath: Path) -> list[dict]:
    """Read a JSONL file and return its records as a list of dicts.

    Blank (whitespace-only) lines are skipped.
    """
    with open(filepath, "r", encoding="utf-8") as fh:
        stripped = (raw.strip() for raw in fh)
        return [json.loads(text) for text in stripped if text]
21
-
22
-
23
def process_record(record: dict, source: str) -> dict:
    """
    Normalize a single record:

    - add a ``source`` field distinguishing gmo-svs from soulx-singer-eval
    - fold ``wav_fn`` (and the misspelled ``wa v`` key present in the raw
      data) into a single ``wav_path`` field; neither original key is kept
    """
    out: dict = {}
    wav_value = None
    for key, val in record.items():
        if key in ("wav_fn", "wa v"):
            # "wav_fn" always wins; the misspelled "wa v" only fills in
            # when no usable value has been seen yet.
            if key == "wav_fn" or wav_value is None:
                wav_value = val
        else:
            out[key] = val
    out["wav_path"] = wav_value
    out["source"] = source
    return out
43
-
44
-
45
def serialize_value(value):
    """Serialize a value into something CSV can store.

    Lists and tuples become JSON strings (non-ASCII kept as-is);
    everything else passes through unchanged.
    """
    if not isinstance(value, (list, tuple)):
        return value
    return json.dumps(value, ensure_ascii=False)
50
-
51
-
52
def flatten_record_for_csv(record: dict) -> dict:
    """
    Flatten a record for CSV writing: list/tuple values are converted to
    JSON strings via ``serialize_value``; other values pass through, and
    the original key order is preserved.
    """
    return {key: serialize_value(val) for key, val in record.items()}
62
-
63
-
64
def main():
    """Gather every annotation record and write metadata.csv next to this script."""
    root = Path(__file__).parent

    # (annotation directory, source tag) pairs to scan.
    source_dirs = [
        (root / "gmo-svs" / "annotation", "gmo-svs"),
        (root / "soulx-singer-eval" / "annotation", "soulx-singer-eval"),
    ]

    rows = []
    for ann_dir, tag in source_dirs:
        if not ann_dir.exists():
            print(f"警告: 目录不存在,跳过: {ann_dir}")
            continue

        for path in sorted(ann_dir.glob("*.jsonl")):
            loaded = load_jsonl(path)
            rows.extend(process_record(rec, tag) for rec in loaded)
            print(f"已加载 {path.name}: {len(loaded)} 条")

    if not rows:
        print("未找到任何记录")
        return

    # Union of every key that appears in any record.
    seen_keys = set()
    seen_keys.update(*(rec.keys() for rec in rows))

    # Column order: preferred leading columns first (source up front,
    # wav_path standing in for wav_fn), then the rest alphabetically.
    leading = ["source", "item_name", "dataset", "song_name", "language", "singer", "wav_path"]
    fieldnames = [k for k in leading if k in seen_keys] + sorted(seen_keys - set(leading))

    out_path = root / "metadata.csv"
    with open(out_path, "w", encoding="utf-8", newline="") as fh:
        writer = csv.DictWriter(fh, fieldnames=fieldnames, extrasaction="ignore")
        writer.writeheader()
        writer.writerows(flatten_record_for_csv(rec) for rec in rows)

    print(f"\n完成! 共 {len(rows)} 条记录已写入 {out_path}")
110
-
111
-
112
# Script entry point: only run the merge when executed directly.
if __name__ == "__main__":
    main()