import os
import sys
import json
import subprocess

# Add scripts/tokenizers to sys.path so that process_asr_text_tokenizer can be imported.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "scripts", "tokenizers")))

# process_asr_text_tokenizer uses double-underscore (module-private) functions internally
# and builds its argparse parser at import time, so importing it here would conflict with
# this script. Instead, we invoke it as a subprocess (see main()).
# To compute VOCAB_SIZE dynamically, we first read the manifests and collect every unique
# character.
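# Each manifest is expected to be in NeMo's JSON-lines ASR format, one object per line
# with "audio_filepath", "duration", and "text" fields. A representative (hypothetical)
# line:
#   {"audio_filepath": "clips/sample_000001.mp3", "duration": 3.2, "text": "こんにちは"}
# calculate_vocab_size below only consumes the "text" field.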

def calculate_vocab_size(manifest_paths):
    unique_chars = set()
    for manifest_path in manifest_paths:
        with open(manifest_path, 'r', encoding='utf-8') as f:
            for line in f:
                item = json.loads(line)
                text = item.get('text', '')
                for char in text:
                    unique_chars.add(char)
    
    # Vocabulary size = number of unique characters + headroom for special tokens
    # (e.g. <s>, </s>, <pad>, <unk>). Reserving 10-20 slots is typical; we add 20.
    vocab_size = len(unique_chars) + 20
    # Some pipelines prefer vocab_size to be a power of two or another specific multiple;
    # here we simply return the computed value.
    return vocab_size
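# Illustrative numbers only: manifests containing 1,480 unique characters would yield
# a vocab_size of 1,500 (1,480 + 20 reserved slots).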

def main():
    manifest_paths = [
        "data/common_voice_11_0/ja/train/train_common_voice_11_0_manifest.json",
        "data/common_voice_11_0/ja/test_tarred_1bk/tarred_audio_manifest.json",
        "data/common_voice_11_0/ja/validation/validation_common_voice_11_0_manifest.json"
    ]
    manifest_paths_str = ",".join(manifest_paths)
    data_root = "data/common_voice_11_0/ja/tokenizers"
    
    print("Calculating dynamic VOCAB_SIZE...")
    vocab_size = calculate_vocab_size(manifest_paths)
    print(f"Calculated VOCAB_SIZE: {vocab_size}")

    # Call process_asr_text_tokenizer via subprocess: importing it would trigger its
    # import-time command-line parsing and raise argparse errors. Prepend this script's
    # directory to PYTHONPATH so the tokenizer script can locate the nemo package.
    env = os.environ.copy()
    env["PYTHONPATH"] = os.path.abspath(os.path.dirname(__file__)) + os.pathsep + env.get("PYTHONPATH", "")

    command = [
        "python",
        "scripts/tokenizers/process_asr_text_tokenizer.py",
        f"--manifest={manifest_paths_str}",
        f"--vocab_size={vocab_size}",
        f"--data_root={data_root}",
        "--tokenizer=spe",
        "--spe_type=bpe"
    ]
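    # With a hypothetical vocab_size of 1500, the list above is equivalent to running:
    #   python scripts/tokenizers/process_asr_text_tokenizer.py \
    #       --manifest=<train>,<test_tarred>,<validation> --vocab_size=1500 \
    #       --data_root=data/common_voice_11_0/ja/tokenizers --tokenizer=spe --spe_type=bpe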
    
    # Run inside the conda environment (assumed to be named "NeMo") where NeMo is installed.
    command = ["conda", "run", "-n", "NeMo"] + command
    
    print(f"Running command: \n{' '.join(command)}")
    # 在 Windows 上运行 conda 这种批处理/环境变量命令,需要 shell=True,或者指定 conda 的绝对路径
    result = subprocess.run(command, env=env, shell=True)
    exit_code = result.returncode
    if exit_code != 0:
        print(f"Error executing tokenizer script, exit code: {exit_code}")
        sys.exit(1)
    else:
        print("Tokenizer processing completed successfully.")

if __name__ == "__main__":
    main()