simler commited on
Commit
b245537
·
verified ·
1 Parent(s): 5eceff1

Upload 68 files

Browse files
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50) hide show
  1. genie_tts/Audio/Audio.py +51 -0
  2. genie_tts/Audio/ReferenceAudio.py +82 -0
  3. genie_tts/Audio/__init__.py +0 -0
  4. genie_tts/Converter/Converter.py +11 -0
  5. genie_tts/Converter/__init__.py +0 -0
  6. genie_tts/Converter/load_state_dict.py +26 -0
  7. genie_tts/Converter/utils.py +30 -0
  8. genie_tts/Converter/v2/Converter.py +146 -0
  9. genie_tts/Converter/v2/EncoderConverter.py +106 -0
  10. genie_tts/Converter/v2/T2SConverter.py +125 -0
  11. genie_tts/Converter/v2/VITSConverter.py +129 -0
  12. genie_tts/Converter/v2/__init__.py +0 -0
  13. genie_tts/Converter/v2ProPlus/Converter.py +89 -0
  14. genie_tts/Converter/v2ProPlus/PromptEncoderConverter.py +128 -0
  15. genie_tts/Core/Inference.py +112 -0
  16. genie_tts/Core/Resources.py +76 -0
  17. genie_tts/Core/TTSPlayer.py +241 -0
  18. genie_tts/Core/__init__.py +0 -0
  19. genie_tts/Data/v2/Keys/t2s_onnx_keys.txt +291 -0
  20. genie_tts/Data/v2/Keys/vits_onnx_keys.txt +668 -0
  21. genie_tts/Data/v2/Models/t2s_encoder_fp32.onnx +3 -0
  22. genie_tts/Data/v2/Models/t2s_first_stage_decoder_fp32.onnx +3 -0
  23. genie_tts/Data/v2/Models/t2s_stage_decoder_fp32.onnx +3 -0
  24. genie_tts/Data/v2/Models/vits_fp32.onnx +3 -0
  25. genie_tts/Data/v2ProPlus/Keys/prompt_encoder_weights.txt +23 -0
  26. genie_tts/Data/v2ProPlus/Keys/vits_weights.txt +650 -0
  27. genie_tts/Data/v2ProPlus/Models/prompt_encoder_fp32.onnx +3 -0
  28. genie_tts/Data/v2ProPlus/Models/vits_fp32.onnx +3 -0
  29. genie_tts/G2P/Chinese/ChineseG2P.py +186 -0
  30. genie_tts/G2P/Chinese/CorrectPronunciation.py +50 -0
  31. genie_tts/G2P/Chinese/Erhua.py +49 -0
  32. genie_tts/G2P/Chinese/Normalization/__init__.py +0 -0
  33. genie_tts/G2P/Chinese/Normalization/char_convert.py +35 -0
  34. genie_tts/G2P/Chinese/Normalization/chronology.py +144 -0
  35. genie_tts/G2P/Chinese/Normalization/constants.py +61 -0
  36. genie_tts/G2P/Chinese/Normalization/num.py +340 -0
  37. genie_tts/G2P/Chinese/Normalization/phonecode.py +59 -0
  38. genie_tts/G2P/Chinese/Normalization/quantifier.py +62 -0
  39. genie_tts/G2P/Chinese/Normalization/text_normlization.py +169 -0
  40. genie_tts/G2P/Chinese/ToneSandhi.py +354 -0
  41. genie_tts/G2P/Chinese/__init__.py +0 -0
  42. genie_tts/G2P/English/EnglishG2P.py +296 -0
  43. genie_tts/G2P/English/Normalization.py +286 -0
  44. genie_tts/G2P/English/WordSegment.py +143 -0
  45. genie_tts/G2P/English/__init__.py +0 -0
  46. genie_tts/G2P/Japanese/JapaneseG2P.py +150 -0
  47. genie_tts/G2P/Japanese/__init__.py +0 -0
  48. genie_tts/G2P/SymbolsV2.py +119 -0
  49. genie_tts/G2P/__init__.py +0 -0
  50. genie_tts/GUI/AudioPlayer.py +94 -0
genie_tts/Audio/Audio.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import soundfile as sf
3
+ import soxr
4
+ import numpy as np
5
+ import logging
6
+ from typing import Optional
7
+
8
+ logger = logging.getLogger(__name__)
9
+
10
+ # 音频时长建议范围 (秒)
11
+ MIN_DURATION_S = 3
12
+ MAX_DURATION_S = 10
13
+ # 在音频末尾追加的静音时长 (秒)
14
+ SILENCE_TO_APPEND_S = 0.3
15
+ # 模型期望的目标采样率
16
+ TARGET_SAMPLING_RATE = 16000
17
+
18
+
19
def load_audio(
        audio_path: str,
        target_sampling_rate: int = TARGET_SAMPLING_RATE
) -> Optional[np.ndarray]:
    """Load an audio file, downmix to mono, resample, and append trailing silence.

    Args:
        audio_path: Path to an audio file readable by ``soundfile``.
        target_sampling_rate: Sampling rate the model expects (defaults to
            ``TARGET_SAMPLING_RATE``).

    Returns:
        A 1-D float32 waveform at ``target_sampling_rate`` with
        ``SILENCE_TO_APPEND_S`` seconds of silence appended, or ``None``
        when the file cannot be read.
    """
    try:
        wav, original_sr = sf.read(audio_path, dtype='float32')
        if wav.ndim > 1:
            wav = np.mean(wav, axis=1)  # Downmix multi-channel audio to mono.
        if original_sr != target_sampling_rate:
            # Resample to the model's expected rate.
            wav = soxr.resample(wav, original_sr, target_sampling_rate, quality='hq')
    except Exception as e:
        logger.error(f"Failed to load reference audio: {audio_path}. Error: {e}")
        return None

    # Warn when the duration falls outside the recommended range.
    min_samples = int(MIN_DURATION_S * target_sampling_rate)
    max_samples = int(MAX_DURATION_S * target_sampling_rate)
    if not (min_samples <= wav.shape[0] <= max_samples):
        duration = len(wav) / target_sampling_rate
        logger.warning(
            f"The reference audio '{os.path.basename(audio_path)}' has a duration of {duration:.2f} seconds, "
            f"which is outside the recommended range of {MIN_DURATION_S} to {MAX_DURATION_S} seconds!"
        )

    # Append a short stretch of silence at the end of the waveform.
    silence_samples = int(SILENCE_TO_APPEND_S * target_sampling_rate)
    silence_array = np.zeros(silence_samples, dtype=np.float32)
    return np.concatenate([wav, silence_array])
genie_tts/Audio/ReferenceAudio.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from ..Utils.Utils import LRUCacheDict
2
+ from ..GetPhonesAndBert import get_phones_and_bert
3
+ from ..Audio.Audio import load_audio
4
+ from ..ModelManager import model_manager
5
+
6
+ from onnxruntime import InferenceSession
7
+ import os
8
+ import numpy as np
9
+ import soxr
10
+ from typing import Optional, Dict
11
+
12
+
13
class ReferenceAudio:
    """Cached holder of reference-audio state used for voice cloning.

    Bundles the prompt text's phoneme/BERT features, 32 kHz / 16 kHz
    waveforms, HuBERT SSL content, and (lazily) speaker embeddings.
    Instances are memoized per audio path in an LRU cache, so constructing
    with the same ``prompt_wav`` returns the same object.
    """

    # Per-path instance cache; capacity configurable via the
    # Max_Cached_Reference_Audio environment variable (default 10).
    _prompt_cache: Dict[str, 'ReferenceAudio'] = LRUCacheDict(
        capacity=int(os.getenv('Max_Cached_Reference_Audio', '10')))

    def __new__(cls, prompt_wav: str, prompt_text: str, language: str):
        # Reuse the cached instance for this audio path if present.
        if prompt_wav in cls._prompt_cache:
            instance = cls._prompt_cache[prompt_wav]
            if instance.text != prompt_text:  # Refresh text features if the prompt text changed.
                instance.set_text(prompt_text, language=language)
            return instance

        instance = super().__new__(cls)
        cls._prompt_cache[prompt_wav] = instance
        return instance

    def __init__(self, prompt_wav: str, prompt_text: str, language: str):
        # __init__ also runs on cached instances returned by __new__;
        # skip re-initialization in that case.
        if hasattr(self, '_initialized'):
            return

        # Text-related state.
        self.text: str = prompt_text
        self.phonemes_seq: Optional[np.ndarray] = None
        self.text_bert: Optional[np.ndarray] = None
        self.set_text(prompt_text, language=language)

        # Audio-related state.
        # NOTE(review): load_audio returns None on read failure, which would
        # make the soxr.resample call below raise — confirm callers guarantee
        # a readable file, or add an explicit error.
        self.audio_32k: Optional[np.ndarray] = load_audio(
            audio_path=prompt_wav,
            target_sampling_rate=32000
        )
        self.audio_16k: np.ndarray = soxr.resample(self.audio_32k, 32000, 16000, quality='hq')

        # Append 0.3 s of silence so reference-audio content does not leak
        # into the generated output. This mirrors GPT-SoVITS: the padding
        # helps the model separate reference content from target content.
        zero_padding_16k = np.zeros(int(16000 * 0.3), dtype=self.audio_16k.dtype)
        audio_16k_padded = np.concatenate([self.audio_16k, zero_padding_16k])

        self.audio_32k = np.expand_dims(self.audio_32k, axis=0)
        self.audio_16k = np.expand_dims(self.audio_16k, axis=0)  # Add a batch dimension.

        if not model_manager.cn_hubert:
            model_manager.load_cn_hubert()
        # Extract SSL features from the silence-padded audio.
        self.ssl_content: Optional[np.ndarray] = model_manager.cn_hubert.run(
            None, {'input_values': np.expand_dims(audio_16k_padded, axis=0)}
        )[0]

        # Speaker embeddings are computed lazily by update_global_emb.
        self.global_emb: Optional[np.ndarray] = None
        self.global_emb_advanced: Optional[np.ndarray] = None

        self._initialized = True

    def set_text(self, prompt_text: str, language: str) -> None:
        """Update the prompt text and recompute its phoneme/BERT features."""
        self.text = prompt_text
        self.phonemes_seq, self.text_bert = get_phones_and_bert(prompt_text, language=language)

    @classmethod
    def clear_cache(cls) -> None:
        """Clear the ReferenceAudio instance cache."""
        cls._prompt_cache.clear()

    def update_global_emb(self, prompt_encoder: InferenceSession) -> None:
        """Compute and cache speaker embeddings via *prompt_encoder*.

        No-op when the embedding is already computed or when the speaker
        verification model cannot be loaded.
        """
        if self.global_emb is not None:
            return
        if model_manager.load_sv_model():
            sv_emb = model_manager.speaker_verification_model.run(None, {'waveform': self.audio_16k})[0]
            self.global_emb, self.global_emb_advanced = prompt_encoder.run(None, {
                'ref_audio': self.audio_32k,
                'sv_emb': sv_emb,
            })
genie_tts/Audio/__init__.py ADDED
File without changes
genie_tts/Converter/Converter.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .v2.Converter import convert as convert_v2
2
+ from .v2ProPlus.Converter import convert as convert_v2pp
3
+
4
+ import os
5
+
6
+
7
def convert(torch_ckpt_path: str, torch_pth_path: str, output_dir: str) -> None:
    """Convert GPT-SoVITS torch checkpoints to ONNX, auto-detecting the model type.

    Args:
        torch_ckpt_path: Path to the GPT (.ckpt) checkpoint.
        torch_pth_path: Path to the SoVITS (.pth) weights.
        output_dir: Directory that receives the converted files.
    """
    # Heuristic: SoVITS weights larger than 150 MB are V2 ProPlus models;
    # smaller files go through the V2 pipeline.
    V2PP_MIN_PTH_SIZE = 150 * 1024 * 1024
    if os.path.getsize(torch_pth_path) > V2PP_MIN_PTH_SIZE:
        convert_v2pp(torch_ckpt_path, torch_pth_path, output_dir)
    else:
        convert_v2(torch_ckpt_path, torch_pth_path, output_dir)
genie_tts/Converter/__init__.py ADDED
File without changes
genie_tts/Converter/load_state_dict.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import os
3
+
4
+ sys.path.append(os.path.dirname(__file__))
5
+
6
+ import torch
7
+ from io import BytesIO
8
+ import utils
9
+
10
+
11
+ def load_sovits_model(pth_path: str, device: str = 'cpu'):
12
+ f = open(pth_path, "rb")
13
+ meta = f.read(2)
14
+ if meta != b"PK":
15
+ # noinspection PyTypeChecker
16
+ data = b"PK" + f.read()
17
+ bio = BytesIO()
18
+ # noinspection PyTypeChecker
19
+ bio.write(data)
20
+ bio.seek(0)
21
+ return torch.load(bio, map_location=device, weights_only=False)
22
+ return torch.load(pth_path, map_location=device, weights_only=False)
23
+
24
+
25
+ def load_gpt_model(ckpt_path: str, device: str = 'cpu'):
26
+ return torch.load(ckpt_path, map_location=device, weights_only=True)
genie_tts/Converter/utils.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
class HParams:
    """Dict-like hyper-parameter container with attribute access.

    Nested dicts passed to the constructor are wrapped recursively, so
    ``hp.model.dim`` works for ``HParams(model={'dim': 8})``. Supports
    ``len``, ``in``, item get/set, and the ``keys/items/values`` views.
    """

    def __init__(self, **kwargs):
        for k, v in kwargs.items():
            # isinstance (not type == dict) also wraps dict subclasses.
            if isinstance(v, dict):
                v = HParams(**v)
            self[k] = v

    def keys(self):
        return self.__dict__.keys()

    def items(self):
        return self.__dict__.items()

    def values(self):
        return self.__dict__.values()

    def __len__(self):
        return len(self.__dict__)

    def __getitem__(self, key):
        return getattr(self, key)

    def __setitem__(self, key, value):
        return setattr(self, key, value)

    def __contains__(self, key):
        return key in self.__dict__

    def __repr__(self):
        return self.__dict__.__repr__()
genie_tts/Converter/v2/Converter.py ADDED
@@ -0,0 +1,146 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .VITSConverter import VITSConverter
2
+ from .T2SConverter import T2SModelConverter
3
+ from .EncoderConverter import EncoderConverter
4
+ from ...Utils.Constants import PACKAGE_NAME
5
+
6
+ import logging
7
+ from typing import Optional, Tuple
8
+ import re
9
+ import os
10
+ import shutil
11
+ import traceback
12
+ import importlib.resources
13
+ import contextlib
14
+
15
+ logger = logging.getLogger()
16
+
17
+ CACHE_DIR = os.path.join(os.getcwd(), "Cache")
18
+ ENCODER_RESOURCE_PATH = "Data/v2/Models/t2s_encoder_fp32.onnx"
19
+ STAGE_DECODER_RESOURCE_PATH = "Data/v2/Models/t2s_stage_decoder_fp32.onnx"
20
+ FIRST_STAGE_DECODER_RESOURCE_PATH = "Data/v2/Models/t2s_first_stage_decoder_fp32.onnx"
21
+ VITS_RESOURCE_PATH = "Data/v2/Models/vits_fp32.onnx"
22
+ T2S_KEYS_RESOURCE_PATH = "Data/v2/Keys/t2s_onnx_keys.txt"
23
+ VITS_KEYS_RESOURCE_PATH = "Data/v2/Keys/vits_onnx_keys.txt"
24
+
25
+
26
def find_ckpt_and_pth(directory: str) -> Tuple[Optional[str], Optional[str]]:
    """Locate the best .ckpt and .pth files in *directory* (non-recursive).

    For each extension the epoch is parsed from an 'e{N}' token in the file
    name (a missing token counts as epoch 0) and the file with the highest
    epoch wins; ties are broken by the newer modification time.

    Returns:
        ``(best_ckpt_path, best_pth_path)``; either element is ``None`` when
        no matching file exists.
    """

    def _is_better(epoch: int, path: str, best_epoch: int, best_path: Optional[str]) -> bool:
        # Higher epoch wins; equal epochs fall back to the newer mtime.
        if epoch > best_epoch:
            return True
        return (
            epoch == best_epoch
            and best_path is not None
            and os.path.getmtime(path) > os.path.getmtime(best_path)
        )

    best_ckpt_path: Optional[str] = None
    best_ckpt_epoch: int = -1
    best_pth_path: Optional[str] = None
    best_pth_epoch: int = -1

    for filename in os.listdir(directory):
        full_path = os.path.join(directory, filename)
        if not os.path.isfile(full_path):
            continue

        # Extract the epoch number embedded in the file name.
        m = re.search(r"e(\d+)", filename, flags=re.IGNORECASE)
        epoch = int(m.group(1)) if m else 0

        lowered = filename.lower()
        if lowered.endswith(".ckpt"):
            if _is_better(epoch, full_path, best_ckpt_epoch, best_ckpt_path):
                best_ckpt_epoch = epoch
                best_ckpt_path = full_path
        elif lowered.endswith(".pth"):
            if _is_better(epoch, full_path, best_pth_epoch, best_pth_path):
                best_pth_epoch = epoch
                best_pth_path = full_path

    return best_ckpt_path, best_pth_path
78
+
79
+
80
def remove_folder(folder: str) -> None:
    """Delete *folder* and its contents if it exists, logging the outcome."""
    try:
        if not os.path.exists(folder):
            return
        shutil.rmtree(folder)
        logger.info(f"🧹 Folder cleaned: {folder}")
    except Exception as e:
        logger.error(f"❌ Failed to clean folder {folder}: {e}")
87
+
88
+
89
def convert(torch_ckpt_path: str,
            torch_pth_path: str,
            output_dir: str):
    """Convert V2-format GPT (.ckpt) and SoVITS (.pth) checkpoints to ONNX.

    Materializes the packaged template ONNX models as real files, then runs
    the T2S, VITS and encoder converters in sequence. The output directory
    is removed on failure; the cache directory is removed in all cases.
    """
    # Make sure the cache and output directories exist.
    os.makedirs(CACHE_DIR, exist_ok=True)
    os.makedirs(output_dir, exist_ok=True)

    if len(os.listdir(output_dir)) > 0:
        logger.warning(f"The output directory {output_dir} is not empty!")

    with contextlib.ExitStack() as stack:
        files = importlib.resources.files(PACKAGE_NAME)

        def enter(p):
            # Materialize a packaged resource as a filesystem path valid for
            # the lifetime of the ExitStack.
            return stack.enter_context(importlib.resources.as_file(files.joinpath(p)))

        encoder_onnx_path = enter(ENCODER_RESOURCE_PATH)
        stage_decoder_path = enter(STAGE_DECODER_RESOURCE_PATH)
        first_stage_decoder_path = enter(FIRST_STAGE_DECODER_RESOURCE_PATH)
        vits_onnx_path = enter(VITS_RESOURCE_PATH)
        t2s_keys_path = enter(T2S_KEYS_RESOURCE_PATH)
        vits_keys_path = enter(VITS_KEYS_RESOURCE_PATH)

        converter_1 = T2SModelConverter(
            torch_ckpt_path=torch_ckpt_path,
            stage_decoder_onnx_path=str(stage_decoder_path),
            first_stage_decoder_onnx_path=str(first_stage_decoder_path),
            key_list_file=str(t2s_keys_path),
            output_dir=output_dir,
            cache_dir=CACHE_DIR,
        )
        converter_2 = VITSConverter(
            torch_pth_path=torch_pth_path,
            vits_onnx_path=str(vits_onnx_path),
            key_list_file=str(vits_keys_path),
            output_dir=output_dir,
            cache_dir=CACHE_DIR,
        )
        converter_3 = EncoderConverter(
            ckpt_path=torch_ckpt_path,
            pth_path=torch_pth_path,
            onnx_input_path=str(encoder_onnx_path),
            output_dir=output_dir,
        )

        try:
            converter_1.run_full_process()
            converter_2.run_full_process()
            converter_3.run_full_process()
            logger.info(f"🎉 Conversion successful! Saved to: {os.path.abspath(output_dir)}\n"
                        f"- Model Type: V2")
        except Exception:
            logger.error(f"❌ A critical error occurred during the conversion process")
            logger.error(traceback.format_exc())
            remove_folder(output_dir)  # Clean the output directory only on failure.
        finally:
            # Always attempt to clean the cache directory, success or failure.
            remove_folder(CACHE_DIR)
genie_tts/Converter/v2/EncoderConverter.py ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import onnx
3
+ import os
4
+
5
+ from ..load_state_dict import load_gpt_model, load_sovits_model
6
+
7
+
8
class EncoderConverter:
    """
    Converter that produces, for the t2s_encoder model:
    1. A full-precision (fp32) .bin weight file merged from the .ckpt and .pth files.
    2. An ONNX model whose initializers link to that .bin file.
    """

    def __init__(self,
                 ckpt_path: str,
                 pth_path: str,
                 onnx_input_path: str,
                 output_dir: str,
                 ):
        self.ckpt_path: str = ckpt_path
        self.pth_path: str = pth_path
        self.onnx_input_path: str = onnx_input_path
        self.output_dir: str = output_dir

        # Final output file paths.
        self.output_bin_path: str = os.path.join(self.output_dir, "t2s_encoder_fp32.bin")
        self.output_onnx_path: str = os.path.join(self.output_dir, "t2s_encoder_fp32.onnx")

        # Make sure the output directory exists.
        os.makedirs(self.output_dir, exist_ok=True)

        # Verify that every input file exists.
        for path in [self.ckpt_path, self.pth_path, self.onnx_input_path]:
            if not os.path.exists(path):
                raise FileNotFoundError(f"Error: Input file not found! Path: {path}")

    def run_full_process(self):
        """Merge encoder weights into a .bin file and relink the ONNX model to it."""
        # 1. Fixed list of ONNX weight keys (this order defines the .bin layout).
        onnx_keys = [
            "encoder.ar_text_embedding.word_embeddings.weight",
            "encoder.bert_proj.weight",
            "encoder.bert_proj.bias",
            "encoder.ar_text_position.alpha",
            "vits.ssl_proj.weight",
            "vits.ssl_proj.bias",
            "vits.quantizer.vq.layers.0._codebook.embed"
        ]

        # 2. Load all required models and weights.
        ckpt_state_dict = load_gpt_model(self.ckpt_path)['weight']
        pth_state_dict = load_sovits_model(self.pth_path)['weight']
        model = onnx.load(self.onnx_input_path, load_external_data=False)
        initializer_map = {init.name: init for init in model.graph.initializer}
        current_offset = 0
        bin_filename = os.path.basename(self.output_bin_path)

        # 3. Write the .bin file while updating the ONNX model in lockstep.
        with open(self.output_bin_path, 'wb') as f_bin:
            for onnx_key in onnx_keys:
                source_key = ""
                source_dict = None

                # 'encoder.*' keys come from the GPT checkpoint; 'vits.*'
                # keys come from the SoVITS weights.
                if onnx_key.startswith("encoder."):
                    source_key = "model." + onnx_key[len("encoder."):]
                    source_dict = ckpt_state_dict
                elif onnx_key.startswith("vits."):
                    source_key = onnx_key[len("vits."):]
                    source_dict = pth_state_dict

                if source_dict is None:
                    raise ValueError(
                        f"❌ Critical error: Unable to determine the weight source for ONNX key '{onnx_key}'.")
                # Pull the tensor from the source state dict.
                tensor = source_dict.get(source_key)
                if tensor is None:
                    raise ValueError(
                        f"❌ Critical error: Key '{source_key}' (corresponding to ONNX key '{onnx_key}') not found in the source file.")

                # Convert to an fp32 numpy array and serialize its raw bytes.
                numpy_array_fp32 = tensor.to(torch.float32).cpu().numpy()
                tensor_bytes = numpy_array_fp32.tobytes()
                tensor_length = len(tensor_bytes)
                f_bin.write(tensor_bytes)

                # Point the matching ONNX initializer at the external data.
                if onnx_key in initializer_map:
                    tensor_proto = initializer_map[onnx_key]

                    tensor_proto.ClearField('raw_data')
                    tensor_proto.data_location = onnx.TensorProto.EXTERNAL
                    del tensor_proto.external_data[:]

                    keys_to_set = ["location", "offset", "length"]
                    values_to_set = [bin_filename, str(current_offset), str(tensor_length)]

                    for k, v in zip(keys_to_set, values_to_set):
                        entry = tensor_proto.external_data.add()
                        entry.key = k
                        entry.value = v

                # Advance the offset for the next weight.
                current_offset += tensor_length

        # 4. Save the modified ONNX model.
        onnx.save(model, self.output_onnx_path)
genie_tts/Converter/v2/T2SConverter.py ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import onnx
3
+ import numpy as np
4
+ import json
5
+ import os
6
+ from collections import OrderedDict
7
+
8
+ from ..load_state_dict import load_gpt_model
9
+
10
+
11
class T2SModelConverter:
    """
    Dedicated converter for the t2s (text-to-semantic) model.
    - PyTorch source: a GPT .ckpt file.
    - ONNX targets: t2s_first_stage_decoder and t2s_stage_decoder.
    - Applies a fixed ONNX->PyTorch key-name mapping.
    """

    def __init__(self,
                 torch_ckpt_path: str,
                 stage_decoder_onnx_path: str,
                 first_stage_decoder_onnx_path: str,
                 key_list_file: str,
                 output_dir: str,
                 cache_dir: str,
                 ):
        self.torch_ckpt_path: str = torch_ckpt_path
        self.stage_decoder_onnx_path: str = stage_decoder_onnx_path
        self.first_stage_decoder_onnx_path: str = first_stage_decoder_onnx_path
        self.key_list_file: str = key_list_file
        self.output_dir: str = output_dir
        self.cache_dir: str = cache_dir

        # Fix: the original called os.makedirs(output_dir) twice and never
        # created cache_dir, which step1 writes the index table into.
        os.makedirs(self.cache_dir, exist_ok=True)
        os.makedirs(self.output_dir, exist_ok=True)

        # Output file paths.
        self.fp16_bin_path: str = os.path.join(self.output_dir, "t2s_shared_fp16.bin")
        self.index_table_path: str = os.path.join(self.cache_dir, "t2s_weights_index_fp32.json")
        self.relinked_encoder_path: str = os.path.join(self.output_dir, "t2s_encoder_fp32.onnx")
        self.relinked_stage_decoder_path: str = os.path.join(self.output_dir, "t2s_stage_decoder_fp32.onnx")
        self.relinked_first_stage_decoder_path: str = os.path.join(self.output_dir, "t2s_first_stage_decoder_fp32.onnx")
        self.reconstructed_fp32_bin_path = os.path.join(self.output_dir, "t2s_shared_fp32.bin")

    def step1_create_fp16_bin_with_key_mapping(self):
        """
        (1) Create the fp16 .bin plus an fp32-layout index from the .ckpt,
        following the ONNX->PyTorch key mapping rules.
        """
        if not os.path.exists(self.key_list_file):
            raise FileNotFoundError(
                f"Error: Stage 1 requires the key list file, but it was not found: {self.key_list_file}")

        with open(self.key_list_file, 'r') as f:
            onnx_keys = [line.strip() for line in f.readlines()]

        ckpt_data = load_gpt_model(self.torch_ckpt_path)
        if 'weight' not in ckpt_data:
            raise KeyError(
                f"❌ Error: 'weight' key not found in the .ckpt file. Top-level keys in the file are: {list(ckpt_data.keys())}")

        torch_state_dict = ckpt_data['weight']

        index_table = OrderedDict()
        current_fp32_offset = 0

        with open(self.fp16_bin_path, 'wb') as f_bin:
            for onnx_key in onnx_keys:
                # ONNX names use 'transformer_encoder' where the checkpoint uses 'h'.
                transformed_onnx_key = onnx_key.replace('transformer_encoder', 'h')
                torch_lookup_key = f"model.{transformed_onnx_key}"
                torch_tensor = torch_state_dict.get(torch_lookup_key)
                if torch_tensor is None:
                    # Fix: fail with a clear error instead of an AttributeError
                    # on None (consistent with VITSConverter).
                    raise ValueError(
                        f"❌ Critical error: Key '{torch_lookup_key}' not found in the PyTorch weights")
                numpy_array_fp16 = torch_tensor.to(torch.float16).cpu().numpy()
                f_bin.write(numpy_array_fp16.tobytes())
                # The index records the fp32 layout: exactly twice the fp16 size.
                tensor_length_fp32 = numpy_array_fp16.nbytes * 2
                index_table[onnx_key] = {'offset': current_fp32_offset, 'length': tensor_length_fp32}
                current_fp32_offset += tensor_length_fp32

        with open(self.index_table_path, 'w') as f_json:
            json.dump(index_table, f_json, indent=4)  # type: ignore

    def step2_relink_onnx_for_fp32(self, old_model: str, new_model: str):
        """
        (2) Using the fp32 index table, rewrite an ONNX model so its
        initializers point into the future full-precision .bin file.
        """
        if not os.path.exists(self.index_table_path):
            raise FileNotFoundError(
                f"Error: Stage 2 requires the index file, but it was not found: {self.index_table_path}")

        # Load the index table describing the fp32 layout.
        with open(self.index_table_path, 'r') as f:
            index_table = json.load(f)

        model = onnx.load_model(old_model, load_external_data=False)
        reconstructed_bin_filename = os.path.basename(self.reconstructed_fp32_bin_path)

        for tensor in model.graph.initializer:
            if tensor.name in index_table:
                tensor.ClearField('raw_data')
                tensor.data_location = onnx.TensorProto.EXTERNAL
                info = index_table[tensor.name]
                del tensor.external_data[:]
                keys = ["location", "offset", "length"]
                values = [reconstructed_bin_filename, str(info['offset']), str(info['length'])]

                for k, v in zip(keys, values):
                    entry = tensor.external_data.add()
                    entry.key = k
                    entry.value = v

        onnx.save(model, new_model)

    @staticmethod
    def step3_reconstruct_fp32_bin_from_fp16(fp16_bin_path: str, output_fp32_bin_path: str):
        """
        (3) Static utility: expand a half-precision .bin back to full precision.
        """
        fp16_array = np.fromfile(fp16_bin_path, dtype=np.float16)
        fp32_array = fp16_array.astype(np.float32)
        fp32_array.tofile(output_fp32_bin_path)

    def run_full_process(self):
        """Run the fp16 export and relink both decoder ONNX models."""
        self.step1_create_fp16_bin_with_key_mapping()
        self.step2_relink_onnx_for_fp32(self.stage_decoder_onnx_path, self.relinked_stage_decoder_path)
        self.step2_relink_onnx_for_fp32(self.first_stage_decoder_onnx_path, self.relinked_first_stage_decoder_path)
genie_tts/Converter/v2/VITSConverter.py ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import onnx
3
+ import numpy as np
4
+ import json
5
+ import os
6
+ from collections import OrderedDict
7
+
8
+ from ..load_state_dict import load_sovits_model
9
+
10
+
11
class VITSConverter:
    """
    Converter that produces, from a PyTorch model:
    1. A half-precision (fp16) .bin weight file for distribution.
    2. An ONNX model compatible with the full-precision (fp32) layout.
    3. A utility that restores an fp16 .bin back to an fp32 .bin.
    """

    def __init__(self,
                 torch_pth_path: str,
                 vits_onnx_path: str,
                 key_list_file: str,
                 output_dir: str,
                 cache_dir: str,
                 ):
        self.torch_pth_path: str = torch_pth_path
        self.vits_onnx_path: str = vits_onnx_path
        self.key_list_file: str = key_list_file
        self.output_dir: str = output_dir
        self.cache_dir: str = cache_dir
        # Output file paths.
        self.fp16_bin_path: str = os.path.join(self.output_dir, "vits_fp16.bin")
        self.index_table_path: str = os.path.join(self.cache_dir, "vits_weights_index_fp32.json")
        self.relinked_fp32_onnx_path: str = os.path.join(self.output_dir, "vits_fp32.onnx")
        self.reconstructed_fp32_bin_path: str = os.path.join(self.output_dir, "vits_fp32.bin")

        # Make sure the cache and output directories exist.
        os.makedirs(self.cache_dir, exist_ok=True)
        os.makedirs(self.output_dir, exist_ok=True)

        if not os.path.exists(self.key_list_file):
            raise FileNotFoundError(f"Error: Key list file not found! Path: {self.key_list_file}")

    def step1_create_fp16_bin_and_fp32_index(self):
        """
        (1) Write a half-precision (fp16) .bin file while producing an index
        table that describes the full-precision (fp32) layout.
        """
        # Load the key list.
        with open(self.key_list_file, 'r') as f:
            onnx_keys = [line.strip() for line in f.readlines()]

        # Load the PyTorch model weights.
        torch_state_dict = load_sovits_model(self.torch_pth_path)['weight']

        index_table = OrderedDict()
        current_fp32_offset = 0

        with open(self.fp16_bin_path, 'wb') as f_bin:
            for onnx_key in onnx_keys:
                # Strip the 'vq_model.' prefix used on the ONNX side.
                torch_key = onnx_key[len("vq_model."):] if onnx_key.startswith("vq_model.") else onnx_key

                torch_tensor = torch_state_dict.get(torch_key)
                if torch_tensor is None:
                    raise ValueError(f"❌ Critical error: Key '{torch_key}' not found in the PyTorch weights")

                # Convert to fp16 and write to the .bin file.
                torch_tensor_fp16 = torch_tensor.to(torch.float16)
                numpy_array_fp16 = torch_tensor_fp16.cpu().numpy()
                tensor_bytes_fp16 = numpy_array_fp16.tobytes()
                f_bin.write(tensor_bytes_fp16)
                # The index records the fp32 layout: twice the fp16 byte length.
                tensor_length_fp32 = len(tensor_bytes_fp16) * 2
                index_table[onnx_key] = {
                    'offset': current_fp32_offset,
                    'length': tensor_length_fp32
                }
                current_fp32_offset += tensor_length_fp32

        # Save the index table describing the fp32 layout.
        with open(self.index_table_path, 'w') as f_json:
            json.dump(index_table, f_json, indent=4)  # type: ignore

    def step2_relink_onnx_for_fp32(self):
        """
        (2) Using the fp32 index table, rewrite the ONNX model so it links
        to the future full-precision .bin file.
        """
        # Load the index table describing the fp32 layout.
        with open(self.index_table_path, 'r') as f:
            index_table = json.load(f)

        model = onnx.load_model(self.vits_onnx_path, load_external_data=False)
        reconstructed_bin_filename = os.path.basename(self.reconstructed_fp32_bin_path)

        for tensor in model.graph.initializer:
            if tensor.name in index_table:
                tensor.ClearField('raw_data')
                tensor.data_location = onnx.TensorProto.EXTERNAL
                info = index_table[tensor.name]

                del tensor.external_data[:]

                keys = ["location", "offset", "length"]
                values = [reconstructed_bin_filename, str(info['offset']), str(info['length'])]

                for k, v in zip(keys, values):
                    entry = tensor.external_data.add()
                    entry.key = k
                    entry.value = v

        # Save the modified ONNX model, now linked to the fp32 weights.
        onnx.save(model, self.relinked_fp32_onnx_path)

    @staticmethod
    def step3_reconstruct_fp32_bin_from_fp16(fp16_bin_path: str, output_fp32_bin_path: str):
        """
        (3) Static utility: restore a full-precision .bin from a half-precision one.

        Args:
            fp16_bin_path (str): Input half-precision .bin file path.
            output_fp32_bin_path (str): Output full-precision .bin file path.
        """
        fp16_array = np.fromfile(fp16_bin_path, dtype=np.float16)
        fp32_array = fp16_array.astype(np.float32)
        fp32_array.tofile(output_fp32_bin_path)

    def run_full_process(self):
        """Run the fp16 export and relink the VITS ONNX model."""
        self.step1_create_fp16_bin_and_fp32_index()
        self.step2_relink_onnx_for_fp32()
genie_tts/Converter/v2/__init__.py ADDED
File without changes
genie_tts/Converter/v2ProPlus/Converter.py ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import traceback
3
+ import os
4
+ import contextlib
5
+ import importlib.resources
6
+
7
+ from ...Utils.Constants import PACKAGE_NAME
8
+ from ..v2.VITSConverter import VITSConverter
9
+ from ..v2.T2SConverter import T2SModelConverter
10
+ from ..v2.EncoderConverter import EncoderConverter
11
+ from ..v2.Converter import (ENCODER_RESOURCE_PATH, STAGE_DECODER_RESOURCE_PATH,
12
+ FIRST_STAGE_DECODER_RESOURCE_PATH, T2S_KEYS_RESOURCE_PATH, CACHE_DIR, remove_folder)
13
+ from .PromptEncoderConverter import PromptEncoderConverter
14
+
15
+ logger = logging.getLogger()
16
+
17
+ # 使用 V2 ProPlus 的文件。
18
+ VITS_RESOURCE_PATH = "Data/v2ProPlus/Models/vits_fp32.onnx"
19
+ PROMPT_ENCODER_RESOURCE_PATH = "Data/v2ProPlus/Models/prompt_encoder_fp32.onnx"
20
+ VITS_KEYS_RESOURCE_PATH = "Data/v2ProPlus/Keys/vits_weights.txt"
21
+ PROMPT_ENCODER_KEYS_RESOURCE_PATH = "Data/v2ProPlus/Keys/prompt_encoder_weights.txt"
22
+
23
+
24
def convert(torch_ckpt_path: str, torch_pth_path: str, output_dir: str) -> None:
    """Convert V2 ProPlus GPT (.ckpt) and SoVITS (.pth) checkpoints to ONNX.

    Same pipeline as the V2 converter with an additional prompt-encoder
    conversion step. The output directory is removed on failure; the cache
    directory is removed in all cases.
    """
    # Make sure the cache and output directories exist.
    os.makedirs(CACHE_DIR, exist_ok=True)
    os.makedirs(output_dir, exist_ok=True)

    if len(os.listdir(output_dir)) > 0:
        logger.warning(f"The output directory {output_dir} is not empty!")

    with contextlib.ExitStack() as stack:
        files = importlib.resources.files(PACKAGE_NAME)

        def enter(p: str) -> str:
            # Materialize a packaged resource as a filesystem path valid for
            # the lifetime of the ExitStack.
            return str(stack.enter_context(importlib.resources.as_file(files.joinpath(p))))

        encoder_onnx_path = enter(ENCODER_RESOURCE_PATH)
        stage_decoder_path = enter(STAGE_DECODER_RESOURCE_PATH)
        first_stage_decoder_path = enter(FIRST_STAGE_DECODER_RESOURCE_PATH)
        vits_onnx_path = enter(VITS_RESOURCE_PATH)
        t2s_keys_path = enter(T2S_KEYS_RESOURCE_PATH)
        vits_keys_path = enter(VITS_KEYS_RESOURCE_PATH)
        prompt_encoder_path = enter(PROMPT_ENCODER_RESOURCE_PATH)
        prompt_encoder_keys_path = enter(PROMPT_ENCODER_KEYS_RESOURCE_PATH)

        converter_1 = T2SModelConverter(
            torch_ckpt_path=torch_ckpt_path,
            stage_decoder_onnx_path=stage_decoder_path,
            first_stage_decoder_onnx_path=first_stage_decoder_path,
            key_list_file=t2s_keys_path,
            output_dir=output_dir,
            cache_dir=CACHE_DIR,
        )
        converter_2 = VITSConverter(
            torch_pth_path=torch_pth_path,
            vits_onnx_path=vits_onnx_path,
            key_list_file=vits_keys_path,
            output_dir=output_dir,
            cache_dir=CACHE_DIR,
        )
        converter_3 = EncoderConverter(
            ckpt_path=torch_ckpt_path,
            pth_path=torch_pth_path,
            onnx_input_path=encoder_onnx_path,
            output_dir=output_dir,
        )
        converter_4 = PromptEncoderConverter(
            torch_pth_path=torch_pth_path,
            prompt_encoder_onnx_path=prompt_encoder_path,
            key_list_file=prompt_encoder_keys_path,
            output_dir=output_dir,
            cache_dir=CACHE_DIR,
        )

        try:
            converter_1.run_full_process()
            converter_2.run_full_process()
            converter_3.run_full_process()
            converter_4.run_full_process()
            logger.info(f"🎉 Conversion successful! Saved to: {os.path.abspath(output_dir)}\n"
                        f"- Model Type: V2ProPlus")
        except Exception:
            logger.error(f"❌ A critical error occurred during the conversion process")
            logger.error(traceback.format_exc())
            remove_folder(output_dir)  # Clean the output directory only on failure.
        finally:
            # Always attempt to clean the cache directory, success or failure.
            remove_folder(CACHE_DIR)
genie_tts/Converter/v2ProPlus/PromptEncoderConverter.py ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import onnx
3
+ import json
4
+ import os
5
+ from collections import OrderedDict
6
+
7
+ from ..load_state_dict import load_sovits_model
8
+
9
+
10
class PromptEncoderConverter:
    """
    Converts the PyTorch prompt-encoder weights into distributable artifacts:
      1. a half-precision (fp16) .bin weight file;
      2. an ONNX model whose external-data links describe the equivalent
         full-precision (fp32) layout, so that expanding the fp16 .bin back
         to fp32 yields exactly the file the ONNX model expects.
    """

    def __init__(self,
                 torch_pth_path: str,
                 prompt_encoder_onnx_path: str,
                 key_list_file: str,
                 output_dir: str,
                 cache_dir: str,
                 ):
        self.torch_pth_path: str = torch_pth_path
        self.vits_onnx_path: str = prompt_encoder_onnx_path
        self.key_list_file: str = key_list_file
        self.output_dir: str = output_dir
        self.cache_dir: str = cache_dir

        # Derived output locations.
        self.fp16_bin_path: str = os.path.join(self.output_dir, "prompt_encoder_fp16.bin")
        self.index_table_path: str = os.path.join(self.cache_dir, "prompt_encoder_weights_index_fp32.json")
        self.relinked_fp32_onnx_path: str = os.path.join(self.output_dir, "prompt_encoder_fp32.onnx")
        self.reconstructed_fp32_bin_path: str = os.path.join(self.output_dir, "prompt_encoder_fp32.bin")

        # Make sure both target directories exist.
        for directory in (self.cache_dir, self.output_dir):
            os.makedirs(directory, exist_ok=True)

        if not os.path.exists(self.key_list_file):
            raise FileNotFoundError(f"错误: Key 列表文件未找到! 路径: {self.key_list_file}")

    def step1_create_fp16_bin_and_fp32_index(self):
        """
        (1) Write every listed tensor into the .bin file at half precision
        (fp16), while recording an index table that describes the layout the
        data would occupy at full precision (fp32).
        """
        # Load the ordered list of ONNX initializer names.
        with open(self.key_list_file, 'r') as key_file:
            key_names = [line.strip() for line in key_file]

        # Load the PyTorch weights.
        state_dict = load_sovits_model(self.torch_pth_path)['weight']

        index_table = OrderedDict()
        # This offset is accumulated as if the file already held fp32 data.
        fp32_offset = 0

        with open(self.fp16_bin_path, 'wb') as bin_file:
            for key in key_names:
                # ONNX names carry a "vq_model." prefix absent from the checkpoint.
                lookup = key[len("vq_model."):] if key.startswith("vq_model.") else key

                tensor = state_dict.get(lookup)
                if tensor is None:
                    raise ValueError(f"❌ 严重错误: 在 PyTorch 权重中找不到 Key '{lookup}'")

                # Store the weight itself at half precision.
                raw_fp16 = tensor.to(torch.float16).cpu().numpy().tobytes()
                bin_file.write(raw_fp16)

                # Key point: record the fp32 span. fp32 is 4 bytes/element vs
                # fp16's 2, so the fp32 length is exactly double.
                fp32_length = len(raw_fp16) * 2
                index_table[key] = {
                    'offset': fp32_offset,
                    'length': fp32_length
                }
                # The running offset also advances by the fp32 length.
                fp32_offset += fp32_length

        # Persist the fp32-layout index table.
        with open(self.index_table_path, 'w') as json_file:
            json.dump(index_table, json_file, indent=4)  # type: ignore

    def step2_relink_onnx_for_fp32(self):
        """
        (2) Using the fp32 index table, rewrite the ONNX initializers so they
        reference a future, full-precision .bin file as external data.
        """
        # Load the fp32-layout index table.
        with open(self.index_table_path, 'r') as f:
            index_table = json.load(f)

        # Load only the ONNX graph structure, not the tensor payloads.
        model = onnx.load_model(self.vits_onnx_path, load_external_data=False)

        # The .bin file name this ONNX model will link against.
        target_bin_name = os.path.basename(self.reconstructed_fp32_bin_path)

        for initializer in model.graph.initializer:
            entry_info = index_table.get(initializer.name)
            if entry_info is None:
                continue
            initializer.ClearField('raw_data')
            initializer.data_location = onnx.TensorProto.EXTERNAL

            del initializer.external_data[:]

            for field_key, field_value in (
                    ("location", target_bin_name),
                    ("offset", str(entry_info['offset'])),
                    ("length", str(entry_info['length'])),
            ):
                record = initializer.external_data.add()
                record.key = field_key
                record.value = field_value

        # Save the ONNX model now linked to the fp32 weight file.
        onnx.save(model, self.relinked_fp32_onnx_path)

    def run_full_process(self):
        """
        Execute the core conversion steps (1 and 2) in order.
        """
        self.step1_create_fp16_bin_and_fp32_index()
        self.step2_relink_onnx_for_fp32()
genie_tts/Core/Inference.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import onnxruntime as ort
2
+ import numpy as np
3
+ from typing import List, Optional
4
+ import threading
5
+
6
+ from ..Audio.ReferenceAudio import ReferenceAudio
7
+ from ..GetPhonesAndBert import get_phones_and_bert
8
+
9
# NOTE(review): MAX_T2S_LEN is not referenced anywhere in this module; the
# decode loop below caps at 500 iterations instead — confirm which is intended.
MAX_T2S_LEN = 1000


class GENIE:
    """Drives a full TTS pass over ONNX sessions:
    text -> semantic tokens (T2S encoder + decoders) -> waveform (vocoder)."""

    def __init__(self):
        # Set externally (e.g. by the player) to abort an in-flight decode loop.
        self.stop_event: threading.Event = threading.Event()

    def tts(
        self,
        text: str,
        prompt_audio: ReferenceAudio,
        encoder: ort.InferenceSession,
        first_stage_decoder: ort.InferenceSession,
        stage_decoder: ort.InferenceSession,
        vocoder: ort.InferenceSession,
        prompt_encoder: Optional[ort.InferenceSession],
        language: str = 'japanese',
    ) -> Optional[np.ndarray]:
        """Synthesize `text` in the voice of `prompt_audio`.

        Returns the vocoder's waveform output.
        NOTE(review): if t2s_cpu() returns None (stop_event aborted the decode),
        the np.where below would raise on None — confirm callers never hit that path.
        """
        text = '。' + text  # Prepend a period so the first sentence is not dropped.
        text_seq, text_bert = get_phones_and_bert(text, language=language)

        semantic_tokens: np.ndarray = self.t2s_cpu(
            ref_seq=prompt_audio.phonemes_seq,
            ref_bert=prompt_audio.text_bert,
            text_seq=text_seq,
            text_bert=text_bert,
            ssl_content=prompt_audio.ssl_content,
            encoder=encoder,
            first_stage_decoder=first_stage_decoder,
            stage_decoder=stage_decoder,
        )

        # Strip invalid elements (values >= 1024, e.g. the EOS token):
        # truncate at the first such token along the last axis.
        eos_indices = np.where(semantic_tokens >= 1024)
        if len(eos_indices[0]) > 0:
            first_eos_index = eos_indices[-1][0]
            semantic_tokens = semantic_tokens[..., :first_eos_index]

        if prompt_encoder is None:
            # v2-style vocoder: conditioned directly on the reference waveform.
            return vocoder.run(None, {
                "text_seq": text_seq,
                "pred_semantic": semantic_tokens,
                "ref_audio": prompt_audio.audio_32k
            })[0]
        else:
            # Added for V2ProPlus: condition on precomputed speaker embeddings.
            prompt_audio.update_global_emb(prompt_encoder=prompt_encoder)
            audio_chunk = vocoder.run(None, {
                "text_seq": text_seq,
                "pred_semantic": semantic_tokens,
                "ge": prompt_audio.global_emb,
                "ge_advanced": prompt_audio.global_emb_advanced,
            })[0]
            return audio_chunk

    def t2s_cpu(
        self,
        ref_seq: np.ndarray,
        ref_bert: np.ndarray,
        text_seq: np.ndarray,
        text_bert: np.ndarray,
        ssl_content: np.ndarray,
        encoder: ort.InferenceSession,
        first_stage_decoder: ort.InferenceSession,
        stage_decoder: ort.InferenceSession,
    ) -> Optional[np.ndarray]:
        """Run the T2S model on CPU.

        Returns the predicted semantic-token array, or None if
        `self.stop_event` was set mid-decode.
        """
        # Encoder: fuse reference/target phoneme sequences, BERT features
        # and the reference SSL content into the decoder inputs.
        x, prompts = encoder.run(
            None,
            {
                "ref_seq": ref_seq,
                "text_seq": text_seq,
                "ref_bert": ref_bert,
                "text_bert": text_bert,
                "ssl_content": ssl_content,
            },
        )

        # First decode step — also produces the initial KV cache.
        y, y_emb, *present_key_values = first_stage_decoder.run(
            None, {"x": x, "prompts": prompts}
        )

        # Autoregressive decoding: feed outputs (and KV cache) back in each
        # iteration, capped at 500 steps.
        input_names: List[str] = [inp.name for inp in stage_decoder.get_inputs()]
        idx: int = 0
        for idx in range(0, 500):
            if self.stop_event.is_set():
                return None
            input_feed = {
                name: data
                for name, data in zip(input_names, [y, y_emb, *present_key_values])
            }
            outputs = stage_decoder.run(None, input_feed)
            y, y_emb, stop_condition_tensor, *present_key_values = outputs

            # The decoder emits a truthy stop flag when generation should end.
            if stop_condition_tensor:
                break

        # Presumably clears the final (EOS) slot before slicing — TODO confirm.
        y[0, -1] = 0
        # NOTE(review): if the loop stops at idx == 0, `-idx:` selects the
        # whole sequence rather than zero tokens — confirm intended.
        return np.expand_dims(y[:, -idx:], axis=0)


# Module-level singleton shared by the player/worker code.
tts_client: GENIE = GENIE()
genie_tts/Core/Resources.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from huggingface_hub import snapshot_download
3
+
4
+
5
def download_genie_data() -> None:
    """Download the GenieData resources from the HuggingFace Hub into the
    current directory.

    Performs blocking network I/O via `snapshot_download`; prints a progress
    message before and after.
    """
    # Fix: the two messages had needless f-string prefixes (no placeholders).
    print("🚀 Starting download Genie-TTS resources… This may take a few moments. ⏳")
    snapshot_download(
        repo_id="High-Logic/Genie",
        repo_type="model",
        allow_patterns="GenieData/*",
        local_dir=".",
        local_dir_use_symlinks=True,  # Symlink into the HF cache instead of copying.
    )
    print("✅ Genie-TTS resources downloaded successfully.")
+
16
+
17
+ def ensure_exists(path: str, name: str):
18
+ if not os.path.exists(path):
19
+ raise FileNotFoundError(
20
+ f"Required directory or file '{name}' was not found at: {path}\n"
21
+ f"Please download the pretrained models and place them under './GenieData', "
22
+ f"or set the environment variable GENIE_DATA_DIR to the correct directory."
23
+ )
24
+
25
+
26
+ """
27
+ 文件结构与项目 Midori 同步。
28
+ """
29
+
30
+ GENIE_DATA_DIR: str = os.getenv(
31
+ "GENIE_DATA_DIR",
32
+ "./GenieData"
33
+ )
34
+
35
+ """
36
+ Japanese_G2P_DIR: str = os.getenv(
37
+ "Japanese_G2P_DIR",
38
+ f"{GENIE_DATA_DIR}/G2P/JapaneseG2P"
39
+ )
40
+ """
41
+
42
+ English_G2P_DIR: str = os.getenv(
43
+ "English_G2P_DIR",
44
+ f"{GENIE_DATA_DIR}/G2P/EnglishG2P"
45
+ )
46
+
47
+ Chinese_G2P_DIR: str = os.getenv(
48
+ "Chinese_G2P_DIR",
49
+ f"{GENIE_DATA_DIR}/G2P/ChineseG2P"
50
+ )
51
+
52
+ HUBERT_MODEL_DIR: str = os.getenv(
53
+ "HUBERT_MODEL_DIR",
54
+ f"{GENIE_DATA_DIR}/chinese-hubert-base"
55
+ )
56
+
57
+ SV_MODEL: str = os.getenv(
58
+ "SV_MODEL",
59
+ f"{GENIE_DATA_DIR}/speaker_encoder.onnx"
60
+ )
61
+
62
+ ROBERTA_MODEL_DIR: str = os.getenv(
63
+ "ROBERTA_MODEL_DIR",
64
+ f"{GENIE_DATA_DIR}/RoBERTa"
65
+ )
66
+
67
+ if not os.path.exists(GENIE_DATA_DIR):
68
+ print("⚠️ GenieData folder not found.")
69
+ choice = input("Would you like to download it automatically from HuggingFace? (y/N): ").strip().lower()
70
+ if choice == "y":
71
+ download_genie_data()
72
+
73
+ # ---- Run directory checks ----
74
+ ensure_exists(HUBERT_MODEL_DIR, "HUBERT_MODEL_DIR")
75
+ ensure_exists(SV_MODEL, "SV_MODEL")
76
+ # ensure_exists(ROBERTA_MODEL_DIR, "ROBERTA_MODEL_DIR")
genie_tts/Core/TTSPlayer.py ADDED
@@ -0,0 +1,241 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # 文件: .../Core/TTSPlayer.py
2
+
3
+ import queue
4
+ import os
5
+ import threading
6
+
7
+ import numpy as np
8
+ import wave
9
+ from typing import Optional, List, Callable
10
+ import logging
11
+
12
+ from ..Utils.TextSplitter import TextSplitter
13
+ from ..Core.Inference import tts_client
14
+ from ..ModelManager import model_manager
15
+ from ..Utils.Shared import context
16
+ from ..Utils.Utils import clear_queue
17
+
18
+ logger = logging.getLogger(__name__)
19
+
20
STREAM_END = 'STREAM_END'  # Sentinel: the text stream has ended.
AUDIO_STREAM_END = 'AUDIO_STREAM_END'  # Sentinel: audio playback for the stream has ended.


class TTSPlayer:
    """Streaming TTS pipeline: a TTS worker thread synthesizes queued
    sentences while a playback worker thread plays and/or collects the
    resulting audio. Public API methods are serialized via `_api_lock`."""

    def __init__(self, sample_rate: int = 32000):
        self._text_splitter = TextSplitter()

        self.sample_rate: int = sample_rate
        self.channels: int = 1
        self.bytes_per_sample: int = 2  # 16-bit audio

        self._text_queue: queue.Queue = queue.Queue()
        self._audio_queue: queue.Queue = queue.Queue()

        self._stop_event: threading.Event = threading.Event()
        self._tts_done_event: threading.Event = threading.Event()
        self._playback_done_event: threading.Event = threading.Event()  # Marks playback completion.
        self._api_lock: threading.Lock = threading.Lock()

        self._tts_worker: Optional[threading.Thread] = None
        self._playback_worker: Optional[threading.Thread] = None

        self._play: bool = False
        self._current_save_path: Optional[str] = None
        self._session_audio_chunks: List[np.ndarray] = []
        self._split: bool = False

        self._chunk_callback: Optional[Callable[[Optional[bytes]], None]] = None

    @staticmethod
    def _preprocess_for_playback(audio_float: np.ndarray) -> bytes:
        # Convert float audio (assumed in [-1, 1] — TODO confirm) to raw
        # 16-bit PCM bytes.
        audio_int16 = (audio_float.squeeze() * 32767).astype(np.int16)
        return audio_int16.tobytes()

    def _tts_worker_loop(self):
        """Pull sentences from the text queue, synthesize them, and dispatch
        audio via the callback function and/or the audio queue."""
        while not self._stop_event.is_set():
            try:
                sentence = self._text_queue.get(timeout=1)
                if sentence is None or self._stop_event.is_set():
                    break
            except queue.Empty:
                continue

            try:
                if sentence is STREAM_END:
                    if self._current_save_path and self._session_audio_chunks:
                        self._save_session_audio()

                    # When the TTS worker finishes, send the end signal
                    # through the callback.
                    if self._chunk_callback:
                        self._chunk_callback(None)

                    # If playback is enabled, notify the audio queue that the
                    # stream has ended.
                    if self._play:
                        self._audio_queue.put(AUDIO_STREAM_END)

                    self._tts_done_event.set()
                    continue

                gsv_model = model_manager.get(context.current_speaker)
                if not gsv_model or not context.current_prompt_audio:
                    logger.error("Missing model or reference audio.")
                    continue

                tts_client.stop_event.clear()
                audio_chunk = tts_client.tts(
                    text=sentence,
                    prompt_audio=context.current_prompt_audio,
                    encoder=gsv_model.T2S_ENCODER,
                    first_stage_decoder=gsv_model.T2S_FIRST_STAGE_DECODER,
                    stage_decoder=gsv_model.T2S_STAGE_DECODER,
                    vocoder=gsv_model.VITS,
                    prompt_encoder=gsv_model.PROMPT_ENCODER,
                    language=gsv_model.LANGUAGE,
                )

                if audio_chunk is not None:
                    if self._play:
                        self._audio_queue.put(audio_chunk)
                    if self._current_save_path:
                        self._session_audio_chunks.append(audio_chunk)

                    # Stream the chunk to the registered callback as PCM bytes.
                    if self._chunk_callback:
                        audio_data = self._preprocess_for_playback(audio_chunk)
                        self._chunk_callback(audio_data)

            except Exception as e:
                logger.error(f"A critical error occurred while processing the TTS task: {e}", exc_info=True)
                # On error, still make sure the end signal is delivered.
                if self._chunk_callback:
                    self._chunk_callback(None)
                self._tts_done_event.set()

    def _playback_worker_loop(self):
        """Play queued audio chunks via sounddevice; if the device is
        unavailable, keep draining the queue so producers do not deadlock."""
        try:
            import sounddevice as sd
            with sd.OutputStream(samplerate=self.sample_rate,
                                 channels=self.channels,
                                 dtype='float32') as stream:
                while not self._stop_event.is_set():
                    try:
                        audio_chunk = self._audio_queue.get(timeout=1)
                        if audio_chunk is None:
                            break
                        if audio_chunk is AUDIO_STREAM_END:
                            self._playback_done_event.set()
                            continue
                        stream.write(audio_chunk.squeeze())
                    except queue.Empty:
                        continue
                    except Exception as e:
                        logger.error(f"Error during audio playback: {e}", exc_info=True)

        except Exception as e:
            logger.warning(f"Failed to initialize sounddevice: {e}. Audio playback will be skipped.")
            # If the audio device failed to initialize, still consume the
            # queue's end-of-stream markers to keep the main thread from
            # deadlocking even though nothing is played.
            while not self._stop_event.is_set():
                try:
                    item = self._audio_queue.get(timeout=0.5)
                    if item is None:
                        break
                    if item is AUDIO_STREAM_END:
                        self._playback_done_event.set()
                except queue.Empty:
                    continue

    def _save_session_audio(self):
        """Concatenate the session's chunks and write them as a 16-bit WAV,
        then reset the session buffer and save path."""
        try:
            full_audio = np.concatenate(self._session_audio_chunks, axis=0)
            with wave.open(self._current_save_path, 'wb') as wf:
                wf.setnchannels(self.channels)
                wf.setsampwidth(self.bytes_per_sample)
                wf.setframerate(self.sample_rate)
                wf.writeframes(self._preprocess_for_playback(full_audio))
            logger.info(f"Audio successfully saved to {os.path.abspath(self._current_save_path)}")
        except Exception as e:
            logger.error(f"Failed to save audio: {e}")
        finally:
            self._session_audio_chunks = []
            self._current_save_path = None

    def start_session(
        self,
        play: bool = False,
        split: bool = False,
        save_path: Optional[str] = None,
        chunk_callback: Optional[Callable[[Optional[bytes]], None]] = None
    ):
        """Reset session state, register options, and (re)start both worker
        threads if they are not already running."""
        with self._api_lock:
            self._tts_done_event.clear()
            self._playback_done_event.clear()  # Reset the playback-finished flag.
            self._chunk_callback = chunk_callback
            self._stop_event.clear()

            if self._tts_worker is None or not self._tts_worker.is_alive():
                self._tts_worker = threading.Thread(target=self._tts_worker_loop, daemon=True)
                self._tts_worker.start()

            if self._playback_worker is None or not self._playback_worker.is_alive():
                self._playback_worker = threading.Thread(target=self._playback_worker_loop, daemon=True)
                self._playback_worker.start()

            clear_queue(self._text_queue)
            clear_queue(self._audio_queue)

            self._play = play
            self._split = split
            self._current_save_path = save_path
            self._session_audio_chunks = []

    def feed(self, text_chunk: str):
        """Queue text for synthesis; split it into sentences first when the
        session was started with split=True."""
        with self._api_lock:
            if not text_chunk:
                return
            if self._split:
                sentences = self._text_splitter.split(text_chunk.strip())
                for sentence in sentences:
                    self._text_queue.put(sentence)
            else:
                self._text_queue.put(text_chunk)

    def end_session(self):
        """Mark the end of the current session's text stream."""
        with self._api_lock:
            self._text_queue.put(STREAM_END)

    def stop(self):
        """Abort synthesis and playback, unblock both queues, and join the
        worker threads. Idempotent once stopped."""
        with self._api_lock:
            if self._tts_worker is None and self._playback_worker is None:
                return
            if self._stop_event.is_set():
                return
            tts_client.stop_event.set()
            self._stop_event.set()
            self._tts_done_event.set()
            self._text_queue.put(None)
            self._audio_queue.put(None)
            if self._tts_worker and self._tts_worker.is_alive():
                self._tts_worker.join()
            if self._playback_worker and self._playback_worker.is_alive():
                self._playback_worker.join()
            self._tts_worker = None
            self._playback_worker = None

    def wait_for_tts_completion(self):
        """Block until the TTS worker has processed the STREAM_END marker."""
        if self._tts_done_event.is_set():
            return
        self._tts_done_event.wait()

    def wait_for_playback_done(self):
        """Block until synthesis AND (if playback is enabled) playback finish."""
        # 1. First wait for TTS generation to fully complete.
        self.wait_for_tts_completion()

        # 2. If playback was enabled and we were not force-stopped, wait for
        #    playback to drain.
        if self._play and not self._stop_event.is_set():
            if not self._playback_done_event.is_set():
                self._playback_done_event.wait()


# Module-level singleton.
tts_player: TTSPlayer = TTSPlayer()
genie_tts/Core/__init__.py ADDED
File without changes
genie_tts/Data/v2/Keys/t2s_onnx_keys.txt ADDED
@@ -0,0 +1,291 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ar_audio_embedding.word_embeddings.weight
2
+ ar_audio_position.alpha
3
+ transformer_encoder.layers.0.self_attn.in_proj_weight
4
+ transformer_encoder.layers.0.self_attn.in_proj_bias
5
+ transformer_encoder.layers.0.self_attn.out_proj.weight
6
+ transformer_encoder.layers.0.self_attn.out_proj.bias
7
+ transformer_encoder.layers.0.linear1.weight
8
+ transformer_encoder.layers.0.linear1.bias
9
+ transformer_encoder.layers.0.linear2.weight
10
+ transformer_encoder.layers.0.linear2.bias
11
+ transformer_encoder.layers.0.norm1.weight
12
+ transformer_encoder.layers.0.norm1.bias
13
+ transformer_encoder.layers.0.norm2.weight
14
+ transformer_encoder.layers.0.norm2.bias
15
+ transformer_encoder.layers.1.self_attn.in_proj_weight
16
+ transformer_encoder.layers.1.self_attn.in_proj_bias
17
+ transformer_encoder.layers.1.self_attn.out_proj.weight
18
+ transformer_encoder.layers.1.self_attn.out_proj.bias
19
+ transformer_encoder.layers.1.linear1.weight
20
+ transformer_encoder.layers.1.linear1.bias
21
+ transformer_encoder.layers.1.linear2.weight
22
+ transformer_encoder.layers.1.linear2.bias
23
+ transformer_encoder.layers.1.norm1.weight
24
+ transformer_encoder.layers.1.norm1.bias
25
+ transformer_encoder.layers.1.norm2.weight
26
+ transformer_encoder.layers.1.norm2.bias
27
+ transformer_encoder.layers.2.self_attn.in_proj_weight
28
+ transformer_encoder.layers.2.self_attn.in_proj_bias
29
+ transformer_encoder.layers.2.self_attn.out_proj.weight
30
+ transformer_encoder.layers.2.self_attn.out_proj.bias
31
+ transformer_encoder.layers.2.linear1.weight
32
+ transformer_encoder.layers.2.linear1.bias
33
+ transformer_encoder.layers.2.linear2.weight
34
+ transformer_encoder.layers.2.linear2.bias
35
+ transformer_encoder.layers.2.norm1.weight
36
+ transformer_encoder.layers.2.norm1.bias
37
+ transformer_encoder.layers.2.norm2.weight
38
+ transformer_encoder.layers.2.norm2.bias
39
+ transformer_encoder.layers.3.self_attn.in_proj_weight
40
+ transformer_encoder.layers.3.self_attn.in_proj_bias
41
+ transformer_encoder.layers.3.self_attn.out_proj.weight
42
+ transformer_encoder.layers.3.self_attn.out_proj.bias
43
+ transformer_encoder.layers.3.linear1.weight
44
+ transformer_encoder.layers.3.linear1.bias
45
+ transformer_encoder.layers.3.linear2.weight
46
+ transformer_encoder.layers.3.linear2.bias
47
+ transformer_encoder.layers.3.norm1.weight
48
+ transformer_encoder.layers.3.norm1.bias
49
+ transformer_encoder.layers.3.norm2.weight
50
+ transformer_encoder.layers.3.norm2.bias
51
+ transformer_encoder.layers.4.self_attn.in_proj_weight
52
+ transformer_encoder.layers.4.self_attn.in_proj_bias
53
+ transformer_encoder.layers.4.self_attn.out_proj.weight
54
+ transformer_encoder.layers.4.self_attn.out_proj.bias
55
+ transformer_encoder.layers.4.linear1.weight
56
+ transformer_encoder.layers.4.linear1.bias
57
+ transformer_encoder.layers.4.linear2.weight
58
+ transformer_encoder.layers.4.linear2.bias
59
+ transformer_encoder.layers.4.norm1.weight
60
+ transformer_encoder.layers.4.norm1.bias
61
+ transformer_encoder.layers.4.norm2.weight
62
+ transformer_encoder.layers.4.norm2.bias
63
+ transformer_encoder.layers.5.self_attn.in_proj_weight
64
+ transformer_encoder.layers.5.self_attn.in_proj_bias
65
+ transformer_encoder.layers.5.self_attn.out_proj.weight
66
+ transformer_encoder.layers.5.self_attn.out_proj.bias
67
+ transformer_encoder.layers.5.linear1.weight
68
+ transformer_encoder.layers.5.linear1.bias
69
+ transformer_encoder.layers.5.linear2.weight
70
+ transformer_encoder.layers.5.linear2.bias
71
+ transformer_encoder.layers.5.norm1.weight
72
+ transformer_encoder.layers.5.norm1.bias
73
+ transformer_encoder.layers.5.norm2.weight
74
+ transformer_encoder.layers.5.norm2.bias
75
+ transformer_encoder.layers.6.self_attn.in_proj_weight
76
+ transformer_encoder.layers.6.self_attn.in_proj_bias
77
+ transformer_encoder.layers.6.self_attn.out_proj.weight
78
+ transformer_encoder.layers.6.self_attn.out_proj.bias
79
+ transformer_encoder.layers.6.linear1.weight
80
+ transformer_encoder.layers.6.linear1.bias
81
+ transformer_encoder.layers.6.linear2.weight
82
+ transformer_encoder.layers.6.linear2.bias
83
+ transformer_encoder.layers.6.norm1.weight
84
+ transformer_encoder.layers.6.norm1.bias
85
+ transformer_encoder.layers.6.norm2.weight
86
+ transformer_encoder.layers.6.norm2.bias
87
+ transformer_encoder.layers.7.self_attn.in_proj_weight
88
+ transformer_encoder.layers.7.self_attn.in_proj_bias
89
+ transformer_encoder.layers.7.self_attn.out_proj.weight
90
+ transformer_encoder.layers.7.self_attn.out_proj.bias
91
+ transformer_encoder.layers.7.linear1.weight
92
+ transformer_encoder.layers.7.linear1.bias
93
+ transformer_encoder.layers.7.linear2.weight
94
+ transformer_encoder.layers.7.linear2.bias
95
+ transformer_encoder.layers.7.norm1.weight
96
+ transformer_encoder.layers.7.norm1.bias
97
+ transformer_encoder.layers.7.norm2.weight
98
+ transformer_encoder.layers.7.norm2.bias
99
+ transformer_encoder.layers.8.self_attn.in_proj_weight
100
+ transformer_encoder.layers.8.self_attn.in_proj_bias
101
+ transformer_encoder.layers.8.self_attn.out_proj.weight
102
+ transformer_encoder.layers.8.self_attn.out_proj.bias
103
+ transformer_encoder.layers.8.linear1.weight
104
+ transformer_encoder.layers.8.linear1.bias
105
+ transformer_encoder.layers.8.linear2.weight
106
+ transformer_encoder.layers.8.linear2.bias
107
+ transformer_encoder.layers.8.norm1.weight
108
+ transformer_encoder.layers.8.norm1.bias
109
+ transformer_encoder.layers.8.norm2.weight
110
+ transformer_encoder.layers.8.norm2.bias
111
+ transformer_encoder.layers.9.self_attn.in_proj_weight
112
+ transformer_encoder.layers.9.self_attn.in_proj_bias
113
+ transformer_encoder.layers.9.self_attn.out_proj.weight
114
+ transformer_encoder.layers.9.self_attn.out_proj.bias
115
+ transformer_encoder.layers.9.linear1.weight
116
+ transformer_encoder.layers.9.linear1.bias
117
+ transformer_encoder.layers.9.linear2.weight
118
+ transformer_encoder.layers.9.linear2.bias
119
+ transformer_encoder.layers.9.norm1.weight
120
+ transformer_encoder.layers.9.norm1.bias
121
+ transformer_encoder.layers.9.norm2.weight
122
+ transformer_encoder.layers.9.norm2.bias
123
+ transformer_encoder.layers.10.self_attn.in_proj_weight
124
+ transformer_encoder.layers.10.self_attn.in_proj_bias
125
+ transformer_encoder.layers.10.self_attn.out_proj.weight
126
+ transformer_encoder.layers.10.self_attn.out_proj.bias
127
+ transformer_encoder.layers.10.linear1.weight
128
+ transformer_encoder.layers.10.linear1.bias
129
+ transformer_encoder.layers.10.linear2.weight
130
+ transformer_encoder.layers.10.linear2.bias
131
+ transformer_encoder.layers.10.norm1.weight
132
+ transformer_encoder.layers.10.norm1.bias
133
+ transformer_encoder.layers.10.norm2.weight
134
+ transformer_encoder.layers.10.norm2.bias
135
+ transformer_encoder.layers.11.self_attn.in_proj_weight
136
+ transformer_encoder.layers.11.self_attn.in_proj_bias
137
+ transformer_encoder.layers.11.self_attn.out_proj.weight
138
+ transformer_encoder.layers.11.self_attn.out_proj.bias
139
+ transformer_encoder.layers.11.linear1.weight
140
+ transformer_encoder.layers.11.linear1.bias
141
+ transformer_encoder.layers.11.linear2.weight
142
+ transformer_encoder.layers.11.linear2.bias
143
+ transformer_encoder.layers.11.norm1.weight
144
+ transformer_encoder.layers.11.norm1.bias
145
+ transformer_encoder.layers.11.norm2.weight
146
+ transformer_encoder.layers.11.norm2.bias
147
+ transformer_encoder.layers.12.self_attn.in_proj_weight
148
+ transformer_encoder.layers.12.self_attn.in_proj_bias
149
+ transformer_encoder.layers.12.self_attn.out_proj.weight
150
+ transformer_encoder.layers.12.self_attn.out_proj.bias
151
+ transformer_encoder.layers.12.linear1.weight
152
+ transformer_encoder.layers.12.linear1.bias
153
+ transformer_encoder.layers.12.linear2.weight
154
+ transformer_encoder.layers.12.linear2.bias
155
+ transformer_encoder.layers.12.norm1.weight
156
+ transformer_encoder.layers.12.norm1.bias
157
+ transformer_encoder.layers.12.norm2.weight
158
+ transformer_encoder.layers.12.norm2.bias
159
+ transformer_encoder.layers.13.self_attn.in_proj_weight
160
+ transformer_encoder.layers.13.self_attn.in_proj_bias
161
+ transformer_encoder.layers.13.self_attn.out_proj.weight
162
+ transformer_encoder.layers.13.self_attn.out_proj.bias
163
+ transformer_encoder.layers.13.linear1.weight
164
+ transformer_encoder.layers.13.linear1.bias
165
+ transformer_encoder.layers.13.linear2.weight
166
+ transformer_encoder.layers.13.linear2.bias
167
+ transformer_encoder.layers.13.norm1.weight
168
+ transformer_encoder.layers.13.norm1.bias
169
+ transformer_encoder.layers.13.norm2.weight
170
+ transformer_encoder.layers.13.norm2.bias
171
+ transformer_encoder.layers.14.self_attn.in_proj_weight
172
+ transformer_encoder.layers.14.self_attn.in_proj_bias
173
+ transformer_encoder.layers.14.self_attn.out_proj.weight
174
+ transformer_encoder.layers.14.self_attn.out_proj.bias
175
+ transformer_encoder.layers.14.linear1.weight
176
+ transformer_encoder.layers.14.linear1.bias
177
+ transformer_encoder.layers.14.linear2.weight
178
+ transformer_encoder.layers.14.linear2.bias
179
+ transformer_encoder.layers.14.norm1.weight
180
+ transformer_encoder.layers.14.norm1.bias
181
+ transformer_encoder.layers.14.norm2.weight
182
+ transformer_encoder.layers.14.norm2.bias
183
+ transformer_encoder.layers.15.self_attn.in_proj_weight
184
+ transformer_encoder.layers.15.self_attn.in_proj_bias
185
+ transformer_encoder.layers.15.self_attn.out_proj.weight
186
+ transformer_encoder.layers.15.self_attn.out_proj.bias
187
+ transformer_encoder.layers.15.linear1.weight
188
+ transformer_encoder.layers.15.linear1.bias
189
+ transformer_encoder.layers.15.linear2.weight
190
+ transformer_encoder.layers.15.linear2.bias
191
+ transformer_encoder.layers.15.norm1.weight
192
+ transformer_encoder.layers.15.norm1.bias
193
+ transformer_encoder.layers.15.norm2.weight
194
+ transformer_encoder.layers.15.norm2.bias
195
+ transformer_encoder.layers.16.self_attn.in_proj_weight
196
+ transformer_encoder.layers.16.self_attn.in_proj_bias
197
+ transformer_encoder.layers.16.self_attn.out_proj.weight
198
+ transformer_encoder.layers.16.self_attn.out_proj.bias
199
+ transformer_encoder.layers.16.linear1.weight
200
+ transformer_encoder.layers.16.linear1.bias
201
+ transformer_encoder.layers.16.linear2.weight
202
+ transformer_encoder.layers.16.linear2.bias
203
+ transformer_encoder.layers.16.norm1.weight
204
+ transformer_encoder.layers.16.norm1.bias
205
+ transformer_encoder.layers.16.norm2.weight
206
+ transformer_encoder.layers.16.norm2.bias
207
+ transformer_encoder.layers.17.self_attn.in_proj_weight
208
+ transformer_encoder.layers.17.self_attn.in_proj_bias
209
+ transformer_encoder.layers.17.self_attn.out_proj.weight
210
+ transformer_encoder.layers.17.self_attn.out_proj.bias
211
+ transformer_encoder.layers.17.linear1.weight
212
+ transformer_encoder.layers.17.linear1.bias
213
+ transformer_encoder.layers.17.linear2.weight
214
+ transformer_encoder.layers.17.linear2.bias
215
+ transformer_encoder.layers.17.norm1.weight
216
+ transformer_encoder.layers.17.norm1.bias
217
+ transformer_encoder.layers.17.norm2.weight
218
+ transformer_encoder.layers.17.norm2.bias
219
+ transformer_encoder.layers.18.self_attn.in_proj_weight
220
+ transformer_encoder.layers.18.self_attn.in_proj_bias
221
+ transformer_encoder.layers.18.self_attn.out_proj.weight
222
+ transformer_encoder.layers.18.self_attn.out_proj.bias
223
+ transformer_encoder.layers.18.linear1.weight
224
+ transformer_encoder.layers.18.linear1.bias
225
+ transformer_encoder.layers.18.linear2.weight
226
+ transformer_encoder.layers.18.linear2.bias
227
+ transformer_encoder.layers.18.norm1.weight
228
+ transformer_encoder.layers.18.norm1.bias
229
+ transformer_encoder.layers.18.norm2.weight
230
+ transformer_encoder.layers.18.norm2.bias
231
+ transformer_encoder.layers.19.self_attn.in_proj_weight
232
+ transformer_encoder.layers.19.self_attn.in_proj_bias
233
+ transformer_encoder.layers.19.self_attn.out_proj.weight
234
+ transformer_encoder.layers.19.self_attn.out_proj.bias
235
+ transformer_encoder.layers.19.linear1.weight
236
+ transformer_encoder.layers.19.linear1.bias
237
+ transformer_encoder.layers.19.linear2.weight
238
+ transformer_encoder.layers.19.linear2.bias
239
+ transformer_encoder.layers.19.norm1.weight
240
+ transformer_encoder.layers.19.norm1.bias
241
+ transformer_encoder.layers.19.norm2.weight
242
+ transformer_encoder.layers.19.norm2.bias
243
+ transformer_encoder.layers.20.self_attn.in_proj_weight
244
+ transformer_encoder.layers.20.self_attn.in_proj_bias
245
+ transformer_encoder.layers.20.self_attn.out_proj.weight
246
+ transformer_encoder.layers.20.self_attn.out_proj.bias
247
+ transformer_encoder.layers.20.linear1.weight
248
+ transformer_encoder.layers.20.linear1.bias
249
+ transformer_encoder.layers.20.linear2.weight
250
+ transformer_encoder.layers.20.linear2.bias
251
+ transformer_encoder.layers.20.norm1.weight
252
+ transformer_encoder.layers.20.norm1.bias
253
+ transformer_encoder.layers.20.norm2.weight
254
+ transformer_encoder.layers.20.norm2.bias
255
+ transformer_encoder.layers.21.self_attn.in_proj_weight
256
+ transformer_encoder.layers.21.self_attn.in_proj_bias
257
+ transformer_encoder.layers.21.self_attn.out_proj.weight
258
+ transformer_encoder.layers.21.self_attn.out_proj.bias
259
+ transformer_encoder.layers.21.linear1.weight
260
+ transformer_encoder.layers.21.linear1.bias
261
+ transformer_encoder.layers.21.linear2.weight
262
+ transformer_encoder.layers.21.linear2.bias
263
+ transformer_encoder.layers.21.norm1.weight
264
+ transformer_encoder.layers.21.norm1.bias
265
+ transformer_encoder.layers.21.norm2.weight
266
+ transformer_encoder.layers.21.norm2.bias
267
+ transformer_encoder.layers.22.self_attn.in_proj_weight
268
+ transformer_encoder.layers.22.self_attn.in_proj_bias
269
+ transformer_encoder.layers.22.self_attn.out_proj.weight
270
+ transformer_encoder.layers.22.self_attn.out_proj.bias
271
+ transformer_encoder.layers.22.linear1.weight
272
+ transformer_encoder.layers.22.linear1.bias
273
+ transformer_encoder.layers.22.linear2.weight
274
+ transformer_encoder.layers.22.linear2.bias
275
+ transformer_encoder.layers.22.norm1.weight
276
+ transformer_encoder.layers.22.norm1.bias
277
+ transformer_encoder.layers.22.norm2.weight
278
+ transformer_encoder.layers.22.norm2.bias
279
+ transformer_encoder.layers.23.self_attn.in_proj_weight
280
+ transformer_encoder.layers.23.self_attn.in_proj_bias
281
+ transformer_encoder.layers.23.self_attn.out_proj.weight
282
+ transformer_encoder.layers.23.self_attn.out_proj.bias
283
+ transformer_encoder.layers.23.linear1.weight
284
+ transformer_encoder.layers.23.linear1.bias
285
+ transformer_encoder.layers.23.linear2.weight
286
+ transformer_encoder.layers.23.linear2.bias
287
+ transformer_encoder.layers.23.norm1.weight
288
+ transformer_encoder.layers.23.norm1.bias
289
+ transformer_encoder.layers.23.norm2.weight
290
+ transformer_encoder.layers.23.norm2.bias
291
+ ar_predict_layer.weight
genie_tts/Data/v2/Keys/vits_onnx_keys.txt ADDED
@@ -0,0 +1,668 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ vq_model.dec.cond.bias
2
+ vq_model.dec.cond.weight
3
+ vq_model.dec.conv_post.weight
4
+ vq_model.dec.conv_pre.bias
5
+ vq_model.dec.conv_pre.weight
6
+ vq_model.dec.resblocks.0.convs1.0.bias
7
+ vq_model.dec.resblocks.0.convs1.0.weight_g
8
+ vq_model.dec.resblocks.0.convs1.0.weight_v
9
+ vq_model.dec.resblocks.0.convs1.1.bias
10
+ vq_model.dec.resblocks.0.convs1.1.weight_g
11
+ vq_model.dec.resblocks.0.convs1.1.weight_v
12
+ vq_model.dec.resblocks.0.convs1.2.bias
13
+ vq_model.dec.resblocks.0.convs1.2.weight_g
14
+ vq_model.dec.resblocks.0.convs1.2.weight_v
15
+ vq_model.dec.resblocks.0.convs2.0.bias
16
+ vq_model.dec.resblocks.0.convs2.0.weight_g
17
+ vq_model.dec.resblocks.0.convs2.0.weight_v
18
+ vq_model.dec.resblocks.0.convs2.1.bias
19
+ vq_model.dec.resblocks.0.convs2.1.weight_g
20
+ vq_model.dec.resblocks.0.convs2.1.weight_v
21
+ vq_model.dec.resblocks.0.convs2.2.bias
22
+ vq_model.dec.resblocks.0.convs2.2.weight_g
23
+ vq_model.dec.resblocks.0.convs2.2.weight_v
24
+ vq_model.dec.resblocks.1.convs1.0.bias
25
+ vq_model.dec.resblocks.1.convs1.0.weight_g
26
+ vq_model.dec.resblocks.1.convs1.0.weight_v
27
+ vq_model.dec.resblocks.1.convs1.1.bias
28
+ vq_model.dec.resblocks.1.convs1.1.weight_g
29
+ vq_model.dec.resblocks.1.convs1.1.weight_v
30
+ vq_model.dec.resblocks.1.convs1.2.bias
31
+ vq_model.dec.resblocks.1.convs1.2.weight_g
32
+ vq_model.dec.resblocks.1.convs1.2.weight_v
33
+ vq_model.dec.resblocks.1.convs2.0.bias
34
+ vq_model.dec.resblocks.1.convs2.0.weight_g
35
+ vq_model.dec.resblocks.1.convs2.0.weight_v
36
+ vq_model.dec.resblocks.1.convs2.1.bias
37
+ vq_model.dec.resblocks.1.convs2.1.weight_g
38
+ vq_model.dec.resblocks.1.convs2.1.weight_v
39
+ vq_model.dec.resblocks.1.convs2.2.bias
40
+ vq_model.dec.resblocks.1.convs2.2.weight_g
41
+ vq_model.dec.resblocks.1.convs2.2.weight_v
42
+ vq_model.dec.resblocks.10.convs1.0.bias
43
+ vq_model.dec.resblocks.10.convs1.0.weight_g
44
+ vq_model.dec.resblocks.10.convs1.0.weight_v
45
+ vq_model.dec.resblocks.10.convs1.1.bias
46
+ vq_model.dec.resblocks.10.convs1.1.weight_g
47
+ vq_model.dec.resblocks.10.convs1.1.weight_v
48
+ vq_model.dec.resblocks.10.convs1.2.bias
49
+ vq_model.dec.resblocks.10.convs1.2.weight_g
50
+ vq_model.dec.resblocks.10.convs1.2.weight_v
51
+ vq_model.dec.resblocks.10.convs2.0.bias
52
+ vq_model.dec.resblocks.10.convs2.0.weight_g
53
+ vq_model.dec.resblocks.10.convs2.0.weight_v
54
+ vq_model.dec.resblocks.10.convs2.1.bias
55
+ vq_model.dec.resblocks.10.convs2.1.weight_g
56
+ vq_model.dec.resblocks.10.convs2.1.weight_v
57
+ vq_model.dec.resblocks.10.convs2.2.bias
58
+ vq_model.dec.resblocks.10.convs2.2.weight_g
59
+ vq_model.dec.resblocks.10.convs2.2.weight_v
60
+ vq_model.dec.resblocks.11.convs1.0.bias
61
+ vq_model.dec.resblocks.11.convs1.0.weight_g
62
+ vq_model.dec.resblocks.11.convs1.0.weight_v
63
+ vq_model.dec.resblocks.11.convs1.1.bias
64
+ vq_model.dec.resblocks.11.convs1.1.weight_g
65
+ vq_model.dec.resblocks.11.convs1.1.weight_v
66
+ vq_model.dec.resblocks.11.convs1.2.bias
67
+ vq_model.dec.resblocks.11.convs1.2.weight_g
68
+ vq_model.dec.resblocks.11.convs1.2.weight_v
69
+ vq_model.dec.resblocks.11.convs2.0.bias
70
+ vq_model.dec.resblocks.11.convs2.0.weight_g
71
+ vq_model.dec.resblocks.11.convs2.0.weight_v
72
+ vq_model.dec.resblocks.11.convs2.1.bias
73
+ vq_model.dec.resblocks.11.convs2.1.weight_g
74
+ vq_model.dec.resblocks.11.convs2.1.weight_v
75
+ vq_model.dec.resblocks.11.convs2.2.bias
76
+ vq_model.dec.resblocks.11.convs2.2.weight_g
77
+ vq_model.dec.resblocks.11.convs2.2.weight_v
78
+ vq_model.dec.resblocks.12.convs1.0.bias
79
+ vq_model.dec.resblocks.12.convs1.0.weight_g
80
+ vq_model.dec.resblocks.12.convs1.0.weight_v
81
+ vq_model.dec.resblocks.12.convs1.1.bias
82
+ vq_model.dec.resblocks.12.convs1.1.weight_g
83
+ vq_model.dec.resblocks.12.convs1.1.weight_v
84
+ vq_model.dec.resblocks.12.convs1.2.bias
85
+ vq_model.dec.resblocks.12.convs1.2.weight_g
86
+ vq_model.dec.resblocks.12.convs1.2.weight_v
87
+ vq_model.dec.resblocks.12.convs2.0.bias
88
+ vq_model.dec.resblocks.12.convs2.0.weight_g
89
+ vq_model.dec.resblocks.12.convs2.0.weight_v
90
+ vq_model.dec.resblocks.12.convs2.1.bias
91
+ vq_model.dec.resblocks.12.convs2.1.weight_g
92
+ vq_model.dec.resblocks.12.convs2.1.weight_v
93
+ vq_model.dec.resblocks.12.convs2.2.bias
94
+ vq_model.dec.resblocks.12.convs2.2.weight_g
95
+ vq_model.dec.resblocks.12.convs2.2.weight_v
96
+ vq_model.dec.resblocks.13.convs1.0.bias
97
+ vq_model.dec.resblocks.13.convs1.0.weight_g
98
+ vq_model.dec.resblocks.13.convs1.0.weight_v
99
+ vq_model.dec.resblocks.13.convs1.1.bias
100
+ vq_model.dec.resblocks.13.convs1.1.weight_g
101
+ vq_model.dec.resblocks.13.convs1.1.weight_v
102
+ vq_model.dec.resblocks.13.convs1.2.bias
103
+ vq_model.dec.resblocks.13.convs1.2.weight_g
104
+ vq_model.dec.resblocks.13.convs1.2.weight_v
105
+ vq_model.dec.resblocks.13.convs2.0.bias
106
+ vq_model.dec.resblocks.13.convs2.0.weight_g
107
+ vq_model.dec.resblocks.13.convs2.0.weight_v
108
+ vq_model.dec.resblocks.13.convs2.1.bias
109
+ vq_model.dec.resblocks.13.convs2.1.weight_g
110
+ vq_model.dec.resblocks.13.convs2.1.weight_v
111
+ vq_model.dec.resblocks.13.convs2.2.bias
112
+ vq_model.dec.resblocks.13.convs2.2.weight_g
113
+ vq_model.dec.resblocks.13.convs2.2.weight_v
114
+ vq_model.dec.resblocks.14.convs1.0.bias
115
+ vq_model.dec.resblocks.14.convs1.0.weight_g
116
+ vq_model.dec.resblocks.14.convs1.0.weight_v
117
+ vq_model.dec.resblocks.14.convs1.1.bias
118
+ vq_model.dec.resblocks.14.convs1.1.weight_g
119
+ vq_model.dec.resblocks.14.convs1.1.weight_v
120
+ vq_model.dec.resblocks.14.convs1.2.bias
121
+ vq_model.dec.resblocks.14.convs1.2.weight_g
122
+ vq_model.dec.resblocks.14.convs1.2.weight_v
123
+ vq_model.dec.resblocks.14.convs2.0.bias
124
+ vq_model.dec.resblocks.14.convs2.0.weight_g
125
+ vq_model.dec.resblocks.14.convs2.0.weight_v
126
+ vq_model.dec.resblocks.14.convs2.1.bias
127
+ vq_model.dec.resblocks.14.convs2.1.weight_g
128
+ vq_model.dec.resblocks.14.convs2.1.weight_v
129
+ vq_model.dec.resblocks.14.convs2.2.bias
130
+ vq_model.dec.resblocks.14.convs2.2.weight_g
131
+ vq_model.dec.resblocks.14.convs2.2.weight_v
132
+ vq_model.dec.resblocks.2.convs1.0.bias
133
+ vq_model.dec.resblocks.2.convs1.0.weight_g
134
+ vq_model.dec.resblocks.2.convs1.0.weight_v
135
+ vq_model.dec.resblocks.2.convs1.1.bias
136
+ vq_model.dec.resblocks.2.convs1.1.weight_g
137
+ vq_model.dec.resblocks.2.convs1.1.weight_v
138
+ vq_model.dec.resblocks.2.convs1.2.bias
139
+ vq_model.dec.resblocks.2.convs1.2.weight_g
140
+ vq_model.dec.resblocks.2.convs1.2.weight_v
141
+ vq_model.dec.resblocks.2.convs2.0.bias
142
+ vq_model.dec.resblocks.2.convs2.0.weight_g
143
+ vq_model.dec.resblocks.2.convs2.0.weight_v
144
+ vq_model.dec.resblocks.2.convs2.1.bias
145
+ vq_model.dec.resblocks.2.convs2.1.weight_g
146
+ vq_model.dec.resblocks.2.convs2.1.weight_v
147
+ vq_model.dec.resblocks.2.convs2.2.bias
148
+ vq_model.dec.resblocks.2.convs2.2.weight_g
149
+ vq_model.dec.resblocks.2.convs2.2.weight_v
150
+ vq_model.dec.resblocks.3.convs1.0.bias
151
+ vq_model.dec.resblocks.3.convs1.0.weight_g
152
+ vq_model.dec.resblocks.3.convs1.0.weight_v
153
+ vq_model.dec.resblocks.3.convs1.1.bias
154
+ vq_model.dec.resblocks.3.convs1.1.weight_g
155
+ vq_model.dec.resblocks.3.convs1.1.weight_v
156
+ vq_model.dec.resblocks.3.convs1.2.bias
157
+ vq_model.dec.resblocks.3.convs1.2.weight_g
158
+ vq_model.dec.resblocks.3.convs1.2.weight_v
159
+ vq_model.dec.resblocks.3.convs2.0.bias
160
+ vq_model.dec.resblocks.3.convs2.0.weight_g
161
+ vq_model.dec.resblocks.3.convs2.0.weight_v
162
+ vq_model.dec.resblocks.3.convs2.1.bias
163
+ vq_model.dec.resblocks.3.convs2.1.weight_g
164
+ vq_model.dec.resblocks.3.convs2.1.weight_v
165
+ vq_model.dec.resblocks.3.convs2.2.bias
166
+ vq_model.dec.resblocks.3.convs2.2.weight_g
167
+ vq_model.dec.resblocks.3.convs2.2.weight_v
168
+ vq_model.dec.resblocks.4.convs1.0.bias
169
+ vq_model.dec.resblocks.4.convs1.0.weight_g
170
+ vq_model.dec.resblocks.4.convs1.0.weight_v
171
+ vq_model.dec.resblocks.4.convs1.1.bias
172
+ vq_model.dec.resblocks.4.convs1.1.weight_g
173
+ vq_model.dec.resblocks.4.convs1.1.weight_v
174
+ vq_model.dec.resblocks.4.convs1.2.bias
175
+ vq_model.dec.resblocks.4.convs1.2.weight_g
176
+ vq_model.dec.resblocks.4.convs1.2.weight_v
177
+ vq_model.dec.resblocks.4.convs2.0.bias
178
+ vq_model.dec.resblocks.4.convs2.0.weight_g
179
+ vq_model.dec.resblocks.4.convs2.0.weight_v
180
+ vq_model.dec.resblocks.4.convs2.1.bias
181
+ vq_model.dec.resblocks.4.convs2.1.weight_g
182
+ vq_model.dec.resblocks.4.convs2.1.weight_v
183
+ vq_model.dec.resblocks.4.convs2.2.bias
184
+ vq_model.dec.resblocks.4.convs2.2.weight_g
185
+ vq_model.dec.resblocks.4.convs2.2.weight_v
186
+ vq_model.dec.resblocks.5.convs1.0.bias
187
+ vq_model.dec.resblocks.5.convs1.0.weight_g
188
+ vq_model.dec.resblocks.5.convs1.0.weight_v
189
+ vq_model.dec.resblocks.5.convs1.1.bias
190
+ vq_model.dec.resblocks.5.convs1.1.weight_g
191
+ vq_model.dec.resblocks.5.convs1.1.weight_v
192
+ vq_model.dec.resblocks.5.convs1.2.bias
193
+ vq_model.dec.resblocks.5.convs1.2.weight_g
194
+ vq_model.dec.resblocks.5.convs1.2.weight_v
195
+ vq_model.dec.resblocks.5.convs2.0.bias
196
+ vq_model.dec.resblocks.5.convs2.0.weight_g
197
+ vq_model.dec.resblocks.5.convs2.0.weight_v
198
+ vq_model.dec.resblocks.5.convs2.1.bias
199
+ vq_model.dec.resblocks.5.convs2.1.weight_g
200
+ vq_model.dec.resblocks.5.convs2.1.weight_v
201
+ vq_model.dec.resblocks.5.convs2.2.bias
202
+ vq_model.dec.resblocks.5.convs2.2.weight_g
203
+ vq_model.dec.resblocks.5.convs2.2.weight_v
204
+ vq_model.dec.resblocks.6.convs1.0.bias
205
+ vq_model.dec.resblocks.6.convs1.0.weight_g
206
+ vq_model.dec.resblocks.6.convs1.0.weight_v
207
+ vq_model.dec.resblocks.6.convs1.1.bias
208
+ vq_model.dec.resblocks.6.convs1.1.weight_g
209
+ vq_model.dec.resblocks.6.convs1.1.weight_v
210
+ vq_model.dec.resblocks.6.convs1.2.bias
211
+ vq_model.dec.resblocks.6.convs1.2.weight_g
212
+ vq_model.dec.resblocks.6.convs1.2.weight_v
213
+ vq_model.dec.resblocks.6.convs2.0.bias
214
+ vq_model.dec.resblocks.6.convs2.0.weight_g
215
+ vq_model.dec.resblocks.6.convs2.0.weight_v
216
+ vq_model.dec.resblocks.6.convs2.1.bias
217
+ vq_model.dec.resblocks.6.convs2.1.weight_g
218
+ vq_model.dec.resblocks.6.convs2.1.weight_v
219
+ vq_model.dec.resblocks.6.convs2.2.bias
220
+ vq_model.dec.resblocks.6.convs2.2.weight_g
221
+ vq_model.dec.resblocks.6.convs2.2.weight_v
222
+ vq_model.dec.resblocks.7.convs1.0.bias
223
+ vq_model.dec.resblocks.7.convs1.0.weight_g
224
+ vq_model.dec.resblocks.7.convs1.0.weight_v
225
+ vq_model.dec.resblocks.7.convs1.1.bias
226
+ vq_model.dec.resblocks.7.convs1.1.weight_g
227
+ vq_model.dec.resblocks.7.convs1.1.weight_v
228
+ vq_model.dec.resblocks.7.convs1.2.bias
229
+ vq_model.dec.resblocks.7.convs1.2.weight_g
230
+ vq_model.dec.resblocks.7.convs1.2.weight_v
231
+ vq_model.dec.resblocks.7.convs2.0.bias
232
+ vq_model.dec.resblocks.7.convs2.0.weight_g
233
+ vq_model.dec.resblocks.7.convs2.0.weight_v
234
+ vq_model.dec.resblocks.7.convs2.1.bias
235
+ vq_model.dec.resblocks.7.convs2.1.weight_g
236
+ vq_model.dec.resblocks.7.convs2.1.weight_v
237
+ vq_model.dec.resblocks.7.convs2.2.bias
238
+ vq_model.dec.resblocks.7.convs2.2.weight_g
239
+ vq_model.dec.resblocks.7.convs2.2.weight_v
240
+ vq_model.dec.resblocks.8.convs1.0.bias
241
+ vq_model.dec.resblocks.8.convs1.0.weight_g
242
+ vq_model.dec.resblocks.8.convs1.0.weight_v
243
+ vq_model.dec.resblocks.8.convs1.1.bias
244
+ vq_model.dec.resblocks.8.convs1.1.weight_g
245
+ vq_model.dec.resblocks.8.convs1.1.weight_v
246
+ vq_model.dec.resblocks.8.convs1.2.bias
247
+ vq_model.dec.resblocks.8.convs1.2.weight_g
248
+ vq_model.dec.resblocks.8.convs1.2.weight_v
249
+ vq_model.dec.resblocks.8.convs2.0.bias
250
+ vq_model.dec.resblocks.8.convs2.0.weight_g
251
+ vq_model.dec.resblocks.8.convs2.0.weight_v
252
+ vq_model.dec.resblocks.8.convs2.1.bias
253
+ vq_model.dec.resblocks.8.convs2.1.weight_g
254
+ vq_model.dec.resblocks.8.convs2.1.weight_v
255
+ vq_model.dec.resblocks.8.convs2.2.bias
256
+ vq_model.dec.resblocks.8.convs2.2.weight_g
257
+ vq_model.dec.resblocks.8.convs2.2.weight_v
258
+ vq_model.dec.resblocks.9.convs1.0.bias
259
+ vq_model.dec.resblocks.9.convs1.0.weight_g
260
+ vq_model.dec.resblocks.9.convs1.0.weight_v
261
+ vq_model.dec.resblocks.9.convs1.1.bias
262
+ vq_model.dec.resblocks.9.convs1.1.weight_g
263
+ vq_model.dec.resblocks.9.convs1.1.weight_v
264
+ vq_model.dec.resblocks.9.convs1.2.bias
265
+ vq_model.dec.resblocks.9.convs1.2.weight_g
266
+ vq_model.dec.resblocks.9.convs1.2.weight_v
267
+ vq_model.dec.resblocks.9.convs2.0.bias
268
+ vq_model.dec.resblocks.9.convs2.0.weight_g
269
+ vq_model.dec.resblocks.9.convs2.0.weight_v
270
+ vq_model.dec.resblocks.9.convs2.1.bias
271
+ vq_model.dec.resblocks.9.convs2.1.weight_g
272
+ vq_model.dec.resblocks.9.convs2.1.weight_v
273
+ vq_model.dec.resblocks.9.convs2.2.bias
274
+ vq_model.dec.resblocks.9.convs2.2.weight_g
275
+ vq_model.dec.resblocks.9.convs2.2.weight_v
276
+ vq_model.dec.ups.0.bias
277
+ vq_model.dec.ups.0.weight_g
278
+ vq_model.dec.ups.0.weight_v
279
+ vq_model.dec.ups.1.bias
280
+ vq_model.dec.ups.1.weight_g
281
+ vq_model.dec.ups.1.weight_v
282
+ vq_model.dec.ups.2.bias
283
+ vq_model.dec.ups.2.weight_g
284
+ vq_model.dec.ups.2.weight_v
285
+ vq_model.dec.ups.3.bias
286
+ vq_model.dec.ups.3.weight_g
287
+ vq_model.dec.ups.3.weight_v
288
+ vq_model.dec.ups.4.bias
289
+ vq_model.dec.ups.4.weight_g
290
+ vq_model.dec.ups.4.weight_v
291
+ vq_model.enc_p.encoder2.attn_layers.0.conv_k.bias
292
+ vq_model.enc_p.encoder2.attn_layers.0.conv_k.weight
293
+ vq_model.enc_p.encoder2.attn_layers.0.conv_o.bias
294
+ vq_model.enc_p.encoder2.attn_layers.0.conv_o.weight
295
+ vq_model.enc_p.encoder2.attn_layers.0.conv_q.bias
296
+ vq_model.enc_p.encoder2.attn_layers.0.conv_q.weight
297
+ vq_model.enc_p.encoder2.attn_layers.0.conv_v.bias
298
+ vq_model.enc_p.encoder2.attn_layers.0.conv_v.weight
299
+ vq_model.enc_p.encoder2.attn_layers.0.emb_rel_k
300
+ vq_model.enc_p.encoder2.attn_layers.0.emb_rel_v
301
+ vq_model.enc_p.encoder2.attn_layers.1.conv_k.bias
302
+ vq_model.enc_p.encoder2.attn_layers.1.conv_k.weight
303
+ vq_model.enc_p.encoder2.attn_layers.1.conv_o.bias
304
+ vq_model.enc_p.encoder2.attn_layers.1.conv_o.weight
305
+ vq_model.enc_p.encoder2.attn_layers.1.conv_q.bias
306
+ vq_model.enc_p.encoder2.attn_layers.1.conv_q.weight
307
+ vq_model.enc_p.encoder2.attn_layers.1.conv_v.bias
308
+ vq_model.enc_p.encoder2.attn_layers.1.conv_v.weight
309
+ vq_model.enc_p.encoder2.attn_layers.1.emb_rel_k
310
+ vq_model.enc_p.encoder2.attn_layers.1.emb_rel_v
311
+ vq_model.enc_p.encoder2.attn_layers.2.conv_k.bias
312
+ vq_model.enc_p.encoder2.attn_layers.2.conv_k.weight
313
+ vq_model.enc_p.encoder2.attn_layers.2.conv_o.bias
314
+ vq_model.enc_p.encoder2.attn_layers.2.conv_o.weight
315
+ vq_model.enc_p.encoder2.attn_layers.2.conv_q.bias
316
+ vq_model.enc_p.encoder2.attn_layers.2.conv_q.weight
317
+ vq_model.enc_p.encoder2.attn_layers.2.conv_v.bias
318
+ vq_model.enc_p.encoder2.attn_layers.2.conv_v.weight
319
+ vq_model.enc_p.encoder2.attn_layers.2.emb_rel_k
320
+ vq_model.enc_p.encoder2.attn_layers.2.emb_rel_v
321
+ vq_model.enc_p.encoder2.ffn_layers.0.conv_1.bias
322
+ vq_model.enc_p.encoder2.ffn_layers.0.conv_1.weight
323
+ vq_model.enc_p.encoder2.ffn_layers.0.conv_2.bias
324
+ vq_model.enc_p.encoder2.ffn_layers.0.conv_2.weight
325
+ vq_model.enc_p.encoder2.ffn_layers.1.conv_1.bias
326
+ vq_model.enc_p.encoder2.ffn_layers.1.conv_1.weight
327
+ vq_model.enc_p.encoder2.ffn_layers.1.conv_2.bias
328
+ vq_model.enc_p.encoder2.ffn_layers.1.conv_2.weight
329
+ vq_model.enc_p.encoder2.ffn_layers.2.conv_1.bias
330
+ vq_model.enc_p.encoder2.ffn_layers.2.conv_1.weight
331
+ vq_model.enc_p.encoder2.ffn_layers.2.conv_2.bias
332
+ vq_model.enc_p.encoder2.ffn_layers.2.conv_2.weight
333
+ vq_model.enc_p.encoder2.norm_layers_1.0.beta
334
+ vq_model.enc_p.encoder2.norm_layers_1.0.gamma
335
+ vq_model.enc_p.encoder2.norm_layers_1.1.beta
336
+ vq_model.enc_p.encoder2.norm_layers_1.1.gamma
337
+ vq_model.enc_p.encoder2.norm_layers_1.2.beta
338
+ vq_model.enc_p.encoder2.norm_layers_1.2.gamma
339
+ vq_model.enc_p.encoder2.norm_layers_2.0.beta
340
+ vq_model.enc_p.encoder2.norm_layers_2.0.gamma
341
+ vq_model.enc_p.encoder2.norm_layers_2.1.beta
342
+ vq_model.enc_p.encoder2.norm_layers_2.1.gamma
343
+ vq_model.enc_p.encoder2.norm_layers_2.2.beta
344
+ vq_model.enc_p.encoder2.norm_layers_2.2.gamma
345
+ vq_model.enc_p.encoder_ssl.attn_layers.0.conv_k.bias
346
+ vq_model.enc_p.encoder_ssl.attn_layers.0.conv_k.weight
347
+ vq_model.enc_p.encoder_ssl.attn_layers.0.conv_o.bias
348
+ vq_model.enc_p.encoder_ssl.attn_layers.0.conv_o.weight
349
+ vq_model.enc_p.encoder_ssl.attn_layers.0.conv_q.bias
350
+ vq_model.enc_p.encoder_ssl.attn_layers.0.conv_q.weight
351
+ vq_model.enc_p.encoder_ssl.attn_layers.0.conv_v.bias
352
+ vq_model.enc_p.encoder_ssl.attn_layers.0.conv_v.weight
353
+ vq_model.enc_p.encoder_ssl.attn_layers.0.emb_rel_k
354
+ vq_model.enc_p.encoder_ssl.attn_layers.0.emb_rel_v
355
+ vq_model.enc_p.encoder_ssl.attn_layers.1.conv_k.bias
356
+ vq_model.enc_p.encoder_ssl.attn_layers.1.conv_k.weight
357
+ vq_model.enc_p.encoder_ssl.attn_layers.1.conv_o.bias
358
+ vq_model.enc_p.encoder_ssl.attn_layers.1.conv_o.weight
359
+ vq_model.enc_p.encoder_ssl.attn_layers.1.conv_q.bias
360
+ vq_model.enc_p.encoder_ssl.attn_layers.1.conv_q.weight
361
+ vq_model.enc_p.encoder_ssl.attn_layers.1.conv_v.bias
362
+ vq_model.enc_p.encoder_ssl.attn_layers.1.conv_v.weight
363
+ vq_model.enc_p.encoder_ssl.attn_layers.1.emb_rel_k
364
+ vq_model.enc_p.encoder_ssl.attn_layers.1.emb_rel_v
365
+ vq_model.enc_p.encoder_ssl.attn_layers.2.conv_k.bias
366
+ vq_model.enc_p.encoder_ssl.attn_layers.2.conv_k.weight
367
+ vq_model.enc_p.encoder_ssl.attn_layers.2.conv_o.bias
368
+ vq_model.enc_p.encoder_ssl.attn_layers.2.conv_o.weight
369
+ vq_model.enc_p.encoder_ssl.attn_layers.2.conv_q.bias
370
+ vq_model.enc_p.encoder_ssl.attn_layers.2.conv_q.weight
371
+ vq_model.enc_p.encoder_ssl.attn_layers.2.conv_v.bias
372
+ vq_model.enc_p.encoder_ssl.attn_layers.2.conv_v.weight
373
+ vq_model.enc_p.encoder_ssl.attn_layers.2.emb_rel_k
374
+ vq_model.enc_p.encoder_ssl.attn_layers.2.emb_rel_v
375
+ vq_model.enc_p.encoder_ssl.ffn_layers.0.conv_1.bias
376
+ vq_model.enc_p.encoder_ssl.ffn_layers.0.conv_1.weight
377
+ vq_model.enc_p.encoder_ssl.ffn_layers.0.conv_2.bias
378
+ vq_model.enc_p.encoder_ssl.ffn_layers.0.conv_2.weight
379
+ vq_model.enc_p.encoder_ssl.ffn_layers.1.conv_1.bias
380
+ vq_model.enc_p.encoder_ssl.ffn_layers.1.conv_1.weight
381
+ vq_model.enc_p.encoder_ssl.ffn_layers.1.conv_2.bias
382
+ vq_model.enc_p.encoder_ssl.ffn_layers.1.conv_2.weight
383
+ vq_model.enc_p.encoder_ssl.ffn_layers.2.conv_1.bias
384
+ vq_model.enc_p.encoder_ssl.ffn_layers.2.conv_1.weight
385
+ vq_model.enc_p.encoder_ssl.ffn_layers.2.conv_2.bias
386
+ vq_model.enc_p.encoder_ssl.ffn_layers.2.conv_2.weight
387
+ vq_model.enc_p.encoder_ssl.norm_layers_1.0.beta
388
+ vq_model.enc_p.encoder_ssl.norm_layers_1.0.gamma
389
+ vq_model.enc_p.encoder_ssl.norm_layers_1.1.beta
390
+ vq_model.enc_p.encoder_ssl.norm_layers_1.1.gamma
391
+ vq_model.enc_p.encoder_ssl.norm_layers_1.2.beta
392
+ vq_model.enc_p.encoder_ssl.norm_layers_1.2.gamma
393
+ vq_model.enc_p.encoder_ssl.norm_layers_2.0.beta
394
+ vq_model.enc_p.encoder_ssl.norm_layers_2.0.gamma
395
+ vq_model.enc_p.encoder_ssl.norm_layers_2.1.beta
396
+ vq_model.enc_p.encoder_ssl.norm_layers_2.1.gamma
397
+ vq_model.enc_p.encoder_ssl.norm_layers_2.2.beta
398
+ vq_model.enc_p.encoder_ssl.norm_layers_2.2.gamma
399
+ vq_model.enc_p.encoder_text.attn_layers.0.conv_k.bias
400
+ vq_model.enc_p.encoder_text.attn_layers.0.conv_k.weight
401
+ vq_model.enc_p.encoder_text.attn_layers.0.conv_o.bias
402
+ vq_model.enc_p.encoder_text.attn_layers.0.conv_o.weight
403
+ vq_model.enc_p.encoder_text.attn_layers.0.conv_q.bias
404
+ vq_model.enc_p.encoder_text.attn_layers.0.conv_q.weight
405
+ vq_model.enc_p.encoder_text.attn_layers.0.conv_v.bias
406
+ vq_model.enc_p.encoder_text.attn_layers.0.conv_v.weight
407
+ vq_model.enc_p.encoder_text.attn_layers.0.emb_rel_k
408
+ vq_model.enc_p.encoder_text.attn_layers.0.emb_rel_v
409
+ vq_model.enc_p.encoder_text.attn_layers.1.conv_k.bias
410
+ vq_model.enc_p.encoder_text.attn_layers.1.conv_k.weight
411
+ vq_model.enc_p.encoder_text.attn_layers.1.conv_o.bias
412
+ vq_model.enc_p.encoder_text.attn_layers.1.conv_o.weight
413
+ vq_model.enc_p.encoder_text.attn_layers.1.conv_q.bias
414
+ vq_model.enc_p.encoder_text.attn_layers.1.conv_q.weight
415
+ vq_model.enc_p.encoder_text.attn_layers.1.conv_v.bias
416
+ vq_model.enc_p.encoder_text.attn_layers.1.conv_v.weight
417
+ vq_model.enc_p.encoder_text.attn_layers.1.emb_rel_k
418
+ vq_model.enc_p.encoder_text.attn_layers.1.emb_rel_v
419
+ vq_model.enc_p.encoder_text.attn_layers.2.conv_k.bias
420
+ vq_model.enc_p.encoder_text.attn_layers.2.conv_k.weight
421
+ vq_model.enc_p.encoder_text.attn_layers.2.conv_o.bias
422
+ vq_model.enc_p.encoder_text.attn_layers.2.conv_o.weight
423
+ vq_model.enc_p.encoder_text.attn_layers.2.conv_q.bias
424
+ vq_model.enc_p.encoder_text.attn_layers.2.conv_q.weight
425
+ vq_model.enc_p.encoder_text.attn_layers.2.conv_v.bias
426
+ vq_model.enc_p.encoder_text.attn_layers.2.conv_v.weight
427
+ vq_model.enc_p.encoder_text.attn_layers.2.emb_rel_k
428
+ vq_model.enc_p.encoder_text.attn_layers.2.emb_rel_v
429
+ vq_model.enc_p.encoder_text.attn_layers.3.conv_k.bias
430
+ vq_model.enc_p.encoder_text.attn_layers.3.conv_k.weight
431
+ vq_model.enc_p.encoder_text.attn_layers.3.conv_o.bias
432
+ vq_model.enc_p.encoder_text.attn_layers.3.conv_o.weight
433
+ vq_model.enc_p.encoder_text.attn_layers.3.conv_q.bias
434
+ vq_model.enc_p.encoder_text.attn_layers.3.conv_q.weight
435
+ vq_model.enc_p.encoder_text.attn_layers.3.conv_v.bias
436
+ vq_model.enc_p.encoder_text.attn_layers.3.conv_v.weight
437
+ vq_model.enc_p.encoder_text.attn_layers.3.emb_rel_k
438
+ vq_model.enc_p.encoder_text.attn_layers.3.emb_rel_v
439
+ vq_model.enc_p.encoder_text.attn_layers.4.conv_k.bias
440
+ vq_model.enc_p.encoder_text.attn_layers.4.conv_k.weight
441
+ vq_model.enc_p.encoder_text.attn_layers.4.conv_o.bias
442
+ vq_model.enc_p.encoder_text.attn_layers.4.conv_o.weight
443
+ vq_model.enc_p.encoder_text.attn_layers.4.conv_q.bias
444
+ vq_model.enc_p.encoder_text.attn_layers.4.conv_q.weight
445
+ vq_model.enc_p.encoder_text.attn_layers.4.conv_v.bias
446
+ vq_model.enc_p.encoder_text.attn_layers.4.conv_v.weight
447
+ vq_model.enc_p.encoder_text.attn_layers.4.emb_rel_k
448
+ vq_model.enc_p.encoder_text.attn_layers.4.emb_rel_v
449
+ vq_model.enc_p.encoder_text.attn_layers.5.conv_k.bias
450
+ vq_model.enc_p.encoder_text.attn_layers.5.conv_k.weight
451
+ vq_model.enc_p.encoder_text.attn_layers.5.conv_o.bias
452
+ vq_model.enc_p.encoder_text.attn_layers.5.conv_o.weight
453
+ vq_model.enc_p.encoder_text.attn_layers.5.conv_q.bias
454
+ vq_model.enc_p.encoder_text.attn_layers.5.conv_q.weight
455
+ vq_model.enc_p.encoder_text.attn_layers.5.conv_v.bias
456
+ vq_model.enc_p.encoder_text.attn_layers.5.conv_v.weight
457
+ vq_model.enc_p.encoder_text.attn_layers.5.emb_rel_k
458
+ vq_model.enc_p.encoder_text.attn_layers.5.emb_rel_v
459
+ vq_model.enc_p.encoder_text.ffn_layers.0.conv_1.bias
460
+ vq_model.enc_p.encoder_text.ffn_layers.0.conv_1.weight
461
+ vq_model.enc_p.encoder_text.ffn_layers.0.conv_2.bias
462
+ vq_model.enc_p.encoder_text.ffn_layers.0.conv_2.weight
463
+ vq_model.enc_p.encoder_text.ffn_layers.1.conv_1.bias
464
+ vq_model.enc_p.encoder_text.ffn_layers.1.conv_1.weight
465
+ vq_model.enc_p.encoder_text.ffn_layers.1.conv_2.bias
466
+ vq_model.enc_p.encoder_text.ffn_layers.1.conv_2.weight
467
+ vq_model.enc_p.encoder_text.ffn_layers.2.conv_1.bias
468
+ vq_model.enc_p.encoder_text.ffn_layers.2.conv_1.weight
469
+ vq_model.enc_p.encoder_text.ffn_layers.2.conv_2.bias
470
+ vq_model.enc_p.encoder_text.ffn_layers.2.conv_2.weight
471
+ vq_model.enc_p.encoder_text.ffn_layers.3.conv_1.bias
472
+ vq_model.enc_p.encoder_text.ffn_layers.3.conv_1.weight
473
+ vq_model.enc_p.encoder_text.ffn_layers.3.conv_2.bias
474
+ vq_model.enc_p.encoder_text.ffn_layers.3.conv_2.weight
475
+ vq_model.enc_p.encoder_text.ffn_layers.4.conv_1.bias
476
+ vq_model.enc_p.encoder_text.ffn_layers.4.conv_1.weight
477
+ vq_model.enc_p.encoder_text.ffn_layers.4.conv_2.bias
478
+ vq_model.enc_p.encoder_text.ffn_layers.4.conv_2.weight
479
+ vq_model.enc_p.encoder_text.ffn_layers.5.conv_1.bias
480
+ vq_model.enc_p.encoder_text.ffn_layers.5.conv_1.weight
481
+ vq_model.enc_p.encoder_text.ffn_layers.5.conv_2.bias
482
+ vq_model.enc_p.encoder_text.ffn_layers.5.conv_2.weight
483
+ vq_model.enc_p.encoder_text.norm_layers_1.0.beta
484
+ vq_model.enc_p.encoder_text.norm_layers_1.0.gamma
485
+ vq_model.enc_p.encoder_text.norm_layers_1.1.beta
486
+ vq_model.enc_p.encoder_text.norm_layers_1.1.gamma
487
+ vq_model.enc_p.encoder_text.norm_layers_1.2.beta
488
+ vq_model.enc_p.encoder_text.norm_layers_1.2.gamma
489
+ vq_model.enc_p.encoder_text.norm_layers_1.3.beta
490
+ vq_model.enc_p.encoder_text.norm_layers_1.3.gamma
491
+ vq_model.enc_p.encoder_text.norm_layers_1.4.beta
492
+ vq_model.enc_p.encoder_text.norm_layers_1.4.gamma
493
+ vq_model.enc_p.encoder_text.norm_layers_1.5.beta
494
+ vq_model.enc_p.encoder_text.norm_layers_1.5.gamma
495
+ vq_model.enc_p.encoder_text.norm_layers_2.0.beta
496
+ vq_model.enc_p.encoder_text.norm_layers_2.0.gamma
497
+ vq_model.enc_p.encoder_text.norm_layers_2.1.beta
498
+ vq_model.enc_p.encoder_text.norm_layers_2.1.gamma
499
+ vq_model.enc_p.encoder_text.norm_layers_2.2.beta
500
+ vq_model.enc_p.encoder_text.norm_layers_2.2.gamma
501
+ vq_model.enc_p.encoder_text.norm_layers_2.3.beta
502
+ vq_model.enc_p.encoder_text.norm_layers_2.3.gamma
503
+ vq_model.enc_p.encoder_text.norm_layers_2.4.beta
504
+ vq_model.enc_p.encoder_text.norm_layers_2.4.gamma
505
+ vq_model.enc_p.encoder_text.norm_layers_2.5.beta
506
+ vq_model.enc_p.encoder_text.norm_layers_2.5.gamma
507
+ vq_model.enc_p.mrte.c_post.bias
508
+ vq_model.enc_p.mrte.c_post.weight
509
+ vq_model.enc_p.mrte.c_pre.bias
510
+ vq_model.enc_p.mrte.c_pre.weight
511
+ vq_model.enc_p.mrte.cross_attention.conv_k.bias
512
+ vq_model.enc_p.mrte.cross_attention.conv_k.weight
513
+ vq_model.enc_p.mrte.cross_attention.conv_o.bias
514
+ vq_model.enc_p.mrte.cross_attention.conv_o.weight
515
+ vq_model.enc_p.mrte.cross_attention.conv_q.bias
516
+ vq_model.enc_p.mrte.cross_attention.conv_q.weight
517
+ vq_model.enc_p.mrte.cross_attention.conv_v.bias
518
+ vq_model.enc_p.mrte.cross_attention.conv_v.weight
519
+ vq_model.enc_p.mrte.text_pre.bias
520
+ vq_model.enc_p.mrte.text_pre.weight
521
+ vq_model.enc_p.proj.bias
522
+ vq_model.enc_p.proj.weight
523
+ vq_model.enc_p.ssl_proj.bias
524
+ vq_model.enc_p.ssl_proj.weight
525
+ vq_model.enc_p.text_embedding.weight
526
+ vq_model.flow.flows.0.enc.cond_layer.bias
527
+ vq_model.flow.flows.0.enc.cond_layer.weight_g
528
+ vq_model.flow.flows.0.enc.cond_layer.weight_v
529
+ vq_model.flow.flows.0.enc.in_layers.0.bias
530
+ vq_model.flow.flows.0.enc.in_layers.0.weight_g
531
+ vq_model.flow.flows.0.enc.in_layers.0.weight_v
532
+ vq_model.flow.flows.0.enc.in_layers.1.bias
533
+ vq_model.flow.flows.0.enc.in_layers.1.weight_g
534
+ vq_model.flow.flows.0.enc.in_layers.1.weight_v
535
+ vq_model.flow.flows.0.enc.in_layers.2.bias
536
+ vq_model.flow.flows.0.enc.in_layers.2.weight_g
537
+ vq_model.flow.flows.0.enc.in_layers.2.weight_v
538
+ vq_model.flow.flows.0.enc.in_layers.3.bias
539
+ vq_model.flow.flows.0.enc.in_layers.3.weight_g
540
+ vq_model.flow.flows.0.enc.in_layers.3.weight_v
541
+ vq_model.flow.flows.0.enc.res_skip_layers.0.bias
542
+ vq_model.flow.flows.0.enc.res_skip_layers.0.weight_g
543
+ vq_model.flow.flows.0.enc.res_skip_layers.0.weight_v
544
+ vq_model.flow.flows.0.enc.res_skip_layers.1.bias
545
+ vq_model.flow.flows.0.enc.res_skip_layers.1.weight_g
546
+ vq_model.flow.flows.0.enc.res_skip_layers.1.weight_v
547
+ vq_model.flow.flows.0.enc.res_skip_layers.2.bias
548
+ vq_model.flow.flows.0.enc.res_skip_layers.2.weight_g
549
+ vq_model.flow.flows.0.enc.res_skip_layers.2.weight_v
550
+ vq_model.flow.flows.0.enc.res_skip_layers.3.bias
551
+ vq_model.flow.flows.0.enc.res_skip_layers.3.weight_g
552
+ vq_model.flow.flows.0.enc.res_skip_layers.3.weight_v
553
+ vq_model.flow.flows.0.post.bias
554
+ vq_model.flow.flows.0.post.weight
555
+ vq_model.flow.flows.0.pre.bias
556
+ vq_model.flow.flows.0.pre.weight
557
+ vq_model.flow.flows.2.enc.cond_layer.bias
558
+ vq_model.flow.flows.2.enc.cond_layer.weight_g
559
+ vq_model.flow.flows.2.enc.cond_layer.weight_v
560
+ vq_model.flow.flows.2.enc.in_layers.0.bias
561
+ vq_model.flow.flows.2.enc.in_layers.0.weight_g
562
+ vq_model.flow.flows.2.enc.in_layers.0.weight_v
563
+ vq_model.flow.flows.2.enc.in_layers.1.bias
564
+ vq_model.flow.flows.2.enc.in_layers.1.weight_g
565
+ vq_model.flow.flows.2.enc.in_layers.1.weight_v
566
+ vq_model.flow.flows.2.enc.in_layers.2.bias
567
+ vq_model.flow.flows.2.enc.in_layers.2.weight_g
568
+ vq_model.flow.flows.2.enc.in_layers.2.weight_v
569
+ vq_model.flow.flows.2.enc.in_layers.3.bias
570
+ vq_model.flow.flows.2.enc.in_layers.3.weight_g
571
+ vq_model.flow.flows.2.enc.in_layers.3.weight_v
572
+ vq_model.flow.flows.2.enc.res_skip_layers.0.bias
573
+ vq_model.flow.flows.2.enc.res_skip_layers.0.weight_g
574
+ vq_model.flow.flows.2.enc.res_skip_layers.0.weight_v
575
+ vq_model.flow.flows.2.enc.res_skip_layers.1.bias
576
+ vq_model.flow.flows.2.enc.res_skip_layers.1.weight_g
577
+ vq_model.flow.flows.2.enc.res_skip_layers.1.weight_v
578
+ vq_model.flow.flows.2.enc.res_skip_layers.2.bias
579
+ vq_model.flow.flows.2.enc.res_skip_layers.2.weight_g
580
+ vq_model.flow.flows.2.enc.res_skip_layers.2.weight_v
581
+ vq_model.flow.flows.2.enc.res_skip_layers.3.bias
582
+ vq_model.flow.flows.2.enc.res_skip_layers.3.weight_g
583
+ vq_model.flow.flows.2.enc.res_skip_layers.3.weight_v
584
+ vq_model.flow.flows.2.post.bias
585
+ vq_model.flow.flows.2.post.weight
586
+ vq_model.flow.flows.2.pre.bias
587
+ vq_model.flow.flows.2.pre.weight
588
+ vq_model.flow.flows.4.enc.cond_layer.bias
589
+ vq_model.flow.flows.4.enc.cond_layer.weight_g
590
+ vq_model.flow.flows.4.enc.cond_layer.weight_v
591
+ vq_model.flow.flows.4.enc.in_layers.0.bias
592
+ vq_model.flow.flows.4.enc.in_layers.0.weight_g
593
+ vq_model.flow.flows.4.enc.in_layers.0.weight_v
594
+ vq_model.flow.flows.4.enc.in_layers.1.bias
595
+ vq_model.flow.flows.4.enc.in_layers.1.weight_g
596
+ vq_model.flow.flows.4.enc.in_layers.1.weight_v
597
+ vq_model.flow.flows.4.enc.in_layers.2.bias
598
+ vq_model.flow.flows.4.enc.in_layers.2.weight_g
599
+ vq_model.flow.flows.4.enc.in_layers.2.weight_v
600
+ vq_model.flow.flows.4.enc.in_layers.3.bias
601
+ vq_model.flow.flows.4.enc.in_layers.3.weight_g
602
+ vq_model.flow.flows.4.enc.in_layers.3.weight_v
603
+ vq_model.flow.flows.4.enc.res_skip_layers.0.bias
604
+ vq_model.flow.flows.4.enc.res_skip_layers.0.weight_g
605
+ vq_model.flow.flows.4.enc.res_skip_layers.0.weight_v
606
+ vq_model.flow.flows.4.enc.res_skip_layers.1.bias
607
+ vq_model.flow.flows.4.enc.res_skip_layers.1.weight_g
608
+ vq_model.flow.flows.4.enc.res_skip_layers.1.weight_v
609
+ vq_model.flow.flows.4.enc.res_skip_layers.2.bias
610
+ vq_model.flow.flows.4.enc.res_skip_layers.2.weight_g
611
+ vq_model.flow.flows.4.enc.res_skip_layers.2.weight_v
612
+ vq_model.flow.flows.4.enc.res_skip_layers.3.bias
613
+ vq_model.flow.flows.4.enc.res_skip_layers.3.weight_g
614
+ vq_model.flow.flows.4.enc.res_skip_layers.3.weight_v
615
+ vq_model.flow.flows.4.post.bias
616
+ vq_model.flow.flows.4.post.weight
617
+ vq_model.flow.flows.4.pre.bias
618
+ vq_model.flow.flows.4.pre.weight
619
+ vq_model.flow.flows.6.enc.cond_layer.bias
620
+ vq_model.flow.flows.6.enc.cond_layer.weight_g
621
+ vq_model.flow.flows.6.enc.cond_layer.weight_v
622
+ vq_model.flow.flows.6.enc.in_layers.0.bias
623
+ vq_model.flow.flows.6.enc.in_layers.0.weight_g
624
+ vq_model.flow.flows.6.enc.in_layers.0.weight_v
625
+ vq_model.flow.flows.6.enc.in_layers.1.bias
626
+ vq_model.flow.flows.6.enc.in_layers.1.weight_g
627
+ vq_model.flow.flows.6.enc.in_layers.1.weight_v
628
+ vq_model.flow.flows.6.enc.in_layers.2.bias
629
+ vq_model.flow.flows.6.enc.in_layers.2.weight_g
630
+ vq_model.flow.flows.6.enc.in_layers.2.weight_v
631
+ vq_model.flow.flows.6.enc.in_layers.3.bias
632
+ vq_model.flow.flows.6.enc.in_layers.3.weight_g
633
+ vq_model.flow.flows.6.enc.in_layers.3.weight_v
634
+ vq_model.flow.flows.6.enc.res_skip_layers.0.bias
635
+ vq_model.flow.flows.6.enc.res_skip_layers.0.weight_g
636
+ vq_model.flow.flows.6.enc.res_skip_layers.0.weight_v
637
+ vq_model.flow.flows.6.enc.res_skip_layers.1.bias
638
+ vq_model.flow.flows.6.enc.res_skip_layers.1.weight_g
639
+ vq_model.flow.flows.6.enc.res_skip_layers.1.weight_v
640
+ vq_model.flow.flows.6.enc.res_skip_layers.2.bias
641
+ vq_model.flow.flows.6.enc.res_skip_layers.2.weight_g
642
+ vq_model.flow.flows.6.enc.res_skip_layers.2.weight_v
643
+ vq_model.flow.flows.6.enc.res_skip_layers.3.bias
644
+ vq_model.flow.flows.6.enc.res_skip_layers.3.weight_g
645
+ vq_model.flow.flows.6.enc.res_skip_layers.3.weight_v
646
+ vq_model.flow.flows.6.post.bias
647
+ vq_model.flow.flows.6.post.weight
648
+ vq_model.flow.flows.6.pre.bias
649
+ vq_model.flow.flows.6.pre.weight
650
+ vq_model.quantizer.vq.layers.0._codebook.embed
651
+ vq_model.ref_enc.fc.fc.bias
652
+ vq_model.ref_enc.fc.fc.weight
653
+ vq_model.ref_enc.slf_attn.fc.bias
654
+ vq_model.ref_enc.slf_attn.fc.weight
655
+ vq_model.ref_enc.slf_attn.w_ks.bias
656
+ vq_model.ref_enc.slf_attn.w_ks.weight
657
+ vq_model.ref_enc.slf_attn.w_qs.bias
658
+ vq_model.ref_enc.slf_attn.w_qs.weight
659
+ vq_model.ref_enc.slf_attn.w_vs.bias
660
+ vq_model.ref_enc.slf_attn.w_vs.weight
661
+ vq_model.ref_enc.spectral.0.fc.bias
662
+ vq_model.ref_enc.spectral.0.fc.weight
663
+ vq_model.ref_enc.spectral.3.fc.bias
664
+ vq_model.ref_enc.spectral.3.fc.weight
665
+ vq_model.ref_enc.temporal.0.conv1.conv.bias
666
+ vq_model.ref_enc.temporal.0.conv1.conv.weight
667
+ vq_model.ref_enc.temporal.1.conv1.conv.bias
668
+ vq_model.ref_enc.temporal.1.conv1.conv.weight
genie_tts/Data/v2/Models/t2s_encoder_fp32.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f6eb1acd47c8e6d36b777886981a49122e8e070a5eb9888d458fb188dc139f75
3
+ size 14568
genie_tts/Data/v2/Models/t2s_first_stage_decoder_fp32.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ee7ddd22f27247f49b028c5ae8911f1f8b9ce97538c612dbb9f96057799da4c2
3
+ size 423076
genie_tts/Data/v2/Models/t2s_stage_decoder_fp32.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:de6189e17efbad95479be95ca3691f19e7c69bdaaf63b171aa27283e41660dea
3
+ size 422151
genie_tts/Data/v2/Models/vits_fp32.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:634c21025f9fea03901c1fb5741a98ada512adeb67183e79b261c059fa4d842a
3
+ size 1654845
genie_tts/Data/v2ProPlus/Keys/prompt_encoder_weights.txt ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ref_enc.spectral.0.fc.weight
2
+ ref_enc.spectral.0.fc.bias
3
+ ref_enc.spectral.3.fc.weight
4
+ ref_enc.spectral.3.fc.bias
5
+ ref_enc.temporal.0.conv1.conv.weight
6
+ ref_enc.temporal.0.conv1.conv.bias
7
+ ref_enc.temporal.1.conv1.conv.weight
8
+ ref_enc.temporal.1.conv1.conv.bias
9
+ ref_enc.slf_attn.w_qs.weight
10
+ ref_enc.slf_attn.w_qs.bias
11
+ ref_enc.slf_attn.w_ks.weight
12
+ ref_enc.slf_attn.w_ks.bias
13
+ ref_enc.slf_attn.w_vs.weight
14
+ ref_enc.slf_attn.w_vs.bias
15
+ ref_enc.slf_attn.fc.weight
16
+ ref_enc.slf_attn.fc.bias
17
+ ref_enc.fc.fc.weight
18
+ ref_enc.fc.fc.bias
19
+ sv_emb.weight
20
+ sv_emb.bias
21
+ ge_to512.weight
22
+ ge_to512.bias
23
+ prelu.weight
genie_tts/Data/v2ProPlus/Keys/vits_weights.txt ADDED
@@ -0,0 +1,650 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ vq_model.enc_p.ssl_proj.weight
2
+ vq_model.enc_p.ssl_proj.bias
3
+ vq_model.enc_p.encoder_ssl.attn_layers.0.emb_rel_k
4
+ vq_model.enc_p.encoder_ssl.attn_layers.0.emb_rel_v
5
+ vq_model.enc_p.encoder_ssl.attn_layers.0.conv_q.weight
6
+ vq_model.enc_p.encoder_ssl.attn_layers.0.conv_q.bias
7
+ vq_model.enc_p.encoder_ssl.attn_layers.0.conv_k.weight
8
+ vq_model.enc_p.encoder_ssl.attn_layers.0.conv_k.bias
9
+ vq_model.enc_p.encoder_ssl.attn_layers.0.conv_v.weight
10
+ vq_model.enc_p.encoder_ssl.attn_layers.0.conv_v.bias
11
+ vq_model.enc_p.encoder_ssl.attn_layers.0.conv_o.weight
12
+ vq_model.enc_p.encoder_ssl.attn_layers.0.conv_o.bias
13
+ vq_model.enc_p.encoder_ssl.attn_layers.1.emb_rel_k
14
+ vq_model.enc_p.encoder_ssl.attn_layers.1.emb_rel_v
15
+ vq_model.enc_p.encoder_ssl.attn_layers.1.conv_q.weight
16
+ vq_model.enc_p.encoder_ssl.attn_layers.1.conv_q.bias
17
+ vq_model.enc_p.encoder_ssl.attn_layers.1.conv_k.weight
18
+ vq_model.enc_p.encoder_ssl.attn_layers.1.conv_k.bias
19
+ vq_model.enc_p.encoder_ssl.attn_layers.1.conv_v.weight
20
+ vq_model.enc_p.encoder_ssl.attn_layers.1.conv_v.bias
21
+ vq_model.enc_p.encoder_ssl.attn_layers.1.conv_o.weight
22
+ vq_model.enc_p.encoder_ssl.attn_layers.1.conv_o.bias
23
+ vq_model.enc_p.encoder_ssl.attn_layers.2.emb_rel_k
24
+ vq_model.enc_p.encoder_ssl.attn_layers.2.emb_rel_v
25
+ vq_model.enc_p.encoder_ssl.attn_layers.2.conv_q.weight
26
+ vq_model.enc_p.encoder_ssl.attn_layers.2.conv_q.bias
27
+ vq_model.enc_p.encoder_ssl.attn_layers.2.conv_k.weight
28
+ vq_model.enc_p.encoder_ssl.attn_layers.2.conv_k.bias
29
+ vq_model.enc_p.encoder_ssl.attn_layers.2.conv_v.weight
30
+ vq_model.enc_p.encoder_ssl.attn_layers.2.conv_v.bias
31
+ vq_model.enc_p.encoder_ssl.attn_layers.2.conv_o.weight
32
+ vq_model.enc_p.encoder_ssl.attn_layers.2.conv_o.bias
33
+ vq_model.enc_p.encoder_ssl.norm_layers_1.0.gamma
34
+ vq_model.enc_p.encoder_ssl.norm_layers_1.0.beta
35
+ vq_model.enc_p.encoder_ssl.norm_layers_1.1.gamma
36
+ vq_model.enc_p.encoder_ssl.norm_layers_1.1.beta
37
+ vq_model.enc_p.encoder_ssl.norm_layers_1.2.gamma
38
+ vq_model.enc_p.encoder_ssl.norm_layers_1.2.beta
39
+ vq_model.enc_p.encoder_ssl.ffn_layers.0.conv_1.weight
40
+ vq_model.enc_p.encoder_ssl.ffn_layers.0.conv_1.bias
41
+ vq_model.enc_p.encoder_ssl.ffn_layers.0.conv_2.weight
42
+ vq_model.enc_p.encoder_ssl.ffn_layers.0.conv_2.bias
43
+ vq_model.enc_p.encoder_ssl.ffn_layers.1.conv_1.weight
44
+ vq_model.enc_p.encoder_ssl.ffn_layers.1.conv_1.bias
45
+ vq_model.enc_p.encoder_ssl.ffn_layers.1.conv_2.weight
46
+ vq_model.enc_p.encoder_ssl.ffn_layers.1.conv_2.bias
47
+ vq_model.enc_p.encoder_ssl.ffn_layers.2.conv_1.weight
48
+ vq_model.enc_p.encoder_ssl.ffn_layers.2.conv_1.bias
49
+ vq_model.enc_p.encoder_ssl.ffn_layers.2.conv_2.weight
50
+ vq_model.enc_p.encoder_ssl.ffn_layers.2.conv_2.bias
51
+ vq_model.enc_p.encoder_ssl.norm_layers_2.0.gamma
52
+ vq_model.enc_p.encoder_ssl.norm_layers_2.0.beta
53
+ vq_model.enc_p.encoder_ssl.norm_layers_2.1.gamma
54
+ vq_model.enc_p.encoder_ssl.norm_layers_2.1.beta
55
+ vq_model.enc_p.encoder_ssl.norm_layers_2.2.gamma
56
+ vq_model.enc_p.encoder_ssl.norm_layers_2.2.beta
57
+ vq_model.enc_p.encoder_text.attn_layers.0.emb_rel_k
58
+ vq_model.enc_p.encoder_text.attn_layers.0.emb_rel_v
59
+ vq_model.enc_p.encoder_text.attn_layers.0.conv_q.weight
60
+ vq_model.enc_p.encoder_text.attn_layers.0.conv_q.bias
61
+ vq_model.enc_p.encoder_text.attn_layers.0.conv_k.weight
62
+ vq_model.enc_p.encoder_text.attn_layers.0.conv_k.bias
63
+ vq_model.enc_p.encoder_text.attn_layers.0.conv_v.weight
64
+ vq_model.enc_p.encoder_text.attn_layers.0.conv_v.bias
65
+ vq_model.enc_p.encoder_text.attn_layers.0.conv_o.weight
66
+ vq_model.enc_p.encoder_text.attn_layers.0.conv_o.bias
67
+ vq_model.enc_p.encoder_text.attn_layers.1.emb_rel_k
68
+ vq_model.enc_p.encoder_text.attn_layers.1.emb_rel_v
69
+ vq_model.enc_p.encoder_text.attn_layers.1.conv_q.weight
70
+ vq_model.enc_p.encoder_text.attn_layers.1.conv_q.bias
71
+ vq_model.enc_p.encoder_text.attn_layers.1.conv_k.weight
72
+ vq_model.enc_p.encoder_text.attn_layers.1.conv_k.bias
73
+ vq_model.enc_p.encoder_text.attn_layers.1.conv_v.weight
74
+ vq_model.enc_p.encoder_text.attn_layers.1.conv_v.bias
75
+ vq_model.enc_p.encoder_text.attn_layers.1.conv_o.weight
76
+ vq_model.enc_p.encoder_text.attn_layers.1.conv_o.bias
77
+ vq_model.enc_p.encoder_text.attn_layers.2.emb_rel_k
78
+ vq_model.enc_p.encoder_text.attn_layers.2.emb_rel_v
79
+ vq_model.enc_p.encoder_text.attn_layers.2.conv_q.weight
80
+ vq_model.enc_p.encoder_text.attn_layers.2.conv_q.bias
81
+ vq_model.enc_p.encoder_text.attn_layers.2.conv_k.weight
82
+ vq_model.enc_p.encoder_text.attn_layers.2.conv_k.bias
83
+ vq_model.enc_p.encoder_text.attn_layers.2.conv_v.weight
84
+ vq_model.enc_p.encoder_text.attn_layers.2.conv_v.bias
85
+ vq_model.enc_p.encoder_text.attn_layers.2.conv_o.weight
86
+ vq_model.enc_p.encoder_text.attn_layers.2.conv_o.bias
87
+ vq_model.enc_p.encoder_text.attn_layers.3.emb_rel_k
88
+ vq_model.enc_p.encoder_text.attn_layers.3.emb_rel_v
89
+ vq_model.enc_p.encoder_text.attn_layers.3.conv_q.weight
90
+ vq_model.enc_p.encoder_text.attn_layers.3.conv_q.bias
91
+ vq_model.enc_p.encoder_text.attn_layers.3.conv_k.weight
92
+ vq_model.enc_p.encoder_text.attn_layers.3.conv_k.bias
93
+ vq_model.enc_p.encoder_text.attn_layers.3.conv_v.weight
94
+ vq_model.enc_p.encoder_text.attn_layers.3.conv_v.bias
95
+ vq_model.enc_p.encoder_text.attn_layers.3.conv_o.weight
96
+ vq_model.enc_p.encoder_text.attn_layers.3.conv_o.bias
97
+ vq_model.enc_p.encoder_text.attn_layers.4.emb_rel_k
98
+ vq_model.enc_p.encoder_text.attn_layers.4.emb_rel_v
99
+ vq_model.enc_p.encoder_text.attn_layers.4.conv_q.weight
100
+ vq_model.enc_p.encoder_text.attn_layers.4.conv_q.bias
101
+ vq_model.enc_p.encoder_text.attn_layers.4.conv_k.weight
102
+ vq_model.enc_p.encoder_text.attn_layers.4.conv_k.bias
103
+ vq_model.enc_p.encoder_text.attn_layers.4.conv_v.weight
104
+ vq_model.enc_p.encoder_text.attn_layers.4.conv_v.bias
105
+ vq_model.enc_p.encoder_text.attn_layers.4.conv_o.weight
106
+ vq_model.enc_p.encoder_text.attn_layers.4.conv_o.bias
107
+ vq_model.enc_p.encoder_text.attn_layers.5.emb_rel_k
108
+ vq_model.enc_p.encoder_text.attn_layers.5.emb_rel_v
109
+ vq_model.enc_p.encoder_text.attn_layers.5.conv_q.weight
110
+ vq_model.enc_p.encoder_text.attn_layers.5.conv_q.bias
111
+ vq_model.enc_p.encoder_text.attn_layers.5.conv_k.weight
112
+ vq_model.enc_p.encoder_text.attn_layers.5.conv_k.bias
113
+ vq_model.enc_p.encoder_text.attn_layers.5.conv_v.weight
114
+ vq_model.enc_p.encoder_text.attn_layers.5.conv_v.bias
115
+ vq_model.enc_p.encoder_text.attn_layers.5.conv_o.weight
116
+ vq_model.enc_p.encoder_text.attn_layers.5.conv_o.bias
117
+ vq_model.enc_p.encoder_text.norm_layers_1.0.gamma
118
+ vq_model.enc_p.encoder_text.norm_layers_1.0.beta
119
+ vq_model.enc_p.encoder_text.norm_layers_1.1.gamma
120
+ vq_model.enc_p.encoder_text.norm_layers_1.1.beta
121
+ vq_model.enc_p.encoder_text.norm_layers_1.2.gamma
122
+ vq_model.enc_p.encoder_text.norm_layers_1.2.beta
123
+ vq_model.enc_p.encoder_text.norm_layers_1.3.gamma
124
+ vq_model.enc_p.encoder_text.norm_layers_1.3.beta
125
+ vq_model.enc_p.encoder_text.norm_layers_1.4.gamma
126
+ vq_model.enc_p.encoder_text.norm_layers_1.4.beta
127
+ vq_model.enc_p.encoder_text.norm_layers_1.5.gamma
128
+ vq_model.enc_p.encoder_text.norm_layers_1.5.beta
129
+ vq_model.enc_p.encoder_text.ffn_layers.0.conv_1.weight
130
+ vq_model.enc_p.encoder_text.ffn_layers.0.conv_1.bias
131
+ vq_model.enc_p.encoder_text.ffn_layers.0.conv_2.weight
132
+ vq_model.enc_p.encoder_text.ffn_layers.0.conv_2.bias
133
+ vq_model.enc_p.encoder_text.ffn_layers.1.conv_1.weight
134
+ vq_model.enc_p.encoder_text.ffn_layers.1.conv_1.bias
135
+ vq_model.enc_p.encoder_text.ffn_layers.1.conv_2.weight
136
+ vq_model.enc_p.encoder_text.ffn_layers.1.conv_2.bias
137
+ vq_model.enc_p.encoder_text.ffn_layers.2.conv_1.weight
138
+ vq_model.enc_p.encoder_text.ffn_layers.2.conv_1.bias
139
+ vq_model.enc_p.encoder_text.ffn_layers.2.conv_2.weight
140
+ vq_model.enc_p.encoder_text.ffn_layers.2.conv_2.bias
141
+ vq_model.enc_p.encoder_text.ffn_layers.3.conv_1.weight
142
+ vq_model.enc_p.encoder_text.ffn_layers.3.conv_1.bias
143
+ vq_model.enc_p.encoder_text.ffn_layers.3.conv_2.weight
144
+ vq_model.enc_p.encoder_text.ffn_layers.3.conv_2.bias
145
+ vq_model.enc_p.encoder_text.ffn_layers.4.conv_1.weight
146
+ vq_model.enc_p.encoder_text.ffn_layers.4.conv_1.bias
147
+ vq_model.enc_p.encoder_text.ffn_layers.4.conv_2.weight
148
+ vq_model.enc_p.encoder_text.ffn_layers.4.conv_2.bias
149
+ vq_model.enc_p.encoder_text.ffn_layers.5.conv_1.weight
150
+ vq_model.enc_p.encoder_text.ffn_layers.5.conv_1.bias
151
+ vq_model.enc_p.encoder_text.ffn_layers.5.conv_2.weight
152
+ vq_model.enc_p.encoder_text.ffn_layers.5.conv_2.bias
153
+ vq_model.enc_p.encoder_text.norm_layers_2.0.gamma
154
+ vq_model.enc_p.encoder_text.norm_layers_2.0.beta
155
+ vq_model.enc_p.encoder_text.norm_layers_2.1.gamma
156
+ vq_model.enc_p.encoder_text.norm_layers_2.1.beta
157
+ vq_model.enc_p.encoder_text.norm_layers_2.2.gamma
158
+ vq_model.enc_p.encoder_text.norm_layers_2.2.beta
159
+ vq_model.enc_p.encoder_text.norm_layers_2.3.gamma
160
+ vq_model.enc_p.encoder_text.norm_layers_2.3.beta
161
+ vq_model.enc_p.encoder_text.norm_layers_2.4.gamma
162
+ vq_model.enc_p.encoder_text.norm_layers_2.4.beta
163
+ vq_model.enc_p.encoder_text.norm_layers_2.5.gamma
164
+ vq_model.enc_p.encoder_text.norm_layers_2.5.beta
165
+ vq_model.enc_p.text_embedding.weight
166
+ vq_model.enc_p.mrte.cross_attention.conv_q.weight
167
+ vq_model.enc_p.mrte.cross_attention.conv_q.bias
168
+ vq_model.enc_p.mrte.cross_attention.conv_k.weight
169
+ vq_model.enc_p.mrte.cross_attention.conv_k.bias
170
+ vq_model.enc_p.mrte.cross_attention.conv_v.weight
171
+ vq_model.enc_p.mrte.cross_attention.conv_v.bias
172
+ vq_model.enc_p.mrte.cross_attention.conv_o.weight
173
+ vq_model.enc_p.mrte.cross_attention.conv_o.bias
174
+ vq_model.enc_p.mrte.c_pre.weight
175
+ vq_model.enc_p.mrte.c_pre.bias
176
+ vq_model.enc_p.mrte.text_pre.weight
177
+ vq_model.enc_p.mrte.text_pre.bias
178
+ vq_model.enc_p.mrte.c_post.weight
179
+ vq_model.enc_p.mrte.c_post.bias
180
+ vq_model.enc_p.encoder2.attn_layers.0.emb_rel_k
181
+ vq_model.enc_p.encoder2.attn_layers.0.emb_rel_v
182
+ vq_model.enc_p.encoder2.attn_layers.0.conv_q.weight
183
+ vq_model.enc_p.encoder2.attn_layers.0.conv_q.bias
184
+ vq_model.enc_p.encoder2.attn_layers.0.conv_k.weight
185
+ vq_model.enc_p.encoder2.attn_layers.0.conv_k.bias
186
+ vq_model.enc_p.encoder2.attn_layers.0.conv_v.weight
187
+ vq_model.enc_p.encoder2.attn_layers.0.conv_v.bias
188
+ vq_model.enc_p.encoder2.attn_layers.0.conv_o.weight
189
+ vq_model.enc_p.encoder2.attn_layers.0.conv_o.bias
190
+ vq_model.enc_p.encoder2.attn_layers.1.emb_rel_k
191
+ vq_model.enc_p.encoder2.attn_layers.1.emb_rel_v
192
+ vq_model.enc_p.encoder2.attn_layers.1.conv_q.weight
193
+ vq_model.enc_p.encoder2.attn_layers.1.conv_q.bias
194
+ vq_model.enc_p.encoder2.attn_layers.1.conv_k.weight
195
+ vq_model.enc_p.encoder2.attn_layers.1.conv_k.bias
196
+ vq_model.enc_p.encoder2.attn_layers.1.conv_v.weight
197
+ vq_model.enc_p.encoder2.attn_layers.1.conv_v.bias
198
+ vq_model.enc_p.encoder2.attn_layers.1.conv_o.weight
199
+ vq_model.enc_p.encoder2.attn_layers.1.conv_o.bias
200
+ vq_model.enc_p.encoder2.attn_layers.2.emb_rel_k
201
+ vq_model.enc_p.encoder2.attn_layers.2.emb_rel_v
202
+ vq_model.enc_p.encoder2.attn_layers.2.conv_q.weight
203
+ vq_model.enc_p.encoder2.attn_layers.2.conv_q.bias
204
+ vq_model.enc_p.encoder2.attn_layers.2.conv_k.weight
205
+ vq_model.enc_p.encoder2.attn_layers.2.conv_k.bias
206
+ vq_model.enc_p.encoder2.attn_layers.2.conv_v.weight
207
+ vq_model.enc_p.encoder2.attn_layers.2.conv_v.bias
208
+ vq_model.enc_p.encoder2.attn_layers.2.conv_o.weight
209
+ vq_model.enc_p.encoder2.attn_layers.2.conv_o.bias
210
+ vq_model.enc_p.encoder2.norm_layers_1.0.gamma
211
+ vq_model.enc_p.encoder2.norm_layers_1.0.beta
212
+ vq_model.enc_p.encoder2.norm_layers_1.1.gamma
213
+ vq_model.enc_p.encoder2.norm_layers_1.1.beta
214
+ vq_model.enc_p.encoder2.norm_layers_1.2.gamma
215
+ vq_model.enc_p.encoder2.norm_layers_1.2.beta
216
+ vq_model.enc_p.encoder2.ffn_layers.0.conv_1.weight
217
+ vq_model.enc_p.encoder2.ffn_layers.0.conv_1.bias
218
+ vq_model.enc_p.encoder2.ffn_layers.0.conv_2.weight
219
+ vq_model.enc_p.encoder2.ffn_layers.0.conv_2.bias
220
+ vq_model.enc_p.encoder2.ffn_layers.1.conv_1.weight
221
+ vq_model.enc_p.encoder2.ffn_layers.1.conv_1.bias
222
+ vq_model.enc_p.encoder2.ffn_layers.1.conv_2.weight
223
+ vq_model.enc_p.encoder2.ffn_layers.1.conv_2.bias
224
+ vq_model.enc_p.encoder2.ffn_layers.2.conv_1.weight
225
+ vq_model.enc_p.encoder2.ffn_layers.2.conv_1.bias
226
+ vq_model.enc_p.encoder2.ffn_layers.2.conv_2.weight
227
+ vq_model.enc_p.encoder2.ffn_layers.2.conv_2.bias
228
+ vq_model.enc_p.encoder2.norm_layers_2.0.gamma
229
+ vq_model.enc_p.encoder2.norm_layers_2.0.beta
230
+ vq_model.enc_p.encoder2.norm_layers_2.1.gamma
231
+ vq_model.enc_p.encoder2.norm_layers_2.1.beta
232
+ vq_model.enc_p.encoder2.norm_layers_2.2.gamma
233
+ vq_model.enc_p.encoder2.norm_layers_2.2.beta
234
+ vq_model.enc_p.proj.weight
235
+ vq_model.enc_p.proj.bias
236
+ vq_model.dec.conv_pre.weight
237
+ vq_model.dec.conv_pre.bias
238
+ vq_model.dec.ups.0.bias
239
+ vq_model.dec.ups.0.weight_g
240
+ vq_model.dec.ups.0.weight_v
241
+ vq_model.dec.ups.1.bias
242
+ vq_model.dec.ups.1.weight_g
243
+ vq_model.dec.ups.1.weight_v
244
+ vq_model.dec.ups.2.bias
245
+ vq_model.dec.ups.2.weight_g
246
+ vq_model.dec.ups.2.weight_v
247
+ vq_model.dec.ups.3.bias
248
+ vq_model.dec.ups.3.weight_g
249
+ vq_model.dec.ups.3.weight_v
250
+ vq_model.dec.ups.4.bias
251
+ vq_model.dec.ups.4.weight_g
252
+ vq_model.dec.ups.4.weight_v
253
+ vq_model.dec.resblocks.0.convs1.0.bias
254
+ vq_model.dec.resblocks.0.convs1.0.weight_g
255
+ vq_model.dec.resblocks.0.convs1.0.weight_v
256
+ vq_model.dec.resblocks.0.convs1.1.bias
257
+ vq_model.dec.resblocks.0.convs1.1.weight_g
258
+ vq_model.dec.resblocks.0.convs1.1.weight_v
259
+ vq_model.dec.resblocks.0.convs1.2.bias
260
+ vq_model.dec.resblocks.0.convs1.2.weight_g
261
+ vq_model.dec.resblocks.0.convs1.2.weight_v
262
+ vq_model.dec.resblocks.0.convs2.0.bias
263
+ vq_model.dec.resblocks.0.convs2.0.weight_g
264
+ vq_model.dec.resblocks.0.convs2.0.weight_v
265
+ vq_model.dec.resblocks.0.convs2.1.bias
266
+ vq_model.dec.resblocks.0.convs2.1.weight_g
267
+ vq_model.dec.resblocks.0.convs2.1.weight_v
268
+ vq_model.dec.resblocks.0.convs2.2.bias
269
+ vq_model.dec.resblocks.0.convs2.2.weight_g
270
+ vq_model.dec.resblocks.0.convs2.2.weight_v
271
+ vq_model.dec.resblocks.1.convs1.0.bias
272
+ vq_model.dec.resblocks.1.convs1.0.weight_g
273
+ vq_model.dec.resblocks.1.convs1.0.weight_v
274
+ vq_model.dec.resblocks.1.convs1.1.bias
275
+ vq_model.dec.resblocks.1.convs1.1.weight_g
276
+ vq_model.dec.resblocks.1.convs1.1.weight_v
277
+ vq_model.dec.resblocks.1.convs1.2.bias
278
+ vq_model.dec.resblocks.1.convs1.2.weight_g
279
+ vq_model.dec.resblocks.1.convs1.2.weight_v
280
+ vq_model.dec.resblocks.1.convs2.0.bias
281
+ vq_model.dec.resblocks.1.convs2.0.weight_g
282
+ vq_model.dec.resblocks.1.convs2.0.weight_v
283
+ vq_model.dec.resblocks.1.convs2.1.bias
284
+ vq_model.dec.resblocks.1.convs2.1.weight_g
285
+ vq_model.dec.resblocks.1.convs2.1.weight_v
286
+ vq_model.dec.resblocks.1.convs2.2.bias
287
+ vq_model.dec.resblocks.1.convs2.2.weight_g
288
+ vq_model.dec.resblocks.1.convs2.2.weight_v
289
+ vq_model.dec.resblocks.2.convs1.0.bias
290
+ vq_model.dec.resblocks.2.convs1.0.weight_g
291
+ vq_model.dec.resblocks.2.convs1.0.weight_v
292
+ vq_model.dec.resblocks.2.convs1.1.bias
293
+ vq_model.dec.resblocks.2.convs1.1.weight_g
294
+ vq_model.dec.resblocks.2.convs1.1.weight_v
295
+ vq_model.dec.resblocks.2.convs1.2.bias
296
+ vq_model.dec.resblocks.2.convs1.2.weight_g
297
+ vq_model.dec.resblocks.2.convs1.2.weight_v
298
+ vq_model.dec.resblocks.2.convs2.0.bias
299
+ vq_model.dec.resblocks.2.convs2.0.weight_g
300
+ vq_model.dec.resblocks.2.convs2.0.weight_v
301
+ vq_model.dec.resblocks.2.convs2.1.bias
302
+ vq_model.dec.resblocks.2.convs2.1.weight_g
303
+ vq_model.dec.resblocks.2.convs2.1.weight_v
304
+ vq_model.dec.resblocks.2.convs2.2.bias
305
+ vq_model.dec.resblocks.2.convs2.2.weight_g
306
+ vq_model.dec.resblocks.2.convs2.2.weight_v
307
+ vq_model.dec.resblocks.3.convs1.0.bias
308
+ vq_model.dec.resblocks.3.convs1.0.weight_g
309
+ vq_model.dec.resblocks.3.convs1.0.weight_v
310
+ vq_model.dec.resblocks.3.convs1.1.bias
311
+ vq_model.dec.resblocks.3.convs1.1.weight_g
312
+ vq_model.dec.resblocks.3.convs1.1.weight_v
313
+ vq_model.dec.resblocks.3.convs1.2.bias
314
+ vq_model.dec.resblocks.3.convs1.2.weight_g
315
+ vq_model.dec.resblocks.3.convs1.2.weight_v
316
+ vq_model.dec.resblocks.3.convs2.0.bias
317
+ vq_model.dec.resblocks.3.convs2.0.weight_g
318
+ vq_model.dec.resblocks.3.convs2.0.weight_v
319
+ vq_model.dec.resblocks.3.convs2.1.bias
320
+ vq_model.dec.resblocks.3.convs2.1.weight_g
321
+ vq_model.dec.resblocks.3.convs2.1.weight_v
322
+ vq_model.dec.resblocks.3.convs2.2.bias
323
+ vq_model.dec.resblocks.3.convs2.2.weight_g
324
+ vq_model.dec.resblocks.3.convs2.2.weight_v
325
+ vq_model.dec.resblocks.4.convs1.0.bias
326
+ vq_model.dec.resblocks.4.convs1.0.weight_g
327
+ vq_model.dec.resblocks.4.convs1.0.weight_v
328
+ vq_model.dec.resblocks.4.convs1.1.bias
329
+ vq_model.dec.resblocks.4.convs1.1.weight_g
330
+ vq_model.dec.resblocks.4.convs1.1.weight_v
331
+ vq_model.dec.resblocks.4.convs1.2.bias
332
+ vq_model.dec.resblocks.4.convs1.2.weight_g
333
+ vq_model.dec.resblocks.4.convs1.2.weight_v
334
+ vq_model.dec.resblocks.4.convs2.0.bias
335
+ vq_model.dec.resblocks.4.convs2.0.weight_g
336
+ vq_model.dec.resblocks.4.convs2.0.weight_v
337
+ vq_model.dec.resblocks.4.convs2.1.bias
338
+ vq_model.dec.resblocks.4.convs2.1.weight_g
339
+ vq_model.dec.resblocks.4.convs2.1.weight_v
340
+ vq_model.dec.resblocks.4.convs2.2.bias
341
+ vq_model.dec.resblocks.4.convs2.2.weight_g
342
+ vq_model.dec.resblocks.4.convs2.2.weight_v
343
+ vq_model.dec.resblocks.5.convs1.0.bias
344
+ vq_model.dec.resblocks.5.convs1.0.weight_g
345
+ vq_model.dec.resblocks.5.convs1.0.weight_v
346
+ vq_model.dec.resblocks.5.convs1.1.bias
347
+ vq_model.dec.resblocks.5.convs1.1.weight_g
348
+ vq_model.dec.resblocks.5.convs1.1.weight_v
349
+ vq_model.dec.resblocks.5.convs1.2.bias
350
+ vq_model.dec.resblocks.5.convs1.2.weight_g
351
+ vq_model.dec.resblocks.5.convs1.2.weight_v
352
+ vq_model.dec.resblocks.5.convs2.0.bias
353
+ vq_model.dec.resblocks.5.convs2.0.weight_g
354
+ vq_model.dec.resblocks.5.convs2.0.weight_v
355
+ vq_model.dec.resblocks.5.convs2.1.bias
356
+ vq_model.dec.resblocks.5.convs2.1.weight_g
357
+ vq_model.dec.resblocks.5.convs2.1.weight_v
358
+ vq_model.dec.resblocks.5.convs2.2.bias
359
+ vq_model.dec.resblocks.5.convs2.2.weight_g
360
+ vq_model.dec.resblocks.5.convs2.2.weight_v
361
+ vq_model.dec.resblocks.6.convs1.0.bias
362
+ vq_model.dec.resblocks.6.convs1.0.weight_g
363
+ vq_model.dec.resblocks.6.convs1.0.weight_v
364
+ vq_model.dec.resblocks.6.convs1.1.bias
365
+ vq_model.dec.resblocks.6.convs1.1.weight_g
366
+ vq_model.dec.resblocks.6.convs1.1.weight_v
367
+ vq_model.dec.resblocks.6.convs1.2.bias
368
+ vq_model.dec.resblocks.6.convs1.2.weight_g
369
+ vq_model.dec.resblocks.6.convs1.2.weight_v
370
+ vq_model.dec.resblocks.6.convs2.0.bias
371
+ vq_model.dec.resblocks.6.convs2.0.weight_g
372
+ vq_model.dec.resblocks.6.convs2.0.weight_v
373
+ vq_model.dec.resblocks.6.convs2.1.bias
374
+ vq_model.dec.resblocks.6.convs2.1.weight_g
375
+ vq_model.dec.resblocks.6.convs2.1.weight_v
376
+ vq_model.dec.resblocks.6.convs2.2.bias
377
+ vq_model.dec.resblocks.6.convs2.2.weight_g
378
+ vq_model.dec.resblocks.6.convs2.2.weight_v
379
+ vq_model.dec.resblocks.7.convs1.0.bias
380
+ vq_model.dec.resblocks.7.convs1.0.weight_g
381
+ vq_model.dec.resblocks.7.convs1.0.weight_v
382
+ vq_model.dec.resblocks.7.convs1.1.bias
383
+ vq_model.dec.resblocks.7.convs1.1.weight_g
384
+ vq_model.dec.resblocks.7.convs1.1.weight_v
385
+ vq_model.dec.resblocks.7.convs1.2.bias
386
+ vq_model.dec.resblocks.7.convs1.2.weight_g
387
+ vq_model.dec.resblocks.7.convs1.2.weight_v
388
+ vq_model.dec.resblocks.7.convs2.0.bias
389
+ vq_model.dec.resblocks.7.convs2.0.weight_g
390
+ vq_model.dec.resblocks.7.convs2.0.weight_v
391
+ vq_model.dec.resblocks.7.convs2.1.bias
392
+ vq_model.dec.resblocks.7.convs2.1.weight_g
393
+ vq_model.dec.resblocks.7.convs2.1.weight_v
394
+ vq_model.dec.resblocks.7.convs2.2.bias
395
+ vq_model.dec.resblocks.7.convs2.2.weight_g
396
+ vq_model.dec.resblocks.7.convs2.2.weight_v
397
+ vq_model.dec.resblocks.8.convs1.0.bias
398
+ vq_model.dec.resblocks.8.convs1.0.weight_g
399
+ vq_model.dec.resblocks.8.convs1.0.weight_v
400
+ vq_model.dec.resblocks.8.convs1.1.bias
401
+ vq_model.dec.resblocks.8.convs1.1.weight_g
402
+ vq_model.dec.resblocks.8.convs1.1.weight_v
403
+ vq_model.dec.resblocks.8.convs1.2.bias
404
+ vq_model.dec.resblocks.8.convs1.2.weight_g
405
+ vq_model.dec.resblocks.8.convs1.2.weight_v
406
+ vq_model.dec.resblocks.8.convs2.0.bias
407
+ vq_model.dec.resblocks.8.convs2.0.weight_g
408
+ vq_model.dec.resblocks.8.convs2.0.weight_v
409
+ vq_model.dec.resblocks.8.convs2.1.bias
410
+ vq_model.dec.resblocks.8.convs2.1.weight_g
411
+ vq_model.dec.resblocks.8.convs2.1.weight_v
412
+ vq_model.dec.resblocks.8.convs2.2.bias
413
+ vq_model.dec.resblocks.8.convs2.2.weight_g
414
+ vq_model.dec.resblocks.8.convs2.2.weight_v
415
+ vq_model.dec.resblocks.9.convs1.0.bias
416
+ vq_model.dec.resblocks.9.convs1.0.weight_g
417
+ vq_model.dec.resblocks.9.convs1.0.weight_v
418
+ vq_model.dec.resblocks.9.convs1.1.bias
419
+ vq_model.dec.resblocks.9.convs1.1.weight_g
420
+ vq_model.dec.resblocks.9.convs1.1.weight_v
421
+ vq_model.dec.resblocks.9.convs1.2.bias
422
+ vq_model.dec.resblocks.9.convs1.2.weight_g
423
+ vq_model.dec.resblocks.9.convs1.2.weight_v
424
+ vq_model.dec.resblocks.9.convs2.0.bias
425
+ vq_model.dec.resblocks.9.convs2.0.weight_g
426
+ vq_model.dec.resblocks.9.convs2.0.weight_v
427
+ vq_model.dec.resblocks.9.convs2.1.bias
428
+ vq_model.dec.resblocks.9.convs2.1.weight_g
429
+ vq_model.dec.resblocks.9.convs2.1.weight_v
430
+ vq_model.dec.resblocks.9.convs2.2.bias
431
+ vq_model.dec.resblocks.9.convs2.2.weight_g
432
+ vq_model.dec.resblocks.9.convs2.2.weight_v
433
+ vq_model.dec.resblocks.10.convs1.0.bias
434
+ vq_model.dec.resblocks.10.convs1.0.weight_g
435
+ vq_model.dec.resblocks.10.convs1.0.weight_v
436
+ vq_model.dec.resblocks.10.convs1.1.bias
437
+ vq_model.dec.resblocks.10.convs1.1.weight_g
438
+ vq_model.dec.resblocks.10.convs1.1.weight_v
439
+ vq_model.dec.resblocks.10.convs1.2.bias
440
+ vq_model.dec.resblocks.10.convs1.2.weight_g
441
+ vq_model.dec.resblocks.10.convs1.2.weight_v
442
+ vq_model.dec.resblocks.10.convs2.0.bias
443
+ vq_model.dec.resblocks.10.convs2.0.weight_g
444
+ vq_model.dec.resblocks.10.convs2.0.weight_v
445
+ vq_model.dec.resblocks.10.convs2.1.bias
446
+ vq_model.dec.resblocks.10.convs2.1.weight_g
447
+ vq_model.dec.resblocks.10.convs2.1.weight_v
448
+ vq_model.dec.resblocks.10.convs2.2.bias
449
+ vq_model.dec.resblocks.10.convs2.2.weight_g
450
+ vq_model.dec.resblocks.10.convs2.2.weight_v
451
+ vq_model.dec.resblocks.11.convs1.0.bias
452
+ vq_model.dec.resblocks.11.convs1.0.weight_g
453
+ vq_model.dec.resblocks.11.convs1.0.weight_v
454
+ vq_model.dec.resblocks.11.convs1.1.bias
455
+ vq_model.dec.resblocks.11.convs1.1.weight_g
456
+ vq_model.dec.resblocks.11.convs1.1.weight_v
457
+ vq_model.dec.resblocks.11.convs1.2.bias
458
+ vq_model.dec.resblocks.11.convs1.2.weight_g
459
+ vq_model.dec.resblocks.11.convs1.2.weight_v
460
+ vq_model.dec.resblocks.11.convs2.0.bias
461
+ vq_model.dec.resblocks.11.convs2.0.weight_g
462
+ vq_model.dec.resblocks.11.convs2.0.weight_v
463
+ vq_model.dec.resblocks.11.convs2.1.bias
464
+ vq_model.dec.resblocks.11.convs2.1.weight_g
465
+ vq_model.dec.resblocks.11.convs2.1.weight_v
466
+ vq_model.dec.resblocks.11.convs2.2.bias
467
+ vq_model.dec.resblocks.11.convs2.2.weight_g
468
+ vq_model.dec.resblocks.11.convs2.2.weight_v
469
+ vq_model.dec.resblocks.12.convs1.0.bias
470
+ vq_model.dec.resblocks.12.convs1.0.weight_g
471
+ vq_model.dec.resblocks.12.convs1.0.weight_v
472
+ vq_model.dec.resblocks.12.convs1.1.bias
473
+ vq_model.dec.resblocks.12.convs1.1.weight_g
474
+ vq_model.dec.resblocks.12.convs1.1.weight_v
475
+ vq_model.dec.resblocks.12.convs1.2.bias
476
+ vq_model.dec.resblocks.12.convs1.2.weight_g
477
+ vq_model.dec.resblocks.12.convs1.2.weight_v
478
+ vq_model.dec.resblocks.12.convs2.0.bias
479
+ vq_model.dec.resblocks.12.convs2.0.weight_g
480
+ vq_model.dec.resblocks.12.convs2.0.weight_v
481
+ vq_model.dec.resblocks.12.convs2.1.bias
482
+ vq_model.dec.resblocks.12.convs2.1.weight_g
483
+ vq_model.dec.resblocks.12.convs2.1.weight_v
484
+ vq_model.dec.resblocks.12.convs2.2.bias
485
+ vq_model.dec.resblocks.12.convs2.2.weight_g
486
+ vq_model.dec.resblocks.12.convs2.2.weight_v
487
+ vq_model.dec.resblocks.13.convs1.0.bias
488
+ vq_model.dec.resblocks.13.convs1.0.weight_g
489
+ vq_model.dec.resblocks.13.convs1.0.weight_v
490
+ vq_model.dec.resblocks.13.convs1.1.bias
491
+ vq_model.dec.resblocks.13.convs1.1.weight_g
492
+ vq_model.dec.resblocks.13.convs1.1.weight_v
493
+ vq_model.dec.resblocks.13.convs1.2.bias
494
+ vq_model.dec.resblocks.13.convs1.2.weight_g
495
+ vq_model.dec.resblocks.13.convs1.2.weight_v
496
+ vq_model.dec.resblocks.13.convs2.0.bias
497
+ vq_model.dec.resblocks.13.convs2.0.weight_g
498
+ vq_model.dec.resblocks.13.convs2.0.weight_v
499
+ vq_model.dec.resblocks.13.convs2.1.bias
500
+ vq_model.dec.resblocks.13.convs2.1.weight_g
501
+ vq_model.dec.resblocks.13.convs2.1.weight_v
502
+ vq_model.dec.resblocks.13.convs2.2.bias
503
+ vq_model.dec.resblocks.13.convs2.2.weight_g
504
+ vq_model.dec.resblocks.13.convs2.2.weight_v
505
+ vq_model.dec.resblocks.14.convs1.0.bias
506
+ vq_model.dec.resblocks.14.convs1.0.weight_g
507
+ vq_model.dec.resblocks.14.convs1.0.weight_v
508
+ vq_model.dec.resblocks.14.convs1.1.bias
509
+ vq_model.dec.resblocks.14.convs1.1.weight_g
510
+ vq_model.dec.resblocks.14.convs1.1.weight_v
511
+ vq_model.dec.resblocks.14.convs1.2.bias
512
+ vq_model.dec.resblocks.14.convs1.2.weight_g
513
+ vq_model.dec.resblocks.14.convs1.2.weight_v
514
+ vq_model.dec.resblocks.14.convs2.0.bias
515
+ vq_model.dec.resblocks.14.convs2.0.weight_g
516
+ vq_model.dec.resblocks.14.convs2.0.weight_v
517
+ vq_model.dec.resblocks.14.convs2.1.bias
518
+ vq_model.dec.resblocks.14.convs2.1.weight_g
519
+ vq_model.dec.resblocks.14.convs2.1.weight_v
520
+ vq_model.dec.resblocks.14.convs2.2.bias
521
+ vq_model.dec.resblocks.14.convs2.2.weight_g
522
+ vq_model.dec.resblocks.14.convs2.2.weight_v
523
+ vq_model.dec.conv_post.weight
524
+ vq_model.dec.cond.weight
525
+ vq_model.dec.cond.bias
526
+ vq_model.flow.flows.0.pre.weight
527
+ vq_model.flow.flows.0.pre.bias
528
+ vq_model.flow.flows.0.enc.in_layers.0.bias
529
+ vq_model.flow.flows.0.enc.in_layers.0.weight_g
530
+ vq_model.flow.flows.0.enc.in_layers.0.weight_v
531
+ vq_model.flow.flows.0.enc.in_layers.1.bias
532
+ vq_model.flow.flows.0.enc.in_layers.1.weight_g
533
+ vq_model.flow.flows.0.enc.in_layers.1.weight_v
534
+ vq_model.flow.flows.0.enc.in_layers.2.bias
535
+ vq_model.flow.flows.0.enc.in_layers.2.weight_g
536
+ vq_model.flow.flows.0.enc.in_layers.2.weight_v
537
+ vq_model.flow.flows.0.enc.in_layers.3.bias
538
+ vq_model.flow.flows.0.enc.in_layers.3.weight_g
539
+ vq_model.flow.flows.0.enc.in_layers.3.weight_v
540
+ vq_model.flow.flows.0.enc.res_skip_layers.0.bias
541
+ vq_model.flow.flows.0.enc.res_skip_layers.0.weight_g
542
+ vq_model.flow.flows.0.enc.res_skip_layers.0.weight_v
543
+ vq_model.flow.flows.0.enc.res_skip_layers.1.bias
544
+ vq_model.flow.flows.0.enc.res_skip_layers.1.weight_g
545
+ vq_model.flow.flows.0.enc.res_skip_layers.1.weight_v
546
+ vq_model.flow.flows.0.enc.res_skip_layers.2.bias
547
+ vq_model.flow.flows.0.enc.res_skip_layers.2.weight_g
548
+ vq_model.flow.flows.0.enc.res_skip_layers.2.weight_v
549
+ vq_model.flow.flows.0.enc.res_skip_layers.3.bias
550
+ vq_model.flow.flows.0.enc.res_skip_layers.3.weight_g
551
+ vq_model.flow.flows.0.enc.res_skip_layers.3.weight_v
552
+ vq_model.flow.flows.0.enc.cond_layer.bias
553
+ vq_model.flow.flows.0.enc.cond_layer.weight_g
554
+ vq_model.flow.flows.0.enc.cond_layer.weight_v
555
+ vq_model.flow.flows.0.post.weight
556
+ vq_model.flow.flows.0.post.bias
557
+ vq_model.flow.flows.2.pre.weight
558
+ vq_model.flow.flows.2.pre.bias
559
+ vq_model.flow.flows.2.enc.in_layers.0.bias
560
+ vq_model.flow.flows.2.enc.in_layers.0.weight_g
561
+ vq_model.flow.flows.2.enc.in_layers.0.weight_v
562
+ vq_model.flow.flows.2.enc.in_layers.1.bias
563
+ vq_model.flow.flows.2.enc.in_layers.1.weight_g
564
+ vq_model.flow.flows.2.enc.in_layers.1.weight_v
565
+ vq_model.flow.flows.2.enc.in_layers.2.bias
566
+ vq_model.flow.flows.2.enc.in_layers.2.weight_g
567
+ vq_model.flow.flows.2.enc.in_layers.2.weight_v
568
+ vq_model.flow.flows.2.enc.in_layers.3.bias
569
+ vq_model.flow.flows.2.enc.in_layers.3.weight_g
570
+ vq_model.flow.flows.2.enc.in_layers.3.weight_v
571
+ vq_model.flow.flows.2.enc.res_skip_layers.0.bias
572
+ vq_model.flow.flows.2.enc.res_skip_layers.0.weight_g
573
+ vq_model.flow.flows.2.enc.res_skip_layers.0.weight_v
574
+ vq_model.flow.flows.2.enc.res_skip_layers.1.bias
575
+ vq_model.flow.flows.2.enc.res_skip_layers.1.weight_g
576
+ vq_model.flow.flows.2.enc.res_skip_layers.1.weight_v
577
+ vq_model.flow.flows.2.enc.res_skip_layers.2.bias
578
+ vq_model.flow.flows.2.enc.res_skip_layers.2.weight_g
579
+ vq_model.flow.flows.2.enc.res_skip_layers.2.weight_v
580
+ vq_model.flow.flows.2.enc.res_skip_layers.3.bias
581
+ vq_model.flow.flows.2.enc.res_skip_layers.3.weight_g
582
+ vq_model.flow.flows.2.enc.res_skip_layers.3.weight_v
583
+ vq_model.flow.flows.2.enc.cond_layer.bias
584
+ vq_model.flow.flows.2.enc.cond_layer.weight_g
585
+ vq_model.flow.flows.2.enc.cond_layer.weight_v
586
+ vq_model.flow.flows.2.post.weight
587
+ vq_model.flow.flows.2.post.bias
588
+ vq_model.flow.flows.4.pre.weight
589
+ vq_model.flow.flows.4.pre.bias
590
+ vq_model.flow.flows.4.enc.in_layers.0.bias
591
+ vq_model.flow.flows.4.enc.in_layers.0.weight_g
592
+ vq_model.flow.flows.4.enc.in_layers.0.weight_v
593
+ vq_model.flow.flows.4.enc.in_layers.1.bias
594
+ vq_model.flow.flows.4.enc.in_layers.1.weight_g
595
+ vq_model.flow.flows.4.enc.in_layers.1.weight_v
596
+ vq_model.flow.flows.4.enc.in_layers.2.bias
597
+ vq_model.flow.flows.4.enc.in_layers.2.weight_g
598
+ vq_model.flow.flows.4.enc.in_layers.2.weight_v
599
+ vq_model.flow.flows.4.enc.in_layers.3.bias
600
+ vq_model.flow.flows.4.enc.in_layers.3.weight_g
601
+ vq_model.flow.flows.4.enc.in_layers.3.weight_v
602
+ vq_model.flow.flows.4.enc.res_skip_layers.0.bias
603
+ vq_model.flow.flows.4.enc.res_skip_layers.0.weight_g
604
+ vq_model.flow.flows.4.enc.res_skip_layers.0.weight_v
605
+ vq_model.flow.flows.4.enc.res_skip_layers.1.bias
606
+ vq_model.flow.flows.4.enc.res_skip_layers.1.weight_g
607
+ vq_model.flow.flows.4.enc.res_skip_layers.1.weight_v
608
+ vq_model.flow.flows.4.enc.res_skip_layers.2.bias
609
+ vq_model.flow.flows.4.enc.res_skip_layers.2.weight_g
610
+ vq_model.flow.flows.4.enc.res_skip_layers.2.weight_v
611
+ vq_model.flow.flows.4.enc.res_skip_layers.3.bias
612
+ vq_model.flow.flows.4.enc.res_skip_layers.3.weight_g
613
+ vq_model.flow.flows.4.enc.res_skip_layers.3.weight_v
614
+ vq_model.flow.flows.4.enc.cond_layer.bias
615
+ vq_model.flow.flows.4.enc.cond_layer.weight_g
616
+ vq_model.flow.flows.4.enc.cond_layer.weight_v
617
+ vq_model.flow.flows.4.post.weight
618
+ vq_model.flow.flows.4.post.bias
619
+ vq_model.flow.flows.6.pre.weight
620
+ vq_model.flow.flows.6.pre.bias
621
+ vq_model.flow.flows.6.enc.in_layers.0.bias
622
+ vq_model.flow.flows.6.enc.in_layers.0.weight_g
623
+ vq_model.flow.flows.6.enc.in_layers.0.weight_v
624
+ vq_model.flow.flows.6.enc.in_layers.1.bias
625
+ vq_model.flow.flows.6.enc.in_layers.1.weight_g
626
+ vq_model.flow.flows.6.enc.in_layers.1.weight_v
627
+ vq_model.flow.flows.6.enc.in_layers.2.bias
628
+ vq_model.flow.flows.6.enc.in_layers.2.weight_g
629
+ vq_model.flow.flows.6.enc.in_layers.2.weight_v
630
+ vq_model.flow.flows.6.enc.in_layers.3.bias
631
+ vq_model.flow.flows.6.enc.in_layers.3.weight_g
632
+ vq_model.flow.flows.6.enc.in_layers.3.weight_v
633
+ vq_model.flow.flows.6.enc.res_skip_layers.0.bias
634
+ vq_model.flow.flows.6.enc.res_skip_layers.0.weight_g
635
+ vq_model.flow.flows.6.enc.res_skip_layers.0.weight_v
636
+ vq_model.flow.flows.6.enc.res_skip_layers.1.bias
637
+ vq_model.flow.flows.6.enc.res_skip_layers.1.weight_g
638
+ vq_model.flow.flows.6.enc.res_skip_layers.1.weight_v
639
+ vq_model.flow.flows.6.enc.res_skip_layers.2.bias
640
+ vq_model.flow.flows.6.enc.res_skip_layers.2.weight_g
641
+ vq_model.flow.flows.6.enc.res_skip_layers.2.weight_v
642
+ vq_model.flow.flows.6.enc.res_skip_layers.3.bias
643
+ vq_model.flow.flows.6.enc.res_skip_layers.3.weight_g
644
+ vq_model.flow.flows.6.enc.res_skip_layers.3.weight_v
645
+ vq_model.flow.flows.6.enc.cond_layer.bias
646
+ vq_model.flow.flows.6.enc.cond_layer.weight_g
647
+ vq_model.flow.flows.6.enc.cond_layer.weight_v
648
+ vq_model.flow.flows.6.post.weight
649
+ vq_model.flow.flows.6.post.bias
650
+ vq_model.quantizer.vq.layers.0._codebook.embed
genie_tts/Data/v2ProPlus/Models/prompt_encoder_fp32.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:de17d88fdfe9598f9d25710b7299bccac3c0a79851cd8085fc210492260cc0b9
3
+ size 44533
genie_tts/Data/v2ProPlus/Models/vits_fp32.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9f9cb0882bbd028e73e5b54b94b54f53370a0f65605ca7dee4e4f1ab5edac89c
3
+ size 1613193
genie_tts/G2P/Chinese/ChineseG2P.py ADDED
@@ -0,0 +1,186 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import re
3
+ from typing import List, Tuple, Dict
4
+ import logging
5
+
6
+ from pypinyin.contrib.tone_convert import to_finals_tone3, to_initials
7
+ import jieba_fast
8
+ import jieba_fast.posseg as psg
9
+ from g2pM import G2pM
10
+
11
+ from ...Core.Resources import Chinese_G2P_DIR
12
+ from ..SymbolsV2 import symbols_v2, symbol_to_id_v2
13
+ from .ToneSandhi import ToneSandhi
14
+ from .Normalization.text_normlization import TextNormalizer
15
+ from .CorrectPronunciation import correct_pronunciation
16
+ from .Erhua import ErhuaProcessor
17
+
18
# Silence jieba's start-up logging before any segmentation happens.
jieba_fast.setLogLevel(logging.ERROR)

# Punctuation marks that survive normalisation and act as sentence splitters.
PUNCTUATION = ["!", "?", "…", ",", ".", "-"]
# Single-character normalisation table: full-width / CJK punctuation mapped
# onto the small PUNCTUATION alphabet above.
PUNCTUATION_REPLACEMENTS = {
    ":": ",", ";": ",", ",": ",", "。": ".", "!": "!",
    "?": "?", "\n": ".", "·": ",", "、": ",", "$": ".",
    "/": ",", "—": "-", "~": "…", "~": "…",
}
SPECIAL_REPLACEMENTS = {"...": "…"}  # special multi-character replacements
27
+
28
+
29
class ChineseG2P:
    """Chinese grapheme-to-phoneme converter.

    Pipeline: text normalisation (TextNormalizer + punctuation mapping) ->
    jieba word segmentation -> per-character pinyin via G2pM -> polyphone
    correction -> tone sandhi -> erhua merging -> Opencpop phoneme symbols.
    """

    def __init__(self):
        # --- Resource loading ---
        self.g2pm: G2pM = G2pM()
        self.tone_modifier: ToneSandhi = ToneSandhi()
        self.erhua_processor: ErhuaProcessor = ErhuaProcessor()
        self.text_normalizer: TextNormalizer = TextNormalizer()
        self.pinyin_to_symbol_map: Dict[str, str] = {}

        # Pre-compiled regular expressions:
        # 1. Matches any key of the punctuation replacement table.
        self.pattern_punct_map = re.compile("|".join(re.escape(p) for p in PUNCTUATION_REPLACEMENTS.keys()))
        # 2. Filters out everything that is neither CJK (U+4E00..U+9FA5) nor allowed punctuation.
        allowed_chars = "".join(re.escape(p) for p in PUNCTUATION)
        self.pattern_filter = re.compile(r"[^\u4e00-\u9fa5" + allowed_chars + r"]+")
        # 3. Splits inside a sentence right after a punctuation mark (lookbehind keeps the mark).
        self.pattern_split = re.compile(r"(?<=[{0}])\s*".format(allowed_chars))
        # 4. Collapses runs of the same punctuation character into one.
        self.pattern_consecutive = re.compile(f"([{allowed_chars}])\\1+")
        # 5. Matches runs of English letters (removed before pinyin conversion).
        self.pattern_eng = re.compile(r"[a-zA-Z]+")

        # --- Pinyin remapping lookup tables (used by _pinyin_to_opencpop_phones) ---
        self.v_rep_map = {"uei": "ui", "iou": "iu", "uen": "un"}
        self.pinyin_rep_map = {"ing": "ying", "i": "yi", "in": "yin", "u": "wu"}
        self.single_rep_map = {"v": "yu", "e": "e", "i": "y", "u": "w"}

        self.load_opencpop_dict()

    def load_opencpop_dict(self):
        """Load the Opencpop strict pinyin -> phoneme mapping table."""
        map_path = os.path.join(Chinese_G2P_DIR, "opencpop-strict.txt")
        with open(map_path, 'r', encoding='utf-8') as f:
            for line in f:
                parts = line.strip().split("\t")
                if len(parts) >= 2:
                    self.pinyin_to_symbol_map[parts[0]] = parts[1]

    def _replace_punctuation(self, text: str) -> str:
        """Apply the replacement tables and strip non-standard characters."""
        # text = text.replace("嗯", "恩").replace("呣", "母")
        for k, v in SPECIAL_REPLACEMENTS.items():
            text = text.replace(k, v)
        text = self.pattern_punct_map.sub(lambda x: PUNCTUATION_REPLACEMENTS[x.group()], text)
        text = self.pattern_filter.sub("", text)
        return text

    def normalize_text(self, text: str) -> str:
        """Run the full text-normalisation pipeline over *text*."""
        # 1. TextNormalizer conversion (e.g. digits -> Chinese numerals)
        sentences = self.text_normalizer.normalize(text)
        # 2. Punctuation mapping and cleanup
        dest_parts = [self._replace_punctuation(s) for s in sentences]
        dest_text = "".join(dest_parts)
        # 3. Collapse repeated punctuation
        dest_text = self.pattern_consecutive.sub(r"\1", dest_text)
        return dest_text

    def _pinyin_to_opencpop_phones(self, c: str, v: str) -> List[str]:
        """Convert an initial/final (with tone digit) pair into Opencpop phonemes.

        Raises:
            KeyError: when the combined pinyin key is absent from the
                Opencpop mapping table.
        """
        # Split the trailing tone digit off the final
        v_without_tone = v[:-1]
        tone = v[-1]
        if c:
            # Syllable with an explicit initial
            final = self.v_rep_map.get(v_without_tone, v_without_tone)
            pinyin_key = c + final
        else:
            # Zero-initial / standalone-syllable logic
            temp_key = c + v_without_tone  # c is empty string here usually
            if temp_key in self.pinyin_rep_map:
                pinyin_key = self.pinyin_rep_map[temp_key]
            else:
                # Rewrite the leading vowel (v->yu, i->y, u->w, ...)
                if temp_key and temp_key[0] in self.single_rep_map:
                    pinyin_key = self.single_rep_map[temp_key[0]] + temp_key[1:]
                else:
                    pinyin_key = temp_key
        # Table lookup to get the phoneme pair
        phone_str = self.pinyin_to_symbol_map[pinyin_key]
        new_c, new_v = phone_str.split(" ")
        new_v = new_v + tone
        return [new_c, new_v]

    def g2p(self, text: str) -> Tuple[List[str], List[int]]:
        """Produce the phone list and the word-to-phone-count mapping."""
        sentences = [i for i in self.pattern_split.split(text) if i.strip() != ""]
        all_phones = []
        all_word2ph = []
        for seg in sentences:
            # Remove English letter runs
            seg = self.pattern_eng.sub("", seg)
            # Word segmentation with POS tags
            seg_cut = psg.lcut(seg)
            seg_cut = self.tone_modifier.pre_merge_for_modify(seg_cut)
            initials = []
            finals = []
            # Whole-sentence G2PM inference, one pinyin per character
            pinyins = self.g2pm(seg, char_split=True)
            pinyins = [p.replace("u:", "v") for p in pinyins]
            pre_word_length = 0
            for word, pos in seg_cut:
                now_word_length = pre_word_length + len(word)
                if pos == "eng":
                    pre_word_length = now_word_length
                    continue
                word_pinyins = pinyins[pre_word_length:now_word_length]
                # Polyphonic-character correction
                word_pinyins = correct_pronunciation(word, word_pinyins)
                sub_initials = []
                sub_finals = []
                for pinyin in word_pinyins:
                    if pinyin[0].isalpha():
                        sub_initials.append(to_initials(pinyin))
                        sub_finals.append(to_finals_tone3(pinyin, neutral_tone_with_five=True))
                    else:
                        # Non-alphabetic entries (punctuation) pass through unchanged
                        sub_initials.append(pinyin)
                        sub_finals.append(pinyin)
                pre_word_length = now_word_length
                # Tone sandhi
                sub_finals = self.tone_modifier.modified_tone(word, pos, sub_finals)
                # Erhua (儿化) merging
                sub_initials, sub_finals = self.erhua_processor.merge_erhua(sub_initials, sub_finals, word, pos)
                initials.extend(sub_initials)
                finals.extend(sub_finals)

            for c, v in zip(initials, finals):
                if c == v:
                    # Punctuation: initial == final, emitted as a single phone
                    all_phones.append(c)
                    all_word2ph.append(1)
                else:
                    # Regular pinyin conversion
                    try:
                        phone_pair = self._pinyin_to_opencpop_phones(c, v)
                        all_phones.extend(phone_pair)
                        all_word2ph.append(len(phone_pair))
                    except KeyError:
                        # Unknown pinyin combination: skip the character
                        continue

        return all_phones, all_word2ph

    def process(self, text: str) -> Tuple[str, List[str], List[int], List[int]]:
        """Return (normalized_text, phones, phone_ids, word2ph) for *text*."""
        normalized_text = self.normalize_text(text)
        # print(normalized_text)
        phones, word2ph = self.g2p(normalized_text)
        # NOTE(review): filtering unknown symbols here can desynchronise
        # word2ph from phones if anything is actually dropped — confirm that
        # g2p only ever emits symbols present in symbols_v2.
        phones = [ph for ph in phones if ph in symbols_v2]
        phones_ids = [symbol_to_id_v2[ph] for ph in phones]
        return normalized_text, phones, phones_ids, word2ph
180
+
181
+
182
# Module-level singleton: constructing ChineseG2P loads dictionaries and
# models, so one shared instance serves every call.
processor: ChineseG2P = ChineseG2P()


def chinese_to_phones(text: str) -> Tuple[str, List[str], List[int], List[int]]:
    """Convert Chinese *text* into (normalized_text, phones, phone_ids, word2ph)."""
    return processor.process(text)
genie_tts/G2P/Chinese/CorrectPronunciation.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import pickle
3
+ from typing import List, Dict, Any, Union
4
+
5
+ from ...Core.Resources import Chinese_G2P_DIR
6
+
7
# Default on-disk location of the pickled polyphonic-character dictionary.
DEFAULT_CACHE_PATH = os.path.join(Chinese_G2P_DIR, "polyphonic.pickle")
9
+
10
+
11
class PolyphonicDictManager:
    """Lazy, process-wide cache for the polyphonic pinyin dictionary.

    The pickle is loaded once on first access and the same dict is then
    shared by every caller.
    """

    # Cached dictionary; stays empty until the first get_data() call.
    _data: Dict[str, Any] = {}

    @classmethod
    def get_data(cls, path: str = DEFAULT_CACHE_PATH) -> Dict[str, Any]:
        """Return the cached dictionary, loading it from *path* on first use.

        NOTE(review): *path* is only honoured on the very first call — later
        calls return the already-cached data regardless of the argument.
        """
        if not cls._data:
            with open(path, "rb") as f:
                cls._data = pickle.load(f)
        return cls._data
20
+
21
+
22
def correct_pronunciation(word: str, word_pinyin: List[str]) -> Union[List[str], str]:
    """Correct predicted pinyin using the polyphonic dictionary.

    Lookup order: the whole word first; when no whole-word entry exists,
    each character is corrected individually.

    Args:
        word: Original Chinese string, e.g. "银行".
        word_pinyin: Currently predicted pinyin list, e.g. ['yin2', 'xing2'].

    Returns:
        The corrected pinyin sequence. List results are fresh copies so the
        shared cached dictionary can never be mutated through the return value.

    Example:
        # Dictionary contains the whole word {'银行': ['yin2', 'hang2']}
        result = correct_pronunciation("银行", ["yin2", "xing2"])
        # Result: ["yin2", "hang2"]
    """
    pp_dict = PolyphonicDictManager.get_data()
    # 1. Whole-word match takes priority.
    if new_pinyin := pp_dict.get(word):
        # Defensive copy: returning the dict's own list would alias the
        # process-wide cache; non-list entries (e.g. str) pass through as-is.
        return list(new_pinyin) if isinstance(new_pinyin, list) else new_pinyin
    # 2. Per-character correction on a copy of the prediction.
    new_word_pinyin = list(word_pinyin)
    for idx, w in enumerate(word):
        if idx >= len(new_word_pinyin):
            # Pinyin list shorter than the word (merged symbols) — stop safely.
            break
        if w_pinyin := pp_dict.get(w):
            new_word_pinyin[idx] = w_pinyin[0]
    return new_word_pinyin
genie_tts/G2P/Chinese/Erhua.py ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List, Tuple, Set
2
+
3
+
4
class ErhuaProcessor:
    """Erhua (儿化) handling for the Chinese G2P pipeline.

    Decides whether a word-final 儿 should be merged into the preceding
    syllable's final and rewrites the phoneme lists accordingly.
    """

    def __init__(self):
        # Words that must always be erhua-merged.
        self.must_erhua: Set[str] = {
            "小院儿", "胡同儿", "范儿", "老汉儿", "撒欢儿", "寻老礼儿", "妥妥儿", "媳妇儿"
        }
        # Words whose trailing 儿 is a full syllable and must NOT be merged.
        self.not_erhua: Set[str] = {
            "虐儿", "为儿", "护儿", "瞒儿", "救儿", "替儿", "有儿", "一儿", "我儿", "俺儿",
            "妻儿", "拐儿", "聋儿", "乞儿", "患儿", "幼儿", "孤儿", "婴儿", "婴幼儿", "连体儿",
            "脑瘫儿", "流浪儿", "体弱儿", "混血儿", "蜜雪儿", "舫儿", "祖儿", "美儿", "应采儿", "可儿",
            "侄儿", "孙儿", "侄孙儿", "女儿", "男儿", "红孩儿", "花儿", "虫儿", "马儿", "鸟儿",
            "猪儿", "猫儿", "狗儿", "少儿",
        }

    def merge_erhua(self, initials: List[str], finals: List[str], word: str, pos: str) -> Tuple[List[str], List[str]]:
        """Merge a trailing 儿 into the previous final where appropriate.

        Note: step 1 normalises ``finals`` in place, so the caller's list may
        be mutated even when the function returns early without merging.
        """
        last = len(finals) - 1
        # 1. A word-final 儿 mispredicted as er1 is normalised to er2 (in place).
        if last >= 0 and word[last] == "儿" and finals[last] == "er1":
            finals[last] = "er2"
        # 2. Unless the word is forced into erhua, skip it when it is an
        #    explicit non-erhua word or an adjective / abbreviation / name.
        exempt = word in self.not_erhua or pos in {"a", "j", "nr"}
        if word not in self.must_erhua and exempt:
            return initials, finals
        # 3. Length sanity check (symbols such as "……" break the 1:1 mapping).
        if len(finals) != len(word):
            return initials, finals
        # 4. Merge: the final 儿 inherits the tone of the preceding final.
        merged_initials: List[str] = []
        merged_finals: List[str] = []
        for idx, final in enumerate(finals):
            mergeable = (
                idx == last
                and word[idx] == "儿"
                and final in ("er2", "er5")
                and word[-2:] not in self.not_erhua
                and bool(merged_finals)
            )
            if mergeable:
                # Re-tone 'er' with the previous final's tone digit.
                final = "er" + merged_finals[-1][-1]
            merged_initials.append(initials[idx])
            merged_finals.append(final)
        return merged_initials, merged_finals
genie_tts/G2P/Chinese/Normalization/__init__.py ADDED
File without changes
genie_tts/G2P/Chinese/Normalization/char_convert.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Traditional and simplified Chinese conversion, a simplified character may correspond to multiple traditional characters."""
16
+
17
+ simplified_charcters = "制咖片型超声盘鉴定仔点他命书歌粉巾字帐恤手指记忆棒形转弯沟光○〇㐄㐅㐆㐌㐖毒㐜㐡㐤㐰㐺㑇㑳㒳㒸㔾㗂㗎㝵㞎㞙㞞以㢲㢴㤅㥁㥯㨗㫺㬎㮎㮚㮸㲋㲱㲾㳮涧㵪㶸㷖㷭㹢㹴犬㺢狓㺵碗㽮㿝䍃䔢䖟䖸䗈䗥䗪䝓射䥯䦉䯝鲃鱼䲔䳗鹅䵹鼄䶑一对应映射丁不识下儿子做二休世丘之貉并中台原则串为甚谓干净了百事无成八变五十些人得道鸡升天代如并来去个国政策劲幽灵在欧洲游荡接样萝卜坑侧化传价元论醇共再准刀两断切分耕耘收获钱货物向看旧就绪险刻千金动劳永逸匙零夜半卡通回复返影踪反常态口咬气句话同吐快吹周味呼诺呜品红锅哄而散起唱和问三知生熟团漆黑火糟堆场空块面塌糊涂尘染壁厢夔已足多情露水大早到晚夫妻当关万莫开失古恨套所料既往孔见提师要家主审寸阴难买斗牛小撮部阵局展身层巴掌帆风顺席地带过年计于春头载四季期被蛇怕井绳度愿式份弹顷深前律径心意念差愁孤行俱全房厅交遮打技长把抓死拿眼泪鼻涕钥锁折段抿拍即合扫排掬挥拨拥上入击洞掷揽改故辙败文值名斑方面旁族日秋餐隔雅里终父旦时晌会霎间晃暴寒曝更月望垠际朝夕本正经利杯羹东西板枝独秀根筋杆进条龙服务概模次函数又性程总付步脚印趋登毛拔呵氧氮碳决雌雄波未平派谎言流清楚白准溜烟潭有获闻是处降琴鹤甲病发可拾沙目然了直以相眨穿睹瞥瞬矢的解石鸟神教秉虔诚秘种窝蜂穷窍笑置笔苟勾销抹杀煞等奖箍节吃箭仇双雕诗筹箩筐系列纸级士官统丝毫挂维网尽线微吭响股脑胎脉承腔臂力致效资源址器举功投般说讲规贸易叶障着慎满皆输号木电池衣倾钟高低视仁觉醒览遗角银币触溃九鼎蔽抄出驷马追重语破贫洗贯走路安蹴至几蹶振跃役胆汗较辈轮辞赞退六连遍递边针血锤音错门思闪真倒项栽雾类保护川先惊乍体哄鳞爪鸣滴泡邻域党专鼓作齐炒丑烯亥克内酯冬加奴卯肝炎基尺梁街裤镐客宠庭巳汝昌烷玲磊糖肇酉醛啷青县韪良香骨鲷丂七集河市弦喜嘴张舌堵区工业姊妹星架构巧彩扭歪拼凑余热曜武州爷浮屠美乡老阶树荤素碎落能魄鳃鳗珠丄丅丆万俟丈尚摸母娘量管群亚虎必我堂令申件装伏位博侠义界表女墟台戏臭皮匠胜诸葛亮赛顶倍催请运算包立叉戟离疫苗土史志演围揭瓦晒夷姑婆帝村宝烂尖杉碱屉桌山岔岛由纪峡坝库镇废从德后拗汤治旬食明昧曹朋友框栏极权幂曲归依猫民氟硼氯磷铁江侗自旅法司洋浦梅园温暖湾焦班幸用田略番叠皇炮捶硝苯酸腺苷棱草镜穗跳远索锦纲聚氰胺联店胚膲爱色堇紫罗兰芝茶饭菱云虫藏藩乱叛苏亲债凳学座恐恋柱测肌腹衩锥系貂企乌跪叩军车农题迭都甘油屯奏键短阿姨陪姐只顾茅庐槽驾魂鲜鹿页其菜单乘任供势午齿汉组织吊调泻唇坡城报坟外夸将尉建筑岸岗公床扬新剑升杭林栗校楼标款汽社浣海商馆剧院钢华港机械广媒环球融第医科证券综财乐育游涨犹岭疏瘾睑确兵领导缴肢膛船艾瑟尔苍蔡虞效衫覆访诉课谕议轨述野钩限敌鞋颌颔颚饶首龈站例修凡划垂届属崽颏厨拜挫摆放旋削棋榻槛礼沉注滑营狱画确仪聘花葬诏员跌辖周达酒锚闸陷陆雨雪飞威丌于丹久乏予理评产亢卑亦乎舞己悲矩圆词害志但住佞佳便俗信票案幅翁倦伦假偏倚斜亏鬼敲停备伤脾胃仅此像俭匮免宜穴焉戴兼容许冻伯仲负彼昼皂轩轾实刊划颠卫战哥比省非好黄饰别拘束掩奶睬选择摇扰烦苦枚写协厌及格受欢迎约只估侵犯割状告或缺抗拒挽撤救药喻磨灭端倪少逆逾越避靠适吉誉吝玉含延咎歹听啻渊善谋均匀堪忍够太惹妙妥妨孕症孝术室完纳推冠积宣疑辩栗碴称屈挠屑干涉衡待很忙恶忿怎么怠急耻恭息悦惑惜惟想愉愧怍慌愤启懂懈怀材才紧招认扣抵拉舍也罢插揣冒搭撞南墙扩核支攻敢雷攀敬里吗需景智暇曾罪遇朽枉止况竞争辱求愈渝溶济左右袒困补爽特寂寞示弱找谢畏强疾徐痛痒冤符眠睦瞅董何厚云措活疲羞者轻玻璃祥兆禁���稂莠稳佛换答简结果盟绝缕途给谈否羁翼耐肖胫毋宁兴舒若菲莱痕迹窠臼虚衰脸兔撒鹰棺范该详讳抬泰让须眉象众赀账费灰赖奇虑训辍辨菽麦辛近送透逞徒速续逮捕遂遑违逊斧钺艰醉锈随观弃显饱脂肪使丏丐帮丒且慢末丕替桃宗王尊凉爵各图屋脊粮署录坛吾禄职胄袭君厦丗北壑桐疹损逢陵鹬丙寅戌氨腈唑纶辰酮脱氢酶醚丞丢现掉纱帽弄扯炮碗丠両丣坐存激肩臻蒂莲悖序驱丨丩丫挺杈髻鬟细介俄伊犁京尼布订普渡央委监察检查剂圈设警队斯督剩震境航舶革防托播促质版蝾螈锋研艺历残消频谱精密制造陲邮候埔坚压坜凹汇执府究邦俘摄寮彬狼岳肺肿庸英讯诊埋粒胞括控码韩暑枪枢砥澳哇牟寿甸钻探篇签缀缝继耳肯照妇埃悬璧轴柜台辣搁浅邪跑纤阮阳私囊魔丮丰姿采丱烧丳丵丶丷丸参寨朗桂瑞砂衷霞貌凤仆舰因嫌宰峰干络牌持旨祭祷簿编罚宾办丼丿乀乂乃乄仰慕盛旷留考验阔乆乇么丑麽乊湖燃乑乒乓乕乖僻忤戾离谬迕乗危肥劫除隙浪婿乙炔肠酰吡咯盐乚乛乜嘢卿玄宫尾狐龟塔嶷兄弟泉章霄钉耙乞扎哀怜恕讨乢乣乤乥乧乨乩童乪乫乭乳晕汁液瑶浆牙癌突窦罩腐胶猪酪蛋糕菌瘤乴乵乶乷乸乹乺乼乾俸冰嘉哕嚎坤妈尸垒旱枯涸俐渴潮涩煸豆燥爹瘦瘪癣瞪袋脆姜贝隆馏乿亀亁叫咕攘扔搞男砸窜蓬麻亃亄亅却亇迟典今临繁累卵奉婚聪躬巨与迁添裂副宿岁怪恶尕仑愣杆硅硫钛铀锰芑杂异钠砷胂磺琥珀舱棍簧胡茬盗浩
盆贩郎腿亍洪亐互欠助勉惠操斥诿系户译亓墓碑刑铃卅渠缤纷斗米旗宪钒灯徽瘟祖拳福谷丰脏腑绑肉腌苓蕴桥铺霸颜闹判喷冈底蛙陉矿亖亘亜罕们娜桑那努哈喀弗烈曼松森杜氏杯奥琛敦戊穆圣裔汇薛孙亟亡佚虏羊牢奋释卷卸契媾感额睫缠谊趾塞挤纽阻还配驰庄亨洛祚亪享津沪畿郊慈菴枇杷膏亭阁锃丽亳亶亹诛初责翻疯偶杰丛稠妖拖寰居吸授慧蜗吞壮魅狗矛盾益渣患忧稀描猿梦暂涯畜祸缘沸搜引擎臣横纭谁混援蒸兽狮税剖亻亼亽亡什献刹邡么仂仃仄仆富怨仈仉毕昔晨壳绍仍仏仒仕宦仗欺恃腰叹叹炬梓讫施仙后琼逝仚仝仞仟悔仡佬偿填泊拓扑簇羔购顿钦佩发棻阃驭养亿儆尤借帧赈凌叙帖李柔刚沃眦睚戒讹取飨读仨仫仮著泳卧躺韶夏裁仳仵唯贤凭钓诞仿似宋佛讽伀硕盼鹅伄儅伈伉俪柯始娃迈戈坦堡帕茨萨庙玛莉莎藤霍姆伋伍奢胥廷芳豪伎俩侍汛勒希羲雏伐憩整谟闲闲伕伙伴颐伜伝伢叔恒兹恩翰伱伲侣伶俜悧鼬伸懒缩喇叭伹伺伻伽倻辐伾似佃伫布乔妮墨佉卢佌贷劣廉昂档浓矮伞洼缓耗胸谷迷挡率龋宅沫舍疗佐贰佑占优据铧尝呢须鲁晓佗佘余坪寺瓜铳僧蒙芒陀龛哼呕坊奸孽弊揖祟茧缚誓贼佝偻瞀佟你夺赶佡佢佣佤佧贾佪佫佯佰佱洁绩酿肴佴卷佶佷佸佹佺佻佼佽佾具唤窘坏娱怒慨硬习惯聋膨胀蔓骇贵痹侀侁侂侃侄侅鸿燕侇侈糜靡侉侌妾侏儒仓鼠侐侑侔仑侘侚链侜偎傍钴循柳葫芦附価侮骂蔑侯岩截蚀局贴壶嬛宴捷携桶笺酌俣狭膝狄俅俉俊俏俎俑俓俔谚俚俛黎健呈固墒增守康箱湿祐镖镳杠盒靖膜龄俞豹猎噪孚封札筒托衍鸽剪撰稿炼厂禊练缮葺俯瞰撑冲效俳俴俵俶俷俺备俾伥倂倅储卒惶敷猝逃颉蓄崇隐倌倏忽刺蜡烛噍嚼坍扁抽毙葱楣灌灶粪背薮卖赔闭霉腾倓倔幸倘倜傥倝借箸挹浇阅倡狂倢倣値倥偬倨傲倩匡嗣冲柝珍倬倭寇猩倮倶倷倹勤赞偁偃充伪吏嗓寐惺扮拱芫茜藉虢钞偈伟晶偌宕距析滤殿疼瘫注颇偓偕鸭歇滞偝偟偢忘怡旺偨偩逼偫偭偯偰偱偲侦缉蹄偷减惰漏窥窃偸偺迹傀儡傅傈僳骂篱傎奎琳迪叟芭傒傔傕伧悉荒傜傞傢傣芽逼佣婢傮睨寄檄诵谣颂伛担辜弓惨蒿悼疤傺傻屄臆巢泄箧羡盖轧颓傿㑩僄僇佥僊働僎侨僔僖僚僝伪僣僤侥僦猴偾僩僬僭僮僯僰雇僵殖签静僾僿征陇儁侬儃儇侩朴薄儊儋儌儍傧儓俦侪拟尽儜儞儤儦儩汰哉寡渥裕酷儭儱罐儳儵儹傩俨儽兀臬臲鹫允勋勋宙宵帅憝彝谐嫂阋畅沛溢盈饥赫凶悍狠猛顽愚妣斩秦遣鞭耀敏荣槃泽爆碟磁秃缆辉霁卤朵娄孜烽酱勃汀箕裘钳耶蒙蕾彻兑软遭黜兎児韵媳爸兕觥兖兙兛兜售鍪肚兝兞兟兡兢兣樽殓涅睡禀籍赘泌啡肽奸幕涵涝熵疚眷稃衬讧赴焕椒歼植跏没试误猜栖窗肋袖颊兪卦撇胡岐廓轿疸枫茴珑厕秩募勺吨寓斤历亩迫筷厘最淫螺韬兮宽匪筛襄赢轭复兲诈刃堰戎痞蚁饷它冀铸冂冃円冇冉册嫁厉砺竭醮冏牧冑冓冔冕冖冗冘冞冢窄抑诬冥冫烘菇蛰冷凝坨橇淇淋炭饼砖碛窖醋雕雹霜冱冶炉艳嘲峻滩淡漠煖飕饮冼冽凃凄怆梗凅凇净凊凋敝蒙凔凛遵汞脢凞几凢処凰凯凵凶焰凸折刷纹预丧喽奔巡榜殡芙蓉租笼辑鞘萃凼锯镬刁蛮刂娩崩批拆摊掰蘖骤歧颗秒袂赃勿嘱忌磋琢肤刈羽刎讼戮舂桨艇刓刖霹雳刜创犊刡恙墅帜筵致劫劫刨昏默攸尿欲熏润薰圭删刮痧铲刱刲刳刴刵踏磅戳柏槐绣芹苋猬舟铭鹄鹜劫剁剃辫刭锉履铅克剌姻咽哨廊掠桅沿召瞻翅赵卜渺茫郭剒剔剕沥剚愎毅讷才剜剥啄采剞剟剡剣剤䌽剐肾驶黏剰袍剀紊铲剸剺剽剿劁劂札劈啪柴扳啦刘奭姥夼昫涓熙禅禹锡翔雁鹗刽刿弩柄蜻蛉劒劓劖劘劙澜篑赏矶釜晋甜薪逐劦熔纣虐赤囚劬劭労劵效劻劼劾峭艮勅勇励勍勐腊脖庞漫饲荡粥辄勖勗勘骄馁碌泮雇捐竹骑殊阱绩朴恳谨剿勧勩勯勰劢勋勷劝惩慰诫谏勹芡践阑匁庇拯粟扎袱裹饺匆遽匈匉匊匋匍匐茎匏匕妆痰脓蛹斋苑烤蹈塘羌熊阀螳螂疆碚竿纬荷茵邙魏匚匜匝匟扶稷匣匦拢匸匹耦匽匾匿卂叮疮禧轸堤棚迢钧炼卄卆遐卉瓷盲瓶当胱腱裸卋卌卍卐怯污贱鄙龌龊陋卓溪唐梯渔陈枣泥漳浔涧梨芬谯赡辕迦郑単驴弈洽鳌卛占筮卝卞卟吩啉屎翠厄卣卨卪卬卮榫袄玺绶钮蚤惧殆笃耸卲帘帙绕恤卼卽厂厎厓厔厖厗奚厘厍厜厝谅厕厤厥厪腻孢厮厰厳厣厹厺粕垢芜菁厼厾叁悟茸薯叄吵笄悌哺讥坫垄弧芯杠潜婴刍袁诘贪谍煽馈驳収岳缔灾贿骗叚叡吻拦蘑蜜诀燧玩砚筝椎蔺铜逗骊另觅叨唠谒杵姓喊嚷嚣咚咛塑寻恼憎擦只泣渗蝠叱吒咄咤喝籀黛舵舷叵叶铎懿昭穰苴辽叻叼吁堑嫖赌瞧爬众抒吅吆夥卺橡涤抱纵摩郡唁坠扇篮膀袜颈吋忾谘酬哭妓媛暗表缰迩妃羿絮蕃浑拐葵暮隅吔吖啶嗪戚吜啬噬咽吟哦咏吠吧唧嗒咐吪隽咀征燐苞茹钙哧吮吰吱嘎吲哚吴栋娇窟孟箫忠晗淞阖闾趼宇呐睛嘘拂捧疵熄竽笛糠吼吽呀吕韦蒙呃呆笨呇贡呉罄呋喃呎呏呔呠呡痴呣呤呦呧瑛眩扒晬淑姬瑜璇鹃呪呫哔嚅嗫呬呯呰呱呲咧噌钝呴呶呷呸呺呻哱咻啸噜吁坎坷逻呿咁咂咆哮咇咈咋蟹煦珅蔼咍咑咒诅咔哒嚓咾哝哩喱咗咠咡咢咣咥咦咨嗟询咩咪咫啮啮咭咮咱咲咳呛嗽咴啕咸咹咺呙喉咿婉恸悯赋矜绿茗蓝哂抢瞒哆嗦啰噻啾滨彗哋哌哎唷哟哏哐哞哢哤哪里哫啼喘哰哲萎蚌哳咩哽哿呗唅唆唈唉唎唏哗尧棣殇璜睿肃唔睇唕吣唞唣喳唪唬唰喏唲唳唵嘛唶唸唹唻唼唾唿啁啃鹦鹉啅埠栈榷祺铺鞅飙啊啍啎啐啓啕啖啗啜哑祈啢衔啤啥啫啱啲啵啺饥啽噶昆沁喁喂喆裙喈咙喋喌喎喑喒喓喔粗喙幛庆滋鹊喟喣喤喥喦喧骚喨喩梆吃葡萄喭驼挑吓碰枞瓣
纯疱藻趟铬喵営喹喺喼喿嗀嗃嗄嗅嗈嗉嗊嗍嗐嗑嗔诟嗕嗖嗙嗛嗜痂癖嗝嗡嗤嗥嗨唢嗬嗯嗰嗲嗵叽嗷嗹嗾嗿嘀嘁嘂嘅惋嘈峪禾荫啀嘌嘏嘐嘒啯啧嘚唛嘞嘟囔嘣嘥嘦嘧嘬嘭这谑严敞馋松哓嘶嗥呒虾嘹嘻啴嘿噀噂噅噇噉噎噏噔噗噘噙噚咝噞噢噤蝉皿噩噫噭嗳噱哙噳嚏涌洒欲巫霏噷噼嚃嚄嚆抖哜尝嚔苏嚚嚜嚞嚟呖嚬嚭嚮嚯亸喾饬按竣苛嚵嘤啭冁呓膪谦囍囒囓囗囘萧酚飘溅谛囝溯眸纥銮鹘囟殉囡団囤囥囧囨囱囫囵囬囮囯囲図囶囷囸囹圄圉拟囻囿圀圂圃圊粹蠹赦圌垦圏滚鲱凿枘圕圛圜圞坯埂壤骸炕祠窑豚绅魠鲮鳖圧握圩圪垯圬圮圯炸岬幔毯祇窨菩溉圳圴圻圾坂坆沾坋坌舛壈昆垫墩椅坒坓坩埚坭坰坱坳坴坵坻坼杨挣涎帘垃垈垌垍垓垔垕垗垚垛垝垣垞垟垤垧垮垵垺垾垿埀畔埄埆埇埈埌殃隍埏埒埕埗埜垭埤埦埧埭埯埰埲埳埴埵埶绋埸培怖桩础辅埼埽堀诃侄庑堃堄摧磐贞韧砌堈堉垩堋堌堍堎垴堙堞堠礁堧堨舆堭堮蜓摘堲堳堽堿塁塄塈煤茔棵塍垲埘塓绸塕鸦沽虱塙冢塝缪塡坞埙塥塩塬塱场螨塼塽塾塿墀墁墈墉墐夯増毁墝墠墦渍钵墫墬堕墰墺墙橱壅壆壊壌壎壒榨蒜壔壕壖圹垆壜壝垅壡壬壭壱売壴壹壻壸寝壿夂夅夆変夊夌漱邑夓腕泄甥御骼夗夘夙衮瑙妊娠醣枭珊莺鹭戗幻魇夤蹀秘擂鸫姚宛闺屿庾挞拇賛蛤裨菠氅漓捞湄蚊霆鲨箐篆篷荆肆舅荔鲆巷惭骰辟邱镕镰阪漂烩鲵鲽鳄鸨胪鹏妒峨谭枰晏玑癸祝秤竺牡籁恢罡蝼蝎赐绒御梭夬夭砣榆怙枕夶夹馅奄崛葩谲奈贺祀赠奌奂奓奕䜣詝奘奜奠奡奣陶奨奁魁奫奬奰娲孩贬隶酥宄狡猾她姹嫣妁毡荼皋膻蝇嫔妄妍嫉媚娆妗趣妚妞妤碍妬娅妯娌妲妳妵妺姁姅姉姗姒姘姙姜姝姞姣姤姧姫姮娥姱姸姺姽婀娀诱慑胁娉婷娑娓娟娣娭娯娵娶娸娼婊婐婕婞婤婥溪孺婧婪婬婹婺婼婽媁媄媊媕媞媟媠媢媬媮妫媲媵媸媺媻媪眯媿嫄嫈袅嫏嫕妪嫘嫚嫜嫠嫡嫦嫩嫪毐嫫嫬嫰妩嫺娴嫽嫿妫嬃嬅嬉耍婵痴艳嬔嬖嬗嫱袅嫒嬢嬷嬦嬬嬭幼嬲嬴婶嬹嬾嬿孀娘孅娈孏曰癫屏孑孓雀孖斟篓谜摺孛矻鸠崮轲祜鸾孥邈毓棠膑孬孭孰孱孳孵泛罔衔孻孪宀宁冗拙株薇掣抚琪瓿榴谧弥宊濂祁瑕宍宏碁宓邸谳実潢町宥宧宨宬徵崎骏掖阙臊煮禽蚕宸豫寀寁寥寃檐庶寎暄碜寔寖寘寙寛寠苫寤肘洱滥蒗陕核寪弘绰螽宝擅疙瘩晷対檐専尃尅赎绌缭畴衅尌峙醌襟痲碧屁昊槌淘恵瀑牝畑莓缸羚觑蔻脏躁尔尓锐尗尙尜尟尢��尨尪尬尭尰擒尲尶尴尸尹潽蠖蛾尻扣梢蚴鳍脬蹲屇屌蚵屐屃挪屖屘屙屛屝屡屣峦嶂岩舄屧屦屩屪屃屮戍驻钾崖嵛巅旮旯楂榄榉芋茱萸靛麓屴屹屺屼岀岊岌岍阜岑彭巩岒岝岢岚岣岧岨岫岱岵岷峁峇峋峒峓峞峠嵋峨峰峱岘峹峿崀崁崆祯崋崌崃岖昆崒崔嵬巍萤颢崚崞崟崠峥巆崤崦崧殂岽崱崳崴崶崿嵂嵇嵊泗嵌嵎嵒嵓岁嵙嵞嵡嵩嵫嵯嵴嵼嵾嵝崭崭晴嶋嶌嶒嶓嵚崂嶙嶝嶞峤嶡嶢峄嶨嶭嶮嶰嶲岙嵘巂巃巇巉岿巌巓巘巛滇芎巟巠弋回巣巤炊擘蜥蟒蛊觋巰蜀彦淖杏茂甫楞巻巽帼巿帛斐鲫蕊帑帔帗帚琉汶帟帡帣帨裙帯帰帷帹暆帏幄帮幋幌幏帻幙帮幞幠幡幢幦幨幩幪帱幭幯幰遥蹉跎馀庚鉴幵幷稚邃庀庁広庄庈庉笠庋跋庖牺庠庤庥鲸庬庱庳庴庵馨衢庹庿廃厩廆廋廌廎廏廐廑廒荫廖廛厮搏锣廞弛袤廥廧廨廪廱绵踵髓廸迫瓯邺廻廼廾廿躔弁皱弇弌弍弎弐弑吊诡憾荐弝弢弣弤弨弭弮弰弪霖繇焘斌旭溥骞弶弸弼弾彀彄别累纠强彔彖彘彟彟陌彤贻彧绘虹彪炳雕蔚鸥彰瘅彲彳彴仿彷徉徨彸彽踩敛旆徂徇徊渭畲铉裼従筌徘徙徜徕膳苏萌渐徬徭醺徯徳徴潘徻徼忀瘁胖燎怦悸颤扉犀澎湃砰恍惚绞隘忉惮挨饿忐忑忒忖応忝忞耿忡忪忭忮忱忸怩忻悠懑怏遏怔怗怚怛怞怼黍讶怫怭懦怱怲恍怵惕怸怹恁恂恇恉恌恏恒恓恔恘恚恛恝恞恟恠恣恧眄恪恫恬澹恰恿悀悁悃悄悆悊悐悒晦悚悛悜悝悤您悩悪悮悰悱凄恻德悴怅惘闷悻悾惄愫钟蒐惆惇惌惎惏惓惔惙惛耄惝疟浊恿惦德恽惴蠢惸拈愀愃愆愈愊愍愐愑愒愓愔愕恪氓蠢騃昵惬赧悫愬愮愯恺愼慁恿慅慆慇霭慉慊愠慝慥怄怂慬慱悭慴慵慷戚焚憀灼郁憃惫憋憍眺捏轼愦憔憖憙憧憬憨憪憭怃憯憷憸憹憺懃懅懆邀懊懋怿懔懐懞懠懤懥恹懫懮懰懱毖懵遁梁雍忏懽戁戄戆戉戋戕戛戝戛戠戡戢戣戤戥戦戬戭戯轰戱披菊牖戸戹戺戻卯戽锹扂楔扃扆扈扊杖牵绢铐镯赉扐搂搅烊盹瞌跟趸镲靶鼾払扗玫腮扛扞扠扡扢盔押扤扦扱罾揄绥鞍郤窾扻扼扽抃抆抈抉抌抏瞎抔缳缢擞抜拗択抨摔歉蹿牾抶抻搐泵菸拃拄拊髀抛拌脯拎拏拑擢秧沓曳挛迂拚拝拠拡拫拭拮踢拴拶拷攒拽掇芥橐簪摹疔挈瓢骥捺蹻挌挍挎挐拣挓挖掘浚挙揍聩挲挶挟挿捂捃捄捅捆捉捋胳膊揎捌捍捎躯蛛捗捘捙捜捥捩扪捭据捱捻捼捽掀掂抡臀膘掊掎掏掐笙掔掗掞棉芍掤搪阐掫掮掯揉掱掲掽掾揃揅揆搓揌诨揕揗揘揜揝揞揠揥揩揪揫橥遒麈揰揲揵揶揸背揺搆搉搊搋搌搎搔搕撼橹捣搘搠搡搢搣搤搥搦搧搨搬楦裢讪赸掏搰搲搳搴揾搷搽搾搿摀摁摂摃摎掴摒摓跤摙摛掼摞摠摦喉羯摭摮挚摰摲抠摴抟摷掺摽撂撃撅稻撊撋挦锏泼撕撙撚㧑挢撢掸撦撅撩撬撱朔揿蚍蜉挝捡擀掳闯擉缶觚擐擕擖擗擡擣擤澡腚擧擨擩擫擭摈拧撷擸撸擽擿攃摅撵攉攥攐攓撄搀撺每攩攫辔澄攮攰攲攴轶攷砭讦攽碘敁敃敇敉叙敎筏敔敕敖闰诲敜煌敧敪敳敹敺敻敿斁衽斄牒绉诌斉斎斓鹑谰驳鳢斒筲斛斝斞斠斡斢斨斫斮晾沂潟颖绛邵斲斸釳於琅斾斿旀旗旃旄涡旌旎旐旒旓旖旛旝旟旡旣浴旰獭魃旴时旻旼旽昀昃昄昇昉晰躲澈熹皎皓矾昑昕
昜昝昞昡昤晖笋昦昨是昱昳昴昶昺昻晁蹇隧蔬髦晄晅晒晛晜晞晟晡晢晤晥曦晩萘莹顗晿暁暋暌暍暐暔暕煅旸暝暠暡曚暦暨暪朦胧昵暲殄冯暵暸暹暻暾曀晔昙曈曌曏曐暧曘曙曛叠昽曩骆曱甴肱曷牍禺锟曽沧耽朁朅朆杪栓夸竟粘绦朊膺朏朐朓朕朘朙瞄觐溘饔飧朠朢朣栅椆淀虱朩朮朰朱炆璋钰炽鹮朳槿朵朾朿杅杇杌陧欣钊湛漼楷瀍煜玟缨翱肇舜贽适逵杓杕杗杙荀蘅杝杞脩珓筊杰榔狍閦颦缅莞杲杳眇杴杶杸杻杼枋枌枒枓衾葄翘纾逋枙狸桠枟槁枲枳枴枵枷枸橼枹枻柁柂柃柅柈柊柎某柑橘柒柘柙柚柜柞栎柟柢柣柤柩柬柮柰柲橙柶柷柸柺査柿栃栄栒栔栘栝栟柏栩栫栭栱栲栳栴檀栵栻桀骜桁镁桄桉桋桎梏椹葚桓桔桕桜桟桫椤桭杯桯桲桴桷桹湘溟梃梊梍梐潼栀枧梜梠梡梣梧梩梱梲梳梴梵梹棁棃樱棐棑棕榈簑绷蓑枨棘棜棨棩棪棫棬棯棰棱棳棸棹椁棼碗椄苕椈椊椋椌椐椑椓椗検椤椪椰椳椴椵椷椸椽椿楀匾楅篪楋楍楎楗楘楙楛楝楟楠楢楥桢楩楪楫楬楮楯楰梅楸楹楻楽榀榃榊榎槺榕榖榘榛狉莽搒笞榠榡榤榥榦榧杩榭榰榱梿霰榼榾桤槊闩槎槑槔槖様槜槢槥椠槪槭椮槱槲槻槼槾樆樊樏樑樕樗樘樛樟樠樧樨権樲樴樵猢狲桦樻罍樾樿橁橄橆桡笥龠橕橚橛辆椭橤橧竖膈跨橾橿檩檃檇柽檍檎檑檖檗桧槚檠樯檨檫檬梼槟檴檵柠棹櫆櫌栉櫜椟櫡槠栌枥榇栊櫹棂茄櫽欀欂欃欐欑栾欙棂溴欨欬欱欵欶欷歔欸欹欻欼欿歁歃歆艎歈歊莳蝶歓歕歘歙歛歜欤歠蹦诠镶蹒跚升陟歩歮歯歰歳歴璞歺瞑歾殁夭殈殍殑殗殜殙殛殒殢殣殥殪殚僵殰殳荃殷殸殹蛟殻肴谤殴毈毉喂毎���蕈毗毘毚茛邓毧毬毳毷毹毽毾毵牦氄氆靴氉氊氇氍氐聊氕氖気氘氙氚氛氜氝氡汹焊痉氤氲氥氦铝锌氪烃氩铵痤汪浒漉痘盂碾菖蒲蕹蛭螅氵冰氹氺氽烫氾氿渚汆汊汋汍汎汏汐汔汕褟汙汚汜蓠沼秽蔑汧汨汩汭汲汳汴堤汾沄沅沆瀣沇沈葆浸沦湎溺痼疴沌沍沏沐沔沕沘浜畹砾沚沢沬沭沮沰沱灢沴沷籽沺烹濡洄泂肛泅泆涌肓泐泑泒泓泔泖泙泚泜泝泠漩馍涛粼泞藓鳅泩泫泭泯铢泱泲洇洊泾琵琶荽蓟箔洌洎洏洑潄濯洙洚洟洢洣洧洨洩痢滔洫洮洳洴洵洸洹洺洼洿淌蜚浄浉浙赣渫浠浡浤浥淼瀚浬浭翩萍浯浰蜃淀苔蛞蝓蜇螵蛸煲鲤浃浼浽溦涂涊涐涑涒涔滂莅涘涙涪涫涬涮涴涶涷涿淄淅淆淊凄黯淓淙涟淜淝淟淠淢淤渌淦淩猥藿亵淬淮淯淰淳诣涞纺淸淹炖癯绮渇済渉渋渓渕涣渟渢滓渤澥渧渨渮渰渲渶渼湅湉湋湍湑湓湔黔湜湝浈湟湢湣湩湫湮麟湱湲湴涅満沩溍溎溏溛舐漭溠溤溧驯溮溱溲溳溵溷溻溼溽溾滁滃滉滊荥滏稽滕滘汇滝滫滮羼耷卤滹浐煎漈漊漎绎漕漖漘漙沤漜漪漾漥漦漯漰溆漶漷濞潀颍潎潏潕潗潚潝潞潠潦祉疡潲潵滗潸潺潾涠澁澂澃澉澌澍澐澒澔澙渑澣澦澧澨澫澬浍澰澴澶澼熏郁濆濇濈濉濊貊濔疣濜濠濩觞浚濮盥潍濲泺瀁滢渎渖瀌浏瀒瀔濒泸瀛潇潆瀡潴泷濑瀬弥潋瀳瀵瀹瀺瀼沣滠灉灋灒漓灖灏灞灠滦灥灨滟灪蜴灮烬獴灴灸灺炁炅鱿炗炘炙炤炫疽烙钎炯炰炱炲炴炷毁炻烀烋瘴鲳烓烔焙烜烝烳饪烺焃焄耆焌焐焓焗焜焞焠焢焮焯焱焼煁煃煆煇煊熠煍熬煐炜煕暖熏硷霾煚煝煟煠茕矸煨琐炀萁煳煺煻熀熅熇熉罴荧穹炝熘熛熜稔谙烁熤熨熯熰眶蚂颎熳熸熿燀烨燂燄盏燊燋燏燔隼燖焖燠燡灿燨燮燹燻燽燿爇爊爓爚爝爟爨蟾爯爰为爻丬爿牀牁牂牄牋窗牏牓窗釉牚腩蒡虻牠虽蛎牣牤牮牯牲牳牴牷牸牼绊牿靬犂犄犆犇犉犍犎犒荦犗犛犟犠犨犩犪犮犰狳犴犵犺狁甩狃狆狎狒獾狘狙黠狨狩狫狴狷狺狻豕狈蜘猁猇猈猊猋猓猖獗猗猘狰狞犸猞猟獕猭猱猲猳猷猸猹猺玃獀獃獉獍獏獐獒毙獙獚獜獝獞獠獢獣獧鼇蹊狯猃獬豸狝獯鬻獳犷猕猡玁菟玅玆玈珉糁禛郅玍玎玓瓅玔玕玖玗玘玞玠玡玢玤玥玦珏瑰玭玳瑁玶玷玹玼珂珇珈瑚珌馐馔珔珖珙珛珞珡珣珥珧珩珪佩珶珷珺珽琀琁陨玡琇琖琚琠琤琦琨琫琬琭琮琯琰琱琲琅琴珐珲瑀瑂瑄瑉玮瑑瑔瑗瑢瑭瑱瑲瑳瑽瑾瑿璀璨璁璅璆璈琏璊璐璘璚璝璟璠璡璥瑷璩璪璫璯璲玙璸璺璿瓀璎瓖瓘瓒瓛脐瓞瓠瓤瓧瓩瓮瓰瓱瓴瓸瓻瓼甀甁甃甄甇甋甍甎甏甑甒甓甔瓮甖甗饴蔗甙诧钜粱盎锈团甡褥産甪甬甭甮宁铠甹甽甾甿畀畁畇畈畊畋畎畓畚畛畟鄂畤畦畧荻畯畳畵畷畸畽畾疃叠疋疍疎箪疐疒疕疘疝疢疥疧疳疶疿痁痄痊痌痍痏痐痒痔痗瘢痚痠痡痣痦痩痭痯痱痳痵痻痿瘀痖瘃瘈瘉瘊瘌瘏瘐痪瘕瘖瘙瘚瘛疭瘜瘝瘗瘠瘥瘨瘭瘆瘯瘰疬瘳疠瘵瘸瘺瘘瘼癃痨痫癈癎癐癔癙癜癠疖症癞蟆癪瘿痈発踔绀蔫酵皙砬砒翎翳蔹钨镴皑鹎驹暨粤褶皀皁荚皃镈皈皌皋皒朱皕皖皘皜皝皞皤皦皨皪皫皭糙绽皴皲皻皽盅盋碗盍盚盝踞盦盩秋千盬盭眦睁瞤盯盱眙裰盵盻睐眂眅眈眊県眑眕眚眛眞眢眣眭眳眴眵眹瞓眽郛睃睅睆睊睍睎困睒睖睙睟睠睢睥睪睾睯睽睾眯瞈瞋瞍逛瞏瞕瞖眍䁖瞟瞠瞢瞫瞭瞳瞵瞷瞹瞽阇瞿眬矉矍铄矔矗矙瞩矞矟矠矣矧矬矫矰矱硪碇磙罅舫阡、矼矽礓砃砅砆砉砍砑砕砝砟砠砢砦砧砩砫砮砳艏砵砹砼硇硌硍硎硏硐硒硜硖砗磲茚钡硭硻硾碃碉碏碣碓碔碞碡碪碫碬砀碯碲砜碻礴磈磉磎硙磔磕磖磛磟磠磡磤磥蹭磪磬磴磵磹磻硗礀硚礅礌礐礚礜礞礤礧礮砻礲礵礽礿祂祄祅祆禳祊祍祏祓祔祕祗祘祛祧祫祲祻祼饵脔锢禂禇禋祦禔祎隋禖禘禚禜禝禠祃禢禤禥禨禫祢禴禸秆秈秊闱飒秋秏秕笈蘵赁秠秣秪秫秬秭秷秸稊稌稍稑稗稙稛稞稬秸稲稹稼颡稿穂穄穇穈穉穋稣贮穏穜穟秾穑穣穤穧穨穭穮穵穸窿阒窀窂窅窆窈窕窊窋窌窒窗窔窞窣窬
黩蹙窑窳窴窵窭窸窗竁竃竈竑竜并竦竖篦篾笆鲛竾笉笊笎笏笐靥笓笤箓笪笫笭笮笰笱笲笳笵笸笻筀筅筇筈筎筑筘筠筤筥筦笕筒筭箸筰筱筳筴宴筸箂个箊箎箑箒箘箙箛箜篌箝箠箬镞箯箴箾篁筼筜篘篙篚篛篜篝篟篠篡篢篥篧篨篭篰篲筚篴篶篹篼箦簁簃簆簉簋簌簏簜簟簠簥簦簨簬簰簸簻籊藤籒籓籔签籚篯箨籣籥籧笾簖籫籯芾麴籵籸籹籼粁秕粋粑粔粝粛粞粢粧粨粲粳稗粻粽辟粿糅糆糈糌糍糒糔萼糗蛆蹋糢糨糬粽糯糱籴粜糸糺紃蹼鲣霉纡纨绔纫闽襻紑纰纮锭鸢鹞纴紞紟扎紩紬绂绁纻紽紾绐絁絃絅経絍绗絏缡褵絓絖絘絜绚絣螯絪絫聒絰絵绝絺絻絿綀绡綅绠绨绣綌綍綎捆綖綘継続缎绻綦綪线綮綯绾罟蝽綷縩绺绫緁绲緅緆缁绯緌緎総緑绱緖缃缄缂绵缗緤褓缌纂緪緰缑缈缏缇縁縃縄萦缙缒縏缣縕缞縚缜缟缛縠縡縢縦绦縯縰骋缧縳纤缦絷缥縻衙縿繄缫繈繊繋繐缯繖繘繙繠缋繣繨缰缲繸繻缱纁纆纇缬缵纩纑纕缵纙纚纛缾罃罆坛罋罂罎罏罖罘罛罝罠罣罥罦罨罫罭锾罳罶罹罻罽罿羂羃羇芈蕉51鸵羑羖羌羜羝羢羣羟羧羭羮羰羱羵羶羸藜鲐翀翃翅翊翌翏翕翛翟翡翣翥翦跹翪翫翚翮翯翱翽翾翿板饕鸹锨耋耇耎耏专耒耜耔耞耡耤耨耩耪耧耰鬓耵聍聃聆聎聝聡聦聱聴聂聼阈聿肄肏肐肕腋肙肜肟肧胛肫肬肭肰肴肵肸肼胊胍胏胑胔胗胙胝胠铨胤胦胩胬胭胯胰胲胴胹胻胼胾脇脘脝脞脡脣脤脥脧脰脲脳腆腊腌臜腍腒腓胨腜腠脶腥腧腬腯踝蹬镣腴腶蠕诽膂腽嗉膇膋膔腘膗膙膟黐膣膦膫膰膴膵膷脍臃臄臇臈臌臐臑臓膘臖臙臛臝臞臧蓐诩臽臾臿舀舁鳑鲏舋舎舔舗馆舝舠舡舢舨舭舲舳舴舸舺艁艄艅艉艋艑艕艖艗艘艚艜艟艣舣艨艩舻艬艭荏艴艳艸艹艻艿芃芄芊萰陂藭芏芔芘芚蕙芟芣芤茉芧芨芩芪芮芰鲢芴芷芸荛豢芼芿苄苒苘苙苜蓿苠苡苣荬苤苎苪镑苶苹苺苻苾茀茁范蠡萣茆茇茈茌茍茖茞茠茢茥茦菰茭茯茳藨茷藘茼荁荄荅荇荈菅蜢鸮荍荑荘豆荵荸荠莆莒莔莕莘莙莚莛莜莝莦莨菪莩莪莭莰莿菀菆菉菎菏菐菑菓菔芲菘菝菡菢菣菥蓂菧菫毂蓥菶菷菹醢菺菻菼菾萅萆苌萋萏萐萑萜萩萱萴莴扁萻葇葍葎葑荭葖葙葠葥苇葧葭药葳葴葶葸葹葽蒄蒎莼茏薹莅蒟蒻蒢蒦蒨蒭藁蒯蒱鉾蒴蒹蒺蒽荪蓁蓆蓇蓊蓌蓍蓏蓓蓖蓧蓪蓫荜跣藕苁蓰蓱莼蓷蓺蓼蔀蔂蔃蔆蔇蔉蔊蔋蔌蔎蔕蔘蔙蒌蔟锷蒋雯茑蔯蔳麻蔵蔸蔾荨蒇蕋蕍荞蕐蕑芸莸蕖蕗蕝蕞蕠蕡蒉蕣蕤蕨蕳蓣蕸蕺蕻薀薁薃薅薆荟薉芗薏薐蔷薖薘剃谔钗薜薠薢薤薧薨薫薬薳薶薷薸薽薾薿藄藇藋荩藐藙藚藟藦藳藴苈藷藾蘀蘁蕲苹蘗蘘蘝蘤蘧蘩蘸蘼虀虆虍蟠虒虓虖虡虣虥虩虬虰蛵蛇虷鳟虺虼蚆蚈蚋蚓蚔蚖蚘蚜蚡蚣蚧蚨蚩蚪蚯蚰蜒蚱蚳蚶蚹蚺蚻蚿蛀蛁蛄蛅蝮蛌蛍蛐蟮蛑蛓蛔蛘蛚蛜蛡蛣蜊蛩蛱蜕螫蜅蚬蜈蝣蜋蜍蜎蜑蠊蜛饯蜞蜣蜨蜩蜮蜱蜷蜺蜾蜿蝀蝃蝋蝌蝍蝎蝏蝗蝘蝙蝝鲼蝡蝤蝥猿蝰虻蝲蝴蝻螃蠏蛳螉螋螒螓螗螘螙螚蟥螟螣螥螬螭䗖螾螀蟀蟅蝈蟊蟋蟑蟓蟛蟜蟟蟢虮蟨蟪蟭蛲蟳蛏蟷蟺蟿蠁蠂蠃虿蠋蛴蠓蚝蠗蠙蠚蠛蠜蠧蟏蠩蜂蠮蠰蠲蠵蠸蠼蠽衁衄衄衇衈衉衋衎衒同衖胡衞裳钩衭衲衵衹衺衿袈裟袗袚袟袢袪袮袲袴袷袺袼褙袽裀裉袅裋夹裍裎裒裛裯裱裲裴裾褀褂褉褊裈褎褐褒褓褔褕袆褚褡褢褦褧褪褫袅褯褰褱裆褛褽褾襁褒襆裥襉襋襌襏襚襛襜裣襞襡襢褴襦襫襬襭襮襕襶襼襽襾覂覃覅霸覉覊覌覗觇覚覜觍觎覧覩觊觏覰観觌觔觕觖觜觽觝觡酲觩觫觭觱觳觯觷觼觾觿言赅讣訇訏訑訒诂讬訧訬訳訹证訾詀詅诋毁詈詊讵詑诒诐詗诎察詨诜詶詸詹詻诙诖誂誃诔锄诓誋诳诶悖誙诮诰誧説読誯谇訚谄谆諆諌诤诹诼諕谂谀諝谝諟喧谥諴諵谌谖誊謆謇歌謍謏謑谡谥謡謦謪谪讴謷謼谩哗譅譆譈譊讹譒撰谮鑫譞噪譩谵譬譱譲谴譸譹谫讅讆詟䜩雠讐谗谶讙谠讟谽豁豉豇岂豊豋豌豏豔豞豖豗豜豝豣豦豨豭豱豳豵豶豷豺豻貅貆狸猊貔貘䝙貜貤餍贳餸贶贲赂賏赊赇赒賝赓赕賨赍斗賮賵賸赚赙赜赟贉赆赑贕赝赬赭赱赳迄趁趂趄趐趑趒趔趡趦趫趮趯趱趴趵趷趹趺趿跁跂跅跆踬跄跐跕跖跗跙跛跦跧跩跫跬跮跱跲跴跺跼跽踅踆踈踉踊踒踖踘踜踟躇蹰踠踡踣踤踥踦踧跷踫踮逾踱踊踶踹踺踼踽躞蹁蹂躏蹎蹐蹓蹔跸蹚蹜蹝迹蹠蹡蹢跶蹧蹩蹪蹯鞠蹽躃躄躅踌跻躐踯跞躘躙躗躝躠蹑躜躧躩躭躰躬躶軃軆辊軏轫軘軜軝腭転軥軨軭軱轱辘軷轵轺軽軿輀輂辇辂辁輈挽輗辄辎辋輠輤輬輭輮辏輴輵輶輹輼辗辒轇轏轑轒辚轕轖轗轘轙轝轞轹轳罪辣辞辵辶辺込辿迅迋迍麿迓迣迤逦迥迨迮迸迺迻迿逄逅逌逍逑逓迳逖逡逭逯逴逶逹遄遅侦遘遛遝遢遨遫遯遰遴绕遹遻邂邅邉邋邎邕邗邘邛邠邢邧邨邯郸邰邲邳邴邶邷邽邾邿郃郄郇郈郔郕郗郙郚郜郝郞郏郠郢郪郫郯郰郲郳郴郷郹郾郿鄀鄄郓鄇鄈鄋鄍鄎鄏鄐鄑邹邬鄕郧鄗鄘鄚鄜鄞鄠鄢鄣鄤鄦鄩鄫鄬鄮鄯鄱郐鄷鄹邝鄻鄾鄿酃酅酆酇郦酊酋酎酏酐酣酔酕醄酖酗酞酡酢酤酩酴酹酺醁醅醆醊醍醐醑醓醖醝酝醡醤醨醪醭醯醰酦醲醴醵醸醹醼醽醾釂酾酽釆釈鲈镏阊钆钇钌钯钋鼢鼹钐钏釪釬釭釱钍釸钕钫鈃钭鈆鈇钚鈊鈌钤钣鈒鈤钬钪鈬铌铈钶铛钹铍钸钿鉄鉆铊铇鉌铋鉏铂钷铆钵鉥钲鉨钼钽鉱鉲鉶铰铒鉼铪銍銎铣銕镂铫铦铑铷銤铱铟銧铥铕铯銭銰焊銶锑锉汞鋂锒鋆鋈鋊铤鋍铗鋐鋑鋕鋘鋙锊锓锔锇铓鋭铖锆锂铽鋳鋹鋺鉴镚钎錀锞锖锫锩錍铔锕錔锱铮锛錞锬锜錤錩錬録铼錼锝钔锴鍉镀鍏鍐铡鍚锻锽锸锲锘鍫鍭鍱鍴锶鍹锗针锺锿镅鎉鎋鎌鎍鎏鎒鎓鎗镉鎚鎞镃鎤铩锼鎭鎯镒镍鎴镓
��鎹镎镟鏊镆镠镝鏖铿锵鏚镗镘镛鏠鏦錾镤鏸镪鏻鏽鏾铙鐄鐇鐏铹镦镡鐗馗镫镢镨鐡锎镄鐩镌鐬鐱镭鐶鐻鐽镱鑀鑅镔鑐鑕鑚鑛鑢鑤镥鑪镧鑯鑱鑴鑵镊镢钃镻闫闬闶闳閒闵閗閟阂関合閤哄阆閲阉閺阎阏阍阌暗闉阕阗闑闒闿闘闚阚闟闠闤闼阞阢阤阨阬阯阹阼阽陁陑陔陛陜陡陥陬骘陴険陼陾阴隃隈隒隗隞隠隣隤隩隮隰颧隳隷隹雂雈雉雊雎雑雒雗雘雚雝雟雩雰雱驿霂霅霈霊沾霒霓霙霝霢霣霤霨霩霪霫霮靁叇叆靑靓靣腼靪靮靰靳靷靸靺靼靿鞀鞃鞄鞍鞗鞙鞚鞝鞞鞡鞣鞨鞫鞬鞮鞶鞹鞾鞑韅鞯驮韍韎韔韖韘韝韫韡韣韭韭韱韹韺頀刮頄顸顼頍颀颃颁頖頞頠頫頬颅頯頲颕頼悴顋顑颙颛颜顕顚顜颟顣颥颞飐飑台飓颸飏飖颽颾颿飀飂飚飌翻飡飣饲飥饨饫飮飧飶餀餂饸饹餇餈饽哺馂餖餗餚馄馃餟餠餤餧餩餪餫糊餮糇餲饧馎糕饩馈馊馌馒饇馑馓膳饎饐饘饟馕馘馥馝馡馣骝骡馵馹駃駄駅駆駉駋驽駓驵駗骀驸駜骂骈駪駬骃駴骎駹駽駾騂騄骓騆騉騋骒骐麟騑騒験騕骛騠騢騣騤騧骧騵驺骟騺蓦骖骠骢驆驈骅驌骁驎骣驒驔驖驙驦驩驫骺鲠骫骭肮骱骴骶骷髅骾髁髂髄髆膀髇髑髌髋髙髝髞髟髡髣髧髪髫髭髯髲髳髹髺髽髾鬁鬃鬅鬈鬋鬎鬏鬐鬑鬒鬖鬗鬘鬙鬠鬣斗鬫鬬阄鬯鬰鬲鬵鬷魆魈魊魋魍魉魑魖鳔魛魟魣魦魨魬鲂魵魸鮀鲅鮆鲧鲇鲍鲋鮓鲒鲕鮟鱇鮠鮦鮨鲔鲑鮶鮸鮿鲧鯄鯆鲩鯈鲻鯕鲭鲞鯙鯠鲲鯥鲰鲶鳀鯸鳊鲗䲠鹣鳇鰋鳄鳆鰕鰛鰜鲥鰤鳏鰦鳎鳐鳁鳓鰶鲦鲡鰼鰽鱀鱄鳙鱆鳕鱎鱐鳝鳝鳜鲟鲎鱠鳣鱨鲚鱮鱲鱵鱻鲅鳦凫鳯鳲鳷鳻鴂鴃鴄鸩鴈鴎鸰鴔鴗鸳鸯鸲鹆鸱鴠鴢鸪鴥鸸鹋鴳鸻鴷鴽鵀鵁鸺鹁鵖鵙鹈鹕鹅鵟鵩鹌鵫鵵鵷鵻鹍鶂鶊鶏鶒鹙鶗鶡鶤鶦鶬鶱鹟鶵鶸鶹鹡鶿鹚鷁鷃鷄鷇䴘䴘鷊鷏鹧鷕鹥鸷鷞鷟鸶鹪鹩鷩鷫鷭鹇鹇鸴鷾䴙鸂鸇䴙鸏鸑鸒鸓鸬鹳鸜鹂鹸咸鹾麀麂麃麄麇麋麌麐麑麒麚麛麝麤麸面麫麮麯麰麺麾黁黈黉黢黒黓黕黙黝黟黥黦黧黮黰黱黪黶黹黻黼黾鼋鼂鼃鼅鼈鼍鼏鼐鼒冬鼖鼙鼚鼛鼡鼩鼱鼪鼫鼯鼷鼽齁齆齇齈齉齌赍齑龀齕齗龅齚龇齞龃龉龆齢出齧齩齮齯齰齱齵齾厐龑龒龚龖龘龝龡龢龤"
18
+
19
+ traditional_characters = "制咖片型超聲盤鑒定仔點他命書歌粉巾字帳恤手指記憶棒形轉彎溝光○〇㐄㐅㐆㐌㐖毒㐜㐡㐤㐰㐺㑇㑳㒳㒸㔾㗂㗎㝵㞎㞙㞞㠯㢲㢴㤅㥁㥯㨗㫺㬎㮎㮚㮸㲋㲱㲾㳮㵎㵪㶸㷖㷭㹢㹴犬㺢狓㺵㼝㽮㿝䍃䔢䖟䖸䗈䗥䗪䝓䠶䥯䦉䯝䰾魚䲔䳗䳘䵹鼄䶑一對應映射丁不識下兒子做二休世丘之貉並中台原則串為甚謂乾淨了百事無成八變五十些人得道雞升天代如併來去個國政策勁幽靈在歐洲遊蕩接樣蘿蔔坑側化傳價元論醇共再准刀兩斷切分耕耘收穫錢貨物向看舊就緒險刻千金動勞永逸匙零夜半卡通回復返影蹤反常態口咬氣句話同吐快吹周味呼諾嗚品紅鍋哄而散起唱和問三知生熟團漆黑火糟堆場空塊麵塌糊塗塵染壁廂夔已足多情露水大早到晚夫妻當關萬莫開失古恨套所料既往孔見提師要家主審寸陰難買鬥牛小撮部陣局展身層巴掌帆風順席地帶過年計於春頭載四季期被蛇怕井繩度願式份彈頃深前律徑心意念差愁孤行俱全房廳交遮打技長把抓死拿眼淚鼻涕鑰鎖折段抿拍即合掃排掬揮撥擁上入擊洞擲攬改故轍敗文值名斑方面旁族日秋餐隔雅里終父旦時晌會霎間晃暴寒曝更月望垠際朝夕本正經利杯羹東西板枝獨秀根筋桿進條龍服務概模次函數又性程總付步腳印趨登毛拔呵氧氮碳決雌雄波未平派謊言流清楚白準溜煙潭有獲聞是處降琴鶴甲病發可拾沙目然瞭直以相眨穿睹瞥瞬矢的解石鳥神教秉虔誠秘種窩蜂窮竅笑置筆苟勾銷抹殺煞等獎箍節吃箭仇雙鵰詩籌籮筐系列紙級士官統絲毫掛維網盡線微吭響股腦胎脈承腔臂力致效資源址器舉功投般說講規貿易葉障著慎滿皆輸號木電池衣傾鐘高低視仁覺醒覽遺角銀幣觸潰九鼎蔽抄出駟馬追重語破貧洗貫走路安蹴至幾蹶振躍役膽汗較輩輪辭贊退六連遍遞邊針血錘音錯門思閃真倒項栽霧類保護川先驚乍體鬨鱗爪鳴滴泡鄰域黨專鼓作齊炒丑烯亥克內酯冬加奴卯肝炎基尺梁街褲鎬客寵庭巳汝昌烷玲磊糖肇酉醛啷青縣韙良香骨鯛丂七集河市弦喜嘴張舌堵區工業姊妹星架構巧彩扭歪拼湊餘熱曜武州爺浮屠美鄉老階樹葷素碎落能魄鰓鰻珠丄丅丆万俟丈尚摸母娘量管群亞虎必我堂令申件裝伏位博俠義界表女墟臺戲臭皮匠勝諸葛亮賽頂倍催請運算包立叉戟離疫苗土史志演圍揭瓦曬夷姑婆帝村寶爛尖杉鹼屜桌山岔島由紀峽壩庫鎮廢從德後拗湯治旬食明昧曹朋友框欄極權冪曲歸依貓民氟硼氯磷鐵江侗自旅法司洋浦梅園溫暖灣焦班幸用田略番疊皇炮捶硝苯酸腺苷稜草鏡穗跳遠索錦綱聚氰胺聯店胚膲愛色堇紫羅蘭芝茶飯菱雲蟲藏藩亂叛蘇親債凳學座恐戀柱測肌腹衩錐係貂企烏跪叩軍車農題迭都甘油屯奏鍵短阿姨陪姐隻顧茅廬槽駕魂鮮鹿頁其菜單乘任供勢午齒漢組織吊調瀉唇坡城報墳外夸將尉建築岸崗公床揚新劍昇杭林栗校樓標款汽社浣海商館劇院鋼華港機械廣媒環球融第醫科證券綜財樂育游漲猶嶺疏癮瞼確兵領導繳肢膛船艾瑟爾蒼蔡虞傚衫覆訪訴課諭議軌述野鉤限敵鞋頜頷顎饒首齦站例修凡劃垂屆屬崽頦廚拜挫擺放旋削棋榻檻禮沉注滑營獄畫确儀聘花葬詔員跌轄週達酒錨閘陷陸雨雪飛威丌于丹久乏予理評產亢卑亦乎舞己悲矩圓詞害誌但住佞佳便俗信票案幅翁倦倫假偏倚斜虧鬼敲停備傷脾胃僅此像儉匱免宜穴焉戴兼容許凍伯仲負彼晝皂軒輊實刊划顛衛戰哥比省非好黃飾別拘束掩奶睬選擇搖擾煩苦枚寫協厭及格受歡迎約只估侵犯割狀告或缺抗拒挽撤救藥喻磨滅端倪少逆逾越避靠適吉譽吝玉含延咎歹聽啻淵善謀均勻堪忍夠太惹妙妥妨孕症孝術室完納推冠積宣疑辯慄碴稱屈撓屑干涉衡待很忙惡忿怎麼怠急恥恭息悅惑惜惟想愉愧怍慌憤啟懂懈懷材才緊招認扣抵拉捨也罷插揣冒搭撞南牆擴核支攻敢雷攀敬裡嗎需景智暇曾罪遇朽枉止況競爭辱求癒渝溶濟左右袒困補爽特寂寞示弱找謝畏強疾徐痛癢冤符眠睦瞅董何厚云措活疲羞者輕玻璃祥兆禁移稂莠穩佛換答簡結果盟絕縷途給談否羈翼耐肖脛毋寧興舒若菲萊痕跡窠臼虛衰臉兔撒鷹棺範該詳諱抬泰讓鬚眉象眾貲賬費灰賴奇慮訓輟辨菽麥辛近送透逞徒速續逮捕遂遑違遜斧鉞艱醉鏽隨觀棄顯飽脂肪使丏丐幫丒且慢末丕替桃宗王尊涼爵各圖屋脊糧署錄壇吾祿職胄襲君廈丗北壑桐疹損逢陵鷸丙寅戌氨腈唑綸辰酮脫氫酶醚丞丟現掉紗帽弄扯砲碗丠両丣坐存激肩臻蒂蓮悖序驅丨丩丫挺杈髻鬟細介俄伊犁京尼布訂普渡央委監察檢查劑圈設警隊斯督剩震境航舶革防托播促質版蠑螈鋒研藝歷殘消頻譜精密製造陲郵候埔堅壓壢凹匯執府究邦俘攝寮彬狼嶽肺腫庸英訊診埋粒胞括控碼韓暑槍樞砥澳哇牟壽甸鑽探篇簽綴縫繼耳肯照婦埃懸璧軸櫃檯辣擱淺邪跑纖阮陽私囊魔丮丰姿采丱燒丳丵丶丷丸參寨朗桂瑞砂衷霞貌鳳僕艦因嫌宰峰幹絡牌持旨祭禱簿編罰賓辦丼丿乀乂乃乄仰慕盛曠留考驗闊乆乇么醜麼乊湖燃乑乒乓乕乖僻忤戾离謬迕乗危肥劫除隙浪婿乙炔腸酰吡咯鹽乚乛乜嘢卿玄宮尾狐龜塔嶷兄弟泉章霄釘耙乞扎哀憐恕討乢乣乤乥乧乨乩童乪乫乭乳暈汁液瑤漿牙癌突竇罩腐膠豬酪蛋糕菌瘤乴乵乶乷乸乹乺乼乾俸冰嘉噦嚎坤媽屍壘旱枯涸俐渴潮澀煸豆燥爹瘦癟癬瞪袋脆薑貝隆餾乿亀亁叫咕攘扔搞男砸竄蓬麻亃亄亅卻亇遲典今臨繁累卵奉婚聰躬巨與遷添裂副宿歲怪噁尕崙愣杆硅硫鈦鈾錳芑雜異鈉砷胂磺琥珀艙棍簧胡茬盜浩
盆販郎腿亍洪亐互欠助勉惠操斥諉繫戶譯亓墓碑刑鈴卅渠繽紛斗米旗憲釩燈徽瘟祖拳福穀豐臟腑綁肉醃苓蘊橋鋪霸顏鬧判噴岡底蛙陘礦亖亙亜罕們娜桑那努哈喀弗烈曼松森杜氏盃奧琛敦戊穆聖裔彙薛孫亟亡佚虜羊牢奮釋卷卸契媾感額睫纏誼趾塞擠紐阻還配馳莊亨洛祚亪享津滬畿郊慈菴枇杷膏亭閣鋥麗亳亶亹誅初責翻瘋偶傑叢稠妖拖寰居吸授慧蝸吞壯魅狗矛盾益渣患憂稀描猿夢暫涯畜禍緣沸搜引擎臣橫紜誰混援蒸獸獅稅剖亻亼亽亾什獻剎邡麽仂仃仄仆富怨仈仉畢昔晨殼紹仍仏仒仕宦仗欺恃腰嘆歎炬梓訖施仙后瓊逝仚仝仞仟悔仡佬償填泊拓撲簇羔購頓欽佩髮棻閫馭養億儆尤藉幀賑凌敘帖李柔剛沃眥睚戒訛取饗讀仨仫仮著泳臥躺韶夏裁仳仵唯賢憑釣誕仿似宋彿諷伀碩盼鵝伄儅伈伉儷柯始娃邁戈坦堡帕茨薩廟瑪莉莎藤霍姆伋伍奢胥廷芳豪伎倆侍汛勒希羲雛伐憩整謨閑閒伕伙伴頤伜伝伢叔恆茲恩翰伱伲侶伶俜悧鼬伸懶縮喇叭伹伺伻伽倻輻伾佀佃佇佈喬妮墨佉盧佌貸劣廉昂檔濃矮傘窪緩耗胸谷迷擋率齲宅沫舍療佐貳佑佔優據鏵嘗呢須魯曉佗佘余坪寺瓜銃僧蒙芒陀龕哼嘔坊姦孽弊揖祟繭縛誓賊佝僂瞀佟你奪趕佡佢佣佤佧賈佪佫佯佰佱潔績釀餚佴捲佶佷佸佹佺佻佼佽佾具喚窘壞娛怒慨硬習慣聾膨脹蔓駭貴痺侀侁侂侃侄侅鴻燕侇侈糜靡侉侌妾侏儒倉鼠侐侑侔侖侘侚鏈侜偎傍鈷循柳葫蘆附価侮罵蔑侯岩截蝕侷貼壺嬛宴捷攜桶箋酌俁狹膝狄俅俉俊俏俎俑俓俔諺俚俛黎健呈固墒增守康箱濕祐鏢鑣槓盒靖膜齡俞豹獵噪孚封札筒託衍鴿剪撰稿煉廠禊練繕葺俯瞰撐衝俲俳俴俵俶俷俺俻俾倀倂倅儲卒惶敷猝逃頡蓄崇隱倌倏忽刺蠟燭噍嚼坍扁抽斃蔥楣灌灶糞背藪賣賠閉霉騰倓倔倖倘倜儻倝借箸挹澆閱倡狂倢倣値倥傯倨��倩匡嗣沖柝珍倬倭寇猩倮倶倷倹勤讚偁偃充偽吏嗓寐惺扮拱芫茜藉虢鈔偈偉晶偌宕距析濾殿疼癱註頗偓偕鴨歇滯偝偟偢忘怡旺偨偩偪偫偭偯偰偱偲偵緝蹄偷減惰漏窺竊偸偺迹傀儡傅傈僳傌籬傎奎琳迪叟芭傒傔傕傖悉荒傜傞傢傣芽逼傭婢傮睨寄檄誦謠頌傴擔辜弓慘蒿悼疤傺傻屄臆巢洩篋羨蓋軋頹傿儸僄僇僉僊働僎僑僔僖僚僝僞僣僤僥僦猴僨僩僬僭僮僯僰僱僵殖籤靜僾僿征隴儁儂儃儇儈朴薄儊儋儌儍儐儓儔儕儗儘儜儞儤儦儩汰哉寡渥裕酷儭儱罐儳儵儹儺儼儽兀臬臲鷲允勛勳宙宵帥憝彞諧嫂鬩暢沛溢盈飢赫兇悍狠猛頑愚妣斬秦遣鞭耀敏榮槃澤爆碟磁禿纜輝霽鹵朵婁孜烽醬勃汀箕裘鉗耶懞蕾徹兌軟遭黜兎児韻媳爸兕觥兗兙兛兜售鍪肚兝兞兟兡兢兣樽殮涅睡稟籍贅泌啡肽奸幕涵澇熵疚眷稃襯訌赴煥椒殲植跏沒試誤猜棲窗肋袖頰兪卦撇鬍岐廓轎疸楓茴瓏廁秩募勺噸寓斤曆畝迫筷釐最淫螺韜兮寬匪篩襄贏軛複兲詐刃堰戎痞蟻餉它冀鑄冂冃円冇冉冊嫁厲礪竭醮冏牧冑冓冔冕冖冗冘冞冢窄抑誣冥冫烘菇蟄冷凝坨橇淇淋炭餅磚磧窖醋雕雹霜冱冶爐艷嘲峻灘淡漠煖颼飲冼冽凃凄愴梗凅凇凈凊凋敝濛凔凜遵汞脢凞几凢処凰凱凵凶焰凸摺刷紋預喪嘍奔巡榜殯芙蓉租籠輯鞘萃凼鋸鑊刁蠻刂娩崩批拆攤掰櫱驟歧顆秒袂贓勿囑忌磋琢膚刈羽刎訟戮舂槳艇刓刖霹靂刜創犢刡恙墅幟筵緻刦刧刨昏默攸尿慾薰潤薰圭刪刮痧鏟刱刲刳刴刵踏磅戳柏槐繡芹莧蝟舟銘鵠鶩刼剁剃辮剄剉履鉛剋剌姻咽哨廊掠桅沿召瞻翅趙卜渺茫郭剒剔剕瀝剚愎毅訥纔剜剝啄採剞剟剡剣剤綵剮腎駛黏剰袍剴紊剷剸剺剽剿劁劂劄劈啪柴扳啦劉奭姥夼昫涓熙禪禹錫翔雁鶚劊劌弩柄蜻蛉劒劓劖劘劙瀾簣賞磯釜晉甜薪逐劦熔紂虐赤囚劬劭労劵効劻劼劾峭艮勅勇勵勍勐臘脖龐漫飼盪粥輒勖勗勘驕餒碌泮雇捐竹騎殊阱勣樸懇謹勦勧勩勯勰勱勲勷勸懲慰誡諫勹芡踐闌匁庇拯粟紮袱裹餃匆遽匈匉匊匋匍匐莖匏匕妝痰膿蛹齋苑烤蹈塘羌熊閥螳螂疆碚竿緯荷茵邙魏匚匜匝匟扶稷匣匭攏匸匹耦匽匾匿卂叮瘡禧軫堤棚迢鈞鍊卄卆遐卉瓷盲瓶噹胱腱裸卋卌卍卐怯污賤鄙齷齪陋卓溪唐梯漁陳棗泥漳潯澗梨芬譙贍轅迦鄭単驢弈洽鰲卛占筮卝卞卟吩啉屎翠厄卣卨卪卬卮榫襖璽綬鈕蚤懼殆篤聳卲帘帙繞卹卼卽厂厎厓厔厖厗奚厘厙厜厝諒厠厤厥厪膩孢厮厰厳厴厹厺粕垢蕪菁厼厾叁悟茸薯叄吵笄悌哺譏坫壟弧芯杠潛嬰芻袁詰貪諜煽饋駁収岳締災賄騙叚叡吻攔蘑蜜訣燧玩硯箏椎藺銅逗驪另覓叨嘮謁杵姓喊嚷囂咚嚀塑尋惱憎擦祇泣滲蝠叱吒咄咤喝籀黛舵舷叵叶鐸懿昭穰苴遼叻叼吁塹嫖賭瞧爬衆抒吅吆夥巹橡滌抱縱摩郡唁墜扇籃膀襪頸吋愾諮酬哭妓媛暗錶韁邇妃羿絮蕃渾拐葵暮隅吔吖啶嗪戚吜嗇噬嚥吟哦詠吠吧唧嗒咐吪雋咀徵燐苞茹鈣哧吮吰吱嘎吲哚吳棟嬌窟孟簫忠晗淞闔閭趼宇吶睛噓拂捧疵熄竽笛糠吼吽呀呂韋矇呃呆笨呇貢呉罄呋喃呎呏呔呠呡癡呣呤呦呧瑛眩扒晬淑姬瑜璇鵑呪呫嗶嚅囁呬呯呰呱呲咧噌鈍呴呶呷呸呺呻哱咻嘯嚕籲坎坷邏呿咁咂咆哮咇咈咋蟹煦珅藹咍咑咒詛咔噠嚓咾噥哩喱咗咠咡咢咣咥咦咨嗟詢咩咪咫嚙齧咭咮咱咲咳嗆嗽咴咷咸咹咺咼喉咿婉慟憫賦矜綠茗藍哂搶瞞哆嗦囉噻啾濱彗哋哌哎唷喲哏哐哞哢哤哪裏哫啼喘哰哲萎蚌哳哶哽哿唄唅唆唈唉唎唏嘩堯棣殤璜睿肅唔睇唕唚唞唣喳唪唬唰喏唲唳唵嘛唶唸唹唻唼唾唿啁啃鸚鵡啅埠棧榷祺舖鞅飆啊啍啎啐啓啕啖啗啜啞祈啢啣啤啥啫啱啲啵啺饑啽噶崑沁喁喂喆裙喈嚨喋喌喎喑喒喓喔粗喙幛慶滋鵲喟喣喤喥喦喧騷喨喩梆喫葡萄喭駝挑嚇碰樅
瓣純皰藻趟鉻喵営喹喺喼喿嗀嗃嗄嗅嗈嗉嗊嗍嗐嗑嗔詬嗕嗖嗙嗛嗜痂癖嗝嗡嗤嗥嗨嗩嗬嗯嗰嗲嗵嘰嗷嗹嗾嗿嘀嘁嘂嘅惋嘈峪禾蔭嘊嘌嘏嘐嘒嘓嘖嘚嘜嘞嘟囔嘣嘥嘦嘧嘬嘭這謔嚴敞饞鬆嘵嘶嘷嘸蝦嘹嘻嘽嘿噀噂噅噇噉噎噏噔噗噘噙噚噝噞噢噤蟬皿噩噫噭噯噱噲噳嚏涌灑欲巫霏噷噼嚃嚄嚆抖嚌嚐嚔囌嚚嚜嚞嚟嚦嚬嚭嚮嚯嚲嚳飭按竣苛嚵嚶囀囅囈膪謙囍囒囓囗囘蕭酚飄濺諦囝溯眸紇鑾鶻囟殉囡団囤囥囧囨囪囫圇囬囮囯囲図囶囷囸囹圄圉擬囻囿圀圂圃圊粹蠹赦圌墾圏滾鯡鑿枘圕圛圜圞坯埂壤骸炕祠窯豚紳魠鯪鱉圧握圩圪垯圬圮圯炸岬幔毯祇窨菩溉圳圴圻圾坂坆沾坋坌舛壈昆墊墩椅坒坓坩堝坭坰坱坳坴坵坻坼楊掙涎簾垃垈垌垍垓垔垕垗垚垛垝垣垞垟垤垧垮垵垺垾垿埀畔埄埆埇埈埌殃隍埏埒埕埗埜埡埤埦埧埭埯埰埲埳埴埵埶紼埸培怖樁礎輔埼埽堀訶姪廡堃堄摧磐貞韌砌堈堉堊堋堌堍堎堖堙堞堠礁堧堨輿堭堮蜓摘堲堳堽堿塁塄塈煤塋棵塍塏塒塓綢���鴉沽虱塙塚塝繆塡塢塤塥塩塬塱塲蟎塼塽塾塿墀墁墈墉墐夯増毀墝墠墦漬缽墫墬墮墰墺墻櫥壅壆壊壌壎壒榨蒜壔壕壖壙壚壜壝壠壡壬壭壱売壴壹壻壼寢壿夂夅夆変夊夌漱邑夓腕泄甥禦骼夗夘夙袞瑙妊娠醣梟珊鶯鷺戧幻魘夤蹀祕擂鶇姚宛閨嶼庾撻拇賛蛤裨菠氅漓撈湄蚊霆鯊箐篆篷荊肆舅荔鮃巷慚骰辟邱鎔鐮阪漂燴鯢鰈鱷鴇臚鵬妒峨譚枰晏璣癸祝秤竺牡籟恢罡螻蠍賜絨御梭夬夭砣榆怙枕夶夾餡奄崛葩譎奈賀祀贈奌奐奓奕訢詝奘奜奠奡奣陶奨奩魁奫奬奰媧孩貶隸酥宄狡猾她奼嫣妁氈荼皋膻蠅嬪妄妍嫉媚嬈妗趣妚妞妤礙妬婭妯娌妲妳妵妺姁姅姉姍姒姘姙姜姝姞姣姤姧姫姮娥姱姸姺姽婀娀誘懾脅娉婷娑娓娟娣娭娯娵娶娸娼婊婐婕婞婤婥谿孺婧婪婬婹婺婼婽媁媄媊媕媞媟媠媢媬媮媯媲媵媸媺媻媼眯媿嫄嫈嫋嫏嫕嫗嫘嫚嫜嫠嫡嫦嫩嫪毐嫫嫬嫰嫵嫺嫻嫽嫿嬀嬃嬅嬉耍嬋痴豔嬔嬖嬗嬙嬝嬡嬢嬤嬦嬬嬭幼嬲嬴嬸嬹嬾嬿孀孃孅孌孏曰癲屏孑孓雀孖斟簍謎摺孛矻鳩崮軻祜鸞孥邈毓棠臏孬孭孰孱孳孵泛罔銜孻孿宀宁宂拙株薇掣撫琪瓿榴謐彌宊濂祁瑕宍宏碁宓邸讞実潢町宥宧宨宬徵崎駿掖闕臊煮禽蠶宸豫寀寁寥寃簷庶寎暄磣寔寖寘寙寛寠苫寤肘洱濫蒗陝覈寪弘綽螽寳擅疙瘩晷対檐専尃尅贖絀繚疇釁尌峙醌襟痲碧屁昊槌淘恵瀑牝畑莓缸羚覷蔻髒躁尒尓銳尗尙尜尟尢尥尨尪尬尭尰擒尲尶尷尸尹潽蠖蛾尻釦梢蚴鰭脬蹲屇屌蚵屐屓挪屖屘屙屛屝屢屣巒嶂巖舄屧屨屩屪屭屮戍駐鉀崖嵛巔旮旯楂欖櫸芋茱萸靛麓屴屹屺屼岀岊岌岍阜岑彭鞏岒岝岢嵐岣岧岨岫岱岵岷峁峇峋峒峓峞峠嵋峩峯峱峴峹峿崀崁崆禎崋崌崍嶇崐崒崔嵬巍螢顥崚崞崟崠崢巆崤崦崧殂崬崱崳崴崶崿嵂嵇嵊泗嵌嵎嵒嵓嵗嵙嵞嵡嵩嵫嵯嵴嵼嵾嶁嶃嶄晴嶋嶌嶒嶓嶔嶗嶙嶝嶞嶠嶡嶢嶧嶨嶭嶮嶰嶲嶴嶸巂巃巇巉巋巌巓巘巛滇芎巟巠弋迴巣巤炊擘蜥蟒蠱覡巰蜀彥淖杏茂甫楞巻巽幗巿帛斐鯽蕊帑帔帗帚琉汶帟帡帣帨帬帯帰帷帹暆幃幄幇幋幌幏幘幙幚幞幠幡幢幦幨幩幪幬幭幯幰遙蹉跎餘庚鑑幵幷稚邃庀庁広庄庈庉笠庋跋庖犧庠庤庥鯨庬庱庳庴庵馨衢庹庿廃廄廆廋廌廎廏廐廑廒廕廖廛廝搏鑼廞弛袤廥廧廨廩廱綿踵髓廸廹甌鄴廻廼廾廿躔弁皺弇弌弍弎弐弒弔詭憾薦弝弢弣弤弨弭弮弰弳霖繇燾斌旭溥騫弶弸弼弾彀彄彆纍糾彊彔彖彘彟彠陌彤貽彧繪虹彪炳彫蔚鷗彰癉彲彳彴彷彷徉徨彸彽踩斂旆徂徇徊渭畬鉉裼従筌徘徙徜徠膳甦萌漸徬徭醺徯徳徴潘徻徼忀瘁胖燎怦悸顫扉犀澎湃砰恍惚絞隘忉憚挨餓忐忑忒忖応忝忞耿忡忪忭忮忱忸怩忻悠懣怏遏怔怗怚怛怞懟黍訝怫怭懦怱怲怳怵惕怸怹恁恂恇恉恌恏恒恓恔恘恚恛恝恞恟恠恣恧眄恪恫恬澹恰恿悀悁悃悄悆悊悐悒晦悚悛悜悝悤您悩悪悮悰悱悽惻悳悴悵惘悶悻悾惄愫鍾蒐惆惇惌惎惏惓惔惙惛耄惝瘧濁惥惦惪惲惴惷惸拈愀愃愆愈愊愍愐愑愒愓愔愕愙氓蠢騃昵愜赧愨愬愮愯愷愼慁慂慅慆慇靄慉慊慍慝慥慪慫慬慱慳慴慵慷慼焚憀灼鬱憃憊憋憍眺捏軾憒憔憖憙憧憬憨憪憭憮憯憷憸憹憺懃懅懆邀懊懋懌懍懐懞懠懤懥懨懫懮懰懱毖懵遁樑雍懺懽戁戄戇戉戔戕戛戝戞戠戡戢戣戤戥戦戩戭戯轟戱披菊牖戸戹戺戻戼戽鍬扂楔扃扆扈扊杖牽絹銬鐲賚扐摟攪烊盹瞌跟躉鑔靶鼾払扗玫腮扛扞扠扡扢盔押扤扦扱罾揄綏鞍郤窾扻扼扽抃抆抈抉抌抏瞎抔繯縊擻抜抝択抨摔歉躥牾抶抻搐泵菸拃拄拊髀拋拌脯拎拏拑擢秧沓曳攣迂拚拝拠拡拫拭拮踢拴拶拷攢拽掇芥橐簪摹疔挈瓢驥捺蹻挌挍挎挐揀挓挖掘浚挙揍聵挲挶挾挿捂捃捄捅捆捉捋胳膊揎捌捍捎軀蛛捗捘捙捜捥捩捫捭据捱捻捼捽掀掂掄臀膘掊掎掏掐笙掔掗掞棉芍掤搪闡掫掮掯揉掱掲掽掾揃揅揆搓揌諢揕揗揘揜揝揞揠揥揩揪揫櫫遒麈揰揲揵揶揸揹揺搆搉搊搋搌搎搔搕撼櫓搗搘搠搡搢搣搤搥搦搧搨搬楦褳訕赸搯搰搲搳搴搵搷搽搾搿摀摁摂摃摎摑摒摓跤摙摛摜摞摠摦睺羯摭摮摯摰摲摳摴摶摷摻摽撂撃撅稻撊撋撏鐧潑撕撙撚撝撟撢撣撦撧撩撬撱朔撳蚍蜉撾撿擀擄闖擉缶觚擐擕擖擗擡擣擤澡腚擧擨擩擫擭擯擰擷擸擼擽擿攃攄攆攉攥攐攓攖攙攛每攩攫轡澄攮攰攲攴軼攷砭訐攽碘敁敃敇敉敍敎筏敔敕敖閏誨敜煌敧敪敱敹敺敻敿斁衽斄牒縐謅斉斎斕鶉讕駮鱧斒筲斛斝斞斠斡斢斨斫斮晾沂潟穎絳邵斲斸釳於琅斾斿旀旂旃旄渦旌旎旐旒旓旖旛旝旟旡旣浴旰獺魃旴旹旻旼旽昀昃昄昇昉晰躲澈熹皎皓礬
昑昕昜昝昞昡昤暉筍昦昨昰昱昳昴昶昺昻晁蹇隧蔬髦晄晅晒晛晜晞晟晡晢晤晥曦晩萘瑩顗晿暁暋暌暍暐暔暕煅暘暝暠暡曚暦暨暪朦朧暱暲殄馮暵暸暹暻暾曀曄曇曈曌曏曐曖曘曙曛曡曨曩駱曱甴肱曷牘禺錕曽滄耽朁朅朆杪栓誇竟粘絛朊膺朏朐朓朕朘朙瞄覲溘饔飧朠朢朣柵椆澱蝨朩朮朰朱炆璋鈺熾鹮朳槿朶朾朿杅杇杌隉欣釗湛漼楷瀍煜玟纓翱肈舜贄适逵杓杕杗杙荀蘅杝杞脩珓筊杰榔狍閦顰緬莞杲杳眇杴杶杸杻杼枋枌枒枓衾葄翹紓逋枙狸椏枟槁枲枳枴枵枷枸櫞枹枻柁柂柃柅柈柊柎某柑橘柒柘柙柚柜柞櫟柟柢柣柤柩柬柮柰柲橙柶柷柸柺査柿栃栄栒栔栘栝栟栢栩栫栭栱栲栳栴檀栵栻桀驁桁鎂桄桉桋桎梏椹葚桓桔桕桜桟桫欏桭桮桯桲桴桷桹湘溟梃梊梍梐潼梔梘梜梠梡梣梧梩梱梲梳梴梵梹棁棃櫻棐棑棕櫚簑繃蓑棖棘棜棨棩棪棫棬棯棰棱棳棸棹槨棼椀椄苕椈椊椋椌椐椑椓椗検椤椪椰椳椴椵椷椸椽椿楀楄楅篪楋楍楎楗楘楙楛楝楟楠楢楥楨楩楪楫楬楮楯楰楳楸楹楻楽榀榃榊榎槺榕榖榘榛狉莽榜笞榠榡榤榥榦榧榪榭榰榱槤霰榼榾榿槊閂槎槑槔槖様槜槢槥槧槪槭槮槱槲槻槼槾樆樊樏樑樕樗樘樛樟樠樧樨権樲樴樵猢猻樺樻罍樾樿橁橄橆橈笥龠橕橚橛輛橢橤橧豎膈跨橾橿檁檃檇檉檍檎檑檖檗檜檟檠檣檨檫檬檮檳檴檵檸櫂櫆櫌櫛櫜櫝櫡櫧櫨櫪櫬櫳櫹櫺茄櫽欀欂欃欐欑欒欙欞溴欨欬欱欵欶欷歔欸欹欻欼欿歁歃歆艎歈歊蒔蝶歓歕歘歙歛歜歟歠蹦詮鑲蹣跚陞陟歩歮歯歰歳歴璞歺瞑歾歿殀殈殍殑殗殜殙殛殞殢殣殥殪殫殭殰殳荃殷殸殹蛟殻殽謗毆毈毉餵毎毑蕈毗毘毚茛鄧毧毬毳毷毹毽毾毿氂氄氆靴氉氊氌氍氐聊氕氖気氘氙氚氛氜氝氡洶焊痙氤氳氥氦鋁鋅氪烴氬銨痤汪滸漉痘盂碾菖蒲蕹蛭螅氵氷氹氺氽燙氾氿渚汆汊汋汍汎汏汐汔汕褟汙汚汜蘺沼穢衊汧汨汩汭汲汳汴隄汾沄沅沆瀣沇沈葆浸淪湎溺痼痾沌沍沏沐沔沕沘浜畹礫沚沢沬沭沮沰沱灢沴沷籽沺烹濡洄泂肛泅泆湧肓泐泑泒泓泔泖泙泚泜泝泠漩饃濤粼濘蘚鰍泩泫泭泯銖泱泲洇洊涇琵琶荽薊箔洌洎洏洑潄濯洙洚洟洢洣洧洨洩痢滔洫洮洳洴洵洸洹洺洼洿淌蜚浄浉浙贛渫浠浡浤浥淼瀚浬浭翩萍浯浰蜃淀苔蛞蝓蜇螵蛸煲鯉浹浼浽溦涂涊涐涑涒涔滂涖涘涙涪涫涬涮涴涶涷涿淄淅淆淊淒黯淓淙漣淜淝淟淠淢淤淥淦淩猥藿褻淬淮淯淰淳詣淶紡淸淹燉癯綺渇済渉渋渓渕渙渟渢滓渤澥渧渨渮渰渲渶渼湅湉湋湍湑湓湔黔湜湝湞湟湢湣湩湫湮麟湱湲湴湼満溈溍溎溏溛舐漭溠溤溧馴溮溱溲溳溵溷溻溼溽溾滁滃滉滊滎滏稽滕滘滙滝滫滮羼耷滷滹滻煎漈漊漎繹漕漖漘漙漚漜漪漾漥漦漯漰漵漶漷濞潀潁潎潏潕潗潚潝潞潠潦祉瘍潲潵潷潸潺潾潿澁澂澃澉澌澍澐澒澔澙澠澣澦澧澨澫澬澮澰澴澶澼熏郁濆濇濈濉濊貊濔疣濜濠濩觴濬濮盥濰濲濼瀁瀅瀆瀋瀌瀏瀒瀔瀕瀘瀛瀟瀠瀡瀦瀧瀨瀬瀰瀲瀳瀵瀹瀺瀼灃灄灉灋灒灕灖灝灞灠灤灥灨灩灪蜴灮燼獴灴灸灺炁炅魷炗炘炙炤炫疽烙釺炯炰炱炲炴炷燬炻烀烋瘴鯧烓烔焙烜烝烳飪烺焃焄耆焌焐焓焗焜焞焠焢焮焯焱焼煁煃煆煇煊熠煍熬煐煒煕煗燻礆霾煚煝煟煠煢矸煨瑣煬萁煳煺煻熀熅熇熉羆熒穹熗熘熛熜稔諳爍熤熨熯熰眶螞熲熳熸熿燀燁燂燄盞燊燋燏燔隼燖燜燠燡燦燨燮燹燻燽燿爇爊爓爚爝爟爨蟾爯爰爲爻爿爿牀牁牂牄牋牎牏牓牕釉牚腩蒡虻牠雖蠣牣牤牮牯牲牳牴牷牸牼絆牿靬犂犄犆犇犉犍犎犒犖犗犛犟犠犨犩犪犮犰狳犴犵犺狁甩狃狆狎狒獾狘狙黠狨狩狫狴狷狺狻豕狽蜘猁猇猈猊猋猓猖獗猗猘猙獰獁猞猟獕猭猱猲猳猷猸猹猺玃獀獃獉獍獏獐獒獘獙獚獜獝獞獠獢獣獧鼇蹊獪獫獬豸獮獯鬻獳獷獼玀玁菟玅玆玈珉糝禛郅玍玎玓瓅玔玕玖玗玘玞玠玡玢玤玥玦玨瑰玭玳瑁玶玷玹玼珂珇珈瑚珌饈饌珔珖珙珛珞珡珣珥珧珩珪珮珶珷珺珽琀琁隕琊琇琖琚琠琤琦琨琫琬琭琮琯琰琱琲瑯琹琺琿瑀瑂瑄瑉瑋瑑瑔瑗瑢瑭瑱瑲瑳瑽瑾瑿璀璨璁璅璆璈璉璊璐璘璚璝璟璠璡璥璦璩璪璫璯璲璵璸璺璿瓀瓔瓖瓘瓚瓛臍瓞瓠瓤瓧瓩瓮瓰瓱瓴瓸瓻瓼甀甁甃甄甇甋甍甎甏甑甒甓甔甕甖甗飴蔗甙詫鉅粱盎銹糰甡褥産甪甬甭甮甯鎧甹甽甾甿畀畁畇畈畊畋畎畓畚畛畟鄂畤畦畧荻畯畳畵畷畸畽畾疃疉疋疍疎簞疐疒疕疘疝疢疥疧疳疶疿痁痄痊痌痍痏痐痒痔痗瘢痚痠痡痣痦痩痭痯痱痳痵痻痿瘀瘂瘃瘈瘉瘊瘌瘏瘐瘓瘕瘖瘙瘚瘛瘲瘜瘝瘞瘠瘥瘨瘭瘮瘯瘰癧瘳癘瘵瘸瘺瘻瘼癃癆癇癈癎癐癔癙癜癠癤癥癩蟆癪癭癰発踔紺蔫酵皙砬砒翎翳蘞鎢鑞皚鵯駒鱀粵褶皀皁莢皃鎛皈皌皐皒硃皕皖皘皜皝皞皤皦皨皪皫皭糙綻皴皸皻皽盅盋盌盍盚盝踞盦盩鞦韆盬盭眦睜瞤盯盱眙裰盵盻睞眂眅眈眊県眑眕眚眛眞眢眣眭眳眴眵眹瞓眽郛睃睅睆睊睍睎睏睒睖睙睟睠睢睥睪睪睯睽睾瞇瞈瞋瞍逛瞏瞕瞖瞘瞜瞟瞠瞢瞫瞭瞳瞵瞷瞹瞽闍瞿矓矉矍鑠矔矗矙矚矞矟矠矣矧矬矯矰矱硪碇磙��舫阡、矼矽礓砃砅砆砉砍砑砕砝砟砠砢砦砧砩砫砮砳艏砵砹砼硇硌硍硎硏硐硒硜硤硨磲茚鋇硭硻硾碃碉碏碣碓碔碞碡碪碫碬碭碯碲碸碻礡磈磉磎磑磔磕磖磛磟磠磡磤磥蹭磪磬磴磵磹磻磽礀礄礅礌礐礚礜礞礤礧礮礱礲礵礽礿祂祄祅祆禳祊祍祏祓祔祕祗祘祛祧祫祲祻祼餌臠錮禂禇禋禑禔禕隋禖禘禚禜禝禠禡禢禤禥禨禫禰禴禸稈秈秊闈颯秌秏秕笈蘵賃秠秣秪秫秬秭秷秸稊稌稍稑稗稙稛稞稬稭稲稹稼顙稾穂穄穇穈穉穋穌貯穏穜穟穠穡穣穤穧穨穭穮穵穸窿闃窀窂窅窆窈窕窊窋窌窒窓窔窞窣
窬黷蹙窰窳窴窵窶窸窻竁竃竈竑竜竝竦竪篦篾笆鮫竾笉笊笎笏笐靨笓笤籙笪笫笭笮笰笱笲笳笵笸笻筀筅筇筈筎筑筘筠筤筥筦筧筩筭筯筰筱筳筴讌筸箂箇箊箎箑箒箘箙箛箜篌箝箠箬鏃箯箴箾篁篔簹篘篙篚篛篜篝篟篠篡篢篥篧篨篭篰篲篳篴篶篹篼簀簁簃簆簉簋簌簏簜簟簠簥簦簨簬簰簸簻籊籐籒籓籔籖籚籛籜籣籥籧籩籪籫籯芾麴籵籸籹籼粁粃粋粑粔糲粛粞粢粧粨粲粳粺粻粽闢粿糅糆糈糌糍糒糔萼糗蛆蹋糢糨糬糭糯糱糴糶糸糺紃蹼鰹黴紆紈絝紉閩襻紑紕紘錠鳶鷂紝紞紟紥紩紬紱紲紵紽紾紿絁絃絅経絍絎絏縭褵絓絖絘絜絢絣螯絪絫聒絰絵絶絺絻絿綀綃綅綆綈綉綌綍綎綑綖綘継続緞綣綦綪綫綮綯綰罟蝽綷縩綹綾緁緄緅緆緇緋緌緎総緑緔緖緗緘緙緜緡緤緥緦纂緪緰緱緲緶緹縁縃縄縈縉縋縏縑縕縗縚縝縞縟縠縡縢縦縧縯縰騁縲縳縴縵縶縹縻衙縿繄繅繈繊繋繐繒繖繘繙繠繢繣繨繮繰繸繻繾纁纆纇纈纉纊纑纕纘纙纚纛缾罃罆罈罋罌罎罏罖罘罛罝罠罣罥罦罨罫罭鍰罳罶罹罻罽罿羂羃羇羋蕉51鴕羑羖羗羜羝羢羣羥羧羭羮羰羱羵羶羸藜鮐翀翃翄翊翌翏翕翛翟翡翣翥翦躚翪翫翬翮翯翺翽翾翿闆饕鴰鍁耋耇耎耏耑耒耜耔耞耡耤耨耩耪耬耰鬢耵聹聃聆聎聝聡聦聱聴聶聼閾聿肄肏肐肕腋肙肜肟肧胛肫肬肭肰肴肵肸肼胊胍胏胑胔胗胙胝胠銓胤胦胩胬胭胯胰胲胴胹胻胼胾脇脘脝脞脡脣脤脥脧脰脲脳腆腊腌臢腍腒腓腖腜腠腡腥腧腬腯踝蹬鐐腴腶蠕誹膂膃膆膇膋膔膕膗膙膟黐膣膦膫膰膴膵膷膾臃臄臇臈臌臐臑臓臕臖臙臛臝臞臧蓐詡臽臾臿舀舁鰟鮍舋舎舔舗舘舝舠舡舢舨舭舲舳舴舸舺艁艄艅艉艋艑艕艖艗艘艚艜艟艣艤艨艩艫艬艭荏艴艶艸艹艻艿芃芄芊萰陂藭芏芔芘芚蕙芟芣芤茉芧芨芩芪芮芰鰱芴芷芸蕘豢芼芿苄苒苘苙苜蓿苠苡苣蕒苤苧苪鎊苶苹苺苻苾茀茁范蠡萣茆茇茈茌茍茖茞茠茢茥茦菰茭茯茳藨茷藘茼荁荄荅荇荈菅蜢鴞荍荑荘荳荵荸薺莆莒莔莕莘莙莚莛莜莝莦莨菪莩莪莭莰莿菀菆菉菎菏菐菑菓菔菕菘菝菡菢菣菥蓂菧菫轂鎣菶菷菹醢菺菻菼菾萅萆萇萋萏萐萑萜萩萱萴萵萹萻葇葍葎葑葒葖葙葠葥葦葧葭葯葳葴葶葸葹葽蒄蒎蒓蘢薹蒞蒟蒻蒢蒦蒨蒭藁蒯蒱鉾蒴蒹蒺蒽蓀蓁蓆蓇蓊蓌蓍蓏蓓蓖蓧蓪蓫蓽跣藕蓯蓰蓱蓴蓷蓺蓼蔀蔂蔃蔆蔇蔉蔊蔋蔌蔎蔕蔘蔙蔞蔟鍔蔣雯蔦蔯蔳蔴蔵蔸蔾蕁蕆蕋蕍蕎蕐蕑蕓蕕蕖蕗蕝蕞蕠蕡蕢蕣蕤蕨蕳蕷蕸蕺蕻薀薁薃薅薆薈薉薌薏薐薔薖薘薙諤釵薜薠薢薤薧薨薫薬薳薶薷薸薽薾薿藄藇藋藎藐藙藚藟藦藳藴藶藷藾蘀蘁蘄蘋蘗蘘蘝蘤蘧蘩蘸蘼虀虆虍蟠虒虓虖虡虣虥虩虯虰蛵虵虷鱒虺虼蚆蚈蚋蚓蚔蚖蚘蚜蚡蚣蚧蚨蚩蚪蚯蚰蜒蚱蚳蚶蚹蚺蚻蚿蛀蛁蛄蛅蝮蛌蛍蛐蟮蛑蛓蛔蛘蛚蛜蛡蛣蜊蛩蛺蛻螫蜅蜆蜈蝣蜋蜍蜎蜑蠊蜛餞蜞蜣蜨蜩蜮蜱蜷蜺蜾蜿蝀蝃蝋蝌蝍蝎蝏蝗蝘蝙蝝鱝蝡蝤蝥蝯蝰蝱蝲蝴蝻螃蠏螄螉螋螒螓螗螘螙螚蟥螟螣螥螬螭螮螾螿蟀蟅蟈蟊蟋蟑蟓蟛蟜蟟蟢蟣蟨蟪蟭蟯蟳蟶蟷蟺蟿蠁蠂蠃蠆蠋蠐蠓蠔蠗蠙蠚蠛蠜蠧蠨蠩蠭蠮蠰蠲蠵蠸蠼蠽衁衂衄衇衈衉衋衎衒衕衖衚衞裳鈎衭衲衵衹衺衿袈裟袗袚袟袢袪袮袲袴袷袺袼褙袽裀裉裊裋裌裍裎裒裛裯裱裲裴裾褀褂褉褊褌褎褐褒褓褔褕褘褚褡褢褦褧褪褫褭褯褰褱襠褸褽褾襁襃襆襇襉襋襌襏襚襛襜襝襞襡襢襤襦襫襬襭襮襴襶襼襽襾覂覃覅覇覉覊覌覗覘覚覜覥覦覧覩覬覯覰観覿觔觕觖觜觽觝觡酲觩觫觭觱觳觶觷觼觾觿言賅訃訇訏訑訒詁託訧訬訳訹証訾詀詅詆譭詈詊詎詑詒詖詗詘詧詨詵詶詸詹詻詼詿誂誃誄鋤誆誋誑誒誖誙誚誥誧説読誯誶誾諂諄諆諌諍諏諑諕諗諛諝諞諟諠諡諴諵諶諼謄謆謇謌謍謏謑謖謚謡謦謪謫謳謷謼謾譁譅譆譈譊譌譒譔譖鑫譞譟譩譫譬譱譲譴譸譹譾讅讆讋讌讎讐讒讖讙讜讟谽豁豉豇豈豊豋豌豏豔豞豖豗豜豝豣豦豨豭豱豳豵豶豷豺豻貅貆貍貎貔貘貙貜貤饜貰餸貺賁賂賏賒賕賙賝賡賧賨賫鬭賮賵賸賺賻賾贇贉贐贔贕贗赬赭赱赳迄趁趂趄趐趑趒趔趡趦趫趮趯趲趴趵趷趹趺趿跁跂跅跆躓蹌跐跕跖跗跙跛跦跧跩跫跬跮跱跲跴跺跼跽踅踆踈踉踊踒���踘踜踟躇躕踠踡踣踤踥踦踧蹺踫踮踰踱踴踶踹踺踼踽躞蹁蹂躪蹎蹐蹓蹔蹕蹚蹜蹝蹟蹠蹡蹢躂蹧蹩蹪蹯鞠蹽躃躄躅躊躋躐躑躒躘躙躛躝躠躡躦躧躩躭躰躳躶軃軆輥軏軔軘軜軝齶転軥軨軭軱軲轆軷軹軺軽軿輀輂輦輅輇輈輓輗輙輜輞輠輤輬輭輮輳輴輵輶輹輼輾轀轇轏轑轒轔轕轖轗轘轙轝轞轢轤辠辢辤辵辶辺込辿迅迋迍麿迓迣迤邐迥迨迮迸迺迻迿逄逅逌逍逑逓逕逖逡逭逯逴逶逹遄遅遉遘遛遝遢遨遫遯遰遴遶遹遻邂邅邉邋邎邕邗邘邛邠邢邧邨邯鄲邰邲邳邴邶邷邽邾邿郃郄郇郈郔郕郗郙郚郜郝郞郟郠郢郪郫郯郰郲郳郴郷郹郾郿鄀鄄鄆鄇鄈鄋鄍鄎鄏鄐鄑鄒鄔鄕鄖鄗鄘鄚鄜鄞鄠鄢鄣鄤鄦鄩鄫鄬鄮鄯鄱鄶鄷鄹鄺鄻鄾鄿酃酅酆酇酈酊酋酎酏酐酣酔酕醄酖酗酞酡酢酤酩酴酹酺醁醅醆醊醍醐醑醓醖醝醞醡醤醨醪醭醯醰醱醲醴醵醸醹醼醽醾釂釃釅釆釈鱸鎦閶釓釔釕鈀釙鼢鼴釤釧釪釬釭釱釷釸釹鈁鈃鈄鈆鈇鈈鈊鈌鈐鈑鈒鈤鈥鈧鈬鈮鈰鈳鐺鈸鈹鈽鈿鉄鉆鉈鉋鉌鉍鉏鉑鉕鉚鉢鉥鉦鉨鉬鉭鉱鉲鉶鉸鉺鉼鉿銍銎銑銕鏤銚銛銠銣銤銥銦銧銩銪銫銭銰銲銶銻銼銾鋂鋃鋆鋈鋊鋌鋍鋏鋐鋑鋕鋘鋙鋝鋟鋦鋨鋩鋭鋮鋯鋰鋱鋳鋹鋺鋻鏰鐱錀錁錆錇錈錍錏錒錔錙錚錛錞錟錡錤錩錬録錸錼鍀鍆鍇鍉鍍鍏鍐鍘鍚鍛鍠鍤鍥鍩鍫鍭鍱鍴鍶鍹鍺鍼鍾鎄鎇鎉鎋鎌鎍鎏鎒鎓鎗鎘鎚鎞鎡鎤鎩鎪鎭鎯鎰
鎳鎴鎵鎸鎹鎿鏇鏊鏌鏐鏑鏖鏗鏘鏚鏜鏝鏞鏠鏦鏨鏷鏸鏹鏻鏽鏾鐃鐄鐇鐏鐒鐓鐔鐗馗鐙鐝鐠鐡鐦鐨鐩鐫鐬鐱鐳鐶鐻鐽鐿鑀鑅鑌鑐鑕鑚鑛鑢鑤鑥鑪鑭鑯鑱鑴鑵鑷钁钃镻閆閈閌閎閒閔閗閟閡関閤閤閧閬閲閹閺閻閼閽閿闇闉闋闐闑闒闓闘闚闞闟闠闤闥阞阢阤阨阬阯阹阼阽陁陑陔陛陜陡陥陬騭陴険陼陾隂隃隈隒隗隞隠隣隤隩隮隰顴隳隷隹雂雈雉雊雎雑雒雗雘雚雝雟雩雰雱驛霂霅霈霊霑霒霓霙霝霢霣霤霨霩霪霫霮靁靆靉靑靚靣靦靪靮靰靳靷靸靺靼靿鞀鞃鞄鞌鞗鞙鞚鞝鞞鞡鞣鞨鞫鞬鞮鞶鞹鞾韃韅韉馱韍韎韔韖韘韝韞韡韣韭韮韱韹韺頀颳頄頇頊頍頎頏頒頖頞頠頫頬顱頯頲頴頼顇顋顑顒顓顔顕顚顜顢顣顬顳颭颮颱颶颸颺颻颽颾颿飀飂飈飌飜飡飣飤飥飩飫飮飱飶餀餂餄餎餇餈餑餔餕餖餗餚餛餜餟餠餤餧餩餪餫餬餮餱餲餳餺餻餼餽餿饁饅饇饉饊饍饎饐饘饟饢馘馥馝馡馣騮騾馵馹駃駄駅駆駉駋駑駓駔駗駘駙駜駡駢駪駬駰駴駸駹駽駾騂騄騅騆騉騋騍騏驎騑騒験騕騖騠騢騣騤騧驤騵騶騸騺驀驂驃驄驆驈驊驌驍驎驏驒驔驖驙驦驩驫骺鯁骫骭骯骱骴骶骷髏骾髁髂髄髆髈髐髑髕髖髙髝髞髟髡髣髧髪髫髭髯髲髳髹髺髽髾鬁鬃鬅鬈鬋鬎鬏鬐鬑鬒鬖鬗鬘鬙鬠鬣鬪鬫鬬鬮鬯鬰鬲鬵鬷魆魈魊魋魍魎魑魖鰾魛魟魣魦魨魬魴魵魸鮀鮁鮆鮌鮎鮑鮒鮓鮚鮞鮟鱇鮠鮦鮨鮪鮭鮶鮸鮿鯀鯄鯆鯇鯈鯔鯕鯖鯗鯙鯠鯤鯥鯫鯰鯷鯸鯿鰂鰆鶼鰉鰋鰐鰒鰕鰛鰜鰣鰤鰥鰦鰨鰩鰮鰳鰶鰷鱺鰼鰽鱀鱄鱅鱆鱈鱎鱐鱓鱔鱖鱘鱟鱠鱣鱨鱭鱮鱲鱵鱻鲅鳦鳧鳯鳲鳷鳻鴂鴃鴄鴆鴈鴎鴒鴔鴗鴛鴦鴝鵒鴟鴠鴢鴣鴥鴯鶓鴳鴴鴷鴽鵀鵁鵂鵓鵖鵙鵜鶘鵞鵟鵩鵪鵫鵵鵷鵻鵾鶂鶊鶏鶒鶖鶗鶡鶤鶦鶬鶱鶲鶵鶸鶹鶺鶿鷀鷁鷃鷄鷇鷈鷉鷊鷏鷓鷕鷖鷙鷞鷟鷥鷦鷯鷩鷫鷭鷳鷴鷽鷾鷿鸂鸇鸊鸏鸑鸒鸓鸕鸛鸜鸝鹸鹹鹺麀麂麃麄麇麋麌麐麑麒麚麛麝麤麩麪麫麮麯麰麺麾黁黈黌黢黒黓黕黙黝黟黥黦黧黮黰黱黲黶黹黻黼黽黿鼂鼃鼅鼈鼉鼏鼐鼒鼕鼖鼙鼚鼛鼡鼩鼱鼪鼫鼯鼷鼽齁齆齇齈齉齌齎齏齔齕齗齙齚齜齞齟齬齠齢齣齧齩齮齯齰齱齵齾龎龑龒龔龖龘龝龡龢龤"
20
+
21
# BUG FIX: the original asserted len(simplified_charcters) against itself
# (always true); the intent is to verify both character tables align
# one-to-one before building the mapping dicts.
assert len(simplified_charcters) == len(traditional_characters)

# simplified -> traditional and traditional -> simplified lookup tables,
# built positionally from the two parallel character strings above.
s2t_dict = {}
t2s_dict = {}
for i, item in enumerate(simplified_charcters):
    s2t_dict[item] = traditional_characters[i]
    t2s_dict[traditional_characters[i]] = item
28
+
29
+
30
def tranditional_to_simplified(text: str) -> str:
    """Map every traditional Chinese character in *text* to its simplified
    form; characters without an entry in ``t2s_dict`` pass through unchanged.

    (The name keeps the original misspelling: it is imported elsewhere.)
    """
    return "".join(t2s_dict.get(ch, ch) for ch in text)
32
+
33
+
34
def simplified_to_traditional(text: str) -> str:
    """Map every simplified Chinese character in *text* to its traditional
    form; characters without an entry in ``s2t_dict`` pass through unchanged.
    """
    return "".join(s2t_dict.get(ch, ch) for ch in text)
genie_tts/G2P/Chinese/Normalization/chronology.py ADDED
@@ -0,0 +1,144 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import re
15
+
16
+ from .num import (
17
+ DIGITS,
18
+ num2str,
19
+ verbalize_cardinal,
20
+ verbalize_digit,
21
+ )
22
+
23
+
24
def _time_num2str(num_string: str) -> str:
    """A special case for verbalizing number in time.

    A leading zero must stay audible, e.g. "05" is read 零五, not 五.
    """
    verbalized = num2str(num_string.lstrip("0"))
    if num_string.startswith("0"):
        verbalized = DIGITS["0"] + verbalized
    return verbalized
30
+
31
+
32
+ # 时刻表达式
33
+ RE_TIME = re.compile(
34
+ r"([0-1]?[0-9]|2[0-3])"
35
+ r":([0-5][0-9])"
36
+ r"(:([0-5][0-9]))?"
37
+ )
38
+
39
+ # 时间范围,如8:30-12:30
40
+ RE_TIME_RANGE = re.compile(
41
+ r"([0-1]?[0-9]|2[0-3])"
42
+ r":([0-5][0-9])"
43
+ r"(:([0-5][0-9]))?"
44
+ r"(~|-)"
45
+ r"([0-1]?[0-9]|2[0-3])"
46
+ r":([0-5][0-9])"
47
+ r"(:([0-5][0-9]))?"
48
+ )
49
+
50
+
51
def replace_time(match) -> str:
    """
    Verbalize a clock time HH:MM[:SS], or a time range HH:MM[:SS]~HH:MM[:SS].

    Args:
        match (re.Match): a match of RE_TIME (4 groups) or RE_TIME_RANGE
            (9 groups: two times plus the separator).
    Returns:
        str
    """
    # RE_TIME yields 4 groups, RE_TIME_RANGE yields 9 — use the count to
    # tell which pattern produced this match.
    is_range = len(match.groups()) > 5

    hour = match.group(1)
    minute = match.group(2)
    second = match.group(4)
    hour_2 = ""
    minute_2 = ""
    second_2 = ""

    if is_range:
        hour_2 = match.group(6)
        minute_2 = match.group(7)
        second_2 = match.group(9)

    result = f"{num2str(hour)}点"
    if minute.lstrip("0"):
        if int(minute) == 30:
            result += "半"  # :30 is conventionally read 半
        else:
            result += f"{_time_num2str(minute)}分"
    if second and second.lstrip("0"):
        result += f"{_time_num2str(second)}秒"

    if is_range:
        result += "至"
        result += f"{num2str(hour_2)}点"
        if minute_2.lstrip("0"):
            # BUG FIX: the original tested int(minute) — the FIRST time's
            # minutes — here, so e.g. "8:30-12:45" rendered the 45 as 半.
            if int(minute_2) == 30:
                result += "半"
            else:
                result += f"{_time_num2str(minute_2)}分"
        if second_2 and second_2.lstrip("0"):
            result += f"{_time_num2str(second_2)}秒"

    return result
94
+
95
+
96
+ RE_DATE = re.compile(
97
+ r"(\d{4}|\d{2})年"
98
+ r"((0?[1-9]|1[0-2])月)?"
99
+ r"(((0?[1-9])|((1|2)[0-9])|30|31)([日号]))?"
100
+ )
101
+
102
+
103
def replace_date(match) -> str:
    """
    Verbalize a Chinese-style date such as 2021年3月5日.

    Args:
        match (re.Match): a match of RE_DATE
    Returns:
        str
    """
    year, month, day = match.group(1), match.group(3), match.group(5)
    pieces = []
    if year:
        pieces.append(f"{verbalize_digit(year)}年")
    if month:
        pieces.append(f"{verbalize_cardinal(month)}月")
    if day:
        # group(9) is whichever day suffix (日 or 号) the text used.
        pieces.append(f"{verbalize_cardinal(day)}{match.group(9)}")
    return "".join(pieces)
121
+
122
+
123
+ # 用 / 或者 - 分隔的 YY/MM/DD 或者 YY-MM-DD 日期
124
+ RE_DATE2 = re.compile(r"(\d{4})([- /.])(0[1-9]|1[012])\2(0[1-9]|[12][0-9]|3[01])")
125
+
126
+
127
def replace_date2(match) -> str:
    """
    Verbalize a YYYY/MM/DD, YYYY-MM-DD or YYYY.MM.DD date (RE_DATE2),
    always using 日 as the day suffix.

    Args:
        match (re.Match): a match of RE_DATE2
    Returns:
        str
    """
    year, month, day = match.group(1), match.group(3), match.group(4)
    pieces = []
    if year:
        pieces.append(f"{verbalize_digit(year)}年")
    if month:
        pieces.append(f"{verbalize_cardinal(month)}月")
    if day:
        pieces.append(f"{verbalize_cardinal(day)}日")
    return "".join(pieces)
genie_tts/G2P/Chinese/Normalization/constants.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import re
15
+ import string
16
+ from pypinyin.compat import SUPPORT_UCS4
17
+
18
+ # 全角半角转换
19
+ # 英文字符全角 -> 半角映射表 (num: 52)
20
+ F2H_ASCII_LETTERS = {ord(char) + 65248: ord(char) for char in string.ascii_letters}
21
+
22
+ # 英文字符半角 -> 全角映射表
23
+ H2F_ASCII_LETTERS = {value: key for key, value in F2H_ASCII_LETTERS.items()}
24
+
25
+ # 数字字符全角 -> 半角映射表 (num: 10)
26
+ F2H_DIGITS = {ord(char) + 65248: ord(char) for char in string.digits}
27
+ # 数字字符半角 -> 全角映射表
28
+ H2F_DIGITS = {value: key for key, value in F2H_DIGITS.items()}
29
+
30
+ # 标点符号全角 -> 半角映射表 (num: 32)
31
+ F2H_PUNCTUATIONS = {ord(char) + 65248: ord(char) for char in string.punctuation}
32
+ # 标点符号半角 -> 全角映射表
33
+ H2F_PUNCTUATIONS = {value: key for key, value in F2H_PUNCTUATIONS.items()}
34
+
35
+ # 空格 (num: 1)
36
+ F2H_SPACE = {"\u3000": " "}
37
+ H2F_SPACE = {" ": "\u3000"}
38
+
39
+ # 非"有拼音的汉字"的字符串,可用于NSW提取
40
# Matches runs of characters that are NOT pinyin-readable CJK ideographs —
# non-standard-word (NSW) candidates for verbalization.
if SUPPORT_UCS4:
    RE_NSW = re.compile(
        r"(?:[^"
        r"\u3007"  # 〇
        r"\u3400-\u4dbf"  # CJK Extension A: [3400-4DBF]
        r"\u4e00-\u9fff"  # CJK Unified: [4E00-9FFF]
        r"\uf900-\ufaff"  # CJK Compatibility: [F900-FAFF]
        r"\U00020000-\U0002A6DF"  # CJK Extension B: [20000-2A6DF]
        # BUG FIX: range start was \U0002A703, contradicting the block's
        # documented boundary — CJK Extension C starts at U+2A700.
        r"\U0002A700-\U0002B73F"  # CJK Extension C: [2A700-2B73F]
        r"\U0002B740-\U0002B81D"  # CJK Extension D: [2B740-2B81D]
        # BUG FIX: range start was \U0002F80A — the CJK Compatibility
        # Ideographs Supplement block starts at U+2F800.
        r"\U0002F800-\U0002FA1F"  # CJK Compatibility Supplement: [2F800-2FA1F]
        r"])+"
    )
else:
    # Narrow (UCS-2) build: supplementary-plane ranges are unavailable.
    RE_NSW = re.compile(  # pragma: no cover
        r"(?:[^"
        r"\u3007"  # 〇
        r"\u3400-\u4dbf"  # CJK Extension A: [3400-4DBF]
        r"\u4e00-\u9fff"  # CJK Unified: [4E00-9FFF]
        r"\uf900-\ufaff"  # CJK Compatibility: [F900-FAFF]
        r"])+"
    )
genie_tts/G2P/Chinese/Normalization/num.py ADDED
@@ -0,0 +1,340 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """
15
+ Rules to verbalize numbers into Chinese characters.
16
+ https://zh.wikipedia.org/wiki/中文数字#現代中文
17
+ """
18
+
19
+ import re
20
+ from collections import OrderedDict
21
+ from typing import List
22
+
23
+ DIGITS = {str(i): tran for i, tran in enumerate("零一二三四五六七八九")}
24
+ UNITS = OrderedDict(
25
+ {
26
+ 1: "十",
27
+ 2: "百",
28
+ 3: "千",
29
+ 4: "万",
30
+ 8: "亿",
31
+ }
32
+ )
33
+
34
+ COM_QUANTIFIERS = "(处|台|架|枚|趟|幅|平|方|堵|间|床|株|批|项|例|列|篇|栋|注|亩|封|艘|把|目|套|段|人|所|朵|匹|张|座|回|场|尾|条|个|首|阙|阵|网|炮|顶|丘|棵|只|支|袭|辆|挑|担|颗|壳|窠|曲|墙|群|腔|砣|座|客|贯|扎|捆|刀|令|打|手|罗|坡|山|岭|江|溪|钟|队|单|双|对|出|口|头|脚|板|跳|枝|件|贴|针|线|管|名|位|身|堂|课|本|页|家|户|层|丝|毫|厘|分|钱|两|斤|担|铢|石|钧|锱|忽|(千|毫|微)克|毫|厘|(公)分|分|寸|尺|丈|里|寻|常|铺|程|(千|分|厘|毫|微)米|米|撮|勺|合|升|斗|石|盘|碗|碟|叠|桶|笼|盆|盒|杯|钟|斛|锅|簋|篮|盘|桶|罐|瓶|壶|卮|盏|箩|箱|煲|啖|袋|钵|年|月|日|季|刻|时|周|天|秒|分|小时|旬|纪|岁|世|更|夜|春|夏|秋|冬|代|伏|辈|丸|泡|粒|颗|幢|堆|条|根|支|道|面|片|张|颗|块|元|(亿|千万|百万|万|千|百)|(亿|千万|百万|万|千|百|美|)元|(亿|千万|百万|万|千|百|十|)吨|(亿|千万|百万|万|千|百|)块|角|毛|分)"
35
+
36
+ # 分数表达式
37
+ RE_FRAC = re.compile(r"(-?)(\d+)/(\d+)")
38
+
39
+
40
def replace_frac(match) -> str:
    """
    Verbalize a fraction, e.g. "2/3" -> 三分之二, "-1/4" -> 负四分之一.

    Args:
        match (re.Match): a match of RE_FRAC
    Returns:
        str
    """
    prefix = "负" if match.group(1) else ""
    numerator = num2str(match.group(2))
    denominator = num2str(match.group(3))
    # Chinese reads the denominator first: X分之Y.
    return f"{prefix}{denominator}分之{numerator}"
55
+
56
+
57
+ # 百分数表达式
58
+ RE_PERCENTAGE = re.compile(r"(-?)(\d+(\.\d+)?)%")
59
+
60
+
61
def replace_percentage(match) -> str:
    """
    Verbalize a percentage, e.g. "-3.5%" -> 负百分之三点五.

    Args:
        match (re.Match): a match of RE_PERCENTAGE
    Returns:
        str
    """
    prefix = "负" if match.group(1) else ""
    return f"{prefix}百分之{num2str(match.group(2))}"
74
+
75
+
76
+ # 整数表达式
77
+ # 带负号的整数 -10
78
+ RE_INTEGER = re.compile(r"(-)" r"(\d+)")
79
+
80
+
81
def replace_negative_num(match) -> str:
    """
    Verbalize a signed integer, e.g. "-10" -> 负十.

    Args:
        match (re.Match): a match of RE_INTEGER
    Returns:
        str
    """
    prefix = "负" if match.group(1) else ""
    return f"{prefix}{num2str(match.group(2))}"
94
+
95
+
96
+ # 编号-无符号整形
97
+ # 00078
98
+ RE_DEFAULT_NUM = re.compile(r"\d{3}\d*")
99
+
100
+
101
def replace_default_num(match):
    """
    Verbalize an identifier-like number (e.g. "00078") digit by digit,
    reading 1 as 幺 as is customary for codes and serial numbers.

    Args:
        match (re.Match): a match of RE_DEFAULT_NUM
    Returns:
        str
    """
    return verbalize_digit(match.group(0), alt_one=True)
110
+
111
+
112
+ # 加减乘除
113
+ # RE_ASMD = re.compile(
114
+ # r'((-?)((\d+)(\.\d+)?)|(\.(\d+)))([\+\-\×÷=])((-?)((\d+)(\.\d+)?)|(\.(\d+)))')
115
+ RE_ASMD = re.compile(
116
+ r"((-?)((\d+)(\.\d+)?[⁰¹²³⁴⁵⁶⁷⁸⁹ˣʸⁿ]*)|(\.\d+[⁰¹²³⁴⁵⁶⁷⁸⁹ˣʸⁿ]*)|([A-Za-z][⁰¹²³⁴⁵⁶⁷⁸⁹ˣʸⁿ]*))([+\-×÷=])((-?)((\d+)(\.\d+)?[⁰¹²³⁴⁵⁶⁷⁸⁹ˣʸⁿ]*)|(\.\d+[⁰¹²³⁴⁵⁶⁷⁸⁹ˣʸⁿ]*)|([A-Za-z][⁰¹²³⁴⁵⁶⁷⁸⁹ˣʸⁿ]*))"
117
+ )
118
+
119
# Arithmetic operator token -> spoken Chinese form.
asmd_map = {"+": "加", "-": "减", "×": "乘", "÷": "除", "=": "等于"}


def replace_asmd(match) -> str:
    """
    Verbalize the operator of an arithmetic expression matched by RE_ASMD,
    leaving both operands exactly as matched.

    Args:
        match (re.Match): a match of RE_ASMD
    Returns:
        str
    """
    left, operator, right = match.group(1), match.group(8), match.group(9)
    return left + asmd_map[operator] + right
131
+
132
+
133
+ # 次方专项
134
+ RE_POWER = re.compile(r"[⁰¹²³⁴⁵⁶⁷⁸⁹ˣʸⁿ]+")
135
+
136
# Superscript character -> corresponding plain character.
power_map = {
    "⁰": "0",
    "¹": "1",
    "²": "2",
    "³": "3",
    "⁴": "4",
    "⁵": "5",
    "⁶": "6",
    "⁷": "7",
    "⁸": "8",
    "⁹": "9",
    "ˣ": "x",
    "ʸ": "y",
    "ⁿ": "n",
}


def replace_power(match) -> str:
    """
    Verbalize a superscript exponent, e.g. "²" -> 的2次方.

    Args:
        match (re.Match): a match of RE_POWER
    Returns:
        str
    """
    exponent = "".join(power_map[ch] for ch in match.group(0))
    return "的" + exponent + "次方"
165
+
166
+
167
+ # 数字表达式
168
+ # 纯小数
169
+ RE_DECIMAL_NUM = re.compile(r"(-?)((\d+)(\.\d+))" r"|(\.(\d+))")
170
+ # 正整数 + 量���
171
+ RE_POSITIVE_QUANTIFIERS = re.compile(r"(\d+)([多余几+])?" + COM_QUANTIFIERS)
172
+ RE_NUMBER = re.compile(r"(-?)((\d+)(\.\d+)?)" r"|(\.(\d+))")
173
+
174
+
175
def replace_positive_quantifier(match) -> str:
    """
    Verbalize "<number>[多/余/几/+]<quantifier>", e.g. "3个" -> 三个,
    "2个" -> 两个, "10+个" -> 十多个.

    Args:
        match (re.Match): a match of RE_POSITIVE_QUANTIFIERS
    Returns:
        str
    """
    number = num2str(match.group(1))
    if number == "二":
        # 2 before a measure word is read 两.
        number = "两"
    approx = match.group(2)
    if approx == "+":
        # "+" after a count is lazy shorthand for 多.
        approx = "多"
    approx = approx if approx else ""
    quantifier = match.group(3)
    return f"{number}{approx}{quantifier}"
192
+
193
+
194
def replace_number(match) -> str:
    """
    Verbalize a number: signed integer, decimal, or bare fraction (".5").

    Args:
        match (re.Match): a match of RE_NUMBER / RE_DECIMAL_NUM
    Returns:
        str
    """
    pure_decimal = match.group(5)
    if pure_decimal:
        # ".5"-style match — the sign group belongs to the other branch
        # of the alternation and does not apply here.
        return num2str(pure_decimal)
    prefix = "负" if match.group(1) else ""
    return f"{prefix}{num2str(match.group(2))}"
211
+
212
+
213
+ # 范围表达式
214
+ # match.group(1) and match.group(8) are copy from RE_NUMBER
215
+
216
+ RE_RANGE = re.compile(
217
+ r"""
218
+ (?<![\d+\-×÷=]) # 使用反向前瞻以确保数字范围之前没有其他数字和操作符
219
+ ((-?)((\d+)(\.\d+)?)) # 匹配范围起始的负数或正数(整数或小数)
220
+ [-~] # 匹配范围分隔符
221
+ ((-?)((\d+)(\.\d+)?)) # 匹配范围结束的负数或正数(整数或小数)
222
+ (?![\d+\-×÷=]) # 使用正向前瞻以确保数字范围之后没有其他数字和操作符
223
+ """,
224
+ re.VERBOSE,
225
+ )
226
+
227
+
228
def replace_range(match) -> str:
    """
    Verbalize a numeric range such as "3-5" as 三到五.

    Args:
        match (re.Match): a match of RE_RANGE
    Returns:
        str
    """
    start, end = match.group(1), match.group(6)
    spoken_start = RE_NUMBER.sub(replace_number, start)
    spoken_end = RE_NUMBER.sub(replace_number, end)
    return f"{spoken_start}到{spoken_end}"
240
+
241
+
242
+ # ~至表达式
243
+ RE_TO_RANGE = re.compile(
244
+ r"((-?)((\d+)(\.\d+)?)|(\.(\d+)))(%|°C|℃|度|摄氏度|cm2|cm²|cm3|cm³|cm|db|ds|kg|km|m2|m²|m³|m3|ml|m|mm|s)[~]((-?)((\d+)(\.\d+)?)|(\.(\d+)))(%|°C|℃|度|摄氏度|cm2|cm²|cm3|cm³|cm|db|ds|kg|km|m2|m²|m³|m3|ml|m|mm|s)"
245
+ )
246
+
247
+
248
def replace_to_range(match) -> str:
    """
    Read the tilde between two unit-suffixed numbers (e.g. "3cm~5cm")
    as 至; numbers and units are verbalized by later passes.

    Args:
        match (re.Match): a match of RE_TO_RANGE
    Returns:
        str
    """
    return match.group(0).replace("~", "至")
257
+
258
+
259
+ RE_VERSION_NUM = re.compile(r"((\d+)(\.\d+)(\.\d+)?(\.\d+)+)")
260
+
261
+
262
def replace_vrsion_num(match) -> str:
    """
    Verbalize a version number like "1.2.3" digit by digit, reading
    each dot as 点. (The name keeps the original misspelling: it is
    imported elsewhere.)

    Args:
        match (re.Match): a match of RE_VERSION_NUM
    Returns:
        str
    """
    spoken = []
    for ch in match.group(1):
        spoken.append("点" if ch == "." else num2str(ch))
    return "".join(spoken)
276
+
277
+
278
def _get_value(value_string: str, use_zero: bool = True) -> List[str]:
    """Recursively break an integer string into digit/unit symbols.

    A run of skipped leading zeros is voiced as a single 零 when
    *use_zero* is true.
    """
    stripped = value_string.lstrip("0")
    if not stripped:
        return []
    if len(stripped) == 1:
        if use_zero and len(stripped) < len(value_string):
            return [DIGITS["0"], DIGITS[stripped]]
        return [DIGITS[stripped]]
    # Largest unit (十/百/千/万/亿) that fits below the digit count.
    largest_unit = next(power for power in reversed(UNITS.keys()) if power < len(stripped))
    head = value_string[:-largest_unit]
    tail = value_string[-largest_unit:]
    return _get_value(head) + [UNITS[largest_unit]] + _get_value(tail)
292
+
293
+
294
def verbalize_cardinal(value_string: str) -> str:
    """Verbalize an integer string as a Chinese cardinal, e.g. "15" -> 十五."""
    if not value_string:
        return ""

    # "000" and "0" both read as 零.
    value_string = value_string.lstrip("0")
    if not value_string:
        return DIGITS["0"]

    symbols = _get_value(value_string)
    # A number spoken as 一十* is conventionally abbreviated to 十*.
    if len(symbols) >= 2 and symbols[0] == DIGITS["1"] and symbols[1] == UNITS[1]:
        symbols = symbols[1:]
    return "".join(symbols)
308
+
309
+
310
def verbalize_digit(value_string: str, alt_one=False) -> str:
    """Read a number digit by digit; with *alt_one*, 1 is spoken 幺."""
    spoken = "".join(DIGITS[digit] for digit in value_string)
    if alt_one:
        spoken = spoken.replace("一", "幺")
    return spoken
316
+
317
+
318
def num2str(value_string: str) -> str:
    """
    Verbalize a number string, e.g. "15.5" -> 十五点五.

    Args:
        value_string (str): decimal digits with at most one '.'.
    Returns:
        str
    Raises:
        ValueError: if value_string contains more than one '.'.
    """
    integer_decimal = value_string.split(".")
    if len(integer_decimal) == 1:
        integer = integer_decimal[0]
        decimal = ""
    elif len(integer_decimal) == 2:
        integer, decimal = integer_decimal
    else:
        # BUG FIX: the original f-string had a stray '$' left over
        # ("'${value_string}'"), printing a literal dollar sign.
        raise ValueError(f"The value string: '{value_string}' has more than one point in it.")

    result = verbalize_cardinal(integer)

    # Trim trailing zeros from the fraction, but keep a single trailing
    # zero audible when the fraction ended in 0.
    if decimal.endswith("0"):
        decimal = decimal.rstrip("0") + "0"
    else:
        decimal = decimal.rstrip("0")

    if decimal:
        # '.22' is verbalized as '零点二二'
        # '3.200' is verbalized as '三点二零' (one trailing zero kept;
        # the original comment claiming '3.20' -> '三点二' was stale)
        result = result if result else "零"
        result += "点" + verbalize_digit(decimal)
    return result
genie_tts/G2P/Chinese/Normalization/phonecode.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import re
15
+
16
+ from .num import verbalize_digit
17
+
18
+ # 规范化固话/手机号码
19
+ # 手机
20
+ # http://www.jihaoba.com/news/show/13680
21
+ # 移动:139、138、137、136、135、134、159、158、157、150、151、152、188、187、182、183、184、178、198
22
+ # 联通:130、131、132、156、155、186、185、176
23
+ # 电信:133、153、189、180、181、177
24
+ RE_MOBILE_PHONE = re.compile(r"(?<!\d)((\+?86 ?)?1([38]\d|5[0-35-9]|7[678]|9[89])\d{8})(?!\d)")
25
+ RE_TELEPHONE = re.compile(r"(?<!\d)((0(10|2[1-3]|[3-9]\d{2})-?)?[1-9]\d{6,7})(?!\d)")
26
+
27
+ # 全国统一的号码400开头
28
+ RE_NATIONAL_UNIFORM_NUMBER = re.compile(r"(400)(-)?\d{3}(-)?\d{4}")
29
+
30
+
31
def phone2str(phone_string: str, mobile=True) -> str:
    """
    Read a phone number digit by digit (1 spoken as 幺), inserting a
    pause comma between groups: mobile numbers split on spaces (after
    stripping a leading '+'), landlines split on dashes.
    """
    if mobile:
        groups = phone_string.strip("+").split()
    else:
        groups = phone_string.split("-")
    return ",".join(verbalize_digit(group, alt_one=True) for group in groups)
40
+
41
+
42
def replace_phone(match) -> str:
    """
    Verbalize a landline / 400-style service number.

    Args:
        match (re.Match): a match of RE_TELEPHONE or RE_NATIONAL_UNIFORM_NUMBER
    Returns:
        str
    """
    return phone2str(match.group(0), mobile=False)
50
+
51
+
52
def replace_mobile(match) -> str:
    """
    Verbalize a mobile phone number.

    Args:
        match (re.Match): a match of RE_MOBILE_PHONE
    Returns:
        str
    """
    return phone2str(match.group(0))
genie_tts/G2P/Chinese/Normalization/quantifier.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import re
15
+ from .num import num2str
16
+
17
+ # 温度表达式,温度会影响负号的读法
18
+ # -3°C 零下三度
19
+ RE_TEMPERATURE = re.compile(r"(-?)(\d+(\.\d+)?)(°C|℃|度|摄氏度)")
20
# Unit abbreviation -> spoken Chinese unit.
measure_dict = {
    "cm2": "平方厘米",
    "cm²": "平方厘米",
    "cm3": "立方厘米",
    "cm³": "立方厘米",
    "cm": "厘米",
    "db": "分贝",
    "ds": "毫秒",
    "kg": "千克",
    "km": "千米",
    "m2": "平方米",
    "m²": "平方米",
    "m³": "立方米",
    "m3": "立方米",
    "ml": "毫升",
    "m": "米",
    "mm": "毫米",
    "s": "秒",
}


def replace_temperature(match) -> str:
    """
    Verbalize a temperature, e.g. "-3°C" -> 零下三度.

    Args:
        match (re.Match): a match of RE_TEMPERATURE
    Returns:
        str
    """
    sign = match.group(1)
    temperature = match.group(2)
    unit = match.group(3)
    sign: str = "零下" if sign else ""  # minus reads 零下 for temperatures
    temperature: str = num2str(temperature)
    unit: str = "摄氏度" if unit == "摄氏度" else "度"
    result = f"{sign}{temperature}{unit}"
    return result


def replace_measure(sentence) -> str:
    """
    Replace unit abbreviations in *sentence* with their spoken form.

    BUG FIX: substitute longer abbreviations first (stable sort by
    descending key length). The original iterated the dict in insertion
    order, where "m" precedes "mm", so e.g. "10mm" was rewritten to
    "10米米" instead of "10毫米".
    """
    for abbrev in sorted(measure_dict, key=len, reverse=True):
        if abbrev in sentence:
            sentence = sentence.replace(abbrev, measure_dict[abbrev])
    return sentence
genie_tts/G2P/Chinese/Normalization/text_normlization.py ADDED
@@ -0,0 +1,169 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import re
15
+ from typing import List
16
+
17
+ from .char_convert import tranditional_to_simplified
18
+ from .chronology import (
19
+ RE_DATE,
20
+ RE_DATE2,
21
+ RE_TIME,
22
+ RE_TIME_RANGE,
23
+ replace_date,
24
+ replace_date2,
25
+ replace_time,
26
+ )
27
+ from .constants import (
28
+ F2H_ASCII_LETTERS,
29
+ F2H_DIGITS,
30
+ F2H_SPACE,
31
+ )
32
+ from .num import (
33
+ RE_VERSION_NUM, RE_DECIMAL_NUM, RE_DEFAULT_NUM, RE_FRAC,
34
+ RE_INTEGER, RE_NUMBER, RE_PERCENTAGE, RE_POSITIVE_QUANTIFIERS,
35
+ RE_RANGE, RE_TO_RANGE, RE_ASMD, RE_POWER,
36
+ replace_vrsion_num, replace_default_num, replace_frac,
37
+ replace_negative_num, replace_number, replace_percentage,
38
+ replace_positive_quantifier, replace_range, replace_to_range,
39
+ replace_asmd, replace_power
40
+ )
41
+ from .phonecode import (
42
+ RE_MOBILE_PHONE,
43
+ RE_NATIONAL_UNIFORM_NUMBER,
44
+ RE_TELEPHONE,
45
+ replace_mobile,
46
+ replace_phone,
47
+ )
48
+ from .quantifier import (
49
+ RE_TEMPERATURE,
50
+ replace_measure,
51
+ replace_temperature,
52
+ )
53
+
54
+
55
class TextNormalizer:
    """Normalize written Chinese into speakable sentences.

    The text is split into sentences, then each sentence is verbalized by
    a pipeline of regex substitutions (dates, times, numbers, phones,
    units, ...). The substitution order in ``normalize_sentence`` is
    significant: more specific patterns must run before the generic
    number replacement at the end.
    """

    def __init__(self):
        # Sentence-ending punctuation (full-width and half-width),
        # optionally followed by a closing quote.
        self.SENTENCE_SPLITOR = re.compile(r"([:、,;。?!,;?!][”’]?)")

    def _split(self, text: str, lang="zh") -> List[str]:
        """Split long text into sentences with sentence-splitting punctuations.
        Args:
            text (str): The input text.
        Returns:
            List[str]: Sentences.
        """
        # Only for pure Chinese here
        if lang == "zh":
            text = text.replace(" ", "")
            # Filter out special characters (brackets, quotes, etc.).
            text = re.sub(r"[——《》【】<>{}()()#&@“”^_|\\]", "", text)
        # Insert a newline after each sentence-ending punctuation, then
        # split on the newlines.
        text = self.SENTENCE_SPLITOR.sub(r"\1\n", text)
        text = text.strip()
        sentences = [sentence.strip() for sentence in re.split(r"\n+", text)]
        return sentences

    def _post_replace(self, sentence: str) -> str:
        """Map residual symbols (circled digits, Greek letters, math
        operators) to spoken Chinese, then strip leftover special
        characters."""
        sentence = sentence.replace("/", "每")
        # sentence = sentence.replace('~', '至')
        # sentence = sentence.replace('~', '至')
        sentence = sentence.replace("①", "一")
        sentence = sentence.replace("②", "二")
        sentence = sentence.replace("③", "三")
        sentence = sentence.replace("④", "四")
        sentence = sentence.replace("⑤", "五")
        sentence = sentence.replace("⑥", "六")
        sentence = sentence.replace("⑦", "七")
        sentence = sentence.replace("⑧", "八")
        sentence = sentence.replace("⑨", "九")
        sentence = sentence.replace("⑩", "十")
        sentence = sentence.replace("α", "阿尔法")
        sentence = sentence.replace("β", "贝塔")
        sentence = sentence.replace("γ", "伽玛").replace("Γ", "伽玛")
        sentence = sentence.replace("δ", "德尔塔").replace("Δ", "德尔塔")
        sentence = sentence.replace("ε", "艾普西龙")
        sentence = sentence.replace("ζ", "捷塔")
        sentence = sentence.replace("η", "依塔")
        sentence = sentence.replace("θ", "西塔").replace("Θ", "西塔")
        sentence = sentence.replace("ι", "艾欧塔")
        sentence = sentence.replace("κ", "喀帕")
        sentence = sentence.replace("λ", "拉姆达").replace("Λ", "拉姆达")
        sentence = sentence.replace("μ", "缪")
        sentence = sentence.replace("ν", "拗")
        sentence = sentence.replace("ξ", "克西").replace("Ξ", "克西")
        sentence = sentence.replace("ο", "欧米克伦")
        sentence = sentence.replace("π", "派").replace("Π", "派")
        sentence = sentence.replace("ρ", "肉")
        sentence = sentence.replace("ς", "西格玛").replace("Σ", "西格玛").replace("σ", "西格玛")
        sentence = sentence.replace("τ", "套")
        sentence = sentence.replace("υ", "宇普西龙")
        sentence = sentence.replace("φ", "服艾").replace("Φ", "服艾")
        sentence = sentence.replace("χ", "器")
        sentence = sentence.replace("ψ", "普赛").replace("Ψ", "普赛")
        sentence = sentence.replace("ω", "欧米伽").replace("Ω", "欧米伽")
        # Fallback for math operators the regex passes missed (also covers
        # casual shorthand like "1+1").
        sentence = sentence.replace("+", "加")
        sentence = sentence.replace("-", "减")
        sentence = sentence.replace("×", "乘")
        sentence = sentence.replace("÷", "除")
        sentence = sentence.replace("=", "等")
        # Regex-filter special characters; unlike the filter in _split this
        # set also removes "-" and "=".
        sentence = re.sub(r"[-——《》【】<=>{}()()#&@“”^_|\\]", "", sentence)
        return sentence

    def normalize_sentence(self, sentence: str) -> str:
        """Verbalize one sentence. Substitution order is significant."""
        # basic character conversions: traditional -> simplified, then
        # full-width ASCII letters/digits/space -> half-width.
        sentence = tranditional_to_simplified(sentence)
        sentence = sentence.translate(F2H_ASCII_LETTERS).translate(F2H_DIGITS).translate(F2H_SPACE)

        # number related NSW verbalization
        sentence = RE_DATE.sub(replace_date, sentence)
        sentence = RE_DATE2.sub(replace_date2, sentence)

        # range first: time ranges must be consumed before single times.
        sentence = RE_TIME_RANGE.sub(replace_time, sentence)
        sentence = RE_TIME.sub(replace_time, sentence)

        # Read '~' between unit-suffixed numbers as 至.
        sentence = RE_TO_RANGE.sub(replace_to_range, sentence)
        sentence = RE_TEMPERATURE.sub(replace_temperature, sentence)
        sentence = replace_measure(sentence)

        # Arithmetic expressions; loop because one pass rewrites a single
        # operator, and chained expressions need repeated passes.
        while RE_ASMD.search(sentence):
            sentence = RE_ASMD.sub(replace_asmd, sentence)
        sentence = RE_POWER.sub(replace_power, sentence)

        sentence = RE_FRAC.sub(replace_frac, sentence)
        sentence = RE_PERCENTAGE.sub(replace_percentage, sentence)
        sentence = RE_MOBILE_PHONE.sub(replace_mobile, sentence)

        sentence = RE_TELEPHONE.sub(replace_phone, sentence)
        sentence = RE_NATIONAL_UNIFORM_NUMBER.sub(replace_phone, sentence)

        sentence = RE_RANGE.sub(replace_range, sentence)

        # Generic numbers last, from most to least specific.
        sentence = RE_INTEGER.sub(replace_negative_num, sentence)
        sentence = RE_VERSION_NUM.sub(replace_vrsion_num, sentence)
        sentence = RE_DECIMAL_NUM.sub(replace_number, sentence)
        sentence = RE_POSITIVE_QUANTIFIERS.sub(replace_positive_quantifier, sentence)
        sentence = RE_DEFAULT_NUM.sub(replace_default_num, sentence)
        sentence = RE_NUMBER.sub(replace_number, sentence)
        sentence = self._post_replace(sentence)

        return sentence

    def normalize(self, text: str) -> List[str]:
        """Split *text* into sentences and normalize each one."""
        sentences = self._split(text)
        sentences = [self.normalize_sentence(sent) for sent in sentences]
        return sentences
genie_tts/G2P/Chinese/ToneSandhi.py ADDED
@@ -0,0 +1,354 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """
16
+ 中文拼音变调(Tone Sandhi)自动处理器
17
+ """
18
+
19
+ from typing import List
20
+ from typing import Tuple
21
+ import jieba_fast as jieba
22
+ from pypinyin import lazy_pinyin
23
+ from pypinyin import Style
24
+
25
+
26
class ToneSandhi:
    """Rule-based Mandarin tone-sandhi processor.

    Works on pypinyin ``FINALS_TONE3``-style finals (e.g. ``ia1``) where the
    trailing digit is the tone ("5" = neutral tone). ``pre_merge_for_modify``
    re-merges a jieba segmentation so the sandhi rules see appropriate word
    units; ``modified_tone`` then applies the 不/一/neutral-tone/third-tone
    rules in that fixed order. All rule methods mutate and return *finals*.
    """

    def __init__(self):
        # Words whose final syllable must be read with the neutral tone.
        self.must_neural_tone_words = {
            "麻烦", "麻利", "鸳鸯", "高粱", "骨头", "骆驼", "马虎", "首饰", "馒头", "馄饨",
            "风筝", "难为", "队伍", "阔气", "闺女", "门道", "锄头", "铺盖", "铃铛", "铁匠",
            "钥匙", "里脊", "里头", "部分", "那么", "道士", "造化", "迷糊", "连累", "这么",
            "这个", "运气", "过去", "软和", "转悠", "踏实", "跳蚤", "跟头", "趔趄", "财主",
            "豆腐", "讲究", "记性", "记号", "认识", "规矩", "见识", "裁缝", "补丁", "衣裳",
            "衣服", "衙门", "街坊", "行李", "行当", "蛤蟆", "蘑菇", "薄荷", "葫芦", "葡萄",
            "萝卜", "荸荠", "苗条", "苗头", "苍蝇", "芝麻", "舒服", "舒坦", "舌头", "自在",
            "膏药", "脾气", "脑袋", "脊梁", "能耐", "胳膊", "胭脂", "胡萝", "胡琴", "胡同",
            "聪明", "耽误", "耽搁", "耷拉", "耳朵", "老爷", "老实", "老婆", "老头", "老太",
            "翻腾", "罗嗦", "罐头", "编辑", "结实", "红火", "累赘", "糨糊", "糊涂", "精神",
            "粮食", "簸箕", "篱笆", "算计", "算盘", "答应", "笤帚", "笑语", "笑话", "窟窿",
            "窝囊", "窗户", "稳当", "稀罕", "称呼", "秧歌", "秀气", "秀才", "福气", "祖宗",
            "砚台", "码头", "石榴", "石头", "石匠", "知识", "眼睛", "眯缝", "眨巴", "眉毛",
            "相声", "盘算", "白净", "痢疾", "痛快", "疟疾", "疙瘩", "疏忽", "畜生", "生意",
            "甘蔗", "琵琶", "琢磨", "琉璃", "玻璃", "玫瑰", "玄乎", "狐狸", "状元", "特务",
            "牲口", "牙碜", "牌楼", "爽快", "爱人", "热闹", "烧饼", "烟筒", "烂糊", "点心",
            "炊帚", "灯笼", "火候", "漂亮", "滑溜", "溜达", "温和", "清楚", "消息", "浪头",
            "活泼", "比方", "正经", "欺负", "模糊", "槟榔", "棺材", "棒槌", "棉花", "核桃",
            "栅栏", "柴火", "架势", "枕头", "枇杷", "机灵", "本事", "木头", "木匠", "朋友",
            "月饼", "月亮", "暖和", "明白", "时候", "新鲜", "故事", "收拾", "收成", "提防",
            "挖苦", "挑剔", "指甲", "指头", "拾掇", "拳头", "拨弄", "招牌", "招呼", "抬举",
            "护士", "折腾", "扫帚", "打量", "打算", "打点", "打扮", "打听", "打发", "扎实",
            "扁担", "戒指", "懒得", "意识", "意思", "情形", "悟性", "怪物", "思量", "怎么",
            "念头", "念叨", "快活", "忙活", "志气", "心思", "得罪", "张罗", "弟兄", "开通",
            "应酬", "庄稼", "干事", "帮手", "帐篷", "希罕", "师父", "师傅", "巴结", "巴掌",
            "差事", "工夫", "岁数", "屁股", "尾巴", "少爷", "小气", "小伙", "将就", "对头",
            "对付", "寡妇", "家伙", "客气", "实在", "官司", "学问", "学生", "字号", "嫁妆",
            "媳妇", "媒人", "婆家", "娘家", "委屈", "姑娘", "姐夫", "妯娌", "妥当", "妖精",
            "奴才", "女婿", "头发", "太阳", "大爷", "大方", "大意", "大夫", "多少", "多么",
            "外甥", "壮实", "地道", "地方", "在乎", "困难", "嘴巴", "嘱咐", "嘟囔", "嘀咕",
            "喜欢", "喇嘛", "喇叭", "商量", "唾沫", "哑巴", "哈欠", "哆嗦", "咳嗽", "和尚",
            "告诉", "告示", "含糊", "吓唬", "后头", "名字", "名堂", "合同", "吆喝", "叫唤",
            "口袋", "厚道", "厉害", "千斤", "包袱", "包涵", "匀称", "勤快", "动静", "动弹",
            "功夫", "力气", "前头", "刺猬", "刺激", "别扭", "利落", "利索", "利害", "分析",
            "出息", "凑合", "凉快", "冷战", "冤枉", "冒失", "养活", "关系", "先生", "兄弟",
            "便宜", "使唤", "佩服", "作坊", "体面", "位置", "似的", "伙计", "休息", "什么",
            "人家", "亲戚", "亲家", "交情", "云彩", "事情", "买卖", "主意", "丫头", "丧气",
            "两口", "东西", "东家", "世故", "不由", "不在", "下水", "下巴", "上头", "上司",
            "丈夫", "丈人", "一辈", "那个", "菩萨", "父亲", "母亲", "咕噜", "邋遢", "费用",
            "冤家", "甜头", "介绍", "荒唐", "大人", "泥鳅", "幸福", "熟悉", "计划", "扑腾",
            "蜡烛", "姥爷", "照顾", "喉咙", "吉他", "弄堂", "蚂蚱", "凤凰", "拖沓", "寒碜",
            "糟蹋", "倒腾", "报复", "逻辑", "盘缠", "喽啰", "牢骚", "咖喱", "扫把", "惦记",
        }
        # Words that must NOT be neutralized even though a generic rule matches.
        self.must_not_neural_tone_words = {
            "男子", "女子", "分子", "原子", "量子", "莲子", "石子", "瓜子", "电子", "人人",
            "虎虎", "幺幺", "干嘛", "学子", "哈哈", "数数", "袅袅", "局地", "以下", "娃哈哈",
            "花花草草", "留得", "耕地", "想想", "熙熙", "攘攘", "卵子", "死死", "冉冉", "恳恳",
            "佼佼", "吵吵", "打打", "考考", "整整", "莘莘", "落地", "算子", "家家户户", "青青",
        }
        # Punctuation that blocks the 一 -> yi4 rule inside a word.
        self.punc = ":,;。?!“”‘’':,;.?!"

    # the meaning of jieba pos tag: https://blog.csdn.net/weixin_44174352/article/details/113731041
    # e.g.
    # word: "家里"
    # pos: "s"
    # finals: ['ia1', 'i3']
    def _neural_sandhi(self, word: str, pos: str, finals: List[str]) -> List[str]:
        """Apply neutral-tone (轻声) rules; returns the rewritten finals."""
        # reduplication words for n. and v. e.g. 奶奶, 试试, 旺旺
        for j, item in enumerate(word):
            if (
                j - 1 >= 0
                and item == word[j - 1]
                and pos[0] in {"n", "v", "a"}
                and word not in self.must_not_neural_tone_words
            ):
                finals[j] = finals[j][:-1] + "5"
        ge_idx = word.find("个")
        # sentence-final particles are neutral tone
        if len(word) >= 1 and word[-1] in "吧呢哈啊呐噻嘛吖嗨呐哦哒额滴哩哟喽啰耶喔诶":
            finals[-1] = finals[-1][:-1] + "5"
        elif len(word) >= 1 and word[-1] in "的地得":
            finals[-1] = finals[-1][:-1] + "5"
        # e.g. 走了, 看着, 去过
        elif len(word) == 1 and word in "了着过" and pos in {"ul", "uz", "ug"}:
            finals[-1] = finals[-1][:-1] + "5"
        elif len(word) > 1 and word[-1] in "们子" and pos in {"r", "n"} and word not in self.must_not_neural_tone_words:
            finals[-1] = finals[-1][:-1] + "5"
        # e.g. 桌上, 地下, 家里
        elif len(word) > 1 and word[-1] in "上下里" and pos in {"s", "l", "f"}:
            finals[-1] = finals[-1][:-1] + "5"
        # e.g. 上来, 下去
        elif len(word) > 1 and word[-1] in "来去" and word[-2] in "上下进出回过起开":
            finals[-1] = finals[-1][:-1] + "5"
        # 个 used as a classifier, e.g. 一个 / 几个
        elif (
            ge_idx >= 1 and (word[ge_idx - 1].isnumeric() or word[ge_idx - 1] in "几有两半多各整每做是")
        ) or word == "个":
            finals[ge_idx] = finals[ge_idx][:-1] + "5"
        else:
            if word in self.must_neural_tone_words or word[-2:] in self.must_neural_tone_words:
                finals[-1] = finals[-1][:-1] + "5"

        # re-check each sub-word for dictionary-listed neutral-tone words
        word_list = self._split_word(word)
        finals_list = [finals[: len(word_list[0])], finals[len(word_list[0]):]]
        for i, word in enumerate(word_list):
            # conventional neutral-tone words in Chinese
            if word in self.must_neural_tone_words or word[-2:] in self.must_neural_tone_words:
                finals_list[i][-1] = finals_list[i][-1][:-1] + "5"
        finals = sum(finals_list, [])
        return finals

    @staticmethod
    def _bu_sandhi(word: str, finals: List[str]) -> List[str]:
        """Tone sandhi for 不 (bu4 -> bu2 before tone 4; neutral in X不X)."""
        # e.g. 看不懂
        if len(word) == 3 and word[1] == "不":
            finals[1] = finals[1][:-1] + "5"
        else:
            for i, char in enumerate(word):
                # "不" before tone4 should be bu2, e.g. 不怕
                if char == "不" and i + 1 < len(word) and finals[i + 1][-1] == "4":
                    finals[i] = finals[i][:-1] + "2"
        return finals

    def _yi_sandhi(self, word: str, finals: List[str]) -> List[str]:
        """Tone sandhi for 一 (yi1/yi2/yi4/yi5 depending on context)."""
        # "一" in number sequences keeps tone 1, e.g. 一零零, 二一零
        if word.find("一") != -1 and all([item.isnumeric() for item in word if item != "一"]):
            return finals
        # "一" between reduplication words should be yi5, e.g. 看一看
        elif len(word) == 3 and word[1] == "一" and word[0] == word[-1]:
            finals[1] = finals[1][:-1] + "5"
        # when "一" is an ordinal marker, it should be yi1
        elif word.startswith("第一"):
            finals[1] = finals[1][:-1] + "1"
        else:
            for i, char in enumerate(word):
                if char == "一" and i + 1 < len(word):
                    # "一" before tone4 should be yi2, e.g. 一段
                    if finals[i + 1][-1] == "4":
                        finals[i] = finals[i][:-1] + "2"
                    # "一" before a non-tone4 syllable should be yi4, e.g. 一天
                    else:
                        # if "一" is followed by punctuation, keep tone 1
                        if word[i + 1] not in self.punc:
                            finals[i] = finals[i][:-1] + "4"
        return finals

    @staticmethod
    def _split_word(word: str) -> List[str]:
        """Split *word* into two sub-words using jieba's search-mode cut.

        The shortest sub-word anchors the split: if it is a prefix of *word*
        the remainder becomes the second part, otherwise it becomes the
        suffix. Always returns a two-element list (second part may be "").
        """
        word_list = jieba.cut_for_search(word)
        word_list = sorted(word_list, key=lambda i: len(i), reverse=False)
        first_subword = word_list[0]
        first_begin_idx = word.find(first_subword)
        if first_begin_idx == 0:
            second_subword = word[len(first_subword):]
            new_word_list = [first_subword, second_subword]
        else:
            second_subword = word[: -len(first_subword)]
            new_word_list = [second_subword, first_subword]
        return new_word_list

    def _three_sandhi(self, word: str, finals: List[str]) -> List[str]:
        """Third-tone sandhi: 3-3 -> 2-3, with word-structure-aware rules."""
        if len(word) == 2 and self._all_tone_three(finals):
            finals[0] = finals[0][:-1] + "2"
        elif len(word) == 3:
            word_list = self._split_word(word)
            if self._all_tone_three(finals):
                # disyllabic + monosyllabic, e.g. 蒙古/包
                if len(word_list[0]) == 2:
                    finals[0] = finals[0][:-1] + "2"
                    finals[1] = finals[1][:-1] + "2"
                # monosyllabic + disyllabic, e.g. 纸/老虎
                elif len(word_list[0]) == 1:
                    finals[1] = finals[1][:-1] + "2"
            else:
                finals_list = [finals[: len(word_list[0])], finals[len(word_list[0]):]]
                if len(finals_list) == 2:
                    for i, sub in enumerate(finals_list):
                        # e.g. 所有/人
                        if self._all_tone_three(sub) and len(sub) == 2:
                            finals_list[i][0] = finals_list[i][0][:-1] + "2"
                        # e.g. 好/喜欢
                        elif (
                            i == 1
                            and not self._all_tone_three(sub)
                            and finals_list[i][0][-1] == "3"
                            and finals_list[0][-1][-1] == "3"
                        ):
                            finals_list[0][-1] = finals_list[0][-1][:-1] + "2"
                    finals = sum(finals_list, [])
        # split an idiom into two words of length 2 and apply the rule per half
        elif len(word) == 4:
            finals_list = [finals[:2], finals[2:]]
            finals = []
            for sub in finals_list:
                if self._all_tone_three(sub):
                    sub[0] = sub[0][:-1] + "2"
                finals += sub

        return finals

    @staticmethod
    def _all_tone_three(finals: List[str]) -> bool:
        """True if every final carries tone 3."""
        # the len(x) > 0 guard prevents a crash on empty-string finals
        return all(len(x) > 0 and x[-1] == "3" for x in finals)

    @staticmethod
    def _merge_bu(seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
        """Merge a standalone 不 with the following word (不 + X -> 不X)."""
        new_seg = []
        last_word = ""
        for word, pos in seg:
            if last_word == "不":
                word = last_word + word
            if word != "不":
                new_seg.append((word, pos))
            last_word = word[:]
        if last_word == "不":
            new_seg.append((last_word, "d"))
        return new_seg

    @staticmethod
    def _merge_yi(seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
        """Merge 一 into its neighbours.

        Pass 1: V + 一 + V (same verb) -> one token, e.g. 听/一/听 -> 听一听.
        Pass 2: attach any remaining leading 一 to the following word.
        """
        new_seg = []
        i = 0
        # function 1
        while i < len(seg):
            word, pos = seg[i]
            merged = False
            if i - 1 >= 0 and word == "一" and i + 1 < len(seg):
                last = new_seg[-1] if new_seg else seg[i - 1]
                if last[0] == seg[i + 1][0] and last[1] == "v" and seg[i + 1][1] == "v":
                    combined = last[0] + "一" + seg[i + 1][0]
                    new_seg[-1] = [combined, last[1]]
                    i += 2
                    merged = True
            if not merged:
                new_seg.append([word, pos])
                i += 1
        seg = new_seg
        new_seg = []
        # function 2
        for word, pos in seg:
            if new_seg and new_seg[-1][0] == "一":
                new_seg[-1][0] = new_seg[-1][0] + word
            else:
                new_seg.append([word, pos])
        return new_seg

    # merge when the first and the second words are entirely tone three
    def _merge_continuous_three_tones(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
        new_seg = []
        sub_finals_list = [
            lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3) for (word, pos) in seg
        ]
        assert len(sub_finals_list) == len(seg)
        merge_last = [False] * len(seg)
        for i, (word, pos) in enumerate(seg):
            if (
                i - 1 >= 0
                and self._all_tone_three(sub_finals_list[i - 1])
                and self._all_tone_three(sub_finals_list[i])
                and not merge_last[i - 1]
            ):
                # if the last word is a reduplication, do not merge, because
                # reduplication needs to go through _neural_sandhi
                if not self._is_reduplication(seg[i - 1][0]) and len(seg[i - 1][0]) + len(seg[i][0]) <= 3:
                    new_seg[-1][0] = new_seg[-1][0] + seg[i][0]
                    merge_last[i] = True
                else:
                    new_seg.append([word, pos])
            else:
                new_seg.append([word, pos])

        return new_seg

    @staticmethod
    def _is_reduplication(word: str) -> bool:
        """True for two-character reduplications like 奶奶."""
        return len(word) == 2 and word[0] == word[1]

    # merge when the last char of the first word and the first char of the
    # second word are both tone three
    def _merge_continuous_three_tones_2(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
        new_seg = []
        sub_finals_list = [
            lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3) for (word, pos) in seg
        ]
        assert len(sub_finals_list) == len(seg)
        merge_last = [False] * len(seg)
        for i, (word, pos) in enumerate(seg):
            if (
                i - 1 >= 0
                and sub_finals_list[i - 1][-1][-1] == "3"
                and sub_finals_list[i][0][-1] == "3"
                and not merge_last[i - 1]
            ):
                # if the last word is a reduplication, do not merge, because
                # reduplication needs to go through _neural_sandhi
                if not self._is_reduplication(seg[i - 1][0]) and len(seg[i - 1][0]) + len(seg[i][0]) <= 3:
                    new_seg[-1][0] = new_seg[-1][0] + seg[i][0]
                    merge_last[i] = True
                else:
                    new_seg.append([word, pos])
            else:
                new_seg.append([word, pos])
        return new_seg

    @staticmethod
    def _merge_er(seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
        """Attach erhua 儿 to the preceding word."""
        new_seg = []
        for i, (word, pos) in enumerate(seg):
            if i - 1 >= 0 and word == "儿" and seg[i - 1][0] != "#":
                new_seg[-1][0] = new_seg[-1][0] + seg[i][0]
            else:
                new_seg.append([word, pos])
        return new_seg

    @staticmethod
    def _merge_reduplication(seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
        """Merge adjacent identical tokens, e.g. 看/看 -> 看看."""
        new_seg = []
        for i, (word, pos) in enumerate(seg):
            if new_seg and word == new_seg[-1][0]:
                new_seg[-1][0] = new_seg[-1][0] + seg[i][0]
            else:
                new_seg.append([word, pos])
        return new_seg

    def pre_merge_for_modify(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
        """Run all merge passes so sandhi rules see complete word units."""
        seg = self._merge_bu(seg)
        seg = self._merge_yi(seg)
        seg = self._merge_reduplication(seg)
        seg = self._merge_continuous_three_tones(seg)
        seg = self._merge_continuous_three_tones_2(seg)
        seg = self._merge_er(seg)
        return seg

    def modified_tone(self, word: str, pos: str, finals: List[str]) -> List[str]:
        """Apply all sandhi rules to one word's finals (order matters)."""
        finals = self._bu_sandhi(word, finals)
        finals = self._yi_sandhi(word, finals)
        finals = self._neural_sandhi(word, pos, finals)
        finals = self._three_sandhi(word, finals)
        return finals
genie_tts/G2P/Chinese/__init__.py ADDED
File without changes
genie_tts/G2P/English/EnglishG2P.py ADDED
@@ -0,0 +1,296 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pickle
2
+ import os
3
+ import re
4
+ from typing import List, Dict, Tuple
5
+
6
+ import numpy as np
7
+ import nltk
8
+ from nltk.tokenize import TweetTokenizer
9
+ from nltk import pos_tag
10
+
11
+ from .Normalization import normalize
12
+ from .WordSegment import segment_text
13
+ from ..SymbolsV2 import symbols_v2, symbol_to_id_v2
14
+ from ..SymbolsV2 import PUNCTUATION
15
+ from ...Core.Resources import English_G2P_DIR
16
+
17
# nltk data path and tokenizer initialization
nltk.data.path.append(English_G2P_DIR)
word_tokenize = TweetTokenizer().tokenize

# Resource file locations
CMU_DICT_PATH = os.path.join(English_G2P_DIR, "cmudict.rep")
CMU_DICT_FAST_PATH = os.path.join(English_G2P_DIR, "cmudict-fast.rep")
CMU_DICT_HOT_PATH = os.path.join(English_G2P_DIR, "engdict-hot.rep")
CACHE_PATH = os.path.join(English_G2P_DIR, "engdict_cache.pickle")
NAMECACHE_PATH = os.path.join(English_G2P_DIR, "namedict_cache.pickle")
MODEL_PATH = os.path.join(English_G2P_DIR, "checkpoint20.npz")
28
+
29
+ # 正则表达式和映射
30
+ REP_MAP = {
31
+ "[;::,;]": ",",
32
+ '["’]': "'",
33
+ "。": ".",
34
+ "!": "!",
35
+ "?": "?",
36
+ }
37
+ REP_MAP_PATTERN = re.compile("|".join(re.escape(p) for p in REP_MAP.keys()))
38
# All TTS punctuation symbols, escaped for safe use inside a character class.
PUNCTUATIONS_FOR_REGEX = "".join(re.escape(p) for p in PUNCTUATION)
# A punctuation-or-space character followed by a run of punctuation: the
# replacement keeps only the first character of the run.
CONSECUTIVE_PUNCTUATION_PATTERN = re.compile(rf"([{PUNCTUATIONS_FOR_REGEX}\s])([{PUNCTUATIONS_FOR_REGEX}])+")
40
+
41
+
42
+ # 辅助函数
43
+ def _read_cmu_dict(file_path: str) -> Dict[str, List[str]]:
44
+ g2p_dict = {}
45
+ with open(file_path, 'r', encoding='utf-8') as f:
46
+ for line in f:
47
+ line = line.strip()
48
+ if not line or line.startswith(';;;'): continue
49
+ parts = re.split(r'\s+', line, maxsplit=1)
50
+ if len(parts) < 2: continue
51
+ word, pron_str = parts[0].lower(), parts[1]
52
+ pron = pron_str.split(" ")
53
+ word = re.sub(r'\(\d+\)$', '', word)
54
+ if word not in g2p_dict: g2p_dict[word] = [pron]
55
+ return g2p_dict
56
+
57
+
58
def _load_and_cache_dict() -> Dict[str, List[List[str]]]:
    """Load the pickled CMU dictionary cache, then overlay hot-fix entries."""
    with open(CACHE_PATH, "rb") as cache_file:
        merged = pickle.load(cache_file)
    overrides = _read_cmu_dict(CMU_DICT_HOT_PATH)
    if overrides:
        merged.update(overrides)
    return merged
64
+
65
+
66
def replace_phs(phs: List[str]) -> List[str]:
    """Filter a phoneme list against the symbol table.

    Phonemes present in ``symbols_v2`` are kept, a few aliases are remapped,
    and anything else is silently dropped.
    """
    aliases = {"'": "-"}
    kept: List[str] = []
    for ph in phs:
        if ph in symbols_v2:
            kept.append(ph)
            continue
        if ph in aliases:
            kept.append(aliases[ph])
    return kept
75
+
76
+
77
def replace_consecutive_punctuation(text: str) -> str:
    """Collapse each run of punctuation down to its first character."""
    collapsed, _count = CONSECUTIVE_PUNCTUATION_PATTERN.subn(r"\1", text)
    return collapsed
79
+
80
+
81
def text_normalize(text: str) -> str:
    """Normalize English text: remap CJK punctuation to ASCII, expand
    non-standard words, then collapse repeated punctuation."""
    remapped = REP_MAP_PATTERN.sub(lambda m: REP_MAP[m.group()], text)
    expanded = normalize(remapped)
    return replace_consecutive_punctuation(expanded)
86
+
87
+
88
class CleanG2p:
    """
    Self-contained English G2P converter with a built-in neural predictor.
    - Does not depend on the g2p_en package; the model inference is inlined.
    - Uses numpy for all computation.
    """

    def __init__(self):
        # 1. standard dictionary components
        self.cmu = _load_and_cache_dict()
        self.namedict = self._load_name_dict()
        # drop acronym-like entries so they fall through to letter spelling
        for word in ["AE", "AI", "AR", "IOS", "HUD", "OS"]:
            self.cmu.pop(word.lower(), None)
        self._setup_homographs()

        # 2. neural-network model components
        self._setup_nn_components()
        self._load_nn_model()

    def _setup_nn_components(self):
        """Build the grapheme/phoneme vocabularies used by the NN predictor."""
        self.graphemes = ["<pad>", "<unk>", "</s>"] + list("abcdefghijklmnopqrstuvwxyz")
        self.phonemes = ["<pad>", "<unk>", "<s>", "</s>"] + ['AA0', 'AA1', 'AA2', 'AE0', 'AE1', 'AE2', 'AH0', 'AH1',
                                                             'AH2', 'AO0',
                                                             'AO1', 'AO2', 'AW0', 'AW1', 'AW2', 'AY0', 'AY1', 'AY2',
                                                             'B', 'CH', 'D', 'DH',
                                                             'EH0', 'EH1', 'EH2', 'ER0', 'ER1', 'ER2', 'EY0', 'EY1',
                                                             'EY2', 'F', 'G', 'HH',
                                                             'IH0', 'IH1', 'IH2', 'IY0', 'IY1', 'IY2', 'JH', 'K', 'L',
                                                             'M', 'N', 'NG', 'OW0', 'OW1',
                                                             'OW2', 'OY0', 'OY1', 'OY2', 'P', 'R', 'S', 'SH', 'T', 'TH',
                                                             'UH0', 'UH1', 'UH2', 'UW',
                                                             'UW0', 'UW1', 'UW2', 'V', 'W', 'Y', 'Z', 'ZH']
        # forward and inverse index maps for both vocabularies
        self.g2idx = {g: idx for idx, g in enumerate(self.graphemes)}
        self.idx2g = {idx: g for idx, g in enumerate(self.graphemes)}
        self.p2idx = {p: idx for idx, p in enumerate(self.phonemes)}
        self.idx2p = {idx: p for idx, p in enumerate(self.phonemes)}

    def _load_nn_model(self):
        """Load the pretrained encoder/decoder GRU weights from the .npz file."""
        if not os.path.exists(MODEL_PATH):
            raise FileNotFoundError(f"G2P model file not found at: {MODEL_PATH}. "
                                    f"Please ensure 'checkpoint20.npz' is in the correct directory.")

        variables = np.load(MODEL_PATH)
        # encoder: embedding + single GRU cell weights/biases
        self.enc_emb = variables["enc_emb"]
        self.enc_w_ih = variables["enc_w_ih"]
        self.enc_w_hh = variables["enc_w_hh"]
        self.enc_b_ih = variables["enc_b_ih"]
        self.enc_b_hh = variables["enc_b_hh"]
        # decoder: embedding + single GRU cell weights/biases
        self.dec_emb = variables["dec_emb"]
        self.dec_w_ih = variables["dec_w_ih"]
        self.dec_w_hh = variables["dec_w_hh"]
        self.dec_b_ih = variables["dec_b_ih"]
        self.dec_b_hh = variables["dec_b_hh"]
        # output projection to phoneme logits
        self.fc_w = variables["fc_w"]
        self.fc_b = variables["fc_b"]
        # logger.info("G2P neural network model loaded successfully.")

    @staticmethod
    def _sigmoid(x):
        return 1 / (1 + np.exp(-x))

    def _grucell(self, x, h, w_ih, w_hh, b_ih, b_hh):
        """One GRU step. Gates are packed as [reset; update; new] thirds."""
        rzn_ih = np.matmul(x, w_ih.T) + b_ih
        rzn_hh = np.matmul(h, w_hh.T) + b_hh
        # split the packed projections into reset/update (rz) and candidate (n)
        rz_ih, n_ih = rzn_ih[:, :rzn_ih.shape[-1] * 2 // 3], rzn_ih[:, rzn_ih.shape[-1] * 2 // 3:]
        rz_hh, n_hh = rzn_hh[:, :rzn_hh.shape[-1] * 2 // 3], rzn_hh[:, rzn_hh.shape[-1] * 2 // 3:]
        rz = self._sigmoid(rz_ih + rz_hh)
        r, z = np.split(rz, 2, -1)
        n = np.tanh(n_ih + r * n_hh)
        h = (1 - z) * n + z * h
        return h

    def _gru(self, x, steps, w_ih, w_hh, b_ih, b_hh, h0=None):
        """Unrolled GRU over *steps* timesteps; returns all hidden states."""
        if h0 is None:
            h0 = np.zeros((x.shape[0], w_hh.shape[1]), np.float32)
        h = h0
        outputs = np.zeros((x.shape[0], steps, w_hh.shape[1]), np.float32)
        for t in range(steps):
            h = self._grucell(x[:, t, :], h, w_ih, w_hh, b_ih, b_hh)
            outputs[:, t, ::] = h
        return outputs

    def _encode(self, word: str) -> np.ndarray:
        """Embed the characters of *word* (plus </s>) for the encoder."""
        chars = list(word.lower()) + ["</s>"]
        x = [self.g2idx.get(char, self.g2idx["<unk>"]) for char in chars]
        x = np.take(self.enc_emb, np.expand_dims(x, 0), axis=0)
        return x

    def predict(self, word: str) -> List[str]:
        """Predict a pronunciation with the built-in seq2seq model."""
        # Encoder
        enc = self._encode(word)
        enc = self._gru(enc, len(word) + 1, self.enc_w_ih, self.enc_w_hh,
                        self.enc_b_ih, self.enc_b_hh, h0=np.zeros((1, self.enc_w_hh.shape[-1]), np.float32))
        last_hidden = enc[:, -1, :]

        # Decoder: greedy decoding, one phoneme per step
        dec = np.take(self.dec_emb, [self.p2idx["<s>"]], axis=0)  # Start with <s>
        h = last_hidden
        preds = []
        for _ in range(20):  # Max steps
            h = self._grucell(dec, h, self.dec_w_ih, self.dec_w_hh, self.dec_b_ih, self.dec_b_hh)
            logits = np.matmul(h, self.fc_w.T) + self.fc_b
            pred_idx = logits.argmax()
            if pred_idx == self.p2idx["</s>"]: break
            preds.append(pred_idx)
            dec = np.take(self.dec_emb, [pred_idx], axis=0)

        return [self.idx2p.get(idx, "<unk>") for idx in preds]

    # --- standard (dictionary-based) G2P logic ---

    @staticmethod
    def _load_name_dict() -> Dict[str, List[List[str]]]:
        """Load the pickled proper-name pronunciation dict, if present."""
        if os.path.exists(NAMECACHE_PATH):
            with open(NAMECACHE_PATH, "rb") as f: return pickle.load(f)
        return {}

    def _setup_homographs(self):
        """Homographs disambiguated by POS: (pron if POS matches, pron otherwise, POS prefix)."""
        self.homograph2features: Dict[str, Tuple[List[str], List[str], str]] = {
            "read": (["R", "EH1", "D"], ["R", "IY1", "D"], "VBD"),
            "complex": (["K", "AH0", "M", "P", "L", "EH1", "K", "S"], ["K", "AA1", "M", "P", "L", "EH0", "K", "S"],
                        "JJ"),
            "lead": (["L", "IY1", "D"], ["L", "EH1", "D"], "NN"),
            "presents": (["P", "R", "IY0", "Z", "EH1", "N", "T", "S"], ["P", "R", "EH1", "Z", "AH0", "N", "T", "S"],
                         "VBZ"),
        }

    def __call__(self, text: str) -> List[str]:
        """Convert *text* to a flat phoneme list, with " " between words."""
        original_words = word_tokenize(text)
        normalized_text = text_normalize(text)
        normalized_words = word_tokenize(normalized_text)

        # Re-align tokenizations: normalization appears to expand a standalone
        # "I" into "the first" (roman-numeral expansion); restore the pronoun.
        # NOTE(review): assumes text_normalize only ever splits "I" this way
        # — confirm against the normalizer.
        corrected_words = []
        original_idx, normalized_idx = 0, 0
        while original_idx < len(original_words) and normalized_idx < len(normalized_words):
            if original_words[original_idx] == "I" and \
                    " ".join(normalized_words[normalized_idx:normalized_idx + 2]) == "the first":
                corrected_words.append("I")
                original_idx += 1
                normalized_idx += 2
            else:
                corrected_words.append(normalized_words[normalized_idx])
                original_idx += 1
                normalized_idx += 1
        if normalized_idx < len(normalized_words):
            corrected_words.extend(normalized_words[normalized_idx:])

        if not corrected_words: return []

        tokens = pos_tag(corrected_words)
        prons = []
        for o_word, pos in tokens:
            word = o_word.lower()
            # tokens with no letters (punctuation, symbols) pass through as-is
            if re.search("[a-z]", word) is None:
                pron = [word]
            elif word in self.homograph2features:
                pron1, pron2, pos1 = self.homograph2features[word]
                pron = pron1 if pos.startswith(pos1) else pron2
            else:
                pron = self._query_word(o_word)
            prons.extend(pron)
            prons.extend([" "])
        return prons[:-1] if prons else []

    def _query_word(self, o_word: str) -> List[str]:
        """Look up one word: CMU dict, name dict, possessive/hyphen/segment
        decomposition, then the neural model as last resort."""
        word = o_word.lower()
        if word in self.cmu:
            # the article "A" (uppercase) reads as the reduced vowel
            if o_word == "A": return ["AH0"]
            return self.cmu[word][0]
        if o_word.istitle() and word in self.namedict:
            return self.namedict[word][0]
        # possessive 's: voicing depends on the final phoneme of the base word
        if word.endswith("'s") and len(word) > 2:
            base_pron = self._query_word(word[:-2])
            if base_pron:
                last_ph = base_pron[-1]
                if last_ph in {"S", "Z", "SH", "ZH", "CH", "JH"}: return base_pron + ["AH0", "Z"]
                if last_ph in {"P", "T", "K", "F", "TH"}: return base_pron + ["S"]
                return base_pron + ["Z"]
        # hyphenated compounds: pronounce each part
        if "-" in word and len(word) > 1:
            parts = [p for p in word.split("-") if p]
            if len(parts) > 1:
                result = [ph for part in parts for ph in self._query_word(part)]
                if result: return result
        # compound segmentation (e.g. "playground" -> "play" + "ground")
        segments = segment_text(word)
        if len(segments) > 1 and "".join(segments) == word:
            result = [ph for segment in segments for ph in self._query_word(segment)]
            if result: return result

        return self.predict(o_word)
280
+
281
+
282
# Module-level singleton, built eagerly at import time (loads the dictionary
# caches and the neural-network weights from disk).
_g2p_instance: CleanG2p = CleanG2p()
283
+
284
+
285
def g2p(text: str) -> List[str]:
    """Convert English text into a cleaned phoneme sequence."""
    if _g2p_instance is None:
        raise RuntimeError("G2P model is not available.")
    dropped = {" ", "<pad>", "UW", "</s>", "<s>"}
    cleaned: List[str] = []
    for ph in _g2p_instance(text):
        if ph in dropped:
            continue
        cleaned.append("UNK" if ph == "<unk>" else ph)
    return replace_phs(cleaned)
291
+
292
+
293
def english_to_phones(text: str) -> List[int]:
    """Convert English text into a list of phoneme symbol ids."""
    return [symbol_to_id_v2[ph] for ph in g2p(text)]
genie_tts/G2P/English/Normalization.py ADDED
@@ -0,0 +1,286 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import re
2
+ import unicodedata
3
+ from calendar import month_name
4
+
5
+
6
+ # ------------------- 核心:自实现数字转单词 (替代 inflect) -------------------
7
+
8
+ def _number_to_words_custom(num_str):
9
+ """一个不依赖inflect的、简化的数字到单词转换器。"""
10
+ num_str = str(num_str).strip()
11
+ if not num_str.isdigit(): return num_str
12
+
13
+ num = int(num_str)
14
+ if num == 0: return 'zero'
15
+
16
+ units = ["", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"]
17
+ teens = ["ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen", "sixteen", "seventeen", "eighteen",
18
+ "nineteen"]
19
+ tens = ["", "", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"]
20
+ thousands = ["", "thousand", "million", "billion", "trillion"]
21
+
22
+ def convert_less_than_thousand(n):
23
+ if n == 0: return ""
24
+ if n < 10: return units[n]
25
+ if n < 20: return teens[n - 10]
26
+ if n < 100: return tens[n // 10] + (" " + units[n % 10] if n % 10 != 0 else "")
27
+ return units[n // 100] + " hundred" + (" " + convert_less_than_thousand(n % 100) if n % 100 != 0 else "")
28
+
29
+ words = []
30
+ i = 0
31
+ if num == 0: return "zero"
32
+ while num > 0:
33
+ if num % 1000 != 0:
34
+ words.insert(0, convert_less_than_thousand(num % 1000) + " " + thousands[i])
35
+ num //= 1000
36
+ i += 1
37
+ return " ".join(words).strip()
38
+
39
+
40
+ def _ordinal_custom(num_str):
41
+ """一个不依赖inflect的、简化的序数词转换器。"""
42
+ num = int(num_str)
43
+ if 10 <= num % 100 <= 20:
44
+ suffix = 'th'
45
+ else:
46
+ suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get(num % 10, 'th')
47
+ return _number_to_words_custom(str(num)) + suffix
48
+
49
+
50
# ------------------- Initialization and constants (inflect-free) -------------------

# Unit abbreviation -> [singular spoken form, plural spoken form]
_measurement_map = {
    "km/h": ["kilometer per hour", "kilometers per hour"], "mph": ["mile per hour", "miles per hour"],
    "°C": ["degree celsius", "degrees celsius"], "°F": ["degree fahrenheit", "degrees fahrenheit"],
    "tbsp": ["tablespoon", "tablespoons"], "tsp": ["teaspoon", "teaspoons"],
    "km": ["kilometer", "kilometers"], "kg": ["kilogram", "kilograms"], "min": ["minute", "minutes"],
    "ft": ["foot", "feet"], "cm": ["centimeter", "centimeters"], "m": ["meter", "meters"],
    "L": ["liter", "liters"], "h": ["hour", "hours"], "s": ["second", "seconds"],
}

# Common abbreviations -> spoken expansions (anchored so they only match a
# standalone abbreviation followed by whitespace, punctuation, or end of text).
_abbreviations = [
    (re.compile(r"\bMr\.(?=[\s,.]|\Z)", re.IGNORECASE), "Mister"),
    (re.compile(r"\bMrs\.(?=[\s,.]|\Z)", re.IGNORECASE), "Missus"),
    (re.compile(r"\bDr\.(?=[\s,.]|\Z)", re.IGNORECASE), "Doctor"),
    (re.compile(r"\bProf\.(?=[\s,.]|\Z)", re.IGNORECASE), "Professor"),
    (re.compile(r"\bSt\.(?=[\s,.]|\Z)", re.IGNORECASE), "Street"),
    (re.compile(r"\bCo\.(?=[\s,.]|\Z)", re.IGNORECASE), "Company"),
    (re.compile(r"\bLtd\.(?=[\s,.]|\Z)", re.IGNORECASE), "Limited"),
    (re.compile(r"\be\.g\.(?=[\s,.]|\Z)", re.IGNORECASE), "for example"),
    (re.compile(r"\bi\.e\.(?=[\s,.]|\Z)", re.IGNORECASE), "that is"),
]

# ------------------- Regex definitions (kept identical to the originals) -------------------
# e.g. "$3 million" — currency symbol, amount, scale word
_currency_suffix_re = re.compile(r"([£$€])([\d,.]*\d)\s*(million|billion|thousand)\b", re.IGNORECASE)
# optional "+CC-" country code, then 3-[3-]4 digit groups
_phone_re = re.compile(r"(\+?\d{1,3}-)?\b(\d{3})-(?:(\d{3})-)?(\d{4})\b")
# roman numerals II..XIX; longest alternatives listed first so they win
_roman_re = re.compile(r"\b(XIX|XVIII|XVII|XVI|XV|XIV|XIII|XII|XI|X|IX|VIII|VII|VI|V|IV|III|II)\b", re.IGNORECASE)
# decades like "1980s" / "2020s" (18xx, 19xx, 20xx only)
_decade_re = re.compile(r"\b((?:1[89]|20)\d0)s\b")
_score_re = re.compile(r"\b(\d{1,2})-(\d{1,2})\b")
# dimensions like "3x4" or "3 x 4 x 5"
_dimension_re = re.compile(r"\b(\d+(?:\.\d+)?)\s*x\s*(\d+(?:\.\d+)?)(?:\s*x\s*(\d+(?:\.\d+)?))?\b")
_alphanumeric_re = re.compile(r"\b([a-zA-Z]+[0-9]+|[0-9]+[a-zA-Z]+)\b")
# US-style M/D/Y dates
_date_re = re.compile(r"\b(0?[1-9]|1[0-2])/([0-2]?\d|3[01])/(\d{2,4})\b")
# a list-style ordinal marker: "1. " at the start of an item
_ordinal_number_re = re.compile(r"\b(\d+)\. ")
_comma_number_re = re.compile(r"(\d[\d,]+\d)")
# symbol-before ("$5") or symbol-after ("5€") currency amounts
_currency_re = re.compile(r"([£$€])(\d*\.?\d+)|(\d*\.?\d+)\s*([£$€])")
_time_re = re.compile(r"\b([01]?\d|2[0-3]):([0-5]\d)(?::([0-5]\d))?(\s*(?:a\.?m\.?|p\.?m\.?))?\b", re.IGNORECASE)
# longest unit names first so e.g. "km/h" wins over "km"
# NOTE(review): module-level name "units" is generic and easy to shadow
units = "|".join(re.escape(key) for key in sorted(_measurement_map.keys(), key=len, reverse=True))
_measurement_re = re.compile(rf"(?<!\w)(-?(?:\d+/\d+|\d+(?:\.\d+)?))\s*({units})\b")
_fraction_re = re.compile(r"\b(\d+)/(\d+)\b")
_decimal_number_re = re.compile(r"(\d+\.\d+)")
_ordinal_re = re.compile(r"\b\d+(st|nd|rd|th)\b")
_acronym_re = re.compile(r"\b[A-Z]{2,}\b")
_number_re = re.compile(r"(?<!\w)-?\d+(?!\w)")
93
+
94
+
95
+ # ------------------- 替换与扩展函数 (全部使用 _number_to_words_custom) -------------------
96
def _expand_currency_suffix(m):
    """Spell out amounts like '$3 million' as 'three million dollars'."""
    currency_names = {"$": "dollars", "£": "pounds", "€": "euros"}
    sym = m.group(1)
    digits = m.group(2).replace(",", "")
    scale = m.group(3)
    spoken_amount = _number_to_words_custom(digits)
    return f"{spoken_amount} {scale} {currency_names.get(sym, '')}"
102
+
103
+
104
def _expand_phone_number(m):
    """Read a phone number digit by digit, with pauses between groups."""

    def spell(digits):
        # Speak each digit individually, separated by spaces.
        return ' '.join(_number_to_words_custom(d) for d in digits)

    country, area, exch, line = m.groups()
    spoken = []
    if country:
        pieces = []
        if country.startswith('+'):
            pieces.append('plus')
        bare = re.sub(r'\D', '', country)
        if bare:
            pieces.append(spell(bare))
        spoken.append(' '.join(pieces))
    spoken.append(spell(area))
    if exch:
        spoken.append(spell(exch))
    spoken.append(spell(line))
    return ", ".join(spoken)
117
+
118
+
119
+ def _expand_roman(m):
120
+ roman_map = {
121
+ "ii": "two", "iii": "three", "iv": "four", "v": "five", "vi": "six", "vii": "seven",
122
+ "viii": "eight", "ix": "nine", "x": "ten", "xi": "eleven", "xii": "twelve",
123
+ "xiii": "thirteen", "xiv": "fourteen", "xv": "fifteen", "xvi": "sixteen",
124
+ "xvii": "seventeen", "xviii": "eighteen", "xix": "nineteen"
125
+ }
126
+ return roman_map.get(m.group(1).lower(), m.group(1))
127
+
128
+
129
def _expand_decade(m):
    """Turn a decade like '1980s' into words ('nineteen eighties')."""
    spoken = _expand_number_positive(m.group(1))
    # "eighty" -> "eighties"; anything else just gets a plural "s".
    if spoken.endswith('ty'):
        return spoken[:-1] + "ies"
    return spoken + "s"
134
+
135
+
136
def _expand_dimension(m):
    """Read '3 x 4' (optionally 'x 5') as 'three by four (by five)'."""
    return " by ".join(
        _number_to_words_custom(g) for g in m.groups() if g is not None
    )
140
+
141
+
142
def _expand_score(m):
    """Read a score like '3-2' as 'three to two'."""
    left, right = m.group(1), m.group(2)
    return f"{_number_to_words_custom(left)} to {_number_to_words_custom(right)}"
144
+
145
+
146
def _expand_alphanumeric(m):
    """Spell mixed tokens like 'B2': letters letter-by-letter, digits digit-by-digit."""
    spoken = []
    for run in re.findall(r'[a-zA-Z]+|[0-9]+', m.group(0)):
        if run.isalpha():
            spoken.append(' '.join(run))
        elif run.isdigit():
            spoken.append(' '.join(_number_to_words_custom(ch) for ch in run))
    return ' '.join(spoken)
156
+
157
+
158
def _convert_ordinal(m):
    """Rewrite a list-style '3. ' prefix as an ordinal word plus a comma."""
    return f"{_ordinal_custom(m.group(1))}, "
160
+
161
+
162
+ def _remove_commas(m): return m.group(1).replace(",", "")
163
+
164
+
165
def _expand_time(match):
    """Speak a clock time, e.g. '3:05 pm' -> 'three oh five pm'."""
    hour_s, minute_s, second_s, meridiem = match.groups()
    hour, minute = int(hour_s), int(minute_s)
    # Convert 24-hour values to 12-hour form only when an am/pm marker exists.
    if meridiem and hour > 12:
        hour_word = _number_to_words_custom(hour - 12)
    else:
        hour_word = _number_to_words_custom(hour)
    if hour == 0 and meridiem:
        hour_word = "twelve"
    minute_word = ""
    if minute > 0:
        # "oh five" for single-digit minutes, plain words otherwise.
        joiner = " oh " if minute < 10 else " "
        minute_word = joiner + _number_to_words_custom(minute)
    spoken = (hour_word + minute_word).lstrip()
    if second_s:
        spoken += f" and {_number_to_words_custom(int(second_s))} seconds"
    if meridiem:
        spoken += ' pm' if 'p' in meridiem.lower() else ' am'
    return spoken
176
+
177
+
178
def _expand_measurement(m):
    """Expand '5 kg' to 'five kilograms', honouring fractions and negatives."""
    quantity, unit = m.groups()
    negative = quantity.startswith('-')
    if negative:
        quantity = quantity[1:]
    if '/' in quantity:
        # Fractional quantities are always read with the plural unit.
        spoken = _expand_fraction(re.match(_fraction_re, quantity))
        plural = True
    else:
        spoken = _number_to_words_custom(quantity)
        plural = float(quantity) != 1
    unit_names = _measurement_map[unit]
    phrase = f"{spoken} {unit_names[1] if plural else unit_names[0]}"
    return f"minus {phrase}" if negative else phrase
191
+
192
+
193
def _expand_currency(m):
    """Expand '£4.50' / '4.50£' into words like 'four pounds and fifty pence'."""
    # Groups 1-2 are symbol-first matches, groups 3-4 are amount-first.
    if m.group(1):
        symbol, raw = m.group(1), m.group(2)
    else:
        symbol, raw = m.group(4), m.group(3)
    raw = (raw or "").replace(",", "")
    if raw.startswith('.'):
        raw = '0' + raw
    major_names = {"$": ("dollar", "dollars"), "£": ("pound", "pounds"), "€": ("euro", "euros")}
    minor_names = {"$": ("cent", "cents"), "£": ("penny", "pence"), "€": ("cent", "cents")}
    major_one, major_many = major_names.get(symbol, ("", ""))
    whole, _, frac = raw.partition('.')
    major_val = int(whole) if whole else 0
    # Pad the fractional part so ".5" is read as fifty, not five.
    minor_val = int(frac.ljust(2, '0')) if frac else 0
    pieces = []
    if major_val > 0:
        unit = major_one if major_val == 1 else major_many
        pieces.append(f"{_number_to_words_custom(major_val)} {unit}")
    if minor_val > 0:
        minor_one, minor_many = minor_names.get(symbol, ("", ""))
        unit = minor_one if minor_val == 1 else minor_many
        pieces.append(f"{_number_to_words_custom(minor_val)} {unit}")
    return " and ".join(pieces) or f"zero {major_many}"
210
+
211
+
212
def _expand_decimal_number(m):
    """Read '3.14' as 'three point one four'."""
    whole, frac = m.group(1).split('.')
    digit_words = ' '.join(_number_to_words_custom(d) for d in frac)
    return f"{_number_to_words_custom(whole)} point {digit_words}"
218
+
219
+
220
def _expand_date(m):
    """Expand an M/D/Y date into 'Month ordinal, year'."""
    month, day, year = m.groups()
    year_num = int(year)
    # Two-digit years: 00-49 map to 20xx, 50-99 to 19xx.
    if len(year) == 2:
        year_num += 2000 if year_num < 50 else 1900
    spoken_year = _expand_number_positive(str(year_num))
    return f"{month_name[int(month)]} {_ordinal_custom(day)}, {spoken_year}"
227
+
228
+
229
+ def _expand_fraction(m):
230
+ n, d = int(m.group(1)), int(m.group(2))
231
+ if d == 0: return m.group(0)
232
+ common_fractions = {(1, 2): "one half", (1, 4): "one quarter", (3, 4): "three quarters"}
233
+ if (n, d) in common_fractions: return common_fractions[(n, d)]
234
+ return f"{_number_to_words_custom(n)} over {_number_to_words_custom(d)}"
235
+
236
+
237
def _expand_ordinal_word(m):
    """Expand '21st' etc.: drop the two-letter suffix and speak the ordinal."""
    digits = m.group(0)[:-2]
    return _ordinal_custom(digits)
239
+
240
+
241
def _expand_number(m):
    """Speak a standalone integer, handling a leading minus sign."""
    token = m.group(0)
    if token.startswith('-'):
        return f"minus {_expand_number_positive(token[1:])}"
    return _expand_number_positive(token)
245
+
246
+
247
def _expand_number_positive(num_str):
    """Speak a non-negative integer, preferring year-style readings."""
    value = int(num_str)
    # 2000-2009 read as "two thousand and N".
    if 2000 <= value < 2010:
        return f"two thousand and {_number_to_words_custom(value % 100)}"
    # 1100-2099 (except round hundreds) read in pairs, e.g. "nineteen eighty".
    if 1100 <= value < 2100 and value % 100 != 0:
        return f"{_number_to_words_custom(value // 100)} {_number_to_words_custom(value % 100)}"
    return _number_to_words_custom(num_str)
253
+
254
+
255
+ def _expand_acronym(m): return " ".join(m.group(0))
256
+
257
+
258
def normalize(text):
    """Normalise English text for TTS.

    Strips diacritics, expands abbreviations, numbers, dates, times,
    currency, measurements and acronyms into words, then lower-cases and
    squeezes whitespace.

    NOTE: substitution order matters — the more specific patterns (currency
    with scale words, phone numbers, dimensions, dates…) must run before
    the generic number expansion at the end.
    """
    # Strip diacritics: decompose to NFD and drop combining marks.
    text = "".join(char for char in unicodedata.normalize("NFD", text) if unicodedata.category(char) != "Mn")
    text = re.sub(r"@", " at ", text)
    # Expand abbreviations such as "Dr." / "St." first so their dots survive.
    for regex, replacement in _abbreviations: text = regex.sub(replacement, text)
    text = re.sub(_currency_suffix_re, _expand_currency_suffix, text)
    text = re.sub(_phone_re, _expand_phone_number, text)
    text = re.sub(_dimension_re, _expand_dimension, text)
    text = re.sub(_roman_re, _expand_roman, text)
    text = re.sub(_decade_re, _expand_decade, text)
    text = re.sub(_score_re, _expand_score, text)
    text = re.sub(_date_re, _expand_date, text)
    text = re.sub(_time_re, _expand_time, text)
    text = re.sub(_ordinal_number_re, _convert_ordinal, text)
    text = re.sub(_comma_number_re, _remove_commas, text)
    text = re.sub(_currency_re, _expand_currency, text)
    text = re.sub(_measurement_re, _expand_measurement, text)
    text = re.sub(_fraction_re, _expand_fraction, text)
    text = re.sub(_decimal_number_re, _expand_decimal_number, text)
    text = re.sub(_ordinal_re, _expand_ordinal_word, text)
    text = re.sub(_alphanumeric_re, _expand_alphanumeric, text)
    text = re.sub(_acronym_re, _expand_acronym, text)
    text = re.sub(_number_re, _expand_number, text)
    text = text.lower()
    text = re.sub(r"%", " percent", text)
    # Read domain names as "example dot com"; loop handles multi-label hosts.
    domain_re = re.compile(r'\b([a-z0-9-]+)\.([a-z]{2,})\b')
    while domain_re.search(text): text = domain_re.sub(r'\1 dot \2', text)
    # Drop any character outside the synthesiser's supported set.
    text = re.sub(r"[^a-z0-9'.,?!:;-]", " ", text)
    text = re.sub(r"\s+", " ", text)
    return text.strip()
genie_tts/G2P/English/WordSegment.py ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import io
2
+ import math
3
+ import os
4
+ from typing import List, Iterator, Tuple, Dict
5
+
6
+ from ...Core.Resources import English_G2P_DIR
7
+
8
+
9
class WordSegmenter:
    """
    Contains the core logic for word segmentation, adapted from the original library.

    Candidate splits are scored with unigram/bigram frequency tables and the
    maximum-likelihood segmentation is returned. The hard-coded TOTAL is the
    token count of the corpus the tables were built from (presumably the
    corpus used by the `wordsegment` package — confirm against the data files).
    """
    # Characters kept by `_clean`; everything else is stripped before segmenting.
    ALPHABET = set('abcdefghijklmnopqrstuvwxyz0123456789')
    # Total token count of the source corpus, used to normalise counts.
    TOTAL = 1024908267229.0
    # Maximum candidate word length considered by `_divide`.
    LIMIT = 24

    def __init__(self):
        # word -> count tables and the raw vocabulary list; populated by `load`.
        self.unigrams: Dict[str, float] = {}
        self.bigrams: Dict[str, float] = {}
        self.words: List[str] = []
        self.total: float = 0.0

    def load(self, data_directory: str):
        """
        Load unigram, bigram, and word counts from the specified data directory.
        This is the main modification from the original library.

        Raises:
            FileNotFoundError: if any of the three expected data files is missing.
        """
        unigrams_path = os.path.join(data_directory, 'unigrams.txt')
        bigrams_path = os.path.join(data_directory, 'bigrams.txt')
        words_path = os.path.join(data_directory, 'words.txt')

        for file_path in [unigrams_path, bigrams_path, words_path]:
            if not os.path.exists(file_path):
                raise FileNotFoundError(
                    f"Word segmentation data file not found: {file_path}. "
                    "Please ensure the data directory is correct."
                )

        self.unigrams.update(self._parse(unigrams_path))
        self.bigrams.update(self._parse(bigrams_path))
        with io.open(words_path, encoding='utf-8') as reader:
            self.words.extend(reader.read().splitlines())

        self.total = self.TOTAL

    @staticmethod
    def _parse(filename: str) -> Dict[str, float]:
        """Read `filename` and parse tab-separated file of word and count pairs."""
        with io.open(filename, encoding='utf-8') as reader:
            # Lazily split each line; skip malformed rows with empty fields.
            lines = (line.split('\t') for line in reader)
            return {word: float(number) for word, number in lines if len(word) > 0 and len(number) > 0}

    def score(self, word: str, previous: str = None) -> float:
        """Score `word` in the context of `previous` word.

        Returns a smoothed probability; unknown words receive a penalty that
        shrinks exponentially with their length.
        """
        if previous is None:
            if word in self.unigrams:
                return self.unigrams[word] / self.total
            # Out-of-vocabulary smoothing: longer unknown words are less likely.
            return 10.0 / (self.total * 10 ** len(word))

        bigram = f'{previous} {word}'
        if bigram in self.bigrams and previous in self.unigrams:
            # Conditional probability derived from the bigram count,
            # normalised by the unigram score of `previous`.
            return self.bigrams[bigram] / self.total / self.score(previous)

        # Fall back to the context-free unigram score.
        return self.score(word)

    def isegment(self, text: str) -> Iterator[str]:
        """Return iterator of words that is the best segmentation of `text`."""
        # Memoises (suffix, previous-word) -> (score, words) for the search below.
        memo = {}

        def search(text: str, previous: str = '<s>') -> Tuple[float, List[str]]:
            # Best (log10-probability, word list) segmentation of `text`.
            if text == '':
                return 0.0, []

            def candidates() -> Iterator[Tuple[float, List[str]]]:
                for prefix, suffix in self._divide(text):
                    prefix_score = math.log10(self.score(prefix, previous))

                    pair = (suffix, prefix)
                    if pair not in memo:
                        memo[pair] = search(suffix, prefix)
                    suffix_score, suffix_words = memo[pair]

                    yield prefix_score + suffix_score, [prefix] + suffix_words

            return max(candidates())

        clean_text = self._clean(text)

        # Original logic to avoid recursion limits by chunking
        size = 250
        prefix = ''
        if len(clean_text) > size:
            for offset in range(0, len(clean_text), size):
                chunk = clean_text[offset:(offset + size)]
                _, chunk_words = search(prefix + chunk)

                # Carry the last few words into the next chunk so a word cut
                # at the chunk boundary can be re-segmented with more context.
                if len(chunk_words) > 5:
                    prefix = ''.join(chunk_words[-5:])
                    del chunk_words[-5:]
                else:  # handle case where chunk is small
                    prefix = ''.join(chunk_words)
                    chunk_words = []

                for word in chunk_words:
                    yield word

            _, prefix_words = search(prefix)
            for word in prefix_words:
                yield word
        else:
            _, words = search(clean_text)
            for word in words:
                yield word

    def segment(self, text: str) -> List[str]:
        """Return list of words that is the best segmentation of `text`."""
        return list(self.isegment(text))

    def _divide(self, text: str) -> Iterator[Tuple[str, str]]:
        """Yield `(prefix, suffix)` pairs from `text`, prefixes up to LIMIT chars."""
        for pos in range(1, min(len(text), self.LIMIT) + 1):
            yield text[:pos], text[pos:]

    @classmethod
    def _clean(cls, text: str) -> str:
        """Return `text` lower-cased with non-alphanumeric characters removed."""
        text_lower = text.lower()
        return ''.join(letter for letter in text_lower if letter in cls.ALPHABET)
130
+
131
+
132
# --- Public Interface ---
# Create a single instance to be used by the importing module.
# Loading reads the unigram/bigram tables once, at import time.

_segmenter = WordSegmenter()
_segmenter.load(os.path.join(English_G2P_DIR, 'wordsegment'))
137
+
138
+
139
def segment_text(text: str) -> List[str]:
    """
    Public function to segment a text string into a list of words.
    """
    return list(_segmenter.isegment(text))
genie_tts/G2P/English/__init__.py ADDED
File without changes
genie_tts/G2P/Japanese/JapaneseG2P.py ADDED
@@ -0,0 +1,150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# -*- coding: utf-8 -*-
"""
G2P (grapheme-to-phoneme) for pure Japanese text.
"""
import re
import pyopenjtalk
from typing import List
from ..SymbolsV2 import symbols_v2, symbol_to_id_v2

# Collapses runs of the same punctuation mark into a single one.
_CONSECUTIVE_PUNCTUATION_RE = re.compile(r"([,./?!~…・])\1+")

# Symbols rewritten to their spoken Japanese reading.
# NOTE(review): the two entries appear to target full-width vs ASCII percent
# signs; if they are in fact identical the second is redundant — confirm.
_SYMBOLS_TO_JAPANESE = [
    (re.compile("%"), "パーセント"),
    (re.compile("%"), "パーセント"),
]

# Matches Japanese characters (kanji, kana, full-width alphanumerics, etc.).
_JAPANESE_CHARACTERS_RE = re.compile(
    r"[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]"
)

# Matches everything else (punctuation, whitespace, …), used to split segments.
_JAPANESE_MARKS_RE = re.compile(
    r"[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]"
)
+
29
+
30
class JapaneseG2P:
    """
    A simplified, self-contained Japanese grapheme-to-phoneme (G2P) converter.

    This version assumes the pyopenjtalk library is installed and uses no
    user dictionaries; it provides a pure, efficient text-to-phoneme interface.
    """

    @staticmethod
    def _text_normalize(text: str) -> str:
        """Apply basic normalization to the input text."""
        for regex, replacement in _SYMBOLS_TO_JAPANESE:
            text = re.sub(regex, replacement, text)
        # Collapse repeated punctuation ("!!" -> "!").
        text = _CONSECUTIVE_PUNCTUATION_RE.sub(r"\1", text)
        text = text.lower()
        return text

    @staticmethod
    def _post_replace_phoneme(ph: str) -> str:
        """Map a single phoneme/punctuation token to its canonical form."""
        rep_map = {
            ":": ",", ";": ",", ",": ",", "。": ".",
            "!": "!", "?": "?", "\n": ".", "·": ",",
            "、": ",", "...": "…",
        }
        return rep_map.get(ph, ph)

    @staticmethod
    def _numeric_feature_by_regex(regex: str, s: str) -> int:
        """Extract a numeric feature from an OpenJTalk full-context label.

        Returns -50 when the feature is absent so later comparisons fail.
        """
        match = re.search(regex, s)
        return int(match.group(1)) if match else -50

    @staticmethod
    def _pyopenjtalk_g2p_prosody(text: str) -> List[str]:
        """Extract phonemes plus prosody symbols (^ $ ? _ # [ ]) via pyopenjtalk."""
        labels = pyopenjtalk.make_label(pyopenjtalk.run_frontend(text))
        phones = []
        for n, lab_curr in enumerate(labels):
            # Current phoneme is the "-<p>+" field of the full-context label.
            p3 = re.search(r"-(.*?)\+", lab_curr).group(1)
            if p3 in "AEIOU":
                # Devoiced vowels are upper-cased by OpenJTalk; restore lower case.
                p3 = p3.lower()

            if p3 == "sil":
                if n == 0:
                    phones.append("^")  # utterance start
                elif n == len(labels) - 1:
                    # "?" marks question intonation (e3 feature == 1), "$" a statement end.
                    e3 = JapaneseG2P._numeric_feature_by_regex(r"!(\d+)_", lab_curr)
                    phones.append("?" if e3 == 1 else "$")
                continue
            elif p3 == "pau":
                phones.append("_")  # pause
                continue
            else:
                phones.append(p3)

            # Accent-phrase features used to place pitch symbols.
            a1 = JapaneseG2P._numeric_feature_by_regex(r"/A:([0-9\-]+)\+", lab_curr)
            a2 = JapaneseG2P._numeric_feature_by_regex(r"\+(\d+)\+", lab_curr)
            a3 = JapaneseG2P._numeric_feature_by_regex(r"\+(\d+)/", lab_curr)
            f1 = JapaneseG2P._numeric_feature_by_regex(r"/F:(\d+)_", lab_curr)
            lab_next = labels[n + 1] if n + 1 < len(labels) else ""
            a2_next = JapaneseG2P._numeric_feature_by_regex(r"\+(\d+)\+", lab_next)

            if a3 == 1 and a2_next == 1 and p3 in "aeiouAEIOUNcl":
                phones.append("#")  # accent-phrase boundary
            elif a1 == 0 and a2_next == a2 + 1 and a2 != f1:
                phones.append("]")  # pitch fall
            elif a2 == 1 and a2_next == 2:
                phones.append("[")  # pitch rise

        return phones

    @staticmethod
    def g2p(text: str, with_prosody: bool = True) -> List[str]:
        """
        Convert Japanese text into a phoneme sequence.

        Args:
            text (str): The Japanese text to convert.
            with_prosody (bool): Whether to include prosody symbols. Defaults to True.

        Returns:
            List[str]: List of phonemes and symbols.
        """
        if not text.strip():
            return []

        # 1. Normalize the text.
        norm_text = JapaneseG2P._text_normalize(text)

        # 2. Split on punctuation into Japanese segments; keep the marks.
        japanese_segments = _JAPANESE_MARKS_RE.split(norm_text)
        punctuation_marks = _JAPANESE_MARKS_RE.findall(norm_text)

        phonemes = []
        for i, segment in enumerate(japanese_segments):
            if segment:
                if with_prosody:  # drop the leading "^" and trailing "$"/"?" markers, since segments are processed independently
                    phones = JapaneseG2P._pyopenjtalk_g2p_prosody(segment)[1:-1]
                else:
                    phones = pyopenjtalk.g2p(segment).split(" ")
                phonemes.extend(phones)

            # Append the punctuation mark that followed this segment.
            if i < len(punctuation_marks):
                mark = punctuation_marks[i].strip()
                if mark:
                    phonemes.append(mark)

        # 3. Post-process each token (mainly maps full-width punctuation).
        processed_phonemes = [JapaneseG2P._post_replace_phoneme(p) for p in phonemes]

        return processed_phonemes
143
+
144
+
145
def japanese_to_phones(text: str) -> List[int]:
    """Convert Japanese text to a list of phoneme symbol IDs.

    Phonemes not present in the shared symbol table are silently dropped.
    """
    phones = JapaneseG2P.g2p(text)
    # Membership test against the dict (same key set as `symbols_v2`) is O(1)
    # per phoneme, unlike scanning the symbol list.
    return [symbol_to_id_v2[ph] for ph in phones if ph in symbol_to_id_v2]
genie_tts/G2P/Japanese/__init__.py ADDED
File without changes
genie_tts/G2P/SymbolsV2.py ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# -*- coding: utf-8 -*-

from typing import List, Dict

# -------------------------
# Base symbol set definitions
# -------------------------

# Punctuation and special separator tokens.
PUNCTUATION = ["!", "?", "…", ",", "."]
PUNCTUATION_SYMBOLS = ["!", "?", "…", ",", ".", "-", "SP", "SP2", "SP3", "UNK"]

# Mandarin Chinese (pinyin) symbols.
# Initials (syllable onsets).
PINYIN_INITIALS = [
    "AA", "EE", "OO", "b", "c", "ch", "d", "f", "g", "h", "j", "k", "l",
    "m", "n", "p", "q", "r", "s", "sh", "t", "w", "x", "y", "z", "zh",
]
# Base finals (without tone numbers; tones are appended programmatically).
PINYIN_FINALS_BASE = [
    "E", "En", "a", "ai", "an", "ang", "ao", "e", "ei", "en", "eng", "er",
    "i", "i0", "ia", "ian", "iang", "iao", "ie", "in", "ing", "iong",
    "ir", "iu", "o", "ong", "ou", "u", "ua", "uai", "uan", "uang", "ui",
    "un", "uo", "v", "van", "ve", "vn",
]

# Japanese (romaji) symbols.
JAPANESE_SYMBOLS = [
    "I", "N", "U", "a", "b", "by", "ch", "cl", "d", "dy", "e", "f", "g",
    "gy", "h", "hy", "i", "j", "k", "ky", "m", "my", "n", "ny", "o", "p",
    "py", "r", "ry", "s", "sh", "t", "ts", "u", "v", "w", "y", "z",
]

# English (ARPAbet) symbols.
ARPABET_SYMBOLS = {
    "AH0", "S", "AH1", "EY2", "AE2", "EH0", "OW2", "UH0", "NG", "B", "G",
    "AY0", "M", "AA0", "F", "AO0", "ER2", "UH1", "IY1", "AH2", "DH", "IY0",
    "EY1", "IH0", "K", "N", "W", "IY2", "T", "AA1", "ER1", "EH2", "OY0",
    "UH2", "UW1", "Z", "AW2", "AW1", "V", "UW2", "AA2", "ER", "AW0",
    "UW0", "R", "OW1", "EH1", "ZH", "AE0", "IH2", "IH", "Y", "JH", "P",
    "AY1", "EY0", "OY2", "TH", "HH", "D", "ER0", "CH", "AO1", "AE1",
    "AO2", "OY1", "AY2", "IH1", "OW0", "L", "SH",
}

# Korean (hangul jamo) symbols, stored as a single string.
KOREAN_SYMBOLS = "ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㄲㄸㅃㅆㅉㅏㅓㅗㅜㅡㅣㅐㅔ空停"

# Cantonese (jyutping-derived) symbols.
CANTONESE_SYMBOLS = {
    "Yeot3", "Yip1", "Yyu3", "Yeng4", "Yut5", "Yaan5", "Ym5", "Yaan6", "Yang1", "Yun4",
    "Yon2", "Yui5", "Yun2", "Yat3", "Ye", "Yeot1", "Yoeng5", "Yoek2", "Yam2", "Yeon6",
    "Yu6", "Yiu3", "Yaang6", "Yp5", "Yai4", "Yoek4", "Yit6", "Yam5", "Yoeng6", "Yg1",
    "Yk3", "Yoe4", "Yam3", "Yc", "Yyu4", "Yyut1", "Yiu4", "Ying3", "Yip3", "Yaap3",
    "Yau3", "Yan4", "Yau1", "Yap4", "Yk6", "Yok3", "Yai1", "Yeot6", "Yan2", "Yoek6",
    "Yt1", "Yoi1", "Yit5", "Yn4", "Yaau3", "Yau4", "Yuk6", "Ys", "Yuk", "Yin6",
    "Yung6", "Ya", "You", "Yaai5", "Yau5", "Yoi3", "Yaak3", "Yaat3", "Ying2", "Yok5",
    "Yeng2", "Yyut3", "Yam1", "Yip5", "You1", "Yam6", "Yaa5", "Yi6", "Yek4", "Yyu2",
    "Yuk5", "Yaam1", "Yang2", "Yai", "Yiu6", "Yin4", "Yok4", "Yot3", "Yui2", "Yeoi5",
    "Yyun6", "Yyu5", "Yoi5", "Yeot2", "Yim4", "Yeoi2", "Yaan1", "Yang6", "Yong1", "Yaang4",
    "Yung5", "Yeon1", "Yin2", "Ya3", "Yaang3", "Yg", "Yk2", "Yaau5", "Yut1", "Yt5",
    "Yip4", "Yung4", "Yj", "Yong3", "Ya1", "Yg6", "Yaau6", "Yit3", "Yun3", "Ying1",
    "Yn2", "Yg4", "Yl", "Yp3", "Yn3", "Yak1", "Yang5", "Yoe6", "You2", "Yap2",
    "Yak2", "Yt3", "Yot5", "Yim2", "Yi1", "Yn6", "Yaat5", "Yaam3", "Yoek5", "Ye3",
    "Yeon4", "Yaa2", "Yu3", "Yim6", "Ym", "Yoe3", "Yaai2", "Ym2", "Ya6", "Yeng6",
    "Yik4", "Yot4", "Yaai4", "Yyun3", "Yu1", "Yoeng1", "Yaap2", "Yuk3", "Yoek3", "Yeng5",
    "Yeoi1", "Yiu2", "Yok1", "Yo1", "Yoek1", "Yoeng2", "Yeon5", "Yiu1", "Yoeng4", "Yuk2",
    "Yat4", "Yg5", "Yut4", "Yan6", "Yin3", "Yaa6", "Yap1", "Yg2", "Yoe5", "Yt4",
    "Ya5", "Yo4", "Yyu1", "Yak3", "Yeon2", "Yong4", "Ym1", "Ye2", "Yaang5", "Yoi2",
    "Yeng3", "Yn", "Yyut4", "Yau", "Yaak2", "Yaan4", "Yek2", "Yin1", "Yi5", "Yoe2",
    "Yei5", "Yaat6", "Yak5", "Yp6", "Yok6", "Yei2", "Yaap1", "Yyut5", "Yi4", "Yim1",
    "Yk5", "Ye4", "Yok2", "Yaam6", "Yat2", "Yon6", "Yei3", "Yyu6", "Yeot5", "Yk4",
    "Yai6", "Yd", "Yg3", "Yei6", "Yau2", "Yok", "Yau6", "Yung3", "Yim5", "Yut6",
    "Yit1", "Yon3", "Yat1", "Yaam2", "Yyut2", "Yui6", "Yt2", "Yek6", "Yt", "Ye6",
    "Yang3", "Ying6", "Yaau1", "Yeon3", "Yng", "Yh", "Yang4", "Ying5", "Yaap6", "Yoeng3",
    "Yyun4", "You3", "Yan5", "Yat5", "Yot1", "Yun1", "Yi3", "Yaa1", "Yaap4", "You6",
    "Yaang2", "Yaap5", "Yaa3", "Yaak6", "Yeng1", "Yaak1", "Yo5", "Yoi4", "Yam4", "Yik1",
    "Ye1", "Yai5", "Yung1", "Yp2", "Yui4", "Yaak4", "Yung2", "Yak4", "Yaat4", "Yeoi4",
    "Yut2", "Yin5", "Yaau4", "Yap6", "Yb", "Yaam4", "Yw", "Yut3", "Yong2", "Yt6",
    "Yaai6", "Yap5", "Yik5", "Yun6", "Yaam5", "Yun5", "Yik3", "Ya2", "Yyut6", "Yon4",
    "Yk1", "Yit4", "Yak6", "Yaan2", "Yuk1", "Yai2", "Yik2", "Yaat2", "Yo3", "Ykw",
    "Yn5", "Yaa", "Ye5", "Yu4", "Yei1", "Yai3", "Yyun5", "Yip2", "Yaau2", "Yiu5",
    "Ym4", "Yeoi6", "Yk", "Ym6", "Yoe1", "Yeoi3", "Yon", "Yuk4", "Yaai3", "Yaa4",
    "Yot6", "Yaang1", "Yei4", "Yek1", "Yo", "Yp", "Yo6", "Yp4", "Yan3", "Yoi",
    "Yap3", "Yek3", "Yim3", "Yz", "Yot2", "Yoi6", "Yit2", "Yu5", "Yaan3", "Yan1",
    "Yon5", "Yp1", "Yong5", "Ygw", "Yak", "Yat6", "Ying4", "Yu2", "Yf", "Ya4",
    "Yon1", "You4", "Yik6", "Yui1", "Yaat1", "Yeot4", "Yi2", "Yaai1", "Yek5", "Ym3",
    "Yong6", "You5", "Yyun1", "Yn1", "Yo2", "Yip6", "Yui3", "Yaak5", "Yyun2"
}
89
+
90
+
91
+ def _generate_pinyin_finals_with_tones(base_finals, num_tones=5):
92
+ """根据基础韵母和声调数量,自动生成带声调的韵母列表。"""
93
+ finals_with_tones = []
94
+ for tone in range(1, num_tones + 1):
95
+ for final in base_finals:
96
+ finals_with_tones.append(f"{final}{tone}")
97
+ return finals_with_tones
98
+
99
+
100
def create_master_symbol_list():
    """Assemble the full, ordered symbol table shared by all languages.

    Order is significant: the sorted core set comes first, then "[" and "]",
    then the sorted Korean and Cantonese symbols.
    """
    tonal_finals = _generate_pinyin_finals_with_tones(PINYIN_FINALS_BASE)

    core = {"_"}  # underscore/padding symbol
    for group in (PINYIN_INITIALS, tonal_finals, JAPANESE_SYMBOLS,
                  PUNCTUATION_SYMBOLS, ARPABET_SYMBOLS):
        core.update(group)

    ordered = sorted(core)
    ordered += ["[", "]"]
    ordered += sorted(KOREAN_SYMBOLS)
    ordered += sorted(CANTONESE_SYMBOLS)
    return ordered
116
+
117
+
118
# Canonical symbol table (index == model token ID) and its inverse lookup.
symbols_v2: List[str] = create_master_symbol_list()
symbol_to_id_v2: Dict[str, int] = {s: i for i, s in enumerate(symbols_v2)}
genie_tts/G2P/__init__.py ADDED
File without changes
genie_tts/GUI/AudioPlayer.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import queue
import threading
from functools import wraps
from typing import Callable, Optional, Union

import numpy as np
import sounddevice as sd
import soundfile as sf
8
+
9
+
10
def run_in_sub_thread(func) -> Callable[..., threading.Thread]:
    """Decorator: run the wrapped function in a daemon thread.

    The decorated callable returns the started ``threading.Thread`` so the
    caller can ``join`` it if needed.
    """
    @wraps(func)  # preserve the wrapped function's name/docstring for debugging
    def wrapper(*args, **kwargs) -> threading.Thread:
        thread = threading.Thread(target=func, args=args, kwargs=kwargs)
        thread.daemon = True
        thread.start()
        return thread

    return wrapper
+
19
+
20
class AudioPlayer:
    """Queue-based audio file player running on a background daemon thread.

    File paths pushed with :meth:`play` are streamed to the default output
    device in small chunks so playback can be interrupted promptly via
    :meth:`stop`.
    """

    # Frames read from the file per write to the output stream.
    CHUNK_SIZE: int = 1024

    def __init__(self):
        # NOTE(review): `play` accepts ndarrays, but the worker only handles
        # file-path tasks; ndarray tasks are dequeued and ignored — confirm intent.
        self._task_queue: "queue.Queue[Union[str, np.ndarray]]" = queue.Queue()
        self._worker_thread: Optional[threading.Thread] = None
        self._stop_event: threading.Event = threading.Event()
        self._start_worker()

    def _start_worker(self):
        """Start the worker thread (if it is not already running)."""
        if self._worker_thread and self._worker_thread.is_alive():
            return
        self._stop_event.clear()
        self._worker_thread = self._playback_worker()

    @run_in_sub_thread
    def _playback_worker(self) -> None:
        """Worker loop: pop tasks and play them until the stop event is set."""
        while not self._stop_event.is_set():
            try:
                task = self._task_queue.get(timeout=0.1)
            except queue.Empty:
                continue

            stream = None
            try:
                if isinstance(task, str) and os.path.isfile(task):
                    with sf.SoundFile(task, 'r') as f:
                        # Only attempt playback when an output backend exists;
                        # previously the read/write loop ran even with sd=None
                        # and raised AttributeError on the first chunk.
                        if sd is not None:
                            stream = sd.OutputStream(
                                samplerate=f.samplerate,
                                channels=f.channels,
                                dtype='float32',
                            )
                            stream.start()
                            while not self._stop_event.is_set():
                                chunk = f.read(self.CHUNK_SIZE, dtype='float32')
                                # BUG FIX: `not chunk.any()` treated an
                                # all-zero (silent) chunk as end-of-file and
                                # cut playback short; stop only on a truly
                                # empty read (EOF).
                                if len(chunk) == 0:
                                    break
                                stream.write(chunk)
            except Exception as e:
                if isinstance(e, sf.SoundFileError):
                    print(f"无法读取或解析音频文件: {task}, 错误: {e}")
                else:
                    print(f"播放时发生错误: {e}")
            finally:
                if stream:
                    stream.stop()
                    stream.close()
            self._task_queue.task_done()

    def play(self, source: Union[str, np.ndarray]):
        """Enqueue an audio source for playback."""
        self._start_worker()
        self._task_queue.put(source)

    def stop(self):
        """Stop playback and clear the playback queue."""
        self._stop_event.set()
        if self._worker_thread and self._worker_thread.is_alive():
            self._worker_thread.join()
        self._stop_event.clear()

        # Drop pending tasks, then balance `unfinished_tasks` so `wait()`
        # does not block forever on tasks that will never run. task_done()
        # must be called outside the mutex (it re-acquires it internally).
        with self._task_queue.mutex:
            self._task_queue.queue.clear()
        while self._task_queue.unfinished_tasks > 0:
            self._task_queue.task_done()

    def wait(self):
        """Block until every queued task has finished playing."""
        self._task_queue.join()

    def close(self):
        """Permanently shut the player down and release resources."""
        self.stop()