Spaces:
Runtime error
Runtime error
Migrasi penuh dari Blue-Archive/MOE-TTS via Colab
Browse files. This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
- .gitattributes +11 -0
- app.py +227 -0
- attentions.py +300 -0
- chinese_dialect_lexicons/changzhou.json +23 -0
- chinese_dialect_lexicons/changzhou.ocd2 +0 -0
- chinese_dialect_lexicons/cixi.json +23 -0
- chinese_dialect_lexicons/cixi.ocd2 +0 -0
- chinese_dialect_lexicons/fuyang.json +23 -0
- chinese_dialect_lexicons/fuyang.ocd2 +0 -0
- chinese_dialect_lexicons/hangzhou.json +19 -0
- chinese_dialect_lexicons/hangzhou.ocd2 +3 -0
- chinese_dialect_lexicons/jiading.json +23 -0
- chinese_dialect_lexicons/jiading.ocd2 +3 -0
- chinese_dialect_lexicons/jiashan.json +23 -0
- chinese_dialect_lexicons/jiashan.ocd2 +0 -0
- chinese_dialect_lexicons/jingjiang.json +23 -0
- chinese_dialect_lexicons/jingjiang.ocd2 +0 -0
- chinese_dialect_lexicons/jyutjyu.json +19 -0
- chinese_dialect_lexicons/jyutjyu.ocd2 +3 -0
- chinese_dialect_lexicons/linping.json +23 -0
- chinese_dialect_lexicons/linping.ocd2 +0 -0
- chinese_dialect_lexicons/ningbo.json +19 -0
- chinese_dialect_lexicons/ningbo.ocd2 +3 -0
- chinese_dialect_lexicons/pinghu.json +23 -0
- chinese_dialect_lexicons/pinghu.ocd2 +0 -0
- chinese_dialect_lexicons/ruao.json +23 -0
- chinese_dialect_lexicons/ruao.ocd2 +0 -0
- chinese_dialect_lexicons/sanmen.json +23 -0
- chinese_dialect_lexicons/sanmen.ocd2 +0 -0
- chinese_dialect_lexicons/shaoxing.json +23 -0
- chinese_dialect_lexicons/shaoxing.ocd2 +3 -0
- chinese_dialect_lexicons/suichang.json +23 -0
- chinese_dialect_lexicons/suichang.ocd2 +0 -0
- chinese_dialect_lexicons/suzhou.json +19 -0
- chinese_dialect_lexicons/suzhou.ocd2 +3 -0
- chinese_dialect_lexicons/tiantai.json +23 -0
- chinese_dialect_lexicons/tiantai.ocd2 +3 -0
- chinese_dialect_lexicons/tongxiang.json +23 -0
- chinese_dialect_lexicons/tongxiang.ocd2 +3 -0
- chinese_dialect_lexicons/wenzhou.json +23 -0
- chinese_dialect_lexicons/wenzhou.ocd2 +0 -0
- chinese_dialect_lexicons/wuxi.json +19 -0
- chinese_dialect_lexicons/wuxi.ocd2 +3 -0
- chinese_dialect_lexicons/xiaoshan.json +23 -0
- chinese_dialect_lexicons/xiaoshan.ocd2 +0 -0
- chinese_dialect_lexicons/xiashi.json +19 -0
- chinese_dialect_lexicons/xiashi.ocd2 +0 -0
- chinese_dialect_lexicons/yixing.json +19 -0
- chinese_dialect_lexicons/yixing.ocd2 +3 -0
- chinese_dialect_lexicons/youbu.json +23 -0
.gitattributes
CHANGED
|
@@ -33,3 +33,14 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
chinese_dialect_lexicons/hangzhou.ocd2 filter=lfs diff=lfs merge=lfs -text
|
| 37 |
+
chinese_dialect_lexicons/jiading.ocd2 filter=lfs diff=lfs merge=lfs -text
|
| 38 |
+
chinese_dialect_lexicons/jyutjyu.ocd2 filter=lfs diff=lfs merge=lfs -text
|
| 39 |
+
chinese_dialect_lexicons/ningbo.ocd2 filter=lfs diff=lfs merge=lfs -text
|
| 40 |
+
chinese_dialect_lexicons/shaoxing.ocd2 filter=lfs diff=lfs merge=lfs -text
|
| 41 |
+
chinese_dialect_lexicons/suzhou.ocd2 filter=lfs diff=lfs merge=lfs -text
|
| 42 |
+
chinese_dialect_lexicons/tiantai.ocd2 filter=lfs diff=lfs merge=lfs -text
|
| 43 |
+
chinese_dialect_lexicons/tongxiang.ocd2 filter=lfs diff=lfs merge=lfs -text
|
| 44 |
+
chinese_dialect_lexicons/wuxi.ocd2 filter=lfs diff=lfs merge=lfs -text
|
| 45 |
+
chinese_dialect_lexicons/yixing.ocd2 filter=lfs diff=lfs merge=lfs -text
|
| 46 |
+
chinese_dialect_lexicons/zaonhe.ocd2 filter=lfs diff=lfs merge=lfs -text
|
app.py
ADDED
|
@@ -0,0 +1,227 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
import os
|
| 3 |
+
import torch
|
| 4 |
+
import commons
|
| 5 |
+
import utils
|
| 6 |
+
from models import SynthesizerTrn
|
| 7 |
+
import numpy as np
|
| 8 |
+
import json
|
| 9 |
+
import shutil
|
| 10 |
+
import logging
|
| 11 |
+
import random
|
| 12 |
+
import re
|
| 13 |
+
from huggingface_hub import snapshot_download
|
| 14 |
+
|
| 15 |
+
# --- 1. SETUP LOGGING ---
# Module-wide logger: INFO level, timestamped records.
_LOG_FORMAT = '%(asctime)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=_LOG_FORMAT)
logger = logging.getLogger(__name__)
|
| 18 |
+
|
| 19 |
+
# --- 2. ROMAJI CONVERTER ---
# Render Japanese speaker names as capitalized Hepburn romaji via pykakasi.
# Fix: the original used bare `except:` clauses, which also swallow
# SystemExit/KeyboardInterrupt; narrowed to `Exception` with identical
# fallback behavior.
try:
    import pykakasi

    kks = pykakasi.kakasi()

    def to_romaji(text):
        """Return *text* as capitalized Hepburn romaji ('' for empty/"None")."""
        if not text or text == "None":
            return ""
        try:
            converted = kks.convert(str(text))
            return "".join(item['hepburn'].capitalize() for item in converted)
        except Exception:
            # Conversion failure: fall back to the raw string.
            return str(text)
except Exception:
    # pykakasi missing or failed to initialize: identity fallback.
    def to_romaji(text):
        """Fallback converter: plain str() of the input."""
        return str(text)
|
| 32 |
+
|
| 33 |
+
# --- 3. CLEAN HARDCODED INFO ---
# Static metadata per model id: display title, example input text, and
# engine type ("vits" for text-to-speech, "soft-vits-vc" for voice
# conversion). Keys are stringified model-directory indices.
CLEAN_INFO = {
    # Plain Japanese VITS models.
    "0": {"title": "Sanoba Witch & Senren Banka", "example": "こんにちは。", "type": "vits"},
    "1": {"title": "Hamidashi Creative", "example": "こんにちは。", "type": "vits"},
    "2": {"title": "Cafe Stella & Shinigami no Chou", "example": "こんにちは。", "type": "vits"},
    "3": {"title": "Yosuga no Sora", "example": "こんにちは。", "type": "vits"},
    "4": {"title": "Bishoujo Mangekyou", "example": "こんにちは。", "type": "vits"},
    "5": {"title": "Nene & Nanami Pack (Multi)", "example": "[JA]こんにちは。[JA]", "type": "vits"},
    "6": {"title": "The Fox Waiting for You", "example": "안녕하세요.", "type": "vits"},
    "7": {"title": "Galgame Characters Pack (13)", "example": "こんにちは。", "type": "vits"},
    "8": {"title": "Zero no Tsukaima", "example": "こんにちは。", "type": "vits"},
    # Voice-conversion models (no text example).
    "9": {"title": "Zero no Tsukaima (VC Mode)", "example": "", "type": "soft-vits-vc"},
    "10": {"title": "Toaru Majutsu no Index (VC)", "example": "", "type": "soft-vits-vc"},
    "11": {"title": "Shiki Natsume (VC Mode)", "example": "", "type": "soft-vits-vc"},
    "12": {"title": "DRACU-RIOT!", "example": "こんにちは。", "type": "vits"},
    "13": {"title": "To LOVE-Ru Series", "example": "こんにちは。", "type": "vits"},
    "14": {"title": "CJKS Multi-Language", "example": "[JA]こんにちは。[JA]", "type": "vits"},
    "15": {"title": "Voistock Mega Pack (2891 Chars)", "example": "[JA]こんにちは。[JA]", "type": "vits"},
    "16": {"title": "Shanghainese Dialect", "example": "侬好!", "type": "vits"},
    "17": {"title": "Chinese Dialects Pack", "example": "[SH]侬好![SH]", "type": "vits"},
    "18": {"title": "Umamusume: Pretty Derby", "example": "こんにちは。", "type": "vits"},
    "19": {"title": "Princess Connect! Re:Dive", "example": "[JA]こんにちは。[JA]", "type": "vits"},
    "20": {"title": "Magia Record (Madoka Magica)", "example": "こんにちは。", "type": "vits"},
}
|
| 57 |
+
|
| 58 |
+
# --- 4. DOWNLOAD ASSETS ---
REPO_ID = "Plana-Archive/Plana-TTS"
LOCAL_ROOT = "saved_model"


def download_assets():
    """Fetch the MOE-TTS saved_model tree from the Hugging Face Hub.

    Skipped when `saved_model/0` already exists. Downloads into a
    temporary directory, copies the model tree into LOCAL_ROOT, then
    removes the temp directory. Failures are logged and swallowed so
    the app can still start without assets.
    """
    os.makedirs(LOCAL_ROOT, exist_ok=True)
    if os.path.exists(os.path.join(LOCAL_ROOT, "0")):
        return
    try:
        logger.info("Downloading Assets...")
        snapshot_download(repo_id=REPO_ID, local_dir="temp_dir",
                          allow_patterns=["MOE-TTS/saved_model/*"])
        src_path = os.path.join("temp_dir", "MOE-TTS", "saved_model")
        if os.path.exists(src_path):
            shutil.copytree(src_path, LOCAL_ROOT, dirs_exist_ok=True)
        # NOTE(review): source indentation was lost — rmtree may originally
        # have been inside the `if`; cleaning up unconditionally is safe
        # either way since the snapshot was downloaded into temp_dir.
        shutil.rmtree("temp_dir")
    except Exception as e:
        logger.error(f"Download error: {e}")


download_assets()
|
| 76 |
+
|
| 77 |
+
# --- 5. ENGINE LOAD MODEL ---
# Cache of already-initialized models, keyed by stringified model id.
loaded_models = {}


def clean_config(conf):
    """Return a deep copy of *conf* with every dict key coerced to str.

    Recurses through nested dicts and lists; all other values are
    returned unchanged.
    """
    if isinstance(conf, dict):
        return {str(key): clean_config(value) for key, value in conf.items()}
    if isinstance(conf, list):
        return [clean_config(item) for item in conf]
    return conf
|
| 84 |
+
|
| 85 |
+
def get_vits_model(m_id):
    """Load (and memoize in `loaded_models`) the VITS model for *m_id*.

    Returns a tuple ``(hps, net, display_spks)`` — hyperparameters,
    the eval-mode SynthesizerTrn network, and romaji speaker names —
    or None when the config file is missing or loading fails.
    """
    mid = str(m_id)
    if mid in loaded_models:
        return loaded_models[mid]
    try:
        model_dir = os.path.join(LOCAL_ROOT, mid)
        cfg_path = os.path.join(model_dir, "config.json")
        if not os.path.exists(cfg_path):
            return None
        hps = utils.get_hparams_from_file(cfg_path)
        # Some configs expose the model section as an object, others as a
        # mapping; normalize keys to str either way.
        raw_model = hps.model.__dict__ if hasattr(hps.model, '__dict__') else dict(hps.model)
        m_params = clean_config(raw_model)
        net = SynthesizerTrn(
            len(hps.symbols),
            hps.data.filter_length // 2 + 1,
            hps.train.segment_size // hps.data.hop_length,
            n_speakers=hps.data.n_speakers,
            **m_params,
        )
        utils.load_checkpoint(os.path.join(model_dir, "model.pth"), net, None)
        net.eval()
        if hasattr(hps, 'speakers'):
            raw_spks = hps.speakers
        else:
            raw_spks = [f"Character {i}" for i in range(hps.data.n_speakers)]
        # NOTE(review): dropping "none" entries here shifts list indices, and
        # tts_execute later uses the index into this filtered list as the
        # speaker id — verify the checkpoint's speaker table has no holes.
        display_spks = []
        for raw_name in raw_spks:
            romaji_name = to_romaji(raw_name)
            if romaji_name and romaji_name.lower() != "none":
                display_spks.append(romaji_name)
        loaded_models[mid] = (hps, net, display_spks)
        return loaded_models[mid]
    except Exception as e:
        logger.error(f"Load Error {mid}: {e}")
        return None
|
| 108 |
+
|
| 109 |
+
def tts_execute(m_id, text, speaker_romaji, speed):
    """Synthesize *text* with the chosen speaker of model *m_id*.

    Returns ``(status_message, audio)`` where audio is a
    ``(sampling_rate, int16 ndarray)`` pair for gr.Audio, or None on
    failure. *speed* scales playback rate (length_scale = 1/speed).
    """
    data = get_vits_model(m_id)
    if not data:
        return "❌ Model Loading...", None
    hps, net, display_spks = data

    # Default to the first speaker when none was selected.
    if not speaker_romaji:
        if not display_spks:
            return "❌ No Speaker Selected", None
        speaker_romaji = display_spks[0]

    try:
        sid = display_spks.index(speaker_romaji)
        from text import text_to_sequence
        # Strip language tags like [JA]...[JA] before cleaning.
        clean_text = re.sub(r'\[[A-Z]{2}\]', '', text)
        if hasattr(hps.data, 'text_cleaners'):
            cleaners = hps.data.text_cleaners
        else:
            cleaners = ['japanese_cleaners']
        seq = text_to_sequence(clean_text, hps.symbols, cleaners)
        if hps.data.add_blank:
            seq = commons.intersperse(seq, 0)
        with torch.no_grad():
            seq_tensor = torch.LongTensor(seq).unsqueeze(0)
            seq_len = torch.LongTensor([len(seq)])
            audio = net.infer(
                seq_tensor, seq_len,
                sid=torch.LongTensor([sid]),
                noise_scale=0.667,
                noise_scale_w=0.8,
                length_scale=1.0 / speed,
            )[0][0, 0].data.cpu().float().numpy()
        # Scale float waveform to 16-bit PCM for the audio widget.
        return f"✅ Done!", (hps.data.sampling_rate, (audio * 32767).astype(np.int16))
    except Exception as e:
        return f"Error: {e}", None
|
| 129 |
+
|
| 130 |
+
def get_random_jp():
    """Return one of a fixed set of short Japanese demo sentences."""
    samples = ["こんにちは!", "お元気ですか?", "先生、お疲れ様です!", "大好きだよ!", "また明日ね。"]
    return random.choice(samples)
|
| 132 |
+
|
| 133 |
+
# --- 6. UI STYLE ---
# Custom CSS injected into gr.Blocks: blue accent theme, a slim centered
# card layout, horizontally scrollable model tabs, and a scrollable
# character list. (Exact inner whitespace reconstructed — the content is
# CSS, so whitespace is not significant to the browser.)
css = """
:root { --primary-600: #1299ff !important; --accent-600: #1299ff !important; --loader-color: #A2D2FF !important; }
.slim-card { max-width: 480px; margin: 0 auto; background: white; border-radius: 20px; padding: 25px; box-shadow: 0 10px 30px rgba(0,0,0,0.05); }
.ba-header { text-align: center; margin-bottom: 25px; border-bottom: 2px solid #f0f4f8; padding-bottom: 15px; }
.ba-header h1 { color: #1299ff !important; font-weight: 800; font-size: 26px; margin: 0; }
.ba-header p { color: #8a99af; font-size: 11px; font-weight: 700; letter-spacing: 2px; margin: 5px 0 0 0; }

/* CSS UNTUK SCROLLABLE TABS */
.tabs > .tab-nav {
    display: flex !important;
    overflow-x: auto !important;
    white-space: nowrap !important;
    flex-wrap: nowrap !important;
    scrollbar-width: thin;
    scrollbar-color: #1299ff #f0f4f8;
}
.tabs > .tab-nav::-webkit-scrollbar { height: 4px; }
.tabs > .tab-nav::-webkit-scrollbar-thumb { background: #1299ff; border-radius: 10px; }
.tabs > .tab-nav button { flex: 0 0 auto !important; }

.scroll-box { height: 200px; overflow-y: auto; border: 1px solid #f0f4f8; border-radius: 12px; padding: 10px; background: #fafbfc; margin-bottom: 10px; }
.char-btn { background: white !important; border: 1px solid #e2e8f0 !important; border-left: 5px solid #1299ff !important; text-align: left !important; padding: 8px !important; font-size: 12px !important; margin-bottom: 4px !important; width: 100%; color: #4a5568 !important; }
.char-btn:hover { background: #f0f7ff !important; border-color: #1299ff !important; }
.warning-card { background: #fff9f0; border: 1px solid #ffe4bc; border-radius: 10px; padding: 12px; margin-bottom: 15px; text-align: center; color: #855d1a; font-size: 11px; line-height: 1.5; }
.jp-btn { background: #f8fafc !important; border: 1px solid #cbd5e1 !important; color: #475569 !important; font-weight: 700 !important; border-radius: 10px !important; margin-bottom: 10px; font-size: 12px !important; width: 100%; }
.gen-btn { background: #1299ff !important; color: white !important; font-weight: 700 !important; border-radius: 12px !important; height: 45px !important; width: 100%; border: none !important; cursor: pointer; transition: 0.3s; }
.gen-btn:hover { background: #0084ff !important; transform: translateY(-2px); box-shadow: 0 5px 15px rgba(18, 153, 255, 0.3); }
.credit-footer { margin-top: 25px; padding: 15px; background: white; border-radius: 12px; text-align: center; border: 1px solid #eef2f6; border-bottom: 4px solid #1299ff; color: #94a3b8; font-weight: 700; font-size: 12px; letter-spacing: 2px; }
"""
|
| 163 |
+
|
| 164 |
+
# Build the Gradio UI: one tab per model, each with a character picker,
# text input, speed slider, and generate button.
# NOTE(review): nesting reconstructed from widget semantics — the original
# indentation was lost; verify tab/column structure against a live run.
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_classes="slim-card"):
        gr.HTML("""
        <div class="ba-header">
            <h1>Library Anime TTS</h1>
            <p>🍂 STYLE-BERT-VITS 🍂</p>
        </div>
        """)

        with gr.Tabs(elem_classes="tabs"):
            for m_id in sorted(CLEAN_INFO.keys(), key=int):
                with gr.Tab(f"Model {m_id}"):
                    gr.Markdown(f"### 📂 {CLEAN_INFO[m_id]['title']}")

                    # Eagerly load the model so speaker names can be listed.
                    m_data = get_vits_model(m_id)
                    chars = m_data[2] if m_data else []

                    # Show a cover image when one ships with the model.
                    m_p = os.path.join(LOCAL_ROOT, str(m_id))
                    cov = next(
                        (os.path.join(m_p, f"cover.{e}")
                         for e in ['png', 'jpg', 'jpeg', 'webp']
                         if os.path.exists(os.path.join(m_p, f"cover.{e}"))),
                        None)
                    if cov:
                        gr.Image(cov, show_label=False, interactive=False, height=140)

                    sel_name = gr.State("")
                    char_display = gr.Markdown("📍 *Silakan pilih karakter...*")

                    gr.HTML("<p style='font-weight:800; font-size:11px; color:#8a99af; margin-bottom:8px;'>CHARACTER LIST (ROMAJI)</p>")

                    with gr.Column(elem_classes="scroll-box"):
                        if not chars:
                            gr.Markdown("⏳ Sedang memuat karakter...")
                        else:
                            for name in chars:
                                btn = gr.Button(f"👤 {name}", elem_classes="char-btn")
                                # `n=name` default binds the loop variable now
                                # (avoids the late-binding closure pitfall).
                                btn.click(
                                    fn=lambda n=name: (n, f"📍 Selected: **{n}**"),
                                    outputs=[sel_name, char_display]
                                )

                    gr.HTML("""
                    <div class="warning-card">
                        <div style="font-weight:800; margin-bottom:4px;">🔖 PERINGATAN MINNA 🔖</div>
                        Setelah di klik character nya akan Loading otomatis dan ke Select sendiri dan baru bisa di Gunakan!
                    </div>
                    """)

                    # Pre-fill with the model's example text, tags stripped.
                    ex_text = re.sub(r'\[[A-Z]{2}\]', '', CLEAN_INFO[m_id].get("example", "こんにちは。"))
                    txt_in = gr.TextArea(label="Input Text", value=ex_text, lines=3)
                    gr.Button("🎲 INPUTS RANDOM TEXT 🎲", elem_classes="jp-btn").click(get_random_jp, outputs=[txt_in])

                    spd = gr.Slider(0.5, 2.0, 1.0, step=0.1, label="Speed Audio")
                    btn_gen = gr.Button("🎐 GENERATE VOICE 🎐", elem_classes="gen-btn")
                    aud_out = gr.Audio(label="Voice Output")

                    status_log = gr.Textbox(visible=False)

                    btn_gen.click(
                        fn=tts_execute,
                        inputs=[gr.State(m_id), txt_in, sel_name, spd],
                        outputs=[status_log, aud_out]
                    )

    gr.HTML("""<div class="credit-footer">🌥️ CREATED BY MUTSUMI 🌥️</div>""")
|
| 225 |
+
|
| 226 |
+
# Launch the Gradio server only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch()
|
attentions.py
ADDED
|
@@ -0,0 +1,300 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
import torch
|
| 3 |
+
from torch import nn
|
| 4 |
+
from torch.nn import functional as F
|
| 5 |
+
|
| 6 |
+
import commons
|
| 7 |
+
from modules import LayerNorm
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class Encoder(nn.Module):
    """Transformer encoder stack with windowed relative-position attention.

    Each of the *n_layers* applies masked self-attention followed by a
    convolutional FFN; both sub-blocks use a residual connection, dropout,
    and post-LayerNorm. Tensors are channel-first: ``[b, channels, t]``.
    """

    def __init__(self, hidden_channels, filter_channels, n_heads, n_layers,
                 kernel_size=1, p_dropout=0., window_size=4, **kwargs):
        super().__init__()
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.window_size = window_size

        self.drop = nn.Dropout(p_dropout)
        self.attn_layers = nn.ModuleList()
        self.norm_layers_1 = nn.ModuleList()
        self.ffn_layers = nn.ModuleList()
        self.norm_layers_2 = nn.ModuleList()
        for _ in range(self.n_layers):
            self.attn_layers.append(MultiHeadAttention(
                hidden_channels, hidden_channels, n_heads,
                p_dropout=p_dropout, window_size=window_size))
            self.norm_layers_1.append(LayerNorm(hidden_channels))
            self.ffn_layers.append(FFN(
                hidden_channels, hidden_channels, filter_channels,
                kernel_size, p_dropout=p_dropout))
            self.norm_layers_2.append(LayerNorm(hidden_channels))

    def forward(self, x, x_mask):
        # Pairwise mask: a position attends only where both query and key
        # positions are valid.
        attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
        x = x * x_mask
        for layer_idx in range(self.n_layers):
            y = self.drop(self.attn_layers[layer_idx](x, x, attn_mask))
            x = self.norm_layers_1[layer_idx](x + y)

            y = self.drop(self.ffn_layers[layer_idx](x, x_mask))
            x = self.norm_layers_2[layer_idx](x + y)
        return x * x_mask
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
class Decoder(nn.Module):
    """Transformer decoder stack: causal self-attn, enc-dec attn, causal FFN.

    Layout matches Encoder (residual + dropout + post-LayerNorm per
    sub-block); self-attention is masked to forbid looking at future
    positions. Tensors are channel-first: ``[b, channels, t]``.
    """

    def __init__(self, hidden_channels, filter_channels, n_heads, n_layers,
                 kernel_size=1, p_dropout=0., proximal_bias=False,
                 proximal_init=True, **kwargs):
        super().__init__()
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.proximal_bias = proximal_bias
        self.proximal_init = proximal_init

        self.drop = nn.Dropout(p_dropout)
        self.self_attn_layers = nn.ModuleList()
        self.norm_layers_0 = nn.ModuleList()
        self.encdec_attn_layers = nn.ModuleList()
        self.norm_layers_1 = nn.ModuleList()
        self.ffn_layers = nn.ModuleList()
        self.norm_layers_2 = nn.ModuleList()
        for _ in range(self.n_layers):
            self.self_attn_layers.append(MultiHeadAttention(
                hidden_channels, hidden_channels, n_heads,
                p_dropout=p_dropout, proximal_bias=proximal_bias,
                proximal_init=proximal_init))
            self.norm_layers_0.append(LayerNorm(hidden_channels))
            self.encdec_attn_layers.append(MultiHeadAttention(
                hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
            self.norm_layers_1.append(LayerNorm(hidden_channels))
            self.ffn_layers.append(FFN(
                hidden_channels, hidden_channels, filter_channels,
                kernel_size, p_dropout=p_dropout, causal=True))
            self.norm_layers_2.append(LayerNorm(hidden_channels))

    def forward(self, x, x_mask, h, h_mask):
        """
        x: decoder input
        h: encoder output
        """
        # Lower-triangular mask: position t may attend only to <= t.
        self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
            device=x.device, dtype=x.dtype)
        encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
        x = x * x_mask
        for layer_idx in range(self.n_layers):
            y = self.drop(self.self_attn_layers[layer_idx](x, x, self_attn_mask))
            x = self.norm_layers_0[layer_idx](x + y)

            y = self.drop(self.encdec_attn_layers[layer_idx](x, h, encdec_attn_mask))
            x = self.norm_layers_1[layer_idx](x + y)

            y = self.drop(self.ffn_layers[layer_idx](x, x_mask))
            x = self.norm_layers_2[layer_idx](x + y)
        return x * x_mask
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
class MultiHeadAttention(nn.Module):
    """Multi-head attention over channel-first sequences ``[b, channels, t]``.

    Optional features: windowed relative-position embeddings
    (*window_size*), a proximal bias favoring nearby positions, local
    block attention (*block_length*), and proximal initialization
    (key projection starts equal to the query projection).
    The last attention map is kept on ``self.attn`` for inspection.
    """

    def __init__(self, channels, out_channels, n_heads, p_dropout=0.,
                 window_size=None, heads_share=True, block_length=None,
                 proximal_bias=False, proximal_init=False):
        super().__init__()
        assert channels % n_heads == 0

        self.channels = channels
        self.out_channels = out_channels
        self.n_heads = n_heads
        self.p_dropout = p_dropout
        self.window_size = window_size
        self.heads_share = heads_share
        self.block_length = block_length
        self.proximal_bias = proximal_bias
        self.proximal_init = proximal_init
        self.attn = None  # last attention weights [b, n_h, t_t, t_s]

        self.k_channels = channels // n_heads
        # 1x1 convolutions act as per-timestep linear projections.
        self.conv_q = nn.Conv1d(channels, channels, 1)
        self.conv_k = nn.Conv1d(channels, channels, 1)
        self.conv_v = nn.Conv1d(channels, channels, 1)
        self.conv_o = nn.Conv1d(channels, out_channels, 1)
        self.drop = nn.Dropout(p_dropout)

        if window_size is not None:
            # Learnable relative-position embeddings covering offsets
            # in [-window_size, +window_size].
            n_heads_rel = 1 if heads_share else n_heads
            rel_stddev = self.k_channels ** -0.5
            self.emb_rel_k = nn.Parameter(
                torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
            self.emb_rel_v = nn.Parameter(
                torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)

        nn.init.xavier_uniform_(self.conv_q.weight)
        nn.init.xavier_uniform_(self.conv_k.weight)
        nn.init.xavier_uniform_(self.conv_v.weight)
        if proximal_init:
            # Start with identical query/key projections.
            with torch.no_grad():
                self.conv_k.weight.copy_(self.conv_q.weight)
                self.conv_k.bias.copy_(self.conv_q.bias)

    def forward(self, x, c, attn_mask=None):
        # x supplies queries; c supplies keys and values (x == c for
        # self-attention).
        q = self.conv_q(x)
        k = self.conv_k(c)
        v = self.conv_v(c)

        x, self.attn = self.attention(q, k, v, mask=attn_mask)

        return self.conv_o(x)

    def attention(self, query, key, value, mask=None):
        # Reshape [b, d, t] -> [b, n_h, t, d_k].
        b, d, t_s, t_t = (*key.size(), query.size(2))
        query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
        key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
        value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)

        scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
        if self.window_size is not None:
            assert t_s == t_t, "Relative attention is only available for self-attention."
            key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
            rel_logits = self._matmul_with_relative_keys(
                query / math.sqrt(self.k_channels), key_relative_embeddings)
            scores_local = self._relative_position_to_absolute_position(rel_logits)
            scores = scores + scores_local
        if self.proximal_bias:
            assert t_s == t_t, "Proximal bias is only available for self-attention."
            scores = scores + self._attention_bias_proximal(t_s).to(
                device=scores.device, dtype=scores.dtype)
        if mask is not None:
            # -1e4 instead of -inf keeps fp16 stable while still zeroing
            # masked positions after softmax.
            scores = scores.masked_fill(mask == 0, -1e4)
            if self.block_length is not None:
                assert t_s == t_t, "Local attention is only available for self-attention."
                block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
                scores = scores.masked_fill(block_mask == 0, -1e4)
        p_attn = F.softmax(scores, dim=-1)  # [b, n_h, t_t, t_s]
        p_attn = self.drop(p_attn)
        output = torch.matmul(p_attn, value)
        if self.window_size is not None:
            relative_weights = self._absolute_position_to_relative_position(p_attn)
            value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
            output = output + self._matmul_with_relative_values(
                relative_weights, value_relative_embeddings)
        # [b, n_h, t_t, d_k] -> [b, d, t_t]
        output = output.transpose(2, 3).contiguous().view(b, d, t_t)
        return output, p_attn

    def _matmul_with_relative_values(self, x, y):
        """
        x: [b, h, l, m]
        y: [h or 1, m, d]
        ret: [b, h, l, d]
        """
        return torch.matmul(x, y.unsqueeze(0))

    def _matmul_with_relative_keys(self, x, y):
        """
        x: [b, h, l, d]
        y: [h or 1, m, d]
        ret: [b, h, l, m]
        """
        return torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))

    def _get_relative_embeddings(self, relative_embeddings, length):
        # Slice (or pad, for long sequences) the 2*window+1 embedding table
        # down to the 2*length-1 relative offsets actually needed.
        max_relative_position = 2 * self.window_size + 1
        # Pad first before slice to avoid using cond ops.
        pad_length = max(length - (self.window_size + 1), 0)
        slice_start_position = max((self.window_size + 1) - length, 0)
        slice_end_position = slice_start_position + 2 * length - 1
        if pad_length > 0:
            padded_relative_embeddings = F.pad(
                relative_embeddings,
                commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
        else:
            padded_relative_embeddings = relative_embeddings
        return padded_relative_embeddings[:, slice_start_position:slice_end_position]

    def _relative_position_to_absolute_position(self, x):
        """
        x: [b, h, l, 2*l-1]
        ret: [b, h, l, l]
        """
        batch, heads, length, _ = x.size()
        # Concat columns of pad to shift from relative to absolute indexing.
        x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))

        # Concat extra elements so to add up to shape (len+1, 2*len-1).
        x_flat = x.view([batch, heads, length * 2 * length])
        x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]))

        # Reshape and slice out the padded elements.
        x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:]
        return x_final

    def _absolute_position_to_relative_position(self, x):
        """
        x: [b, h, l, l]
        ret: [b, h, l, 2*l-1]
        """
        batch, heads, length, _ = x.size()
        # Pad along column.
        x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]))
        x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)])
        # Add 0's in the beginning that will skew the elements after reshape.
        x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
        x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
        return x_final

    def _attention_bias_proximal(self, length):
        """Bias for self-attention to encourage attention to close positions.

        Args:
            length: an integer scalar.
        Returns:
            a Tensor with shape [1, 1, length, length]
        """
        r = torch.arange(length, dtype=torch.float32)
        diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
        return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
class FFN(nn.Module):
    """Position-wise feed-forward block built from two 1-D convolutions.

    Operates on masked channel-first sequences ``[b, channels, t]``.
    With ``causal=True`` the convolutions are left-padded only, so no
    output frame depends on future input frames. Activation is ReLU by
    default, or a sigmoid-based GELU approximation when
    ``activation == "gelu"``.
    """

    def __init__(self, in_channels, out_channels, filter_channels, kernel_size,
                 p_dropout=0., activation=None, causal=False):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.activation = activation
        self.causal = causal

        # Select the padding strategy once, at construction time.
        self.padding = self._causal_padding if causal else self._same_padding

        self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
        self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
        self.drop = nn.Dropout(p_dropout)

    def forward(self, x, x_mask):
        x = self.conv_1(self.padding(x * x_mask))
        if self.activation == "gelu":
            x = x * torch.sigmoid(1.702 * x)  # sigmoid-based GELU approximation
        else:
            x = torch.relu(x)
        x = self.drop(x)
        x = self.conv_2(self.padding(x * x_mask))
        return x * x_mask

    def _causal_padding(self, x):
        # Left-pad by (k-1) so the convolution never sees future frames.
        if self.kernel_size == 1:
            return x
        pad_spec = [[0, 0], [0, 0], [self.kernel_size - 1, 0]]
        return F.pad(x, commons.convert_pad_shape(pad_spec))

    def _same_padding(self, x):
        # Near-symmetric padding preserving the sequence length.
        if self.kernel_size == 1:
            return x
        pad_spec = [[0, 0], [0, 0],
                    [(self.kernel_size - 1) // 2, self.kernel_size // 2]]
        return F.pad(x, commons.convert_pad_shape(pad_spec))
|
chinese_dialect_lexicons/changzhou.json
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "Changzhou dialect to IPA",
|
| 3 |
+
"segmentation": {
|
| 4 |
+
"type": "mmseg",
|
| 5 |
+
"dict": {
|
| 6 |
+
"type": "ocd2",
|
| 7 |
+
"file": "changzhou.ocd2"
|
| 8 |
+
}
|
| 9 |
+
},
|
| 10 |
+
"conversion_chain": [
|
| 11 |
+
{
|
| 12 |
+
"dict": {
|
| 13 |
+
"type": "group",
|
| 14 |
+
"dicts": [
|
| 15 |
+
{
|
| 16 |
+
"type": "ocd2",
|
| 17 |
+
"file": "changzhou.ocd2"
|
| 18 |
+
}
|
| 19 |
+
]
|
| 20 |
+
}
|
| 21 |
+
}
|
| 22 |
+
]
|
| 23 |
+
}
|
chinese_dialect_lexicons/changzhou.ocd2
ADDED
|
Binary file (96.1 kB). View file
|
|
|
chinese_dialect_lexicons/cixi.json
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "Cixi dialect to IPA",
|
| 3 |
+
"segmentation": {
|
| 4 |
+
"type": "mmseg",
|
| 5 |
+
"dict": {
|
| 6 |
+
"type": "ocd2",
|
| 7 |
+
"file": "cixi.ocd2"
|
| 8 |
+
}
|
| 9 |
+
},
|
| 10 |
+
"conversion_chain": [
|
| 11 |
+
{
|
| 12 |
+
"dict": {
|
| 13 |
+
"type": "group",
|
| 14 |
+
"dicts": [
|
| 15 |
+
{
|
| 16 |
+
"type": "ocd2",
|
| 17 |
+
"file": "cixi.ocd2"
|
| 18 |
+
}
|
| 19 |
+
]
|
| 20 |
+
}
|
| 21 |
+
}
|
| 22 |
+
]
|
| 23 |
+
}
|
chinese_dialect_lexicons/cixi.ocd2
ADDED
|
Binary file (98 kB). View file
|
|
|
chinese_dialect_lexicons/fuyang.json
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "Fuyang dialect to IPA",
|
| 3 |
+
"segmentation": {
|
| 4 |
+
"type": "mmseg",
|
| 5 |
+
"dict": {
|
| 6 |
+
"type": "ocd2",
|
| 7 |
+
"file": "fuyang.ocd2"
|
| 8 |
+
}
|
| 9 |
+
},
|
| 10 |
+
"conversion_chain": [
|
| 11 |
+
{
|
| 12 |
+
"dict": {
|
| 13 |
+
"type": "group",
|
| 14 |
+
"dicts": [
|
| 15 |
+
{
|
| 16 |
+
"type": "ocd2",
|
| 17 |
+
"file": "fuyang.ocd2"
|
| 18 |
+
}
|
| 19 |
+
]
|
| 20 |
+
}
|
| 21 |
+
}
|
| 22 |
+
]
|
| 23 |
+
}
|
chinese_dialect_lexicons/fuyang.ocd2
ADDED
|
Binary file (83.7 kB). View file
|
|
|
chinese_dialect_lexicons/hangzhou.json
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "Hangzhounese to IPA",
|
| 3 |
+
"segmentation": {
|
| 4 |
+
"type": "mmseg",
|
| 5 |
+
"dict": {
|
| 6 |
+
"type": "ocd2",
|
| 7 |
+
"file": "hangzhou.ocd2"
|
| 8 |
+
}
|
| 9 |
+
},
|
| 10 |
+
"conversion_chain": [{
|
| 11 |
+
"dict": {
|
| 12 |
+
"type": "group",
|
| 13 |
+
"dicts": [{
|
| 14 |
+
"type": "ocd2",
|
| 15 |
+
"file": "hangzhou.ocd2"
|
| 16 |
+
}]
|
| 17 |
+
}
|
| 18 |
+
}]
|
| 19 |
+
}
|
chinese_dialect_lexicons/hangzhou.ocd2
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c7a9eb5fbd3b8c91745dbb2734f2700b75a47c3821e381566afc567d7da4d9d5
|
| 3 |
+
size 427268
|
chinese_dialect_lexicons/jiading.json
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "Jiading dialect to IPA",
|
| 3 |
+
"segmentation": {
|
| 4 |
+
"type": "mmseg",
|
| 5 |
+
"dict": {
|
| 6 |
+
"type": "ocd2",
|
| 7 |
+
"file": "jiading.ocd2"
|
| 8 |
+
}
|
| 9 |
+
},
|
| 10 |
+
"conversion_chain": [
|
| 11 |
+
{
|
| 12 |
+
"dict": {
|
| 13 |
+
"type": "group",
|
| 14 |
+
"dicts": [
|
| 15 |
+
{
|
| 16 |
+
"type": "ocd2",
|
| 17 |
+
"file": "jiading.ocd2"
|
| 18 |
+
}
|
| 19 |
+
]
|
| 20 |
+
}
|
| 21 |
+
}
|
| 22 |
+
]
|
| 23 |
+
}
|
chinese_dialect_lexicons/jiading.ocd2
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4f3ac33214e65e7223e8c561bc12ec90a2d87db3cf8d20e87a30bbd8eb788187
|
| 3 |
+
size 111144
|
chinese_dialect_lexicons/jiashan.json
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "Jiashan dialect to IPA",
|
| 3 |
+
"segmentation": {
|
| 4 |
+
"type": "mmseg",
|
| 5 |
+
"dict": {
|
| 6 |
+
"type": "ocd2",
|
| 7 |
+
"file": "jiashan.ocd2"
|
| 8 |
+
}
|
| 9 |
+
},
|
| 10 |
+
"conversion_chain": [
|
| 11 |
+
{
|
| 12 |
+
"dict": {
|
| 13 |
+
"type": "group",
|
| 14 |
+
"dicts": [
|
| 15 |
+
{
|
| 16 |
+
"type": "ocd2",
|
| 17 |
+
"file": "jiashan.ocd2"
|
| 18 |
+
}
|
| 19 |
+
]
|
| 20 |
+
}
|
| 21 |
+
}
|
| 22 |
+
]
|
| 23 |
+
}
|
chinese_dialect_lexicons/jiashan.ocd2
ADDED
|
Binary file (71.7 kB). View file
|
|
|
chinese_dialect_lexicons/jingjiang.json
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "Jingjiang dialect to IPA",
|
| 3 |
+
"segmentation": {
|
| 4 |
+
"type": "mmseg",
|
| 5 |
+
"dict": {
|
| 6 |
+
"type": "ocd2",
|
| 7 |
+
"file": "jingjiang.ocd2"
|
| 8 |
+
}
|
| 9 |
+
},
|
| 10 |
+
"conversion_chain": [
|
| 11 |
+
{
|
| 12 |
+
"dict": {
|
| 13 |
+
"type": "group",
|
| 14 |
+
"dicts": [
|
| 15 |
+
{
|
| 16 |
+
"type": "ocd2",
|
| 17 |
+
"file": "jingjiang.ocd2"
|
| 18 |
+
}
|
| 19 |
+
]
|
| 20 |
+
}
|
| 21 |
+
}
|
| 22 |
+
]
|
| 23 |
+
}
|
chinese_dialect_lexicons/jingjiang.ocd2
ADDED
|
Binary file (86.1 kB). View file
|
|
|
chinese_dialect_lexicons/jyutjyu.json
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "Cantonese to IPA",
|
| 3 |
+
"segmentation": {
|
| 4 |
+
"type": "mmseg",
|
| 5 |
+
"dict": {
|
| 6 |
+
"type": "ocd2",
|
| 7 |
+
"file": "jyutjyu.ocd2"
|
| 8 |
+
}
|
| 9 |
+
},
|
| 10 |
+
"conversion_chain": [{
|
| 11 |
+
"dict": {
|
| 12 |
+
"type": "group",
|
| 13 |
+
"dicts": [{
|
| 14 |
+
"type": "ocd2",
|
| 15 |
+
"file": "jyutjyu.ocd2"
|
| 16 |
+
}]
|
| 17 |
+
}
|
| 18 |
+
}]
|
| 19 |
+
}
|
chinese_dialect_lexicons/jyutjyu.ocd2
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:aea11bfe51b184b3f000d20ab49757979b216219203839d2b2e3c1f990a13fa5
|
| 3 |
+
size 2432991
|
chinese_dialect_lexicons/linping.json
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "Linping dialect to IPA",
|
| 3 |
+
"segmentation": {
|
| 4 |
+
"type": "mmseg",
|
| 5 |
+
"dict": {
|
| 6 |
+
"type": "ocd2",
|
| 7 |
+
"file": "linping.ocd2"
|
| 8 |
+
}
|
| 9 |
+
},
|
| 10 |
+
"conversion_chain": [
|
| 11 |
+
{
|
| 12 |
+
"dict": {
|
| 13 |
+
"type": "group",
|
| 14 |
+
"dicts": [
|
| 15 |
+
{
|
| 16 |
+
"type": "ocd2",
|
| 17 |
+
"file": "linping.ocd2"
|
| 18 |
+
}
|
| 19 |
+
]
|
| 20 |
+
}
|
| 21 |
+
}
|
| 22 |
+
]
|
| 23 |
+
}
|
chinese_dialect_lexicons/linping.ocd2
ADDED
|
Binary file (65.4 kB). View file
|
|
|
chinese_dialect_lexicons/ningbo.json
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "Ningbonese to IPA",
|
| 3 |
+
"segmentation": {
|
| 4 |
+
"type": "mmseg",
|
| 5 |
+
"dict": {
|
| 6 |
+
"type": "ocd2",
|
| 7 |
+
"file": "ningbo.ocd2"
|
| 8 |
+
}
|
| 9 |
+
},
|
| 10 |
+
"conversion_chain": [{
|
| 11 |
+
"dict": {
|
| 12 |
+
"type": "group",
|
| 13 |
+
"dicts": [{
|
| 14 |
+
"type": "ocd2",
|
| 15 |
+
"file": "ningbo.ocd2"
|
| 16 |
+
}]
|
| 17 |
+
}
|
| 18 |
+
}]
|
| 19 |
+
}
|
chinese_dialect_lexicons/ningbo.ocd2
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5876b000f611ea52bf18cda5bcbdd0cfcc55e1c09774d9a24e3b5c7d90002435
|
| 3 |
+
size 386414
|
chinese_dialect_lexicons/pinghu.json
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "Pinghu dialect to IPA",
|
| 3 |
+
"segmentation": {
|
| 4 |
+
"type": "mmseg",
|
| 5 |
+
"dict": {
|
| 6 |
+
"type": "ocd2",
|
| 7 |
+
"file": "pinghu.ocd2"
|
| 8 |
+
}
|
| 9 |
+
},
|
| 10 |
+
"conversion_chain": [
|
| 11 |
+
{
|
| 12 |
+
"dict": {
|
| 13 |
+
"type": "group",
|
| 14 |
+
"dicts": [
|
| 15 |
+
{
|
| 16 |
+
"type": "ocd2",
|
| 17 |
+
"file": "pinghu.ocd2"
|
| 18 |
+
}
|
| 19 |
+
]
|
| 20 |
+
}
|
| 21 |
+
}
|
| 22 |
+
]
|
| 23 |
+
}
|
chinese_dialect_lexicons/pinghu.ocd2
ADDED
|
Binary file (69.4 kB). View file
|
|
|
chinese_dialect_lexicons/ruao.json
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "Ruao dialect to IPA",
|
| 3 |
+
"segmentation": {
|
| 4 |
+
"type": "mmseg",
|
| 5 |
+
"dict": {
|
| 6 |
+
"type": "ocd2",
|
| 7 |
+
"file": "ruao.ocd2"
|
| 8 |
+
}
|
| 9 |
+
},
|
| 10 |
+
"conversion_chain": [
|
| 11 |
+
{
|
| 12 |
+
"dict": {
|
| 13 |
+
"type": "group",
|
| 14 |
+
"dicts": [
|
| 15 |
+
{
|
| 16 |
+
"type": "ocd2",
|
| 17 |
+
"file": "ruao.ocd2"
|
| 18 |
+
}
|
| 19 |
+
]
|
| 20 |
+
}
|
| 21 |
+
}
|
| 22 |
+
]
|
| 23 |
+
}
|
chinese_dialect_lexicons/ruao.ocd2
ADDED
|
Binary file (58.8 kB). View file
|
|
|
chinese_dialect_lexicons/sanmen.json
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "Sanmen dialect to IPA",
|
| 3 |
+
"segmentation": {
|
| 4 |
+
"type": "mmseg",
|
| 5 |
+
"dict": {
|
| 6 |
+
"type": "ocd2",
|
| 7 |
+
"file": "sanmen.ocd2"
|
| 8 |
+
}
|
| 9 |
+
},
|
| 10 |
+
"conversion_chain": [
|
| 11 |
+
{
|
| 12 |
+
"dict": {
|
| 13 |
+
"type": "group",
|
| 14 |
+
"dicts": [
|
| 15 |
+
{
|
| 16 |
+
"type": "ocd2",
|
| 17 |
+
"file": "sanmen.ocd2"
|
| 18 |
+
}
|
| 19 |
+
]
|
| 20 |
+
}
|
| 21 |
+
}
|
| 22 |
+
]
|
| 23 |
+
}
|
chinese_dialect_lexicons/sanmen.ocd2
ADDED
|
Binary file (80.2 kB). View file
|
|
|
chinese_dialect_lexicons/shaoxing.json
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "Shaoxing dialect to IPA",
|
| 3 |
+
"segmentation": {
|
| 4 |
+
"type": "mmseg",
|
| 5 |
+
"dict": {
|
| 6 |
+
"type": "ocd2",
|
| 7 |
+
"file": "shaoxing.ocd2"
|
| 8 |
+
}
|
| 9 |
+
},
|
| 10 |
+
"conversion_chain": [
|
| 11 |
+
{
|
| 12 |
+
"dict": {
|
| 13 |
+
"type": "group",
|
| 14 |
+
"dicts": [
|
| 15 |
+
{
|
| 16 |
+
"type": "ocd2",
|
| 17 |
+
"file": "shaoxing.ocd2"
|
| 18 |
+
}
|
| 19 |
+
]
|
| 20 |
+
}
|
| 21 |
+
}
|
| 22 |
+
]
|
| 23 |
+
}
|
chinese_dialect_lexicons/shaoxing.ocd2
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a347aa25bf435803727b4194cf34de4de3e61f03427ee21043a711cdb0b9d940
|
| 3 |
+
size 113108
|
chinese_dialect_lexicons/suichang.json
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "Suichang dialect to IPA",
|
| 3 |
+
"segmentation": {
|
| 4 |
+
"type": "mmseg",
|
| 5 |
+
"dict": {
|
| 6 |
+
"type": "ocd2",
|
| 7 |
+
"file": "suichang.ocd2"
|
| 8 |
+
}
|
| 9 |
+
},
|
| 10 |
+
"conversion_chain": [
|
| 11 |
+
{
|
| 12 |
+
"dict": {
|
| 13 |
+
"type": "group",
|
| 14 |
+
"dicts": [
|
| 15 |
+
{
|
| 16 |
+
"type": "ocd2",
|
| 17 |
+
"file": "suichang.ocd2"
|
| 18 |
+
}
|
| 19 |
+
]
|
| 20 |
+
}
|
| 21 |
+
}
|
| 22 |
+
]
|
| 23 |
+
}
|
chinese_dialect_lexicons/suichang.ocd2
ADDED
|
Binary file (81 kB). View file
|
|
|
chinese_dialect_lexicons/suzhou.json
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "Suzhounese to IPA",
|
| 3 |
+
"segmentation": {
|
| 4 |
+
"type": "mmseg",
|
| 5 |
+
"dict": {
|
| 6 |
+
"type": "ocd2",
|
| 7 |
+
"file": "suzhou.ocd2"
|
| 8 |
+
}
|
| 9 |
+
},
|
| 10 |
+
"conversion_chain": [{
|
| 11 |
+
"dict": {
|
| 12 |
+
"type": "group",
|
| 13 |
+
"dicts": [{
|
| 14 |
+
"type": "ocd2",
|
| 15 |
+
"file": "suzhou.ocd2"
|
| 16 |
+
}]
|
| 17 |
+
}
|
| 18 |
+
}]
|
| 19 |
+
}
|
chinese_dialect_lexicons/suzhou.ocd2
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a8815595a248135874329e7f34662dd243a266be3e8375e8409f95da95d6d540
|
| 3 |
+
size 506184
|
chinese_dialect_lexicons/tiantai.json
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "Tiantai dialect to IPA",
|
| 3 |
+
"segmentation": {
|
| 4 |
+
"type": "mmseg",
|
| 5 |
+
"dict": {
|
| 6 |
+
"type": "ocd2",
|
| 7 |
+
"file": "tiantai.ocd2"
|
| 8 |
+
}
|
| 9 |
+
},
|
| 10 |
+
"conversion_chain": [
|
| 11 |
+
{
|
| 12 |
+
"dict": {
|
| 13 |
+
"type": "group",
|
| 14 |
+
"dicts": [
|
| 15 |
+
{
|
| 16 |
+
"type": "ocd2",
|
| 17 |
+
"file": "tiantai.ocd2"
|
| 18 |
+
}
|
| 19 |
+
]
|
| 20 |
+
}
|
| 21 |
+
}
|
| 22 |
+
]
|
| 23 |
+
}
|
chinese_dialect_lexicons/tiantai.ocd2
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:339e0ba454444dbf8fbe75de6f49769d11dfe2f2f5ba7dea74ba20fba5d6d343
|
| 3 |
+
size 120951
|
chinese_dialect_lexicons/tongxiang.json
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "Tongxiang dialect to IPA",
|
| 3 |
+
"segmentation": {
|
| 4 |
+
"type": "mmseg",
|
| 5 |
+
"dict": {
|
| 6 |
+
"type": "ocd2",
|
| 7 |
+
"file": "tongxiang.ocd2"
|
| 8 |
+
}
|
| 9 |
+
},
|
| 10 |
+
"conversion_chain": [
|
| 11 |
+
{
|
| 12 |
+
"dict": {
|
| 13 |
+
"type": "group",
|
| 14 |
+
"dicts": [
|
| 15 |
+
{
|
| 16 |
+
"type": "ocd2",
|
| 17 |
+
"file": "tongxiang.ocd2"
|
| 18 |
+
}
|
| 19 |
+
]
|
| 20 |
+
}
|
| 21 |
+
}
|
| 22 |
+
]
|
| 23 |
+
}
|
chinese_dialect_lexicons/tongxiang.ocd2
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7432d85588eb8ba34e7baea9f26af8d332572037ff7d41a6730f96c02e5fd063
|
| 3 |
+
size 137499
|
chinese_dialect_lexicons/wenzhou.json
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "Wenzhou dialect to IPA",
|
| 3 |
+
"segmentation": {
|
| 4 |
+
"type": "mmseg",
|
| 5 |
+
"dict": {
|
| 6 |
+
"type": "ocd2",
|
| 7 |
+
"file": "wenzhou.ocd2"
|
| 8 |
+
}
|
| 9 |
+
},
|
| 10 |
+
"conversion_chain": [
|
| 11 |
+
{
|
| 12 |
+
"dict": {
|
| 13 |
+
"type": "group",
|
| 14 |
+
"dicts": [
|
| 15 |
+
{
|
| 16 |
+
"type": "ocd2",
|
| 17 |
+
"file": "wenzhou.ocd2"
|
| 18 |
+
}
|
| 19 |
+
]
|
| 20 |
+
}
|
| 21 |
+
}
|
| 22 |
+
]
|
| 23 |
+
}
|
chinese_dialect_lexicons/wenzhou.ocd2
ADDED
|
Binary file (83.1 kB). View file
|
|
|
chinese_dialect_lexicons/wuxi.json
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "Wuxinese to IPA",
|
| 3 |
+
"segmentation": {
|
| 4 |
+
"type": "mmseg",
|
| 5 |
+
"dict": {
|
| 6 |
+
"type": "ocd2",
|
| 7 |
+
"file": "wuxi.ocd2"
|
| 8 |
+
}
|
| 9 |
+
},
|
| 10 |
+
"conversion_chain": [{
|
| 11 |
+
"dict": {
|
| 12 |
+
"type": "group",
|
| 13 |
+
"dicts": [{
|
| 14 |
+
"type": "ocd2",
|
| 15 |
+
"file": "wuxi.ocd2"
|
| 16 |
+
}]
|
| 17 |
+
}
|
| 18 |
+
}]
|
| 19 |
+
}
|
chinese_dialect_lexicons/wuxi.ocd2
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:64f27ffaa75e542e4464e53c4acf94607be1526a90922ac8b28870104aaebdff
|
| 3 |
+
size 358666
|
chinese_dialect_lexicons/xiaoshan.json
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "Xiaoshan dialect to IPA",
|
| 3 |
+
"segmentation": {
|
| 4 |
+
"type": "mmseg",
|
| 5 |
+
"dict": {
|
| 6 |
+
"type": "ocd2",
|
| 7 |
+
"file": "xiaoshan.ocd2"
|
| 8 |
+
}
|
| 9 |
+
},
|
| 10 |
+
"conversion_chain": [
|
| 11 |
+
{
|
| 12 |
+
"dict": {
|
| 13 |
+
"type": "group",
|
| 14 |
+
"dicts": [
|
| 15 |
+
{
|
| 16 |
+
"type": "ocd2",
|
| 17 |
+
"file": "xiaoshan.ocd2"
|
| 18 |
+
}
|
| 19 |
+
]
|
| 20 |
+
}
|
| 21 |
+
}
|
| 22 |
+
]
|
| 23 |
+
}
|
chinese_dialect_lexicons/xiaoshan.ocd2
ADDED
|
Binary file (77.1 kB). View file
|
|
|
chinese_dialect_lexicons/xiashi.json
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "Xiashi dialect to IPA",
|
| 3 |
+
"segmentation": {
|
| 4 |
+
"type": "mmseg",
|
| 5 |
+
"dict": {
|
| 6 |
+
"type": "ocd2",
|
| 7 |
+
"file": "xiashi.ocd2"
|
| 8 |
+
}
|
| 9 |
+
},
|
| 10 |
+
"conversion_chain": [{
|
| 11 |
+
"dict": {
|
| 12 |
+
"type": "group",
|
| 13 |
+
"dicts": [{
|
| 14 |
+
"type": "ocd2",
|
| 15 |
+
"file": "xiashi.ocd2"
|
| 16 |
+
}]
|
| 17 |
+
}
|
| 18 |
+
}]
|
| 19 |
+
}
|
chinese_dialect_lexicons/xiashi.ocd2
ADDED
|
Binary file (70.3 kB). View file
|
|
|
chinese_dialect_lexicons/yixing.json
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "Yixing dialect to IPA",
|
| 3 |
+
"segmentation": {
|
| 4 |
+
"type": "mmseg",
|
| 5 |
+
"dict": {
|
| 6 |
+
"type": "ocd2",
|
| 7 |
+
"file": "yixing.ocd2"
|
| 8 |
+
}
|
| 9 |
+
},
|
| 10 |
+
"conversion_chain": [{
|
| 11 |
+
"dict": {
|
| 12 |
+
"type": "group",
|
| 13 |
+
"dicts": [{
|
| 14 |
+
"type": "ocd2",
|
| 15 |
+
"file": "yixing.ocd2"
|
| 16 |
+
}]
|
| 17 |
+
}
|
| 18 |
+
}]
|
| 19 |
+
}
|
chinese_dialect_lexicons/yixing.ocd2
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6c56a73eb531f49f64562bdb714753d37dc015baac943b3264bccba9b2aacf9b
|
| 3 |
+
size 155050
|
chinese_dialect_lexicons/youbu.json
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "Youbu dialect to IPA",
|
| 3 |
+
"segmentation": {
|
| 4 |
+
"type": "mmseg",
|
| 5 |
+
"dict": {
|
| 6 |
+
"type": "ocd2",
|
| 7 |
+
"file": "youbu.ocd2"
|
| 8 |
+
}
|
| 9 |
+
},
|
| 10 |
+
"conversion_chain": [
|
| 11 |
+
{
|
| 12 |
+
"dict": {
|
| 13 |
+
"type": "group",
|
| 14 |
+
"dicts": [
|
| 15 |
+
{
|
| 16 |
+
"type": "ocd2",
|
| 17 |
+
"file": "youbu.ocd2"
|
| 18 |
+
}
|
| 19 |
+
]
|
| 20 |
+
}
|
| 21 |
+
}
|
| 22 |
+
]
|
| 23 |
+
}
|