import TshetUinh from "tshet-uinh";
import * as Examples from "tshet-uinh-examples";
/**
 * Tokenize text in the legacy annotated format, e.g. `風(幫三C東平)`.
 * Each "char(描述)" pair becomes an `annotated` token whose pronunciation is
 * produced by `derivePron(音韻地位, char)`; everything between annotations is
 * emitted as `raw` tokens.
 *
 * @param {string} text - Input containing zero or more `X(描述)` annotations.
 * @param {(pos: object, ch: string) => string} derivePron - Pronunciation deriver.
 * @returns {Array<{type: "raw", text: string} | {type: "annotated", ch: string, desc: string, pron: string}>}
 */
function tokensFromAnnotatedText(text, derivePron) {
  const result = [];
  let cursor = 0;
  for (const match of text.matchAll(/([\s\S])\(([^)]+)\)/g)) {
    const [whole, ch, desc] = match;
    // Emit any plain text that precedes this annotation.
    if (match.index > cursor) {
      result.push({ type: "raw", text: text.slice(cursor, match.index) });
    }
    let pron = "";
    try {
      const pos = TshetUinh.音韻地位.from描述(desc);
      pron = derivePron(pos, ch) ?? "";
    } catch {
      // Fail soft: a bad 描述 must not abort the rest of the passage.
      pron = "";
    }
    result.push({ type: "annotated", ch, desc, pron });
    cursor = match.index + whole.length;
  }
  // Trailing plain text after the final annotation.
  if (cursor < text.length) result.push({ type: "raw", text: text.slice(cursor) });
  return result;
}
/**
 * Look up a representative 音韻地位 for a single character.
 * Mirrors the original site's behaviour by preferring the 廣韻 entry when
 * several sources list the character.
 *
 * @param {string} ch - A single character (one code point).
 * @returns {object | null} The chosen 音韻地位, or null when no entry exists.
 */
function choosePositionByChar(ch) {
  const entries = TshetUinh.資料.query字頭(ch);
  if (!entries?.length) return null;
  // Prefer the first entry sourced from 廣韻; otherwise fall back to entries[0].
  let chosen = entries[0];
  for (const entry of entries) {
    if (entry?.來源?.文獻 === "廣韻") {
      chosen = entry;
      break;
    }
  }
  return chosen.音韻地位 || null;
}
/**
 * Tokenize plain (unannotated) text character by character.
 * Characters found in the 資料 become `annotated` tokens (desc taken from the
 * looked-up 音韻地位); consecutive unknown characters are merged into `raw` tokens.
 *
 * @param {string} text - Plain input text.
 * @param {(pos: object, ch: string) => string} derivePron - Pronunciation deriver.
 * @returns {Array<{type: "raw", text: string} | {type: "annotated", ch: string, desc: string, pron: string}>}
 */
function tokensFromCharText(text, derivePron) {
  const out = [];
  let pending = "";
  // Iterating the string directly walks code points, matching Array.from(text).
  for (const ch of text) {
    const pos = choosePositionByChar(ch);
    if (!pos) {
      pending += ch;
      continue;
    }
    if (pending) {
      out.push({ type: "raw", text: pending });
      pending = "";
    }
    let pron;
    try {
      pron = derivePron(pos, ch) ?? "";
    } catch {
      // Fail soft: one bad derivation must not abort the rest of the text.
      pron = "";
    }
    out.push({ type: "annotated", ch, desc: pos.描述, pron });
  }
  if (pending) out.push({ type: "raw", text: pending });
  return out;
}
/**
 * Convenience wrapper: obtain a deriver for `scheme` with its default options.
 * @param {string} scheme - Scheme id, e.g. "tupa".
 * @returns {(pos: object, ch: string) => string}
 */
function getDeriver(scheme) {
  return getDeriverWithOptions(scheme);
}
/**
 * Instantiate a pronunciation deriver from tshet-uinh-examples.
 * Scheme ids are exactly the export names on the Examples module.
 *
 * @param {string} scheme - One of the supported scheme ids.
 * @param {object | undefined} options - Scheme options; defaults substituted when nullish.
 * @returns {(pos: object, ch: string) => string}
 * @throws {Error} When `scheme` is not a supported id.
 */
function getDeriverWithOptions(scheme, options) {
  const supported = new Set([
    "tupa",
    "high_tang",
    "unt",
    "unt_legacy",
    "putonghua",
    "gwongzau",
    "zaonhe",
  ]);
  if (!supported.has(scheme)) {
    throw new Error(`Unsupported scheme: ${scheme}`);
  }
  return Examples[scheme](options ?? {});
}
/**
 * Derive pronunciations for `text` under `scheme`.
 *
 * Accepts two input formats (auto-detected):
 *  1. Legacy annotated text, e.g. `風(幫三C東平)` — the 音韻地位 inside the
 *     parentheses is used directly.
 *  2. Plain text — each character's 音韻地位 is looked up from the 資料.
 *
 * The special combined scheme `"tupa_high_tang"` renders both the TUPA
 * romanization and the High-Tang IPA as `"<tupa> /<ipa>/"` for every token.
 *
 * @param {{scheme?: string, text?: string, options?: object}} [args]
 * @returns {{scheme: string, tokens: Array<object>}}
 * @throws {Error} When `scheme` is not supported.
 */
export function derive({ scheme = "tupa", text = "", options = undefined } = {}) {
  const s = scheme.toString();
  const t = text.toString();

  let derivePron;
  if (s === "tupa_high_tang") {
    // Combined scheme: one deriver per sub-scheme, rendered side by side.
    // `options` is intentionally ignored here — it cannot apply to both schemes.
    const deriveTupa = getDeriverWithOptions("tupa", undefined);
    const deriveHighTang = getDeriverWithOptions("high_tang", undefined);
    derivePron = (pos, ch) => {
      const tupa = (deriveTupa(pos, ch) ?? "").trim();
      const ipa = (deriveHighTang(pos, ch) ?? "").trim();
      return `${tupa} /${ipa}/`;
    };
  } else {
    derivePron = getDeriverWithOptions(s, options);
  }

  // Format detection: any `X(...)` pair means legacy annotated input.
  // Routing both schemes through the shared tokenizers keeps error handling
  // uniform: a malformed 描述 yields pron "" instead of aborting the whole
  // derivation (previously the combined scheme re-parsed each 描述 unguarded
  // and crashed on bad input).
  const isAnnotated = /[\s\S]\([^)]+\)/.test(t);
  const tokens = isAnnotated ? tokensFromAnnotatedText(t, derivePron) : tokensFromCharText(t, derivePron);
  return { scheme: s, tokens };
}