Add OpenCC (Simplified→Traditional Chinese conversion) for generated suggestions
Browse files- app.py +13 -5
- requirements.txt +2 -1
app.py
CHANGED
|
@@ -3,6 +3,10 @@ import gradio as gr
|
|
| 3 |
from gradio import update
|
| 4 |
from functools import lru_cache
|
| 5 |
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
|
|
|
|
|
|
|
|
|
|
|
|
|
| 6 |
|
| 7 |
# 可選模型列表
|
| 8 |
MODEL_LIST = [
|
|
@@ -23,14 +27,16 @@ MODEL_LIST = [
|
|
| 23 |
@lru_cache(maxsize=None)
|
| 24 |
def get_pipeline(model_name):
|
| 25 |
tok = AutoTokenizer.from_pretrained(model_name)
|
| 26 |
-
mdl = AutoModelForCausalLM.from_pretrained(
|
|
|
|
|
|
|
| 27 |
mdl.to("cuda")
|
| 28 |
return pipeline("text-generation", model=mdl, tokenizer=tok, device=0)
|
| 29 |
|
| 30 |
@spaces.GPU
|
| 31 |
def suggest_next(text, model_name, k, m):
|
| 32 |
"""
|
| 33 |
-
使用 Beam Search 產生 M
|
| 34 |
"""
|
| 35 |
gen_pipe = get_pipeline(model_name)
|
| 36 |
outs = gen_pipe(
|
|
@@ -41,10 +47,12 @@ def suggest_next(text, model_name, k, m):
|
|
| 41 |
do_sample=False,
|
| 42 |
early_stopping=True
|
| 43 |
)
|
|
|
|
| 44 |
suggestions = [out["generated_text"][len(text):].strip() for out in outs]
|
| 45 |
suggestions = [s for s in suggestions if s]
|
|
|
|
|
|
|
| 46 |
|
| 47 |
-
# 更新候選條
|
| 48 |
return update(choices=suggestions, value=None)
|
| 49 |
|
| 50 |
def append_suggestion(current, choice):
|
|
@@ -53,7 +61,7 @@ def append_suggestion(current, choice):
|
|
| 53 |
# 模擬輸入法候選選中
|
| 54 |
return current + choice
|
| 55 |
|
| 56 |
-
#
|
| 57 |
custom_css = """
|
| 58 |
#suggestions-bar .candidate-list {
|
| 59 |
display: flex;
|
|
@@ -83,7 +91,7 @@ custom_css = """
|
|
| 83 |
with gr.Blocks(css=custom_css) as demo:
|
| 84 |
# 標題和說明
|
| 85 |
gr.Markdown(
|
| 86 |
-
"## 🇹🇼
|
| 87 |
"結合小型語言模型與 ZeroGPU,即時 IME 風格候選條。"
|
| 88 |
)
|
| 89 |
|
|
|
|
| 3 |
from gradio import update
|
| 4 |
from functools import lru_cache
|
| 5 |
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
|
| 6 |
+
from opencc import OpenCC # 用於簡體轉繁體
|
| 7 |
+
|
| 8 |
+
# 初始化簡體到繁體轉換器
|
| 9 |
+
cc = OpenCC('s2t')
|
| 10 |
|
| 11 |
# 可選模型列表
|
| 12 |
MODEL_LIST = [
|
|
|
|
| 27 |
@lru_cache(maxsize=None)
def get_pipeline(model_name):
    """Load and memoize a text-generation pipeline for *model_name*.

    Results are cached per model name (via ``lru_cache``) so switching back
    to a previously used model reuses the already-loaded weights instead of
    downloading/initializing them again.

    Args:
        model_name: Hugging Face model id accepted by ``from_pretrained``.

    Returns:
        A ``transformers`` text-generation pipeline bound to the GPU when
        one is available, otherwise to the CPU.
    """
    tok = AutoTokenizer.from_pretrained(model_name)
    # SECURITY NOTE(review): trust_remote_code=True executes Python code
    # shipped inside the model repository, and weights_only=False disables
    # the safer weights-only loading path — only use trusted model ids
    # (e.g. the vetted entries in MODEL_LIST).
    mdl = AutoModelForCausalLM.from_pretrained(
        model_name, weights_only=False, trust_remote_code=True
    )
    # Fix: the original unconditionally ran mdl.to("cuda") / device=0, which
    # raises on CPU-only hosts (e.g. a ZeroGPU Space outside @spaces.GPU).
    # Fall back to CPU when CUDA is unavailable; behavior on GPU hosts is
    # unchanged.
    import torch  # local import: torch is already a declared requirement

    device = 0 if torch.cuda.is_available() else -1
    mdl.to("cuda" if device == 0 else "cpu")
    return pipeline("text-generation", model=mdl, tokenizer=tok, device=device)
|
| 35 |
|
| 36 |
@spaces.GPU
|
| 37 |
def suggest_next(text, model_name, k, m):
|
| 38 |
"""
|
| 39 |
+
使用 Beam Search 產生 M 條最可能的下段建議,並一次更新候選列表,最後將簡體字轉為繁體字。
|
| 40 |
"""
|
| 41 |
gen_pipe = get_pipeline(model_name)
|
| 42 |
outs = gen_pipe(
|
|
|
|
| 47 |
do_sample=False,
|
| 48 |
early_stopping=True
|
| 49 |
)
|
| 50 |
+
# 提取並清理生成內容
|
| 51 |
suggestions = [out["generated_text"][len(text):].strip() for out in outs]
|
| 52 |
suggestions = [s for s in suggestions if s]
|
| 53 |
+
# 簡體轉繁體
|
| 54 |
+
suggestions = [cc.convert(s) for s in suggestions]
|
| 55 |
|
|
|
|
| 56 |
return update(choices=suggestions, value=None)
|
| 57 |
|
| 58 |
def append_suggestion(current, choice):
|
|
|
|
| 61 |
# 模擬輸入法候選選中
|
| 62 |
return current + choice
|
| 63 |
|
| 64 |
+
# 自訂 CSS:模擬經典中文輸入法候選欄樣式
|
| 65 |
custom_css = """
|
| 66 |
#suggestions-bar .candidate-list {
|
| 67 |
display: flex;
|
|
|
|
| 91 |
with gr.Blocks(css=custom_css) as demo:
|
| 92 |
# 標題和說明
|
| 93 |
gr.Markdown(
|
| 94 |
+
"## 🇹🇼 繁體中文輸入法加速器 \n"
|
| 95 |
"結合小型語言模型與 ZeroGPU,即時 IME 風格候選條。"
|
| 96 |
)
|
| 97 |
|
requirements.txt
CHANGED
|
@@ -2,4 +2,5 @@ gradio>=5.0.0
|
|
| 2 |
torch>=2.1.2,<2.6.0
|
| 3 |
transformers>=4.30.0
|
| 4 |
accelerate
|
| 5 |
-
sentencepiece
|
|
|
|
|
|
| 2 |
torch>=2.1.2,<2.6.0
|
| 3 |
transformers>=4.30.0
|
| 4 |
accelerate
|
| 5 |
+
sentencepiece
|
| 6 |
+
opencc-python-reimplemented
|