Upload folder using huggingface_hub
Browse files- README.md +1 -2
- app.py +94 -112
- requirements.txt +6 -2
README.md
CHANGED
|
@@ -9,5 +9,4 @@ app_file: app.py
|
|
| 9 |
pinned: false
|
| 10 |
---
|
| 11 |
|
| 12 |
-
|
| 13 |
-
若延遲偏高,可在介面取消勾選「啟用 LLM」,就只走 SymPy(即時回覆)。
|
|
|
|
| 9 |
pinned: false
|
| 10 |
---
|
| 11 |
|
| 12 |
+
混合路線:先用 SymPy 精準解(代數 / 化簡 / 微積分),必要時用 Phi LLM 補步驟與敘述。
|
|
|
app.py
CHANGED
|
@@ -1,141 +1,123 @@
|
|
| 1 |
-
|
|
|
|
| 2 |
import gradio as gr
|
| 3 |
import sympy as sp
|
| 4 |
-
from functools import lru_cache
|
| 5 |
-
|
| 6 |
-
# 允許用環境變數覆蓋
|
| 7 |
-
MODEL_ID = os.getenv("MODEL_ID", "microsoft/phi-2")
|
| 8 |
|
| 9 |
-
|
| 10 |
-
|
| 11 |
|
| 12 |
-
|
| 13 |
-
tok = None
|
| 14 |
|
| 15 |
-
def
|
| 16 |
-
global
|
| 17 |
-
if
|
| 18 |
-
return
|
| 19 |
-
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 20 |
-
kwargs = dict(torch_dtype=DTYPE, low_cpu_mem_usage=True, trust_remote_code=False)
|
| 21 |
-
if USE_CUDA:
|
| 22 |
-
kwargs["device_map"] = "auto"
|
| 23 |
-
kwargs["attn_implementation"] = "sdpa"
|
| 24 |
-
# 優先嘗試 4bit(若後端不支援會自動回退)
|
| 25 |
-
try:
|
| 26 |
-
kwargs.update(dict(
|
| 27 |
-
load_in_4bit=True,
|
| 28 |
-
bnb_4bit_compute_dtype=torch.float16,
|
| 29 |
-
bnb_4bit_quant_type="nf4",
|
| 30 |
-
bnb_4bit_use_double_quant=True,
|
| 31 |
-
))
|
| 32 |
-
except Exception:
|
| 33 |
-
pass
|
| 34 |
-
tok = AutoTokenizer.from_pretrained(MODEL_ID)
|
| 35 |
-
if tok.pad_token_id is None and tok.eos_token_id is not None:
|
| 36 |
-
tok.pad_token = tok.eos_token
|
| 37 |
-
model = AutoModelForCausalLM.from_pretrained(MODEL_ID, **kwargs)
|
| 38 |
-
model.eval()
|
| 39 |
try:
|
| 40 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 41 |
except Exception:
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
@lru_cache(maxsize=64)
|
| 45 |
-
def _looks_like_math(s: str) -> bool:
|
| 46 |
-
return bool(re.search(r"[=+\-*/^()]|sin|cos|tan|sqrt|\^|\d", s or ""))
|
| 47 |
|
| 48 |
-
def
|
| 49 |
q = (q or "").strip()
|
| 50 |
if not q:
|
| 51 |
-
return
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
lines = []
|
| 71 |
-
for i, s in enumerate(
|
| 72 |
-
lines.append("解 {}: "
|
| 73 |
return "\n".join(lines)
|
| 74 |
-
return "無解或需要更多條件。"
|
| 75 |
|
| 76 |
-
# 非方程:嘗試化簡 / 微分 / 積分建議
|
| 77 |
-
try:
|
| 78 |
expr = sp.sympify(q)
|
| 79 |
-
|
| 80 |
try:
|
| 81 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 82 |
except Exception:
|
| 83 |
pass
|
| 84 |
try:
|
| 85 |
x = list(expr.free_symbols)[0] if expr.free_symbols else sp.symbols("x")
|
| 86 |
-
|
| 87 |
-
|
| 88 |
except Exception:
|
| 89 |
pass
|
| 90 |
-
if
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
pass
|
| 94 |
-
return None
|
| 95 |
|
| 96 |
-
|
| 97 |
-
|
| 98 |
-
|
|
|
|
|
|
|
| 99 |
|
| 100 |
-
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
with torch.inference_mode():
|
| 104 |
-
out = model.generate(
|
| 105 |
-
**inputs,
|
| 106 |
-
max_new_tokens=max_new_tokens,
|
| 107 |
-
do_sample=False,
|
| 108 |
-
temperature=0.2,
|
| 109 |
-
top_p=0.9,
|
| 110 |
-
repetition_penalty=1.05,
|
| 111 |
-
use_cache=True,
|
| 112 |
-
eos_token_id=tok.eos_token_id,
|
| 113 |
-
pad_token_id=tok.eos_token_id,
|
| 114 |
-
)
|
| 115 |
-
return tok.decode(out[0], skip_special_tokens=True)
|
| 116 |
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
|
|
|
|
| 128 |
|
| 129 |
-
with gr.Blocks(title=
|
| 130 |
-
gr.Markdown("
|
| 131 |
-
q = gr.Textbox(lines=6, label="題目 / 算式(可含聯立)")
|
| 132 |
with gr.Row():
|
| 133 |
-
|
| 134 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 135 |
out = gr.Textbox(lines=12, label="輸出")
|
| 136 |
btn = gr.Button("送出 🚀")
|
| 137 |
-
btn.click(hybrid_solve, inputs=[q,
|
| 138 |
-
gr.Markdown("
|
| 139 |
|
| 140 |
-
|
| 141 |
-
demo.queue(concurrency_count=2).launch()
|
|
|
|
| 1 |
+
|
| 2 |
+
import os, re
|
| 3 |
import gradio as gr
|
| 4 |
import sympy as sp
|
|
|
|
|
|
|
|
|
|
|
|
|
| 5 |
|
| 6 |
+
TITLE = "LanguageBridge — Math Hybrid (Phi + SymPy)"
|
| 7 |
+
MODEL_ID = "microsoft/phi-2"
|
| 8 |
|
| 9 |
+
_pipe = None
|
|
|
|
| 10 |
|
| 11 |
+
def lazy_load_llm():
    """Load the Phi text-generation pipeline once and cache it in `_pipe`.

    Returns:
        The cached transformers pipeline on success, or ``None`` when
        torch/transformers are missing or the model fails to load — the
        caller then falls back to SymPy-only answers.
    """
    global _pipe
    if _pipe is not None:
        return _pipe
    try:
        import torch
        from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

        use_cuda = torch.cuda.is_available()
        tok = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
        if tok.pad_token_id is None and tok.eos_token_id is not None:
            # phi-2 ships without a pad token; reuse EOS so generate() can pad.
            tok.pad_token = tok.eos_token
        model = AutoModelForCausalLM.from_pretrained(
            MODEL_ID,
            # fp16 halves GPU memory; CPU inference requires fp32.
            torch_dtype=torch.float16 if use_cuda else torch.float32,
            device_map="cuda" if use_cuda else "cpu",
        )
        model.eval()
        # The model is already placed via device_map above, so `device=` must
        # NOT also be passed to pipeline(): transformers raises a ValueError
        # when both are given for an accelerate-dispatched model.
        _pipe = pipeline("text-generation", model=model, tokenizer=tok)
        return _pipe
    except Exception:
        # Best-effort: the LLM is optional; callers handle a None pipeline.
        return None
|
|
|
|
|
|
|
|
|
|
|
|
|
| 36 |
|
| 37 |
+
def solve_with_sympy(q: str) -> str:
    """Solve equations or analyse an expression exactly with SymPy.

    Accepts a single equation, a system separated by ``;`` or newlines,
    or a bare expression (which gets simplification, factoring,
    derivative and integral attempts).

    Args:
        q: User input; ``None``/empty yields a usage hint.

    Returns:
        A human-readable (Chinese) answer string.  Parse failures are
        reported with the "(SymPy 解析失敗)" prefix that hybrid_solve
        keys on to decide whether to fall back to the LLM.
    """
    q = (q or "").strip()
    if not q:
        return "請輸入算式或方程,例如:2x+5=11;或:sin(x)**2 + cos(x)**2;或:factor(x**2-9)"
    # The UI advertises inputs like "2x+5=11", which plain sympify rejects.
    # Enable implicit multiplication on top of the standard transformations
    # so teacher-style notation parses.
    from sympy.parsing.sympy_parser import (
        parse_expr,
        standard_transformations,
        implicit_multiplication_application,
    )
    _transforms = standard_transformations + (implicit_multiplication_application,)

    def _parse(text):
        # One place to parse so equations and bare expressions agree.
        return parse_expr(text, transformations=_transforms)

    try:
        if "=" in q:
            parts = [s.strip() for seg in q.split(";") for s in seg.split("\n")]
            eqs, syms = [], set()
            for s in parts:
                if not s:
                    continue
                left, right = s.split("=", 1)
                eq = sp.Eq(_parse(left), _parse(right))
                eqs.append(eq)
                # Eq.free_symbols already covers both sides; no rhs re-scan needed.
                syms |= eq.free_symbols
            if not syms:
                syms = {sp.symbols("x")}
            # Sort unknowns for a deterministic solve order — iterating a set
            # of Symbols is hash-order dependent and varies between runs.
            sols = sp.solve(eqs, sorted(syms, key=str), dict=True)
            if not sols:
                return "(SymPy)無解或需要更多條件。"
            lines = []
            for i, s in enumerate(sols, 1):
                lines.append(f"解 {i}: " + ", ".join([f"{k} = {sp.simplify(v)}" for k, v in s.items()]))
            return "\n".join(lines)

        # Not an equation: report simplification, factoring, derivative, integral.
        expr = _parse(q)
        out = []
        try:
            out.append(f"簡化:{sp.simplify(expr)}")
        except Exception:
            pass
        try:
            fctr = sp.factor(expr)
            if fctr != expr:
                out.append(f"因式分解:{fctr}")
        except Exception:
            pass
        try:
            # Pick any free symbol (or a default x) for calculus attempts.
            x = list(expr.free_symbols)[0] if expr.free_symbols else sp.symbols("x")
            out.append(f"對 {x} 微分:{sp.diff(expr, x)}")
            out.append(f"對 {x} 積分:{sp.integrate(expr, x)}")
        except Exception:
            pass
        return "\n".join(out) if out else f"結果:{expr}"
    except Exception as e:
        return f"(SymPy 解析失敗) {e}"
|
|
|
|
|
|
|
| 85 |
|
| 86 |
+
def hybrid_solve(q: str, max_new_tokens: int, temperature: float, top_p: float):
    """SymPy-first solver with an optional LLM fallback.

    Tries the exact SymPy path first; only when SymPy fails to parse the
    input (word problems, free text) is the Phi pipeline invoked.

    Args:
        q: Raw user input.
        max_new_tokens: Generation budget for the LLM fallback.
        temperature: Sampling temperature; 0 means greedy decoding.
        top_p: Nucleus-sampling cutoff (only used when sampling).

    Returns:
        The SymPy answer, or the LLM continuation with the prompt stripped.
    """
    q = (q or "").strip()
    sym = solve_with_sympy(q)
    # Prefix match is the contract with solve_with_sympy's failure message.
    if sym and not sym.startswith("(SymPy 解析失敗)"):
        return sym

    pipe = lazy_load_llm()
    if pipe is None:
        return sym + "\n\n(提示) LLM 尚未就緒或未安裝 torch/transformers,僅回傳 SymPy 嘗試結果。"

    prompt = f"請閱讀題目並用中文說明步驟,最後給出答案。\n題目:{q}\n"
    gen_kwargs = dict(
        max_new_tokens=max_new_tokens,
        repetition_penalty=1.05,
        pad_token_id=pipe.tokenizer.eos_token_id,
    )
    # transformers warns (and newer versions error) when sampling knobs are
    # supplied alongside do_sample=False, so only pass them when sampling.
    if temperature > 0:
        gen_kwargs.update(do_sample=True, temperature=temperature, top_p=top_p)
    else:
        gen_kwargs["do_sample"] = False
    outs = pipe(prompt, **gen_kwargs)
    txt = outs[0]["generated_text"]
    # pipeline() returns prompt + continuation; keep only the continuation.
    return txt[len(prompt):].strip()
|
| 108 |
|
| 109 |
+
# Gradio UI: one textbox in, one textbox out, with LLM knobs in an accordion.
with gr.Blocks(title=TITLE) as demo:
    gr.Markdown(f"## {TITLE}\n貼上文字題或算式:LLM 解析 → SymPy 精算(可聯立)")
    with gr.Row():
        q = gr.Textbox(lines=7, label="題目 / 算式(可含聯立方程)",
                       placeholder="例如:一個數加 5 等於 11,求此數;\n或:2x+5=11;或:sin(x)**2+cos(x)**2")
    with gr.Accordion("進階(LLM 生成)", open=False):
        mx_tok = gr.Slider(32, 256, value=128, step=8, label="max_new_tokens")
        temp = gr.Slider(0.0, 1.2, value=0.3, step=0.05, label="temperature")
        top_p = gr.Slider(0.1, 1.0, value=0.9, step=0.05, label="top_p")
    out = gr.Textbox(lines=12, label="輸出")
    btn = gr.Button("送出 🚀")
    btn.click(hybrid_solve, inputs=[q, mx_tok, temp, top_p], outputs=out)
    gr.Markdown("**小技巧**:先輸入方程/算式讓 SymPy 直接求;純文字題會呼叫 LLM 先轉譯再解。")

if __name__ == "__main__":
    # Gradio 4.x removed queue(concurrency_count=...) — with the pinned
    # gradio==4.44.1 it raises TypeError; the replacement knob is
    # default_concurrency_limit.
    demo.queue(default_concurrency_limit=1).launch()
|
|
|
requirements.txt
CHANGED
|
@@ -1,4 +1,8 @@
|
|
| 1 |
gradio==4.44.1
|
| 2 |
sympy>=1.12
|
| 3 |
-
|
| 4 |
-
transformers==4.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
gradio==4.44.1
|
| 2 |
sympy>=1.12
|
| 3 |
+
torch==2.1.2
|
| 4 |
+
transformers==4.41.2
|
| 5 |
+
accelerate==0.31.0
|
| 6 |
+
safetensors>=0.4.3
|
| 7 |
+
sentencepiece>=0.1.99
|
| 8 |
+
huggingface_hub>=0.24.0
|