import os
import threading

import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer, BitsAndBytesConfig

TITLE    = os.getenv("SPACE_TITLE", "LanguageBridge — Math Fast Agent (Phi-3.5)")
MODEL_ID = os.getenv("MODEL_ID", "microsoft/phi-3.5-mini-instruct")
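# Both settings are plain environment variables, so they can be overridden at
# deploy time without touching the code, e.g. (illustrative model id only):
#   MODEL_ID=Qwen/Qwen2.5-Math-7B-Instruct SPACE_TITLE="My Math Agent" python app.py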

SYSTEM = (
    "You are a teaching assistant for math and rule-based reasoning. Principles: "
    "1) first list the necessary steps; 2) then give the final answer; "
    "3) never make things up; if the information is insufficient, say so explicitly."
)

def load_llm():
    # Prefer 4-bit NF4 quantization so the model fits in modest GPU memory.
    bnb = BitsAndBytesConfig(
        load_in_4bit=True, bnb_4bit_quant_type="nf4",
        bnb_4bit_use_double_quant=True,
        bnb_4bit_compute_dtype=(
            torch.bfloat16
            if torch.cuda.is_available() and torch.cuda.is_bf16_supported()
            else torch.float16
        ),
    )
    kwargs = dict(device_map="auto", quantization_config=bnb, trust_remote_code=False)
    try:
        model = AutoModelForCausalLM.from_pretrained(MODEL_ID, **kwargs)
    except Exception as e:
        # bitsandbytes unavailable (e.g. a CPU-only Space): fall back to fp16/fp32.
        print("[4-bit failed] → fallback:", e)
        kwargs.pop("quantization_config", None)
        model = AutoModelForCausalLM.from_pretrained(
            MODEL_ID,
            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
            device_map="auto" if torch.cuda.is_available() else None,
            trust_remote_code=False,
        )
    tok = AutoTokenizer.from_pretrained(MODEL_ID, use_fast=True)
    if tok.pad_token is None:
        tok.pad_token = tok.eos_token  # Phi tokenizers ship without a pad token
    tok.padding_side = "left"
    if torch.cuda.is_available():
        torch.backends.cuda.matmul.allow_tf32 = True  # faster matmuls on Ampere+
    model.config.use_cache = True
    return tok, model

tokenizer, llm = load_llm()
llm.eval()
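# Optional sanity check (a sketch, not in the original app): the resident size of
# the weights makes it obvious whether the 4-bit path or the fallback was taken.
print(f"[load] weights resident size: {llm.get_memory_footprint() / 1e9:.2f} GB")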

def format_prompt(q: str) -> str:
    return f"{SYSTEM}\n\nProblem: {q}\nAnswer following the principles above:"

@torch.inference_mode()
def stream_answer(q, mx=192, temp=0.1, top_p=0.9):
    prompt = format_prompt(q)
    inputs = tokenizer(prompt, return_tensors="pt").to(llm.device)
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    do_sample = float(temp) > 0
    gen = dict(
        **inputs, streamer=streamer, max_new_tokens=int(mx),
        do_sample=do_sample,
        eos_token_id=tokenizer.eos_token_id, pad_token_id=tokenizer.pad_token_id,
    )
    if do_sample:  # sampling knobs trigger warnings when passed in greedy mode
        gen.update(temperature=float(temp), top_p=float(top_p))
    # Run generate() in a background thread; the streamer yields text as it arrives.
    t = threading.Thread(target=llm.generate, kwargs=gen)
    t.start()
    buf = ""
    for piece in streamer:
        buf += piece
        yield buf
    t.join()
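
# Non-streaming convenience wrapper (a sketch, not part of the original interface):
# drains the generator and returns only the final string, which is handy for
# tests or batch evaluation.
def answer(q: str, **kw) -> str:
    out = ""
    for out in stream_answer(q, **kw):
        pass
    return out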

def warmup():
    # One tiny generation at startup so the first real request is not slowed
    # down by lazy CUDA initialization and kernel caching.
    try:
        _ = list(stream_answer("First three significant digits of π?", mx=32))[-1]
        print("[warmup] done")
    except Exception as e:
        print("[warmup] skip:", e)

with gr.Blocks(title=TITLE, theme="soft") as demo:
    gr.Markdown(f"## {TITLE}\nModel: `{MODEL_ID}` | Tip: keep questions short; steps first, then the answer (output is streamed)")
    q    = gr.Textbox(label="Math / rule-based problem (LaTeX welcome)", placeholder="e.g. f(x)=(x^2+1)e^x, find f'(x)", lines=3)
    mx   = gr.Slider(64, 512, value=192, step=32, label="max_new_tokens")
    temp = gr.Slider(0.0, 0.8, value=0.1, step=0.05, label="temperature")
    top  = gr.Slider(0.6, 1.0, value=0.9, step=0.01, label="top_p")
    go   = gr.Button("Compute 🚀", variant="primary")
    out  = gr.Textbox(label="Step-by-step output", lines=14)
    clr  = gr.Button("Clear")

    # stream_answer is a generator, so Gradio streams partial output into `out`.
    go.click(stream_answer, inputs=[q, mx, temp, top], outputs=out)
    clr.click(lambda: "", outputs=out)

demo.queue()
warmup()

if __name__ == "__main__":
    demo.launch(share=False, server_name="0.0.0.0", server_port=7860, show_error=True)