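"""Math Tutor AI: a Gradio chat demo around newtechdevng/qwen-math-tutor,
a fine-tuned Qwen2.5-Math-1.5B checkpoint, loaded with transformers on CPU."""
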
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# ── Load Model ─────────────────────────────────────────────
print("Loading model...")
MODEL_ID = "newtechdevng/qwen-math-tutor"

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype = torch.float32,  # float16 kernels are spotty on CPU; use full precision
    device_map  = "cpu",
)
model.eval()
print("โœ… Model loaded!")

# ── Inference Function ─────────────────────────────────────
def solve(question, history):
    messages = [
        {"role": "system", "content": "You are a helpful math tutor. Solve problems step by step, showing all working clearly."},
    ]
    for msg in history:
        if msg["role"] in ("user", "assistant"):
            messages.append({"role": msg["role"], "content": msg["content"]})
    messages.append({"role": "user", "content": question})

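    # Render the message list with the tokenizer's chat template, adding the
    # generation prompt so the model begins a fresh assistant turn.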
    text = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    inputs = tokenizer(text, return_tensors="pt")

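    # Sample with a low temperature: close to greedy decoding, so the
    # step-by-step solutions stay stable across runs.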
    with torch.no_grad():
        out = model.generate(
            **inputs,
            max_new_tokens = 512,
            temperature    = 0.1,
            do_sample      = True,
            pad_token_id   = tokenizer.eos_token_id,
        )
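    # Decode only the newly generated tokens, skipping the echoed prompt.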
    return tokenizer.decode(
        out[0][inputs["input_ids"].shape[1]:],
        skip_special_tokens=True
    )

# ── UI ─────────────────────────────────────────────────────
with gr.Blocks(title="Math Tutor AI", theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
    # ๐Ÿงฎ Math Tutor AI
    ### Powered by Qwen2.5-Math โ€” Fine-tuned for NCERT & competitive math
    ---
    """)

    # Store history as state
    history_state = gr.State([])

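    # The default (tuples-format) Chatbot displays (user, assistant) pairs;
    # to_pairs() below converts the stored message dicts for display.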
    chatbot = gr.Chatbot(height=450)

    with gr.Row():
        question = gr.Textbox(
            placeholder = "Type your math question here...",
            label       = "Your Question",
            scale       = 4,
        )
        submit = gr.Button("Solve โ†’", variant="primary", scale=1)

    clear = gr.Button("🗑️ Clear Chat")

    gr.Examples(
        label    = "Try these examples:",
        examples = [
            "Solve: 2xยฒ - 7x + 3 = 0",
            "Find the area of a triangle with base 12 cm and height 8 cm.",
            "If sin ฮธ = 3/5, find cos ฮธ and tan ฮธ.",
            "A train travels 360 km in 4 hours. Find its speed in m/s.",
            "Find the compound interest on โ‚น5000 at 10% per annum for 2 years.",
            "Prove that โˆš2 is irrational.",
        ],
        inputs = question,
    )

    gr.Markdown("*Model: Qwen2.5-Math-1.5B fine-tuned on 10K math problems*")

    # ── Event Handlers ─────────────────────────────────────
    def to_pairs(history):
        # Collapse the role/content message list into (user, assistant)
        # tuples, the pair format the Chatbot component displays.
        return [
            (history[i]["content"],
             history[i + 1]["content"] if i + 1 < len(history) else "")
            for i in range(0, len(history), 2)
        ]

    def respond_and_display(question, history):
        if not question.strip():
            return "", history, to_pairs(history)
        answer = solve(question, history)
        history = history + [
            {"role": "user",      "content": question},
            {"role": "assistant", "content": answer},
        ]
        return "", history, to_pairs(history)

    def clear_all():
        return "", [], []

    submit.click(
        respond_and_display,
        [question, history_state],
        [question, history_state, chatbot]
    )
    question.submit(
        respond_and_display,
        [question, history_state],
        [question, history_state, chatbot]
    )
    clear.click(clear_all, outputs=[question, history_state, chatbot])

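# If hosting for multiple users (e.g. on a Space), consider demo.queue()
# before launch() so requests to the single CPU-bound model are serialized.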
demo.launch()