File size: 2,812 Bytes
dec1173
 
 
 
 
5dafbcb
dec1173
5dafbcb
 
df9191a
5dafbcb
dec1173
 
5dafbcb
df9191a
dec1173
 
 
 
 
 
 
 
 
 
 
 
5dafbcb
dec1173
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5dafbcb
24acbfe
 
dec1173
 
5dafbcb
dec1173
 
 
 
 
 
 
24acbfe
dec1173
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Fine-tuned math-tutor checkpoint used for generation.
MODEL_ID = "newtechdevng/math-tutor-smollm2-360M"
# Base instruct model: its tokenizer (and chat special tokens) are reused,
# since the fine-tune shares SmolLM2's vocabulary.
BASE_ID  = "HuggingFaceTB/SmolLM2-360M-Instruct"

print("Loading tokenizer...")
tokenizer = AutoTokenizer.from_pretrained(BASE_ID)

print("Loading model...")
# NOTE(review): float16 weights with device_map="cpu" is unusual — CPU
# inference typically runs float32 (or bfloat16 where supported); fp16 on
# CPU can be slow or unsupported for some ops. Confirm this is intentional.
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.float16,
    device_map="cpu"
)
# Inference-only: disables dropout etc.
model.eval()
print("Ready!")

# System persona injected at the top of every chat prompt; the per-request
# class level is appended to it inside solve().
SYSTEM_PROMPT = (
    "You are an expert math teacher for students from Class 6 to Class 10. "
    "Solve problems step by step, show all working, and explain clearly."
)

def solve(question, class_level, history):
    """Generate a step-by-step solution and append it to the chat history.

    Args:
        question: The student's math question; ignored if blank/whitespace.
        class_level: Selected class ("6"–"10") from the dropdown.
        history: Mutable list of (question, answer) tuples backing the Chatbot.

    Returns:
        (updated_history, "") — the empty string clears the input textbox.
    """
    if not question.strip():
        return history, ""

    # Build the ChatML-style prompt by hand (matches SmolLM2's chat template).
    # str() guards the concatenation in case class_level ever arrives non-str.
    prompt = (
        "<|im_start|>system\n"
        + SYSTEM_PROMPT
        + " You are helping a Class " + str(class_level) + " student.<|im_end|>\n"
        + "<|im_start|>user\n"
        + question
        + "<|im_end|>\n<|im_start|>assistant\n"
    )
    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=350,
            temperature=0.1,   # near-greedy sampling keeps working steps stable
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
        )
    # Decode only the newly generated tokens. Slicing by the prompt's token
    # length is robust, unlike string-splitting the full decode on the
    # assistant marker (which breaks if the model echoes that marker in its
    # answer, and leaks prompt special tokens into the result).
    new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    answer = tokenizer.decode(new_tokens, skip_special_tokens=False)
    answer = answer.replace("<|im_end|>", "").strip()

    # Simple tuple format — works across all Gradio versions.
    history.append((question, answer))
    return history, ""

with gr.Blocks(title="Math Tutor Class 6-10") as app:
    gr.Markdown("# πŸŽ“ Math Tutor β€” Class 6 to 10")
    gr.Markdown("Ask any math question and get step-by-step solutions!")
    with gr.Row():
        class_level = gr.Dropdown(
            choices=["6", "7", "8", "9", "10"],
            value="8", label="Select Class", scale=1
        )
    chatbot = gr.Chatbot(label="Math Solutions", height=450)
    with gr.Row():
        question = gr.Textbox(
            label="Your Question",
            placeholder="e.g. Solve 3x - 7 = 14. Show all steps.",
            lines=2, scale=4
        )
        btn = gr.Button("Solve", variant="primary", scale=1)
    gr.Examples(
        examples=[
            ["Find the LCM of 12, 18 and 24.", "6"],
            ["Solve: 3x - 7 = 14", "8"],
            ["Find roots of x2 - 5x + 6 = 0", "10"],
            ["A train travels 360 km in 4 hours. What is its speed?", "7"],
            ["Find the area of a triangle with base 10 cm and height 6 cm.", "9"],
        ],
        inputs=[question, class_level]
    )
    state = gr.State([])
    btn.click(solve, [question, class_level, state], [chatbot, question])
    question.submit(solve, [question, class_level, state], [chatbot, question])

app.launch()