import os
import re

import gradio as gr
import torch
from gtts import gTTS
from transformers import AutoTokenizer, AutoModelForCausalLM

# Branding
TITLE = "🇧🇩 Polymath-Bengali-Tutor"
DESC = "A Neuro-Symbolic AI Tutor for Rural Education in Bangladesh"

# Load the model once at import time.  First attempt fp16 (halves GPU memory);
# if that fails on this host (e.g. no half-precision support) fall back to the
# default dtype.  device_map="auto" places the model on GPU when available,
# otherwise on CPU — downstream code must therefore use model.device, never a
# hard-coded "cuda".
model_id = "Qwen/Qwen2.5-1.5B-Instruct"
try:
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(
        model_id, torch_dtype=torch.float16, device_map="auto"
    )
except Exception:  # loading can raise many unrelated error types; never bare except
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")


def safe_calculator(text):
    """Try to solve *text* as a simple Bengali arithmetic word problem.

    Bengali numerals are normalised to ASCII digits, then the operation is
    picked by keyword matching (subtraction, division, multiplication,
    addition — in that priority order).

    Returns:
        (value, operation, numbers) where *operation* is the Bengali name of
        the detected operation, or (None, None, []) when the text does not
        look like a solvable arithmetic question.
    """
    bangla_digits = "ā§Ļā§§ā§¨ā§Šā§Ēā§Ģā§Ŧā§­ā§Žā§¯"
    english_digits = "0123456789"
    # One C-level pass converts every Bengali digit so the regex can find them.
    text = text.translate(str.maketrans(bangla_digits, english_digits))
    nums = [int(n) for n in re.findall(r'\d+', text)]
    if not nums:
        return None, None, []
    # NOTE(review): "-" also matches ordinary hyphens in text, not only minus
    # signs, which can misclassify a question as subtraction.
    if any(x in text for x in ["āĻŦāĻžāĻ•āĻŋ", "āĻŦāĻžāĻĻ", "āĻŦāĻŋā§Ÿā§‹āĻ—", "-", "āĻ–āϰāϚ"]):
        if len(nums) >= 2:
            return nums[0] - nums[1], "āĻŦāĻŋā§Ÿā§‹āĻ—", nums
    elif any(x in text for x in ["āĻ­āĻžāĻ—", "āĻ…āĻ‚āĻļ"]):
        # Fix: guard the divisor — the original raised ZeroDivisionError on 0.
        # Falling through returns (None, None, []) so the LLM handles it.
        if len(nums) >= 2 and nums[1] != 0:
            return nums[0] / nums[1], "āĻ­āĻžāĻ—", nums
    elif any(x in text for x in ["āϗ⧁āĻŖ", "āĻĻāĻžāĻŽ"]):
        if len(nums) >= 2:
            return nums[0] * nums[1], "āϗ⧁āĻŖ", nums
    elif any(x in text for x in ["āϝ⧋āĻ—", "āĻŽā§‹āϟ", "āĻĒ⧇āϞ"]):
        return sum(nums), "āϝ⧋āĻ—", nums
    return None, None, []


def ai_tutor(user_input):
    """Answer a student's question: symbolic math first, LLM as fallback.

    The verified arithmetic path templates a Bengali explanation from the
    computed result; anything the calculator cannot handle is sent to the
    chat model.  The answer is additionally voiced with gTTS (best-effort).

    Returns:
        (answer_text, audio_path) — audio_path is "voice.mp3" on TTS
        success, otherwise None.
    """
    val, op, nums = safe_calculator(user_input)
    if val is not None:
        # Deterministic path: explanation built from the verified numbers.
        if op == "āϝ⧋āĻ—":
            ans = f"āϏāĻ āĻŋāĻ• āωāĻ¤ā§āϤāϰ {val}āĨ¤ āĻ•āĻžāϰāĻŖ {nums[0]} āφāϰ {nums[1]} āϝ⧋āĻ— āĻ•āϰāϞ⧇ {val} āĻšā§ŸāĨ¤"
        elif op == "āĻŦāĻŋā§Ÿā§‹āĻ—":
            ans = f"āωāĻ¤ā§āϤāϰ {val}āĨ¤ {nums[0]} āĻĨ⧇āϕ⧇ {nums[1]} āĻŦāĻžāĻĻ āĻĻāĻŋāϞ⧇ {val} āĻĨāĻžāϕ⧇āĨ¤"
        elif op == "āϗ⧁āĻŖ":
            ans = f"āωāĻ¤ā§āϤāϰ {val} āϟāĻžāĻ•āĻžāĨ¤"
        elif op == "āĻ­āĻžāĻ—":
            ans = f"āωāĻ¤ā§āϤāϰ {val}āĨ¤"
        else:
            ans = f"āωāĻ¤ā§āϤāϰ āĻšāϞ⧋ {val}āĨ¤"
    else:
        # LLM fallback using the Qwen chat-template markers.
        inp = f"<|im_start|>user\n{user_input}<|im_end|>\n<|im_start|>assistant\n"
        # Fix: send tensors to wherever the model actually lives instead of a
        # hard-coded "cuda" — the fallback load path may be CPU-only.
        inputs = tokenizer([inp], return_tensors="pt").to(model.device)
        out = model.generate(
            inputs.input_ids,
            attention_mask=inputs.attention_mask,  # fix: correct padding semantics
            max_new_tokens=100,
        )
        # Keep only the assistant's portion of the decoded transcript.
        ans = tokenizer.decode(out[0], skip_special_tokens=True).split("assistant")[-1].strip()
    try:
        tts = gTTS(ans, lang='bn')
        tts.save("voice.mp3")
        return ans, "voice.mp3"
    except Exception:  # TTS is best-effort (offline hosts) — still return text
        return ans, None


# UI launch ('theme' argument previously removed to fix an error)
with gr.Blocks() as demo:
    gr.Markdown(f"# {TITLE}")
    gr.Markdown(f"### {DESC}")
    with gr.Row():
        inp = gr.Textbox(label="Question")
        btn = gr.Button("Ask Polymath 🧠", variant="primary")
    with gr.Row():
        out_txt = gr.Textbox(label="Answer")
        out_aud = gr.Audio(label="Voice", autoplay=True)
    btn.click(ai_tutor, inputs=[inp], outputs=[out_txt, out_aud])

demo.launch()