import gradio as gr
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# 1. Download (or locate in the local HF cache) the GGUF weights from the
#    model repository.
print("Downloading/Locating model from Hugging Face...")
model_path = hf_hub_download(
    repo_id="iamabhayaditya/EfficientMath-AI",
    filename="Meta-Llama-3.1-8B.Q4_K_M.gguf",
)

# 2. Load the model with llama.cpp (settings sized for a free CPU Space).
print("Loading model into memory...")
llm = Llama(
    model_path=model_path,
    n_ctx=2048,    # context window in tokens
    n_threads=4,   # matches the free-tier CPU allocation
)


# 3. Prediction function with a polite fallback on failure.
def solve_math_problem(question):
    """Stream a step-by-step solution for a math word problem.

    Yields the accumulated generated text after every token so the Gradio
    output Textbox updates live. On any backend failure the technical error
    is printed to the server logs and a friendly fallback message is yielded
    to the end-user instead.

    Args:
        question: The math word problem entered by the user.

    Yields:
        str: Progressively longer partial solutions, or a fallback message.
    """
    try:
        prompt = (
            "Below is a math word problem. Solve it step by step and "
            "provide the final answer.\n\n"
            f"### Problem:\n{question}\n\n### Solution:\n"
        )
        stream = llm(
            prompt,
            max_tokens=256,
            temperature=0.2,   # low temperature for deterministic arithmetic
            top_p=0.9,
            stream=True,
            # FIX: the original stop list contained an empty string "",
            # which is not a valid stop sequence and can truncate generation
            # immediately; only the real Llama 3 end-of-text markers remain.
            stop=["<|end_of_text|>", "<|eot_id|>"],
        )
        generated_text = ""
        for output in stream:
            generated_text += output["choices"][0]["text"]
            yield generated_text
    except Exception as e:
        # Print the actual technical error to the server logs for debugging.
        print(f"Server Error: {str(e)}")
        # Yield a safe, friendly message to the end-user.
        yield (
            "Oops! I encountered a slight issue calculating that problem. "
            "Could you please try again or rephrase the question?"
        )


# 4. Black & orange custom UI overrides (CSS is whitespace-insensitive).
custom_css = """
.gradio-container { background-color: #000000 !important; }
.markdown-text h1 { color: #ff7f00 !important; }
.markdown-text p { color: #cccccc !important; }
textarea {
    border: 2px solid #ff7f00 !important;
    background-color: #111111 !important;
    color: #ffffff !important;
}
button.primary {
    background: linear-gradient(90deg, #ff7f00, #ffaa00) !important;
    border: none !important;
    color: black !important;
    font-weight: bold !important;
}
span.svelte-1gfkn6j, .label { color: #ff7f00 !important; }
"""

with gr.Blocks(theme=gr.themes.Monochrome(), css=custom_css) as app:
    # FIX: the original had a bare gr.Markdown(" whose single-quoted string
    # spanned several physical lines — a SyntaxError. The banner is restored
    # as a proper triple-quoted Markdown block; the h1 heading is styled
    # orange by custom_css above.
    gr.Markdown(
        """
        # EfficientMath AI
        This is a custom fine-tuned Llama 3.1 8B model, trained to solve grade school math word problems.
        """
    )
    with gr.Row():
        with gr.Column(scale=1):
            user_input = gr.Textbox(
                lines=5,
                placeholder="Enter a math word problem here...",
                label="Question",
            )
            gr.Examples(
                examples=[
                    "A bag containing 30 apples weighs 6 kg. How much will 1080 apples weigh?",
                    "If the cost of 18 apples is 90 rupees, what is the cost of 24 apples?",
                    "Abhay has 16 apples, he borrowed 5 from Akash then gave 14 to Shivam. How many apples is Abhay left with?",
                ],
                inputs=user_input,
                label="Click an example below to test:",
            )
            with gr.Row():
                clear_btn = gr.ClearButton([user_input])
                submit_btn = gr.Button("Submit", variant="primary")
        with gr.Column(scale=1):
            model_output = gr.Textbox(
                label="Model Solution",
                lines=5,
                max_lines=50,
                interactive=False,
            )

    # Wire the generator function so the output streams into the Textbox.
    submit_btn.click(fn=solve_math_problem, inputs=user_input, outputs=model_output)

# Launch natively in Hugging Face Spaces (no debug mode, no share link needed).
app.launch()