iamabhayaditya commited on
Commit
50f1271
·
verified ·
1 Parent(s): 0420653

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +92 -0
app.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os

import gradio as gr
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# 1. Download (or locate in the local HF cache) the GGUF weights from the
#    model repository on the Hub.
print("Downloading/Locating model from Hugging Face...")
model_path = hf_hub_download(
    repo_id="iamabhayaditya/EfficientMath-AI",
    filename="Meta-Llama-3.1-8B.Q4_K_M.gguf",
)

# 2. Load the model with llama.cpp (CPU-only friendly, suits free-tier Spaces).
print("Loading model into memory...")
llm = Llama(
    model_path=model_path,
    n_ctx=2048,  # context window in tokens
    # Match the thread count to the actual core count instead of a
    # hard-coded 4 (free-tier Spaces CPUs vary); fall back to 4 when the
    # core count cannot be determined.
    n_threads=os.cpu_count() or 4,
)
20
# 3. Streaming inference function with a polite fallback for end-users.
def solve_math_problem(question):
    """Stream a step-by-step solution for *question*.

    Yields the full text generated so far after each new token, so the UI
    textbox fills in progressively. On any failure the real error goes to
    the server log and a friendly message is yielded to the user instead.
    """
    try:
        prompt = (
            "Below is a math word problem. Solve it step by step and "
            f"provide the final answer.\n\n### Problem:\n{question}\n\n### Solution:\n"
        )

        token_stream = llm(
            prompt,
            max_tokens=256,
            temperature=0.2,
            top_p=0.9,
            stream=True,
            stop=["<|end_of_text|>", "</s>", "<|eot_id|>"],
        )

        answer_so_far = ""
        for chunk in token_stream:
            answer_so_far += chunk["choices"][0]["text"]
            yield answer_so_far

    except Exception as err:
        # Log the actual technical error server-side for debugging.
        print(f"Server Error: {str(err)}")
        # Surface only a safe, friendly message to the end-user.
        yield "Oops! I encountered a slight issue calculating that problem. Could you please try again or rephrase the question?"
45
# 4. Black & orange theme overrides, layered on top of the Monochrome theme.
custom_css = """
.gradio-container { background-color: #000000 !important; }
.markdown-text h1 { color: #ff7f00 !important; }
.markdown-text p { color: #cccccc !important; }
textarea {
    border: 2px solid #ff7f00 !important;
    background-color: #111111 !important;
    color: #ffffff !important;
}
button.primary {
    background: linear-gradient(90deg, #ff7f00, #ffaa00) !important;
    border: none !important;
    color: black !important;
    font-weight: bold !important;
}
span.svelte-1gfkn6j, .label { color: #ff7f00 !important; }
"""
64
# 5. Assemble the two-column UI: question entry on the left, streamed
#    solution on the right.
with gr.Blocks(theme=gr.themes.Monochrome(), css=custom_css) as app:
    gr.Markdown("<h1 style='text-align: center; margin-top: 20px;'>EfficientMath-AI</h1>")
    gr.Markdown("<p style='text-align: center;'>This is a custom fine-tuned Llama 3.1 8B model, trained to solve grade school math word problems.</p>")

    with gr.Row():
        # Left column: question input, clickable examples, action buttons.
        with gr.Column(scale=1):
            question_box = gr.Textbox(lines=5, placeholder="Enter a math word problem here...", label="Question")

            gr.Examples(
                examples=[
                    "A bag containing 30 apples weighs 6 kg. How much will 1080 apples weigh?",
                    "If the cost of 18 apples is 90 rupees, what is the cost of 24 apples?",
                    "Abhay has 16 apples, he borrowed 5 from Akash then gave 14 to Shivam. How many apples is Abhay left with?",
                ],
                inputs=question_box,
                label="Click an example below to test:",
            )

            with gr.Row():
                reset_btn = gr.ClearButton([question_box])
                send_btn = gr.Button("Submit", variant="primary")

        # Right column: the model's streamed, read-only answer.
        with gr.Column(scale=1):
            solution_box = gr.Textbox(label="Model Solution", lines=5, max_lines=50, interactive=False)

    # Wire the submit button to the streaming generator defined above.
    send_btn.click(fn=solve_math_problem, inputs=question_box, outputs=solution_box)

# Launch natively on Hugging Face Spaces (no debug mode, no share link needed).
app.launch()