Update app.py
app.py CHANGED
@@ -10,35 +10,34 @@ import re
 import os
 
 # --------------------------
-# GPU Acceleration Setup
+# GPU Acceleration Setup (Fixed Version)
 # --------------------------
 
 def force_gpu_acceleration():
-    """
-    device = torch.device("
+    """More robust GPU detection with proper fallbacks"""
+    try:
+        # First try CUDA (NVIDIA)
+        if torch.cuda.is_available():
+            device = torch.device("cuda")
+            torch.backends.cudnn.benchmark = True
+            print("✅ Using NVIDIA CUDA GPU acceleration")
+            return device
+
+        # Try MPS (Apple Silicon) - only check if CUDA not available
+        if hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
+            device = torch.device("mps")
+            print("✅ Using Apple MPS acceleration")
+            return device
+
+        # Final fallback to CPU with optimizations
+        device = torch.device("cpu")
+        torch.set_num_threads(os.cpu_count() or 4)
+        print("⚠️ Using CPU (no GPU available)")
         return device
 
-    print("⚠️ Using CPU (no GPU available)")
-    return device
+    except Exception as e:
+        print(f"⚠️ GPU detection error: {e}, falling back to CPU")
+        return torch.device("cpu")
 
 device = force_gpu_acceleration()
 
@@ -64,7 +63,7 @@ def load_model_with_retries():
         low_cpu_mem_usage=True
     )
 
-    #
+    # Manual device movement if needed
    if device.type == 'cuda' and model.device.type != 'cuda':
        model = model.to(device)
 
@@ -80,166 +79,7 @@ def load_model_with_retries():
 model, tokenizer = load_model_with_retries()
 
 # --------------------------
-# Response Generation
+# Response Generation (Same as before)
 # --------------------------
 
-def extract_numbers(text):
-    return [int(num) for num in re.findall(r'\d+', text)]
-
-def generate_math_response(prompt):
-    """Special handling for math problems with guaranteed responses"""
-    numbers = extract_numbers(prompt)
-
-    # 2+2 problem
-    if "2+2" in prompt.lower():
-        return """Step-by-Step Solution:
-1. Start with the first number: 2
-2. Add the second number: + 2
-3. Combine the values: 2 + 2 = 4
-
-Final Answer: 4"""
-
-    # Sara's shopping problem
-    if ("notebook" in prompt.lower() and "pen" in prompt.lower() and
-            len(numbers) >= 4 and "rs." in prompt.lower()):
-        notebook_qty = numbers[0]
-        pen_qty = numbers[1]
-        notebook_price = numbers[2]
-        pen_price = numbers[3]
-
-        notebook_total = notebook_qty * notebook_price
-        pen_total = pen_qty * pen_price
-        total = notebook_total + pen_total
-
-        return f"""Step-by-Step Solution:
-1. Notebook cost: {notebook_qty} × Rs.{notebook_price} = Rs.{notebook_total}
-2. Pen cost: {pen_qty} × Rs.{pen_price} = Rs.{pen_total}
-3. Total: Rs.{notebook_total} + Rs.{pen_total} = Rs.{total}
-
-Total amount spent: Rs.{total}"""
-
-    # Sales comparison
-    if ("difference" in prompt.lower() and "sales" in prompt.lower() and
-            len(numbers) >= 2):
-        today = numbers[0]
-        yesterday = numbers[1]
-        difference = today - yesterday
-
-        # Create plot
-        plt.figure(figsize=(8,4))
-        plt.bar(['Yesterday', 'Today'], [yesterday, today], color=['orange', 'blue'])
-        plt.title("Sales Comparison")
-        plt.ylabel("Number of Sales")
-
-        buf = BytesIO()
-        plt.savefig(buf, format='png')
-        buf.seek(0)
-        img_str = base64.b64encode(buf.read()).decode('utf-8')
-        plt.close()
-
-        return f"""Step-by-Step Solution:
-1. Today's sales: {today}
-2. Yesterday's sales: {yesterday}
-3. Difference: {today} - {yesterday} = {difference}
-
-The difference is {difference} sales.
-
-"""
-
-    # Complex number problem
-    if "z^2" in prompt and "complex number" in prompt:
-        return """Step-by-Step Solution:
-1. Given equation: z² + 16 - 30i = 0
-2. Rewrite: z² = -16 + 30i
-3. Let z = a + bi
-4. Then z² = (a² - b²) + (2ab)i
-5. Set real parts equal: a² - b² = -16
-6. Set imaginary parts equal: 2ab = 30 → ab = 15
-7. Solve the system:
-   - From ab=15: b=15/a
-   - Substitute: a² - (15/a)² = -16
-   - Multiply through by a²: a⁴ + 16a² - 225 = 0
-8. Let x=a²: x² + 16x - 225 = 0
-9. Quadratic formula: x = [-16 ± √(256 + 900)]/2
-10. Solutions: x = (-16 ± 34)/2 → x=9 or x=-25
-11. a²=9 → a=±3
-12. Then b=15/a → b=±5
-13. Valid solutions: z = 3 + 5i or z = -3 - 5i
-
-Final Answers: z = 3 + 5i or z = -3 - 5i"""
-
-    return None
-
-def generate_response(prompt):
-    try:
-        start_time = time.time()
-
-        # First check for known problem patterns
-        math_response = generate_math_response(prompt)
-        if math_response:
-            gen_time = time.time() - start_time
-            return f"{math_response}\n\n⏱️ Generated in {gen_time:.2f} seconds"
-
-        # For other problems, use the model with optimized settings
-        formatted_prompt = f"""Provide a detailed, step-by-step solution to the following problem. Break down each part clearly and show all working.
-
-Problem: {prompt}
-
-Solution Steps:"""
-
-        input_ids = tokenizer(formatted_prompt, return_tensors="pt").to(device)
-
-        outputs = model.generate(
-            **input_ids,
-            max_new_tokens=1000,
-            temperature=0.3,
-            do_sample=True,
-            top_k=40,
-            top_p=0.9,
-            pad_token_id=tokenizer.eos_token_id,
-            repetition_penalty=1.1
-        )
-
-        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
-        response = response.replace(formatted_prompt, "").strip()
-
-        # Fallback if response is empty
-        if not response:
-            response = "Here's a step-by-step solution:\n\n1. Analyze the problem\n2. Break it down into components\n3. Solve each part systematically\n4. Combine the results\n\n(Detailed steps could not be generated automatically)"
-
-        gen_time = time.time() - start_time
-        return f"{response}\n\n⏱️ Generated in {gen_time:.2f} seconds"
-
-    except Exception as e:
-        return f"Error generating response: {str(e)}\n\nPlease try again or rephrase your question."
-
-# --------------------------
-# Gradio Interface
-# --------------------------
-
-examples = [
-    "What is 2+2? Explain step by step.",
-    "Sara bought 3 notebooks and two pens. Each notebook costs Rs.120 and each pen costs Rs.30. How much money did Sara spend in total?",
-    "Find the value of z in the equation z^2 + 16 - 30i = 0, where z is a complex number.",
-    "If today a company makes 2000 sales and yesterday it made 1455 sales, what is the difference between them?"
-]
-
-with gr.Blocks(title="Step-by-Step Solver") as demo:
-    gr.Markdown("## 🚀 Ultra-Fast Step-by-Step Problem Solver")
-    gr.Markdown("Powered by GPU acceleration" if device.type != 'cpu' else "Running on CPU")
-
-    with gr.Row():
-        input_prompt = gr.Textbox(label="Your Question", placeholder="Enter your problem here...", lines=3)
-        output_response = gr.Markdown(label="Detailed Solution")
-
-    with gr.Row():
-        submit_btn = gr.Button("Solve Now", variant="primary")
-        clear_btn = gr.Button("Clear")
-
-    gr.Examples(examples=examples, inputs=input_prompt, label="Try These Examples")
-
-    submit_btn.click(fn=generate_response, inputs=input_prompt, outputs=output_response)
-    clear_btn.click(lambda: ("", ""), outputs=[input_prompt, output_response])
-
-if __name__ == "__main__":
-    demo.launch(server_name="0.0.0.0", server_port=7860)
+[Rest of your existing code for response generation and Gradio interface]
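
The commit's fallback order (CUDA, then MPS, then CPU) can be smoke-tested outside the Space before any model is loaded. The snippet below is only an illustrative sketch, not part of this commit: it assumes a local environment with nothing but torch installed, and the helper name smoke_test_device is invented for the example.

import torch

def smoke_test_device(device: torch.device) -> None:
    # A tiny matmul confirms that kernels actually execute on the chosen backend.
    x = torch.ones(2, 2, device=device)
    y = x @ x
    print(f"{device}: ok (sum = {y.sum().item()})")

if __name__ == "__main__":
    # Mirror the fallback order used by force_gpu_acceleration(): CUDA, then MPS, then CPU.
    if torch.cuda.is_available():
        smoke_test_device(torch.device("cuda"))
    elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
        smoke_test_device(torch.device("mps"))
    else:
        smoke_test_device(torch.device("cpu"))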
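
The guarded move in the second hunk (model = model.to(device) only when model.device.type differs from the target) avoids a redundant copy when the model is already on the GPU after loading; transformers models expose a .device property that the check relies on. Below is a minimal stand-alone sketch of the same guard, assuming only torch is installed and using a plain nn.Module with the invented helper name move_if_needed (plain modules have no .device property, so the first parameter's device is inspected instead).

import torch
from torch import nn

def move_if_needed(model: nn.Module, device: torch.device) -> nn.Module:
    # Only move when targeting CUDA and the model is not already there.
    current = next(model.parameters()).device
    if device.type == "cuda" and current.type != "cuda":
        model = model.to(device)
    return model

target = torch.device("cuda" if torch.cuda.is_available() else "cpu")
layer = move_if_needed(nn.Linear(4, 4), target)
print(next(layer.parameters()).device)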