# LLM explanation helpers: render a theorem database from YAML, build a tutoring
# prompt around the user's solution steps, and call an external LLM service.
import requests
import yaml
# === Load theorems.yaml and render it into clean context text for LLM ===
def load_theorem_context(yaml_path="theorems.yaml"):
    """Load theorems from a YAML file and render them as Markdown context for the LLM.

    Parameters
    ----------
    yaml_path : str
        Path to the theorems YAML file. Expected to hold a top-level mapping
        with a 'theorems' list; each entry may carry 'name', 'statement',
        'tags', 'when_to_use', and 'short_explanation' keys.

    Returns
    -------
    str
        Markdown blocks (one per theorem) separated by '---' lines, or a
        warning string when the YAML root is not a mapping.

    Raises
    ------
    OSError / yaml.YAMLError
        Propagated on unreadable or malformed files; the caller
        (explain_with_llm) catches these at its boundary.
    """
    # Explicit UTF-8: theorem text may contain non-ASCII math symbols, and the
    # platform default encoding (e.g. cp1252 on Windows) would raise on them.
    with open(yaml_path, 'r', encoding='utf-8') as f:
        data = yaml.safe_load(f)
    if not isinstance(data, dict):
        return "⚠️ Invalid theorems format in YAML."
    context_lines = []
    # `or []` also covers an explicit `theorems:` key with a null value,
    # which yaml.safe_load returns as None.
    for idx, th in enumerate(data.get('theorems') or [], 1):
        # Same null guard for `tags:`; a None would crash str.join.
        tags = ', '.join(th.get('tags') or [])
        context_lines.append(
            f"**Theorem {idx}: {th.get('name', 'Unnamed')}**\n"
            f"- **Statement**: {th.get('statement', 'N/A')}\n"
            f"- **Tags**: {tags}\n"
            f"- **When to Use**: {th.get('when_to_use', 'N/A')}\n"
            f"- **Short Explanation**: {th.get('short_explanation', 'N/A')}\n"
        )
        context_lines.append('---')
    return "\n".join(context_lines)
# === Build prompt using user steps and theorem context ===
def build_prompt(equation_type, solution_text, theorem_context):
    """Assemble the tutoring prompt sent to the LLM.

    Combines the rendered theorem database, the user's solution steps for an
    `equation_type` equation, and the task instructions into one Markdown
    prompt string.

    Parameters
    ----------
    equation_type : str
        Kind of equation the user solved (interpolated into the heading).
    solution_text : str
        The user's step-by-step solution.
    theorem_context : str
        Markdown theorem database, as produced by load_theorem_context.

    Returns
    -------
    str
        The full prompt text.
    """
    sections = [
        "You are a helpful math tutor. Below is a theorem reference database.\n\n",
        "Each theorem includes:\n",
        "- Name\n- Statement\n- Tags\n- When to use\n- Short Explanation\n\n",
        "---\n\n",
        "### 📘 Theorem Database:\n\n",
        f"{theorem_context}\n\n",
        "---\n\n",
        f"### 🧮 User Steps for solving a {equation_type} equation:\n\n",
        f"{solution_text}\n\n",
        "---\n\n",
        "### 🎯 Task:\n",
        "Explain each solution step clearly.\n",
        "Use relevant theorems by number or name.\n",
        "Make it understandable to a smart high school student.\n",
        "Focus on reasoning, not just restating the steps or theorems.",
    ]
    return "".join(sections)
# === Request LLM explanation ===
def explain_with_llm(solution_text, equation_type, llm_url, yaml_path="theorems.yaml"):
    """Ask the LLM service's /explain endpoint to explain the user's steps.

    Parameters
    ----------
    solution_text : str
        The user's step-by-step solution.
    equation_type : str
        Kind of equation solved (used in the prompt heading).
    llm_url : str
        Base URL of the LLM service (must start with http:// or https://).
    yaml_path : str
        Path to the theorems YAML file passed to load_theorem_context.

    Returns
    -------
    str
        The explanation on success; otherwise a human-readable error string
        prefixed with '❌'. Never raises — this is a UI boundary function.
    """
    try:
        # Require a full scheme: a bare "http" prefix check would also accept
        # strings like "httpfoo" that are not URLs at all.
        if not llm_url or not llm_url.strip().startswith(("http://", "https://")):
            return "❌ Invalid or missing LLM URL."
        # Normalize once; rstrip('/') avoids sending '//explain' when the
        # configured URL carries a trailing slash.
        base_url = llm_url.strip().rstrip("/")
        theorem_context = load_theorem_context(yaml_path)
        prompt = build_prompt(equation_type, solution_text, theorem_context)
        response = requests.post(
            f"{base_url}/explain",
            json={"prompt": prompt},
            timeout=90,
        )
        if response.status_code == 200:
            result = response.json()
            if isinstance(result, dict):
                return result.get("explanation", "❌ No explanation returned.")
            elif isinstance(result, list):
                return result[0] if result else "❌ Empty response list."
            else:
                return f"❌ Unexpected LLM response format: {type(result)}"
        return f"❌ LLM request failed: {response.status_code}"
    except Exception as e:
        # Boundary handler: surface any failure (I/O, YAML, network, JSON)
        # as text for the UI instead of crashing.
        return f"❌ LLM Error: {e}"
# === Request fallback if parsing failed ===
def request_llm_fallback(bad_input, llm_url):
    """Ask the LLM service's /clean endpoint to repair unparseable input.

    Best-effort fallback used when local parsing fails: on any error (network,
    bad URL, non-JSON response) the original `bad_input` is returned unchanged.

    Parameters
    ----------
    bad_input : str
        The input that failed local parsing; sent as the prompt.
    llm_url : str
        Base URL of the LLM service.

    Returns
    -------
    str
        The cleaned text from the service, or `bad_input` on any failure.
        Never raises.
    """
    try:
        response = requests.post(
            f"{llm_url.strip()}/clean",
            json={"prompt": bad_input},
            timeout=20,
        )
        result = response.json()
        if isinstance(result, dict):
            return result.get("cleaned_latex", bad_input)
        return bad_input
    except Exception:
        # `except Exception` rather than a bare `except:` so that
        # KeyboardInterrupt/SystemExit still propagate; everything else falls
        # back to the caller's original input by design.
        return bad_input