|
|
import requests |
|
|
import yaml |
|
|
|
|
|
|
|
|
def load_theorem_context(yaml_path="theorems.yaml"):
    """Read the theorem database from a YAML file and render it as Markdown.

    Each theorem entry becomes a bold numbered heading plus a bullet list
    (statement, tags, when-to-use, short explanation), followed by a
    ``---`` separator line.

    Args:
        yaml_path: Path to the YAML file. Expected to contain a mapping
            with a ``theorems`` key holding a list of dicts.

    Returns:
        str: Markdown-formatted theorem reference text, or a warning
        string when the YAML root is not a mapping.

    Raises:
        OSError: If the file cannot be opened.
        yaml.YAMLError: If the file is not valid YAML.
    """
    # Explicit encoding so non-ASCII theorem text decodes identically on
    # every platform (the default codec differs on Windows).
    with open(yaml_path, 'r', encoding='utf-8') as f:
        data = yaml.safe_load(f)

    if not isinstance(data, dict):
        return "โ ๏ธ Invalid theorems format in YAML."

    context_lines = []
    # `or []` also guards an explicit `theorems: null` entry, which
    # .get() with a default would pass through as None and crash the loop.
    for idx, th in enumerate(data.get('theorems') or [], 1):
        context_lines.append(
            f"**Theorem {idx}: {th.get('name', 'Unnamed')}**\n"
            f"- **Statement**: {th.get('statement', 'N/A')}\n"
            # Same null-guard for `tags: null`, which would break join().
            f"- **Tags**: {', '.join(th.get('tags') or [])}\n"
            f"- **When to Use**: {th.get('when_to_use', 'N/A')}\n"
            f"- **Short Explanation**: {th.get('short_explanation', 'N/A')}\n"
        )
        context_lines.append('---')

    return "\n".join(context_lines)
|
|
|
|
|
|
|
|
def build_prompt(equation_type, solution_text, theorem_context):
    """Assemble the tutoring prompt sent to the LLM.

    Joins an instruction header, the theorem reference database, the
    user's solution steps, and the task description into one
    Markdown-style prompt, with ``---`` rules between sections.
    """
    sections = [
        "You are a helpful math tutor. Below is a theorem reference database.",
        "Each theorem includes:\n"
        "- Name\n- Statement\n- Tags\n- When to use\n- Short Explanation",
        "---",
        "### ๐ Theorem Database:",
        theorem_context,
        "---",
        f"### ๐งฎ User Steps for solving a {equation_type} equation:",
        solution_text,
        "---",
        "### ๐ฏ Task:\n"
        "Explain each solution step clearly.\n"
        "Use relevant theorems by number or name.\n"
        "Make it understandable to a smart high school student.\n"
        "Focus on reasoning, not just restating the steps or theorems.",
    ]
    # A blank line between sections reproduces the original layout exactly.
    return "\n\n".join(sections)
|
|
|
|
|
|
|
|
def explain_with_llm(solution_text, equation_type, llm_url, yaml_path="theorems.yaml"):
    """Ask the LLM service to explain the given solution steps.

    Builds a prompt from the theorem database plus the user's steps,
    POSTs it to ``<llm_url>/explain``, and returns the explanation text.
    Every failure mode is reported as a human-readable string rather
    than raised, so callers can display the result directly.
    """
    try:
        base = (llm_url or "").strip()
        # Guard clause: refuse anything that is not an http(s)-style URL.
        if not base.startswith("http"):
            return "โ Invalid or missing LLM URL."

        prompt = build_prompt(
            equation_type,
            solution_text,
            load_theorem_context(yaml_path),
        )

        response = requests.post(
            f"{base}/explain",
            json={"prompt": prompt},
            timeout=90,
        )

        if response.status_code != 200:
            return f"โ LLM request failed: {response.status_code}"

        result = response.json()
        if isinstance(result, dict):
            return result.get("explanation", "โ No explanation returned.")
        if isinstance(result, list):
            return result[0] if result else "โ Empty response list."
        return f"โ Unexpected LLM response format: {type(result)}"

    except Exception as e:
        return f"โ LLM Error: {e}"
|
|
|
|
|
|
|
|
def request_llm_fallback(bad_input, llm_url):
    """Ask the LLM service to clean up un-parseable LaTeX input.

    POSTs *bad_input* to ``<llm_url>/clean`` and returns the service's
    ``cleaned_latex`` value. Best-effort by design: on any failure
    (network error, bad JSON, unexpected payload) the original input is
    returned unchanged so the caller can proceed.

    Args:
        bad_input: The raw input string to be cleaned.
        llm_url: Base URL of the LLM service.

    Returns:
        str: The cleaned LaTeX, or *bad_input* on any failure.
    """
    try:
        response = requests.post(
            f"{llm_url.strip()}/clean",
            json={"prompt": bad_input},
            timeout=90,
        )
        result = response.json()
        if isinstance(result, dict):
            return result.get("cleaned_latex", bad_input)
        return bad_input
    # Bug fix: the original bare `except:` also swallowed
    # KeyboardInterrupt and SystemExit. Exception preserves the
    # best-effort fallback without blocking Ctrl-C or shutdown.
    except Exception:
        return bad_input
|
|
|
|
|
|
|
|
|