"""
AnveshAI Edge Prototype
=======================

Terminal-based offline AI assistant — hierarchical modular architecture
with chain-of-thought (CoT) reasoning.

Routing & reasoning chain:
  /commands        → inline handler (instant)
  Plain arithmetic → math_engine (instant, AST-based)
  Advanced math    → ReasoningEngine.analyze() (problem decomposition)
                     ├─ advanced_math_engine (SymPy symbolic)
                     └─ ReasoningEngine.build_math_prompt()
                        → LLM step-by-step explanation
  Knowledge query  → ReasoningEngine.analyze() (CoT planning)
                     └─ knowledge_engine (local KB)
                        ├─ match found → return KB paragraph
                        └─ no match   → ReasoningEngine.build_general_prompt()
                           → LLM structured answer
  Conversation     → conversation_engine (pattern rules)
                     └─ no pattern → ReasoningEngine.build_general_prompt()
                        → LLM structured answer

Commands:
  /help    → list commands
  /history → last 10 interactions
  /exit    → quit
"""
import sys
# ── Optional colour support ──────────────────────────────────────────────────
try:
    from colorama import init as colorama_init, Fore, Style
    colorama_init(autoreset=True)
except ImportError:
    # colorama is absent: stand in an object whose every attribute is the
    # empty string, so colour-code interpolation becomes a no-op.
    class _PlainColours:
        def __getattr__(self, _attr: str) -> str:
            return ""
    Fore = Style = _PlainColours()
# ββ Module imports ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
from router import classify_intent
from math_engine import evaluate as math_evaluate
from advanced_math_engine import solve as advanced_math_solve
from knowledge_engine import KnowledgeEngine
from conversation_engine import ConversationEngine
from llm_engine import LLMEngine, MATH_SYSTEM_PROMPT, MATH_TEMPERATURE
from reasoning_engine import ReasoningEngine
from memory import initialize_db, save_interaction, format_history
# βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
# ASCII-art startup banner (raw string so the backslashes in the art survive).
BANNER = r"""
ββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
β ___ __ ___ ____ β
β / _ | ___ _ _____ ___ / / / _ | / _/ β
β / __ |/ _ \ |/ / -_|_-</ _ \/ __ |_/ / β
β /_/ |_/_//_/___/\__/___/_//_/_/ |_/___/ EDGE β
β β
ββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
"""
# Full /help reference: commands plus worked example queries for every
# capability of the advanced math engine (printed verbatim by the REPL).
HELP_TEXT = """
Available commands:
/help β show this help message
/history β display last 10 conversation entries
/exit β quit AnveshAI Edge
How to use:
β’ Advanced math β symbolic engine computes the EXACT answer,
LLM explains step-by-step working
Calculus:
"integrate x^2 sin(x)"
"definite integral of x^2 from 0 to 3"
"derivative of x^3 + 2x"
"second derivative of sin(x) * e^x"
"limit of sin(x)/x as x approaches 0"
Algebra & equations:
"solve x^2 - 5x + 6 = 0"
"solve 2x + 3 = 7"
Differential equations:
"solve differential equation y'' + y = 0"
"solve ode dy/dx = y"
Series & transforms:
"taylor series of e^x around 0 order 6"
"laplace transform of sin(t)"
"inverse laplace of 1/(s^2 + 1)"
"fourier transform of exp(-x^2)"
Matrices:
"determinant of [[1,2],[3,4]]"
"inverse matrix [[2,1],[5,3]]"
"eigenvalue [[4,1],[2,3]]"
"rank of matrix [[1,2,3],[4,5,6]]"
Symbolic manipulation:
"factor x^3 - 8"
"simplify (x^2 - 1)/(x - 1)"
"expand (x + y)^4"
"partial fraction 1/(x^2 - 1)"
Number theory:
"gcd of 48 and 18"
"lcm of 12 and 15"
"prime factorization of 360"
"17 mod 5"
"modular inverse of 3 mod 7"
Statistics:
"mean of 2, 4, 6, 8, 10"
"standard deviation of 1, 2, 3, 4, 5"
Combinatorics:
"factorial of 10"
"binomial coefficient 10 choose 3"
"permutation 6 P 2"
Summations:
"sum of k^2 for k from 1 to 10"
"summation of 1/n^2 for n from 1 to infinity"
Complex numbers:
"real part of 3 + 4*I"
"modulus of 3 + 4*I"
β’ Arithmetic β computed instantly
e.g. "2 + 3 * (4 ^ 2)"
β’ Knowledge β local KB first, then LLM
e.g. "What is quantum computing?"
β’ Chat β pattern rules, then LLM
e.g. "Hello!"
"""
# βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
# Terminal helpers
# βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
def _print(text: str, color: str = "") -> None:
    """Write *text* to stdout, wrapped in *color* codes when one is given."""
    if color:
        print(f"{color}{text}{Style.RESET_ALL}")
    else:
        print(text)
def _prompt() -> str:
    """Read one user line; EOF or Ctrl-C is translated into the /exit command."""
    try:
        raw = input(f"\n{Fore.CYAN}You{Style.RESET_ALL} βΊ ").strip()
    except (EOFError, KeyboardInterrupt):
        raw = "/exit"
    return raw
def _respond(label: str, text: str) -> None:
    """Render an assistant reply tagged with the engine *label* that produced it."""
    header = f"\n{Fore.GREEN}AnveshAI{Style.RESET_ALL} "
    body = f"[{Fore.YELLOW}{label}{Style.RESET_ALL}] βΊ {text}"
    print(header + body)
def _system(text: str) -> None:
    """Emit a magenta status/progress line for module and engine messages."""
    message = f"{Fore.MAGENTA} {text}{Style.RESET_ALL}"
    print(message)
# βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
# Response Composer
# βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
def compose_response(
    user_input: str,
    intent: str,
    knowledge_engine: KnowledgeEngine,
    conversation_engine: ConversationEngine,
    llm_engine: LLMEngine,
    reasoning_engine: ReasoningEngine,
) -> tuple[str, str]:
    """
    Route input through the full hierarchy with chain-of-thought reasoning.

    Returns (label, response_text).  The label names the path that produced
    the answer:
        Math            → plain arithmetic result (instant)
        AdvMath+CoT+LLM → SymPy computed, reasoning planned, LLM explained
        AdvMath+CoT     → SymPy failed, reasoning-guided LLM fallback
        Knowledge       → local KB answered
        LLM+CoT-KB      → KB miss; reasoning-guided LLM answered
        Chat            → conversation pattern matched
        LLM+CoT         → reasoning-guided LLM answered
    """
    # Plain arithmetic is handled instantly by the AST-based evaluator.
    if intent == "math":
        return "Math", math_evaluate(user_input)
    if intent == "advanced_math":
        return _advanced_math_response(user_input, intent, llm_engine, reasoning_engine)
    if intent == "knowledge":
        return _knowledge_response(
            user_input, intent, knowledge_engine, llm_engine, reasoning_engine
        )
    # Anything else is treated as conversation.
    return _chat_response(
        user_input, intent, conversation_engine, llm_engine, reasoning_engine
    )


def _advanced_math_response(
    user_input: str,
    intent: str,
    llm_engine: LLMEngine,
    reasoning_engine: ReasoningEngine,
) -> tuple[str, str]:
    """Solve symbolically with SymPy, then let the LLM explain the working;
    on a SymPy failure, fall back to a reasoning-guided LLM answer."""
    success, result_str, _latex = advanced_math_solve(user_input)
    if not success:
        # result_str carries the SymPy error text; feed it to the fallback prompt.
        _system(f"SymPy error: {result_str}")
        _system("Reasoning engine: building fallback chain-of-thoughtβ¦")
        plan = reasoning_engine.analyze(user_input, intent)
        _system(plan.summary())
        prompt = reasoning_engine.build_math_fallback_prompt(
            user_input, plan, error_context=result_str
        )
        return "AdvMath+CoT", llm_engine.generate(prompt)
    _system(f"SymPy β {result_str}")
    _system("Reasoning engine: decomposing problemβ¦")
    plan = reasoning_engine.analyze(user_input, intent, has_symbolic_result=True)
    _system(plan.summary())
    if plan.warnings:
        for w in plan.warnings:
            _system(f" β {w}")
    _system("Building chain-of-thought prompt β LLMβ¦")
    prompt = reasoning_engine.build_math_prompt(user_input, result_str, plan)
    # The math-specific system prompt and temperature keep the explanation
    # anchored to the exact symbolic result.
    explanation = llm_engine.generate(
        prompt,
        system_prompt=MATH_SYSTEM_PROMPT,
        temperature=MATH_TEMPERATURE,
    )
    full_response = (
        f"{result_str}\n\n"
        f"[Reasoning: {plan.problem_type} | {plan.strategy[:60]}]\n\n"
        f"{explanation}"
    )
    return "AdvMath+CoT+LLM", full_response


def _knowledge_response(
    user_input: str,
    intent: str,
    knowledge_engine: KnowledgeEngine,
    llm_engine: LLMEngine,
    reasoning_engine: ReasoningEngine,
) -> tuple[str, str]:
    """Answer from the local KB; on a miss, plan CoT and ask the LLM,
    passing the (empty-handed) KB response along as context."""
    kb_response, kb_found = knowledge_engine.query(user_input)
    if kb_found:
        return "Knowledge", kb_response
    _system("KB: no match β reasoning engine + LLMβ¦")
    plan = reasoning_engine.analyze(user_input, intent)
    _system(plan.summary())
    prompt = reasoning_engine.build_general_prompt(
        user_input, intent, kb_response, plan
    )
    return "LLM+CoT-KB", llm_engine.generate(prompt)


def _chat_response(
    user_input: str,
    intent: str,
    conversation_engine: ConversationEngine,
    llm_engine: LLMEngine,
    reasoning_engine: ReasoningEngine,
) -> tuple[str, str]:
    """Try the pattern-rule conversation engine first; otherwise plan CoT
    and delegate to the LLM with no KB context."""
    chat_response, pattern_matched = conversation_engine.respond(user_input)
    if pattern_matched:
        return "Chat", chat_response
    _system("No pattern match β reasoning engine + LLMβ¦")
    plan = reasoning_engine.analyze(user_input, intent)
    _system(plan.summary())
    prompt = reasoning_engine.build_general_prompt(user_input, intent, "", plan)
    return "LLM+CoT", llm_engine.generate(prompt)
# βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
# Main loop
# βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
def _handle_command(user_input: str) -> None:
    """Dispatch one /slash command; /exit terminates the whole process."""
    cmd = user_input.lower().split()[0]
    if cmd == "/exit":
        _print(f"\n{Fore.CYAN}Goodbye! Session closed.{Style.RESET_ALL}")
        sys.exit(0)
    if cmd == "/history":
        _print(f"\n{Fore.YELLOW}ββ Conversation History βββββββββββββββββββββ{Style.RESET_ALL}")
        _print(format_history())
        _print(f"{Fore.YELLOW}βββββββββββββββββββββββββββββββββββββββββββββ{Style.RESET_ALL}")
    elif cmd == "/help":
        _print(f"\n{Fore.YELLOW}ββ Help ββββββββββββββββββββββββββββββββββββββ{Style.RESET_ALL}")
        _print(HELP_TEXT)
        _print(f"{Fore.YELLOW}βββββββββββββββββββββββββββββββββββββββββββββ{Style.RESET_ALL}")
    else:
        _respond("System", f"Unknown command '{user_input}'. Type /help.")


def main() -> None:
    """Initialise every module, then run the interactive REPL until /exit."""
    _print(BANNER, Fore.CYAN)
    _system("Initialising modulesβ¦")
    initialize_db()
    _system("β Memory (SQLite) ready")
    knowledge_engine = KnowledgeEngine()
    if knowledge_engine.is_loaded():
        _system("β Knowledge base loaded")
    else:
        _system("β knowledge.txt not found")
    conversation_engine = ConversationEngine()
    _system("β Conversation engine ready")
    _system("β Math engine ready (AST safe-eval)")
    _system("β Advanced math engine ready (SymPy β 31+ operations)")
    _system("β Reasoning engine ready (chain-of-thought + CoT planning)")
    _system("β Intent router ready")
    llm_engine = LLMEngine()
    reasoning_engine = ReasoningEngine()
    _system("β LLM engine ready (Qwen2.5-0.5B loads on first use)")
    _print(f"\n{Fore.WHITE}Type /help for commands or just start chatting!{Style.RESET_ALL}")
    while True:
        user_input = _prompt()
        if not user_input:
            continue
        intent = classify_intent(user_input)
        if intent == "system":
            _handle_command(user_input)
            continue
        label, response = compose_response(
            user_input, intent, knowledge_engine,
            conversation_engine, llm_engine, reasoning_engine,
        )
        _respond(label, response)
        save_interaction(user_input, response)


if __name__ == "__main__":
    main()
|