algorythmtechnologies committed on
Commit
4e8d206
·
verified ·
1 Parent(s): a38941f

Update supernova/reasoning_engine.py

Browse files
Files changed (1) hide show
  1. supernova/reasoning_engine.py +320 -315
supernova/reasoning_engine.py CHANGED
@@ -1,315 +1,320 @@
1
- """
2
- Enhanced Reasoning Engine for Supernova AI
3
- Provides sophisticated problem-solving capabilities through structured reasoning,
4
- multi-tool coordination, and knowledge synthesis.
5
- """
6
-
7
- import re
8
- import json
9
- from typing import List, Dict, Any, Optional, Tuple
10
- from dataclasses import dataclass
11
- from enum import Enum
12
-
13
- from .tools import ToolOrchestrator, ToolCall
14
-
15
-
16
- class ReasoningType(Enum):
17
- ANALYTICAL = "analytical"
18
- CREATIVE = "creative"
19
- COMPARATIVE = "comparative"
20
- CAUSAL = "causal"
21
- SEQUENTIAL = "sequential"
22
- EVALUATIVE = "evaluative"
23
-
24
-
25
- @dataclass
26
- class ReasoningStep:
27
- step_number: int
28
- description: str
29
- reasoning_type: ReasoningType
30
- tool_needed: Optional[str] = None
31
- query: Optional[str] = None
32
- result: Optional[str] = None
33
- confidence: float = 0.8
34
-
35
-
36
- @dataclass
37
- class KnowledgeDomain:
38
- domain: str
39
- confidence: float
40
- sources: List[str]
41
- key_facts: List[str]
42
-
43
-
44
- class EnhancedReasoningEngine:
45
- """Advanced reasoning engine that mimics sophisticated AI reasoning patterns."""
46
-
47
- def __init__(self, tool_orchestrator: ToolOrchestrator):
48
- self.tools = tool_orchestrator
49
- self.conversation_context = []
50
- self.domain_expertise = {
51
- 'science': ['physics', 'chemistry', 'biology', 'mathematics', 'astronomy'],
52
- 'technology': ['programming', 'ai', 'computing', 'engineering', 'electronics'],
53
- 'humanities': ['history', 'literature', 'philosophy', 'psychology', 'sociology'],
54
- 'medicine': ['anatomy', 'pharmacology', 'diagnosis', 'treatment', 'research'],
55
- 'business': ['finance', 'management', 'economics', 'marketing', 'strategy'],
56
- 'arts': ['music', 'visual arts', 'design', 'architecture', 'performance']
57
- }
58
-
59
- def analyze_query_complexity(self, query: str) -> Dict[str, Any]:
60
- """Analyze the complexity and requirements of a user query."""
61
- complexity_indicators = {
62
- 'simple': ['what is', 'define', 'who is', 'when did'],
63
- 'moderate': ['how does', 'why does', 'explain', 'compare', 'analyze'],
64
- 'complex': ['evaluate', 'synthesize', 'create', 'design', 'solve for multiple', 'consider all factors']
65
- }
66
-
67
- domains_detected = []
68
- for domain, keywords in self.domain_expertise.items():
69
- if any(keyword in query.lower() for keyword in keywords):
70
- domains_detected.append(domain)
71
-
72
- complexity_level = 'simple'
73
- for level, indicators in complexity_indicators.items():
74
- if any(indicator in query.lower() for indicator in indicators):
75
- complexity_level = level
76
-
77
- requires_multi_step = any(phrase in query.lower() for phrase in [
78
- 'step by step', 'first...then', 'multiple', 'several', 'both', 'compare and contrast'
79
- ])
80
-
81
- return {
82
- 'complexity': complexity_level,
83
- 'domains': domains_detected,
84
- 'multi_step_needed': requires_multi_step,
85
- 'estimated_steps': min(5, len(domains_detected) + (2 if requires_multi_step else 1))
86
- }
87
-
88
- def decompose_complex_query(self, query: str, analysis: Dict[str, Any]) -> List[ReasoningStep]:
89
- """Break down complex queries into manageable reasoning steps."""
90
- steps = []
91
- step_num = 1
92
-
93
- # Step 1: Information Gathering
94
- if analysis['complexity'] in ['moderate', 'complex']:
95
- # Determine if we need current information
96
- if any(term in query.lower() for term in ['current', 'latest', 'recent', 'today', '2024', '2025']):
97
- steps.append(ReasoningStep(
98
- step_number=step_num,
99
- description="Gather current information from web sources",
100
- reasoning_type=ReasoningType.ANALYTICAL,
101
- tool_needed="serper",
102
- query=query
103
- ))
104
- step_num += 1
105
-
106
- # Check if mathematical computation is needed
107
- if any(term in query.lower() for term in ['calculate', 'compute', 'solve', 'derivative', 'integral']):
108
- steps.append(ReasoningStep(
109
- step_number=step_num,
110
- description="Perform mathematical computation",
111
- reasoning_type=ReasoningType.ANALYTICAL,
112
- tool_needed="math_engine",
113
- query=query
114
- ))
115
- step_num += 1
116
-
117
- # Step 2: Domain-specific analysis
118
- for domain in analysis['domains']:
119
- steps.append(ReasoningStep(
120
- step_number=step_num,
121
- description=f"Analyze from {domain} perspective",
122
- reasoning_type=ReasoningType.ANALYTICAL,
123
- tool_needed=None, # Will use model generation with domain context
124
- query=f"From a {domain} perspective: {query}"
125
- ))
126
- step_num += 1
127
-
128
- # Step 3: Synthesis and evaluation
129
- if analysis['complexity'] == 'complex':
130
- steps.append(ReasoningStep(
131
- step_number=step_num,
132
- description="Synthesize information and provide comprehensive analysis",
133
- reasoning_type=ReasoningType.EVALUATIVE,
134
- tool_needed=None,
135
- query=query
136
- ))
137
-
138
- return steps if steps else [ReasoningStep(1, "Direct response", ReasoningType.ANALYTICAL, query=query)]
139
-
140
- def execute_reasoning_chain(self, steps: List[ReasoningStep], model, tokenizer) -> List[ReasoningStep]:
141
- """Execute a chain of reasoning steps, using tools and model generation as needed."""
142
- results = []
143
- context_info = []
144
-
145
- for step in steps:
146
- if step.tool_needed:
147
- # Use appropriate tool
148
- tool_call = ToolCall(tool=step.tool_needed, query=step.query)
149
- executed_call = self.tools.execute_tool_call(tool_call)
150
-
151
- if executed_call.result:
152
- step.result = executed_call.result
153
- step.confidence = 0.9
154
- context_info.append(f"{step.description}: {executed_call.result}")
155
- else:
156
- step.result = f"Tool execution failed: {executed_call.error}"
157
- step.confidence = 0.3
158
- else:
159
- # Use model generation with enhanced context
160
- enhanced_context = self._build_enhanced_context(step, context_info)
161
- try:
162
- response = self._generate_with_context(model, tokenizer, enhanced_context, step.query)
163
- step.result = response
164
- step.confidence = 0.7
165
- context_info.append(f"{step.description}: {response}")
166
- except Exception as e:
167
- step.result = f"Generation failed: {str(e)}"
168
- step.confidence = 0.2
169
-
170
- results.append(step)
171
-
172
- return results
173
-
174
- def _build_enhanced_context(self, step: ReasoningStep, context_info: List[str]) -> str:
175
- """Build enhanced context for model generation."""
176
- context_parts = [
177
- "You are Supernova, an advanced AI assistant with deep expertise across multiple domains.",
178
- "Apply sophisticated reasoning and provide comprehensive, nuanced responses.",
179
- ""
180
- ]
181
-
182
- if context_info:
183
- context_parts.extend([
184
- "Previous analysis steps:",
185
- *[f"- {info}" for info in context_info],
186
- ""
187
- ])
188
-
189
- reasoning_guidance = {
190
- ReasoningType.ANALYTICAL: "Analyze systematically, consider multiple factors, and provide evidence-based insights.",
191
- ReasoningType.CREATIVE: "Think creatively, explore innovative solutions, and consider unconventional approaches.",
192
- ReasoningType.COMPARATIVE: "Compare different perspectives, weigh pros and cons, and identify key differences.",
193
- ReasoningType.CAUSAL: "Identify cause-and-effect relationships, trace underlying mechanisms, and explain why things happen.",
194
- ReasoningType.SEQUENTIAL: "Break down into logical steps, show progression, and maintain clear sequencing.",
195
- ReasoningType.EVALUATIVE: "Make judgments based on criteria, assess quality and effectiveness, and provide recommendations."
196
- }
197
-
198
- context_parts.extend([
199
- f"Reasoning approach: {reasoning_guidance.get(step.reasoning_type, 'Provide thorough analysis.')}",
200
- f"Focus area: {step.description}",
201
- ""
202
- ])
203
-
204
- return "\n".join(context_parts)
205
-
206
- def _generate_with_context(self, model, tokenizer, context: str, query: str, max_tokens: int = 400) -> str:
207
- """Generate response using the model with enhanced context."""
208
- full_prompt = f"{context}\nUser Query: {query}\n\nDetailed Response:"
209
-
210
- # Use the existing generate function (simplified version)
211
- model.eval()
212
- device = next(model.parameters()).device
213
- input_ids = tokenizer.encode(full_prompt, return_tensors="pt").to(device)
214
-
215
- with torch.no_grad():
216
- for _ in range(max_tokens):
217
- if input_ids.size(1) >= model.cfg.n_positions:
218
- input_cond = input_ids[:, -model.cfg.n_positions:]
219
- else:
220
- input_cond = input_ids
221
-
222
- logits, _ = model(input_cond)
223
- logits = logits[:, -1, :] / 0.8 # temperature
224
-
225
- # Top-k sampling
226
- v, _ = torch.topk(logits, min(50, logits.size(-1)))
227
- logits[logits < v[:, [-1]]] = -float("Inf")
228
-
229
- probs = torch.softmax(logits, dim=-1)
230
- next_id = torch.multinomial(probs, num_samples=1)
231
- input_ids = torch.cat([input_ids, next_id], dim=1)
232
-
233
- response = tokenizer.decode(input_ids[0].tolist())
234
-
235
- # Extract the response part
236
- if "Detailed Response:" in response:
237
- response = response.split("Detailed Response:", 1)[1].strip()
238
-
239
- return response
240
-
241
- def synthesize_final_response(self, steps: List[ReasoningStep], original_query: str) -> str:
242
- """Synthesize all reasoning steps into a comprehensive final response."""
243
- successful_steps = [step for step in steps if step.result and step.confidence > 0.5]
244
-
245
- if not successful_steps:
246
- return "I apologize, but I encountered difficulties processing your request. Could you please rephrase or provide more specific details?"
247
-
248
- # Build comprehensive response
249
- response_parts = []
250
-
251
- # Add executive summary for complex queries
252
- if len(successful_steps) > 2:
253
- response_parts.append("Here's my comprehensive analysis:")
254
- response_parts.append("")
255
-
256
- # Include results from each step
257
- for step in successful_steps:
258
- if step.tool_needed in ['math_engine', 'serper']:
259
- # Tool results are already well-formatted
260
- response_parts.append(step.result)
261
- else:
262
- # Model-generated responses
263
- response_parts.append(step.result)
264
-
265
- response_parts.append("")
266
-
267
- # Add synthesis for multi-step responses
268
- if len(successful_steps) > 2:
269
- confidence_score = sum(step.confidence for step in successful_steps) / len(successful_steps)
270
-
271
- synthesis_parts = [
272
- "**Key Insights:**",
273
- "• Multiple perspectives have been considered",
274
- f"• Analysis confidence: {confidence_score:.1%}",
275
- "• Both current information and domain expertise were utilized"
276
- ]
277
-
278
- response_parts.extend(synthesis_parts)
279
-
280
- return "\n".join(response_parts).strip()
281
-
282
- def process_complex_query(self, query: str, model, tokenizer) -> str:
283
- """Main method to process complex queries with enhanced reasoning."""
284
- # Analyze query complexity and requirements
285
- analysis = self.analyze_query_complexity(query)
286
-
287
- # For simple queries, use direct processing
288
- if analysis['complexity'] == 'simple' and not analysis['multi_step_needed']:
289
- tool_call = self.tools.route_query(query)
290
- if tool_call:
291
- executed_call = self.tools.execute_tool_call(tool_call)
292
- if executed_call.result:
293
- return executed_call.result
294
-
295
- # Fall back to enhanced model generation
296
- context = self._build_enhanced_context(
297
- ReasoningStep(1, "Direct response", ReasoningType.ANALYTICAL),
298
- []
299
- )
300
- return self._generate_with_context(model, tokenizer, context, query)
301
-
302
- # For complex queries, use multi-step reasoning
303
- reasoning_steps = self.decompose_complex_query(query, analysis)
304
- executed_steps = self.execute_reasoning_chain(reasoning_steps, model, tokenizer)
305
-
306
- return self.synthesize_final_response(executed_steps, query)
307
-
308
-
309
- # Import torch and other needed modules here to avoid import issues
310
- import torch
311
- try:
312
- import sympy as sp
313
- import numpy as np
314
- except ImportError:
315
- pass
 
 
 
 
 
 
1
+ """
2
+ Enhanced Reasoning Engine for Supernova AI
3
+ Provides sophisticated problem-solving capabilities through structured reasoning,
4
+ multi-tool coordination, and knowledge synthesis.
5
+ """
6
+ import torch
7
+ import numpy as np
8
+ try:
9
+ import sympy as sp
10
+ except ImportError:
11
+ sp = None
12
+ import re
13
+ import json
14
+ from typing import List, Dict, Any, Optional, Tuple
15
+ from dataclasses import dataclass
16
+ from enum import Enum
17
+
18
+ from .tools import ToolOrchestrator, ToolCall
19
+
20
+
21
+ class ReasoningType(Enum):
22
+ ANALYTICAL = "analytical"
23
+ CREATIVE = "creative"
24
+ COMPARATIVE = "comparative"
25
+ CAUSAL = "causal"
26
+ SEQUENTIAL = "sequential"
27
+ EVALUATIVE = "evaluative"
28
+
29
+
30
+ @dataclass
31
+ class ReasoningStep:
32
+ step_number: int
33
+ description: str
34
+ reasoning_type: ReasoningType
35
+ tool_needed: Optional[str] = None
36
+ query: Optional[str] = None
37
+ result: Optional[str] = None
38
+ confidence: float = 0.8
39
+
40
+
41
+ @dataclass
42
+ class KnowledgeDomain:
43
+ domain: str
44
+ confidence: float
45
+ sources: List[str]
46
+ key_facts: List[str]
47
+
48
+
49
class EnhancedReasoningEngine:
    """Advanced reasoning engine that mimics sophisticated AI reasoning patterns.

    Pipeline: analyze a query's complexity, decompose it into
    ``ReasoningStep``s, execute each step (tool call or model generation),
    then synthesize the step results into a single response.

    Project-type annotations are written as string forward references so the
    class definition does not depend on import order.
    """

    def __init__(self, tool_orchestrator: "ToolOrchestrator"):
        # Orchestrator used to route and execute tool calls (web search, math).
        self.tools = tool_orchestrator
        # Reserved for multi-turn context tracking; not read anywhere yet.
        self.conversation_context = []
        # Keyword lists used for substring-based domain detection.
        self.domain_expertise = {
            'science': ['physics', 'chemistry', 'biology', 'mathematics', 'astronomy'],
            'technology': ['programming', 'ai', 'computing', 'engineering', 'electronics'],
            'humanities': ['history', 'literature', 'philosophy', 'psychology', 'sociology'],
            'medicine': ['anatomy', 'pharmacology', 'diagnosis', 'treatment', 'research'],
            'business': ['finance', 'management', 'economics', 'marketing', 'strategy'],
            'arts': ['music', 'visual arts', 'design', 'architecture', 'performance']
        }

    def analyze_query_complexity(self, query: str) -> Dict[str, Any]:
        """Analyze the complexity and requirements of a user query.

        Returns a dict with keys:
            complexity: 'simple' | 'moderate' | 'complex'.
            domains: detected domain names (keyword substring matches).
            multi_step_needed: True when phrasing suggests multi-part work.
            estimated_steps: heuristic step count, capped at 5.
        """
        q = query.lower()  # hoisted: reused by every substring test below

        complexity_indicators = {
            'simple': ['what is', 'define', 'who is', 'when did'],
            'moderate': ['how does', 'why does', 'explain', 'compare', 'analyze'],
            'complex': ['evaluate', 'synthesize', 'create', 'design', 'solve for multiple', 'consider all factors']
        }

        domains_detected = [
            domain
            for domain, keywords in self.domain_expertise.items()
            if any(keyword in q for keyword in keywords)
        ]

        # Later levels overwrite earlier matches; insertion order
        # simple -> moderate -> complex makes the highest matching level win.
        complexity_level = 'simple'
        for level, indicators in complexity_indicators.items():
            if any(indicator in q for indicator in indicators):
                complexity_level = level

        requires_multi_step = any(phrase in q for phrase in [
            'step by step', 'first...then', 'multiple', 'several', 'both', 'compare and contrast'
        ])

        return {
            'complexity': complexity_level,
            'domains': domains_detected,
            'multi_step_needed': requires_multi_step,
            'estimated_steps': min(5, len(domains_detected) + (2 if requires_multi_step else 1))
        }

    def decompose_complex_query(self, query: str, analysis: Dict[str, Any]) -> "List[ReasoningStep]":
        """Break down complex queries into manageable reasoning steps.

        Builds, in order: optional web-search and math steps (for moderate or
        complex queries), one model-generation step per detected domain, and a
        final synthesis step for 'complex' queries. Falls back to a single
        direct-response step when nothing matched.
        """
        steps = []
        step_num = 1
        q = query.lower()  # hoisted: reused by the substring tests below

        # Step 1: Information gathering (only for non-trivial queries).
        if analysis['complexity'] in ['moderate', 'complex']:
            # Recency wording means we need fresh data from web search.
            if any(term in q for term in ['current', 'latest', 'recent', 'today', '2024', '2025']):
                steps.append(ReasoningStep(
                    step_number=step_num,
                    description="Gather current information from web sources",
                    reasoning_type=ReasoningType.ANALYTICAL,
                    tool_needed="serper",
                    query=query
                ))
                step_num += 1

            # Computation wording routes to the math engine.
            if any(term in q for term in ['calculate', 'compute', 'solve', 'derivative', 'integral']):
                steps.append(ReasoningStep(
                    step_number=step_num,
                    description="Perform mathematical computation",
                    reasoning_type=ReasoningType.ANALYTICAL,
                    tool_needed="math_engine",
                    query=query
                ))
                step_num += 1

        # Step 2: Domain-specific analysis — one generation step per domain.
        for domain in analysis['domains']:
            steps.append(ReasoningStep(
                step_number=step_num,
                description=f"Analyze from {domain} perspective",
                reasoning_type=ReasoningType.ANALYTICAL,
                tool_needed=None,  # answered by model generation with domain context
                query=f"From a {domain} perspective: {query}"
            ))
            step_num += 1

        # Step 3: Synthesis and evaluation, only for truly complex queries.
        if analysis['complexity'] == 'complex':
            steps.append(ReasoningStep(
                step_number=step_num,
                description="Synthesize information and provide comprehensive analysis",
                reasoning_type=ReasoningType.EVALUATIVE,
                tool_needed=None,
                query=query
            ))

        return steps if steps else [ReasoningStep(1, "Direct response", ReasoningType.ANALYTICAL, query=query)]

    def execute_reasoning_chain(self, steps: "List[ReasoningStep]", model, tokenizer) -> "List[ReasoningStep]":
        """Execute a chain of reasoning steps, using tools and model generation as needed.

        Mutates each step in place (``result``, ``confidence``) and returns
        the same steps in order. A failing step does not abort the chain; the
        failure is recorded on the step with a low confidence score.
        """
        results = []
        # Accumulated "description: result" strings fed to later model steps.
        context_info = []

        for step in steps:
            if step.tool_needed:
                # Delegate to the named tool via the orchestrator.
                tool_call = ToolCall(tool=step.tool_needed, query=step.query)
                executed_call = self.tools.execute_tool_call(tool_call)

                if executed_call.result:
                    step.result = executed_call.result
                    step.confidence = 0.9  # tool output is trusted most
                    context_info.append(f"{step.description}: {executed_call.result}")
                else:
                    step.result = f"Tool execution failed: {executed_call.error}"
                    step.confidence = 0.3
            else:
                # Model generation, primed with results of earlier steps.
                enhanced_context = self._build_enhanced_context(step, context_info)
                try:
                    response = self._generate_with_context(model, tokenizer, enhanced_context, step.query)
                    step.result = response
                    step.confidence = 0.7
                    context_info.append(f"{step.description}: {response}")
                except Exception as e:
                    # Keep the chain alive; record the failure on the step.
                    step.result = f"Generation failed: {str(e)}"
                    step.confidence = 0.2

            results.append(step)

        return results

    def _build_enhanced_context(self, step: "ReasoningStep", context_info: List[str]) -> str:
        """Build the system-style prompt preamble for a model-generation step.

        Combines the persona header, any prior step results, and a guidance
        sentence chosen by the step's reasoning type.
        """
        context_parts = [
            "You are Supernova, an advanced AI assistant with deep expertise across multiple domains.",
            "Apply sophisticated reasoning and provide comprehensive, nuanced responses.",
            ""
        ]

        if context_info:
            context_parts.extend([
                "Previous analysis steps:",
                *[f"- {info}" for info in context_info],
                ""
            ])

        reasoning_guidance = {
            ReasoningType.ANALYTICAL: "Analyze systematically, consider multiple factors, and provide evidence-based insights.",
            ReasoningType.CREATIVE: "Think creatively, explore innovative solutions, and consider unconventional approaches.",
            ReasoningType.COMPARATIVE: "Compare different perspectives, weigh pros and cons, and identify key differences.",
            ReasoningType.CAUSAL: "Identify cause-and-effect relationships, trace underlying mechanisms, and explain why things happen.",
            ReasoningType.SEQUENTIAL: "Break down into logical steps, show progression, and maintain clear sequencing.",
            ReasoningType.EVALUATIVE: "Make judgments based on criteria, assess quality and effectiveness, and provide recommendations."
        }

        context_parts.extend([
            f"Reasoning approach: {reasoning_guidance.get(step.reasoning_type, 'Provide thorough analysis.')}",
            f"Focus area: {step.description}",
            ""
        ])

        return "\n".join(context_parts)

    def _generate_with_context(self, model, tokenizer, context: str, query: str, max_tokens: int = 400) -> str:
        """Generate a response with the model, token by token.

        Sampling: temperature 0.8 with top-k (k=50). Assumes (confirm against
        callers): ``tokenizer.encode`` supports ``return_tensors="pt"``
        (HF-style), ``model(x)`` returns ``(logits, loss)``, and the model
        exposes ``cfg.n_positions`` as its context window.
        """
        full_prompt = f"{context}\nUser Query: {query}\n\nDetailed Response:"

        model.eval()
        device = next(model.parameters()).device
        input_ids = tokenizer.encode(full_prompt, return_tensors="pt").to(device)

        with torch.no_grad():
            for _ in range(max_tokens):
                # Crop to the model's context window when the prompt grows too long.
                if input_ids.size(1) >= model.cfg.n_positions:
                    input_cond = input_ids[:, -model.cfg.n_positions:]
                else:
                    input_cond = input_ids

                logits, _ = model(input_cond)
                logits = logits[:, -1, :] / 0.8  # temperature

                # Top-k sampling: mask everything below the k-th best logit.
                v, _ = torch.topk(logits, min(50, logits.size(-1)))
                logits[logits < v[:, [-1]]] = -float("Inf")

                probs = torch.softmax(logits, dim=-1)
                next_id = torch.multinomial(probs, num_samples=1)
                input_ids = torch.cat([input_ids, next_id], dim=1)

        response = tokenizer.decode(input_ids[0].tolist())

        # Keep only the text generated after the response marker.
        if "Detailed Response:" in response:
            response = response.split("Detailed Response:", 1)[1].strip()

        return response

    def synthesize_final_response(self, steps: "List[ReasoningStep]", original_query: str) -> str:
        """Synthesize all reasoning steps into a comprehensive final response.

        Only steps with a result and confidence > 0.5 contribute.
        ``original_query`` is part of the public signature but currently
        unused — kept for callers and future use.
        """
        successful_steps = [step for step in steps if step.result and step.confidence > 0.5]

        if not successful_steps:
            return "I apologize, but I encountered difficulties processing your request. Could you please rephrase or provide more specific details?"

        response_parts = []

        # Executive summary header only for genuinely multi-step answers.
        if len(successful_steps) > 2:
            response_parts.append("Here's my comprehensive analysis:")
            response_parts.append("")

        # Append each step's result verbatim. (The previous version branched
        # on tool_needed here, but both arms were identical — collapsed.)
        for step in successful_steps:
            response_parts.append(step.result)
            response_parts.append("")

        # Closing synthesis for multi-step responses.
        if len(successful_steps) > 2:
            confidence_score = sum(step.confidence for step in successful_steps) / len(successful_steps)

            synthesis_parts = [
                "**Key Insights:**",
                "• Multiple perspectives have been considered",
                f"• Analysis confidence: {confidence_score:.1%}",
                "• Both current information and domain expertise were utilized"
            ]

            response_parts.extend(synthesis_parts)

        return "\n".join(response_parts).strip()

    def process_complex_query(self, query: str, model, tokenizer) -> str:
        """Main entry point: answer a query with enhanced reasoning.

        Simple single-step queries go straight to tool routing or direct
        generation; everything else runs the decompose/execute/synthesize
        pipeline.
        """
        analysis = self.analyze_query_complexity(query)

        # Fast path for simple queries: try a routed tool call first.
        if analysis['complexity'] == 'simple' and not analysis['multi_step_needed']:
            tool_call = self.tools.route_query(query)
            if tool_call:
                executed_call = self.tools.execute_tool_call(tool_call)
                if executed_call.result:
                    return executed_call.result

            # No tool (or it failed): fall back to direct model generation.
            context = self._build_enhanced_context(
                ReasoningStep(1, "Direct response", ReasoningType.ANALYTICAL),
                []
            )
            return self._generate_with_context(model, tokenizer, context, query)

        # Complex path: multi-step reasoning.
        reasoning_steps = self.decompose_complex_query(query, analysis)
        executed_steps = self.execute_reasoning_chain(reasoning_steps, model, tokenizer)

        return self.synthesize_final_response(executed_steps, query)
312
+
313
+
314
+ # Import torch and other needed modules here to avoid import issues
315
+ import torch
316
+ try:
317
+ import sympy as sp
318
+ import numpy as np
319
+ except ImportError:
320
+ pass