File size: 8,532 Bytes
5ec1e1b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
#!/usr/bin/env python3
"""
Final Answer Tool for GAIA Agent System
Extracts precise, EXACT MATCH compliant answers from agent results
"""

import re
import logging
from typing import Dict, Any, Optional

from models.qwen_client import QwenClient, ModelTier

logger = logging.getLogger(__name__)

class FinalAnswerTool:
    """
    Tool for extracting precise, GAIA-compliant final answers.

    Ensures EXACT MATCH compatibility for Unit 4 API submission by
    prompting a large model for a bare answer, then normalizing
    (prefix/quote/punctuation stripping) and validating the result.
    """

    def __init__(self, llm_client: "QwenClient"):
        # Client used to run the extraction prompt (COMPLEX/72B tier).
        self.llm_client = llm_client

    def extract_final_answer(self, question: str, agent_results: str, question_type: str = "") -> Dict[str, Any]:
        """
        Extract the precise final answer in GAIA-compliant format.

        Args:
            question: The original GAIA question
            agent_results: Combined results from multiple agents
            question_type: Type of question (for specialized extraction)

        Returns:
            Dict with keys "answer", "confidence" (0.0-1.0) and "reasoning".
            On any failure a placeholder answer with confidence 0.0 is
            returned instead of raising, so callers never see an exception.
        """
        try:
            logger.info("🎯 Extracting GAIA-compliant final answer")

            # Build a type-aware prompt that demands a bare answer.
            extraction_prompt = self._create_extraction_prompt(question, agent_results, question_type)

            # 72B tier for precision; max_tokens kept tiny to force concision.
            result = self.llm_client.generate(
                extraction_prompt,
                tier=ModelTier.COMPLEX,  # 72B model
                max_tokens=50  # Force concise answers
            )

            if not result.success:
                logger.error("Final answer extraction failed")
                return {
                    "answer": "Processing failed",
                    "confidence": 0.0,
                    "reasoning": f"Extraction failed: {result.response}"
                }

            # Normalize the raw model output, then score its format.
            extracted_answer = self._clean_answer(result.response, question_type)
            validation_result = self._validate_answer(extracted_answer, question_type)

            logger.info(f"✅ Final answer extracted: '{extracted_answer}'")

            return {
                "answer": extracted_answer,
                "confidence": validation_result["confidence"],
                "reasoning": f"Extracted using 72B model. Validation: {validation_result['status']}"
            }

        except Exception as e:
            error_msg = f"Final answer extraction error: {str(e)}"
            logger.error(error_msg)
            return {
                "answer": "Extraction error",
                "confidence": 0.0,
                "reasoning": error_msg
            }

    def _create_extraction_prompt(self, question: str, agent_results: str, question_type: str) -> str:
        """Create a specialized extraction prompt based on question type.

        Keyword checks on `question_type` / `question` select one set of
        type-specific formatting rules; the first matching branch wins.
        """

        base_prompt = f"""
CRITICAL: This is for GAIA benchmark evaluation using EXACT MATCH comparison.
Your response must be ONLY the precise answer - no explanations, no "FINAL ANSWER:", no extra text.

Question: {question}

Agent Analysis Results:
{agent_results}

EXTRACTION RULES:
"""

        # Add type-specific rules (first matching category only).
        if "mathematical" in question_type.lower() or any(word in question.lower() for word in ["how many", "count", "number", "calculate"]):
            base_prompt += """
- If asking for a count/number: respond with ONLY the number (e.g., "5", "23", "0")
- If asking for calculation: respond with ONLY the result (e.g., "42", "3.14", "100")
- No units unless specifically requested in the question
"""
        elif "text_manipulation" in question_type.lower() or "reverse" in question.lower():
            base_prompt += """
- If text is reversed: provide the corrected text
- If asking for opposite: provide ONLY the opposite word (e.g., "right" for opposite of "left")
- If asking to decode: provide ONLY the decoded answer
"""
        elif "yes" in question.lower() or "true" in question.lower() or "false" in question.lower():
            base_prompt += """
- If yes/no question: respond with ONLY "yes" or "no" (lowercase)
- If true/false question: respond with ONLY "true" or "false" (lowercase)
"""
        elif any(word in question.lower() for word in ["name", "who", "which person"]):
            base_prompt += """
- If asking for a name: provide ONLY the name (e.g., "John Smith", "Einstein")
- If asking for first name only: provide ONLY first name (e.g., "John")
- If asking for last name only: provide ONLY last name (e.g., "Smith")
"""
        elif any(word in question.lower() for word in ["where", "location", "city", "country"]):
            base_prompt += """
- If asking for location: provide ONLY the location name (e.g., "Paris", "USA", "New York")
- No additional descriptors unless specifically requested
"""
        else:
            base_prompt += """
- Provide ONLY the direct answer to the question
- No explanations, context, or additional information
- Be as concise as possible while being accurate
"""

        base_prompt += """

EXAMPLES OF CORRECT FORMAT:
- Question: "How many albums?" → Answer: "5"
- Question: "What is the opposite of left?" → Answer: "right"  
- Question: "True or false?" → Answer: "true"
- Question: "Who discovered X?" → Answer: "Einstein"
- Question: "Which city?" → Answer: "London"

Extract the precise answer NOW:"""

        return base_prompt

    def _clean_answer(self, raw_answer: str, question_type: str) -> str:
        """Clean and format the extracted answer.

        Strips boilerplate prefixes, wrapping quotes and trailing
        punctuation, then applies question-type-specific normalization.
        """

        answer = raw_answer.strip()

        prefixes_to_remove = (
            "the answer is",
            "answer:",
            "final answer:",
            "result:",
            "response:",
            "conclusion:",
        )

        # Strip prefixes until stable: a single pass misses stacked forms
        # such as "Final answer: Answer: 5", because an earlier strip can
        # re-expose a prefix that was already checked.
        stripped = True
        while stripped:
            stripped = False
            for prefix in prefixes_to_remove:
                if answer.lower().startswith(prefix):
                    answer = answer[len(prefix):].strip()
                    stripped = True

        # Alternately remove wrapping quotes and trailing punctuation until
        # stable, so forms like '"42".' normalize fully. The len > 1 guard
        # prevents a lone quote character from collapsing to "".
        while True:
            before = answer
            if len(answer) > 1 and answer[0] == answer[-1] and answer[0] in ('"', "'"):
                answer = answer[1:-1].strip()
            answer = answer.rstrip('.,!?;:').strip()
            if answer == before:
                break

        # Handle specific formatting based on question type.
        if "mathematical" in question_type.lower():
            # Keep only the first (possibly signed, possibly decimal) number.
            number_match = re.search(r'-?\d+(?:\.\d+)?', answer)
            if number_match:
                answer = number_match.group()

        elif "text_manipulation" in question_type.lower():
            # Single-word answers are lowercased for consistency.
            if len(answer.split()) == 1:
                answer = answer.lower()

        return answer.strip()

    def _validate_answer(self, answer: str, question_type: str) -> Dict[str, Any]:
        """Validate the extracted answer format.

        Returns:
            Dict with a "status" label and a heuristic "confidence" score;
            shorter, type-conforming answers score higher.
        """

        if not answer:
            return {"status": "empty_answer", "confidence": 0.0}

        # GAIA answers should be concise; very long output is suspect.
        if len(answer) > 100:
            return {"status": "too_long", "confidence": 0.3}

        # Type-specific validation.
        if "mathematical" in question_type.lower():
            if re.match(r'^-?\d+(?:\.\d+)?$', answer):
                return {"status": "valid_number", "confidence": 0.9}
            else:
                return {"status": "invalid_number_format", "confidence": 0.5}

        elif "yes_no" in question_type.lower():
            if answer.lower() in ["yes", "no", "true", "false"]:
                return {"status": "valid_boolean", "confidence": 0.9}
            else:
                return {"status": "invalid_boolean_format", "confidence": 0.4}

        # General validation - prefer short, direct answers.
        if len(answer) <= 20:
            return {"status": "concise_answer", "confidence": 0.8}
        elif len(answer) <= 50:
            return {"status": "moderate_length", "confidence": 0.6}
        else:
            return {"status": "long_answer", "confidence": 0.4}