"""
CriticAgent for SPARKNET - LangChain Version
Reviews and validates outputs against VISTA quality standards
Uses LangChain chains for structured validation and feedback
"""

from typing import Optional, Dict, Any, List
from loguru import logger
import json

from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.messages import HumanMessage, SystemMessage

from .base_agent import BaseAgent, Task, Message
from ..llm.langchain_ollama_client import LangChainOllamaClient
from ..workflow.langgraph_state import ValidationResult


class CriticAgent(BaseAgent):
    """
    Agent specialized in output validation and quality assurance.
    Uses LangChain chains with mistral for balanced analysis.
    Ensures outputs meet VISTA quality standards.
    """

    # VISTA-aligned quality criteria
    QUALITY_CRITERIA = {
        'patent_analysis': {
            'completeness': {
                'weight': 0.30,
                'threshold': 0.90,
                'description': 'Must extract >90% of claims and key information',
            },
            'clarity': {
                'weight': 0.25,
                'threshold': 0.85,
                'description': 'Summaries and explanations must be clear and understandable',
            },
            'actionability': {
                'weight': 0.25,
                'threshold': 0.80,
                'description': 'Must include clear next steps and recommendations',
            },
            'accuracy': {
                'weight': 0.20,
                'threshold': 0.90,
                'description': 'Information must be factually correct',
            },
        },
        'legal_review': {
            'accuracy': {
                'weight': 0.35,
                'threshold': 0.95,
                'description': 'Risk identification must be precise',
            },
            'coverage': {
                'weight': 0.30,
                'threshold': 0.90,
                'description': 'Must check all major clauses and sections',
            },
            'compliance': {
                'weight': 0.25,
                'threshold': 1.00,
                'description': 'GDPR/Law 25 compliance must be 100%',
            },
            'actionability': {
                'weight': 0.10,
                'threshold': 0.85,
                'description': 'Must provide clear remediation steps',
            },
        },
        'stakeholder_matching': {
            'relevance': {
                'weight': 0.35,
                'threshold': 0.85,
                'description': 'Matches must be relevant to objectives',
            },
            'diversity': {
                'weight': 0.20,
                'threshold': 0.75,
                'description': 'Should include diverse perspectives',
            },
            'justification': {
                'weight': 0.25,
                'threshold': 0.80,
                'description': 'Must explain why matches are appropriate',
            },
            'actionability': {
                'weight': 0.20,
                'threshold': 0.85,
                'description': 'Must include concrete next steps',
            },
        },
        'general': {
            'completeness': {
                'weight': 0.30,
                'threshold': 0.80,
                'description': 'All required elements present',
            },
            'clarity': {
                'weight': 0.25,
                'threshold': 0.80,
                'description': 'Clear and understandable',
            },
            'accuracy': {
                'weight': 0.25,
                'threshold': 0.85,
                'description': 'Factually correct',
            },
            'actionability': {
                'weight': 0.20,
                'threshold': 0.75,
                'description': 'Provides next steps',
            },
        },
    }
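
    # How these criteria are applied (see validate_output below): each dimension's weight
    # feeds the weighted-average overall_score, while its threshold is a hard per-dimension
    # gate for validity. With the 'general' criteria, scores of completeness=0.90,
    # clarity=0.80, accuracy=0.85, actionability=0.75 give
    # overall = 0.90*0.30 + 0.80*0.25 + 0.85*0.25 + 0.75*0.20 = 0.8325,
    # and the output is valid because every dimension meets its threshold.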

    def __init__(
        self,
        llm_client: LangChainOllamaClient,
        memory_agent: Optional['MemoryAgent'] = None,
        temperature: float = 0.6,
    ):
        """
        Initialize CriticAgent with LangChain client.

        Args:
            llm_client: LangChain Ollama client
            memory_agent: Optional memory agent for context
            temperature: LLM temperature for validation
        """
        self.llm_client = llm_client
        self.memory_agent = memory_agent
        self.temperature = temperature

        # Create validation chains
        self.validation_chain = self._create_validation_chain()
        self.feedback_chain = self._create_feedback_chain()

        # Store for backward compatibility
        self.name = "CriticAgent"
        self.description = "Output validation and quality assurance"

        logger.info(f"Initialized CriticAgent with LangChain (complexity: analysis)")

    def _create_validation_chain(self):
        """
        Create LangChain chain for output validation.

        Returns:
            Runnable chain: prompt | llm | parser
        """
        system_template = """You are a critical analysis agent for research valorization outputs.

Your role is to:
1. Review outputs from other agents objectively
2. Identify errors, inconsistencies, or gaps
3. Assess quality against specific criteria
4. Provide constructive feedback for improvement
5. Ensure alignment with VISTA project objectives

When reviewing output, evaluate:
- Completeness: Are all required elements present?
- Clarity: Is it easy to understand?
- Accuracy: Is the information correct?
- Actionability: Does it provide clear next steps?
- Relevance: Does it address the original task?

Be thorough but fair. Focus on constructive feedback that helps improve quality.

Output your assessment as JSON with this structure:
{{
  "dimension_scores": {{"completeness": 0.85, "clarity": 0.90, ...}},
  "issues": ["Issue 1", "Issue 2"],
  "suggestions": ["Suggestion 1", "Suggestion 2"],
  "details": {{}}
}}"""

        human_template = """Review the following output and assess its quality.

ORIGINAL TASK:
{task_description}

OUTPUT TO REVIEW:
{output_text}

QUALITY CRITERIA:
{criteria_text}

For each criterion, score from 0.0 to 1.0:
- 1.0 = Perfect
- 0.8-0.9 = Good, minor improvements possible
- 0.6-0.7 = Acceptable, some issues
- 0.4-0.5 = Poor, significant issues
- < 0.4 = Unacceptable

Provide:
1. Score for each dimension (dimension_scores)
2. List of specific issues found (issues)
3. Concrete suggestions for improvement (suggestions)
4. Additional details if needed (details)

Output JSON only."""

        prompt = ChatPromptTemplate.from_messages([
            ("system", system_template),
            ("human", human_template)
        ])

        # Use analysis model for validation
        llm = self.llm_client.get_llm(complexity="analysis", temperature=self.temperature)

        # JSON output parser
        parser = JsonOutputParser()

        # Create chain
        chain = prompt | llm | parser

        return chain

    def _create_feedback_chain(self):
        """
        Create LangChain chain for generating constructive feedback.

        Returns:
            Runnable chain for feedback generation
        """
        system_template = """You are an expert at providing constructive feedback for improvement.

Your role is to:
1. Analyze validation results and identify key issues
2. Generate specific, actionable improvement suggestions
3. Prioritize suggestions by impact
4. Explain why each suggestion matters
5. Be encouraging while being honest about problems

Focus on feedback that:
- Is specific and concrete
- Can be acted upon immediately
- Addresses root causes, not symptoms
- Builds on strengths while fixing weaknesses"""

        human_template = """Generate constructive feedback for the following output.

VALIDATION RESULTS:
- Overall Score: {overall_score}
- Issues: {issues}
- Dimension Scores: {dimension_scores}

ORIGINAL OUTPUT:
{output_text}

Provide prioritized suggestions for improvement. Output as JSON:
{{
  "priority_suggestions": ["Most important suggestion", "Second priority", ...],
  "strengths": ["What worked well", ...],
  "weaknesses": ["What needs improvement", ...],
  "next_steps": ["Specific action 1", "Specific action 2", ...]
}}"""

        prompt = ChatPromptTemplate.from_messages([
            ("system", system_template),
            ("human", human_template)
        ])

        llm = self.llm_client.get_llm(complexity="analysis", temperature=self.temperature)
        parser = JsonOutputParser()

        chain = prompt | llm | parser

        return chain

    async def process_task(self, task: Task) -> Task:
        """
        Process validation task.

        Args:
            task: Task containing output to validate

        Returns:
            Updated task with validation result
        """
        logger.info(f"CriticAgent validating output for task: {task.id}")
        task.status = "in_progress"

        try:
            # Extract output to validate from task metadata
            if not task.metadata or 'output_to_validate' not in task.metadata:
                raise ValueError("No output provided for validation")

            output = task.metadata['output_to_validate']
            output_type = task.metadata.get('output_type', 'general')
            criteria_override = task.metadata.get('criteria')

            # Validate the output
            validation_result = await self.validate_output(
                output=output,
                task=task,
                output_type=output_type,
                criteria=criteria_override,
            )

            # Store result
            task.result = validation_result
            task.status = "completed"

            logger.info(f"Validation completed: {validation_result.overall_score:.2f} score")

        except Exception as e:
            logger.error(f"Validation failed: {e}")
            task.status = "failed"
            task.error = str(e)

        return task

    async def validate_output(
        self,
        output: Any,
        task: Task,
        output_type: str = 'general',
        criteria: Optional[Dict[str, Any]] = None,
    ) -> ValidationResult:
        """
        Validate output against quality criteria using LangChain.

        Args:
            output: Output to validate (can be str, dict, list, etc.)
            task: Original task that produced this output
            output_type: Type of output (determines criteria)
            criteria: Optional custom criteria

        Returns:
            ValidationResult with score, issues, and suggestions
        """
        # Get quality criteria
        if criteria is None:
            criteria = self.QUALITY_CRITERIA.get(output_type, self.QUALITY_CRITERIA['general'])

        # Convert output to string for LLM analysis
        if isinstance(output, (dict, list)):
            output_str = json.dumps(output, indent=2)
        else:
            output_str = str(output)

        # Truncate long outputs so the validation prompt stays within the model's context window
        output_str = output_str[:2000]

        # Build criteria description
        criteria_desc = []
        for dim, props in criteria.items():
            criteria_desc.append(
                f"- {dim.capitalize()} (threshold: {props['threshold']:.0%}): {props['description']}"
            )
        criteria_text = "\n".join(criteria_desc)

        try:
            # Invoke validation chain
            result = await self.validation_chain.ainvoke({
                "task_description": task.description,
                "output_text": output_str,
                "criteria_text": criteria_text
            })

            # Extract scores
            dimension_scores = result.get('dimension_scores', {})

            # Calculate overall score (weighted average)
            total_weight = sum(props['weight'] for props in criteria.values())
            overall_score = 0.0

            for dim, props in criteria.items():
                score = dimension_scores.get(dim, 0.0)
                weight = props['weight']
                overall_score += score * weight

            if total_weight > 0:
                overall_score /= total_weight

            # Determine validity (all dimensions must meet threshold)
            valid = all(
                dimension_scores.get(dim, 0.0) >= props['threshold']
                for dim, props in criteria.items()
            )

            # Create ValidationResult
            validation_result = ValidationResult(
                valid=valid,
                overall_score=overall_score,
                dimension_scores=dimension_scores,
                issues=result.get('issues', []),
                suggestions=result.get('suggestions', []),
                details=result.get('details', {}),
            )

            return validation_result

        except Exception as e:
            logger.error(f"Failed to validate with LangChain: {e}")
            logger.debug(f"Output was: {output_str[:500]}")

            # Return a default "failed validation" result
            return ValidationResult(
                valid=False,
                overall_score=0.0,
                dimension_scores={},
                issues=[f"Failed to validate: {str(e)}"],
                suggestions=["Re-run validation with clearer output"],
                details={'error': str(e)},
            )

    async def suggest_improvements(
        self,
        validation_result: ValidationResult,
        original_output: Any,
    ) -> List[str]:
        """
        Generate actionable improvement suggestions using LangChain.

        Args:
            validation_result: Previous validation result
            original_output: The output that was validated

        Returns:
            List of improvement suggestions
        """
        if validation_result.valid and validation_result.overall_score >= 0.9:
            return ["Output is excellent. No major improvements needed."]

        # Use existing suggestions if available
        if validation_result.suggestions:
            return validation_result.suggestions

        # Generate new suggestions using feedback chain
        try:
            output_str = str(original_output)[:1000]
            
            result = await self.feedback_chain.ainvoke({
                "overall_score": f"{validation_result.overall_score:.2f}",
                "issues": ", ".join(validation_result.issues),
                "dimension_scores": json.dumps(validation_result.dimension_scores),
                "output_text": output_str
            })

            suggestions = result.get('priority_suggestions', [])
            next_steps = result.get('next_steps', [])

            return suggestions + next_steps

        except Exception as e:
            logger.error(f"Failed to generate suggestions: {e}")
            
            # Fallback: Generate suggestions from issues
            suggestions = []
            for issue in validation_result.issues:
                suggestions.append(f"Address: {issue}")

            # Add dimension-specific suggestions
            for dim, score in validation_result.dimension_scores.items():
                if score < 0.8:
                    suggestions.append(f"Improve {dim}: Current score {score:.2f}, aim for >0.80")

            return suggestions

    def get_feedback_for_iteration(
        self,
        validation_result: ValidationResult,
    ) -> str:
        """
        Format validation feedback for iterative improvement.

        Args:
            validation_result: Validation result

        Returns:
            Formatted feedback string
        """
        feedback_parts = []

        # Overall assessment
        if validation_result.valid:
            feedback_parts.append(f"✓ Output is VALID (score: {validation_result.overall_score:.2f})")
        else:
            feedback_parts.append(f"✗ Output is INVALID (score: {validation_result.overall_score:.2f})")

        # Dimension scores
        feedback_parts.append("\nQuality Dimensions:")
        for dim, score in validation_result.dimension_scores.items():
            status = "✓" if score >= 0.8 else "✗"
            feedback_parts.append(f"  {status} {dim.capitalize()}: {score:.2f}")

        # Issues
        if validation_result.issues:
            feedback_parts.append("\nIssues Found:")
            for i, issue in enumerate(validation_result.issues, 1):
                feedback_parts.append(f"  {i}. {issue}")

        # Suggestions
        if validation_result.suggestions:
            feedback_parts.append("\nSuggestions for Improvement:")
            for i, suggestion in enumerate(validation_result.suggestions, 1):
                feedback_parts.append(f"  {i}. {suggestion}")

        return "\n".join(feedback_parts)

    def get_vista_criteria(self, output_type: str) -> Dict[str, Any]:
        """
        Get VISTA quality criteria for a specific output type.

        Args:
            output_type: Type of output

        Returns:
            Quality criteria dictionary
        """
        return self.QUALITY_CRITERIA.get(output_type, self.QUALITY_CRITERIA['general'])
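
# Example usage (a minimal sketch, not part of the agent API). It assumes that
# LangChainOllamaClient and Task can be constructed as shown; check their modules
# for the actual constructor signatures before copying this verbatim.
#
#     client = LangChainOllamaClient()
#     critic = CriticAgent(llm_client=client)
#
#     task = Task(
#         description="Analyze patent claims for licensing potential",
#         metadata={
#             "output_to_validate": {"summary": "...", "claims": ["..."]},
#             "output_type": "patent_analysis",
#         },
#     )
#
#     task = await critic.process_task(task)  # run inside an async context
#     if task.status == "completed":
#         print(critic.get_feedback_for_iteration(task.result))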