File size: 4,264 Bytes
402a62d
f504c37
 
402a62d
f504c37
402a62d
 
 
 
 
 
 
 
 
f504c37
402a62d
 
 
 
 
 
 
 
 
 
 
 
 
 
f504c37
402a62d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f504c37
 
402a62d
 
f504c37
402a62d
 
 
 
 
f504c37
402a62d
 
f504c37
402a62d
 
 
 
 
 
 
 
 
f504c37
402a62d
 
 
f504c37
 
402a62d
 
f504c37
 
 
402a62d
 
f504c37
 
 
 
402a62d
 
 
 
f504c37
 
402a62d
 
f504c37
402a62d
 
 
f504c37
402a62d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
"""
Processor Base com integração LLMManager REAL
Classe abstrata para processors que usam LLM
"""
import logging
import time
from abc import ABC, abstractmethod
from datetime import datetime
from typing import Any, Dict, Optional

logger = logging.getLogger(__name__)


class ProcessorLLMBase(ABC):
    """
    Base processor that integrates with an LLM through a client
    (GroqClient, OpenAIClient, etc.).

    Replaces hardcoded outputs with real prompts and LLM calls.
    Subclasses implement ``process``, ``get_prompt`` and ``validate``.
    """

    def __init__(
        self,
        specialist_id: int,
        specialist_name: str,
        llm_client: Optional[Any] = None
    ) -> None:
        """
        Args:
            specialist_id: Specialist ID (1-9).
            specialist_name: Descriptive name.
            llm_client: LLM client (GroqClient, OpenAIClient, etc.).
                Must expose ``generate(prompt, model, temperature, max_tokens)``.
                When None, a warning is recorded and ``call_llm`` returns "".
        """
        self.specialist_id = specialist_id
        self.specialist_name = specialist_name
        self.llm_client = llm_client
        # Cumulative time (seconds) spent inside call_llm across all calls.
        self.execution_time: float = 0.0
        # Confidence score in [0, 100]; set via set_confidence().
        self.confidence_score: int = 0
        self.errors: list = []
        self.warnings: list = []

        if not llm_client:
            self.add_warning("LLM client não configurado - usando fallback mock")

    @abstractmethod
    def process(self, acordao_data: Dict[str, Any]) -> Dict[str, Any]:
        """Processes a ruling (acórdão) using the real LLM."""
        pass

    @abstractmethod
    def get_prompt(self, acordao_data: Dict[str, Any]) -> str:
        """Returns the prompt to send to the LLM."""
        pass

    @abstractmethod
    def validate(self, result: Dict[str, Any]) -> bool:
        """Validates the processing result."""
        pass

    def call_llm(
        self,
        prompt: str,
        max_tokens: int = 2048,
        temperature: float = 0.3,
        model: Optional[str] = None
    ) -> str:
        """
        Performs a real LLM call through the configured client.

        Args:
            prompt: Prompt to send.
            max_tokens: Maximum number of tokens.
            temperature: Temperature (0-1).
            model: Specific model (optional; client default when None).

        Returns:
            LLM response text, or "" when no client is configured or the
            call fails (the failure is recorded in ``self.errors``).
        """
        if not self.llm_client:
            self.add_error("LLM client não disponível")
            return ""

        try:
            # perf_counter is monotonic: immune to system-clock adjustments,
            # unlike arithmetic on datetime.now().
            start_time = time.perf_counter()

            # Lazy %-style args: formatting is skipped when INFO is disabled.
            logger.info(
                "[%s] 🤖 Chamando LLM... (max_tokens=%s, temp=%s)",
                self.specialist_name, max_tokens, temperature
            )

            # Real LLM call via the client.
            # GroqClient.generate() returns an LLMResponse with .content.
            response = self.llm_client.generate(
                prompt=prompt,
                model=model,  # optional; client default when None
                temperature=temperature,
                max_tokens=max_tokens
            )

            # Extract the response content; fall back to str() for clients
            # that return a plain string instead of an LLMResponse.
            content = response.content if hasattr(response, 'content') else str(response)

            elapsed = time.perf_counter() - start_time
            self.execution_time += elapsed

            logger.info(
                "[%s] ✅ LLM respondeu em %.2fs (%d chars)",
                self.specialist_name, elapsed, len(content)
            )

            return content

        except Exception as e:
            # Best-effort contract: record the error and return "" so the
            # caller's pipeline keeps running; full traceback goes to the log.
            self.add_error(f"Erro ao chamar LLM: {e}")
            logger.error(
                "[%s] ❌ Erro LLM: %s", self.specialist_name, e, exc_info=True
            )
            return ""

    def add_error(self, error_msg: str) -> None:
        """Records an error message."""
        self.errors.append(error_msg)

    def add_warning(self, warning_msg: str) -> None:
        """Records a warning message."""
        self.warnings.append(warning_msg)

    def set_confidence(self, score: int) -> None:
        """
        Sets the confidence score (0-100).

        Out-of-range values leave the score unchanged; a warning is
        recorded instead of failing silently.
        """
        if 0 <= score <= 100:
            self.confidence_score = score
        else:
            self.add_warning(f"Confidence score fora do intervalo (0-100): {score}")

    def postprocess(self, result: Dict[str, Any]) -> Dict[str, Any]:
        """
        Wraps the specialist result in the standard response envelope.

        Returns:
            Dict with specialist identity, the raw result, accumulated
            timing/confidence metrics, errors, warnings and a timestamp.
        """
        return {
            "specialist_id": self.specialist_id,
            "specialist_name": self.specialist_name,
            "result": result,
            "execution_time": self.execution_time,
            "confidence_score": self.confidence_score,
            "errors": self.errors,
            "warnings": self.warnings,
            "timestamp": datetime.now().isoformat()
        }