# layers/__init__.py
"""
Layers package initialization.
Three-Layer Architecture Components:
- Layer 1: IntentParser (LLM-based semantic understanding)
- Layer 2: RegulatoryDecisionEngine (Rule-based scientific decisions)
- Layer 3: ExplanationGenerator (LLM-based narrative generation)
Legacy Components (still available):
- ProfessionalAnalyzer (original 4-phase LLM pipeline)
"""
from .input_normalizer import InputNormalizer
from .prompt_orchestrator import PromptOrchestrator, PromptPackage, PromptChain
from .model_invoker import ModelInvoker, ModelResponse, ModelInvokerFactory
from .output_normalizer import OutputNormalizer
from .llm_providers import (
LLMProvider,
LLMConfig,
BaseLLMClient,
create_llm_client,
get_available_providers,
)
# Three-Layer Architecture Components (v2.0).
# These modules are optional at deploy time: when any of them is missing,
# expose None placeholders so `from layers import X` never raises, and
# record availability in the private _HAS_THREE_LAYER flag.
try:
    from .intent_parser import IntentParser
    from .regulatory_decision_engine import RegulatoryDecisionEngine
    from .explanation_generator import ExplanationGenerator
except ImportError:
    # One or more v2.0 modules are not deployed — fall back to placeholders.
    IntentParser = None
    RegulatoryDecisionEngine = None
    ExplanationGenerator = None
    _HAS_THREE_LAYER = False
else:
    # All three layers imported cleanly.
    _HAS_THREE_LAYER = True
# Legacy: Professional Analyzer (may not be deployed).
# Mirrors the optional-import pattern above: missing module degrades to
# None placeholders and a False availability flag instead of raising.
try:
    from .professional_analyzer import ProfessionalAnalyzer, professional_analyzer
except ImportError:
    # Legacy analyzer not present in this deployment.
    ProfessionalAnalyzer = None
    professional_analyzer = None
    _HAS_PROFESSIONAL_ANALYZER = False
else:
    _HAS_PROFESSIONAL_ANALYZER = True
# Public API of the layers package, grouped by architectural tier.
# Note: the optional names (three-layer and legacy groups) may be bound to
# None when their modules are not deployed — they are still exported so
# callers can feature-detect with `is None` checks.
__all__ = [
    # Core infrastructure: normalization, prompting, and model invocation.
    *(
        "InputNormalizer",
        "PromptOrchestrator",
        "PromptPackage",
        "PromptChain",
        "ModelInvoker",
        "ModelResponse",
        "ModelInvokerFactory",
        "OutputNormalizer",
        "LLMProvider",
        "LLMConfig",
        "BaseLLMClient",
        "create_llm_client",
        "get_available_providers",
    ),
    # Three-Layer Architecture (v2.0): optional semantic/decision/narrative layers.
    *(
        "IntentParser",
        "RegulatoryDecisionEngine",
        "ExplanationGenerator",
    ),
    # Legacy: original 4-phase LLM pipeline.
    *(
        "ProfessionalAnalyzer",
        "professional_analyzer",
    ),
]