# YJ/pipeline/meaning_inference.py
from __future__ import annotations

import json
from typing import Dict, Any

from .models import (
    MeaningInferenceResult,
    EcholaliaDetectionResult,
    UtteranceAnalysisRequest,
    UserProfile,
    DialogueTurn,
)
from .llm_utils import load_prompt, call_llm_json

# Prompt template for the meaning-inference step, loaded once at import time.
MEANING_PROMPT_TEMPLATE = load_prompt("meaning")


def run_meaning_inference(
    preprocessed: Dict[str, Any],
    echolalia: EcholaliaDetectionResult,
) -> MeaningInferenceResult:
    """Infer the meaning of the analysed utterances with a single LLM call."""
    request: UtteranceAnalysisRequest = preprocessed["request"]
    profile: UserProfile = preprocessed["profile"]
    normalized_dialogue: list[DialogueTurn] = preprocessed["normalized_dialogue"]

    # Render the dialogue turns as plain text for the prompt.
    dialogue_str = "\n".join(
        f"[{t.turn_index}] {t.speaker}: {t.utterance}"
        for t in normalized_dialogue
    )

    # Embed the result model's JSON schema so the LLM returns a parseable structure.
    schema = MeaningInferenceResult.model_json_schema()
    schema_json = json.dumps(schema, ensure_ascii=False, indent=2)

    prompt = MEANING_PROMPT_TEMPLATE.format(
        profile=json.dumps(profile.model_dump(), indent=2, ensure_ascii=False),
        context_description=request.context_description,
        normalized_dialogue=dialogue_str,
        echolalia_detection=json.dumps(echolalia.model_dump(), indent=2, ensure_ascii=False),
        json_schema=schema_json,
    )

    # Call the LLM in JSON mode and validate the response against the result model.
    raw = call_llm_json(prompt, model="gpt-4.1-mini")
    return MeaningInferenceResult(**raw)
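

# Example usage (a minimal sketch, not part of this module). The upstream helpers
# named below are hypothetical stand-ins for whatever pipeline steps build the
# `preprocessed` dict ("request", "profile", "normalized_dialogue") and the
# EcholaliaDetectionResult that run_meaning_inference consumes:
#
#     from .preprocessing import run_preprocessing              # hypothetical
#     from .echolalia_detection import run_echolalia_detection  # hypothetical
#
#     preprocessed = run_preprocessing(request)
#     echolalia = run_echolalia_detection(preprocessed)
#     meaning = run_meaning_inference(preprocessed, echolalia)
#     print(meaning.model_dump_json(indent=2))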