"""deliberation node — GPT peer review of Gemini's compliance analysis."""
|
| | from __future__ import annotations
|
| |
|
| | from datetime import datetime
|
| |
|
| | from openai import OpenAI
|
| |
|
| | from config import DELIBERATION_MODEL, OPENAI_API_KEY
|
| | from prompts.deliberation import DELIBERATION_SYSTEM_PROMPT
|
| | from state import AgentMessage, ComplianceState
|
| | from tools.image_store import ImageStore
|
| |
|
| |
|
def deliberation(state: ComplianceState, image_store: ImageStore) -> dict:
    """Send the compliance analysis, evidence images, and code report to GPT for peer review.

    Builds a multimodal chat request containing the user's question, the
    legal requirements (code report), the analyst's compliance findings, and
    the same cropped images the analyst examined, then records the reviewer's
    response in the discussion log.

    Args:
        state: Workflow state; reads "question", "compliance_analysis",
            "code_report", and "image_refs".
        image_store: Used to encode each image ref as an OpenAI base64
            content part.

    Returns:
        A partial state update with "reviewer_analysis", "discussion_log"
        (a single AgentMessage), and "status_message".
    """
    question = state["question"]
    compliance_analysis = state.get("compliance_analysis", "")
    code_report = state.get("code_report", "")
    image_refs = state.get("image_refs", [])

    # Nothing to review — short-circuit with an explanatory log entry
    # instead of calling the model with an empty analysis.
    if not compliance_analysis:
        return {
            "reviewer_analysis": "",
            "discussion_log": [
                AgentMessage(
                    timestamp=datetime.now().strftime("%H:%M:%S"),
                    agent="reviewer",
                    action="review",
                    summary="No analysis to review.",
                    detail="",
                    evidence_refs=[],
                )
            ],
            "status_message": ["No analysis to review."],
        }

    client = OpenAI(api_key=OPENAI_API_KEY)

    # Assemble the multimodal user message: question, requirements,
    # analyst findings, then each evidence image with a text label.
    user_content: list[dict] = [
        {"type": "text", "text": f"USER COMPLIANCE QUESTION: {question}"},
        {"type": "text", "text": f"\n=== LEGAL REQUIREMENTS ===\n{code_report}"},
        {"type": "text", "text": f"\n=== ANALYST'S COMPLIANCE FINDINGS ===\n{compliance_analysis}"},
        {"type": "text", "text": "\nBELOW ARE THE SAME CROPPED IMAGES THE ANALYST EXAMINED:"},
    ]

    for ref in image_refs:
        user_content.append({"type": "text", "text": f"\nImage: {ref['label']}"})
        try:
            user_content.append(image_store.to_openai_base64(ref))
        except Exception as e:
            # Best-effort: a single unloadable image should not abort the
            # whole review — surface the failure to the reviewer instead.
            user_content.append(
                {"type": "text", "text": f"(Could not load image: {e})"}
            )

    user_content.append(
        {"type": "text", "text": "\nPerform your peer review of the compliance determination."}
    )

    response = client.chat.completions.create(
        model=DELIBERATION_MODEL,
        messages=[
            {"role": "system", "content": DELIBERATION_SYSTEM_PROMPT},
            {"role": "user", "content": user_content},
        ],
    )

    # content may be None on some responses; normalize to "".
    review_text = response.choices[0].message.content or ""

    # Fix: append "..." only when the preview is actually truncated.
    # Previously the ellipsis was unconditional, yielding a misleading
    # "Peer review complete. ..." for short or empty reviews.
    preview = review_text[:100] + ("..." if len(review_text) > 100 else "")
    discussion_msg = AgentMessage(
        timestamp=datetime.now().strftime("%H:%M:%S"),
        agent="reviewer",
        action="review",
        summary=f"Peer review complete. {preview}",
        detail=review_text[:1500],  # cap stored detail to keep the log compact
        evidence_refs=[],
    )

    return {
        "reviewer_analysis": review_text,
        "discussion_log": [discussion_msg],
        "status_message": ["Deliberation/peer review complete."],
    }
|
| |
|